Backed out changeset a10cdf32fb5a (bug 1552549) for causing a spike in xpcshell failures. CLOSED TREE

Cosmin Sabou 2019-05-22 02:05:22 +03:00
Parent 0e7add25c1
Commit bdf1d2a559
82 changed files: 2915 additions and 5721 deletions

55
Cargo.lock generated

@ -1211,7 +1211,7 @@ dependencies = [
"malloc_size_of 0.0.1",
"nsstring 0.1.0",
"num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"selectors 0.21.0",
"servo_arc 0.1.1",
"smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1280,7 +1280,7 @@ dependencies = [
"rsdparsa_capi 0.1.0",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"storage 0.1.0",
"u2fhid 0.2.4",
"u2fhid 0.2.3",
"webrender_bindings 0.1.0",
"xpcom 0.1.0",
"xulstore 0.1.0",
@ -1291,7 +1291,7 @@ name = "gkrust_utils"
version = "0.1.0"
dependencies = [
"nsstring 0.1.0",
"uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
"uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1648,10 +1648,11 @@ dependencies = [
[[package]]
name = "lock_api"
version = "0.2.0"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1889,7 +1890,7 @@ dependencies = [
"nserror 0.1.0",
"nsstring 0.1.0",
"url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
"uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"xpcom 0.1.0",
]
@ -2118,25 +2119,20 @@ dependencies = [
[[package]]
name = "parking_lot"
version = "0.8.0"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lock_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "parking_lot_core"
version = "0.5.0"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
@ -2531,7 +2527,7 @@ dependencies = [
"serde 1.0.88 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_derive 1.0.88 (git+https://github.com/servo/serde?branch=deserialize_from_enums10)",
"url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)",
"uuid 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -2630,11 +2626,6 @@ name = "scopeguard"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "scopeguard"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "scroll"
version = "0.9.2"
@ -2898,7 +2889,7 @@ dependencies = [
"num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ordered-float 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)",
"precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"rayon 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3299,7 +3290,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "u2fhid"
version = "0.2.4"
version = "0.2.3"
dependencies = [
"bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
"boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -3308,7 +3299,7 @@ dependencies = [
"libc 0.2.51 (registry+https://github.com/rust-lang/crates.io-index)",
"libudev 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
"runloop 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)",
]
@ -3388,11 +3379,8 @@ dependencies = [
[[package]]
name = "uuid"
version = "0.7.4"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "vcpkg"
@ -3863,7 +3851,7 @@ dependencies = [
"checksum linked-hash-map 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "70fb39025bc7cdd76305867c4eccf2f2dcf6e9a57f5b21a93e1c2d86cd03ec9e"
"checksum lmdb-rkv 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1452294309db7977dc75e1e8135a8c654d9e52e04ff0c0bd06c880897a91defd"
"checksum lmdb-rkv-sys 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "1470e0168f1832e35afd6d0931ae60db625685332837b97aa156773ec9c5e393"
"checksum lock_api 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ed946d4529956a20f2d63ebe1b69996d5a2137c91913fe3ebbeff957f5bca7ff"
"checksum lock_api 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"
"checksum log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
"checksum log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c84ec4b527950aa83a329754b01dbe3f58361d1c5efacd1f6d68c494d08a17c6"
"checksum lzw 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d947cbb889ed21c2a84be6ffbaebf5b4e0f4340638cba0444907e38b56be084"
@ -3903,8 +3891,8 @@ dependencies = [
"checksum ordermap 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a86ed3f5f244b372d6b1a00b72ef7f8876d0bc6a78a4c9985c53614041512063"
"checksum owning_ref 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13"
"checksum packed_simd 0.3.3 (git+https://github.com/hsivonen/packed_simd?branch=rust_1_32)" = "<none>"
"checksum parking_lot 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fa7767817701cce701d5585b9c4db3cdd02086398322c1d7e8bf5094a96a2ce7"
"checksum parking_lot_core 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cb88cb1cb3790baa6776844f968fea3be44956cf184fa1be5a03341f5491278c"
"checksum parking_lot 0.6.3 (registry+https://github.com/rust-lang/crates.io-index)" = "69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b"
"checksum parking_lot_core 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa"
"checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
"checksum percent-encoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de154f638187706bde41d9b4738748933d64e6b37bdbffc0b47a97d16a6ae356"
"checksum petgraph 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3659d1ee90221741f65dd128d9998311b0e40c5d3c23a62445938214abce4f"
@ -3959,7 +3947,6 @@ dependencies = [
"checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
"checksum scoped_threadpool 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "1d51f5df5af43ab3f1360b429fa5e0152ac5ce8c0bd6485cae490332e96846a8"
"checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"
"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
"checksum scroll 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f84d114ef17fd144153d608fba7c446b0145d038985e7a8cc5d08bb0ce20383"
"checksum scroll_derive 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)" = "8f1aa96c45e7f5a91cb7fabe7b279f02fea7126239fc40b732316e8b6a2d0fcb"
"checksum semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537"
@ -4025,7 +4012,7 @@ dependencies = [
"checksum url 1.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dd4e7c0d531266369519a4aa4f399d748bd37043b00bde1e4ff1f60a120b355a"
"checksum utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "662fab6525a98beff2921d7f61a39e7d59e0b425ebc7d0d9e66d316e55124122"
"checksum uuid 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e1436e58182935dcd9ce0add9ea0b558e8a87befe01c1a301e6020aeb0876363"
"checksum uuid 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a"
"checksum uuid 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "dab5c5526c5caa3d106653401a267fed923e7046f35895ffcb5ca42db64942e6"
"checksum vcpkg 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9e0a7d8bed3178a8fb112199d466eeca9ed09a14ba8ad67718179b4fd5487d0b"
"checksum vec_map 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "887b5b631c2ad01628bbbaa7dd4c869f80d3186688f8d0b6f58774fbe324988c"
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"


@ -1,6 +1,6 @@
[package]
name = "u2fhid"
version = "0.2.4"
version = "0.2.3"
authors = ["Kyle Machulis <kyle@nonpolynomial.com>", "J.C. Jones <jc@mozilla.com>", "Tim Taubert <ttaubert@mozilla.com>"]
[target.'cfg(target_os = "linux")'.dependencies]
@ -23,7 +23,7 @@ features = [
]
[dependencies]
rand = "0.6"
rand = "0.3"
log = "0.4"
libc = "^0.2"
boxfnonce = "0.0.3"


@ -6,7 +6,7 @@
extern crate std;
use rand::{thread_rng, RngCore};
use rand::{thread_rng, Rng};
use std::ffi::CString;
use std::io;
use std::io::{Read, Write};
@ -214,7 +214,7 @@ where
#[cfg(test)]
mod tests {
use rand::{thread_rng, RngCore};
use rand::{thread_rng, Rng};
use super::{init_device, send_apdu, sendrecv, U2FDevice};
use consts::{CID_BROADCAST, SW_NO_ERROR, U2FHID_INIT, U2FHID_MSG, U2FHID_PING};
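The import flip above follows the rand API: rand 0.3 exposes `fill_bytes` on the `Rng` trait, while rand 0.6 (from the backed-out change) moved it to `RngCore`, so only the `use` line differs. A minimal sketch of a call site that compiles against rand 0.3 after the backout; the `nonce` helper is hypothetical, not part of u2fhid:

```rust
use rand::{thread_rng, Rng}; // rand 0.3: fill_bytes lives on Rng

// Hypothetical helper; with rand 0.6 only the import would change,
// to `use rand::{thread_rng, RngCore};`.
fn nonce() -> [u8; 8] {
    let mut buf = [0u8; 8];
    thread_rng().fill_bytes(&mut buf); // fill the buffer with random bytes
    buf
}
```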


@ -8,4 +8,4 @@ url = "1.7.2"
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }
xpcom = { path = "../../../xpcom/rust/xpcom" }
uuid = { version = "0.7.2", features = ["v4"] }
uuid = { version = "0.6", features = ["v4"] }


@ -187,8 +187,6 @@ Please commit or stash these changes before vendoring, or re-run with `--ignore-
RUNTIME_LICENSE_FILE_PACKAGE_WHITELIST = {
# MIT
'deque': '6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb',
# we're whitelisting this fuchsia crate because it doesn't get built in the final product but has a license-file that needs ignoring
'fuchsia-cprng' : '03b114f53e6587a398931762ee11e2395bfdba252a329940e2c8c9e81813845b',
}
@staticmethod


@ -55,7 +55,7 @@ num-traits = "0.2"
num-derive = "0.2"
ordered-float = "1.0"
owning_ref = "0.4"
parking_lot = "0.8"
parking_lot = "0.6"
precomputed-hash = "0.1.1"
rayon = "1"
selectors = { path = "../selectors" }


@ -22,7 +22,7 @@ log = {version = "0.4", features = ["release_max_level_info"]}
malloc_size_of = {path = "../../components/malloc_size_of"}
nsstring = {path = "../../../xpcom/rust/nsstring/"}
num-traits = "0.2"
parking_lot = "0.8"
parking_lot = "0.6"
selectors = {path = "../../components/selectors"}
servo_arc = {path = "../../components/servo_arc"}
smallvec = "0.6"


@ -15,7 +15,7 @@ app_units = "0.7"
cssparser = "0.25"
euclid = "0.19"
html5ever = "0.22"
parking_lot = "0.8"
parking_lot = "0.6"
rayon = "1"
serde_json = "1.0"
selectors = {path = "../../../components/selectors"}


@ -1 +1 @@
{"files":{"Cargo.toml":"4e6804e66f9429156bfe15d0d796baceb73a3f06d358608afcbea95cdf0086ba","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"d9ed1f911f058d066ebfd024940da8a5c1ebbab6cfd65a633dfbc613573dd823","src/mutex.rs":"eeaab6ce6e50aed906bebe598c1b151258327e101eec08b0ff9ccd9c87daddfb","src/remutex.rs":"24cbd5b5b77dd746b065c6d3494dcb2095e81a062341052003b96210a1297ba8","src/rwlock.rs":"a3789a7e820f5c22c8661c4c9e279510a3db50e24894fb380e49dde6b110ddb1"},"package":"ed946d4529956a20f2d63ebe1b69996d5a2137c91913fe3ebbeff957f5bca7ff"}
{"files":{"Cargo.toml":"ab2a7a96105e15de46900fb0da37edbab44e5513a9818672153dae44ed318f7e","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","src/lib.rs":"4a16128f58e3380b22b26b137ee1096732995b7e401f3d227dd7b0738b6bd604","src/mutex.rs":"fee397f72325621812c5f78c7a6b9369ea7ec14e71bb0049678a50349519c0c7","src/remutex.rs":"ed76d7b93a56b6248d79676de2aaa66b607b64f1b773c9dd7326b8324e2bc71a","src/rwlock.rs":"5ab1aab614358cfdaf23e8ff8a0ac5e0c7656b777f385aca2e5422f0aa8f0985"},"package":"62ebf1391f6acad60e5c8b43706dde4582df75c06698ab44511d15016bc2442c"}

12
third_party/rust/lock_api/Cargo.toml vendored

@ -3,7 +3,7 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
@ -11,9 +11,8 @@
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "lock_api"
version = "0.2.0"
version = "0.1.5"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
keywords = ["mutex", "rwlock", "lock", "no_std"]
@ -25,12 +24,7 @@ version = "0.4"
optional = true
[dependencies.scopeguard]
version = "1.0"
default-features = false
[dependencies.serde]
version = "1.0.90"
optional = true
version = "0.3"
default-features = false
[features]

14
third_party/rust/lock_api/src/lib.rs vendored

@ -28,14 +28,14 @@
//!
//! ```
//! use lock_api::{RawMutex, Mutex, GuardSend};
//! use std::sync::atomic::{AtomicBool, Ordering};
//! use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};
//!
//! // 1. Define our raw lock type
//! pub struct RawSpinlock(AtomicBool);
//!
//! // 2. Implement RawMutex for this type
//! unsafe impl RawMutex for RawSpinlock {
//! const INIT: RawSpinlock = RawSpinlock(AtomicBool::new(false));
//! const INIT: RawSpinlock = RawSpinlock(ATOMIC_BOOL_INIT);
//!
//! // A spinlock guard can be sent to another thread and unlocked there
//! type GuardMarker = GuardSend;
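The `ATOMIC_BOOL_INIT` revert in this doc example reflects the older toolchains lock_api 0.1.5 still targeted, before `AtomicBool::new` was reliably usable as a `static` initializer there. Both spellings yield the same value; a side-by-side sketch (note `ATOMIC_BOOL_INIT` is deprecated on modern compilers):

```rust
#![allow(deprecated)] // ATOMIC_BOOL_INIT is deprecated in favor of AtomicBool::new
use std::sync::atomic::{AtomicBool, Ordering, ATOMIC_BOOL_INIT};

static OLD_STYLE: AtomicBool = ATOMIC_BOOL_INIT;       // lock_api 0.1 era
static NEW_STYLE: AtomicBool = AtomicBool::new(false); // lock_api 0.2 era

fn main() {
    // Both statics start out false.
    assert_eq!(OLD_STYLE.load(Ordering::Relaxed), NEW_STYLE.load(Ordering::Relaxed));
}
```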
@ -85,12 +85,14 @@
#![no_std]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![cfg_attr(feature = "nightly", feature(const_fn))]
#[macro_use]
extern crate scopeguard;
#[cfg(feature = "owning_ref")]
extern crate owning_ref;
/// Marker type which indicates that the Guard type for a lock is `Send`.
pub struct GuardSend(());
@ -98,10 +100,10 @@ pub struct GuardSend(());
pub struct GuardNoSend(*mut ());
mod mutex;
pub use crate::mutex::*;
pub use mutex::*;
mod remutex;
pub use crate::remutex::*;
pub use remutex::*;
mod rwlock;
pub use crate::rwlock::*;
pub use rwlock::*;

97
third_party/rust/lock_api/src/mutex.rs vendored

@ -14,9 +14,6 @@ use core::ops::{Deref, DerefMut};
#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Basic operations for a mutex.
///
/// Types implementing this trait can be used by `Mutex` to form a safe and
@ -96,35 +93,6 @@ pub struct Mutex<R: RawMutex, T: ?Sized> {
data: UnsafeCell<T>,
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for Mutex<R, T>
where
R: RawMutex,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for Mutex<R, T>
where
R: RawMutex,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(Mutex::new)
}
}
unsafe impl<R: RawMutex + Send, T: ?Sized + Send> Send for Mutex<R, T> {}
unsafe impl<R: RawMutex + Sync, T: ?Sized + Send> Sync for Mutex<R, T> {}
@ -159,7 +127,7 @@ impl<R: RawMutex, T> Mutex<R, T> {
impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
#[inline]
fn guard(&self) -> MutexGuard<'_, R, T> {
fn guard(&self) -> MutexGuard<R, T> {
MutexGuard {
mutex: self,
marker: PhantomData,
@ -176,7 +144,7 @@ impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
/// Attempts to lock a mutex in the thread which already holds the lock will
/// result in a deadlock.
#[inline]
pub fn lock(&self) -> MutexGuard<'_, R, T> {
pub fn lock(&self) -> MutexGuard<R, T> {
self.raw.lock();
self.guard()
}
@ -189,7 +157,7 @@ impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
///
/// This function does not block.
#[inline]
pub fn try_lock(&self) -> Option<MutexGuard<'_, R, T>> {
pub fn try_lock(&self) -> Option<MutexGuard<R, T>> {
if self.raw.try_lock() {
Some(self.guard())
} else {
@ -262,7 +230,7 @@ impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned. The lock will
/// be unlocked when the guard is dropped.
#[inline]
pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<'_, R, T>> {
pub fn try_lock_for(&self, timeout: R::Duration) -> Option<MutexGuard<R, T>> {
if self.raw.try_lock_for(timeout) {
Some(self.guard())
} else {
@ -276,7 +244,7 @@ impl<R: RawMutexTimed, T: ?Sized> Mutex<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned. The lock will
/// be unlocked when the guard is dropped.
#[inline]
pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<'_, R, T>> {
pub fn try_lock_until(&self, timeout: R::Instant) -> Option<MutexGuard<R, T>> {
if self.raw.try_lock_until(timeout) {
Some(self.guard())
} else {
@ -300,21 +268,10 @@ impl<R: RawMutex, T> From<T> for Mutex<R, T> {
}
impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => f.debug_struct("Mutex").field("data", &&*guard).finish(),
None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("Mutex")
.field("data", &LockedPlaceholder)
.finish()
}
None => f.pad("Mutex { <locked> }"),
}
}
}
@ -324,8 +281,8 @@ impl<R: RawMutex, T: ?Sized + fmt::Debug> fmt::Debug for Mutex<R, T> {
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` and `DerefMut` implementations.
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MutexGuard<'a, R: RawMutex, T: ?Sized> {
#[must_use]
pub struct MutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
mutex: &'a Mutex<R, T>,
marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}
@ -471,18 +428,6 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
}
}
impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MutexGuard<'a, R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display for MutexGuard<'a, R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'a, R, T> {}
@ -493,8 +438,8 @@ unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MutexGuard<'
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the Mutex will immediately unlock"]
pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> {
#[must_use]
pub struct MappedMutexGuard<'a, R: RawMutex + 'a, T: ?Sized + 'a> {
raw: &'a R,
data: *mut T,
marker: PhantomData<&'a mut T>,
@ -502,12 +447,10 @@ pub struct MappedMutexGuard<'a, R: RawMutex, T: ?Sized> {
unsafe impl<'a, R: RawMutex + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for MappedMutexGuard<'a, R, T>
{
}
{}
unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Send for MappedMutexGuard<'a, R, T> where
R::GuardMarker: Send
{
}
{}
impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
/// Makes a new `MappedMutexGuard` for a component of the locked data.
@ -603,19 +546,5 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
}
}
impl<'a, R: RawMutex + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for MappedMutexGuard<'a, R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawMutex + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for MappedMutexGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> StableAddress for MappedMutexGuard<'a, R, T> {}
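One practical effect of the guard-impl removals above: lock_api 0.2 gave `MutexGuard` its own `Debug`/`Display` forwarding, so after the backout to 0.1.5 callers format the dereferenced data instead. A minimal sketch using parking_lot's re-export:

```rust
use parking_lot::Mutex;

fn main() {
    let m = Mutex::new(5);
    let guard = m.lock();
    // Under lock_api 0.1.5, format the pointed-to value, not the guard:
    println!("{}", *guard);
}
```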

131
third_party/rust/lock_api/src/remutex.rs vendored

@ -5,21 +5,18 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::mutex::{RawMutex, RawMutexFair, RawMutexTimed};
use crate::GuardNoSend;
use core::cell::{Cell, UnsafeCell};
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::Deref;
use core::sync::atomic::{AtomicUsize, Ordering};
use mutex::{RawMutex, RawMutexFair, RawMutexTimed};
use GuardNoSend;
#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Helper trait which returns a non-zero thread ID.
///
/// The simplest way to implement this trait is to return the address of a
@ -143,45 +140,12 @@ pub struct ReentrantMutex<R: RawMutex, G: GetThreadId, T: ?Sized> {
data: UnsafeCell<T>,
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, G, T> Serialize for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.lock().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, G, T> Deserialize<'de> for ReentrantMutex<R, G, T>
where
R: RawMutex,
G: GetThreadId,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(ReentrantMutex::new)
}
}
unsafe impl<R: RawMutex + Send, G: GetThreadId + Send, T: ?Sized + Send> Send
for ReentrantMutex<R, G, T>
{
}
{}
unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync, T: ?Sized + Send> Sync
for ReentrantMutex<R, G, T>
{
}
{}
impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
/// Creates a new reentrant mutex in an unlocked state ready for use.
@ -224,7 +188,7 @@ impl<R: RawMutex, G: GetThreadId, T> ReentrantMutex<R, G, T> {
impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
#[inline]
fn guard(&self) -> ReentrantMutexGuard<'_, R, G, T> {
fn guard(&self) -> ReentrantMutexGuard<R, G, T> {
ReentrantMutexGuard {
remutex: &self,
marker: PhantomData,
@ -242,7 +206,7 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
/// returned to allow scoped unlock of the lock. When the guard goes out of
/// scope, the mutex will be unlocked.
#[inline]
pub fn lock(&self) -> ReentrantMutexGuard<'_, R, G, T> {
pub fn lock(&self) -> ReentrantMutexGuard<R, G, T> {
self.raw.lock();
self.guard()
}
@ -255,7 +219,7 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
///
/// This function does not block.
#[inline]
pub fn try_lock(&self) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
pub fn try_lock(&self) -> Option<ReentrantMutexGuard<R, G, T>> {
if self.raw.try_lock() {
Some(self.guard())
} else {
@ -328,7 +292,7 @@ impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
/// `None` is returned. Otherwise, an RAII guard is returned. The lock will
/// be unlocked when the guard is dropped.
#[inline]
pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
pub fn try_lock_for(&self, timeout: R::Duration) -> Option<ReentrantMutexGuard<R, G, T>> {
if self.raw.try_lock_for(timeout) {
Some(self.guard())
} else {
@ -342,7 +306,7 @@ impl<R: RawMutexTimed, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
/// `None` is returned. Otherwise, an RAII guard is returned. The lock will
/// be unlocked when the guard is dropped.
#[inline]
pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<'_, R, G, T>> {
pub fn try_lock_until(&self, timeout: R::Instant) -> Option<ReentrantMutexGuard<R, G, T>> {
if self.raw.try_lock_until(timeout) {
Some(self.guard())
} else {
@ -366,24 +330,13 @@ impl<R: RawMutex, G: GetThreadId, T> From<T> for ReentrantMutex<R, G, T> {
}
impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for ReentrantMutex<R, G, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_lock() {
Some(guard) => f
.debug_struct("ReentrantMutex")
.field("data", &&*guard)
.finish(),
None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("ReentrantMutex")
.field("data", &LockedPlaceholder)
.finish()
}
None => f.pad("ReentrantMutex { <locked> }"),
}
}
}
@ -393,16 +346,15 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized + fmt::Debug> fmt::Debug for Reentra
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct ReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
#[must_use]
pub struct ReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
remutex: &'a ReentrantMutex<R, G, T>,
marker: PhantomData<(&'a T, GuardNoSend)>,
}
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for ReentrantMutexGuard<'a, R, G, T>
{
}
{}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGuard<'a, R, G, T> {
/// Returns a reference to the original `ReentrantMutex` object.
@ -443,10 +395,7 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu
/// used as `ReentrantMutexGuard::map(...)`. A method would interfere with methods of
/// the same name on the contents of the locked data.
#[inline]
pub fn try_map<U: ?Sized, F>(
s: Self,
f: F,
) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
where
F: FnOnce(&mut T) -> Option<&mut U>,
{
@ -545,27 +494,10 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for ReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for ReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
for ReentrantMutexGuard<'a, R, G, T>
{
}
{}
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
@ -574,8 +506,8 @@ unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAdd
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the ReentrantMutex will immediately unlock"]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized> {
#[must_use]
pub struct MappedReentrantMutexGuard<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> {
raw: &'a RawReentrantMutex<R, G>,
data: *const T,
marker: PhantomData<&'a T>,
@ -583,8 +515,7 @@ pub struct MappedReentrantMutexGuard<'a, R: RawMutex, G: GetThreadId, T: ?Sized>
unsafe impl<'a, R: RawMutex + Sync + 'a, G: GetThreadId + Sync + 'a, T: ?Sized + Sync + 'a> Sync
for MappedReentrantMutexGuard<'a, R, G, T>
{
}
{}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
MappedReentrantMutexGuard<'a, R, G, T>
@ -622,10 +553,7 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// used as `MappedReentrantMutexGuard::map(...)`. A method would interfere with methods of
/// the same name on the contents of the locked data.
#[inline]
pub fn try_map<U: ?Sized, F>(
s: Self,
f: F,
) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
pub fn try_map<U: ?Sized, F>(s: Self, f: F) -> Result<MappedReentrantMutexGuard<'a, R, G, U>, Self>
where
F: FnOnce(&T) -> Option<&U>,
{
@ -684,24 +612,7 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for MappedReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for MappedReentrantMutexGuard<'a, R, G, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> StableAddress
for MappedReentrantMutexGuard<'a, R, G, T>
{
}
{}
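For context on the type being reshuffled above: a reentrant mutex may be locked repeatedly by the thread that already holds it, and parking_lot ships a ready-made instantiation. A minimal usage sketch; the behavior shown is the same before and after the backout:

```rust
use parking_lot::ReentrantMutex;

fn main() {
    let m = ReentrantMutex::new(0u32);
    let a = m.lock();
    let b = m.lock(); // same thread: re-locking does not deadlock
    assert_eq!(*a, *b);
}
```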

208
third_party/rust/lock_api/src/rwlock.rs vendored

@ -14,9 +14,6 @@ use core::ops::{Deref, DerefMut};
#[cfg(feature = "owning_ref")]
use owning_ref::StableAddress;
#[cfg(feature = "serde")]
use serde::{Deserialize, Deserializer, Serialize, Serializer};
/// Basic operations for a reader-writer lock.
///
/// Types implementing this trait can be used by `RwLock` to form a safe and
@ -233,35 +230,6 @@ pub struct RwLock<R: RawRwLock, T: ?Sized> {
data: UnsafeCell<T>,
}
// Copied and modified from serde
#[cfg(feature = "serde")]
impl<R, T> Serialize for RwLock<R, T>
where
R: RawRwLock,
T: Serialize + ?Sized,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.read().serialize(serializer)
}
}
#[cfg(feature = "serde")]
impl<'de, R, T> Deserialize<'de> for RwLock<R, T>
where
R: RawRwLock,
T: Deserialize<'de> + ?Sized,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Deserialize::deserialize(deserializer).map(RwLock::new)
}
}
unsafe impl<R: RawRwLock + Send, T: ?Sized + Send> Send for RwLock<R, T> {}
unsafe impl<R: RawRwLock + Sync, T: ?Sized + Send + Sync> Sync for RwLock<R, T> {}
@ -296,7 +264,7 @@ impl<R: RawRwLock, T> RwLock<R, T> {
impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
#[inline]
fn read_guard(&self) -> RwLockReadGuard<'_, R, T> {
fn read_guard(&self) -> RwLockReadGuard<R, T> {
RwLockReadGuard {
rwlock: self,
marker: PhantomData,
@ -304,7 +272,7 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
}
#[inline]
fn write_guard(&self) -> RwLockWriteGuard<'_, R, T> {
fn write_guard(&self) -> RwLockWriteGuard<R, T> {
RwLockWriteGuard {
rwlock: self,
marker: PhantomData,
@ -324,7 +292,7 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
#[inline]
pub fn read(&self) -> RwLockReadGuard<'_, R, T> {
pub fn read(&self) -> RwLockReadGuard<R, T> {
self.raw.lock_shared();
self.read_guard()
}
@ -337,7 +305,7 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
///
/// This function does not block.
#[inline]
pub fn try_read(&self) -> Option<RwLockReadGuard<'_, R, T>> {
pub fn try_read(&self) -> Option<RwLockReadGuard<R, T>> {
if self.raw.try_lock_shared() {
Some(self.read_guard())
} else {
@ -354,7 +322,7 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
/// Returns an RAII guard which will drop the write access of this `RwLock`
/// when dropped.
#[inline]
pub fn write(&self) -> RwLockWriteGuard<'_, R, T> {
pub fn write(&self) -> RwLockWriteGuard<R, T> {
self.raw.lock_exclusive();
self.write_guard()
}
@ -367,7 +335,7 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
///
/// This function does not block.
#[inline]
pub fn try_write(&self) -> Option<RwLockWriteGuard<'_, R, T>> {
pub fn try_write(&self) -> Option<RwLockWriteGuard<R, T>> {
if self.raw.try_lock_exclusive() {
Some(self.write_guard())
} else {
@ -473,7 +441,7 @@ impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned which will
/// release the shared access when it is dropped.
#[inline]
pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<'_, R, T>> {
pub fn try_read_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
if self.raw.try_lock_shared_for(timeout) {
Some(self.read_guard())
} else {
@ -488,7 +456,7 @@ impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned which will
/// release the shared access when it is dropped.
#[inline]
pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<'_, R, T>> {
pub fn try_read_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
if self.raw.try_lock_shared_until(timeout) {
Some(self.read_guard())
} else {
@ -503,7 +471,7 @@ impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned which will
/// release the exclusive access when it is dropped.
#[inline]
pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<'_, R, T>> {
pub fn try_write_for(&self, timeout: R::Duration) -> Option<RwLockWriteGuard<R, T>> {
if self.raw.try_lock_exclusive_for(timeout) {
Some(self.write_guard())
} else {
@ -518,7 +486,7 @@ impl<R: RawRwLockTimed, T: ?Sized> RwLock<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned which will
/// release the exclusive access when it is dropped.
#[inline]
pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<'_, R, T>> {
pub fn try_write_until(&self, timeout: R::Instant) -> Option<RwLockWriteGuard<R, T>> {
if self.raw.try_lock_exclusive_until(timeout) {
Some(self.write_guard())
} else {
@ -544,7 +512,7 @@ impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
#[inline]
pub fn read_recursive(&self) -> RwLockReadGuard<'_, R, T> {
pub fn read_recursive(&self) -> RwLockReadGuard<R, T> {
self.raw.lock_shared_recursive();
self.read_guard()
}
@ -560,7 +528,7 @@ impl<R: RawRwLockRecursive, T: ?Sized> RwLock<R, T> {
///
/// This function does not block.
#[inline]
pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<'_, R, T>> {
pub fn try_read_recursive(&self) -> Option<RwLockReadGuard<R, T>> {
if self.raw.try_lock_shared_recursive() {
Some(self.read_guard())
} else {
@ -581,10 +549,7 @@ impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
/// lock is held at the time of the call. See the documentation for
/// `read_recursive` for details.
#[inline]
pub fn try_read_recursive_for(
&self,
timeout: R::Duration,
) -> Option<RwLockReadGuard<'_, R, T>> {
pub fn try_read_recursive_for(&self, timeout: R::Duration) -> Option<RwLockReadGuard<R, T>> {
if self.raw.try_lock_shared_recursive_for(timeout) {
Some(self.read_guard())
} else {
@ -599,10 +564,7 @@ impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
/// `None` is returned. Otherwise, an RAII guard is returned which will
/// release the shared access when it is dropped.
#[inline]
pub fn try_read_recursive_until(
&self,
timeout: R::Instant,
) -> Option<RwLockReadGuard<'_, R, T>> {
pub fn try_read_recursive_until(&self, timeout: R::Instant) -> Option<RwLockReadGuard<R, T>> {
if self.raw.try_lock_shared_recursive_until(timeout) {
Some(self.read_guard())
} else {
@ -613,7 +575,7 @@ impl<R: RawRwLockRecursiveTimed, T: ?Sized> RwLock<R, T> {
impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
#[inline]
fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
fn upgradable_guard(&self) -> RwLockUpgradableReadGuard<R, T> {
RwLockUpgradableReadGuard {
rwlock: self,
marker: PhantomData,
@ -630,7 +592,7 @@ impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
/// Returns an RAII guard which will release this thread's shared access
/// once it is dropped.
#[inline]
pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<'_, R, T> {
pub fn upgradable_read(&self) -> RwLockUpgradableReadGuard<R, T> {
self.raw.lock_upgradable();
self.upgradable_guard()
}
@ -643,7 +605,7 @@ impl<R: RawRwLockUpgrade, T: ?Sized> RwLock<R, T> {
///
/// This function does not block.
#[inline]
pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
pub fn try_upgradable_read(&self) -> Option<RwLockUpgradableReadGuard<R, T>> {
if self.raw.try_lock_upgradable() {
Some(self.upgradable_guard())
} else {
@ -663,7 +625,7 @@ impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
pub fn try_upgradable_read_for(
&self,
timeout: R::Duration,
) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
) -> Option<RwLockUpgradableReadGuard<R, T>> {
if self.raw.try_lock_upgradable_for(timeout) {
Some(self.upgradable_guard())
} else {
@ -681,7 +643,7 @@ impl<R: RawRwLockUpgradeTimed, T: ?Sized> RwLock<R, T> {
pub fn try_upgradable_read_until(
&self,
timeout: R::Instant,
) -> Option<RwLockUpgradableReadGuard<'_, R, T>> {
) -> Option<RwLockUpgradableReadGuard<R, T>> {
if self.raw.try_lock_upgradable_until(timeout) {
Some(self.upgradable_guard())
} else {
@ -705,29 +667,18 @@ impl<R: RawRwLock, T> From<T> for RwLock<R, T> {
}
impl<R: RawRwLock, T: ?Sized + fmt::Debug> fmt::Debug for RwLock<R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Some(guard) => f.debug_struct("RwLock").field("data", &&*guard).finish(),
None => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("RwLock")
.field("data", &LockedPlaceholder)
.finish()
}
None => f.pad("RwLock { <locked> }"),
}
}
}
/// RAII structure used to release the shared read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
#[must_use]
pub struct RwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
rwlock: &'a RwLock<R, T>,
marker: PhantomData<(&'a T, R::GuardMarker)>,
}
@ -868,27 +819,13 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockReadGuard<'a, R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for RwLockReadGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockReadGuard<'a, R, T> {}
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
#[must_use]
pub struct RwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
rwlock: &'a RwLock<R, T>,
marker: PhantomData<(&'a mut T, R::GuardMarker)>,
}
@ -1070,35 +1007,20 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T>
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug for RwLockWriteGuard<'a, R, T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for RwLockWriteGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress for RwLockWriteGuard<'a, R, T> {}
/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade, T: ?Sized> {
#[must_use]
pub struct RwLockUpgradableReadGuard<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> {
rwlock: &'a RwLock<R, T>,
marker: PhantomData<(&'a T, R::GuardMarker)>,
}
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + Sync + 'a> Sync
for RwLockUpgradableReadGuard<'a, R, T>
{
}
{}
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a, R, T> {
/// Returns a reference to the original reader-writer lock object.
@ -1274,27 +1196,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableRead
}
}
impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for RwLockUpgradableReadGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawRwLockUpgrade + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for RwLockUpgradableReadGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
for RwLockUpgradableReadGuard<'a, R, T>
{
}
{}
/// An RAII read lock guard returned by `RwLockReadGuard::map`, which can point to a
/// subfield of the protected data.
@ -1303,8 +1208,8 @@ unsafe impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> StableAddress
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
#[must_use]
pub struct MappedRwLockReadGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
raw: &'a R,
data: *const T,
marker: PhantomData<&'a T>,
@ -1313,8 +1218,7 @@ pub struct MappedRwLockReadGuard<'a, R: RawRwLock, T: ?Sized> {
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync for MappedRwLockReadGuard<'a, R, T> {}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockReadGuard<'a, R, T> where
R::GuardMarker: Send
{
}
{}
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T> {
/// Make a new `MappedRwLockReadGuard` for a component of the locked data.
@ -1403,27 +1307,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for MappedRwLockReadGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for MappedRwLockReadGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
for MappedRwLockReadGuard<'a, R, T>
{
}
{}
/// An RAII write lock guard returned by `RwLockWriteGuard::map`, which can point to a
/// subfield of the protected data.
@ -1432,8 +1319,8 @@ unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
/// former doesn't support temporarily unlocking and re-locking, since that
/// could introduce soundness issues if the locked object is modified by another
/// thread.
#[must_use = "if unused the RwLock will immediately unlock"]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
#[must_use]
pub struct MappedRwLockWriteGuard<'a, R: RawRwLock + 'a, T: ?Sized + 'a> {
raw: &'a R,
data: *mut T,
marker: PhantomData<&'a mut T>,
@ -1441,12 +1328,10 @@ pub struct MappedRwLockWriteGuard<'a, R: RawRwLock, T: ?Sized> {
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + Sync + 'a> Sync
for MappedRwLockWriteGuard<'a, R, T>
{
}
{}
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Send for MappedRwLockWriteGuard<'a, R, T> where
R::GuardMarker: Send
{
}
{}
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
/// Make a new `MappedRwLockWriteGuard` for a component of the locked data.
@ -1562,24 +1447,7 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a,
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Debug + ?Sized + 'a> fmt::Debug
for MappedRwLockWriteGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&**self, f)
}
}
impl<'a, R: RawRwLock + 'a, T: fmt::Display + ?Sized + 'a> fmt::Display
for MappedRwLockWriteGuard<'a, R, T>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(**self).fmt(f)
}
}
#[cfg(feature = "owning_ref")]
unsafe impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> StableAddress
for MappedRwLockWriteGuard<'a, R, T>
{
}
{}
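The upgradable-read guards touched above back the feature the README advertises (item 17): a shared lock that can be atomically upgraded to a write lock. A minimal sketch via parking_lot's re-exports, valid on either side of the backout:

```rust
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(1);
    let readable = lock.upgradable_read(); // shared, upgrade-capable access
    let mut writable = RwLockUpgradableReadGuard::upgrade(readable);
    *writable += 1; // now holding exclusive access
    assert_eq!(*writable, 2);
}
```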


@ -1 +1 @@
{"files":{"CHANGELOG.md":"f9a9c82373818d32816c42e0f127f6f14a64d37925f02041c10c66a528e0d454","Cargo.toml":"ef3558536eff060103a0c35e6e9ecfe723240c4a37429cf3d7d84d1eb4fda5e3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"61723e013019e0254522485795be4ff3f1cb4f580ebf4a8daa8fabeb4b9b9e6b","appveyor.yml":"fd584e381a2eb990c8d5eb44998d9c91ff4d538f4b9c62acc018a7bb94cb1fe7","build.rs":"4ed00d73d71057bcdf6c186559468927fc130fd65cfd806ee5d46d28540bc653","src/condvar.rs":"d7cf8af884d577a726f40ed043cbbf2a24424df6e20e1cc4718f4ae390cbb861","src/deadlock.rs":"081dbf009539b113f67ad0a1abd7af889dad684a47aa1a7dc00ae91f08975ef6","src/elision.rs":"00f7af80021fd602879fb7205befb6ff941cd8dc932a5c0a534b430fefe421ea","src/lib.rs":"acfb6cd0d6e69ab49325defc2d9dd624088d442c9c0dae71e20dd8eced84cae3","src/mutex.rs":"e3a48933b7e19d26eab4b5f44ed4e9bcb069b57cdd4a0569d1e65f6c3839b766","src/once.rs":"3b0c1254acbcff840048c722220066988df69f9d9487ac188356f64b7bcad54f","src/raw_mutex.rs":"9eeccbe797116f8c3f1a19e4803ac1bb57c6c5ec9b2d2770fb42ee5aee5a1002","src/raw_rwlock.rs":"5bb1d74a90a52f0f573d49776a2a68f00a2301c25c8400af2934d3e018728e79","src/remutex.rs":"85b3cff3aaa0ca4c644fcb7cd06447128e8e6065d6a632c436085841ac244022","src/rwlock.rs":"63be04f2af7eda7aa33f704846eb413a2ffd76135d248cb250dc91bd20d7dd66","src/util.rs":"8bd40151fea0a7ffb2fdcb751a5dfd868d8d4d275b0f1b04a7fc5d2a0ba41766"},"package":"fa7767817701cce701d5585b9c4db3cdd02086398322c1d7e8bf5094a96a2ce7"}
{"files":{"CHANGELOG.md":"e254fac6600c725edb746f31f41b1b2ceeb9cfc85f4f9a3e6af874c70b020823","Cargo.toml":"215d5b3a2c18f556b5c66ac6d27eea71d7dd7e6b4857ecd6966c2e5cc03270ea","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"a52cf38f796e7f12215662e8a3a23aa9802c170a09ecba0e4be766c88f95a9c5","appveyor.yml":"cb1d02316926d88e174976bfc6781194569ca27f386c50e3091d8e52587d30a2","src/condvar.rs":"ce127f75bad5c175abb8147aac4b5be78aabdb599c5f8f3aad77f6bc3705274d","src/deadlock.rs":"8916c2e2820bfd3a55860ddb9f1b907888406b68cdae2b7a2093c825d28f3b99","src/elision.rs":"89072fe0aca87d53abc0f56490ae77bcf9d77e28e291bd13e861b1924bbb079f","src/lib.rs":"3e259bf3421f10c3e920daca511a4880b2620145a1fcb070a37548835c4f429a","src/mutex.rs":"0ac3e654e4aa2c3078a6aa22c83428d604e7f3f8ed4c261c40d030d232ca7b64","src/once.rs":"606e0e88d6c1ff82b69bda56e7409ec3a1aefa66b45b7fa42b88cba07ae70598","src/raw_mutex.rs":"881e75a843d76399d01c4ae0f09cd23b93b137b5035a47bd7886505132e58165","src/raw_rwlock.rs":"2e3c13e80cd06be53118ae2bcc7bdec708dda8c139c371ee12885f48903cf69c","src/remutex.rs":"bad8022610344086010b0661998a416db4b458c222e671b67df03fc4795c0298","src/rwlock.rs":"fc826cbcf2d7862ecb184b657a82bb8794a9e26ac329c8f87b589fa09f15d245","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84"},"package":"69376b761943787ebd5cc85a5bc95958651a22609c5c1c2b65de21786baec72b"}

11
third_party/rust/parking_lot/CHANGELOG.md vendored

@ -1,14 +1,3 @@
0.7.1 (2019-01-01)
==================
- Fixed potential deadlock when upgrading a RwLock.
- Fixed overflow panic on very long timeouts (#111).
0.7.0 (2018-11-20)
==================
- Return if or how many threads were notified from `Condvar::notify_*`
0.6.3 (2018-07-18)
==================

22
third_party/rust/parking_lot/Cargo.toml vendored

@ -3,7 +3,7 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
@ -11,9 +11,8 @@
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "parking_lot"
version = "0.8.0"
version = "0.6.3"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "More compact and efficient implementations of the standard synchronization primitives."
readme = "README.md"
@ -22,24 +21,15 @@ categories = ["concurrency"]
license = "Apache-2.0/MIT"
repository = "https://github.com/Amanieu/parking_lot"
[dependencies.lock_api]
version = "0.2"
version = "0.1"
[dependencies.parking_lot_core]
version = "0.5"
[dev-dependencies.bincode]
version = "1.1.3"
[dev-dependencies.lazy_static]
version = "1.0"
[dev-dependencies.rand]
version = "0.6"
[build-dependencies.rustc_version]
version = "0.2"
[dev-dependencies.rand]
version = "0.5"
[features]
deadlock_detection = ["parking_lot_core/deadlock_detection"]
default = []
default = ["owning_ref"]
nightly = ["parking_lot_core/nightly", "lock_api/nightly"]
owning_ref = ["lock_api/owning_ref"]
serde = ["lock_api/serde"]

12
third_party/rust/parking_lot/README.md vendored

@ -68,9 +68,6 @@ in the Rust standard library:
can be enabled via the `deadlock_detection` feature.
17. `RwLock` supports atomically upgrading an "upgradable" read lock into a
write lock.
18. Optional support for [serde](https://docs.serde.rs/serde/). Enable via the
feature `serde`. **NOTE!** this support is for `Mutex`, `ReentrantMutex`,
and `RwLock` only; `Condvar` and `Once` are not currently supported.
## The parking lot
@ -102,7 +99,7 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
parking_lot = "0.8"
parking_lot = "0.6"
```
and this to your crate root:
@ -115,7 +112,7 @@ To enable nightly-only features, add this to your `Cargo.toml` instead:
```toml
[dependencies]
parking_lot = {version = "0.8", features = ["nightly"]}
parking_lot = {version = "0.6", features = ["nightly"]}
```
The experimental deadlock detector can be enabled with the
@ -125,11 +122,6 @@ The core parking lot API is provided by the `parking_lot_core` crate. It is
separate from the synchronization primitives in the `parking_lot` crate so that
changes to the core API do not cause breaking changes for users of `parking_lot`.
## Minimum Rust version
The current minimum required Rust version is 1.31. Any change to this is
considered a breaking change and will require a major version bump.
## License
Licensed under either of
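The usage snippets above only re-pin the crate version; the basic API is untouched by the backout. For context, a minimal sketch of the ergonomics the README describes, namely that `lock()` hands back the guard directly with no poisoning `Result` to unwrap:

```rust
use parking_lot::Mutex;

fn main() {
    let counter = Mutex::new(0u32);
    *counter.lock() += 1; // no .unwrap(): parking_lot mutexes don't poison
    assert_eq!(*counter.lock(), 1);
}
```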

10
third_party/rust/parking_lot/appveyor.yml vendored

@ -6,10 +6,10 @@ environment:
- TARGET: nightly-i686-pc-windows-msvc
- TARGET: nightly-x86_64-pc-windows-gnu
- TARGET: nightly-i686-pc-windows-gnu
- TARGET: 1.31.0-x86_64-pc-windows-msvc
- TARGET: 1.31.0-i686-pc-windows-msvc
- TARGET: 1.31.0-x86_64-pc-windows-gnu
- TARGET: 1.31.0-i686-pc-windows-gnu
- TARGET: 1.24.0-x86_64-pc-windows-msvc
- TARGET: 1.24.0-i686-pc-windows-msvc
- TARGET: 1.24.0-x86_64-pc-windows-gnu
- TARGET: 1.24.0-i686-pc-windows-gnu
install:
- SET PATH=C:\Python27;C:\Python27\Scripts;%PATH%;%APPDATA%\Python\Scripts
@ -25,5 +25,5 @@ build_script:
test_script:
- travis-cargo test
- travis-cargo --only nightly test -- --features=deadlock_detection
- travis-cargo test -- --features=deadlock_detection
- travis-cargo doc

8
third_party/rust/parking_lot/build.rs vendored

@ -1,8 +0,0 @@
use rustc_version::{version, Version};
fn main() {
if version().unwrap() >= Version::parse("1.34.0").unwrap() {
println!("cargo:rustc-cfg=has_sized_atomics");
println!("cargo:rustc-cfg=has_checked_instant");
}
}

245
third_party/rust/parking_lot/src/condvar.rs vendored

@ -5,16 +5,14 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::mutex::MutexGuard;
use crate::raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
use crate::{deadlock, util};
use core::{
fmt, ptr,
sync::atomic::{AtomicPtr, Ordering},
};
use deadlock;
use lock_api::RawMutex as RawMutexTrait;
use mutex::MutexGuard;
use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
use raw_mutex::{RawMutex, TOKEN_HANDOFF, TOKEN_NORMAL};
use std::sync::atomic::{AtomicPtr, Ordering};
use std::time::{Duration, Instant};
use std::{fmt, ptr};
/// A type indicating whether a timed wait on a condition variable returned
/// due to a time out or not.
@ -89,6 +87,7 @@ pub struct Condvar {
impl Condvar {
/// Creates a new condition variable which is ready to be waited on and
/// notified.
#[cfg(feature = "nightly")]
#[inline]
pub const fn new() -> Condvar {
Condvar {
@ -96,105 +95,71 @@ impl Condvar {
}
}
/// Creates a new condition variable which is ready to be waited on and
/// notified.
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn new() -> Condvar {
Condvar {
state: AtomicPtr::new(ptr::null_mut()),
}
}
/// Wakes up one blocked thread on this condvar.
///
/// Returns whether a thread was woken up.
///
/// If there is a blocked thread on this condition variable, then it will
/// be woken up from its call to `wait` or `wait_timeout`. Calls to
/// `notify_one` are not buffered in any way.
///
/// To wake up all threads, see `notify_all()`.
///
/// # Examples
///
/// ```
/// use parking_lot::Condvar;
///
/// let condvar = Condvar::new();
///
/// // do something with condvar, share it with other threads
///
/// if !condvar.notify_one() {
/// println!("Nobody was listening for this.");
/// }
/// ```
#[inline]
pub fn notify_one(&self) -> bool {
pub fn notify_one(&self) {
// Nothing to do if there are no waiting threads
let state = self.state.load(Ordering::Relaxed);
if state.is_null() {
return false;
if self.state.load(Ordering::Relaxed).is_null() {
return;
}
self.notify_one_slow(state)
self.notify_one_slow();
}
#[cold]
#[inline(never)]
fn notify_one_slow(&self, mutex: *mut RawMutex) -> bool {
fn notify_one_slow(&self) {
unsafe {
// Unpark one thread and requeue the rest onto the mutex
let from = self as *const _ as usize;
let to = mutex as usize;
let validate = || {
// Make sure that our atomic state still points to the same
// mutex. If not then it means that all threads on the current
// mutex were woken up and a new waiting thread switched to a
// different mutex. In that case we can get away with doing
// nothing.
if self.state.load(Ordering::Relaxed) != mutex {
return RequeueOp::Abort;
}
// Unpark one thread if the mutex is unlocked, otherwise just
// requeue everything to the mutex. This is safe to do here
// since unlocking the mutex when the parked bit is set requires
// locking the queue. There is the possibility of a race if the
// mutex gets locked after we check, but that doesn't matter in
// this case.
if (*mutex).mark_parked_if_locked() {
RequeueOp::RequeueOne
} else {
RequeueOp::UnparkOne
}
};
let callback = |_op, result: UnparkResult| {
// Unpark one thread
let addr = self as *const _ as usize;
let callback = |result: UnparkResult| {
// Clear our state if there are no more waiting threads
if !result.have_more_threads {
self.state.store(ptr::null_mut(), Ordering::Relaxed);
}
TOKEN_NORMAL
};
let res = parking_lot_core::unpark_requeue(from, to, validate, callback);
res.unparked_threads + res.requeued_threads != 0
parking_lot_core::unpark_one(addr, callback);
}
}
/// Wakes up all blocked threads on this condvar.
///
/// Returns the number of threads woken up.
///
/// This method will ensure that any current waiters on the condition
/// variable are awoken. Calls to `notify_all()` are not buffered in any
/// way.
///
/// To wake up only one thread, see `notify_one()`.
#[inline]
pub fn notify_all(&self) -> usize {
pub fn notify_all(&self) {
// Nothing to do if there are no waiting threads
let state = self.state.load(Ordering::Relaxed);
if state.is_null() {
return 0;
return;
}
self.notify_all_slow(state)
self.notify_all_slow(state);
}
#[cold]
#[inline(never)]
fn notify_all_slow(&self, mutex: *mut RawMutex) -> usize {
fn notify_all_slow(&self, mutex: *mut RawMutex) {
unsafe {
// Unpark one thread and requeue the rest onto the mutex
let from = self as *const _ as usize;
@ -228,14 +193,12 @@ impl Condvar {
let callback = |op, result: UnparkResult| {
// If we requeued threads to the mutex, mark it as having
// parked threads. The RequeueAll case is already handled above.
if op == RequeueOp::UnparkOneRequeueRest && result.requeued_threads != 0 {
if op == RequeueOp::UnparkOneRequeueRest && result.have_more_threads {
(*mutex).mark_parked();
}
TOKEN_NORMAL
};
let res = parking_lot_core::unpark_requeue(from, to, validate, callback);
res.unparked_threads + res.requeued_threads
parking_lot_core::unpark_requeue(from, to, validate, callback);
}
}
@ -253,7 +216,7 @@ impl Condvar {
/// This function will panic if another thread is waiting on the `Condvar`
/// with a different `Mutex` object.
#[inline]
pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<'_, T>) {
pub fn wait<T: ?Sized>(&self, mutex_guard: &mut MutexGuard<T>) {
self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, None);
}
@ -283,7 +246,7 @@ impl Condvar {
#[inline]
pub fn wait_until<T: ?Sized>(
&self,
mutex_guard: &mut MutexGuard<'_, T>,
mutex_guard: &mut MutexGuard<T>,
timeout: Instant,
) -> WaitTimeoutResult {
self.wait_until_internal(
@ -294,7 +257,11 @@ impl Condvar {
// This is a non-generic function to reduce the monomorphization cost of
// using `wait_until`.
fn wait_until_internal(&self, mutex: &RawMutex, timeout: Option<Instant>) -> WaitTimeoutResult {
fn wait_until_internal(
&self,
mutex: &RawMutex,
timeout: Option<Instant>,
) -> WaitTimeoutResult {
unsafe {
let result;
let mut bad_mutex = false;
@ -378,20 +345,13 @@ impl Condvar {
///
/// Like `wait`, the lock specified will be re-acquired when this function
/// returns, regardless of whether the timeout elapsed or not.
///
/// # Panics
///
/// Panics if the given `timeout` is so large that it can't be added to the current time.
/// This panic is not possible if the crate is built with the `nightly` feature, then a too
/// large `timeout` becomes equivalent to just calling `wait`.
#[inline]
pub fn wait_for<T: ?Sized>(
&self,
mutex_guard: &mut MutexGuard<'_, T>,
guard: &mut MutexGuard<T>,
timeout: Duration,
) -> WaitTimeoutResult {
let deadline = util::to_deadline(timeout);
self.wait_until_internal(unsafe { MutexGuard::mutex(mutex_guard).raw() }, deadline)
self.wait_until(guard, Instant::now() + timeout)
}
}
@ -403,18 +363,18 @@ impl Default for Condvar {
}
impl fmt::Debug for Condvar {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.pad("Condvar { .. }")
}
}
#[cfg(test)]
mod tests {
use crate::{Condvar, Mutex, MutexGuard};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use {Condvar, Mutex};
#[test]
fn smoke() {
@ -474,70 +434,6 @@ mod tests {
}
}
#[test]
fn notify_one_return_true() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
let c = Arc::new(Condvar::new());
let c2 = c.clone();
let mut g = m.lock();
let _t = thread::spawn(move || {
let _g = m2.lock();
assert!(c2.notify_one());
});
c.wait(&mut g);
}
#[test]
fn notify_one_return_false() {
let m = Arc::new(Mutex::new(()));
let c = Arc::new(Condvar::new());
let _t = thread::spawn(move || {
let _g = m.lock();
assert!(!c.notify_one());
});
}
#[test]
fn notify_all_return() {
const N: usize = 10;
let data = Arc::new((Mutex::new(0), Condvar::new()));
let (tx, rx) = channel();
for _ in 0..N {
let data = data.clone();
let tx = tx.clone();
thread::spawn(move || {
let &(ref lock, ref cond) = &*data;
let mut cnt = lock.lock();
*cnt += 1;
if *cnt == N {
tx.send(()).unwrap();
}
while *cnt != 0 {
cond.wait(&mut cnt);
}
tx.send(()).unwrap();
});
}
drop(tx);
let &(ref lock, ref cond) = &*data;
rx.recv().unwrap();
let mut cnt = lock.lock();
*cnt = 0;
assert_eq!(cond.notify_all(), N);
drop(cnt);
for _ in 0..N {
rx.recv().unwrap();
}
assert_eq!(cond.notify_all(), 0);
}
#[test]
fn wait_for() {
let m = Arc::new(Mutex::new(()));
@ -548,21 +444,12 @@ mod tests {
let mut g = m.lock();
let no_timeout = c.wait_for(&mut g, Duration::from_millis(1));
assert!(no_timeout.timed_out());
let _t = thread::spawn(move || {
let _g = m2.lock();
c2.notify_one();
});
// Non-nightly panics on too large timeouts. Nightly treats it as indefinite wait.
let very_long_timeout = if cfg!(feature = "nightly") {
Duration::from_secs(u64::max_value())
} else {
Duration::from_millis(u32::max_value() as u64)
};
let timeout_res = c.wait_for(&mut g, very_long_timeout);
let timeout_res = c.wait_for(&mut g, Duration::from_millis(u32::max_value() as u64));
assert!(!timeout_res.timed_out());
drop(g);
}
@ -643,50 +530,4 @@ mod tests {
let c = Condvar::new();
assert_eq!(format!("{:?}", c), "Condvar { .. }");
}
#[test]
fn test_condvar_requeue() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
let c = Arc::new(Condvar::new());
let c2 = c.clone();
let t = thread::spawn(move || {
let mut g = m2.lock();
c2.wait(&mut g);
});
let mut g = m.lock();
while !c.notify_one() {
// Wait for the thread to get into wait()
MutexGuard::bump(&mut g);
}
// The thread should have been requeued to the mutex, which we wake up now.
drop(g);
t.join().unwrap();
}
#[test]
fn test_issue_129() {
let locks = Arc::new((Mutex::new(()), Condvar::new()));
let (tx, rx) = channel();
for _ in 0..4 {
let locks = locks.clone();
let tx = tx.clone();
thread::spawn(move || {
let mut guard = locks.0.lock();
locks.1.wait(&mut guard);
locks.1.wait_for(&mut guard, Duration::from_millis(1));
locks.1.notify_one();
tx.send(()).unwrap();
});
}
thread::sleep(Duration::from_millis(100));
locks.1.notify_one();
for _ in 0..4 {
assert_eq!(rx.recv_timeout(Duration::from_millis(500)), Ok(()));
}
}
}
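For reference, a minimal sketch of the restored 0.6-era `Condvar` in use, where `notify_one` returns `()` rather than the `bool` added in 0.8:
```rust
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::thread;

fn main() {
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = pair.clone();

    thread::spawn(move || {
        let mut started = pair2.0.lock();
        *started = true;
        // In 0.6 this returns (); the bool/usize returns are 0.8 additions.
        pair2.1.notify_one();
    });

    // Wait until the spawned thread flips the flag.
    let mut started = pair.0.lock();
    while !*started {
        pair.1.wait(&mut started);
    }
}
```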

18
third_party/rust/parking_lot/src/deadlock.rs vendored

@ -40,15 +40,10 @@ pub(crate) use parking_lot_core::deadlock::{acquire_resource, release_resource};
#[cfg(test)]
#[cfg(feature = "deadlock_detection")]
mod tests {
use crate::{Mutex, ReentrantMutex, RwLock};
use std::sync::{Arc, Barrier};
use std::thread::{self, sleep};
use std::time::Duration;
// We need to serialize these tests since deadlock detection uses global state
lazy_static::lazy_static! {
static ref DEADLOCK_DETECTION_LOCK: Mutex<()> = Mutex::new(());
}
use {Mutex, ReentrantMutex, RwLock};
fn check_deadlock() -> bool {
use parking_lot_core::deadlock::check_deadlock;
@ -57,8 +52,6 @@ mod tests {
#[test]
fn test_mutex_deadlock() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<Mutex<()>> = Default::default();
let m2: Arc<Mutex<()>> = Default::default();
let m3: Arc<Mutex<()>> = Default::default();
@ -102,8 +95,6 @@ mod tests {
#[test]
fn test_mutex_deadlock_reentrant() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<Mutex<()>> = Default::default();
assert!(!check_deadlock());
@ -121,8 +112,6 @@ mod tests {
#[test]
fn test_remutex_deadlock() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<ReentrantMutex<()>> = Default::default();
let m2: Arc<ReentrantMutex<()>> = Default::default();
let m3: Arc<ReentrantMutex<()>> = Default::default();
@ -169,8 +158,6 @@ mod tests {
#[test]
fn test_rwlock_deadlock() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<RwLock<()>> = Default::default();
let m2: Arc<RwLock<()>> = Default::default();
let m3: Arc<RwLock<()>> = Default::default();
@ -212,11 +199,8 @@ mod tests {
assert!(!check_deadlock());
}
#[cfg(rwlock_deadlock_detection_not_supported)]
#[test]
fn test_rwlock_deadlock_reentrant() {
let _guard = DEADLOCK_DETECTION_LOCK.lock();
let m1: Arc<RwLock<()>> = Default::default();
assert!(!check_deadlock());

113
third_party/rust/parking_lot/src/elision.rs vendored

@ -12,14 +12,17 @@ pub trait AtomicElisionExt {
type IntType;
// Perform a compare_exchange and start a transaction
fn elision_compare_exchange_acquire(
fn elision_acquire(
&self,
current: Self::IntType,
new: Self::IntType,
) -> Result<Self::IntType, Self::IntType>;
// Perform a compare_exchange and end a transaction
fn elision_release(
&self,
current: Self::IntType,
new: Self::IntType,
) -> Result<Self::IntType, Self::IntType>;
// Perform a fetch_sub and end a transaction
fn elision_fetch_sub_release(&self, val: Self::IntType) -> Self::IntType;
}
// Indicates whether the target architecture supports lock elision
@ -38,23 +41,22 @@ impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
#[inline]
fn elision_compare_exchange_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
fn elision_acquire(&self, _: usize, _: usize) -> Result<usize, usize> {
unreachable!();
}
#[inline]
fn elision_fetch_sub_release(&self, _: usize) -> usize {
fn elision_release(&self, _: usize, _: usize) -> Result<usize, usize> {
unreachable!();
}
}
#[cfg(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64")))]
#[cfg(all(feature = "nightly", target_arch = "x86"))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
#[cfg(target_pointer_width = "32")]
#[inline]
fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xacquire; lock; cmpxchgl $2, $1"
@ -69,9 +71,70 @@ impl AtomicElisionExt for AtomicUsize {
}
}
}
#[cfg(target_pointer_width = "64")]
#[inline]
fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xrelease; lock; cmpxchgl $2, $1"
: "={eax}" (prev), "+*m" (self)
: "r" (new), "{eax}" (current)
: "memory"
: "volatile");
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
}
#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "32"))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
#[inline]
fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xacquire; lock; cmpxchgl $2, $1"
: "={rax}" (prev), "+*m" (self)
: "r" (new), "{rax}" (current)
: "memory"
: "volatile");
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
#[inline]
fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xrelease; lock; cmpxchgl $2, $1"
: "={rax}" (prev), "+*m" (self)
: "r" (new), "{rax}" (current)
: "memory"
: "volatile");
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
}
#[cfg(all(feature = "nightly", target_arch = "x86_64", target_pointer_width = "64"))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
#[inline]
fn elision_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xacquire; lock; cmpxchgq $2, $1"
@ -87,30 +150,20 @@ impl AtomicElisionExt for AtomicUsize {
}
}
#[cfg(target_pointer_width = "32")]
#[inline]
fn elision_fetch_sub_release(&self, val: usize) -> usize {
fn elision_release(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
let prev: usize;
asm!("xrelease; lock; xaddl $2, $1"
: "=r" (prev), "+*m" (self)
: "0" (val.wrapping_neg())
asm!("xrelease; lock; cmpxchgq $2, $1"
: "={rax}" (prev), "+*m" (self)
: "r" (new), "{rax}" (current)
: "memory"
: "volatile");
prev
}
}
#[cfg(target_pointer_width = "64")]
#[inline]
fn elision_fetch_sub_release(&self, val: usize) -> usize {
unsafe {
let prev: usize;
asm!("xrelease; lock; xaddq $2, $1"
: "=r" (prev), "+*m" (self)
: "0" (val.wrapping_neg())
: "memory"
: "volatile");
prev
if prev == current {
Ok(prev)
} else {
Err(prev)
}
}
}
}
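Leaving the TSX prefixes aside, the acquire half of this trait is functionally a plain acquire compare-exchange; a runnable sketch of that fallback behaviour (the helper name is illustrative):
```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Functional equivalent of `elision_acquire(current, new)` without the
// `xacquire` hint: returns Ok(previous) on success, Err(previous) on failure.
fn elision_acquire_fallback(a: &AtomicUsize, current: usize, new: usize) -> Result<usize, usize> {
    a.compare_exchange(current, new, Ordering::Acquire, Ordering::Relaxed)
}

fn main() {
    let a = AtomicUsize::new(0);
    assert_eq!(elision_acquire_fallback(&a, 0, 1), Ok(0));
    assert_eq!(elision_acquire_fallback(&a, 0, 1), Err(1));
}
```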

22
third_party/rust/parking_lot/src/lib.rs vendored

@ -10,9 +10,13 @@
//! standard library. It also provides a `ReentrantMutex` type.
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![cfg_attr(feature = "nightly", feature(const_fn))]
#![cfg_attr(feature = "nightly", feature(integer_atomics))]
#![cfg_attr(feature = "nightly", feature(asm))]
extern crate lock_api;
extern crate parking_lot_core;
mod condvar;
mod elision;
mod mutex;
@ -28,15 +32,13 @@ pub mod deadlock;
#[cfg(not(feature = "deadlock_detection"))]
mod deadlock;
pub use self::condvar::{Condvar, WaitTimeoutResult};
pub use self::mutex::{MappedMutexGuard, Mutex, MutexGuard};
pub use self::once::{Once, OnceState};
pub use self::raw_mutex::RawMutex;
pub use self::raw_rwlock::RawRwLock;
pub use self::remutex::{
MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard,
};
pub use self::rwlock::{
pub use condvar::{Condvar, WaitTimeoutResult};
pub use mutex::{MappedMutexGuard, Mutex, MutexGuard};
pub use once::{Once, OnceState, ONCE_INIT};
pub use raw_mutex::RawMutex;
pub use raw_rwlock::RawRwLock;
pub use remutex::{MappedReentrantMutexGuard, RawThreadId, ReentrantMutex, ReentrantMutexGuard};
pub use rwlock::{
MappedRwLockReadGuard, MappedRwLockWriteGuard, RwLock, RwLockReadGuard,
RwLockUpgradableReadGuard, RwLockWriteGuard,
};

36
third_party/rust/parking_lot/src/mutex.rs vendored

@ -5,8 +5,8 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_mutex::RawMutex;
use lock_api;
use raw_mutex::RawMutex;
/// A mutual exclusion primitive useful for protecting shared data
///
@ -69,7 +69,7 @@ use lock_api;
///
/// let (tx, rx) = channel();
/// for _ in 0..10 {
/// let (data, tx) = (Arc::clone(&data), tx.clone());
/// let (data, tx) = (data.clone(), tx.clone());
/// thread::spawn(move || {
/// // The shared state can only be accessed once the lock is held.
/// // Our non-atomic increment is safe because we're the only thread
@ -105,14 +105,11 @@ pub type MappedMutexGuard<'a, T> = lock_api::MappedMutexGuard<'a, RawMutex, T>;
#[cfg(test)]
mod tests {
use crate::{Condvar, Mutex};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
use {Condvar, Mutex};
struct Packet<T>(Arc<(Mutex<T>, Condvar)>);
@ -256,8 +253,7 @@ mod tests {
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
}).join();
let lock = arc.lock();
assert_eq!(*lock, 2);
}
@ -287,20 +283,16 @@ mod tests {
let mutex = Mutex::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", mutex), "Mutex { data: [0, 10] }");
assert_eq!(
format!("{:#?}", mutex),
"Mutex {
data: [
0,
10
]
}"
);
let _lock = mutex.lock();
assert_eq!(format!("{:?}", mutex), "Mutex { data: <locked> }");
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = Mutex::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: Mutex<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.lock()), *(deserialized.lock()));
assert_eq!(contents, *(deserialized.lock()));
assert_eq!(format!("{:?}", mutex), "Mutex { <locked> }");
}
}

78
third_party/rust/parking_lot/src/once.rs vendored

@ -5,21 +5,21 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::util::UncheckedOptionExt;
#[cfg(has_sized_atomics)]
use core::sync::atomic::AtomicU8;
#[cfg(not(has_sized_atomics))]
use core::sync::atomic::AtomicUsize as AtomicU8;
use core::{
fmt, mem,
sync::atomic::{fence, Ordering},
};
use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
#[cfg(has_sized_atomics)]
use std::sync::atomic::{fence, Ordering};
#[cfg(feature = "nightly")]
use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
#[cfg(feature = "nightly")]
type U8 = u8;
#[cfg(not(has_sized_atomics))]
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::AtomicUsize as AtomicU8;
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
#[cfg(not(feature = "nightly"))]
type U8 = usize;
use parking_lot_core::{self, SpinWait, DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
use std::fmt;
use std::mem;
use util::UncheckedOptionExt;
const DONE_BIT: U8 = 1;
const POISON_BIT: U8 = 2;
@ -38,14 +38,14 @@ pub enum OnceState {
/// A thread is currently executing a closure.
InProgress,
/// A closure has completed successfully.
/// A closure has completed sucessfully.
Done,
}
impl OnceState {
/// Returns whether the associated `Once` has been poisoned.
///
/// Once an initialization routine for a `Once` has panicked it will forever
/// Once an initalization routine for a `Once` has panicked it will forever
/// indicate to future forced initialization routines that it is poisoned.
#[inline]
pub fn poisoned(&self) -> bool {
@ -55,7 +55,7 @@ impl OnceState {
}
}
/// Returns whether the associated `Once` has successfully executed a
/// Returns whether the associated `Once` has successfullly executed a
/// closure.
#[inline]
pub fn done(&self) -> bool {
@ -81,9 +81,9 @@ impl OnceState {
/// # Examples
///
/// ```
/// use parking_lot::Once;
/// use parking_lot::{Once, ONCE_INIT};
///
/// static START: Once = Once::new();
/// static START: Once = ONCE_INIT;
///
/// START.call_once(|| {
/// // run initialization here
@ -91,11 +91,22 @@ impl OnceState {
/// ```
pub struct Once(AtomicU8);
/// Initialization value for static `Once` values.
pub const ONCE_INIT: Once = Once(ATOMIC_U8_INIT);
impl Once {
/// Creates a new `Once` value.
#[cfg(feature = "nightly")]
#[inline]
pub const fn new() -> Once {
Once(AtomicU8::new(0))
Once(ATOMIC_U8_INIT)
}
/// Creates a new `Once` value.
#[cfg(not(feature = "nightly"))]
#[inline]
pub fn new() -> Once {
Once(ATOMIC_U8_INIT)
}
/// Returns the current state of this `Once`.
@ -130,10 +141,10 @@ impl Once {
/// # Examples
///
/// ```
/// use parking_lot::Once;
/// use parking_lot::{Once, ONCE_INIT};
///
/// static mut VAL: usize = 0;
/// static INIT: Once = Once::new();
/// static INIT: Once = ONCE_INIT;
///
/// // Accessing a `static mut` is unsafe much of the time, but if we do so
/// // in a synchronized fashion (e.g. write once or read all) then we're
@ -212,7 +223,7 @@ impl Once {
// without some allocation overhead.
#[cold]
#[inline(never)]
fn call_once_slow(&self, ignore_poison: bool, f: &mut dyn FnMut(OnceState)) {
fn call_once_slow(&self, ignore_poison: bool, f: &mut FnMut(OnceState)) {
let mut spinwait = SpinWait::new();
let mut state = self.0.load(Ordering::Relaxed);
loop {
@ -333,7 +344,7 @@ impl Default for Once {
}
impl fmt::Debug for Once {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Once")
.field("state", &self.state())
.finish()
@ -342,14 +353,15 @@ impl fmt::Debug for Once {
#[cfg(test)]
mod tests {
use crate::Once;
#[cfg(feature = "nightly")]
use std::panic;
use std::sync::mpsc::channel;
use std::thread;
use {Once, ONCE_INIT};
#[test]
fn smoke_once() {
static O: Once = Once::new();
static O: Once = ONCE_INIT;
let mut a = 0;
O.call_once(|| a += 1);
assert_eq!(a, 1);
@ -359,7 +371,7 @@ mod tests {
#[test]
fn stampede_once() {
static O: Once = Once::new();
static O: Once = ONCE_INIT;
static mut RUN: bool = false;
let (tx, rx) = channel();
@ -393,9 +405,10 @@ mod tests {
}
}
#[cfg(feature = "nightly")]
#[test]
fn poison_bad() {
static O: Once = Once::new();
static O: Once = ONCE_INIT;
// poison the once
let t = panic::catch_unwind(|| {
@ -421,9 +434,10 @@ mod tests {
O.call_once(|| {});
}
#[cfg(feature = "nightly")]
#[test]
fn wait_for_force_to_finish() {
static O: Once = Once::new();
static O: Once = ONCE_INIT;
// poison the once
let t = panic::catch_unwind(|| {
@ -461,8 +475,14 @@ mod tests {
#[test]
fn test_once_debug() {
static O: Once = Once::new();
static O: Once = ONCE_INIT;
assert_eq!(format!("{:?}", O), "Once { state: New }");
assert_eq!(
format!("{:#?}", O),
"Once {
state: New
}"
);
}
}
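A runnable sketch in the restored `ONCE_INIT` style, mirroring the doc examples above:
```rust
use parking_lot::{Once, ONCE_INIT};

static INIT: Once = ONCE_INIT;
static mut VAL: usize = 0;

fn value() -> usize {
    // The closure runs exactly once; later callers block until it is done,
    // so every caller observes the initialized value.
    INIT.call_once(|| unsafe { VAL = 42 });
    unsafe { VAL }
}

fn main() {
    assert_eq!(value(), 42);
    assert_eq!(value(), 42);
}
```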

42
third_party/rust/parking_lot/src/raw_mutex.rs vendored

@ -5,20 +5,21 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::{deadlock, util};
#[cfg(has_sized_atomics)]
use core::sync::atomic::AtomicU8;
#[cfg(not(has_sized_atomics))]
use core::sync::atomic::AtomicUsize as AtomicU8;
use core::{sync::atomic::Ordering, time::Duration};
use std::sync::atomic::Ordering;
#[cfg(feature = "nightly")]
use std::sync::atomic::{ATOMIC_U8_INIT, AtomicU8};
#[cfg(feature = "nightly")]
type U8 = u8;
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::AtomicUsize as AtomicU8;
#[cfg(not(feature = "nightly"))]
use std::sync::atomic::ATOMIC_USIZE_INIT as ATOMIC_U8_INIT;
#[cfg(not(feature = "nightly"))]
type U8 = usize;
use deadlock;
use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
use std::time::Instant;
#[cfg(has_sized_atomics)]
type U8 = u8;
#[cfg(not(has_sized_atomics))]
type U8 = usize;
use std::time::{Duration, Instant};
// UnparkToken used to indicate that that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
@ -38,7 +39,7 @@ pub struct RawMutex {
unsafe impl RawMutexTrait for RawMutex {
const INIT: RawMutex = RawMutex {
state: AtomicU8::new(0),
state: ATOMIC_U8_INIT,
};
type GuardMarker = GuardNoSend;
@ -82,7 +83,7 @@ unsafe impl RawMutexTrait for RawMutex {
unsafe { deadlock::release_resource(self as *const _ as usize) };
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
@ -97,7 +98,7 @@ unsafe impl RawMutexFair for RawMutex {
unsafe { deadlock::release_resource(self as *const _ as usize) };
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.compare_exchange_weak(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
@ -143,7 +144,7 @@ unsafe impl RawMutexTimed for RawMutex {
{
true
} else {
self.lock_slow(util::to_deadline(timeout))
self.lock_slow(Some(Instant::now() + timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
@ -263,6 +264,15 @@ impl RawMutex {
#[cold]
#[inline(never)]
fn unlock_slow(&self, force_fair: bool) {
// Unlock directly if there are no parked threads
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
// Unpark one thread and leave the parked bit set if there might
// still be parked threads on this address.
unsafe {
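The `RawMutexFair` implementation above is what backs fair unlocking at the public API level; a minimal sketch, assuming the lock_api 0.1 associated-function form:
```rust
use parking_lot::{Mutex, MutexGuard};

fn main() {
    let m = Mutex::new(0);
    let mut g = m.lock();
    *g += 1;
    // A fair unlock hands the mutex directly to a queued thread instead of
    // letting the unlocking thread immediately barge back in.
    MutexGuard::unlock_fair(g);
    assert_eq!(*m.lock(), 1);
}
```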

1630
third_party/rust/parking_lot/src/raw_rwlock.rs vendored

Diff not shown because of its large size.

37
third_party/rust/parking_lot/src/remutex.rs vendored

@ -5,8 +5,8 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_mutex::RawMutex;
use lock_api::{self, GetThreadId};
use raw_mutex::RawMutex;
/// Implementation of the `GetThreadId` trait for `lock_api::ReentrantMutex`.
pub struct RawThreadId;
@ -40,7 +40,8 @@ pub type ReentrantMutex<T> = lock_api::ReentrantMutex<RawMutex, RawThreadId, T>;
///
/// The data protected by the mutex can be accessed through this guard via its
/// `Deref` implementation.
pub type ReentrantMutexGuard<'a, T> = lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
pub type ReentrantMutexGuard<'a, T> =
lock_api::ReentrantMutexGuard<'a, RawMutex, RawThreadId, T>;
/// An RAII mutex guard returned by `ReentrantMutexGuard::map`, which can point to a
/// subfield of the protected data.
@ -54,13 +55,10 @@ pub type MappedReentrantMutexGuard<'a, T> =
#[cfg(test)]
mod tests {
use crate::ReentrantMutex;
use std::cell::RefCell;
use std::sync::Arc;
use std::thread;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
use ReentrantMutex;
#[test]
fn smoke() {
@ -105,9 +103,8 @@ mod tests {
thread::spawn(move || {
let lock = m2.try_lock();
assert!(lock.is_none());
})
.join()
.unwrap();
}).join()
.unwrap();
let _lock3 = m.try_lock();
}
@ -116,18 +113,14 @@ mod tests {
let mutex = ReentrantMutex::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", mutex), "ReentrantMutex { data: [0, 10] }");
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = ReentrantMutex::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: ReentrantMutex<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.lock()), *(deserialized.lock()));
assert_eq!(contents, *(deserialized.lock()));
assert_eq!(
format!("{:#?}", mutex),
"ReentrantMutex {
data: [
0,
10
]
}"
);
}
}

62
third_party/rust/parking_lot/src/rwlock.rs vendored

@ -5,8 +5,8 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::raw_rwlock::RawRwLock;
use lock_api;
use raw_rwlock::RawRwLock;
/// A reader-writer lock
///
@ -116,20 +116,19 @@ pub type MappedRwLockWriteGuard<'a, T> = lock_api::MappedRwLockWriteGuard<'a, Ra
/// RAII structure used to release the upgradable read access of a lock when
/// dropped.
pub type RwLockUpgradableReadGuard<'a, T> = lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
pub type RwLockUpgradableReadGuard<'a, T> =
lock_api::RwLockUpgradableReadGuard<'a, RawRwLock, T>;
#[cfg(test)]
mod tests {
use crate::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
use rand::Rng;
extern crate rand;
use self::rand::Rng;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
#[cfg(feature = "serde")]
use bincode::{deserialize, serialize};
use {RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);
@ -179,8 +178,7 @@ mod tests {
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write();
panic!();
})
.join();
}).join();
let lock = arc.read();
assert_eq!(*lock, 1);
}
@ -192,8 +190,7 @@ mod tests {
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.write();
panic!();
})
.join();
}).join();
let lock = arc.write();
assert_eq!(*lock, 1);
}
@ -205,8 +202,7 @@ mod tests {
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read();
panic!();
})
.join();
}).join();
let lock = arc.read();
assert_eq!(*lock, 1);
}
@ -218,8 +214,7 @@ mod tests {
let _: Result<(), _> = thread::spawn(move || {
let _lock = arc2.read();
panic!()
})
.join();
}).join();
let lock = arc.write();
assert_eq!(*lock, 1);
}
@ -334,8 +329,7 @@ mod tests {
}
let _u = Unwinder { i: arc2 };
panic!();
})
.join();
}).join();
let lock = arc.read();
assert_eq!(*lock, 2);
}
@ -536,15 +530,7 @@ mod tests {
thread::spawn(move || {
let _lock = arc2.write();
});
if cfg!(not(all(target_env = "sgx", target_vendor = "fortanix"))) {
thread::sleep(Duration::from_millis(100));
} else {
// FIXME: https://github.com/fortanix/rust-sgx/issues/31
for _ in 0..100 {
thread::yield_now();
}
}
thread::sleep(Duration::from_millis(100));
// A normal read would block here since there is a pending writer
let _lock2 = arc.read_recursive();
@ -555,8 +541,17 @@ mod tests {
let x = RwLock::new(vec![0u8, 10]);
assert_eq!(format!("{:?}", x), "RwLock { data: [0, 10] }");
assert_eq!(
format!("{:#?}", x),
"RwLock {
data: [
0,
10
]
}"
);
let _lock = x.write();
assert_eq!(format!("{:?}", x), "RwLock { data: <locked> }");
assert_eq!(format!("{:?}", x), "RwLock { <locked> }");
}
#[test]
@ -566,17 +561,4 @@ mod tests {
let b = a.clone();
assert_eq!(Arc::strong_count(&b), 2);
}
#[cfg(feature = "serde")]
#[test]
fn test_serde() {
let contents: Vec<u8> = vec![0, 1, 2];
let mutex = RwLock::new(contents.clone());
let serialized = serialize(&mutex).unwrap();
let deserialized: RwLock<Vec<u8>> = deserialize(&serialized).unwrap();
assert_eq!(*(mutex.read()), *(deserialized.read()));
assert_eq!(contents, *(deserialized.read()));
}
}
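For reference, a brief sketch of the upgradable-read guard re-exported above, assuming the lock_api 0.1 API shape:
```rust
use parking_lot::{RwLock, RwLockUpgradableReadGuard};

fn main() {
    let lock = RwLock::new(5);
    // An upgradable read coexists with plain readers but can later become
    // a writer without releasing the lock in between.
    let readable = lock.upgradable_read();
    assert_eq!(*readable, 5);
    let mut writable = RwLockUpgradableReadGuard::upgrade(readable);
    *writable += 1;
    assert_eq!(*writable, 6);
}
```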

12
third_party/rust/parking_lot/src/util.rs vendored

@ -5,8 +5,6 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use std::time::{Duration, Instant};
// Option::unchecked_unwrap
pub trait UncheckedOptionExt<T> {
unsafe fn unchecked_unwrap(self) -> T;
@ -32,13 +30,3 @@ unsafe fn unreachable() -> ! {
match *(1 as *const Void) {}
}
}
#[inline]
pub fn to_deadline(timeout: Duration) -> Option<Instant> {
#[cfg(has_checked_instant)]
let deadline = Instant::now().checked_add(timeout);
#[cfg(not(has_checked_instant))]
let deadline = Some(Instant::now() + timeout);
deadline
}

third_party/rust/parking_lot_core/.cargo-checksum.json vendored

@ -1 +1 @@
{"files":{"Cargo.toml":"99e468923e11bcd61cd9961fd5c0a8e0151fae5b6695c1aaaa802a9ee790b91b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","build.rs":"d6aa24b67fdcacf238778c5efaf1f622ec7f7a7ec27fa051f415a1e2d31f3532","src/lib.rs":"4b754002784224bf94136139eadc30136c638feacdc0d25dab44834a29805e60","src/parking_lot.rs":"ecda8f1230f796d4f8014699f13436b42a3e3b02edf2694cff5d02497d0d8f19","src/spinwait.rs":"d568d8a81f9144ec4c4a139dc934d7d04ee1656a4a221eb548742fe7aba09ab1","src/thread_parker/cloudabi.rs":"8096eefdf3a7b6fe1af223b548eabae067e4a838e49f1834b3dbb92c6c10169f","src/thread_parker/generic.rs":"fb89e50fba40956e2322a4aa8bd409cf14186c757a6a525cca3e71215b814e59","src/thread_parker/linux.rs":"d52fc55e2c17e9111d5d5a00efe58a87d0e72def22f18f1f34f5364744c79ff6","src/thread_parker/redox.rs":"4fa0ac04dcc740ebab57653dc685853d9fb950af545bbba93dbe61d985788a8e","src/thread_parker/sgx.rs":"0e30172ecf48c56bc85e26d976661c493142eeb71bd7713e21465067256ded90","src/thread_parker/unix.rs":"09418fec4845d0d6cc3039c4196cec7c96d53e65d552eb1b0c0a88fb6f72dd3e","src/thread_parker/wasm.rs":"29f5c518184a73f83d00097c7f3747406b008dc112937520414e5d41e50f2779","src/thread_parker/windows/keyed_event.rs":"e0c2ed647e0550bffa003160405b5f4ddd40500134c2eb15c3eb598792c30e84","src/thread_parker/windows/mod.rs":"7252790b6d1126d773f17760692e3664c140abebea9930058c84113bedd3b48d","src/thread_parker/windows/waitaddress.rs":"06d994633006e237dc940f377432ea00cf1609e56096d69d46f7bb3b80eeb857","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84","src/word_lock.rs":"471b4fdf7877da693d26f5b80c732120af752f6eaffc65d4ca316bca3601e44a"},"package":"cb88cb1cb3790baa6776844f968fea3be44956cf184fa1be5a03341f5491278c"}
{"files":{"Cargo.toml":"220144666e4c0a4b3b3235e7d3b10f4f34cb3b8ca292ee19437f23c9a15758de","src/lib.rs":"e80f927665ef24660878e5e4a4ea3c26892c2849889d59aacee6beb59d02020d","src/parking_lot.rs":"2da388ff4c13003fc30531bb6110e4feedac30ad3ce905912e657711a6b0fdad","src/spinwait.rs":"cbd2d2464ef6fa5fb05109bdb3ca588467949dcd4ee9194deafef6004d10215e","src/thread_parker/generic.rs":"0c30db3d1c96bd5ef284a4761a829aba8d21fc813b3d1d70b2baf5f00744e006","src/thread_parker/linux.rs":"1c4c023ebb58fcc16451683c6c8b68311e87ab34537dc17a060ddf5aad02a215","src/thread_parker/unix.rs":"dc6f4af965618cc2d87d3bef6455ba78b44ffe5b38dff9d41fb86e1526cbbcd1","src/thread_parker/windows/keyed_event.rs":"efe64f7bcdfe03049a7b901d2573bc7db1bb73b8ab4a040245423d95c8f9514f","src/thread_parker/windows/mod.rs":"f31eed53f3e402477d80a70a7c6d474c01ba4c9ad952bbe562509448cd3cc1ad","src/thread_parker/windows/waitaddress.rs":"09d1e6a5a6c3f23f375ae4beee946290f7c66d183e69d476ce69b21a4a5aa7af","src/util.rs":"2d07c0c010a857790ae2ed6a1215eeed8af76859e076797ea1ba8dec82169e84","src/word_lock.rs":"692f443c52672c6e88c0cad259cf7c89dc2a1b54aa95eeeea582401b2a7d058d"},"package":"4db1a8ccf734a7bce794cc19b3df06ed87ab2f3907036b693c68f56b4d4537fa"}

18
third_party/rust/parking_lot_core/Cargo.toml vendored

@ -3,7 +3,7 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
@ -11,28 +11,24 @@
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "parking_lot_core"
version = "0.5.0"
version = "0.2.14"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "An advanced API for creating custom synchronization primitives."
documentation = "https://amanieu.github.io/parking_lot/parking_lot_core/index.html"
keywords = ["mutex", "condvar", "rwlock", "once", "thread"]
categories = ["concurrency"]
license = "Apache-2.0/MIT"
repository = "https://github.com/Amanieu/parking_lot"
[dependencies.backtrace]
version = "0.3.2"
optional = true
[dependencies.cfg-if]
version = "0.1"
[dependencies.petgraph]
version = "0.4.5"
optional = true
[dependencies.rand]
version = "0.6"
version = "0.4"
[dependencies.smallvec]
version = "0.6"
@ -40,16 +36,10 @@ version = "0.6"
[dependencies.thread-id]
version = "3.2.0"
optional = true
[build-dependencies.rustc_version]
version = "0.2"
[features]
deadlock_detection = ["petgraph", "thread-id", "backtrace"]
nightly = []
[target."cfg(target_os = \"cloudabi\")".dependencies.cloudabi]
version = "0.0.3"
[target."cfg(target_os = \"redox\")".dependencies.redox_syscall]
version = "0.1"
[target."cfg(unix)".dependencies.libc]
version = "0.2.27"
[target."cfg(windows)".dependencies.winapi]

third_party/rust/parking_lot_core/LICENSE-APACHE vendored

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/parking_lot_core/LICENSE-MIT vendored

@ -1,25 +0,0 @@
Copyright (c) 2016 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

7
third_party/rust/parking_lot_core/build.rs vendored

@ -1,7 +0,0 @@
use rustc_version::{version, Version};
fn main() {
if version().unwrap() >= Version::parse("1.34.0").unwrap() {
println!("cargo:rustc-cfg=has_sized_atomics");
}
}

93
third_party/rust/parking_lot_core/src/lib.rs vendored

@ -38,67 +38,44 @@
//! reference count and the two mutex bits in the same atomic word.
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![cfg_attr(
all(target_env = "sgx", target_vendor = "fortanix"),
feature(sgx_platform)
)]
#![cfg_attr(
all(
feature = "nightly",
target_arch = "wasm32",
target_feature = "atomics"
),
feature(checked_duration_since, stdsimd)
)]
#![cfg_attr(
all(feature = "nightly", target_os = "cloudabi",),
feature(thread_local, checked_duration_since)
)]
#![cfg_attr(all(feature = "nightly", target_os = "linux"), feature(integer_atomics))]
use cfg_if::cfg_if;
extern crate rand;
extern crate smallvec;
cfg_if! {
if #[cfg(all(has_sized_atomics, target_os = "linux"))] {
#[path = "thread_parker/linux.rs"]
mod thread_parker;
} else if #[cfg(unix)] {
#[path = "thread_parker/unix.rs"]
mod thread_parker;
} else if #[cfg(windows)] {
#[path = "thread_parker/windows/mod.rs"]
mod thread_parker;
} else if #[cfg(all(has_sized_atomics, target_os = "redox"))] {
#[path = "thread_parker/redox.rs"]
mod thread_parker;
} else if #[cfg(all(target_env = "sgx", target_vendor = "fortanix"))] {
#[path = "thread_parker/sgx.rs"]
mod thread_parker;
} else if #[cfg(all(
feature = "nightly",
target_arch = "wasm32",
target_feature = "atomics"
))] {
#[path = "thread_parker/wasm.rs"]
mod thread_parker;
} else if #[cfg(all(feature = "nightly", target_os = "cloudabi"))] {
#[path = "thread_parker/cloudabi.rs"]
mod thread_parker;
} else {
#[path = "thread_parker/generic.rs"]
mod thread_parker;
}
}
#[cfg(feature = "deadlock_detection")]
extern crate backtrace;
#[cfg(feature = "deadlock_detection")]
extern crate petgraph;
#[cfg(feature = "deadlock_detection")]
extern crate thread_id;
#[cfg(unix)]
extern crate libc;
#[cfg(windows)]
extern crate winapi;
#[cfg(all(feature = "nightly", target_os = "linux"))]
#[path = "thread_parker/linux.rs"]
mod thread_parker;
#[cfg(all(unix, not(all(feature = "nightly", target_os = "linux"))))]
#[path = "thread_parker/unix.rs"]
mod thread_parker;
#[cfg(windows)]
#[path = "thread_parker/windows/mod.rs"]
mod thread_parker;
#[cfg(not(any(windows, unix)))]
#[path = "thread_parker/generic.rs"]
mod thread_parker;
mod parking_lot;
mod spinwait;
mod util;
mod spinwait;
mod word_lock;
mod parking_lot;
pub use self::parking_lot::deadlock;
pub use self::parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
pub use self::parking_lot::{
FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken,
};
pub use self::parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
pub use self::spinwait::SpinWait;
pub use parking_lot::{FilterOp, ParkResult, ParkToken, RequeueOp, UnparkResult, UnparkToken};
pub use parking_lot::{DEFAULT_PARK_TOKEN, DEFAULT_UNPARK_TOKEN};
pub use parking_lot::{park, unpark_all, unpark_filter, unpark_one, unpark_requeue};
pub use spinwait::SpinWait;
pub use parking_lot::deadlock;

third_party/rust/parking_lot_core/src/parking_lot.rs vendored

@ -5,20 +5,22 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::thread_parker::ThreadParker;
use crate::util::UncheckedOptionExt;
use crate::word_lock::WordLock;
use core::{
cell::{Cell, UnsafeCell},
ptr,
sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
};
use rand::{rngs::SmallRng, FromEntropy, Rng};
use smallvec::SmallVec;
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::time::{Duration, Instant};
use std::cell::{Cell, UnsafeCell};
use std::ptr;
use std::mem;
use std::thread::LocalKey;
#[cfg(not(feature = "nightly"))]
use std::panic;
use smallvec::SmallVec;
use rand::{self, Rng, XorShiftRng};
use thread_parker::ThreadParker;
use word_lock::WordLock;
use util::UncheckedOptionExt;
static NUM_THREADS: AtomicUsize = AtomicUsize::new(0);
static HASHTABLE: AtomicPtr<HashTable> = AtomicPtr::new(ptr::null_mut());
static NUM_THREADS: AtomicUsize = ATOMIC_USIZE_INIT;
static HASHTABLE: AtomicUsize = ATOMIC_USIZE_INIT;
// Even with 3x more buckets than threads, the memory overhead per thread is
// still only a few hundred bytes per thread.
@ -36,26 +38,24 @@ struct HashTable {
}
impl HashTable {
#[inline]
fn new(num_threads: usize, prev: *const HashTable) -> Box<HashTable> {
let new_size = (num_threads * LOAD_FACTOR).next_power_of_two();
let hash_bits = 0usize.leading_zeros() - new_size.leading_zeros() - 1;
let now = Instant::now();
let mut entries = Vec::with_capacity(new_size);
for _ in 0..new_size {
entries.push(Bucket::new(now));
}
let bucket = Bucket {
mutex: WordLock::new(),
queue_head: Cell::new(ptr::null()),
queue_tail: Cell::new(ptr::null()),
fair_timeout: UnsafeCell::new(FairTimeout::new()),
_padding: unsafe { mem::uninitialized() },
};
Box::new(HashTable {
entries: entries.into_boxed_slice(),
hash_bits,
entries: vec![bucket; new_size].into_boxed_slice(),
hash_bits: hash_bits,
_prev: prev,
})
}
}
#[repr(align(64))]
struct Bucket {
// Lock protecting the queue
mutex: WordLock,
@ -66,16 +66,22 @@ struct Bucket {
// Next time at which point be_fair should be set
fair_timeout: UnsafeCell<FairTimeout>,
// Padding to avoid false sharing between buckets. Ideally we would just
// align the bucket structure to 64 bytes, but Rust doesn't support that
// yet.
_padding: [u8; 64],
}
impl Bucket {
#[inline]
pub fn new(timeout: Instant) -> Self {
Self {
mutex: WordLock::INIT,
// Implementation of Clone for Bucket, needed to make vec![] work
impl Clone for Bucket {
fn clone(&self) -> Bucket {
Bucket {
mutex: WordLock::new(),
queue_head: Cell::new(ptr::null()),
queue_tail: Cell::new(ptr::null()),
fair_timeout: UnsafeCell::new(FairTimeout::new(timeout)),
fair_timeout: UnsafeCell::new(FairTimeout::new()),
_padding: unsafe { mem::uninitialized() },
}
}
}
@ -85,20 +91,18 @@ struct FairTimeout {
timeout: Instant,
// Random number generator for calculating the next timeout
rng: SmallRng,
rng: XorShiftRng,
}
impl FairTimeout {
#[inline]
fn new(timeout: Instant) -> FairTimeout {
fn new() -> FairTimeout {
FairTimeout {
timeout,
rng: SmallRng::from_entropy(),
timeout: Instant::now(),
rng: rand::weak_rng(),
}
}
// Determine whether we should force a fair unlock, and update the timeout
#[inline]
fn should_timeout(&mut self) -> bool {
let now = Instant::now();
if now > self.timeout {
@ -130,8 +134,8 @@ struct ThreadData {
parked_with_timeout: Cell<bool>,
// Extra data for deadlock detection
#[cfg(feature = "deadlock_detection")]
deadlock_data: deadlock::DeadlockData,
// TODO: once supported in stable replace with #[cfg...] & remove dummy struct/impl
#[allow(dead_code)] deadlock_data: deadlock::DeadlockData,
}
impl ThreadData {
@ -150,28 +154,34 @@ impl ThreadData {
unpark_token: Cell::new(DEFAULT_UNPARK_TOKEN),
park_token: Cell::new(DEFAULT_PARK_TOKEN),
parked_with_timeout: Cell::new(false),
#[cfg(feature = "deadlock_detection")]
deadlock_data: deadlock::DeadlockData::new(),
}
}
}
// Invokes the given closure with a reference to the current thread `ThreadData`.
#[inline(always)]
fn with_thread_data<F, T>(f: F) -> T
where
F: FnOnce(&ThreadData) -> T,
{
// Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive
// to construct. Try to use a thread-local version if possible. Otherwise just
// create a ThreadData on the stack
let mut thread_data_storage = None;
thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
let thread_data_ptr = THREAD_DATA
.try_with(|x| x as *const ThreadData)
.unwrap_or_else(|_| thread_data_storage.get_or_insert_with(ThreadData::new));
// Returns a ThreadData structure for the current thread
unsafe fn get_thread_data(local: &mut Option<ThreadData>) -> &ThreadData {
// Try to read from thread-local storage, but return None if the TLS has
// already been destroyed.
#[cfg(feature = "nightly")]
fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
key.try_with(|x| x as *const ThreadData).ok()
}
#[cfg(not(feature = "nightly"))]
fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok()
}
f(unsafe { &*thread_data_ptr })
// Unlike word_lock::ThreadData, parking_lot::ThreadData is always expensive
// to construct. Try to use a thread-local version if possible.
thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
if let Some(tls) = try_get_tls(&THREAD_DATA) {
return &*tls;
}
// Otherwise just create a ThreadData on the stack
*local = Some(ThreadData::new());
local.as_ref().unwrap()
}
impl Drop for ThreadData {
@ -181,41 +191,30 @@ impl Drop for ThreadData {
}
// Get a pointer to the latest hash table, creating one if it doesn't exist yet.
#[inline]
fn get_hashtable() -> *mut HashTable {
let table = HASHTABLE.load(Ordering::Acquire);
unsafe fn get_hashtable() -> *const HashTable {
let mut table = HASHTABLE.load(Ordering::Acquire);
// If there is no table, create one
if table.is_null() {
create_hashtable()
} else {
table
}
}
if table == 0 {
let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
// Get a pointer to the latest hash table, creating one if it doesn't exist yet.
#[cold]
#[inline(never)]
fn create_hashtable() -> *mut HashTable {
let new_table = Box::into_raw(HashTable::new(LOAD_FACTOR, ptr::null()));
// If this fails then it means some other thread created the hash
// table first.
match HASHTABLE.compare_exchange(
ptr::null_mut(),
new_table,
Ordering::Release,
Ordering::Relaxed,
) {
Ok(_) => new_table,
Err(old_table) => {
// Free the table we created
unsafe {
Box::from_raw(new_table);
}
old_table
// If this fails then it means some other thread created the hash
// table first.
match HASHTABLE.compare_exchange(
0,
new_table as usize,
Ordering::Release,
Ordering::Relaxed,
) {
Ok(_) => return new_table,
Err(x) => table = x,
}
// Free the table we created
Box::from_raw(new_table);
}
table as *const HashTable
}
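// Editorial illustration, not part of this patch: the racy lazy-init idiom
// used by get_hashtable above, shown standalone. Race to publish a boxed
// value through an atomic, and free our copy if another thread won. (Uses
// AtomicPtr for clarity; the code above stores the pointer as a usize.)
use std::sync::atomic::{AtomicPtr, Ordering};

static TABLE: AtomicPtr<u32> = AtomicPtr::new(std::ptr::null_mut());

fn get_table() -> *const u32 {
    let p = TABLE.load(Ordering::Acquire);
    if !p.is_null() {
        return p;
    }
    let new = Box::into_raw(Box::new(0u32));
    match TABLE.compare_exchange(std::ptr::null_mut(), new, Ordering::Release, Ordering::Acquire) {
        Ok(_) => new,
        Err(winner) => {
            // Some other thread created the value first; free ours.
            unsafe { drop(Box::from_raw(new)) };
            winner
        }
    }
}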
// Grow the hash table so that it is big enough for the given number of threads.
@ -223,18 +222,13 @@ fn create_hashtable() -> *mut HashTable {
// created, which only happens once per thread.
unsafe fn grow_hashtable(num_threads: usize) {
// If there is no table, create one
if HASHTABLE.load(Ordering::Relaxed).is_null() {
if HASHTABLE.load(Ordering::Relaxed) == 0 {
let new_table = Box::into_raw(HashTable::new(num_threads, ptr::null()));
// If this fails then it means some other thread created the hash
// table first.
if HASHTABLE
.compare_exchange(
ptr::null_mut(),
new_table,
Ordering::Release,
Ordering::Relaxed,
)
.compare_exchange(0, new_table as usize, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
@ -246,7 +240,7 @@ unsafe fn grow_hashtable(num_threads: usize) {
let mut old_table;
loop {
old_table = HASHTABLE.load(Ordering::Acquire);
old_table = HASHTABLE.load(Ordering::Acquire) as *mut HashTable;
// Check if we need to resize the existing table
if (*old_table).entries.len() >= LOAD_FACTOR * num_threads {
@ -261,7 +255,7 @@ unsafe fn grow_hashtable(num_threads: usize) {
// Now check if our table is still the latest one. Another thread could
// have grown the hash table between us reading HASHTABLE and locking
// the buckets.
if HASHTABLE.load(Ordering::Relaxed) == old_table {
if HASHTABLE.load(Ordering::Relaxed) == old_table as usize {
break;
}
@ -296,7 +290,7 @@ unsafe fn grow_hashtable(num_threads: usize) {
// Publish the new table. No races are possible at this point because
// any other thread trying to grow the hash table is blocked on the bucket
// locks in the old table.
HASHTABLE.store(Box::into_raw(new_table), Ordering::Release);
HASHTABLE.store(Box::into_raw(new_table) as usize, Ordering::Release);
// Unlock all buckets in the old table
for b in &(*old_table).entries[..] {
@ -306,18 +300,15 @@ unsafe fn grow_hashtable(num_threads: usize) {
// Hash function for addresses
#[cfg(target_pointer_width = "32")]
#[inline]
fn hash(key: usize, bits: u32) -> usize {
key.wrapping_mul(0x9E3779B9) >> (32 - bits)
}
#[cfg(target_pointer_width = "64")]
#[inline]
fn hash(key: usize, bits: u32) -> usize {
key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits)
}
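// Editorial worked example, not part of this patch: the multiplier above is
// floor(2^64 / golden ratio), i.e. Fibonacci hashing. Multiplying and keeping
// the top `bits` bits spreads nearby addresses across buckets.
fn demo_hash() {
    fn hash64(key: usize, bits: u32) -> usize {
        key.wrapping_mul(0x9E3779B97F4A7C15) >> (64 - bits)
    }
    // Nearby 8-byte-aligned addresses get spread across a 32-entry table
    // (values assume a 64-bit target).
    for key in [0x7f00_0010_usize, 0x7f00_0018, 0x7f00_0020] {
        println!("{:#x} -> bucket {}", key, hash64(key, 5));
    }
}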
// Lock the bucket for the given key
#[inline]
unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
let mut bucket;
loop {
@ -331,7 +322,7 @@ unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
// If no other thread has rehashed the table before we grabbed the lock
// then we are good to go! The lock we grabbed prevents any rehashes.
if HASHTABLE.load(Ordering::Relaxed) == hashtable {
if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize {
return bucket;
}
@ -342,7 +333,6 @@ unsafe fn lock_bucket<'a>(key: usize) -> &'a Bucket {
// Lock the bucket for the given key, but check that the key hasn't been changed
// in the meantime due to a requeue.
#[inline]
unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
let mut bucket;
loop {
@ -358,7 +348,7 @@ unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
// Check that both the hash table and key are correct while the bucket
// is locked. Note that the key can't change once we locked the proper
// bucket for it, so we just keep trying until we have the correct key.
if HASHTABLE.load(Ordering::Relaxed) == hashtable
if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize
&& key.load(Ordering::Relaxed) == current_key
{
return (current_key, bucket);
@ -370,7 +360,6 @@ unsafe fn lock_bucket_checked<'a>(key: &AtomicUsize) -> (usize, &'a Bucket) {
}
// Lock the two buckets for the given pair of keys
#[inline]
unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Bucket) {
let mut bucket1;
loop {
@ -390,7 +379,7 @@ unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Buc
// If no other thread has rehashed the table before we grabbed the lock
// then we are good to go! The lock we grabbed prevents any rehashes.
if HASHTABLE.load(Ordering::Relaxed) == hashtable {
if HASHTABLE.load(Ordering::Relaxed) == hashtable as usize {
// Now lock the second bucket and return the two buckets
if hash1 == hash2 {
return (bucket1, bucket1);
@ -411,7 +400,6 @@ unsafe fn lock_bucket_pair<'a>(key1: usize, key2: usize) -> (&'a Bucket, &'a Buc
}
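// Editorial illustration, not part of this patch: the deadlock-avoidance idea
// behind lock_bucket_pair, sketched with std mutexes. Acquire the two locks
// in a canonical order (here: by address) so any two threads locking the same
// pair agree on the order and cannot deadlock. Assumes `a` and `b` are
// distinct; the code above returns a single bucket when the hashes match.
use std::sync::{Mutex, MutexGuard};

fn lock_pair<'a, T>(a: &'a Mutex<T>, b: &'a Mutex<T>) -> (MutexGuard<'a, T>, MutexGuard<'a, T>) {
    if (a as *const Mutex<T> as usize) <= (b as *const Mutex<T> as usize) {
        let ga = a.lock().unwrap();
        let gb = b.lock().unwrap();
        (ga, gb)
    } else {
        let gb = b.lock().unwrap();
        let ga = a.lock().unwrap();
        (ga, gb)
    }
}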
// Unlock a pair of buckets
#[inline]
unsafe fn unlock_bucket_pair(bucket1: &Bucket, bucket2: &Bucket) {
if bucket1 as *const _ == bucket2 as *const _ {
bucket1.mutex.unlock();
@ -439,7 +427,6 @@ pub enum ParkResult {
impl ParkResult {
/// Returns true if we were unparked by another thread.
#[inline]
pub fn is_unparked(self) -> bool {
if let ParkResult::Unparked(_) = self {
true
@ -450,14 +437,11 @@ impl ParkResult {
}
/// Result of an unpark operation.
#[derive(Copy, Clone, Default, Eq, PartialEq, Debug)]
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub struct UnparkResult {
/// The number of threads that were unparked.
pub unparked_threads: usize,
/// The number of threads that were requeued.
pub requeued_threads: usize,
/// Whether there are any threads remaining in the queue. This only returns
/// true if a thread was unparked.
pub have_more_threads: bool,
@ -466,9 +450,6 @@ pub struct UnparkResult {
/// should be used to switch to a fair unlocking mechanism for a particular
/// unlock.
pub be_fair: bool,
/// Private field so new fields can be added without breakage.
_sealed: (),
}
/// Operation that `unpark_requeue` should perform.
@ -482,12 +463,6 @@ pub enum RequeueOp {
/// Requeue all threads onto the target queue.
RequeueAll,
/// Unpark one thread and leave the rest parked. No requeuing is done.
UnparkOne,
/// Requeue one thread and leave the rest parked on the original queue.
RequeueOne,
}
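// Editorial sketch, not part of this patch: how a condvar-style notify_all
// might drive unpark_requeue with these ops. It wakes one waiter and requeues
// the rest onto the mutex queue to avoid a thundering herd. `condvar_addr`
// and `mutex_addr` are illustrative names only.
unsafe fn notify_all_sketch(condvar_addr: usize, mutex_addr: usize) {
    unpark_requeue(
        condvar_addr,
        mutex_addr,
        || RequeueOp::UnparkOneRequeueRest,   // validate: pick the operation
        |_op, _result| DEFAULT_UNPARK_TOKEN,  // callback: choose the unpark token
    );
}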
/// Operation that `unpark_filter` should perform for each thread.
@ -559,109 +534,129 @@ where
B: FnOnce(),
T: FnOnce(usize, bool),
{
let mut v = Some(validate);
let mut b = Some(before_sleep);
let mut t = Some(timed_out);
park_internal(
key,
&mut || v.take().unchecked_unwrap()(),
&mut || b.take().unchecked_unwrap()(),
&mut |key, was_last_thread| t.take().unchecked_unwrap()(key, was_last_thread),
park_token,
timeout,
)
}
// Non-generic version to reduce monomorphization cost
unsafe fn park_internal(
key: usize,
validate: &mut FnMut() -> bool,
before_sleep: &mut FnMut(),
timed_out: &mut FnMut(usize, bool),
park_token: ParkToken,
timeout: Option<Instant>,
) -> ParkResult {
// Grab our thread data, this also ensures that the hash table exists
with_thread_data(|thread_data| {
// Lock the bucket for the given key
let bucket = lock_bucket(key);
let mut thread_data = None;
let thread_data = get_thread_data(&mut thread_data);
// If the validation function fails, just return
if !validate() {
bucket.mutex.unlock();
return ParkResult::Invalid;
}
// Lock the bucket for the given key
let bucket = lock_bucket(key);
// Append our thread data to the queue and unlock the bucket
thread_data.parked_with_timeout.set(timeout.is_some());
thread_data.next_in_queue.set(ptr::null());
thread_data.key.store(key, Ordering::Relaxed);
thread_data.park_token.set(park_token);
thread_data.parker.prepare_park();
if !bucket.queue_head.get().is_null() {
(*bucket.queue_tail.get()).next_in_queue.set(thread_data);
} else {
bucket.queue_head.set(thread_data);
}
bucket.queue_tail.set(thread_data);
// If the validation function fails, just return
if !validate() {
bucket.mutex.unlock();
return ParkResult::Invalid;
}
// Invoke the pre-sleep callback
before_sleep();
// Append our thread data to the queue and unlock the bucket
thread_data.parked_with_timeout.set(timeout.is_some());
thread_data.next_in_queue.set(ptr::null());
thread_data.key.store(key, Ordering::Relaxed);
thread_data.park_token.set(park_token);
thread_data.parker.prepare_park();
if !bucket.queue_head.get().is_null() {
(*bucket.queue_tail.get()).next_in_queue.set(thread_data);
} else {
bucket.queue_head.set(thread_data);
}
bucket.queue_tail.set(thread_data);
bucket.mutex.unlock();
// Park our thread and determine whether we were woken up by an unpark or by
// our timeout. Note that this isn't precise: we can still be unparked since
// we are still in the queue.
let unparked = match timeout {
Some(timeout) => thread_data.parker.park_until(timeout),
None => {
thread_data.parker.park();
// call deadlock detection on_unpark hook
deadlock::on_unpark(thread_data);
true
}
};
// Invoke the pre-sleep callback
before_sleep();
// If we were unparked, return now
if unparked {
return ParkResult::Unparked(thread_data.unpark_token.get());
// Park our thread and determine whether we were woken up by an unpark or by
// our timeout. Note that this isn't precise: we can still be unparked since
// we are still in the queue.
let unparked = match timeout {
Some(timeout) => thread_data.parker.park_until(timeout),
None => {
thread_data.parker.park();
// call deadlock detection on_unpark hook
deadlock::on_unpark(thread_data);
true
}
};
// Lock our bucket again. Note that the hashtable may have been rehashed in
// the meantime. Our key may also have changed if we were requeued.
let (key, bucket) = lock_bucket_checked(&thread_data.key);
// If we were unparked, return now
if unparked {
return ParkResult::Unparked(thread_data.unpark_token.get());
}
// Now we need to check again if we were unparked or timed out. Unlike the
// last check this is precise because we hold the bucket lock.
if !thread_data.parker.timed_out() {
bucket.mutex.unlock();
return ParkResult::Unparked(thread_data.unpark_token.get());
}
// Lock our bucket again. Note that the hashtable may have been rehashed in
// the meantime. Our key may also have changed if we were requeued.
let (key, bucket) = lock_bucket_checked(&thread_data.key);
// We timed out, so we now need to remove our thread from the queue
let mut link = &bucket.queue_head;
let mut current = bucket.queue_head.get();
let mut previous = ptr::null();
let mut was_last_thread = true;
while !current.is_null() {
if current == thread_data {
let next = (*current).next_in_queue.get();
link.set(next);
if bucket.queue_tail.get() == current {
bucket.queue_tail.set(previous);
} else {
// Scan the rest of the queue to see if there are any other
// entries with the given key.
let mut scan = next;
while !scan.is_null() {
if (*scan).key.load(Ordering::Relaxed) == key {
was_last_thread = false;
break;
}
scan = (*scan).next_in_queue.get();
}
}
// Now we need to check again if we were unparked or timed out. Unlike the
// last check this is precise because we hold the bucket lock.
if !thread_data.parker.timed_out() {
bucket.mutex.unlock();
return ParkResult::Unparked(thread_data.unpark_token.get());
}
// Callback to indicate that we timed out, and whether we were the
// last thread on the queue.
timed_out(key, was_last_thread);
break;
// We timed out, so we now need to remove our thread from the queue
let mut link = &bucket.queue_head;
let mut current = bucket.queue_head.get();
let mut previous = ptr::null();
while !current.is_null() {
if current == thread_data {
let next = (*current).next_in_queue.get();
link.set(next);
let mut was_last_thread = true;
if bucket.queue_tail.get() == current {
bucket.queue_tail.set(previous);
} else {
if (*current).key.load(Ordering::Relaxed) == key {
was_last_thread = false;
// Scan the rest of the queue to see if there are any other
// entries with the given key.
let mut scan = next;
while !scan.is_null() {
if (*scan).key.load(Ordering::Relaxed) == key {
was_last_thread = false;
break;
}
scan = (*scan).next_in_queue.get();
}
link = &(*current).next_in_queue;
previous = current;
current = link.get();
}
// Callback to indicate that we timed out, and whether we were the
// last thread on the queue.
timed_out(key, was_last_thread);
break;
} else {
link = &(*current).next_in_queue;
previous = current;
current = link.get();
}
}
// There should be no way for our thread to have been removed from the queue
// if we timed out.
debug_assert!(!current.is_null());
// There should be no way for our thread to have been removed from the queue
// if we timed out.
debug_assert!(!current.is_null());
// Unlock the bucket, we are done
bucket.mutex.unlock();
ParkResult::TimedOut
})
// Unlock the bucket, we are done
bucket.mutex.unlock();
ParkResult::TimedOut
}
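// Editorial usage sketch, not part of this patch: a one-shot event built on
// the restored park/unpark API, where waiters park on the event's address.
// This mirrors how the crate's higher-level primitives drive these functions;
// the `Event` type itself is illustrative.
use std::sync::atomic::AtomicBool;

struct Event(AtomicBool);

impl Event {
    fn wait(&self) {
        let key = self as *const Event as usize;
        while !self.0.load(Ordering::Acquire) {
            unsafe {
                park(
                    key,
                    || !self.0.load(Ordering::Relaxed), // revalidate under the bucket lock
                    || {},                              // before_sleep: nothing to do
                    |_key, _was_last| {},               // timed_out: unused, no timeout given
                    DEFAULT_PARK_TOKEN,
                    None,
                );
            }
        }
    }

    fn set(&self) {
        self.0.store(true, Ordering::Release);
        unsafe { unpark_all(self as *const Event as usize, DEFAULT_UNPARK_TOKEN) };
    }
}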
/// Unparks one thread from the queue associated with the given key.
@ -688,6 +683,15 @@ pub unsafe fn unpark_one<C>(key: usize, callback: C) -> UnparkResult
where
C: FnOnce(UnparkResult) -> UnparkToken,
{
let mut c = Some(callback);
unpark_one_internal(key, &mut |result| c.take().unchecked_unwrap()(result))
}
// Non-generic version to reduce monomorphization cost
unsafe fn unpark_one_internal(
key: usize,
callback: &mut FnMut(UnparkResult) -> UnparkToken,
) -> UnparkResult {
// Lock the bucket for the given key
let bucket = lock_bucket(key);
@ -695,7 +699,11 @@ where
let mut link = &bucket.queue_head;
let mut current = bucket.queue_head.get();
let mut previous = ptr::null();
let mut result = UnparkResult::default();
let mut result = UnparkResult {
unparked_threads: 0,
have_more_threads: false,
be_fair: false,
};
while !current.is_null() {
if (*current).key.load(Ordering::Relaxed) == key {
// Remove the thread from the queue
@ -758,7 +766,6 @@ where
/// You should only call this function with an address that you control, since
/// you could otherwise interfere with the operation of other synchronization
/// primitives.
#[inline]
pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize {
// Lock the bucket for the given key
let bucket = lock_bucket(key);
@ -809,10 +816,11 @@ pub unsafe fn unpark_all(key: usize, unpark_token: UnparkToken) -> usize {
/// unparks the first one and requeues the rest onto the queue associated with
/// `key_to`.
///
/// The `validate` function is called while both queues are locked. Its return
/// value will determine which operation is performed, or whether the operation
/// should be aborted. See `RequeueOp` for details about the different possible
/// return values.
/// The `validate` function is called while both queues are locked and can abort
/// the operation by returning `RequeueOp::Abort`. It can also choose to
/// unpark the first thread in the source queue while moving the rest by
/// returning `RequeueOp::UnparkFirstRequeueRest`. Returning
/// `RequeueOp::RequeueAll` will move all threads to the destination queue.
///
/// The `callback` function is also called while both queues are locked. It is
/// passed the `RequeueOp` returned by `validate` and an `UnparkResult`
@ -843,11 +851,32 @@ where
V: FnOnce() -> RequeueOp,
C: FnOnce(RequeueOp, UnparkResult) -> UnparkToken,
{
let mut v = Some(validate);
let mut c = Some(callback);
unpark_requeue_internal(
key_from,
key_to,
&mut || v.take().unchecked_unwrap()(),
&mut |op, r| c.take().unchecked_unwrap()(op, r),
)
}
// Non-generic version to reduce monomorphization cost
unsafe fn unpark_requeue_internal(
key_from: usize,
key_to: usize,
validate: &mut FnMut() -> RequeueOp,
callback: &mut FnMut(RequeueOp, UnparkResult) -> UnparkToken,
) -> UnparkResult {
// Lock the two buckets for the given key
let (bucket_from, bucket_to) = lock_bucket_pair(key_from, key_to);
// If the validation function fails, just return
let mut result = UnparkResult::default();
let mut result = UnparkResult {
unparked_threads: 0,
have_more_threads: false,
be_fair: false,
};
let op = validate();
if op == RequeueOp::Abort {
unlock_bucket_pair(bucket_from, bucket_to);
@ -871,9 +900,7 @@ where
}
// Prepare the first thread for wakeup and requeue the rest.
if (op == RequeueOp::UnparkOneRequeueRest || op == RequeueOp::UnparkOne)
&& wakeup_thread.is_none()
{
if op == RequeueOp::UnparkOneRequeueRest && wakeup_thread.is_none() {
wakeup_thread = Some(current);
result.unparked_threads = 1;
} else {
@ -884,20 +911,7 @@ where
}
requeue_threads_tail = current;
(*current).key.store(key_to, Ordering::Relaxed);
result.requeued_threads += 1;
}
if op == RequeueOp::UnparkOne || op == RequeueOp::RequeueOne {
// Scan the rest of the queue to see if there are any other
// entries with the given key.
let mut scan = next;
while !scan.is_null() {
if (*scan).key.load(Ordering::Relaxed) == key_from {
result.have_more_threads = true;
break;
}
scan = (*scan).next_in_queue.get();
}
break;
result.have_more_threads = true;
}
current = next;
} else {
@ -971,6 +985,16 @@ where
F: FnMut(ParkToken) -> FilterOp,
C: FnOnce(UnparkResult) -> UnparkToken,
{
let mut c = Some(callback);
unpark_filter_internal(key, &mut filter, &mut |r| c.take().unchecked_unwrap()(r))
}
// Non-generic version to reduce monomorphization cost
unsafe fn unpark_filter_internal(
key: usize,
filter: &mut FnMut(ParkToken) -> FilterOp,
callback: &mut FnMut(UnparkResult) -> UnparkToken,
) -> UnparkResult {
// Lock the bucket for the given key
let bucket = lock_bucket(key);
@ -979,7 +1003,11 @@ where
let mut current = bucket.queue_head.get();
let mut previous = ptr::null();
let mut threads = SmallVec::<[_; 8]>::new();
let mut result = UnparkResult::default();
let mut result = UnparkResult {
unparked_threads: 0,
have_more_threads: false,
be_fair: false,
};
while !current.is_null() {
if (*current).key.load(Ordering::Relaxed) == key {
// Call the filter function with the thread's ParkToken
@ -1040,7 +1068,7 @@ where
result
}
/// \[Experimental\] Deadlock detection
/// [Experimental] Deadlock detection
///
/// Enabled via the `deadlock_detection` feature flag.
pub mod deadlock {
@ -1050,6 +1078,16 @@ pub mod deadlock {
#[cfg(feature = "deadlock_detection")]
pub(super) use super::deadlock_impl::DeadlockData;
#[cfg(not(feature = "deadlock_detection"))]
pub(super) struct DeadlockData {}
#[cfg(not(feature = "deadlock_detection"))]
impl DeadlockData {
pub(super) fn new() -> Self {
DeadlockData {}
}
}
/// Acquire a resource identified by key in the deadlock detector
/// Noop if deadlock_detection feature isn't enabled.
/// Note: Call after the resource is acquired
@ -1087,16 +1125,15 @@ pub mod deadlock {
#[cfg(feature = "deadlock_detection")]
mod deadlock_impl {
use super::{get_hashtable, lock_bucket, with_thread_data, ThreadData, NUM_THREADS};
use crate::word_lock::WordLock;
use super::{get_hashtable, get_thread_data, lock_bucket, ThreadData, NUM_THREADS};
use std::cell::{Cell, UnsafeCell};
use std::sync::mpsc;
use std::sync::atomic::Ordering;
use std::collections::HashSet;
use thread_id;
use backtrace::Backtrace;
use petgraph;
use petgraph::graphmap::DiGraphMap;
use std::cell::{Cell, UnsafeCell};
use std::collections::HashSet;
use std::sync::atomic::Ordering;
use std::sync::mpsc;
use thread_id;
/// Representation of a deadlocked thread
pub struct DeadlockedThread {
@ -1161,19 +1198,19 @@ mod deadlock_impl {
}
pub unsafe fn acquire_resource(key: usize) {
with_thread_data(|thread_data| {
(*thread_data.deadlock_data.resources.get()).push(key);
});
let mut thread_data = None;
let thread_data = get_thread_data(&mut thread_data);
(*thread_data.deadlock_data.resources.get()).push(key);
}
pub unsafe fn release_resource(key: usize) {
with_thread_data(|thread_data| {
let resources = &mut (*thread_data.deadlock_data.resources.get());
match resources.iter().rposition(|x| *x == key) {
Some(p) => resources.swap_remove(p),
None => panic!("key {} not found in thread resources", key),
};
});
let mut thread_data = None;
let thread_data = get_thread_data(&mut thread_data);
let resources = &mut (*thread_data.deadlock_data.resources.get());
match resources.iter().rposition(|x| *x == key) {
Some(p) => resources.swap_remove(p),
None => panic!("key {} not found in thread resources", key),
};
}
pub fn check_deadlock() -> Vec<Vec<DeadlockedThread>> {
@ -1226,13 +1263,10 @@ mod deadlock_impl {
use self::WaitGraphNode::*;
// Contrary to the _fast variant this locks the entries table before looking for cycles.
// Returns all detected thread wait cycles.
// Note that once a cycle is reported it's never reported again.
unsafe fn check_wait_graph_slow() -> Vec<Vec<DeadlockedThread>> {
static DEADLOCK_DETECTION_LOCK: WordLock = WordLock::INIT;
DEADLOCK_DETECTION_LOCK.lock();
let mut table = get_hashtable();
loop {
// Lock all buckets in the old table
@ -1306,8 +1340,6 @@ mod deadlock_impl {
results.push(receiver.iter().collect());
}
DEADLOCK_DETECTION_LOCK.unlock();
results
}
@ -1330,15 +1362,14 @@ mod deadlock_impl {
// returns all thread cycles in the wait graph
fn graph_cycles(g: &DiGraphMap<WaitGraphNode, ()>) -> Vec<Vec<*const ThreadData>> {
use petgraph::visit::NodeIndexable;
use petgraph::visit::depth_first_search;
use petgraph::visit::DfsEvent;
use petgraph::visit::NodeIndexable;
let mut cycles = HashSet::new();
let mut path = Vec::with_capacity(g.node_bound());
// start from threads to get the correct threads cycle
let threads = g
.nodes()
let threads = g.nodes()
.filter(|n| if let &Thread(_) = n { true } else { false });
depth_first_search(g, threads, |e| match e {


@ -5,9 +5,54 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::thread_parker;
#[cfg(unix)]
use libc;
#[cfg(windows)]
use winapi;
#[cfg(not(any(windows, unix)))]
use std::thread;
use std::sync::atomic::spin_loop_hint;
// Yields the rest of the current timeslice to the OS
#[cfg(windows)]
#[inline]
fn thread_yield() {
// Note that this is manually defined here rather than using the definition
// through `winapi`. The `winapi` definition comes from the `synchapi`
// header which enables the "synchronization.lib" library. It turns out,
// however, that `Sleep` comes from `kernel32.dll`, so this activation isn't
// necessary.
//
// This was originally identified in rust-lang/rust where on MinGW the
// libsynchronization.a library pulls in a dependency on a newer DLL not
// present in older versions of Windows. (see rust-lang/rust#49438)
//
// This is a bit of a hack for now and ideally we'd fix MinGW's own import
// libraries, but that'll probably take a lot longer than patching this here
// and avoiding the `synchapi` feature entirely.
extern "system" {
fn Sleep(a: winapi::shared::minwindef::DWORD);
}
unsafe {
// We don't use SwitchToThread here because it doesn't consider all
// threads in the system and the thread we are waiting for may not get
// selected.
Sleep(0);
}
}
#[cfg(unix)]
#[inline]
fn thread_yield() {
unsafe {
libc::sched_yield();
}
}
#[cfg(not(any(windows, unix)))]
#[inline]
fn thread_yield() {
thread::yield_now();
}
// Wastes some CPU time for the given number of iterations,
// using a hint to indicate to the CPU that we are spinning.
#[inline]
@ -18,7 +63,6 @@ fn cpu_relax(iterations: u32) {
}
/// A counter used to perform exponential backoff in spin loops.
#[derive(Default)]
pub struct SpinWait {
counter: u32,
}
@ -26,8 +70,8 @@ pub struct SpinWait {
impl SpinWait {
/// Creates a new `SpinWait`.
#[inline]
pub fn new() -> Self {
Self::default()
pub fn new() -> SpinWait {
SpinWait { counter: 0 }
}
/// Resets a `SpinWait` to its initial state.
@ -46,14 +90,14 @@ impl SpinWait {
/// to yielding the CPU to the OS after a few iterations.
#[inline]
pub fn spin(&mut self) -> bool {
if self.counter >= 10 {
if self.counter >= 20 {
return false;
}
self.counter += 1;
if self.counter <= 3 {
cpu_relax(1 << self.counter);
if self.counter <= 10 {
cpu_relax(4 << self.counter);
} else {
thread_parker::thread_yield();
thread_yield();
}
true
}
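// Editorial illustration, not part of this patch: typical SpinWait usage in a
// lock acquisition loop. Spin with exponential backoff while contended, then
// fall back to blocking once spin() returns false. `try_lock` and
// `park_this_thread` are assumed names, not part of this file.
fn acquire(try_lock: impl Fn() -> bool, park_this_thread: impl Fn()) {
    let mut spinwait = SpinWait::new();
    while !try_lock() {
        if !spinwait.spin() {
            // Backoff exhausted: block instead of burning CPU.
            park_this_thread();
            spinwait.reset();
        }
    }
}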
@ -69,6 +113,13 @@ impl SpinWait {
if self.counter > 10 {
self.counter = 10;
}
cpu_relax(1 << self.counter);
cpu_relax(4 << self.counter);
}
}
impl Default for SpinWait {
#[inline]
fn default() -> SpinWait {
SpinWait::new()
}
}


@ -1,325 +0,0 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use cloudabi as abi;
use core::{
cell::Cell,
mem,
sync::atomic::{AtomicU32, Ordering},
};
use std::{convert::TryFrom, thread, time::Instant};
extern "C" {
#[thread_local]
static __pthread_thread_id: abi::tid;
}
struct Lock {
lock: AtomicU32,
}
impl Lock {
#[inline]
pub fn new() -> Self {
Lock {
lock: AtomicU32::new(abi::LOCK_UNLOCKED.0),
}
}
#[inline]
fn try_lock(&self) -> Option<LockGuard<'_>> {
// Attempt to acquire the lock.
if let Err(old) = self.lock.compare_exchange(
abi::LOCK_UNLOCKED.0,
unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
Ordering::Acquire,
Ordering::Relaxed,
) {
// Failure. Crash upon recursive acquisition.
debug_assert_ne!(
old & !abi::LOCK_KERNEL_MANAGED.0,
unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
"Attempted to recursive write-lock a lock",
);
None
} else {
Some(LockGuard { inner: &self })
}
}
#[inline]
pub fn lock(&self) -> LockGuard<'_> {
self.try_lock().unwrap_or_else(|| {
// Call into the kernel to acquire a write lock.
unsafe {
let subscription = abi::subscription {
type_: abi::eventtype::LOCK_WRLOCK,
union: abi::subscription_union {
lock: abi::subscription_lock {
lock: self.ptr(),
lock_scope: abi::scope::PRIVATE,
},
},
..mem::zeroed()
};
let mut event: abi::event = mem::uninitialized();
let mut nevents: usize = mem::uninitialized();
let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
debug_assert_eq!(ret, abi::errno::SUCCESS);
debug_assert_eq!(event.error, abi::errno::SUCCESS);
}
LockGuard { inner: &self }
})
}
#[inline]
fn ptr(&self) -> *mut abi::lock {
&self.lock as *const AtomicU32 as *mut abi::lock
}
}
struct LockGuard<'a> {
inner: &'a Lock,
}
impl LockGuard<'_> {
#[inline]
fn ptr(&self) -> *mut abi::lock {
&self.inner.lock as *const AtomicU32 as *mut abi::lock
}
}
impl Drop for LockGuard<'_> {
fn drop(&mut self) {
debug_assert_eq!(
self.inner.lock.load(Ordering::Relaxed) & !abi::LOCK_KERNEL_MANAGED.0,
unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
"This lock is not write-locked by this thread"
);
if !self
.inner
.lock
.compare_exchange(
unsafe { __pthread_thread_id.0 } | abi::LOCK_WRLOCKED.0,
abi::LOCK_UNLOCKED.0,
Ordering::Release,
Ordering::Relaxed,
)
.is_ok()
{
// Lock is managed by kernelspace. Call into the kernel
// to unblock waiting threads.
let ret = unsafe { abi::lock_unlock(self.ptr(), abi::scope::PRIVATE) };
debug_assert_eq!(ret, abi::errno::SUCCESS);
}
}
}
struct Condvar {
condvar: AtomicU32,
}
impl Condvar {
#[inline]
pub fn new() -> Self {
Condvar {
condvar: AtomicU32::new(abi::CONDVAR_HAS_NO_WAITERS.0),
}
}
#[inline]
pub fn wait(&self, lock: &LockGuard<'_>) {
unsafe {
let subscription = abi::subscription {
type_: abi::eventtype::CONDVAR,
union: abi::subscription_union {
condvar: abi::subscription_condvar {
condvar: self.ptr(),
condvar_scope: abi::scope::PRIVATE,
lock: lock.ptr(),
lock_scope: abi::scope::PRIVATE,
},
},
..mem::zeroed()
};
let mut event: abi::event = mem::uninitialized();
let mut nevents: usize = mem::uninitialized();
let ret = abi::poll(&subscription, &mut event, 1, &mut nevents);
debug_assert_eq!(ret, abi::errno::SUCCESS);
debug_assert_eq!(event.error, abi::errno::SUCCESS);
}
}
/// Waits for a signal on the condvar.
/// Returns false if it times out before anyone notified us.
#[inline]
pub fn wait_timeout(&self, lock: &LockGuard<'_>, timeout: abi::timestamp) -> bool {
unsafe {
let subscriptions = [
abi::subscription {
type_: abi::eventtype::CONDVAR,
union: abi::subscription_union {
condvar: abi::subscription_condvar {
condvar: self.ptr(),
condvar_scope: abi::scope::PRIVATE,
lock: lock.ptr(),
lock_scope: abi::scope::PRIVATE,
},
},
..mem::zeroed()
},
abi::subscription {
type_: abi::eventtype::CLOCK,
union: abi::subscription_union {
clock: abi::subscription_clock {
clock_id: abi::clockid::MONOTONIC,
timeout,
..mem::zeroed()
},
},
..mem::zeroed()
},
];
let mut events: [abi::event; 2] = mem::uninitialized();
let mut nevents: usize = mem::uninitialized();
let ret = abi::poll(subscriptions.as_ptr(), events.as_mut_ptr(), 2, &mut nevents);
debug_assert_eq!(ret, abi::errno::SUCCESS);
for i in 0..nevents {
debug_assert_eq!(events[i].error, abi::errno::SUCCESS);
if events[i].type_ == abi::eventtype::CONDVAR {
return true;
}
}
}
false
}
#[inline]
pub fn notify(&self) {
let ret = unsafe { abi::condvar_signal(self.ptr(), abi::scope::PRIVATE, 1) };
debug_assert_eq!(ret, abi::errno::SUCCESS);
}
#[inline]
fn ptr(&self) -> *mut abi::condvar {
&self.condvar as *const AtomicU32 as *mut abi::condvar
}
}
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
should_park: Cell<bool>,
lock: Lock,
condvar: Condvar,
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
should_park: Cell::new(false),
lock: Lock::new(),
condvar: Condvar::new(),
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
self.should_park.set(true);
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
// We need to grab the lock here because another thread may be
// concurrently executing UnparkHandle::unpark, which is done without
// holding the queue lock.
let _guard = self.lock.lock();
self.should_park.get()
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub fn park(&self) {
let guard = self.lock.lock();
while self.should_park.get() {
self.condvar.wait(&guard);
}
}
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub fn park_until(&self, timeout: Instant) -> bool {
let guard = self.lock.lock();
while self.should_park.get() {
if let Some(duration_left) = timeout.checked_duration_since(Instant::now()) {
if let Ok(nanos_left) = abi::timestamp::try_from(duration_left.as_nanos()) {
self.condvar.wait_timeout(&guard, nanos_left);
} else {
// remaining timeout overflows an abi::timestamp. Sleep indefinitely
self.condvar.wait(&guard);
}
} else {
// We timed out
return false;
}
}
true
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub fn unpark_lock(&self) -> UnparkHandle<'_> {
let _lock_guard = self.lock.lock();
UnparkHandle {
thread_parker: self,
_lock_guard,
}
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle<'a> {
thread_parker: *const ThreadParker,
_lock_guard: LockGuard<'a>,
}
impl UnparkHandle<'_> {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
unsafe {
(*self.thread_parker).should_park.set(false);
// We notify while holding the lock here to avoid races with the target
// thread. In particular, the thread could exit after we unlock the
// mutex, which would make the condvar access invalid memory.
(*self.thread_parker).condvar.notify();
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}


@ -5,59 +5,62 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//! A simple spin lock based thread parker. Used on platforms without better
//! parking facilities available.
use core::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};
use std::{thread, time::Instant};
use std::sync::{Condvar, Mutex, MutexGuard};
use std::cell::Cell;
use std::time::Instant;
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
parked: AtomicBool,
should_park: Cell<bool>,
mutex: Mutex<()>,
condvar: Condvar,
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
parked: AtomicBool::new(false),
should_park: Cell::new(false),
mutex: Mutex::new(()),
condvar: Condvar::new(),
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
self.parked.store(true, Ordering::Relaxed);
pub unsafe fn prepare_park(&self) {
self.should_park.set(true);
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
self.parked.load(Ordering::Relaxed) != false
pub unsafe fn timed_out(&self) -> bool {
// We need to grab the mutex here because another thread may be
// concurrently executing UnparkHandle::unpark, which is done without
// holding the queue lock.
let _lock = self.mutex.lock().unwrap();
self.should_park.get()
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub fn park(&self) {
while self.parked.load(Ordering::Acquire) != false {
spin_loop_hint();
pub unsafe fn park(&self) {
let mut lock = self.mutex.lock().unwrap();
while self.should_park.get() {
lock = self.condvar.wait(lock).unwrap();
}
}
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub fn park_until(&self, timeout: Instant) -> bool {
while self.parked.load(Ordering::Acquire) != false {
if Instant::now() >= timeout {
pub unsafe fn park_until(&self, timeout: Instant) -> bool {
let mut lock = self.mutex.lock().unwrap();
while self.should_park.get() {
let now = Instant::now();
if timeout <= now {
return false;
}
spin_loop_hint();
let (new_lock, _) = self.condvar.wait_timeout(lock, timeout - now).unwrap();
lock = new_lock;
}
true
}
@ -65,27 +68,31 @@ impl ThreadParker {
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.parked.store(false, Ordering::Release);
UnparkHandle(())
pub unsafe fn unpark_lock(&self) -> UnparkHandle {
UnparkHandle {
thread_parker: self,
_guard: self.mutex.lock().unwrap(),
}
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle(());
pub struct UnparkHandle<'a> {
thread_parker: *const ThreadParker,
_guard: MutexGuard<'a, ()>,
}
impl UnparkHandle {
impl<'a> UnparkHandle<'a> {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {}
}
pub unsafe fn unpark(self) {
(*self.thread_parker).should_park.set(false);
#[inline]
pub fn thread_yield() {
thread::yield_now();
// We notify while holding the lock here to avoid races with the target
// thread. In particular, the thread could exit after we unlock the
// mutex, which would make the condvar access invalid memory.
(*self.thread_parker).condvar.notify_one();
}
}


@ -5,12 +5,9 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ptr,
sync::atomic::{AtomicI32, Ordering},
};
use std::sync::atomic::{AtomicI32, Ordering};
use std::time::Instant;
use libc;
use std::{thread, time::Instant};
const FUTEX_WAIT: i32 = 0;
const FUTEX_WAKE: i32 = 1;
@ -31,9 +28,6 @@ pub struct ThreadParker {
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
futex: AtomicI32::new(0),
@ -41,32 +35,41 @@ impl ThreadParker {
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
pub unsafe fn prepare_park(&self) {
self.futex.store(1, Ordering::Relaxed);
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
pub unsafe fn timed_out(&self) -> bool {
self.futex.load(Ordering::Relaxed) != 0
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub fn park(&self) {
pub unsafe fn park(&self) {
while self.futex.load(Ordering::Acquire) != 0 {
self.futex_wait(None);
let r = libc::syscall(
libc::SYS_futex,
&self.futex,
FUTEX_WAIT | FUTEX_PRIVATE,
1,
0,
);
debug_assert!(r == 0 || r == -1);
if r == -1 {
debug_assert!(
*libc::__errno_location() == libc::EINTR
|| *libc::__errno_location() == libc::EAGAIN
);
}
}
}
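// Editorial sketch, not part of this patch (Linux-only): the futex protocol
// used above, standalone. The parker sets the word to 1 and sleeps while it
// is still 1; the unparker clears it and wakes one waiter. A null timeout
// means wait forever.
use std::sync::atomic::AtomicI32;

fn park_sketch(futex: &AtomicI32) {
    futex.store(1, Ordering::Relaxed);
    while futex.load(Ordering::Acquire) != 0 {
        unsafe {
            // The kernel only sleeps if the word still equals 1, so a wakeup
            // between the load and the syscall is never lost.
            libc::syscall(libc::SYS_futex, futex, FUTEX_WAIT | FUTEX_PRIVATE, 1, 0);
        }
    }
}

fn unpark_sketch(futex: &AtomicI32) {
    futex.store(0, Ordering::Release);
    unsafe {
        libc::syscall(libc::SYS_futex, futex, FUTEX_WAKE | FUTEX_PRIVATE, 1);
    }
}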
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub fn park_until(&self, timeout: Instant) -> bool {
pub unsafe fn park_until(&self, timeout: Instant) -> bool {
while self.futex.load(Ordering::Acquire) != 0 {
let now = Instant::now();
if timeout <= now {
@ -82,43 +85,29 @@ impl ThreadParker {
tv_sec: diff.as_secs() as libc::time_t,
tv_nsec: diff.subsec_nanos() as tv_nsec_t,
};
self.futex_wait(Some(ts));
}
true
}
#[inline]
fn futex_wait(&self, ts: Option<libc::timespec>) {
let ts_ptr = ts
.as_ref()
.map(|ts_ref| ts_ref as *const _)
.unwrap_or(ptr::null());
let r = unsafe {
libc::syscall(
let r = libc::syscall(
libc::SYS_futex,
&self.futex,
FUTEX_WAIT | FUTEX_PRIVATE,
1,
ts_ptr,
)
};
debug_assert!(r == 0 || r == -1);
if r == -1 {
unsafe {
&ts,
);
debug_assert!(r == 0 || r == -1);
if r == -1 {
debug_assert!(
*libc::__errno_location() == libc::EINTR
|| *libc::__errno_location() == libc::EAGAIN
|| (ts.is_some() && *libc::__errno_location() == libc::ETIMEDOUT)
|| *libc::__errno_location() == libc::ETIMEDOUT
);
}
}
true
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub fn unpark_lock(&self) -> UnparkHandle {
pub unsafe fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.futex.store(0, Ordering::Release);
@ -136,20 +125,13 @@ pub struct UnparkHandle {
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
pub unsafe fn unpark(self) {
// The thread data may have been freed at this point, but it doesn't
// matter since the syscall will just return EFAULT in that case.
let r =
unsafe { libc::syscall(libc::SYS_futex, self.futex, FUTEX_WAKE | FUTEX_PRIVATE, 1) };
let r = libc::syscall(libc::SYS_futex, self.futex, FUTEX_WAKE | FUTEX_PRIVATE, 1);
debug_assert!(r == 0 || r == 1 || r == -1);
if r == -1 {
debug_assert_eq!(unsafe { *libc::__errno_location() }, libc::EFAULT);
debug_assert_eq!(*libc::__errno_location(), libc::EFAULT);
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}


@ -1,150 +0,0 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ptr,
sync::atomic::{AtomicI32, Ordering},
};
use std::{thread, time::Instant};
use syscall::{
call::futex,
data::TimeSpec,
error::{Error, EAGAIN, EFAULT, EINTR, ETIMEDOUT},
flag::{FUTEX_WAIT, FUTEX_WAKE},
};
const UNPARKED: i32 = 0;
const PARKED: i32 = 1;
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
futex: AtomicI32,
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
futex: AtomicI32::new(UNPARKED),
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
self.futex.store(PARKED, Ordering::Relaxed);
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
self.futex.load(Ordering::Relaxed) != UNPARKED
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub fn park(&self) {
while self.futex.load(Ordering::Acquire) != UNPARKED {
self.futex_wait(None);
}
}
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub fn park_until(&self, timeout: Instant) -> bool {
while self.futex.load(Ordering::Acquire) != UNPARKED {
let now = Instant::now();
if timeout <= now {
return false;
}
let diff = timeout - now;
if diff.as_secs() > i64::max_value() as u64 {
// Timeout overflowed, just sleep indefinitely
self.park();
return true;
}
let ts = TimeSpec {
tv_sec: diff.as_secs() as i64,
tv_nsec: diff.subsec_nanos() as i32,
};
self.futex_wait(Some(ts));
}
true
}
#[inline]
fn futex_wait(&self, ts: Option<TimeSpec>) {
let ts_ptr = ts
.as_ref()
.map(|ts_ref| ts_ref as *const _)
.unwrap_or(ptr::null());
let r = unsafe {
futex(
self.ptr(),
FUTEX_WAIT,
PARKED,
ts_ptr as usize,
ptr::null_mut(),
)
};
match r {
Ok(r) => debug_assert_eq!(r, 0),
Err(Error { errno }) => {
debug_assert!(errno == EINTR || errno == EAGAIN || errno == ETIMEDOUT);
}
}
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.futex.store(UNPARKED, Ordering::Release);
UnparkHandle { futex: self.ptr() }
}
#[inline]
fn ptr(&self) -> *mut i32 {
&self.futex as *const AtomicI32 as *mut i32
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle {
futex: *mut i32,
}
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
// The thread data may have been freed at this point, but it doesn't
// matter since the syscall will just return EFAULT in that case.
let r = unsafe { futex(self.futex, FUTEX_WAKE, PARKED, 0, ptr::null_mut()) };
match r {
Ok(num_woken) => debug_assert!(num_woken == 0 || num_woken == 1),
Err(Error { errno }) => debug_assert_eq!(errno, EFAULT),
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}


@ -1,108 +0,0 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::sync::atomic::{AtomicBool, Ordering};
use std::{
io,
os::fortanix_sgx::{
thread::current as current_tcs,
usercalls::{
self,
raw::{Tcs, EV_UNPARK, WAIT_INDEFINITE},
},
},
thread,
time::Instant,
};
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
parked: AtomicBool,
tcs: Tcs,
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
parked: AtomicBool::new(false),
tcs: current_tcs(),
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
self.parked.store(true, Ordering::Relaxed);
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
self.parked.load(Ordering::Relaxed)
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub fn park(&self) {
while self.parked.load(Ordering::Acquire) {
let result = usercalls::wait(EV_UNPARK, WAIT_INDEFINITE);
debug_assert_eq!(result.expect("wait returned error") & EV_UNPARK, EV_UNPARK);
}
}
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub fn park_until(&self, _timeout: Instant) -> bool {
// FIXME: https://github.com/fortanix/rust-sgx/issues/31
panic!("timeout not supported in SGX");
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.parked.store(false, Ordering::Release);
UnparkHandle(self.tcs)
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle(Tcs);
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
let result = usercalls::send(EV_UNPARK, Some(self.0));
if cfg!(debug_assertions) {
if let Err(error) = result {
// `InvalidInput` may be returned if the thread we send to has
// already been unparked and exited.
if error.kind() != io::ErrorKind::InvalidInput {
panic!("send returned an unexpected error: {:?}", error);
}
}
}
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}


@ -5,26 +5,12 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
#[cfg(any(target_os = "macos", target_os = "ios"))]
use core::ptr;
use core::{
cell::{Cell, UnsafeCell},
mem,
};
use std::cell::{Cell, UnsafeCell};
use std::time::{Duration, Instant};
use libc;
use std::{
thread,
time::{Duration, Instant},
};
// x32 Linux uses a non-standard type for tv_nsec in timespec.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=16437
#[cfg(all(target_arch = "x86_64", target_pointer_width = "32"))]
#[allow(non_camel_case_types)]
type tv_nsec_t = i64;
#[cfg(not(all(target_arch = "x86_64", target_pointer_width = "32")))]
#[allow(non_camel_case_types)]
type tv_nsec_t = libc::c_long;
use std::mem;
#[cfg(any(target_os = "macos", target_os = "ios"))]
use std::ptr;
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
@ -35,9 +21,6 @@ pub struct ThreadParker {
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = false;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
should_park: Cell::new(false),
@ -49,10 +32,8 @@ impl ThreadParker {
// Initializes the condvar to use CLOCK_MONOTONIC instead of CLOCK_REALTIME.
#[cfg(any(target_os = "macos", target_os = "ios", target_os = "android"))]
#[inline]
unsafe fn init(&self) {}
#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "android")))]
#[inline]
unsafe fn init(&self) {
let mut attr: libc::pthread_condattr_t = mem::uninitialized();
let r = libc::pthread_condattr_init(&mut attr);
@ -66,7 +47,6 @@ impl ThreadParker {
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub unsafe fn prepare_park(&self) {
self.should_park.set(true);
if !self.initialized.get() {
@ -77,7 +57,6 @@ impl ThreadParker {
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub unsafe fn timed_out(&self) -> bool {
// We need to grab the mutex here because another thread may be
// concurrently executing UnparkHandle::unpark, which is done without
@ -92,7 +71,6 @@ impl ThreadParker {
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub unsafe fn park(&self) {
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
@ -107,7 +85,6 @@ impl ThreadParker {
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub unsafe fn park_until(&self, timeout: Instant) -> bool {
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
@ -143,7 +120,6 @@ impl ThreadParker {
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub unsafe fn unpark_lock(&self) -> UnparkHandle {
let r = libc::pthread_mutex_lock(self.mutex.get());
debug_assert_eq!(r, 0);
@ -155,7 +131,6 @@ impl ThreadParker {
}
impl Drop for ThreadParker {
#[inline]
fn drop(&mut self) {
// On DragonFly pthread_mutex_destroy() returns EINVAL if called on a
// mutex that was just initialized with libc::PTHREAD_MUTEX_INITIALIZER.
@ -188,7 +163,6 @@ pub struct UnparkHandle {
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub unsafe fn unpark(self) {
(*self.thread_parker).should_park.set(false);
@ -204,20 +178,18 @@ impl UnparkHandle {
// Returns the current time on the clock used by pthread_cond_t as a timespec.
#[cfg(any(target_os = "macos", target_os = "ios"))]
#[inline]
fn timespec_now() -> libc::timespec {
let mut now: libc::timeval = unsafe { mem::uninitialized() };
let r = unsafe { libc::gettimeofday(&mut now, ptr::null_mut()) };
unsafe fn timespec_now() -> libc::timespec {
let mut now: libc::timeval = mem::uninitialized();
let r = libc::gettimeofday(&mut now, ptr::null_mut());
debug_assert_eq!(r, 0);
libc::timespec {
tv_sec: now.tv_sec,
tv_nsec: now.tv_usec as tv_nsec_t * 1000,
tv_nsec: now.tv_usec as libc::c_long * 1000,
}
}
#[cfg(not(any(target_os = "macos", target_os = "ios")))]
#[inline]
fn timespec_now() -> libc::timespec {
let mut now: libc::timespec = unsafe { mem::uninitialized() };
unsafe fn timespec_now() -> libc::timespec {
let mut now: libc::timespec = mem::uninitialized();
let clock = if cfg!(target_os = "android") {
// Android doesn't support pthread_condattr_setclock, so we need to
// specify the timeout in CLOCK_REALTIME.
@ -225,22 +197,21 @@ fn timespec_now() -> libc::timespec {
} else {
libc::CLOCK_MONOTONIC
};
let r = unsafe { libc::clock_gettime(clock, &mut now) };
let r = libc::clock_gettime(clock, &mut now);
debug_assert_eq!(r, 0);
now
}
// Converts a relative timeout into an absolute timeout in the clock used by
// pthread_cond_t.
#[inline]
fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
unsafe fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
// Handle overflows early on
if timeout.as_secs() > libc::time_t::max_value() as u64 {
return None;
}
let now = timespec_now();
let mut nsec = now.tv_nsec + timeout.subsec_nanos() as tv_nsec_t;
let mut nsec = now.tv_nsec + timeout.subsec_nanos() as libc::c_long;
let mut sec = now.tv_sec.checked_add(timeout.as_secs() as libc::time_t);
if nsec >= 1_000_000_000 {
nsec -= 1_000_000_000;
@ -252,8 +223,3 @@ fn timeout_to_timespec(timeout: Duration) -> Option<libc::timespec> {
tv_sec: sec,
})
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}


@ -1,108 +0,0 @@
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
arch::wasm32,
sync::atomic::{AtomicI32, Ordering},
};
use std::{convert::TryFrom, thread, time::Instant};
// Helper type for putting a thread to sleep until some other thread wakes it up
pub struct ThreadParker {
parked: AtomicI32,
}
const UNPARKED: i32 = 0;
const PARKED: i32 = 1;
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
ThreadParker {
parked: AtomicI32::new(UNPARKED),
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
self.parked.store(PARKED, Ordering::Relaxed);
}
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
self.parked.load(Ordering::Relaxed) == PARKED
}
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub fn park(&self) {
while self.parked.load(Ordering::Acquire) == PARKED {
let r = unsafe { wasm32::i32_atomic_wait(self.ptr(), PARKED, -1) };
// we should have either woken up (0) or gotten a not-equal result due to
// a race (1). We should never time out (2).
debug_assert!(r == 0 || r == 1);
}
}
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub fn park_until(&self, timeout: Instant) -> bool {
while self.parked.load(Ordering::Acquire) == PARKED {
if let Some(left) = timeout.checked_duration_since(Instant::now()) {
let nanos_left = i64::try_from(left.as_nanos()).unwrap_or(i64::max_value());
let r = unsafe { wasm32::i32_atomic_wait(self.ptr(), PARKED, nanos_left) };
debug_assert!(r == 0 || r == 1 || r == 2);
} else {
return false;
}
}
true
}
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub fn unpark_lock(&self) -> UnparkHandle {
// We don't need to lock anything, just clear the state
self.parked.store(UNPARKED, Ordering::Release);
UnparkHandle(self.ptr())
}
#[inline]
fn ptr(&self) -> *mut i32 {
&self.parked as *const AtomicI32 as *mut i32
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
// as unparked while holding the queue lock, but we delay the actual unparking
// until after the queue lock is released.
pub struct UnparkHandle(*mut i32);
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
let num_notified = unsafe { wasm32::atomic_notify(self.0 as *mut i32, 1) };
debug_assert!(num_notified == 0 || num_notified == 1);
}
}
#[inline]
pub fn thread_yield() {
thread::yield_now();
}
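Every parker backend in this crate follows the handshake the comments above describe: `prepare_park` marks the thread parked before it is queued, `park` sleeps while the flag is still set, and the unparker clears the flag before issuing the wake. A hypothetical sketch of that ordering using std's `Mutex` and `Condvar` in place of the wasm32 atomics, so it runs on any target:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Toy parker: `parked` plays the role of the PARKED/UNPARKED atomic.
struct Parker {
    parked: Mutex<bool>,
    wake: Condvar,
}

impl Parker {
    fn new() -> Parker {
        Parker { parked: Mutex::new(false), wake: Condvar::new() }
    }
    // Corresponds to prepare_park: mark ourselves parked before queueing.
    fn prepare_park(&self) {
        *self.parked.lock().unwrap() = true;
    }
    // Corresponds to park: sleep until an unparker clears the flag.
    fn park(&self) {
        let mut parked = self.parked.lock().unwrap();
        while *parked {
            parked = self.wake.wait(parked).unwrap();
        }
    }
    // Corresponds to unpark_lock + unpark: clear the flag, then wake.
    fn unpark(&self) {
        *self.parked.lock().unwrap() = false;
        self.wake.notify_one();
    }
}

fn main() {
    let parker = Arc::new(Parker::new());
    parker.prepare_park();
    let p = parker.clone();
    let t = thread::spawn(move || p.park());
    parker.unpark();
    t.join().unwrap();
}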


@ -5,26 +5,18 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{mem, ptr};
use std::{
sync::atomic::{AtomicUsize, Ordering},
time::Instant,
};
use winapi::{
shared::{
minwindef::{TRUE, ULONG},
ntdef::NTSTATUS,
ntstatus::{STATUS_SUCCESS, STATUS_TIMEOUT},
},
um::{
handleapi::CloseHandle,
libloaderapi::{GetModuleHandleA, GetProcAddress},
winnt::{
ACCESS_MASK, BOOLEAN, GENERIC_READ, GENERIC_WRITE, HANDLE, LARGE_INTEGER, LPCSTR,
PHANDLE, PLARGE_INTEGER, PVOID,
},
},
};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
use std::ptr;
use std::mem;
use winapi::shared::minwindef::{TRUE, ULONG};
use winapi::shared::ntdef::NTSTATUS;
use winapi::shared::ntstatus::{STATUS_SUCCESS, STATUS_TIMEOUT};
use winapi::um::handleapi::CloseHandle;
use winapi::um::libloaderapi::{GetModuleHandleA, GetProcAddress};
use winapi::um::winnt::{ACCESS_MASK, GENERIC_READ, GENERIC_WRITE, LPCSTR};
use winapi::um::winnt::{BOOLEAN, HANDLE, LARGE_INTEGER, PHANDLE, PLARGE_INTEGER, PVOID};
const STATE_UNPARKED: usize = 0;
const STATE_PARKED: usize = 1;
@ -48,82 +40,73 @@ pub struct KeyedEvent {
}
impl KeyedEvent {
#[inline]
unsafe fn wait_for(&self, key: PVOID, timeout: PLARGE_INTEGER) -> NTSTATUS {
(self.NtWaitForKeyedEvent)(self.handle, key, 0, timeout)
}
#[inline]
unsafe fn release(&self, key: PVOID) -> NTSTATUS {
(self.NtReleaseKeyedEvent)(self.handle, key, 0, ptr::null_mut())
}
#[allow(non_snake_case)]
pub fn create() -> Option<KeyedEvent> {
unsafe {
let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr() as LPCSTR);
if ntdll.is_null() {
return None;
}
let NtCreateKeyedEvent =
GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr() as LPCSTR);
if NtCreateKeyedEvent.is_null() {
return None;
}
let NtReleaseKeyedEvent =
GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr() as LPCSTR);
if NtReleaseKeyedEvent.is_null() {
return None;
}
let NtWaitForKeyedEvent =
GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr() as LPCSTR);
if NtWaitForKeyedEvent.is_null() {
return None;
}
let NtCreateKeyedEvent: extern "system" fn(
KeyedEventHandle: PHANDLE,
DesiredAccess: ACCESS_MASK,
ObjectAttributes: PVOID,
Flags: ULONG,
) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
let mut handle = mem::uninitialized();
let status = NtCreateKeyedEvent(
&mut handle,
GENERIC_READ | GENERIC_WRITE,
ptr::null_mut(),
0,
);
if status != STATUS_SUCCESS {
return None;
}
Some(KeyedEvent {
handle,
NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
})
pub unsafe fn create() -> Option<KeyedEvent> {
let ntdll = GetModuleHandleA(b"ntdll.dll\0".as_ptr() as LPCSTR);
if ntdll.is_null() {
return None;
}
let NtCreateKeyedEvent = GetProcAddress(ntdll, b"NtCreateKeyedEvent\0".as_ptr() as LPCSTR);
if NtCreateKeyedEvent.is_null() {
return None;
}
let NtReleaseKeyedEvent =
GetProcAddress(ntdll, b"NtReleaseKeyedEvent\0".as_ptr() as LPCSTR);
if NtReleaseKeyedEvent.is_null() {
return None;
}
let NtWaitForKeyedEvent =
GetProcAddress(ntdll, b"NtWaitForKeyedEvent\0".as_ptr() as LPCSTR);
if NtWaitForKeyedEvent.is_null() {
return None;
}
let NtCreateKeyedEvent: extern "system" fn(
KeyedEventHandle: PHANDLE,
DesiredAccess: ACCESS_MASK,
ObjectAttributes: PVOID,
Flags: ULONG,
) -> NTSTATUS = mem::transmute(NtCreateKeyedEvent);
let mut handle = mem::uninitialized();
let status = NtCreateKeyedEvent(
&mut handle,
GENERIC_READ | GENERIC_WRITE,
ptr::null_mut(),
0,
);
if status != STATUS_SUCCESS {
return None;
}
Some(KeyedEvent {
handle,
NtReleaseKeyedEvent: mem::transmute(NtReleaseKeyedEvent),
NtWaitForKeyedEvent: mem::transmute(NtWaitForKeyedEvent),
})
}
#[inline]
pub fn prepare_park(&'static self, key: &AtomicUsize) {
pub unsafe fn prepare_park(&'static self, key: &AtomicUsize) {
key.store(STATE_PARKED, Ordering::Relaxed);
}
#[inline]
pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
pub unsafe fn timed_out(&'static self, key: &AtomicUsize) -> bool {
key.load(Ordering::Relaxed) == STATE_TIMED_OUT
}
#[inline]
pub unsafe fn park(&'static self, key: &AtomicUsize) {
let status = self.wait_for(key as *const _ as PVOID, ptr::null_mut());
debug_assert_eq!(status, STATUS_SUCCESS);
}
#[inline]
pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
let now = Instant::now();
if timeout <= now {
@ -169,7 +152,6 @@ impl KeyedEvent {
false
}
#[inline]
pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
// If the state was STATE_PARKED then we need to wake up the thread
if key.swap(STATE_UNPARKED, Ordering::Relaxed) == STATE_PARKED {
@ -187,7 +169,6 @@ impl KeyedEvent {
}
impl Drop for KeyedEvent {
#[inline]
fn drop(&mut self) {
unsafe {
let ok = CloseHandle(self.handle);
@ -207,7 +188,6 @@ pub struct UnparkHandle {
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub unsafe fn unpark(self) {
if !self.key.is_null() {
let status = self.keyed_event.release(self.key as PVOID);


@ -5,10 +5,7 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
ptr,
sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
};
use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
use std::time::Instant;
mod keyed_event;
@ -19,23 +16,16 @@ enum Backend {
WaitAddress(waitaddress::WaitAddress),
}
static BACKEND: AtomicPtr<Backend> = AtomicPtr::new(ptr::null_mut());
impl Backend {
#[inline]
fn get() -> &'static Backend {
unsafe fn get() -> &'static Backend {
static BACKEND: AtomicUsize = ATOMIC_USIZE_INIT;
// Fast path: use the existing object
let backend_ptr = BACKEND.load(Ordering::Acquire);
if !backend_ptr.is_null() {
return unsafe { &*backend_ptr };
let backend = BACKEND.load(Ordering::Acquire);
if backend != 0 {
return &*(backend as *const Backend);
};
Backend::create()
}
#[cold]
#[inline(never)]
fn create() -> &'static Backend {
// Try to create a new Backend
let backend;
if let Some(waitaddress) = waitaddress::WaitAddress::create() {
@ -49,21 +39,14 @@ impl Backend {
);
}
// Try to set our new Backend as the global one
let backend_ptr = Box::into_raw(Box::new(backend));
match BACKEND.compare_exchange(
ptr::null_mut(),
backend_ptr,
Ordering::Release,
Ordering::Relaxed,
) {
Ok(_) => unsafe { &*backend_ptr },
Err(global_backend_ptr) => {
unsafe {
// We lost the race, free our object and return the global one
Box::from_raw(backend_ptr);
&*global_backend_ptr
}
// Try to create a new object
let backend = Box::into_raw(Box::new(backend));
match BACKEND.compare_exchange(0, backend as usize, Ordering::Release, Ordering::Relaxed) {
Ok(_) => &*(backend as *const Backend),
Err(x) => {
// We lost the race, free our object and return the global one
Box::from_raw(backend);
&*(x as *const Backend)
}
}
}
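Both variants of `Backend::get`, the `AtomicPtr` one being removed and the `AtomicUsize` one being restored, implement the same racy lazy initialization: build a candidate, publish it with a single compare-exchange, and if another thread won the race, free the candidate and adopt the winner's. A self-contained sketch of the pattern (the `u32` payload is a stand-in for `Backend`):

use std::ptr;
use std::sync::atomic::{fence, AtomicPtr, Ordering};

static GLOBAL: AtomicPtr<u32> = AtomicPtr::new(ptr::null_mut());

fn get() -> &'static u32 {
    // Fast path: someone already published the global object.
    let p = GLOBAL.load(Ordering::Acquire);
    if !p.is_null() {
        return unsafe { &*p };
    }
    // Slow path: build a candidate and race to publish it.
    let candidate = Box::into_raw(Box::new(42u32));
    match GLOBAL.compare_exchange(ptr::null_mut(), candidate, Ordering::Release, Ordering::Relaxed) {
        Ok(_) => unsafe { &*candidate },
        Err(winner) => {
            // Lost the race: free our candidate and use the winner's.
            // The fence pairs with the winner's Release store.
            fence(Ordering::Acquire);
            unsafe { drop(Box::from_raw(candidate)) };
            unsafe { &*winner }
        }
    }
}

fn main() {
    assert_eq!(*get(), 42);
    assert_eq!(*get(), 42); // second call takes the fast path
}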
@ -76,22 +59,18 @@ pub struct ThreadParker {
}
impl ThreadParker {
pub const IS_CHEAP_TO_CONSTRUCT: bool = true;
#[inline]
pub fn new() -> ThreadParker {
// Initialize the backend here to ensure we don't get any panics
// later on, which could leave synchronization primitives in a broken
// state.
ThreadParker {
key: AtomicUsize::new(0),
backend: Backend::get(),
backend: unsafe { Backend::get() },
}
}
// Prepares the parker. This should be called before adding it to the queue.
#[inline]
pub fn prepare_park(&self) {
pub unsafe fn prepare_park(&self) {
match *self.backend {
Backend::KeyedEvent(ref x) => x.prepare_park(&self.key),
Backend::WaitAddress(ref x) => x.prepare_park(&self.key),
@ -100,8 +79,7 @@ impl ThreadParker {
// Checks if the park timed out. This should be called while holding the
// queue lock after park_until has returned false.
#[inline]
pub fn timed_out(&self) -> bool {
pub unsafe fn timed_out(&self) -> bool {
match *self.backend {
Backend::KeyedEvent(ref x) => x.timed_out(&self.key),
Backend::WaitAddress(ref x) => x.timed_out(&self.key),
@ -110,7 +88,6 @@ impl ThreadParker {
// Parks the thread until it is unparked. This should be called after it has
// been added to the queue, after unlocking the queue.
#[inline]
pub unsafe fn park(&self) {
match *self.backend {
Backend::KeyedEvent(ref x) => x.park(&self.key),
@ -121,7 +98,6 @@ impl ThreadParker {
// Parks the thread until it is unparked or the timeout is reached. This
// should be called after it has been added to the queue, after unlocking
// the queue. Returns true if we were unparked and false if we timed out.
#[inline]
pub unsafe fn park_until(&self, timeout: Instant) -> bool {
match *self.backend {
Backend::KeyedEvent(ref x) => x.park_until(&self.key, timeout),
@ -132,7 +108,6 @@ impl ThreadParker {
// Locks the parker to prevent the target thread from exiting. This is
// necessary to ensure that thread-local ThreadData objects remain valid.
// This should be called while holding the queue lock.
#[inline]
pub unsafe fn unpark_lock(&self) -> UnparkHandle {
match *self.backend {
Backend::KeyedEvent(ref x) => UnparkHandle::KeyedEvent(x.unpark_lock(&self.key)),
@ -152,7 +127,6 @@ pub enum UnparkHandle {
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub unsafe fn unpark(self) {
match self {
UnparkHandle::KeyedEvent(x) => x.unpark(),
@ -160,30 +134,3 @@ impl UnparkHandle {
}
}
}
// Yields the rest of the current timeslice to the OS
#[inline]
pub fn thread_yield() {
// Note that this is manually defined here rather than using the definition
// through `winapi`. The `winapi` definition comes from the `synchapi`
// header which enables the "synchronization.lib" library. It turns out,
// however, that `Sleep` comes from `kernel32.dll`, so this activation isn't
// necessary.
//
// This was originally identified in rust-lang/rust where on MinGW the
// libsynchronization.a library pulls in a dependency on a newer DLL not
// present in older versions of Windows. (see rust-lang/rust#49438)
//
// This is a bit of a hack for now and ideally we'd fix MinGW's own import
// libraries, but that'll probably take a lot longer than patching this here
// and avoiding the `synchapi` feature entirely.
extern "system" {
fn Sleep(a: winapi::shared::minwindef::DWORD);
}
unsafe {
// We don't use SwitchToThread here because it doesn't consider all
// threads in the system and the thread we are waiting for may not get
// selected.
Sleep(0);
}
}


@ -5,24 +5,17 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use core::{
mem,
sync::atomic::{AtomicUsize, Ordering},
};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
use winapi::{
shared::{
basetsd::SIZE_T,
minwindef::{BOOL, DWORD, FALSE, TRUE},
winerror::ERROR_TIMEOUT,
},
um::{
errhandlingapi::GetLastError,
libloaderapi::{GetModuleHandleA, GetProcAddress},
winbase::INFINITE,
winnt::{LPCSTR, PVOID},
},
};
use std::mem;
use winapi::shared::basetsd::SIZE_T;
use winapi::shared::minwindef::{BOOL, DWORD, FALSE, TRUE};
use winapi::shared::winerror::ERROR_TIMEOUT;
use winapi::um::errhandlingapi::GetLastError;
use winapi::um::libloaderapi::{GetModuleHandleA, GetProcAddress};
use winapi::um::winbase::INFINITE;
use winapi::um::winnt::{LPCSTR, PVOID};
#[allow(non_snake_case)]
pub struct WaitAddress {
@ -37,60 +30,58 @@ pub struct WaitAddress {
impl WaitAddress {
#[allow(non_snake_case)]
pub fn create() -> Option<WaitAddress> {
unsafe {
// MSDN claims that WaitOnAddress and WakeByAddressSingle are
// located in kernel32.dll, but they are lying...
let synch_dll =
GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr() as LPCSTR);
if synch_dll.is_null() {
return None;
}
let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr() as LPCSTR);
if WaitOnAddress.is_null() {
return None;
}
let WakeByAddressSingle =
GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr() as LPCSTR);
if WakeByAddressSingle.is_null() {
return None;
}
Some(WaitAddress {
WaitOnAddress: mem::transmute(WaitOnAddress),
WakeByAddressSingle: mem::transmute(WakeByAddressSingle),
})
pub unsafe fn create() -> Option<WaitAddress> {
// MSDN claims that WaitOnAddress and WakeByAddressSingle are
// located in kernel32.dll, but they are lying...
let synch_dll = GetModuleHandleA(b"api-ms-win-core-synch-l1-2-0.dll\0".as_ptr() as LPCSTR);
if synch_dll.is_null() {
return None;
}
let WaitOnAddress = GetProcAddress(synch_dll, b"WaitOnAddress\0".as_ptr() as LPCSTR);
if WaitOnAddress.is_null() {
return None;
}
let WakeByAddressSingle =
GetProcAddress(synch_dll, b"WakeByAddressSingle\0".as_ptr() as LPCSTR);
if WakeByAddressSingle.is_null() {
return None;
}
Some(WaitAddress {
WaitOnAddress: mem::transmute(WaitOnAddress),
WakeByAddressSingle: mem::transmute(WakeByAddressSingle),
})
}
#[inline]
pub fn prepare_park(&'static self, key: &AtomicUsize) {
pub unsafe fn prepare_park(&'static self, key: &AtomicUsize) {
key.store(1, Ordering::Relaxed);
}
#[inline]
pub fn timed_out(&'static self, key: &AtomicUsize) -> bool {
pub unsafe fn timed_out(&'static self, key: &AtomicUsize) -> bool {
key.load(Ordering::Relaxed) != 0
}
#[inline]
pub fn park(&'static self, key: &AtomicUsize) {
pub unsafe fn park(&'static self, key: &AtomicUsize) {
while key.load(Ordering::Acquire) != 0 {
let r = self.wait_on_address(key, INFINITE);
let cmp = 1usize;
let r = (self.WaitOnAddress)(
key as *const _ as PVOID,
&cmp as *const _ as PVOID,
mem::size_of::<usize>() as SIZE_T,
INFINITE,
);
debug_assert!(r == TRUE);
}
}
#[inline]
pub fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
pub unsafe fn park_until(&'static self, key: &AtomicUsize, timeout: Instant) -> bool {
while key.load(Ordering::Acquire) != 0 {
let now = Instant::now();
if timeout <= now {
return false;
}
let diff = timeout - now;
let timeout = diff
.as_secs()
let timeout = diff.as_secs()
.checked_mul(1000)
.and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999999) / 1000000))
.map(|ms| {
@ -101,15 +92,21 @@ impl WaitAddress {
}
})
.unwrap_or(INFINITE);
if self.wait_on_address(key, timeout) == FALSE {
debug_assert_eq!(unsafe { GetLastError() }, ERROR_TIMEOUT);
let cmp = 1usize;
let r = (self.WaitOnAddress)(
key as *const _ as PVOID,
&cmp as *const _ as PVOID,
mem::size_of::<usize>() as SIZE_T,
timeout,
);
if r == FALSE {
debug_assert_eq!(GetLastError(), ERROR_TIMEOUT);
}
}
true
}
#[inline]
pub fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
pub unsafe fn unpark_lock(&'static self, key: &AtomicUsize) -> UnparkHandle {
// We don't need to lock anything, just clear the state
key.store(0, Ordering::Release);
@ -118,17 +115,6 @@ impl WaitAddress {
waitaddress: self,
}
}
#[inline]
fn wait_on_address(&'static self, key: &AtomicUsize, timeout: DWORD) -> BOOL {
let cmp = 1usize;
(self.WaitOnAddress)(
key as *const _ as PVOID,
&cmp as *const _ as PVOID,
mem::size_of::<usize>() as SIZE_T,
timeout,
)
}
}
// Handle for a thread that is about to be unparked. We need to mark the thread
@ -142,8 +128,7 @@ pub struct UnparkHandle {
impl UnparkHandle {
// Wakes up the parked thread. This should be called after the queue lock is
// released to avoid blocking the queue for too long.
#[inline]
pub fn unpark(self) {
pub unsafe fn unpark(self) {
(self.waitaddress.WakeByAddressSingle)(self.key as PVOID);
}
}
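The `park_until` loop above converts the remaining `Duration` into whole milliseconds for `WaitOnAddress`, rounding the sub-second remainder up so the wait cannot wake early, and saturating to `INFINITE` when the value does not fit in a `DWORD`. A worked sketch of just that conversion, with `0xFFFF_FFFF` standing in for winapi's `INFINITE`:

use std::time::Duration;

const INFINITE: u32 = 0xFFFF_FFFF; // winapi's "no timeout" sentinel

fn duration_to_millis(diff: Duration) -> u32 {
    diff.as_secs()
        .checked_mul(1000)
        // Round the sub-second part *up* to the next millisecond.
        .and_then(|x| x.checked_add((diff.subsec_nanos() as u64 + 999_999) / 1_000_000))
        .map(|ms| {
            // Clamp anything that doesn't fit in a DWORD.
            if ms > INFINITE as u64 { INFINITE } else { ms as u32 }
        })
        .unwrap_or(INFINITE)
}

fn main() {
    assert_eq!(duration_to_millis(Duration::new(1, 1)), 1001); // 1ns rounds up
    assert_eq!(duration_to_millis(Duration::new(0, 0)), 0);
    assert_eq!(duration_to_millis(Duration::from_secs(u64::max_value())), INFINITE);
}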


@ -5,13 +5,15 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::spinwait::SpinWait;
use crate::thread_parker::ThreadParker;
use core::{
cell::Cell,
mem, ptr,
sync::atomic::{fence, AtomicUsize, Ordering},
};
use std::sync::atomic::{fence, AtomicUsize, Ordering};
use std::ptr;
use std::mem;
use std::cell::Cell;
use std::thread::LocalKey;
#[cfg(not(feature = "nightly"))]
use std::panic;
use spinwait::SpinWait;
use thread_parker::ThreadParker;
struct ThreadData {
parker: ThreadParker,
@ -34,9 +36,7 @@ struct ThreadData {
}
impl ThreadData {
#[inline]
fn new() -> ThreadData {
assert!(mem::align_of::<ThreadData>() > !QUEUE_MASK);
ThreadData {
parker: ThreadParker::new(),
queue_tail: Cell::new(ptr::null()),
@ -46,28 +46,31 @@ impl ThreadData {
}
}
// Invokes the given closure with a reference to the current thread `ThreadData`.
#[inline]
fn with_thread_data<F, T>(f: F) -> T
where
F: FnOnce(&ThreadData) -> T,
{
let mut thread_data_ptr = ptr::null();
// If ThreadData is expensive to construct, then we want to use a cached
// version in thread-local storage if possible.
if !ThreadParker::IS_CHEAP_TO_CONSTRUCT {
thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
if let Ok(tls_thread_data) = THREAD_DATA.try_with(|x| x as *const ThreadData) {
thread_data_ptr = tls_thread_data;
}
// Returns a ThreadData structure for the current thread
unsafe fn get_thread_data(local: &mut Option<ThreadData>) -> &ThreadData {
// Try to read from thread-local storage, but return None if the TLS has
// already been destroyed.
#[cfg(feature = "nightly")]
fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
key.try_with(|x| x as *const ThreadData).ok()
}
// Otherwise just create a ThreadData on the stack
let mut thread_data_storage = None;
if thread_data_ptr.is_null() {
thread_data_ptr = thread_data_storage.get_or_insert_with(ThreadData::new);
#[cfg(not(feature = "nightly"))]
fn try_get_tls(key: &'static LocalKey<ThreadData>) -> Option<*const ThreadData> {
panic::catch_unwind(|| key.with(|x| x as *const ThreadData)).ok()
}
f(unsafe { &*thread_data_ptr })
// If ThreadData is expensive to construct, then we want to use a cached
// version in thread-local storage if possible.
if !cfg!(windows) && !cfg!(all(feature = "nightly", target_os = "linux")) {
thread_local!(static THREAD_DATA: ThreadData = ThreadData::new());
if let Some(tls) = try_get_tls(&THREAD_DATA) {
return &*tls;
}
}
// Otherwise just create a ThreadData on the stack
*local = Some(ThreadData::new());
local.as_ref().unwrap()
}
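Both the removed `with_thread_data` and the restored `get_thread_data` share one shape: prefer a `ThreadData` cached in thread-local storage, and if TLS is unavailable (for example during thread teardown, when `try_with` fails), fall back to a value created on the caller's stack. A reduced, hypothetical sketch of that fallback with a `Cell<u32>` payload:

use std::cell::Cell;
use std::ptr;

// Reduced analogue of with_thread_data: use the cached thread-local value
// while TLS is alive, otherwise fall back to a stack-local one.
fn with_value<F, T>(f: F) -> T
where
    F: FnOnce(&Cell<u32>) -> T,
{
    let mut value_ptr: *const Cell<u32> = ptr::null();
    thread_local!(static CACHED: Cell<u32> = Cell::new(0));
    // try_with returns Err once the TLS slot has been destroyed.
    if let Ok(tls) = CACHED.try_with(|v| v as *const Cell<u32>) {
        value_ptr = tls;
    }
    // Fallback: create a fresh value on this stack frame.
    let mut storage = None;
    if value_ptr.is_null() {
        value_ptr = storage.get_or_insert_with(|| Cell::new(0));
    }
    f(unsafe { &*value_ptr })
}

fn main() {
    let n = with_value(|cell| {
        cell.set(cell.get() + 1);
        cell.get()
    });
    assert_eq!(n, 1);
}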
const LOCKED_BIT: usize = 1;
@ -81,14 +84,16 @@ pub struct WordLock {
}
impl WordLock {
pub const INIT: WordLock = WordLock {
state: AtomicUsize::new(0),
};
#[inline]
pub fn new() -> WordLock {
WordLock {
state: AtomicUsize::new(0),
}
}
#[inline]
pub fn lock(&self) {
if self
.state
pub unsafe fn lock(&self) {
if self.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
@ -97,11 +102,10 @@ impl WordLock {
self.lock_slow();
}
/// Must not be called on an already unlocked `WordLock`!
#[inline]
pub unsafe fn unlock(&self) {
let state = self.state.fetch_sub(LOCKED_BIT, Ordering::Release);
if state.is_queue_locked() || state.queue_head().is_null() {
if state & QUEUE_LOCKED_BIT != 0 || state & QUEUE_MASK == 0 {
return;
}
self.unlock_slow();
@ -109,12 +113,12 @@ impl WordLock {
#[cold]
#[inline(never)]
fn lock_slow(&self) {
unsafe fn lock_slow(&self) {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
// Grab the lock if it isn't locked, even if there is a queue on it
if !state.is_locked() {
if state & LOCKED_BIT == 0 {
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
@ -128,62 +132,55 @@ impl WordLock {
}
// If there is no queue, try spinning a few times
if state.queue_head().is_null() && spinwait.spin() {
if state & QUEUE_MASK == 0 && spinwait.spin() {
state = self.state.load(Ordering::Relaxed);
continue;
}
// Get our thread data and prepare it for parking
state = with_thread_data(|thread_data| {
// The pthread implementation is still unsafe, so we need to surround `prepare_park`
// with `unsafe {}`.
#[allow(unused_unsafe)]
unsafe {
thread_data.parker.prepare_park();
}
let mut thread_data = None;
let thread_data = get_thread_data(&mut thread_data);
assert!(mem::align_of_val(thread_data) > !QUEUE_MASK);
thread_data.parker.prepare_park();
// Add our thread to the front of the queue
let queue_head = state.queue_head();
if queue_head.is_null() {
thread_data.queue_tail.set(thread_data);
thread_data.prev.set(ptr::null());
} else {
thread_data.queue_tail.set(ptr::null());
thread_data.prev.set(ptr::null());
thread_data.next.set(queue_head);
}
if let Err(x) = self.state.compare_exchange_weak(
state,
state.with_queue_head(thread_data),
Ordering::Release,
Ordering::Relaxed,
) {
return x;
}
// Add our thread to the front of the queue
let queue_head = (state & QUEUE_MASK) as *const ThreadData;
if queue_head.is_null() {
thread_data.queue_tail.set(thread_data);
thread_data.prev.set(ptr::null());
} else {
thread_data.queue_tail.set(ptr::null());
thread_data.prev.set(ptr::null());
thread_data.next.set(queue_head);
}
if let Err(x) = self.state.compare_exchange_weak(
state,
(state & !QUEUE_MASK) | thread_data as *const _ as usize,
Ordering::Release,
Ordering::Relaxed,
) {
state = x;
continue;
}
// Sleep until we are woken up by an unlock
// Ignoring unused unsafe, since it's only a few platforms where this is unsafe.
#[allow(unused_unsafe)]
unsafe {
thread_data.parker.park();
}
// Sleep until we are woken up by an unlock
thread_data.parker.park();
// Loop back and try locking again
spinwait.reset();
self.state.load(Ordering::Relaxed)
});
// Loop back and try locking again
spinwait.reset();
self.state.load(Ordering::Relaxed);
}
}
#[cold]
#[inline(never)]
fn unlock_slow(&self) {
unsafe fn unlock_slow(&self) {
let mut state = self.state.load(Ordering::Relaxed);
loop {
// We just unlocked the WordLock. Just check if there is a thread
// to wake up. If the queue is locked then another thread is already
// taking care of waking up a thread.
if state.is_queue_locked() || state.queue_head().is_null() {
if state & QUEUE_LOCKED_BIT != 0 || state & QUEUE_MASK == 0 {
return;
}
@ -204,31 +201,27 @@ impl WordLock {
// First, we need to fill in the prev pointers for any newly added
// threads. We do this until we reach a node that we previously
// processed, which has a non-null queue_tail pointer.
let queue_head = state.queue_head();
let queue_head = (state & QUEUE_MASK) as *const ThreadData;
let mut queue_tail;
let mut current = queue_head;
loop {
queue_tail = unsafe { (*current).queue_tail.get() };
queue_tail = (*current).queue_tail.get();
if !queue_tail.is_null() {
break;
}
unsafe {
let next = (*current).next.get();
(*next).prev.set(current);
current = next;
}
let next = (*current).next.get();
(*next).prev.set(current);
current = next;
}
// Set queue_tail on the queue head to indicate that the whole list
// has prev pointers set correctly.
unsafe {
(*queue_head).queue_tail.set(queue_tail);
}
(*queue_head).queue_tail.set(queue_tail);
// If the WordLock is locked, then there is no point waking up a
// thread now. Instead we let the next unlocker take care of waking
// up a thread.
if state.is_locked() {
if state & LOCKED_BIT != 0 {
match self.state.compare_exchange_weak(
state,
state & !QUEUE_LOCKED_BIT,
@ -245,7 +238,7 @@ impl WordLock {
}
// Remove the last thread from the queue and unlock the queue
let new_tail = unsafe { (*queue_tail).prev.get() };
let new_tail = (*queue_tail).prev.get();
if new_tail.is_null() {
loop {
match self.state.compare_exchange_weak(
@ -261,7 +254,7 @@ impl WordLock {
// If the compare_exchange failed because a new thread was
// added to the queue then we need to re-scan the queue to
// find the previous element.
if state.queue_head().is_null() {
if state & QUEUE_MASK == 0 {
continue;
} else {
// Need an acquire fence before reading the new queue
@ -270,9 +263,7 @@ impl WordLock {
}
}
} else {
unsafe {
(*queue_head).queue_tail.set(new_tail);
}
(*queue_head).queue_tail.set(new_tail);
self.state.fetch_and(!QUEUE_LOCKED_BIT, Ordering::Release);
}
@ -280,39 +271,8 @@ impl WordLock {
// we don't need to worry about any races here since the thread is
// guaranteed to be sleeping right now and we are the only one who
// can wake it up.
unsafe {
(*queue_tail).parker.unpark_lock().unpark();
}
(*queue_tail).parker.unpark_lock().unpark();
break;
}
}
}
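The fix-up pass in `unlock_slow` is the subtle part: threads enqueue at the front setting only `next`, so the unlocker walks forward from the head, back-filling `prev` pointers until it reaches a node whose `queue_tail` was set by an earlier pass (the first enqueuer always caches itself), then records the found tail on the head so later passes stop early. A safe, index-based sketch of that walk, with indices into a Vec standing in for the raw `ThreadData` pointers:

const NONE: usize = usize::max_value();

struct Node {
    next: usize,
    prev: usize,
    queue_tail: usize, // only meaningful once a fix-up pass has run
}

fn fixup(nodes: &mut [Node], head: usize) -> usize {
    let mut current = head;
    // Walk forward until we hit a node already processed (queue_tail set).
    let queue_tail = loop {
        if nodes[current].queue_tail != NONE {
            break nodes[current].queue_tail;
        }
        let next = nodes[current].next;
        nodes[next].prev = current; // back-fill the prev pointer
        current = next;
    };
    // Cache the tail on the head so later passes stop early.
    nodes[head].queue_tail = queue_tail;
    queue_tail
}

fn main() {
    // Node 0 was enqueued onto an empty queue, so it cached itself as tail.
    // Nodes 1 and 2 were later pushed at the front with queue_tail unset.
    let mut nodes = vec![
        Node { next: NONE, prev: NONE, queue_tail: 0 }, // node 0 (tail)
        Node { next: 0, prev: NONE, queue_tail: NONE }, // node 1
        Node { next: 1, prev: NONE, queue_tail: NONE }, // node 2 (head)
    ];
    let tail = fixup(&mut nodes, 2);
    assert_eq!(tail, 0);
    assert_eq!(nodes[0].prev, 1); // prev pointers back-filled
    assert_eq!(nodes[1].prev, 2);
    assert_eq!(nodes[2].queue_tail, 0); // cached on the head
}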
trait LockState {
fn is_locked(self) -> bool;
fn is_queue_locked(self) -> bool;
fn queue_head(self) -> *const ThreadData;
fn with_queue_head(self, thread_data: *const ThreadData) -> Self;
}
impl LockState for usize {
#[inline]
fn is_locked(self) -> bool {
self & LOCKED_BIT != 0
}
#[inline]
fn is_queue_locked(self) -> bool {
self & QUEUE_LOCKED_BIT != 0
}
#[inline]
fn queue_head(self) -> *const ThreadData {
(self & QUEUE_MASK) as *const ThreadData
}
#[inline]
fn with_queue_head(self, thread_data: *const ThreadData) -> Self {
(self & !QUEUE_MASK) | thread_data as *const _ as usize
}
}
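These `LockState` helpers, and the raw masking they replace, rely on `ThreadData` being aligned to more than `!QUEUE_MASK` bytes, which is what the assert in `lock_slow` checks: the two low bits of a queue-head address are always zero, so they can carry `LOCKED_BIT` and `QUEUE_LOCKED_BIT` while `QUEUE_MASK` recovers the pointer. A standalone sketch of that packing, with `u64` standing in for `ThreadData`:

const LOCKED_BIT: usize = 1;
const QUEUE_LOCKED_BIT: usize = 2;
const QUEUE_MASK: usize = !3;

fn main() {
    // u64 has alignment greater than 3, so its address has the low bits free.
    let node = Box::new(7u64);
    let head = &*node as *const u64 as usize;
    assert_eq!(head & !QUEUE_MASK, 0, "alignment leaves flag bits clear");

    // Pack: queue head pointer plus both flag bits in one word.
    let state = head | LOCKED_BIT | QUEUE_LOCKED_BIT;

    // Unpack: masking recovers the pointer, bit tests recover the flags.
    assert_eq!(state & QUEUE_MASK, head);
    assert!(state & LOCKED_BIT != 0);
    assert!(state & QUEUE_LOCKED_BIT != 0);
    unsafe { assert_eq!(*((state & QUEUE_MASK) as *const u64), 7) };
}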


@ -1 +0,0 @@
{"files":{"Cargo.toml":"9be8c3913111b0a14c16ff1c5dc5613033b3ba6fd9af93262de9e119d0909a90","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.rst":"ae67e170de747e739273914a468cb93e6e4079b8e277c224c62a18353f660a11","examples/readme.rs":"5a01391acf2acc52a7a2e0ba58dc8ded3e8cc57d54b45778af5e8ba577158f86","src/lib.rs":"2ec3a38a7ca647c94a4c054a2938afd4a07e9cf636b432824623c2fa27a192a3"},"package":"c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"}

26
third_party/rust/scopeguard-0.3.2/Cargo.toml vendored

@ -1,26 +0,0 @@
[package]
name = "scopeguard"
version = "0.3.2"
license = "MIT/Apache-2.0"
repository = "https://github.com/bluss/scopeguard"
documentation = "https://docs.rs/scopeguard/"
authors = ["bluss"]
description = """
A RAII scope guard that will run a given closure when it goes out of scope,
even if the code between panics (assuming unwinding panic).
Defines the macros `defer!` and `defer_on_unwind!`; the latter only runs
if the scope is exited through unwinding on panic.
"""
keywords = ["scope-guard", "defer", "panic"]
categories = ["rust-patterns"]
[features]
default = ["use_std"]
use_std = []
[package.metadata.release]
#no-dev-version = true


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/scopeguard-0.3.2/LICENSE-MIT vendored

@ -1,25 +0,0 @@
Copyright (c) 2015 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

76
third_party/rust/scopeguard-0.3.2/README.rst vendored

@ -1,76 +0,0 @@
scopeguard
==========
Rust crate for a convenient RAII scope guard that will run a given closure when
it goes out of scope, even if the code between panics (assuming unwinding panic).
The `defer!` macro and `guard` are `no_std` compatible (require only core),
but the on unwinding strategy requires linking to `std`.
Requires Rust 1.11.
Please read the `API documentation here`__
__ https://docs.rs/scopeguard/
|build_status|_ |crates|_
.. |build_status| image:: https://travis-ci.org/bluss/scopeguard.svg
.. _build_status: https://travis-ci.org/bluss/scopeguard
.. |crates| image:: http://meritbadge.herokuapp.com/scopeguard
.. _crates: https://crates.io/crates/scopeguard
How to use
----------
.. code:: rust
#[macro_use(defer)] extern crate scopeguard;
use scopeguard::guard;
fn f() {
defer!(println!("Called at return or panic"));
panic!();
}
use std::fs::File;
use std::io::Write;
fn g() {
let f = File::create("newfile.txt").unwrap();
let mut file = guard(f, |f| {
// write file at return or panic
let _ = f.sync_all();
});
// Access the file through the scope guard itself
file.write(b"test me\n").unwrap();
}
Recent Changes
--------------
- 0.3.2
- Add crate categories
- 0.3.1
- Add ``defer_on_unwind!``, ``Strategy`` trait
- Rename ``Guard`` to ``ScopeGuard``
- Add ``ScopeGuard::with_strategy``.
- ``ScopeGuard`` now implements ``Debug``.
- Require Rust 1.11
- 0.2.0
- Require Rust 1.6
- Use `no_std` unconditionally
- No other changes
- 0.1.2
- Add macro ``defer!()``


@ -1,27 +0,0 @@
#[macro_use(defer)] extern crate scopeguard;
use scopeguard::guard;
fn f() {
defer!(println!("Called at return or panic"));
panic!();
}
use std::fs::File;
use std::io::Write;
fn g() {
let f = File::create("newfile.txt").unwrap();
let mut file = guard(f, |f| {
// write file at return or panic
let _ = f.sync_all();
});
// Access the file through the scope guard itself
file.write(b"test me\n").unwrap();
}
fn main() {
f();
g();
}

268
third_party/rust/scopeguard-0.3.2/src/lib.rs vendored

@ -1,268 +0,0 @@
//! A scope guard will run a given closure when it goes out of scope,
//! even if the code between panics.
//! (as long as panic doesn't abort)
#![cfg_attr(not(any(test, feature = "use_std")), no_std)]
//!
//!
//! Crate features:
//!
//! - `use_std`
//! + Enabled by default. Enables the `OnUnwind` strategy.
//! + Disable to use `no_std`.
#[cfg(not(any(test, feature = "use_std")))]
extern crate core as std;
use std::fmt;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
pub trait Strategy {
/// Return `true` if the guard's associated code should run
/// (in the context where this method is called).
fn should_run() -> bool;
}
/// Always run on scope exit.
///
/// “Always” run: on regular exit from a scope or on unwinding from a panic.
/// Cannot run on abort, process exit, and other catastrophic events where
/// destructors don't run.
#[derive(Debug)]
pub enum Always {}
/// Run on scope exit through unwinding.
///
/// Requires crate feature `use_std`.
#[cfg(feature = "use_std")]
#[derive(Debug)]
pub enum OnUnwind {}
/// Run on regular scope exit, when not unwinding.
///
/// Requires crate feature `use_std`.
#[cfg(feature = "use_std")]
#[derive(Debug)]
#[cfg(test)]
enum OnSuccess {}
impl Strategy for Always {
#[inline(always)]
fn should_run() -> bool { true }
}
#[cfg(feature = "use_std")]
impl Strategy for OnUnwind {
#[inline(always)]
fn should_run() -> bool { std::thread::panicking() }
}
#[cfg(feature = "use_std")]
#[cfg(test)]
impl Strategy for OnSuccess {
#[inline(always)]
fn should_run() -> bool { !std::thread::panicking() }
}
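Since `Strategy::should_run` is just an associated predicate, downstream code can plug its own policy into `ScopeGuard::with_strategy`. A hedged sketch against the 0.3 API shown here; the `Never` strategy is an invented example, not part of the crate:

extern crate scopeguard;

use scopeguard::{ScopeGuard, Strategy};

// Invented strategy: the guard's closure never runs.
enum Never {}

impl Strategy for Never {
    fn should_run() -> bool {
        false
    }
}

fn main() {
    let guard: ScopeGuard<_, _, Never> =
        ScopeGuard::with_strategy(vec![1, 2, 3], |v: &mut Vec<i32>| v.clear());
    assert_eq!(guard.len(), 3); // Deref gives access to the inner value
    drop(guard); // closure is skipped because Never::should_run() is false
}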
/// Macro to create a `ScopeGuard` (always run).
///
/// The macro takes one expression `$e`, which is the body of a closure
/// that will run when the scope is exited. The expression can
/// be a whole block.
#[macro_export]
macro_rules! defer {
($e:expr) => {
let _guard = $crate::guard((), |_| $e);
}
}
/// Macro to create a `ScopeGuard` (run on successful scope exit).
///
/// The macro takes one expression `$e`, which is the body of a closure
/// that will run when the scope is exited. The expression can
/// be a whole block.
///
/// Requires crate feature `use_std`.
#[cfg(test)]
macro_rules! defer_on_success {
($e:expr) => {
let _guard = $crate::guard_on_success((), |_| $e);
}
}
/// Macro to create a `ScopeGuard` (run on unwinding from panic).
///
/// The macro takes one expression `$e`, which is the body of a closure
/// that will run when the scope is exited. The expression can
/// be a whole block.
///
/// Requires crate feature `use_std`.
#[macro_export]
macro_rules! defer_on_unwind {
($e:expr) => {
let _guard = $crate::guard_on_unwind((), |_| $e);
}
}
/// `ScopeGuard` is a scope guard that may own a protected value.
///
/// If you place a guard in a local variable, the closure can
/// run regardless of how you leave the scope, through regular return or panic
/// (except if panic or other code aborts; so as long as destructors run).
/// It is run only once.
///
/// The `S` parameter for [`Strategy`](Strategy.t.html) determines if
/// the closure actually runs.
///
/// The guard's closure will be called with a mut ref to the held value
/// in the destructor. It's called only once.
///
/// The `ScopeGuard` implements `Deref` so that you can access the inner value.
pub struct ScopeGuard<T, F, S: Strategy = Always>
where F: FnMut(&mut T)
{
__dropfn: F,
__value: T,
strategy: PhantomData<S>,
}
impl<T, F, S> ScopeGuard<T, F, S>
where F: FnMut(&mut T),
S: Strategy,
{
/// Create a `ScopeGuard` that owns `v` (accessible through deref) and calls
/// `dropfn` when its destructor runs.
///
/// The `Strategy` decides whether the scope guard's closure should run.
pub fn with_strategy(v: T, dropfn: F) -> ScopeGuard<T, F, S> {
ScopeGuard {
__value: v,
__dropfn: dropfn,
strategy: PhantomData,
}
}
}
/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
pub fn guard<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, Always>
where F: FnMut(&mut T)
{
ScopeGuard::with_strategy(v, dropfn)
}
#[cfg(feature = "use_std")]
/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
///
/// Requires crate feature `use_std`.
#[cfg(test)]
fn guard_on_success<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnSuccess>
where F: FnMut(&mut T)
{
ScopeGuard::with_strategy(v, dropfn)
}
#[cfg(feature = "use_std")]
/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
///
/// Requires crate feature `use_std`.
pub fn guard_on_unwind<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnUnwind>
where F: FnMut(&mut T)
{
ScopeGuard::with_strategy(v, dropfn)
}
impl<T, F, S: Strategy> Deref for ScopeGuard<T, F, S>
where F: FnMut(&mut T)
{
type Target = T;
fn deref(&self) -> &T {
&self.__value
}
}
impl<T, F, S: Strategy> DerefMut for ScopeGuard<T, F, S>
where F: FnMut(&mut T)
{
fn deref_mut(&mut self) -> &mut T {
&mut self.__value
}
}
impl<T, F, S: Strategy> Drop for ScopeGuard<T, F, S>
where F: FnMut(&mut T)
{
fn drop(&mut self) {
if S::should_run() {
(self.__dropfn)(&mut self.__value)
}
}
}
impl<T, F, S> fmt::Debug for ScopeGuard<T, F, S>
where T: fmt::Debug,
F: FnMut(&mut T),
S: Strategy + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ScopeGuard")
.field("value", &self.__value)
.finish()
}
}
#[cfg(test)]
mod tests {
use std::cell::Cell;
use std::panic::catch_unwind;
use std::panic::AssertUnwindSafe;
#[test]
fn test_defer() {
let drops = Cell::new(0);
defer!(drops.set(1000));
assert_eq!(drops.get(), 0);
}
#[test]
fn test_defer_success_1() {
let drops = Cell::new(0);
{
defer_on_success!(drops.set(1));
assert_eq!(drops.get(), 0);
}
assert_eq!(drops.get(), 1);
}
#[test]
fn test_defer_success_2() {
let drops = Cell::new(0);
let _ = catch_unwind(AssertUnwindSafe(|| {
defer_on_success!(drops.set(1));
panic!("failure")
}));
assert_eq!(drops.get(), 0);
}
#[test]
fn test_defer_unwind_1() {
let drops = Cell::new(0);
let _ = catch_unwind(AssertUnwindSafe(|| {
defer_on_unwind!(drops.set(1));
assert_eq!(drops.get(), 0);
panic!("failure")
}));
assert_eq!(drops.get(), 1);
}
#[test]
fn test_defer_unwind_2() {
let drops = Cell::new(0);
{
defer_on_unwind!(drops.set(1));
}
assert_eq!(drops.get(), 0);
}
}


@ -1 +1 @@
{"files":{"Cargo.toml":"3338b13bca1bd8bf830f563dd201bc5deed2a4848c7f6485c40ea2f3469c8279","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.rst":"dc20b385e388c7989454e3a6a96e8112e48680258895562097bf8c809b4d8106","examples/readme.rs":"d00fe19aecd7ca1632bcf176306f7a13ed8fdefa890761aa2c532f8c97532a33","src/lib.rs":"957f1f548d91129c6c9b248dd28c3857a1c988123a70da34dacef416d4204bec"},"package":"b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"}
{"files":{"Cargo.toml":"9be8c3913111b0a14c16ff1c5dc5613033b3ba6fd9af93262de9e119d0909a90","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.rst":"ae67e170de747e739273914a468cb93e6e4079b8e277c224c62a18353f660a11","examples/readme.rs":"5a01391acf2acc52a7a2e0ba58dc8ded3e8cc57d54b45778af5e8ba577158f86","src/lib.rs":"2ec3a38a7ca647c94a4c054a2938afd4a07e9cf636b432824623c2fa27a192a3"},"package":"c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"}

38
third_party/rust/scopeguard/Cargo.toml vendored

@ -1,28 +1,26 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "scopeguard"
version = "1.0.0"
authors = ["bluss"]
description = "A RAII scope guard that will run a given closure when it goes out of scope,\neven if the code between panics (assuming unwinding panic).\n\nDefines the macros `defer!`, `defer_on_unwind!`, `defer_on_success!` as\nshorthands for guards with one of the implemented strategies.\n"
documentation = "https://docs.rs/scopeguard/"
keywords = ["scope-guard", "defer", "panic", "unwind"]
categories = ["rust-patterns", "no-std"]
version = "0.3.2"
license = "MIT/Apache-2.0"
repository = "https://github.com/bluss/scopeguard"
[package.metadata.release]
no-dev-version = true
documentation = "https://docs.rs/scopeguard/"
authors = ["bluss"]
description = """
A RAII scope guard that will run a given closure when it goes out of scope,
even if the code between panics (assuming unwinding panic).
Defines the macros `defer!` and `defer_on_unwind!`; the latter only runs
if the scope is exited through unwinding on panic.
"""
keywords = ["scope-guard", "defer", "panic"]
categories = ["rust-patterns"]
[features]
default = ["use_std"]
use_std = []
[package.metadata.release]
#no-dev-version = true

28
third_party/rust/scopeguard/README.rst vendored

@ -6,9 +6,9 @@ Rust crate for a convenient RAII scope guard that will run a given closure when
it goes out of scope, even if the code between panics (assuming unwinding panic).
The `defer!` macro and `guard` are `no_std` compatible (require only core),
but the on unwinding / not on unwinding strategies require linking to `std`.
but the on unwinding strategy requires linking to `std`.
Requires Rust 1.20.
Requires Rust 1.11.
Please read the `API documentation here`__
@ -47,34 +47,12 @@ How to use
let _ = f.sync_all();
});
// Access the file through the scope guard itself
file.write_all(b"test me\n").unwrap();
file.write(b"test me\n").unwrap();
}
Recent Changes
--------------
- 1.0.0
- Change the closure type from ``FnMut(&mut T)`` to ``FnOnce(T)``:
Passing the inner value by value instead of a mutable reference is a
breaking change, but allows the guard closure to consume it. (by @tormol)
- Add ``defer_on_success!{}``, ``guard_on_success()`` and ``OnSuccess``
strategy, which triggers when scope is exited *without* panic. It's the
opposite to ``OnUnwind`` / ``guard_on_unwind()`` / ``defer_on_unwind!{}``.
- Add ``ScopeGuard::into_inner()``, which "defuses" the guard and returns the
guarded value. (by @tormol)
- Implement ``Sync`` for guards with non-``Sync`` closures.
- Require Rust 1.20
- 0.3.3
- Use ``#[inline]`` on a few more functions by @stjepang (#14)
- Add examples to crate documentation
- 0.3.2
- Add crate categories


@ -18,7 +18,7 @@ fn g() {
let _ = f.sync_all();
});
// Access the file through the scope guard itself
file.write_all(b"test me\n").unwrap();
file.write(b"test me\n").unwrap();
}
fn main() {

400
third_party/rust/scopeguard/src/lib.rs vendored

@ -1,203 +1,24 @@
#![cfg_attr(not(any(test, feature = "use_std")), no_std)]
#![doc(html_root_url = "https://docs.rs/scopeguard/1/")]
//! A scope guard will run a given closure when it goes out of scope,
//! even if the code between panics.
//! (as long as panic doesn't abort)
//!
//! # Examples
//!
//! ## Hello World
//!
//! This example creates a scope guard with an example function:
//!
//! ```
//! extern crate scopeguard;
//!
//! fn f() {
//! let _guard = scopeguard::guard((), |_| {
//! println!("Hello Scope Exit!");
//! });
//!
//! // rest of the code here.
//!
//! // Here, at the end of `_guard`'s scope, the guard's closure is called.
//! // It is also called if we exit this scope through unwinding instead.
//! }
//! # fn main() {
//! # f();
//! # }
//! ```
//!
//! ## `defer!`
//!
//! Use the `defer` macro to run an operation at scope exit,
//! either regular scope exit or during unwinding from a panic.
//!
//! ```
//! #[macro_use(defer)] extern crate scopeguard;
//!
//! use std::cell::Cell;
//!
//! fn main() {
//! // use a cell to observe drops during and after the scope guard is active
//! let drop_counter = Cell::new(0);
//! {
//! // Create a scope guard using `defer!` for the current scope
//! defer! {{
//! drop_counter.set(1 + drop_counter.get());
//! }};
//!
//! // Do regular operations here in the meantime.
//!
//! // Just before scope exit: it hasn't run yet.
//! assert_eq!(drop_counter.get(), 0);
//!
//! // The following scope end is where the defer closure is called
//! }
//! assert_eq!(drop_counter.get(), 1);
//! }
//! ```
//!
//! ## Scope Guard with Value
//!
//! If the scope guard closure needs to access an outer value that is also
//! mutated outside of the scope guard, then you may want to use the scope guard
//! with a value. The guard works like a smart pointer, so the inner value can
//! be accessed by reference or by mutable reference.
//!
//! ### 1. The guard owns a file
//!
//! In this example, the scope guard owns a file and ensures pending writes are
//! synced at scope exit.
//!
//! ```
//! extern crate scopeguard;
//!
//! use std::fs::*;
//! use std::io::{self, Write};
//! # // Mock file so that we don't actually write a file
//! # struct MockFile;
//! # impl MockFile {
//! # fn create(_s: &str) -> io::Result<Self> { Ok(MockFile) }
//! # fn write_all(&self, _b: &[u8]) -> io::Result<()> { Ok(()) }
//! # fn sync_all(&self) -> io::Result<()> { Ok(()) }
//! # }
//! # use self::MockFile as File;
//!
//! fn try_main() -> io::Result<()> {
//! let f = File::create("newfile.txt")?;
//! let mut file = scopeguard::guard(f, |f| {
//! // ensure we flush file at return or panic
//! let _ = f.sync_all();
//! });
//! // Access the file through the scope guard itself
//! file.write_all(b"test me\n").map(|_| ())
//! }
//!
//! fn main() {
//! try_main().unwrap();
//! }
//!
//! ```
//!
//! ### 2. The guard restores an invariant on scope exit
//!
//! ```
//! extern crate scopeguard;
//!
//! use std::mem::ManuallyDrop;
//! use std::ptr;
//!
//! // This function, just for this example, takes the first element
//! // and inserts it into the assumed sorted tail of the vector.
//! //
//! // For optimization purposes we temporarily violate an invariant of the
//! // Vec, that it owns all of its elements.
//! //
//! // The safe approach is to use swap, which means two writes to memory,
//! // the optimization is to use a “hole” which uses only one write of memory
//! // for each position it moves.
//! //
//! // We *must* use a scope guard to run this code safely. We
//! // are running arbitrary user code (comparison operators) that may panic.
//! // The scope guard ensures we restore the invariant after successful
//! // exit or during unwinding from panic.
//! fn insertion_sort_first<T>(v: &mut Vec<T>)
//! where T: PartialOrd
//! {
//! struct Hole<'a, T: 'a> {
//! v: &'a mut Vec<T>,
//! index: usize,
//! value: ManuallyDrop<T>,
//! }
//!
//! unsafe {
//! // Create a moved-from location in the vector, a “hole”.
//! let value = ptr::read(&v[0]);
//! let mut hole = Hole { v: v, index: 0, value: ManuallyDrop::new(value) };
//!
//! // Use a scope guard with a value.
//! // At scope exit, plug the hole so that the vector is fully
//! // initialized again.
//! // The scope guard owns the hole, but we can access it through the guard.
//! let mut hole_guard = scopeguard::guard(hole, |hole| {
//! // plug the hole in the vector with the value that was taken out
//! let index = hole.index;
//! ptr::copy_nonoverlapping(&*hole.value, &mut hole.v[index], 1);
//! });
//!
//! // run algorithm that moves the hole in the vector here
//! // move the hole until it's in a sorted position
//! for i in 1..hole_guard.v.len() {
//! if *hole_guard.value >= hole_guard.v[i] {
//! // move the element back and the hole forward
//! let index = hole_guard.index;
//! ptr::copy_nonoverlapping(&hole_guard.v[index + 1], &mut hole_guard.v[index], 1);
//! hole_guard.index += 1;
//! } else {
//! break;
//! }
//! }
//!
//! // When the scope exits here, the Vec becomes whole again!
//! }
//! }
//!
//! fn main() {
//! let string = String::from;
//! let mut data = vec![string("c"), string("a"), string("b"), string("d")];
//! insertion_sort_first(&mut data);
//! assert_eq!(data, vec!["a", "b", "c", "d"]);
//! }
//!
//! ```
#![cfg_attr(not(any(test, feature = "use_std")), no_std)]
//!
//!
//! # Crate Features
//! Crate features:
//!
//! - `use_std`
//! + Enabled by default. Enables the `OnUnwind` and `OnSuccess` strategies.
//! + Enabled by default. Enables the `OnUnwind` strategy.
//! + Disable to use `no_std`.
//!
//! # Rust Version
//!
//! This version of the crate requires Rust 1.20 or later.
//!
//! The scopeguard 1.x release series will use a carefully considered version
//! upgrade policy, where in a later 1.x version, we will raise the minimum
//! required Rust version.
#[cfg(not(any(test, feature = "use_std")))]
extern crate core as std;
use std::fmt;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::ops::{Deref, DerefMut};
use std::ptr;
/// Controls in which cases the associated code should be run
pub trait Strategy {
/// Return `true` if the guard's associated code should run
/// (in the context where this method is called).
@ -224,7 +45,8 @@ pub enum OnUnwind {}
/// Requires crate feature `use_std`.
#[cfg(feature = "use_std")]
#[derive(Debug)]
pub enum OnSuccess {}
#[cfg(test)]
enum OnSuccess {}
impl Strategy for Always {
#[inline(always)]
@ -233,13 +55,14 @@ impl Strategy for Always {
#[cfg(feature = "use_std")]
impl Strategy for OnUnwind {
#[inline]
#[inline(always)]
fn should_run() -> bool { std::thread::panicking() }
}
#[cfg(feature = "use_std")]
#[cfg(test)]
impl Strategy for OnSuccess {
#[inline]
#[inline(always)]
fn should_run() -> bool { !std::thread::panicking() }
}
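(As orientation, not part of the diff: a minimal sketch of how the strategies above behave at scope exit, assuming the `use_std` feature so that `std::thread::panicking()` is available.)

```rust
extern crate scopeguard;

use scopeguard::{guard, guard_on_unwind};

fn main() {
    {
        // `Always` strategy: the closure fires on any scope exit
        let _g = guard((), |_| println!("always: scope exited"));
        // `OnUnwind` strategy: the closure fires only during a panic
        let _u = guard_on_unwind((), |_| println!("on_unwind: panicking"));
    } // only the `always` message is printed here
}
```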
@ -251,7 +74,7 @@ impl Strategy for OnSuccess {
#[macro_export]
macro_rules! defer {
($e:expr) => {
let _guard = $crate::guard((), |()| $e);
let _guard = $crate::guard((), |_| $e);
}
}
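(A minimal usage sketch of `defer!`, not part of the diff; it assumes the macro is imported with `#[macro_use]`.)

```rust
#[macro_use]
extern crate scopeguard;

fn main() {
    {
        defer!(println!("second: deferred until the scope exits"));
        println!("first: the body runs before the deferred closure");
    }
}
```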
@ -262,11 +85,10 @@ macro_rules! defer {
/// be a whole block.
///
/// Requires crate feature `use_std`.
#[cfg(feature = "use_std")]
#[macro_export]
#[cfg(test)]
macro_rules! defer_on_success {
($e:expr) => {
let _guard = $crate::guard_on_success((), |()| $e);
let _guard = $crate::guard_on_success((), |_| $e);
}
}
@ -277,11 +99,10 @@ macro_rules! defer_on_success {
/// be a whole block.
///
/// Requires crate feature `use_std`.
#[cfg(feature = "use_std")]
#[macro_export]
macro_rules! defer_on_unwind {
($e:expr) => {
let _guard = $crate::guard_on_unwind((), |()| $e);
let _guard = $crate::guard_on_unwind((), |_| $e);
}
}
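(A sketch of `defer_on_unwind!` observed through `catch_unwind`, not part of the diff; requires the `use_std` feature.)

```rust
#[macro_use]
extern crate scopeguard;

use std::panic::catch_unwind;

fn main() {
    let _ = catch_unwind(|| {
        defer_on_unwind!(println!("cleanup: runs only because we are unwinding"));
        panic!("boom");
    });
    // Without the panic, the deferred closure would never run.
}
```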
@ -295,187 +116,105 @@ macro_rules! defer_on_unwind {
/// The `S` parameter for [`Strategy`](Strategy.t.html) determines if
/// the closure actually runs.
///
/// The guard's closure will be called with the held value in the destructor.
/// The guard's closure will be called with a mut ref to the held value
/// in the destructor. It's called only once.
///
/// The `ScopeGuard` implements `Deref` so that you can access the inner value.
pub struct ScopeGuard<T, F, S = Always>
where F: FnOnce(T),
S: Strategy,
pub struct ScopeGuard<T, F, S: Strategy = Always>
where F: FnMut(&mut T)
{
value: ManuallyDrop<T>,
dropfn: ManuallyDrop<F>,
strategy: PhantomData<fn(S) -> S>,
__dropfn: F,
__value: T,
strategy: PhantomData<S>,
}
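(Both signatures above agree that the guard derefs to the held value; a small sketch, not part of the diff.)

```rust
extern crate scopeguard;

fn main() {
    let mut log = scopeguard::guard(Vec::new(), |v| {
        // `v` is the vector, handed to the closure when the guard drops
        assert_eq!(v.len(), 2);
    });
    // `Deref`/`DerefMut` let the guard stand in for the Vec itself
    log.push("opened");
    log.push("closed");
}
```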
impl<T, F, S> ScopeGuard<T, F, S>
where F: FnOnce(T),
where F: FnMut(&mut T),
S: Strategy,
{
/// Create a `ScopeGuard` that owns `v` (accessible through deref) and calls
/// `dropfn` when its destructor runs.
///
/// The `Strategy` decides whether the scope guard's closure should run.
#[inline]
pub fn with_strategy(v: T, dropfn: F) -> ScopeGuard<T, F, S> {
ScopeGuard {
value: ManuallyDrop::new(v),
dropfn: ManuallyDrop::new(dropfn),
__value: v,
__dropfn: dropfn,
strategy: PhantomData,
}
}
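(A sketch of calling `with_strategy` directly, spelling the strategy out via the type parameter instead of using the `guard*` helpers; not part of the diff.)

```rust
extern crate scopeguard;

use scopeguard::{Always, ScopeGuard};

fn main() {
    // Explicitly select the `Always` strategy
    let g = ScopeGuard::<_, _, Always>::with_strategy(5, |v| {
        println!("guard dropped, value was {:?}", v);
    });
    assert_eq!(*g, 5);
}
```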
/// “Defuse” the guard and extract the value without calling the closure.
///
/// ```
/// extern crate scopeguard;
/// use scopeguard::{guard, ScopeGuard};
///
/// fn conditional() -> bool { true }
///
/// fn main() {
/// let mut guard = guard(Vec::new(), |mut v| v.clear());
/// guard.push(1);
///
/// if conditional() {
/// // a condition maybe makes us decide to
/// // “defuse” the guard and get back its inner parts
/// let value = ScopeGuard::into_inner(guard);
/// } else {
/// // guard still exists in this branch
/// }
/// }
/// ```
#[inline]
pub fn into_inner(guard: Self) -> T {
// Cannot pattern match out of Drop-implementing types, so
// ptr::read the value and forget the guard.
unsafe {
let value = ptr::read(&*guard.value);
// read the closure so that it is dropped, and assign it to a local
// variable to ensure that it is only dropped after the guard has
// been forgotten. (In case the Drop impl of the closure, or that
// of any consumed captured variable, panics).
let _dropfn = ptr::read(&*guard.dropfn);
mem::forget(guard);
value
}
}
}
/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
#[inline]
pub fn guard<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, Always>
where F: FnOnce(T)
where F: FnMut(&mut T)
{
ScopeGuard::with_strategy(v, dropfn)
}
#[cfg(feature = "use_std")]
/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
///
/// Requires crate feature `use_std`.
#[cfg(feature = "use_std")]
#[inline]
pub fn guard_on_success<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnSuccess>
where F: FnOnce(T)
#[cfg(test)]
fn guard_on_success<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnSuccess>
where F: FnMut(&mut T)
{
ScopeGuard::with_strategy(v, dropfn)
}
#[cfg(feature = "use_std")]
/// Create a new `ScopeGuard` owning `v` and with deferred closure `dropfn`.
///
/// Requires crate feature `use_std`.
///
/// ## Examples
///
/// For performance reasons, or to emulate “only run guard on unwind” in
/// no-std environments, we can also use the default guard and simply defuse
/// it manually at the end of the scope, as in the following example. (The
/// performance concern would be [`OnUnwind`]'s call to
/// [std::thread::panicking()].)
///
/// ```
/// extern crate scopeguard;
///
/// use scopeguard::ScopeGuard;
/// # fn main() {
/// {
/// let guard = scopeguard::guard((), |_| { });
///
/// // rest of the code here
///
/// // we reached the end of scope without unwinding - defuse it
/// ScopeGuard::into_inner(guard);
/// }
/// # }
/// ```
#[cfg(feature = "use_std")]
#[inline]
pub fn guard_on_unwind<T, F>(v: T, dropfn: F) -> ScopeGuard<T, F, OnUnwind>
where F: FnOnce(T)
where F: FnMut(&mut T)
{
ScopeGuard::with_strategy(v, dropfn)
}
// ScopeGuard can be Sync even if F isn't, because the closure is
// not accessible from references.
// The guard does not store any instance of S, so S is irrelevant here.
unsafe impl<T, F, S> Sync for ScopeGuard<T, F, S>
where T: Sync,
F: FnOnce(T),
S: Strategy
{ }
impl<T, F, S> Deref for ScopeGuard<T, F, S>
where F: FnOnce(T),
S: Strategy
impl<T, F, S: Strategy> Deref for ScopeGuard<T, F, S>
where F: FnMut(&mut T)
{
type Target = T;
fn deref(&self) -> &T {
&*self.value
&self.__value
}
}
impl<T, F, S> DerefMut for ScopeGuard<T, F, S>
where F: FnOnce(T),
S: Strategy
impl<T, F, S: Strategy> DerefMut for ScopeGuard<T, F, S>
where F: FnMut(&mut T)
{
fn deref_mut(&mut self) -> &mut T {
&mut*self.value
&mut self.__value
}
}
impl<T, F, S> Drop for ScopeGuard<T, F, S>
where F: FnOnce(T),
S: Strategy
impl<T, F, S: Strategy> Drop for ScopeGuard<T, F, S>
where F: FnMut(&mut T)
{
fn drop(&mut self) {
// This is OK because the fields are `ManuallyDrop`s
// which will not be dropped by the compiler.
let (value, dropfn) = unsafe {
(ptr::read(&*self.value), ptr::read(&*self.dropfn))
};
if S::should_run() {
dropfn(value);
(self.__dropfn)(&mut self.__value)
}
}
}
impl<T, F, S> fmt::Debug for ScopeGuard<T, F, S>
where T: fmt::Debug,
F: FnOnce(T),
S: Strategy
F: FnMut(&mut T),
S: Strategy + fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct(stringify!(ScopeGuard))
.field("value", &*self.value)
f.debug_struct("ScopeGuard")
.field("value", &self.__value)
.finish()
}
}
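(The `Debug` output intentionally shows only the held value; a quick sketch, not part of the diff.)

```rust
extern crate scopeguard;

fn main() {
    let g = scopeguard::guard(vec![1, 2], |_| {});
    // Prints something like: ScopeGuard { value: [1, 2] }
    println!("{:?}", g);
}
```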
#[cfg(test)]
mod tests {
use super::*;
use std::cell::Cell;
use std::panic::catch_unwind;
use std::panic::AssertUnwindSafe;
@ -487,7 +226,6 @@ mod tests {
assert_eq!(drops.get(), 0);
}
#[cfg(feature = "use_std")]
#[test]
fn test_defer_success_1() {
let drops = Cell::new(0);
@ -498,7 +236,6 @@ mod tests {
assert_eq!(drops.get(), 1);
}
#[cfg(feature = "use_std")]
#[test]
fn test_defer_success_2() {
let drops = Cell::new(0);
@ -509,7 +246,6 @@ mod tests {
assert_eq!(drops.get(), 0);
}
#[cfg(feature = "use_std")]
#[test]
fn test_defer_unwind_1() {
let drops = Cell::new(0);
@ -521,7 +257,6 @@ mod tests {
assert_eq!(drops.get(), 1);
}
#[cfg(feature = "use_std")]
#[test]
fn test_defer_unwind_2() {
let drops = Cell::new(0);
@ -530,49 +265,4 @@ mod tests {
}
assert_eq!(drops.get(), 0);
}
#[test]
fn test_only_dropped_by_closure_when_run() {
let value_drops = Cell::new(0);
let value = guard((), |()| value_drops.set(1 + value_drops.get()));
let closure_drops = Cell::new(0);
let guard = guard(value, |_| closure_drops.set(1 + closure_drops.get()));
assert_eq!(value_drops.get(), 0);
assert_eq!(closure_drops.get(), 0);
drop(guard);
assert_eq!(value_drops.get(), 1);
assert_eq!(closure_drops.get(), 1);
}
#[cfg(feature = "use_std")]
#[test]
fn test_dropped_once_when_not_run() {
let value_drops = Cell::new(0);
let value = guard((), |()| value_drops.set(1 + value_drops.get()));
let captured_drops = Cell::new(0);
let captured = guard((), |()| captured_drops.set(1 + captured_drops.get()));
let closure_drops = Cell::new(0);
let guard = guard_on_unwind(value, |value| {
drop(value);
drop(captured);
closure_drops.set(1 + closure_drops.get())
});
assert_eq!(value_drops.get(), 0);
assert_eq!(captured_drops.get(), 0);
assert_eq!(closure_drops.get(), 0);
drop(guard);
assert_eq!(value_drops.get(), 1);
assert_eq!(captured_drops.get(), 1);
assert_eq!(closure_drops.get(), 0);
}
#[test]
fn test_into_inner() {
let dropped = Cell::new(false);
let value = guard(42, |_| dropped.set(true));
let guard = guard(value, |_| dropped.set(true));
let inner = ScopeGuard::into_inner(guard);
assert_eq!(dropped.get(), false);
assert_eq!(*inner, 42);
}
}

2
third_party/rust/uuid/.cargo-checksum.json

@ -1 +1 @@
{"files":{"CODEOWNERS":"65d3fcb4156a2d5bce80d382a34044753e384d7f1eb71cdc646de400a0b969c8","CODE_OF_CONDUCT.md":"7d9c9062ee835c2dfd348cfddb938c563f3a7b1140dd090444a03ea1d73626b1","CONTRIBUTING.md":"c2b507733d5af2de972d63237a094a135935ad45cc74dedb79c199d841f35a3e","COPYRIGHT":"b4b2c0de2a05de3372d5c828128413ce82bb7dba2272487b7729f09cc3d3519d","Cargo.toml":"2647794e162e5e764854003d4e0ca2e2d0de5f7c11e3ec61ab53fae310328aab","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"436bc5a105d8e57dcd8778730f3754f7bf39c14d2f530e4cde4bd2d17a83ec3d","README.md":"f82b58d44ed24b07cc8e3a14e233ff2d0aa297732da1b789f16a84293de39e23","README.tpl":"1d5787815cea427e5cf0cce634bc629b03d51dda355fa52de500a9658c45625b","benches/format_str.rs":"0d080946d397a2578a978105a5a27309edded7115d2081e683170f0bf96edc3e","benches/invalid_parse_str.rs":"7f44d2ebec6ee1368d179f12dd09a288d589b252434082b2de134a658b460812","benches/mod.rs":"4733d7aa62dafe3e85ab90dca518b57f350a5538ea5643c5313e63939e884a45","benches/serde_support.rs":"afc719718c9a5d705b60bc9cd39720b921d9ee63ccf11c4b2900f02beea70c1b","benches/slog_support/mod.rs":"1be626f0a6921f4e6bd333ce7ab4a6c4da1fb6f3ae1c503672b4ba168a70c01d","benches/slog_support/parse_str.rs":"9c63ee7047ac8b9d08f02b7081020dd7300f84f302068927c859bbe26cea66a3","benches/valid_parse_str.rs":"7db47c7d25b20c8da03f25107fbea2b5c97fc814ff226e8489eda374f477eeac","src/adapter/compact.rs":"fa76330d5ff33fbb0f7da5898caee64d0e74fbe435c86621fefb864ee43560ec","src/adapter/core_support/mod.rs":"65bebe5034e450782ec9b0942bd4d758795ee315095fcc3f54412630a987f938","src/adapter/mod.rs":"6051f59190e438bbbd702e173fc85cc410b0d9b07b553991600064a34d53d2da","src/builder.rs":"86d8607e783a4a7429712edeae4ed34aff776064f1ce4b99d8b71461146b3e38","src/core_support.rs":"c5c94c3eab19a5833ec588db86d10aa94731f724d7fc5554f6e47d072ccdd88b","src/lib.rs":"2794fa11c36c09f4e356ce8ad0859f0df7eced8b03716e7df2aa971f30ff5059","src/parser/core_support.rs":"e37812662674fef191aca4574d705cbfaac6516a99a6871d88f7843494e5e653","src/parser/mod.rs":"51526e211c95730c830512007da23dfc9f88d1feccc9ddf881c541c1d5e01e2a","src/parser/std_support.rs":"4398d708bd42e8d3cb31eed8ada92615fb1cbfc70bfb3c7cbe952f47c7fe1183","src/prelude.rs":"c2c359c483993ffa3b2469ee5017d68d5c9d0c4226758112f585949e76971fff","src/serde_support.rs":"8821ba4b73f35d9a1ab19b3a32922cbdc991a7dce07062ca98230f45fdd57d98","src/slog_support.rs":"370f891a73f99436baecd21f5f2b7d7c89842336aad99167b07ca3f03c48a70c","src/std_support.rs":"eeb0d6560f96c8ce3cacafa6fe5342a8ad3b86387bf3455fb66459e28b39a6e1","src/test_util.rs":"1dfc1ab68bb403dd6d696fafeb7c00be59c37b51155703f3033ebf1062dd629f","src/u128_support.rs":"97ca20af9117e44bad72f987488efb0173699a22e0c646b268e0fe3dd90355a7","src/v1.rs":"82654b0cadfa56fd0140d78db5ab2d9869ea3d8eaaede0b975d42904317e9da4","src/v3.rs":"d25899b070bd791bc2b784d828399f5bce25f77300765dfd96e76583f31047f3","src/v4.rs":"c38784386b1f44d6333c4447140dd8ba0deec2d8c5bace5abd0e48f523716b0b","src/v5.rs":"11aeea13d38c5e3c5d7cc8bf571ac1ce57a0d46f363b90a991ed43dc1cc9caaa","src/winapi_support.rs":"13d2d83dd14ece29dfd88b4c5985ef62ff8017278bac0809dba334483881c457"},"package":"90dbc611eb48397705a6b0f6e917da23ae517e4d127123d2cf7674206627d32a"}
{"files":{"CODEOWNERS":"65d3fcb4156a2d5bce80d382a34044753e384d7f1eb71cdc646de400a0b969c8","CODE_OF_CONDUCT.md":"7d9c9062ee835c2dfd348cfddb938c563f3a7b1140dd090444a03ea1d73626b1","CONTRIBUTING.md":"cb8fa34c1d15542a318a7cbe3cd41d364f6100d49043825413f3ebb4c7471c99","COPYRIGHT":"b4b2c0de2a05de3372d5c828128413ce82bb7dba2272487b7729f09cc3d3519d","Cargo.toml":"71a942d30769089e36cbe32fade63a078961f3cab23d41ab87f5c8e8efcd9348","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"436bc5a105d8e57dcd8778730f3754f7bf39c14d2f530e4cde4bd2d17a83ec3d","README.md":"01e0a86cc2b9f29fbef04623e3f8e8ca46252fd8d5360b83780fa816f6012011","benches/format_str.rs":"2bcff80d8ab03c83ccc61c756f75a0c7aead821f68b29eeaf38f4ba6344bfc6e","benches/invalid_parse_str.rs":"7f44d2ebec6ee1368d179f12dd09a288d589b252434082b2de134a658b460812","benches/mod.rs":"4733d7aa62dafe3e85ab90dca518b57f350a5538ea5643c5313e63939e884a45","benches/serde_support.rs":"afc719718c9a5d705b60bc9cd39720b921d9ee63ccf11c4b2900f02beea70c1b","benches/slog_support/mod.rs":"1be626f0a6921f4e6bd333ce7ab4a6c4da1fb6f3ae1c503672b4ba168a70c01d","benches/slog_support/parse_str.rs":"9c63ee7047ac8b9d08f02b7081020dd7300f84f302068927c859bbe26cea66a3","benches/valid_parse_str.rs":"7db47c7d25b20c8da03f25107fbea2b5c97fc814ff226e8489eda374f477eeac","src/adapter/core_support/mod.rs":"65bebe5034e450782ec9b0942bd4d758795ee315095fcc3f54412630a987f938","src/adapter/mod.rs":"8f3acffda66148a7095286309f32b823c66826bfe35742fbc63bf1a1e07365a5","src/core_support.rs":"f5c83e3e16a32ae93c76e0e1699f3a07872788e782ce5ee4c2f8229a06a871c8","src/lib.rs":"1d0492819a93dc386e90659a21f6231a01dd172552a1221a298c089db7c671b1","src/parser/core_support.rs":"a8621aa837da2f4cd82f86539c2f3f153c52fcea21c223aa098e5873cbf36d0f","src/parser/mod.rs":"51526e211c95730c830512007da23dfc9f88d1feccc9ddf881c541c1d5e01e2a","src/parser/std_support.rs":"4398d708bd42e8d3cb31eed8ada92615fb1cbfc70bfb3c7cbe952f47c7fe1183","src/prelude.rs":"89553bb9a75e3801698f57fcc099235e5213452738cace4ab190d9444a1adfa4","src/serde_support.rs":"fdc5be309b9fc062832f3a2b114d7a06e8c8357f6569796ed1fc7848a5ebc155","src/slog_support.rs":"370f891a73f99436baecd21f5f2b7d7c89842336aad99167b07ca3f03c48a70c","src/std_support.rs":"50d2bdaaae64e4d22d9180404ac8600944683dcdc51d7de6587a6fdb29193a70","src/test_util.rs":"1dfc1ab68bb403dd6d696fafeb7c00be59c37b51155703f3033ebf1062dd629f","src/u128_support.rs":"97ca20af9117e44bad72f987488efb0173699a22e0c646b268e0fe3dd90355a7","src/v1.rs":"82654b0cadfa56fd0140d78db5ab2d9869ea3d8eaaede0b975d42904317e9da4","src/v3.rs":"d25899b070bd791bc2b784d828399f5bce25f77300765dfd96e76583f31047f3","src/v4.rs":"0cc02041d1215826e9fa2493fb4d97b1d15bc4925450db22eba86123680586ef","src/v5.rs":"11aeea13d38c5e3c5d7cc8bf571ac1ce57a0d46f363b90a991ed43dc1cc9caaa"},"package":"dab5c5526c5caa3d106653401a267fed923e7046f35895ffcb5ca42db64942e6"}

2
third_party/rust/uuid/CONTRIBUTING.md

@ -88,7 +88,7 @@ If the pull request is still a work in progress, prepend`[WIP] ` in your
title. `WIP bot` will make sure that the PR doesn't accidentally get merged.
> Uuid Project has a minimum rust version policy. Currently `uuid` should
compile with at least `1.22.0`, and this is enforced on our CI builds.
compile with at least `1.18.0`, and this is enforced on our CI builds.
When you feel that the PR is ready, please ping one of the maintainers so
they can review your changes.

36
third_party/rust/uuid/Cargo.toml

@ -12,7 +12,7 @@
[package]
name = "uuid"
version = "0.7.4"
version = "0.7.1"
authors = ["Ashley Mannix<ashleymannix@live.com.au>", "Christopher Armstrong", "Dylan DPC<dylan.dpc@gmail.com>", "Hunar Roop Kahlon<hunar.roop@gmail.com>"]
exclude = [".github/**", ".travis.yml", "appveyor.yml", "bors.toml"]
description = "A library to generate and parse UUIDs."
@ -22,8 +22,7 @@ readme = "README.md"
license = "Apache-2.0 OR MIT"
repository = "https://github.com/uuid-rs/uuid"
[package.metadata.docs.rs]
default-target = "x86_64-pc-windows-msvc"
features = ["guid", "serde", "slog", "v1", "v3", "v4", "v5"]
all-features = true
[package.metadata.playground]
features = ["serde", "u128", "v1", "v3", "v4", "v5"]
@ -34,11 +33,11 @@ optional = true
default-features = false
[dependencies.md5]
version = "0.6"
version = "0.3"
optional = true
[dependencies.rand]
version = "0.6"
version = "0.5"
optional = true
[dependencies.serde]
@ -56,9 +55,6 @@ optional = true
[dev-dependencies.bincode]
version = "1.0"
[dev-dependencies.serde_derive]
version = "1.0.79"
[dev-dependencies.serde_json]
version = "1.0"
@ -68,30 +64,10 @@ version = "1.0.56"
[features]
const_fn = ["nightly"]
default = ["std"]
guid = ["winapi"]
nightly = []
std = []
stdweb = ["rand/stdweb"]
u128 = ["byteorder"]
v1 = []
v3 = ["md5"]
v3 = ["md5", "rand"]
v4 = ["rand"]
v5 = ["sha1"]
wasm-bindgen = ["rand/wasm-bindgen"]
[target."cfg(windows)".dependencies.winapi]
version = "0.3"
optional = true
[badges.appveyor]
repository = "uuid-rs/uuid"
[badges.is-it-maintained-issue-resolution]
repository = "uuid-rs/uuid"
[badges.is-it-maintained-open-issues]
repository = "uuid-rs/uuid"
[badges.maintenance]
status = "actively-developed"
[badges.travis-ci]
repository = "uuid-rs/uuid"
v5 = ["sha1", "rand"]

150
third_party/rust/uuid/README.md

@ -1,98 +1,81 @@
uuid
---------
====
[![Latest Version](https://img.shields.io/crates/v/uuid.svg)](https://crates.io/crates/uuid)
[![Build Status](https://travis-ci.org/uuid-rs/uuid.svg?branch=master)](https://travis-ci.org/uuid-rs/uuid)
[![Appveyor Status](https://ci.appveyor.com/api/projects/status/github/uuid-rs/uuid?branch=master&svg=true)](https://ci.appveyor.com/project/KodrAus/uuid)
[![Latest Version](https://img.shields.io/crates/v/uuid.svg)](https://crates.io/crates/uuid)
[![Join the chat at https://gitter.im/uuid-rs/Lobby](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/uuid-rs/Lobby?utm_source=badge&utm_medium=badge&utm_content=badge)
![Minimum rustc version](https://img.shields.io/badge/rustc-1.22.0+-yellow.svg)
[![Build Status](https://ci.appveyor.com/api/projects/status/github/uuid-rs/uuid?branch=master&svg=true)](https://ci.appveyor.com/project/uuid-rs/uuid/branch/master)
[![Build Status](https://travis-ci.org/uuid-rs/uuid.svg?branch=master)](https://travis-ci.org/uuid-rs/uuid)
[![Average time to resolve an issue](https://isitmaintained.com/badge/resolution/uuid-rs/uuid.svg)](https://isitmaintained.com/project/uuid-rs/uuid "Average time to resolve an issue")
[![Percentage of issues still open](https://isitmaintained.com/badge/open/uuid-rs/uuid.svg)](https://isitmaintained.com/project/uuid-rs/uuid "Percentage of issues still open")
---
A Rust library to generate and parse UUIDs.
Generate and parse UUIDs.
Provides support for Universally Unique Identifiers (UUIDs). A UUID is a
unique 128-bit number, stored as 16 octets. UUIDs are used to assign
unique identifiers to entities without requiring a central allocating
authority.
Provides support for Universally Unique Identifiers (UUIDs). A UUID is a unique
128-bit number, stored as 16 octets. UUIDs are used to assign unique identifiers
to entities without requiring a central allocating authority.
They are particularly useful in distributed systems, though can be used in
disparate areas, such as databases and network protocols. Typically a UUID
is displayed in a readable string form as a sequence of hexadecimal digits,
disparate areas, such as databases and network protocols. Typically a UUID is
displayed in a readable string form as a sequence of hexadecimal digits,
separated into groups by hyphens.
The uniqueness property is not strictly guaranteed, however for all
practical purposes, it can be assumed that an unintentional collision would
be extremely unlikely.
The uniqueness property is not strictly guaranteed, however for all practical
purposes, it can be assumed that an unintentional collision would be extremely
unlikely.
## Dependencies
[Documentation](https://docs.rs/uuid)
By default, this crate depends on nothing but `std` and cannot generate
[`Uuid`]s. You need to enable the following Cargo features to enable
various pieces of functionality:
## Usage
* `v1` - adds the `Uuid::new_v1` function and the ability to create a V1
UUID using an implementation of `uuid::v1::ClockSequence` (usually
`uuid::v1::Context`) and a timestamp from `time::timespec`.
* `v3` - adds the `Uuid::new_v3` function and the ability to create a V3
UUID based on the MD5 hash of some data.
* `v4` - adds the `Uuid::new_v4` function and the ability to randomly
generate a `Uuid`.
* `v5` - adds the `Uuid::new_v5` function and the ability to create a V5
UUID based on the SHA1 hash of some data.
* `serde` - adds the ability to serialize and deserialize a `Uuid` using the
`serde` crate.
You need to enable one of the following Cargo features together with the
`v3`, `v4` or `v5` feature if you're targeting the `wasm32` architecture:
* `stdweb` - enables support for `OsRng` on `wasm32-unknown-unknown` via
`stdweb` combined with `cargo-web`
* `wasm-bindgen` - `wasm-bindgen` enables support for `OsRng` on
`wasm32-unknown-unknown` via [`wasm-bindgen`]
By default, `uuid` can be depended on with:
Add this to your `Cargo.toml`:
```toml
[dependencies]
uuid = "0.7"
```
To activate various features, use syntax like:
and this to your crate root:
```toml
[dependencies]
uuid = { version = "0.7", features = ["serde", "v4"] }
```
You can disable default features with:
```toml
[dependencies]
uuid = { version = "0.7", default-features = false }
```rust
extern crate uuid;
```
## Examples
To parse a UUID given in the simple format and print it as a urn:
To parse a simple UUID, then print the version and urn string format:
```rust
extern crate uuid;
use uuid::Uuid;
fn main() {
let my_uuid =
Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
println!("{}", my_uuid.to_urn());
let my_uuid = Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
println!("Parsed a version {} UUID.", my_uuid.get_version_num());
println!("{}", my_uuid);
}
```
To create a new random (V4) UUID and print it out in hexadecimal form:
The library supports 5 versions of UUID:
```ignore,rust
// Note that this requires the `v4` feature enabled in the uuid crate.
Name | Version
---------|----------
Mac | Version 1: MAC address
Dce | Version 2: DCE Security
Md5 | Version 3: MD5 hash
Random | Version 4: Random
Sha1 | Version 5: SHA-1 hash
To create a new random (V4) UUID and print it out in hexadecimal form, first
you'll need to change how you depend on `uuid`:
```toml
[dependencies]
uuid = { version = "0.7", features = ["v4"] }
```
Next, you'll write:
```rust
extern crate uuid;
use uuid::Uuid;
fn main() {
@ -101,35 +84,26 @@ fn main() {
}
```
## Strings
To create a new sha1-hash based (V5) UUID and print it out in hexadecimal form,
you'll also need to change how you depend on `uuid`:
Examples of string representations:
```toml
[dependencies]
uuid = { version = "0.7", features = ["v5"] }
```
* simple: `936DA01F9ABD4d9d80C702AF85C822A8`
* hyphenated: `550e8400-e29b-41d4-a716-446655440000`
* urn: `urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4`
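(All three of the representations listed above are accepted by `Uuid::parse_str`; a quick sketch, not part of the diff.)

```rust
extern crate uuid;

use uuid::Uuid;

fn main() {
    // The same parser handles the simple, hyphenated and urn forms
    let a = Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
    let b = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
    let c = Uuid::parse_str("urn:uuid:F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").unwrap();
    println!("{}\n{}\n{}", a, b, c);
}
```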
Next, you'll write:
```rust
extern crate uuid;
use uuid::Uuid;
fn main() {
let my_uuid = Uuid::new_v5(&Uuid::NAMESPACE_DNS, "foo".as_bytes());
println!("{}", my_uuid);
}
```
## References
* [Wikipedia: Universally Unique Identifier]( http://en.wikipedia.org/wiki/Universally_unique_identifier)
* [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace]( http://tools.ietf.org/html/rfc4122)
[`wasm-bindgen`]: https://github.com/rustwasm/wasm-bindgen
[`Uuid`]: https://docs.rs/uuid/0.7.4/uuid/struct.Uuid.html
---
# License
Licensed under either of
* Apache License, Version 2.0, (LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0)
* MIT license (LICENSE-MIT or https://opensource.org/licenses/MIT)
at your option.
## Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
[Wikipedia: Universally Unique Identifier](https://en.wikipedia.org/wiki/Universally_unique_identifier)

29
third_party/rust/uuid/README.tpl

@ -1,29 +0,0 @@
{{crate}}
---------
[![Latest Version](https://img.shields.io/crates/v/uuid.svg)](https://crates.io/crates/uuid)
[![Join the chat at https://gitter.im/uuid-rs/Lobby](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/uuid-rs/Lobby?utm_source=badge&utm_medium=badge&utm_content=badge)
![Minimum rustc version](https://img.shields.io/badge/rustc-1.22.0+-yellow.svg)
{{badges}}
---
{{readme}}
[`Uuid`]: https://docs.rs/uuid/{{version}}/uuid/struct.Uuid.html
---
# License
Licensed under either of
* Apache License, Version 2.0, (LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0)
* MIT license (LICENSE-MIT or https://opensource.org/licenses/MIT)
at your option.
## Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.

6
third_party/rust/uuid/benches/format_str.rs Normal file → Executable file

@ -41,7 +41,7 @@ fn bench_encode_hyphen(b: &mut Bencher) {
let uuid = Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").unwrap();
b.iter(|| {
let mut buffer = [0_u8; 36];
uuid.to_hyphenated().encode_lower(&mut buffer);
// uuid.to_hyphenated().encode_lower(&mut buffer);
test::black_box(buffer);
});
}
@ -51,7 +51,7 @@ fn bench_encode_simple(b: &mut Bencher) {
let uuid = Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").unwrap();
b.iter(|| {
let mut buffer = [0_u8; 32];
uuid.to_simple().encode_lower(&mut buffer);
// uuid.to_simple().encode_lower(&mut buffer);
test::black_box(buffer);
})
}
@ -61,7 +61,7 @@ fn bench_encode_urn(b: &mut Bencher) {
let uuid = Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").unwrap();
b.iter(|| {
let mut buffer = [0_u8; 36 + 9];
uuid.to_urn().encode_lower(&mut buffer);
// uuid.to_urn().encode_lower(&mut buffer);
test::black_box(buffer);
})
}

0
third_party/rust/uuid/benches/serde_support.rs Normal file → Executable file

84
third_party/rust/uuid/src/adapter/compact.rs

@ -1,84 +0,0 @@
//! Module for use with `#[serde(with = "...")]` to serialize a [`Uuid`]
//! as a `[u8; 16]
//!
//! [`Uuid`]: ../../struct.Uuid.html
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use prelude::*;
/// Serializer for a [`Uuid`] into a `[u8; 16]`
///
/// [`Uuid`]: ../../struct.Uuid.html
pub fn serialize<S: Serializer>(
u: &Uuid,
serializer: S,
) -> Result<S::Ok, S::Error> {
u.as_bytes().serialize(serializer)
}
/// Deserializer from a `[u8; 16]` into a [`Uuid`]
///
/// [`Uuid`]: ../../struct.Uuid.html
pub fn deserialize<'de, D: Deserializer<'de>>(
deserializer: D,
) -> Result<Uuid, D::Error> {
let bytes = <[u8; 16]>::deserialize(deserializer)?;
Ok(Uuid::from_bytes(bytes))
}
#[cfg(test)]
mod tests {
use serde_test;
use prelude::*;
#[derive(Serialize, Debug, Deserialize, PartialEq)]
struct UuidContainer {
#[serde(with = "super")]
u: Uuid,
}
#[test]
fn test_serialize_compact() {
use serde_test::Configure;
let uuid_bytes = b"F9168C5E-CEB2-4F";
let container = UuidContainer {
u: Uuid::from_slice(uuid_bytes).unwrap(),
};
// more complex because of the struct wrapping the actual UUID
// serialization
serde_test::assert_tokens(
&container.compact(),
&[
serde_test::Token::Struct {
name: "UuidContainer",
len: 1,
},
serde_test::Token::Str("u"),
serde_test::Token::Tuple { len: 16 },
serde_test::Token::U8(uuid_bytes[0]),
serde_test::Token::U8(uuid_bytes[1]),
serde_test::Token::U8(uuid_bytes[2]),
serde_test::Token::U8(uuid_bytes[3]),
serde_test::Token::U8(uuid_bytes[4]),
serde_test::Token::U8(uuid_bytes[5]),
serde_test::Token::U8(uuid_bytes[6]),
serde_test::Token::U8(uuid_bytes[7]),
serde_test::Token::U8(uuid_bytes[8]),
serde_test::Token::U8(uuid_bytes[9]),
serde_test::Token::U8(uuid_bytes[10]),
serde_test::Token::U8(uuid_bytes[11]),
serde_test::Token::U8(uuid_bytes[12]),
serde_test::Token::U8(uuid_bytes[13]),
serde_test::Token::U8(uuid_bytes[14]),
serde_test::Token::U8(uuid_bytes[15]),
serde_test::Token::TupleEnd,
serde_test::Token::StructEnd,
],
)
}
}

1100
third_party/rust/uuid/src/adapter/mod.rs

The diff for this file is not shown because of its large size.

224
third_party/rust/uuid/src/builder.rs

@ -1,224 +0,0 @@
// Copyright 2013-2014 The Rust Project Developers.
// Copyright 2018 The Uuid Project Developers.
//
// See the COPYRIGHT file at the top-level directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A Builder type for [`Uuid`]s.
//!
//! [`Uuid`]: ../struct.Uuid.html
use prelude::*;
use BytesError;
/// A builder struct for creating a [`Uuid`]
///
/// # Examples
///
/// Creating a v4 `Uuid` from externally generated bytes:
///
/// ```
/// use uuid::{Builder, Variant, Version};
///
/// # let rng = || [
/// # 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90,
/// # 145, 63, 62,
/// # ];
/// let random_bytes = rng();
/// let uuid = Builder::from_bytes(random_bytes)
/// .set_variant(Variant::RFC4122)
/// .set_version(Version::Random)
/// .build();
/// ```
#[allow(missing_copy_implementations)]
#[derive(Debug)]
pub struct Builder(Uuid);
impl Builder {
/// Creates a `Builder` using the supplied big-endian bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Builder;
/// use uuid::Bytes;
///
/// let bytes: Bytes = [
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63, 62,
/// ];
///
/// let mut builder = Builder::from_bytes(bytes);
/// let uuid = builder.build().to_hyphenated().to_string();
///
/// let expected_uuid = String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An incorrect number of bytes:
///
/// ```compile_fail
/// use uuid::Builder;
/// use uuid::Bytes;
///
/// let bytes: Bytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't compile
///
/// let uuid = Builder::from_bytes(bytes);
/// ```
pub fn from_bytes(b: Bytes) -> Self {
Builder(Uuid::from_bytes(b))
}
/// Creates a `Builder` using the supplied big-endian bytes.
///
/// # Errors
///
/// This function will return an error if `b` has any length other than 16.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Builder;
///
/// let bytes = [4, 54, 67, 12, 43, 2, 98, 76, 32, 50, 87, 5, 1, 33, 43, 87];
///
/// let builder = Builder::from_slice(&bytes);
/// let uuid =
/// builder.map(|mut builder| builder.build().to_hyphenated().to_string());
///
/// let expected_uuid =
/// Ok(String::from("0436430c-2b02-624c-2032-570501212b57"));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An incorrect number of bytes:
///
/// ```
/// use uuid::prelude::*;
/// use uuid::Builder;
///
/// let bytes = [4, 54, 67, 12, 43, 2, 98, 76];
///
/// let builder = Builder::from_slice(&bytes);
///
/// assert!(builder.is_err());
/// ```
pub fn from_slice(b: &[u8]) -> Result<Self, BytesError> {
const BYTES_LEN: usize = 16;
let len = b.len();
if len != BYTES_LEN {
return Err(BytesError::new(BYTES_LEN, len));
}
let mut bytes: Bytes = [0; 16];
bytes.copy_from_slice(b);
Ok(Self::from_bytes(bytes))
}
/// Creates a `Builder` from four field values.
///
/// # Errors
///
/// This function will return an error if `d4`'s length is not 8 bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Builder;
///
/// let d4 = [12, 3, 9, 56, 54, 43, 8, 9];
///
/// let builder = Builder::from_fields(42, 12, 5, &d4);
/// let uuid =
/// builder.map(|mut builder| builder.build().to_hyphenated().to_string());
///
/// let expected_uuid =
/// Ok(String::from("0000002a-000c-0005-0c03-0938362b0809"));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An invalid length:
///
/// ```
/// use uuid::prelude::*;
///
/// let d4 = [12];
///
/// let builder = uuid::Builder::from_fields(42, 12, 5, &d4);
///
/// assert!(builder.is_err());
/// ```
pub fn from_fields(
d1: u32,
d2: u16,
d3: u16,
d4: &[u8],
) -> Result<Self, BytesError> {
Uuid::from_fields(d1, d2, d3, d4).map(Builder)
}
/// Creates a `Builder` with an initial [`Uuid::nil`]
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Builder;
///
/// let mut builder = Builder::nil();
///
/// assert_eq!(
/// builder.build().to_hyphenated().to_string(),
/// "00000000-0000-0000-0000-000000000000"
/// );
/// ```
pub fn nil() -> Self {
Builder(Uuid::nil())
}
/// Specifies the variant of the internal [`Uuid`].
pub fn set_variant(&mut self, v: Variant) -> &mut Self {
self.0.set_variant(v);
self
}
/// Specifies the version number of the internal [`Uuid`].
pub fn set_version(&mut self, v: Version) -> &mut Self {
self.0.set_version(v);
self
}
/// Hands over the internal constructed [`Uuid`]
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Builder;
///
/// let uuid = Builder::nil().build();
///
/// assert_eq!(
/// uuid.to_hyphenated().to_string(),
/// "00000000-0000-0000-0000-000000000000"
/// );
/// ```
pub fn build(&mut self) -> Uuid {
self.0
}
}

22
third_party/rust/uuid/src/core_support.rs

@ -13,28 +13,6 @@ use core::{fmt, str};
use parser;
use prelude::*;
impl From<super::BytesError> for super::Error {
fn from(err: super::BytesError) -> Self {
super::Error::Bytes(err)
}
}
impl fmt::Debug for Uuid {
#[inline]
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(self, f)
}
}
impl fmt::Display for super::Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
super::Error::Bytes(ref err) => fmt::Display::fmt(&err, f),
super::Error::Parse(ref err) => fmt::Display::fmt(&err, f),
}
}
}
impl fmt::Display for Uuid {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::LowerHex::fmt(self, f)

440
third_party/rust/uuid/src/lib.rs

@ -12,7 +12,7 @@
//! Generate and parse UUIDs.
//!
//! Provides support for Universally Unique Identifiers (UUIDs). A UUID is a
//! unique 128-bit number, stored as 16 octets. UUIDs are used to assign
//! unique 128-bit number, stored as 16 octets. UUIDs are used to assign
//! unique identifiers to entities without requiring a central allocating
//! authority.
//!
@ -43,14 +43,6 @@
//! * `serde` - adds the ability to serialize and deserialize a `Uuid` using the
//! `serde` crate.
//!
//! You need to enable one of the following Cargo features together with the
//! `v3`, `v4` or `v5` feature if you're targeting the `wasm32` architecture:
//!
//! * `stdweb` - enables support for `OsRng` on `wasm32-unknown-unknown` via
//! `stdweb` combined with `cargo-web`
//! * `wasm-bindgen` - `wasm-bindgen` enables support for `OsRng` on
//! `wasm32-unknown-unknown` via [`wasm-bindgen`]
//!
//! By default, `uuid` can be depended on with:
//!
//! ```toml
@ -109,10 +101,10 @@
//!
//! # References
//!
//! * [Wikipedia: Universally Unique Identifier]( http://en.wikipedia.org/wiki/Universally_unique_identifier)
//! * [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace]( http://tools.ietf.org/html/rfc4122)
//!
//! [`wasm-bindgen`]: https://github.com/rustwasm/wasm-bindgen
//! * [Wikipedia: Universally Unique Identifier](
//! http://en.wikipedia.org/wiki/Universally_unique_identifier)
//! * [RFC4122: A Universally Unique IDentifier (UUID) URN Namespace](
//! http://tools.ietf.org/html/rfc4122)
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "const_fn", feature(const_fn))]
@ -124,7 +116,7 @@
#![doc(
html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://www.rust-lang.org/favicon.ico",
html_root_url = "https://docs.rs/uuid/0.7.4"
html_root_url = "https://docs.rs/uuid"
)]
#[cfg(feature = "byteorder")]
@ -139,26 +131,18 @@ extern crate rand;
extern crate serde;
#[cfg(all(feature = "serde", test))]
extern crate serde_test;
#[cfg(all(feature = "serde", test))]
#[macro_use]
extern crate serde_derive;
#[cfg(feature = "sha1")]
extern crate sha1;
#[cfg(feature = "slog")]
#[cfg_attr(test, macro_use)]
extern crate slog;
#[cfg(feature = "winapi")]
extern crate winapi;
pub mod adapter;
pub mod builder;
pub mod parser;
pub mod prelude;
#[cfg(feature = "v1")]
pub mod v1;
pub use builder::Builder;
mod core_support;
#[cfg(feature = "serde")]
mod serde_support;
@ -170,41 +154,12 @@ mod std_support;
mod test_util;
#[cfg(feature = "u128")]
mod u128_support;
#[cfg(all(
feature = "v3",
any(
not(target_arch = "wasm32"),
all(
target_arch = "wasm32",
any(feature = "stdweb", feature = "wasm-bindgen")
)
)
))]
#[cfg(feature = "v3")]
mod v3;
#[cfg(all(
feature = "v4",
any(
not(target_arch = "wasm32"),
all(
target_arch = "wasm32",
any(feature = "stdweb", feature = "wasm-bindgen")
)
)
))]
#[cfg(feature = "v4")]
mod v4;
#[cfg(all(
feature = "v5",
any(
not(target_arch = "wasm32"),
all(
target_arch = "wasm32",
any(feature = "stdweb", feature = "wasm-bindgen")
)
)
))]
#[cfg(feature = "v5")]
mod v5;
#[cfg(all(windows, feature = "winapi"))]
mod winapi_support;
/// A 128-bit (16 byte) buffer containing the ID.
pub type Bytes = [u8; 16];
@ -218,38 +173,6 @@ pub struct BytesError {
found: usize,
}
/// A general error that can occur when handling [`Uuid`]s.
///
/// Although specialized error types exist in the crate,
/// the particular error type involved is sometimes hidden
/// until errors need to be handled. This type allows those
/// errors to be enumerated.
///
/// [`Uuid`]: struct.Uuid.html
// TODO: improve the doc
// BODY: This detail should be fine for initial merge
// TODO: write tests for Error
// BODY: not immediately blocking, but should be covered for 1.0
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Error {
/// An error occurred while handling [`Uuid`] bytes.
///
/// See [`BytesError`]
///
/// [`BytesError`]: struct.BytesError.html
/// [`Uuid`]: struct.Uuid.html
Bytes(BytesError),
/// An error occurred while parsing a [`Uuid`] string.
///
/// See [`parser::ParseError`]
///
/// [`parser::ParseError`]: parser/enum.ParseError.html
/// [`Uuid`]: struct.Uuid.html
Parse(parser::ParseError),
}
/// The version of the UUID, denoting the generating algorithm.
#[derive(Debug, PartialEq, Copy, Clone)]
#[repr(C)]
@ -285,7 +208,7 @@ pub enum Variant {
}
/// A Universally Unique Identifier (UUID).
#[derive(Clone, Copy, Eq, Hash, Ord, PartialEq, PartialOrd)]
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Uuid(Bytes);
impl BytesError {
@ -323,7 +246,10 @@ impl BytesError {
#[cfg(feature = "const_fn")]
#[inline]
pub const fn new(expected: usize, found: usize) -> Self {
BytesError { expected, found }
BytesError {
expected: expected,
found: found,
}
}
/// Create a new [`UuidError`].
@ -421,7 +347,7 @@ impl Uuid {
Uuid::from_bytes([0; 16])
}
/// Creates a `Uuid` from four field values in big-endian order.
/// Creates a `Uuid` from four field values.
///
/// # Errors
///
@ -492,64 +418,7 @@ impl Uuid {
]))
}
/// Creates a `Uuid` from four field values in little-endian order.
///
/// The bytes in the `d1`, `d2` and `d3` fields will
/// be converted into big-endian order.
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let d1 = 0xAB3F1097u32;
/// let d2 = 0x501Eu16;
/// let d3 = 0xB736u16;
/// let d4 = [12, 3, 9, 56, 54, 43, 8, 9];
///
/// let uuid = Uuid::from_fields_le(d1, d2, d3, &d4);
/// let uuid = uuid.map(|uuid| uuid.to_hyphenated().to_string());
///
/// let expected_uuid =
/// Ok(String::from("97103fab-1e50-36b7-0c03-0938362b0809"));
///
/// assert_eq!(expected_uuid, uuid);
/// ```
pub fn from_fields_le(
d1: u32,
d2: u16,
d3: u16,
d4: &[u8],
) -> Result<Uuid, BytesError> {
const D4_LEN: usize = 8;
let len = d4.len();
if len != D4_LEN {
return Err(BytesError::new(D4_LEN, len));
}
Ok(Uuid::from_bytes([
d1 as u8,
(d1 >> 8) as u8,
(d1 >> 16) as u8,
(d1 >> 24) as u8,
(d2) as u8,
(d2 >> 8) as u8,
d3 as u8,
(d3 >> 8) as u8,
d4[0],
d4[1],
d4[2],
d4[3],
d4[4],
d4[5],
d4[6],
d4[7],
]))
}
/// Creates a `Uuid` using the supplied big-endian bytes.
/// Creates a `Uuid` using the supplied bytes.
///
/// # Errors
///
@ -600,13 +469,78 @@ impl Uuid {
Ok(Uuid::from_bytes(bytes))
}
/// Creates a `Uuid` using the supplied big-endian bytes.
/// Creates a `Uuid` using the supplied bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Bytes;
/// use uuid::Uuid;
///
/// let bytes: Bytes = [
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63,
/// 62,
/// ];
///
/// let uuid = Uuid::from_bytes(bytes);
/// let uuid = uuid.to_hyphenated().to_string();
///
/// let expected_uuid = String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An incorrect number of bytes:
///
/// ```compile_fail
/// use uuid::Uuid;
/// use uuid::UuidBytes;
///
/// let bytes: UuidBytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't
/// compile
///
/// let uuid = Uuid::from_bytes(bytes);
/// ```
#[cfg(not(feature = "const_fn"))]
pub fn from_bytes(bytes: Bytes) -> Uuid {
Uuid(bytes)
}
/// Creates a `Uuid` using the supplied big-endian bytes.
/// Creates a `Uuid` using the supplied bytes.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use uuid::Bytes;
/// use uuid::Uuid;
///
/// let bytes: Bytes = [
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63,
/// 62,
/// ];
///
/// let uuid = Uuid::from_bytes(bytes);
/// let uuid = uuid.to_hyphenated().to_string();
///
/// let expected_uuid = String::from("46ebd0ee-0e6d-43c9-b90d-ccc35a913f3e");
///
/// assert_eq!(expected_uuid, uuid);
/// ```
///
/// An incorrect number of bytes:
///
/// ```compile_fail
/// use uuid::Bytes;
/// use uuid::Uuid;
///
/// let bytes: Bytes = [4, 54, 67, 12, 43, 2, 98, 76]; // doesn't compile
///
/// let uuid = Uuid::from_bytes(bytes);
/// ```
#[cfg(feature = "const_fn")]
pub const fn from_bytes(bytes: Bytes) -> Uuid {
Uuid(bytes)
@ -624,7 +558,8 @@ impl Uuid {
/// use uuid::Uuid;
///
/// let bytes: Bytes = [
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63, 62,
/// 70, 235, 208, 238, 14, 109, 67, 201, 185, 13, 204, 195, 90, 145, 63,
/// 62,
/// ];
/// let uuid = Uuid::from_random_bytes(bytes);
/// let uuid = uuid.to_hyphenated().to_string();
@ -633,10 +568,6 @@ impl Uuid {
///
/// assert_eq!(expected_uuid, uuid);
/// ```
#[deprecated(
since = "0.7.2",
note = "please use the `uuid::Builder` instead to set the variant and version"
)]
pub fn from_random_bytes(bytes: Bytes) -> Uuid {
let mut uuid = Uuid::from_bytes(bytes);
uuid.set_variant(Variant::RFC4122);
@ -707,7 +638,7 @@ impl Uuid {
}
}
/// Returns the four field values of the UUID in big-endian order.
/// Returns the four field values of the UUID.
///
/// These values can be passed to the `from_fields()` method to get the
/// original `Uuid` back.
@ -718,14 +649,15 @@ impl Uuid {
/// * The second field value represents the second group of (four) hex
/// digits, taken as a big-endian `u16` value. For V1 UUIDs, this field
/// represents the middle 16 bits of the timestamp.
/// * The third field value represents the third group of (four) hex digits,
/// taken as a big-endian `u16` value. The 4 most significant bits give
/// the UUID version, and for V1 UUIDs, the last 12 bits represent the
/// high 12 bits of the timestamp.
/// * The last field value represents the last two groups of four and twelve
/// hex digits, taken in order. The first 1-3 bits of this indicate the
/// UUID variant, and for V1 UUIDs, the next 13-15 bits indicate the clock
/// sequence and the last 48 bits indicate the node ID.
/// * The third field value represents the third group of (four) hex
/// digits, taken as a big-endian `u16` value. The 4 most significant
/// bits give the UUID version, and for V1 UUIDs, the last 12 bits
/// represent the high 12 bits of the timestamp.
/// * The last field value represents the last two groups of four and
/// twelve hex digits, taken in order. The first 1-3 bits of this
/// indicate the UUID variant, and for V1 UUIDs, the next 13-15 bits
/// indicate the clock sequence and the last 48 bits indicate the node
/// ID.
///
/// # Examples
///
@ -763,53 +695,49 @@ impl Uuid {
(d1, d2, d3, d4)
}
/// Returns the four field values of the UUID in little-endian order.
///
/// The bytes in the returned integer fields will
/// be converted from big-endian order.
/// Returns an array of 16 octets containing the UUID data.
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::parse_str("936DA01F-9ABD-4D9D-80C7-02AF85C822A8").unwrap();
/// let uuid = Uuid::nil();
/// assert_eq!(uuid.as_bytes(), &[0; 16]);
///
/// let uuid = Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
/// assert_eq!(
/// uuid.to_fields_le(),
/// (
/// 0x1FA06D93,
/// 0xBD9A,
/// 0x9D4D,
/// b"\x80\xC7\x02\xAF\x85\xC8\x22\xA8"
/// )
/// uuid.as_bytes(),
/// &[
/// 147, 109, 160, 31, 154, 189, 77, 157, 128, 199, 2, 175, 133, 200,
/// 34, 168,
/// ]
/// );
/// ```
pub fn to_fields_le(&self) -> (u32, u16, u16, &[u8; 8]) {
let d1 = u32::from(self.as_bytes()[0])
| u32::from(self.as_bytes()[1]) << 8
| u32::from(self.as_bytes()[2]) << 16
| u32::from(self.as_bytes()[3]) << 24;
let d2 =
u16::from(self.as_bytes()[4]) | u16::from(self.as_bytes()[5]) << 8;
let d3 =
u16::from(self.as_bytes()[6]) | u16::from(self.as_bytes()[7]) << 8;
let d4: &[u8; 8] =
unsafe { &*(self.as_bytes()[8..16].as_ptr() as *const [u8; 8]) };
(d1, d2, d3, d4)
}
/// Returns an array of 16 octets containing the UUID data.
/// This method wraps [`Uuid::as_bytes`]
#[cfg(feature = "const_fn")]
pub const fn as_bytes(&self) -> &Bytes {
&self.0
}
/// Returns an array of 16 octets containing the UUID data.
/// This method wraps [`Uuid::as_bytes`]
///
/// # Examples
///
/// ```
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
/// assert_eq!(uuid.as_bytes(), &[0; 16]);
///
/// let uuid = Uuid::parse_str("936DA01F9ABD4d9d80C702AF85C822A8").unwrap();
/// assert_eq!(
/// uuid.as_bytes(),
/// &[
/// 147, 109, 160, 31, 154, 189, 77, 157, 128, 199, 2, 175, 133, 200,
/// 34, 168
/// ]
/// );
/// ```
#[cfg(not(feature = "const_fn"))]
pub fn as_bytes(&self) -> &Bytes {
&self.0
@ -995,34 +923,33 @@ impl Uuid {
pub fn is_nil(&self) -> bool {
self.as_bytes().iter().all(|&b| b == 0)
}
/// A buffer that can be used for `encode_...` calls, that is
/// guaranteed to be long enough for any of the adapters.
///
/// # Examples
///
/// ```rust
/// use uuid::Uuid;
///
/// let uuid = Uuid::nil();
///
/// assert_eq!(
/// uuid.to_simple().encode_lower(&mut Uuid::encode_buffer()),
/// "00000000000000000000000000000000"
/// );
///
/// assert_eq!(
/// uuid.to_hyphenated()
/// .encode_lower(&mut Uuid::encode_buffer()),
/// "00000000-0000-0000-0000-000000000000"
/// );
///
/// assert_eq!(
/// uuid.to_urn().encode_lower(&mut Uuid::encode_buffer()),
/// "urn:uuid:00000000-0000-0000-0000-000000000000"
/// );
/// ```
pub fn encode_buffer() -> [u8; adapter::Urn::LENGTH] {
// A buffer that can be used for `encode_...` calls, that is
// guaranteed to be long enough for any of the adapters.
//
// # Examples
//
// ```rust
// use uuid::Uuid;
//
// let uuid = Uuid::nil();
//
// assert_eq!(
// uuid.to_simple().encode_lower(&mut Uuid::encode_buffer()),
// "00000000000000000000000000000000"
// );
//
// assert_eq!(
// uuid.to_hyphenated()
// .encode_lower(&mut Uuid::encode_buffer()),
// "00000000-0000-0000-0000-000000000000"
// );
//
// assert_eq!(
// uuid.to_urn().encode_lower(&mut Uuid::encode_buffer()),
// "urn:uuid:00000000-0000-0000-0000-000000000000"
// );
// ```
pub(crate) fn encode_buffer() -> [u8; adapter::Urn::LENGTH] {
[0; adapter::Urn::LENGTH]
}
}
@ -1265,14 +1192,20 @@ mod tests {
// Valid
assert!(Uuid::parse_str("00000000000000000000000000000000").is_ok());
assert!(Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok());
assert!(Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").is_ok());
assert!(
Uuid::parse_str("67e55044-10b1-426f-9247-bb680e5fe0c8").is_ok()
);
assert!(
Uuid::parse_str("F9168C5E-CEB2-4faa-B6BF-329BF39FA1E4").is_ok()
);
assert!(Uuid::parse_str("67e5504410b1426f9247bb680e5fe0c8").is_ok());
assert!(Uuid::parse_str("01020304-1112-2122-3132-414243444546").is_ok());
assert!(Uuid::parse_str(
"urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8"
)
.is_ok());
assert!(
Uuid::parse_str("01020304-1112-2122-3132-414243444546").is_ok()
);
assert!(
Uuid::parse_str("urn:uuid:67e55044-10b1-426f-9247-bb680e5fe0c8")
.is_ok()
);
// Nil
let nil = Uuid::nil();
@ -1425,20 +1358,6 @@ mod tests {
assert_eq!(result, expected);
}
#[test]
fn test_from_fields_le() {
let d1: u32 = 0xa4a3a2a1;
let d2: u16 = 0xb2b1;
let d3: u16 = 0xc2c1;
let d4 = [0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
let u = Uuid::from_fields_le(d1, d2, d3, &d4).unwrap();
let expected = "a1a2a3a4b1b2c1c2d1d2d3d4d5d6d7d8";
let result = u.to_simple().to_string();
assert_eq!(result, expected);
}
#[test]
fn test_as_fields() {
let u = test_util::new();
@ -1467,38 +1386,6 @@ mod tests {
assert_eq!(d4_in, d4_out);
}
#[test]
fn test_fields_le_roundtrip() {
let d1_in: u32 = 0xa4a3a2a1;
let d2_in: u16 = 0xb2b1;
let d3_in: u16 = 0xc2c1;
let d4_in = &[0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
let u = Uuid::from_fields_le(d1_in, d2_in, d3_in, d4_in).unwrap();
let (d1_out, d2_out, d3_out, d4_out) = u.to_fields_le();
assert_eq!(d1_in, d1_out);
assert_eq!(d2_in, d2_out);
assert_eq!(d3_in, d3_out);
assert_eq!(d4_in, d4_out);
}
#[test]
fn test_fields_le_are_actually_le() {
let d1_in: u32 = 0xa1a2a3a4;
let d2_in: u16 = 0xb1b2;
let d3_in: u16 = 0xc1c2;
let d4_in = &[0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8];
let u = Uuid::from_fields(d1_in, d2_in, d3_in, d4_in).unwrap();
let (d1_out, d2_out, d3_out, d4_out) = u.to_fields_le();
assert_eq!(d1_in, d1_out.swap_bytes());
assert_eq!(d2_in, d2_out.swap_bytes());
assert_eq!(d3_in, d3_out.swap_bytes());
assert_eq!(d4_in, d4_out);
}
#[test]
fn test_from_slice() {
let b = [
@ -1549,7 +1436,6 @@ mod tests {
}
#[test]
#[allow(deprecated)]
fn test_from_random_bytes() {
let b = [
0xa1, 0xa2, 0xa3, 0xa4, 0xb1, 0xb2, 0xc1, 0xc2, 0xd1, 0xd2, 0xd3,


@ -12,12 +12,6 @@
use core::fmt;
use parser;
impl From<parser::ParseError> for ::Error {
fn from(err: parser::ParseError) -> Self {
::Error::Parse(err)
}
}
impl<'a> fmt::Display for parser::Expected {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
@ -39,9 +33,13 @@ impl fmt::Display for parser::ParseError {
expected,
found,
index,
} => {
write!(f, "expected {}, found {} at {}", expected, found, index)
}
} => write!(
f,
"expected {:?}, found {} at {}",
expected.chars(),
found,
index
),
parser::ParseError::InvalidGroupCount {
ref expected,
found,

8
third_party/rust/uuid/src/prelude.rs

@ -29,15 +29,13 @@
//!
//! Currently the prelude reexports the following:
//!
//! [`uuid`]`::{`[`Error`], [`Uuid`], [`Variant`], [`Version`],
//! builder::[`Builder`]`}`: The fundamental types used in [`uuid`] crate.
//! [`uuid`]`::{`[`Uuid`], [`Variant`], [`Version`]`}`: The fundamental
//! types used in [`uuid`] crate.
//!
//! [`uuid`]: ../index.html
//! [`Error`]: ../enum.Error.html
//! [`Uuid`]: ../struct.Uuid.html
//! [`Variant`]: ../enum.Variant.html
//! [`Version`]: ../enum.Version.html
//! [`Builder`]: ../builder/struct.Builder.html
//!
#![cfg_attr(feature = "v1",
doc = "
@ -48,6 +46,6 @@ handling uuid version 1. Requires feature `v1`.
[`Context`]: ../v1/struct.Context.html
[`ClockSequence`]: ../v1/trait.ClockSequence.html")]
pub use super::{Builder, Bytes, Error, Uuid, Variant, Version};
pub use super::{Bytes, Uuid, Variant, Version};
#[cfg(feature = "v1")]
pub use v1::{ClockSequence, Context};

6
third_party/rust/uuid/src/serde_support.rs

@ -13,7 +13,6 @@ use core::fmt;
use prelude::*;
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
#[cfg(feature = "serde")]
impl Serialize for Uuid {
fn serialize<S: Serializer>(
&self,
@ -28,7 +27,6 @@ impl Serialize for Uuid {
}
}
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for Uuid {
fn deserialize<D: Deserializer<'de>>(
deserializer: D,
@ -88,8 +86,8 @@ impl<'de> Deserialize<'de> for Uuid {
}
}
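(Since `Uuid` implements `Serialize`/`Deserialize` behind the `serde` feature, a JSON round-trip would look roughly like this sketch; `serde_json` is a dev-dependency of the crate. Not part of the diff.)

```rust
extern crate serde_json;
extern crate uuid;

use uuid::Uuid;

fn main() {
    let id = Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap();
    // In human-readable formats, Uuid serializes as its hyphenated string
    let json = serde_json::to_string(&id).unwrap();
    assert_eq!(json, "\"550e8400-e29b-41d4-a716-446655440000\"");
    let back: Uuid = serde_json::from_str(&json).unwrap();
    assert_eq!(back, id);
}
```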
#[cfg(all(test, feature = "serde"))]
mod serde_tests {
#[cfg(test)]
mod tests {
use serde_test;
use prelude::*;

11
third_party/rust/uuid/src/std_support.rs

@ -11,17 +11,8 @@
use std::error;
impl error::Error for super::BytesError {
impl error::Error for ::BytesError {
fn description(&self) -> &str {
"invalid number of uuid bytes"
}
}
impl error::Error for super::Error {
fn description(&self) -> &str {
match *self {
super::Error::Bytes(ref err) => error::Error::description(err),
super::Error::Parse(ref err) => error::Error::description(err),
}
}
}

5
third_party/rust/uuid/src/v4.rs

@ -31,10 +31,7 @@ impl Uuid {
rng.fill_bytes(&mut bytes);
Builder::from_bytes(bytes)
.set_variant(Variant::RFC4122)
.set_version(Version::Random)
.build()
Self::from_random_bytes(bytes)
}
}

81
third_party/rust/uuid/src/winapi_support.rs

@ -1,81 +0,0 @@
use prelude::*;
use BytesError;
use winapi::shared::guiddef;
#[cfg(feature = "guid")]
impl Uuid {
/// Attempts to create a [`Uuid`] from a little endian winapi `GUID`
///
/// [`Uuid`]: ../struct.Uuid.html
pub fn from_guid(guid: guiddef::GUID) -> Result<Uuid, BytesError> {
Uuid::from_fields_le(
guid.Data1 as u32,
guid.Data2 as u16,
guid.Data3 as u16,
&(guid.Data4 as [u8; 8]),
)
}
/// Converts a [`Uuid`] into a little endian winapi `GUID`
///
/// [`Uuid`]: ../struct.Uuid.html
pub fn to_guid(&self) -> guiddef::GUID {
let (data1, data2, data3, data4) = self.to_fields_le();
guiddef::GUID {
Data1: data1,
Data2: data2,
Data3: data3,
Data4: *data4,
}
}
}
#[cfg(feature = "guid")]
#[cfg(test)]
mod tests {
use prelude::*;
use std::str::FromStr;
use winapi::shared::guiddef;
#[test]
fn test_from_guid() {
let guid = guiddef::GUID {
Data1: 0x4a35229d,
Data2: 0x5527,
Data3: 0x4f30,
Data4: [0x86, 0x47, 0x9d, 0xc5, 0x4e, 0x1e, 0xe1, 0xe8],
};
let uuid = Uuid::from_guid(guid).unwrap();
assert_eq!(
"9d22354a-2755-304f-8647-9dc54e1ee1e8",
uuid.to_hyphenated().to_string()
);
}
#[test]
fn test_guid_roundtrip() {
let guid_in = guiddef::GUID {
Data1: 0x4a35229d,
Data2: 0x5527,
Data3: 0x4f30,
Data4: [0x86, 0x47, 0x9d, 0xc5, 0x4e, 0x1e, 0xe1, 0xe8],
};
let uuid = Uuid::from_guid(guid_in).unwrap();
let guid_out = uuid.to_guid();
assert_eq!(
(guid_in.Data1, guid_in.Data2, guid_in.Data3, guid_in.Data4),
(
guid_out.Data1,
guid_out.Data2,
guid_out.Data3,
guid_out.Data4
)
);
}
}


@ -4,5 +4,5 @@ version = "0.1.0"
authors = ["Jonathan Kingston <jkt@mozilla.com>"]
[dependencies]
uuid = { version = "0.7.2", features = ["v4"] }
uuid = { version = "0.6", features = ["v4"] }
nsstring = { path = "../nsstring" }


@ -7,6 +7,7 @@ use std::fmt::Write;
#[no_mangle]
pub extern "C" fn GkRustUtils_GenerateUUID(res: &mut nsACString) {
let uuid = Uuid::new_v4().to_hyphenated();
// TODO once the vendored Uuid implementation is >7 this likely can use Hyphenated instead of to_string
let uuid = Uuid::new_v4().hyphenated().to_string();
write!(res, "{{{}}}", uuid).expect("Unexpected uuid generated");
}