Bug 1396824 - Part 2: Revendor dependencies. r=ato

Differential Revision: https://phabricator.services.mozilla.com/D34632

--HG--
rename : third_party/rust/crossbeam-utils-0.3.2/Cargo.toml => third_party/rust/crossbeam-channel/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/crossbeam-channel/LICENSE-APACHE
rename : third_party/rust/tokio-uds/LICENSE-MIT => third_party/rust/crossbeam-channel/LICENSE-MIT
rename : third_party/rust/tokio-timer/Cargo.toml => third_party/rust/headers-core/Cargo.toml
rename : third_party/rust/hyper/LICENSE => third_party/rust/headers-core/LICENSE
rename : third_party/rust/tokio-timer/Cargo.toml => third_party/rust/headers-derive/Cargo.toml
rename : third_party/rust/hyper/LICENSE => third_party/rust/headers-derive/LICENSE
rename : third_party/rust/hyper/LICENSE => third_party/rust/headers/LICENSE
rename : third_party/rust/tokio-timer/Cargo.toml => third_party/rust/input_buffer/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/input_buffer/LICENSE-APACHE
rename : third_party/rust/hyper/LICENSE => third_party/rust/input_buffer/LICENSE-MIT
rename : third_party/rust/tokio-executor/Cargo.toml => third_party/rust/mime/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/mime/LICENSE-APACHE
rename : third_party/rust/hyper/LICENSE => third_party/rust/mime/LICENSE-MIT
rename : third_party/rust/hyper/LICENSE => third_party/rust/mime_guess/LICENSE
rename : third_party/rust/scoped-tls/.cargo-checksum.json => third_party/rust/scoped-tls-0.1.0/.cargo-checksum.json
rename : third_party/rust/scoped-tls/Cargo.toml => third_party/rust/scoped-tls-0.1.0/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/scoped-tls-0.1.0/LICENSE-APACHE
rename : third_party/rust/tokio-uds/LICENSE-MIT => third_party/rust/scoped-tls-0.1.0/LICENSE-MIT
rename : third_party/rust/scoped-tls/README.md => third_party/rust/scoped-tls-0.1.0/README.md
rename : third_party/rust/scoped-tls/src/lib.rs => third_party/rust/scoped-tls-0.1.0/src/lib.rs
rename : third_party/rust/tokio-uds/LICENSE-APACHE => third_party/rust/serde_urlencoded/LICENSE-APACHE
rename : third_party/rust/tokio-uds/LICENSE-MIT => third_party/rust/serde_urlencoded/LICENSE-MIT
rename : third_party/rust/tokio-executor/Cargo.toml => third_party/rust/tokio-current-thread/Cargo.toml
rename : third_party/rust/tokio-uds/LICENSE-MIT => third_party/rust/tokio-current-thread/LICENSE
rename : third_party/rust/tokio-timer/README.md => third_party/rust/tokio-current-thread/README.md
rename : third_party/rust/tokio/src/executor/current_thread/mod.rs => third_party/rust/tokio-current-thread/src/lib.rs
rename : third_party/rust/tokio/src/executor/current_thread/scheduler.rs => third_party/rust/tokio-current-thread/src/scheduler.rs
rename : third_party/rust/tokio-executor/src/lib.rs => third_party/rust/tokio-executor/src/executor.rs
rename : third_party/rust/tokio-timer/src/timer/level.rs => third_party/rust/tokio-timer/src/wheel/level.rs
rename : third_party/rust/tokio-uds/.cargo-checksum.json => third_party/rust/tokio-uds-0.1.7/.cargo-checksum.json
rename : third_party/rust/tokio-uds/Cargo.toml => third_party/rust/tokio-uds-0.1.7/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/tokio-uds-0.1.7/LICENSE-APACHE
rename : third_party/rust/tokio-uds/LICENSE-MIT => third_party/rust/tokio-uds-0.1.7/LICENSE-MIT
rename : third_party/rust/tokio-uds/README.md => third_party/rust/tokio-uds-0.1.7/README.md
rename : third_party/rust/tokio-uds/src/frame.rs => third_party/rust/tokio-uds-0.1.7/src/frame.rs
rename : third_party/rust/tokio-uds/src/lib.rs => third_party/rust/tokio-uds-0.1.7/src/lib.rs
rename : third_party/rust/tokio-uds/src/ucred.rs => third_party/rust/tokio-uds-0.1.7/src/ucred.rs
rename : third_party/rust/tokio-uds/LICENSE-MIT => third_party/rust/tokio-uds/LICENSE
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/tungstenite/LICENSE-APACHE
rename : third_party/rust/hyper/LICENSE => third_party/rust/tungstenite/LICENSE-MIT
rename : third_party/rust/tokio-timer/Cargo.toml => third_party/rust/unicase-1.4.2/Cargo.toml
rename : third_party/rust/hyper/LICENSE => third_party/rust/unicase-1.4.2/LICENSE
rename : third_party/rust/crossbeam-utils/Cargo.toml => third_party/rust/unicase/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/unicase/LICENSE-APACHE
rename : third_party/rust/hyper/LICENSE => third_party/rust/unicase/LICENSE-MIT
rename : third_party/rust/hyper/LICENSE => third_party/rust/urlencoding/LICENSE
rename : third_party/rust/tokio-executor/Cargo.toml => third_party/rust/utf-8/Cargo.toml
rename : third_party/rust/tokio-timer/Cargo.toml => third_party/rust/version_check/Cargo.toml
rename : third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE => third_party/rust/version_check/LICENSE-APACHE
rename : third_party/rust/hyper/LICENSE => third_party/rust/warp/LICENSE
extra : moz-landing-system : lando
Author: Bastien Orivel, 2019-06-21 17:18:50 +00:00
Parent: 761235d949
Commit: 11f31bfdc8
603 changed files: 83,519 additions and 10,179 deletions

third_party/rust/crossbeam-channel/.cargo-checksum.json (vendored, new file, 1 line)

@@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"615aff5b524bbefd40063255084cc644e3ae37e043799b28ff55e67bfd2b3dc4","Cargo.toml":"fce8c5285af6e361f694dd00bda2d6162e9f896c60b7f81c206b7eb769728726","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","LICENSE-THIRD-PARTY":"924a49392dc8304def57586be4ebd69aaf51e16fd245b55b4b69ad2cce6b715a","README.md":"077c5ddbd471d4a21f5b183e415b708e5100fcad8baed335fd4ad5b6f959a8be","examples/fibonacci.rs":"8755bcb9cf05e391d8679fbb51db40ed9692703c3b66341cd1c1e4cca2068874","examples/matching.rs":"1fd35488bf42e1782a339b9691a4e82dcf23ad12502809e33555d46c91739476","examples/stopwatch.rs":"4e90ce134475859a421da0a095baea52a575b03e41fb8fb50cf47446f422ee6e","src/channel.rs":"4d398567823e2e8189de91d67ef58e4b61d1b8d3f1e373da2664ed40ae154717","src/context.rs":"5c57679e0d3d63f2df11c7b74c70b5d97a6c2e50a6b2fcd60f1f077d59b88598","src/counter.rs":"3a9c1b0fa94beeabd349789ca94453f0cb3aa67f5d56bc619a7e029956fd3dbf","src/err.rs":"1a9ac9d7f4d5561f9c3e513b2f776782783954efa0bf9428598f3ad9ccb9d5c9","src/flavors/after.rs":"4f761618efe21036145e44f01506bdfbc2524879368ac138600e0feed921a6a7","src/flavors/array.rs":"be4f7a50acef56b170f288df30722d69fd9388496c8a6bb480d0322ead6d79c4","src/flavors/list.rs":"e9c615aa0f04e222a01dfa6edc9cd1a83036d924db999480f83baa1123779d07","src/flavors/mod.rs":"a5af9b6105207e293c0d64928b4486fb1da9bfe0318354c66c8b5069e41ec31f","src/flavors/never.rs":"86e21b4d8b154e5d795cf72c7203a1d16a846c4d670095c8b592c12569f35a98","src/flavors/tick.rs":"3ae19df71310972e7b5fd8cac17d90bff683e1493ed3db26d0d3d329dff133ff","src/flavors/zero.rs":"cae6eb914d36e58810704d7200b1dc90ff6cc03b23a97fa3ac04fbd00f18381a","src/lib.rs":"bc843b55c65d3a20ad1fc56da675e48232af5373339f2e513b4f6d4460e0000d","src/select.rs":"99797f44c141eea8a1a64cd6fef629812680f6ec3393da1bb7e4b3a701028ec6","src/select_macro.rs":"e8f3998ab86f54e46acaa48bd6bba9b2e955ed11710d23c46f256295575fd638","src/utils.rs":"ebf230fed429c353f6202fe10c33dcca69a0917a947665a7ee03d230b88a29bf","src/waker.rs":"e85114e43a4db4befadd79b574c2567f3076e460791c2ea6b9588ee824d2c988","tests/after.rs":"9336716edbb6e3145f006e62af59a4e82160ecfb8748fac174b844fb81442d45","tests/array.rs":"1e87f8e2e75dcbaf0dbc75394cfa329c0fd5719db0bdb9ba2aa0cc86b2a18044","tests/golang.rs":"08c13a4fafd25da03809f20965203e4e88bdf936ff53342f2e43430e6603d3fa","tests/iter.rs":"3beaac492a9df39d2eae31d7cdb88f4ee3bd3cb57e2c5c9705811b8ee2abe744","tests/list.rs":"0a664517f3ff49e0885f6c649cbc76f5e5c7a4940096d7a1c98728cbe1655948","tests/mpsc.rs":"d268beb7bcb0be864bdb745585979c97343b82761705907fd90b0ec85f2db7d4","tests/never.rs":"cd455a4c78403d9a96fe0f3a4e968164cca533cc85c96aaa4558987f9b088fcc","tests/ready.rs":"7bc0dd86cd987dcae7db84c9f4379a2421e088b4e5dbdc07affb10c853fc7f55","tests/select.rs":"1ace0bbd2aecf488827d2c16d5d32b47baf00b0a1f7f7470bf9c8e16ddf92dad","tests/select_macro.rs":"d303abb0a3f7ff9bb3b5253951d7d17bfb30e4f77fb66d11c40a063691f9a9ae","tests/thread_locals.rs":"e9d25086d4bc590bacdaf20b5ff0ff3741d3403a413a8b817eaf15f61fb5e644","tests/tick.rs":"011c39a85f1e0427e61e7afc0860cf6dc29d41f7e8be9709684e4ffdc26ef9b8","tests/zero.rs":"983998a52173fba22045fb8390566a59ebb0392834477ab94cac934d637a3231"},"package":"8d4f5844607ce8da3fff431e7dba56cda8bfcc570aa50bee36adba8a32b8cad7"}

third_party/rust/crossbeam-channel/CHANGELOG.md (vendored, new file, 133 lines)

@@ -0,0 +1,133 @@
# Version 0.3.7
- Remove `parking_lot` and `rand` dependencies.
- Expand documentation.
- Implement `Default` for `Select`.
- Make `size_of::<Receiver<T>>()` smaller.
- Several minor optimizations.
- Add more tests.
# Version 0.3.6
- Fix a bug in initialization of unbounded channels.
# Version 0.3.5
- New implementation for unbounded channels.
- A number of small performance improvements.
- Remove `crossbeam-epoch` dependency.
# Version 0.3.4
- Bump `crossbeam-epoch` to `0.7`.
- Improve documentation.
# Version 0.3.3
- Relax the lifetime in `SelectedOperation<'_>`.
- Add `Select::try_ready()`, `Select::ready()`, and `Select::ready_timeout()`.
- Update licensing notices.
- Improve documentation.
- Add methods `is_disconnected()`, `is_timeout()`, `is_empty()`, and `is_full()` on error types.
# Version 0.3.2
- More elaborate licensing notices.
# Version 0.3.1
- Update `crossbeam-utils` to `0.6`.
# Version 0.3.0
- Add a special `never` channel type.
- Dropping all receivers now closes the channel.
- The interface of sending and receiving methods is now very similar to those in v0.1.
- The syntax for `send` in `select!` is now `send(sender, msg) -> res => body`.
- The syntax for `recv` in `select!` is now `recv(receiver) -> res => body`.
- New, more efficient interface for `Select` without callbacks.
- Timeouts can be specified in `select!`.
# Version 0.2.6
- `Select` struct that can add cases dynamically.
- More documentation (in particular, the FAQ section).
- Optimize contended sends/receives in unbounded channels.
# Version 0.2.5
- Use `LocalKey::try_with` instead of `LocalKey::with`.
- Remove helper macros `__crossbeam_channel*`.
# Version 0.2.4
- Make `select!` linearizable with other channel operations.
- Update `crossbeam-utils` to `0.5.0`.
- Update `parking_lot` to `0.6.3`.
- Remove Mac OS X tests.
# Version 0.2.3
- Add Mac OS X tests.
- Lower some memory orderings.
- Eliminate calls to `mem::uninitialized`, which caused bugs with ZST.
# Version 0.2.2
- Add more tests.
- Update `crossbeam-epoch` to 0.5.0
- Initialize the RNG seed to a random value.
- Replace `libc::abort` with `std::process::abort`.
- Ignore clippy warnings in `select!`.
- Better interaction of `select!` with the NLL borrow checker.
# Version 0.2.1
- Fix compilation errors when using `select!` with `#[deny(unsafe_code)]`.
# Version 0.2.0
- Implement `IntoIterator<Item = T>` for `Receiver<T>`.
- Add a new `select!` macro.
- Add special channels `after` and `tick`.
- Dropping receivers doesn't close the channel anymore.
- Change the signature of `recv`, `send`, and `try_recv`.
- Remove `Sender::is_closed` and `Receiver::is_closed`.
- Remove `Sender::close` and `Receiver::close`.
- Remove `Sender::send_timeout` and `Receiver::recv_timeout`.
- Remove `Sender::try_send`.
- Remove `Select` and `select_loop!`.
- Remove all error types.
- Remove `Iter`, `TryIter`, and `IntoIter`.
- Remove the `nightly` feature.
- Remove ordering operators for `Sender` and `Receiver`.
# Version 0.1.3
- Add `Sender::disconnect` and `Receiver::disconnect`.
- Implement comparison operators for `Sender` and `Receiver`.
- Allow arbitrary patterns in place of `msg` in `recv(r, msg)`.
- Add a few conversion impls between error types.
- Add benchmarks for `atomicring` and `mpmc`.
- Add benchmarks for different message sizes.
- Documentation improvements.
- Update `crossbeam-epoch` to 0.4.0
- Update `crossbeam-utils` to 0.3.0
- Update `parking_lot` to 0.5
- Update `rand` to 0.4
# Version 0.1.2
- Allow conditional cases in `select_loop!` macro.
- Fix typos in documentation.
- Fix deadlock in selection when all channels are disconnected and a timeout is specified.
# Version 0.1.1
- Implement `Debug` for `Sender`, `Receiver`, `Iter`, `TryIter`, `IntoIter`, and `Select`.
- Implement `Default` for `Select`.
# Version 0.1.0
- First implementation of the channels.
- Add `select_loop!` macro by @TimNN.
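
The 0.3.0 syntax notes above are easier to read next to a concrete sketch. This is my own example, not part of the vendored changelog, and assumes crossbeam-channel 0.3 in a 2015-edition crate:

```rust
#[macro_use]
extern crate crossbeam_channel;

use crossbeam_channel::unbounded;

fn main() {
    let (s, r) = unbounded();

    // `send(sender, msg) -> res => body` and `recv(receiver) -> res => body`,
    // as described in the 0.3.0 notes. A send on an unbounded channel is
    // always ready, so this select! takes the send arm.
    select! {
        send(s, "hello") -> res => res.unwrap(),
        recv(r) -> msg => println!("got {:?}", msg),
    }
}
```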

third_party/rust/crossbeam-channel/Cargo.toml (vendored, renamed from third_party/rust/crossbeam-utils-0.3.2/Cargo.toml)

@@ -11,21 +11,24 @@
 # will likely look very different (and much more reasonable)
 [package]
-name = "crossbeam-utils"
-version = "0.3.2"
+name = "crossbeam-channel"
+version = "0.3.7"
 authors = ["The Crossbeam Project Developers"]
-description = "Utilities for concurrent programming"
-homepage = "https://github.com/crossbeam-rs/crossbeam-utils"
-documentation = "https://docs.rs/crossbeam-utils"
+description = "Multi-producer multi-consumer channels for message passing"
+homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel"
+documentation = "https://docs.rs/crossbeam-channel"
 readme = "README.md"
-keywords = ["scoped", "thread", "atomic", "cache"]
+keywords = ["channel", "mpmc", "select", "golang", "message"]
 categories = ["algorithms", "concurrency", "data-structures"]
 license = "MIT/Apache-2.0"
-repository = "https://github.com/crossbeam-rs/crossbeam-utils"
-[dependencies.cfg-if]
-version = "0.1"
+repository = "https://github.com/crossbeam-rs/crossbeam"
+[dependencies.crossbeam-utils]
+version = "0.6"
-[features]
-default = ["use_std"]
-nightly = []
-use_std = []
+[dependencies.smallvec]
+version = "0.6.2"
+[dev-dependencies.rand]
+version = "0.6"
+[dev-dependencies.signal-hook]
+version = "0.1.5"

third_party/rust/crossbeam-channel/LICENSE-APACHE (vendored, renamed from third_party/rust/crossbeam-utils-0.3.2/LICENSE-APACHE)

third_party/rust/crossbeam-channel/LICENSE-MIT (vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

third_party/rust/crossbeam-channel/LICENSE-THIRD-PARTY (vendored, new file, 625 lines)

@@ -0,0 +1,625 @@
===============================================================================
Bounded MPMC queue
http://www.1024cores.net/home/code-license
Copyright (c) 2010-2011 Dmitry Vyukov.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of Dmitry Vyukov.
===============================================================================
matching.go
https://creativecommons.org/licenses/by/3.0/legalcode
Creative Commons Legal Code
Attribution 3.0 Unported
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS LICENSE DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE INFORMATION PROVIDED, AND DISCLAIMS LIABILITY FOR
DAMAGES RESULTING FROM ITS USE.
License
THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE
COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY
COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS
AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE
TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY
BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS
CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND
CONDITIONS.
1. Definitions
a. "Adaptation" means a work based upon the Work, or upon the Work and
other pre-existing works, such as a translation, adaptation,
derivative work, arrangement of music or other alterations of a
literary or artistic work, or phonogram or performance and includes
cinematographic adaptations or any other form in which the Work may be
recast, transformed, or adapted including in any form recognizably
derived from the original, except that a work that constitutes a
Collection will not be considered an Adaptation for the purpose of
this License. For the avoidance of doubt, where the Work is a musical
work, performance or phonogram, the synchronization of the Work in
timed-relation with a moving image ("synching") will be considered an
Adaptation for the purpose of this License.
b. "Collection" means a collection of literary or artistic works, such as
encyclopedias and anthologies, or performances, phonograms or
broadcasts, or other works or subject matter other than works listed
in Section 1(f) below, which, by reason of the selection and
arrangement of their contents, constitute intellectual creations, in
which the Work is included in its entirety in unmodified form along
with one or more other contributions, each constituting separate and
independent works in themselves, which together are assembled into a
collective whole. A work that constitutes a Collection will not be
considered an Adaptation (as defined above) for the purposes of this
License.
c. "Distribute" means to make available to the public the original and
copies of the Work or Adaptation, as appropriate, through sale or
other transfer of ownership.
d. "Licensor" means the individual, individuals, entity or entities that
offer(s) the Work under the terms of this License.
e. "Original Author" means, in the case of a literary or artistic work,
the individual, individuals, entity or entities who created the Work
or if no individual or entity can be identified, the publisher; and in
addition (i) in the case of a performance the actors, singers,
musicians, dancers, and other persons who act, sing, deliver, declaim,
play in, interpret or otherwise perform literary or artistic works or
expressions of folklore; (ii) in the case of a phonogram the producer
being the person or legal entity who first fixes the sounds of a
performance or other sounds; and, (iii) in the case of broadcasts, the
organization that transmits the broadcast.
f. "Work" means the literary and/or artistic work offered under the terms
of this License including without limitation any production in the
literary, scientific and artistic domain, whatever may be the mode or
form of its expression including digital form, such as a book,
pamphlet and other writing; a lecture, address, sermon or other work
of the same nature; a dramatic or dramatico-musical work; a
choreographic work or entertainment in dumb show; a musical
composition with or without words; a cinematographic work to which are
assimilated works expressed by a process analogous to cinematography;
a work of drawing, painting, architecture, sculpture, engraving or
lithography; a photographic work to which are assimilated works
expressed by a process analogous to photography; a work of applied
art; an illustration, map, plan, sketch or three-dimensional work
relative to geography, topography, architecture or science; a
performance; a broadcast; a phonogram; a compilation of data to the
extent it is protected as a copyrightable work; or a work performed by
a variety or circus performer to the extent it is not otherwise
considered a literary or artistic work.
g. "You" means an individual or entity exercising rights under this
License who has not previously violated the terms of this License with
respect to the Work, or who has received express permission from the
Licensor to exercise rights under this License despite a previous
violation.
h. "Publicly Perform" means to perform public recitations of the Work and
to communicate to the public those public recitations, by any means or
process, including by wire or wireless means or public digital
performances; to make available to the public Works in such a way that
members of the public may access these Works from a place and at a
place individually chosen by them; to perform the Work to the public
by any means or process and the communication to the public of the
performances of the Work, including by public digital performance; to
broadcast and rebroadcast the Work by any means including signs,
sounds or images.
i. "Reproduce" means to make copies of the Work by any means including
without limitation by sound or visual recordings and the right of
fixation and reproducing fixations of the Work, including storage of a
protected performance or phonogram in digital form or other electronic
medium.
2. Fair Dealing Rights. Nothing in this License is intended to reduce,
limit, or restrict any uses free from copyright or rights arising from
limitations or exceptions that are provided for in connection with the
copyright protection under copyright law or other applicable laws.
3. License Grant. Subject to the terms and conditions of this License,
Licensor hereby grants You a worldwide, royalty-free, non-exclusive,
perpetual (for the duration of the applicable copyright) license to
exercise the rights in the Work as stated below:
a. to Reproduce the Work, to incorporate the Work into one or more
Collections, and to Reproduce the Work as incorporated in the
Collections;
b. to create and Reproduce Adaptations provided that any such Adaptation,
including any translation in any medium, takes reasonable steps to
clearly label, demarcate or otherwise identify that changes were made
to the original Work. For example, a translation could be marked "The
original work was translated from English to Spanish," or a
modification could indicate "The original work has been modified.";
c. to Distribute and Publicly Perform the Work including as incorporated
in Collections; and,
d. to Distribute and Publicly Perform Adaptations.
e. For the avoidance of doubt:
i. Non-waivable Compulsory License Schemes. In those jurisdictions in
which the right to collect royalties through any statutory or
compulsory licensing scheme cannot be waived, the Licensor
reserves the exclusive right to collect such royalties for any
exercise by You of the rights granted under this License;
ii. Waivable Compulsory License Schemes. In those jurisdictions in
which the right to collect royalties through any statutory or
compulsory licensing scheme can be waived, the Licensor waives the
exclusive right to collect such royalties for any exercise by You
of the rights granted under this License; and,
iii. Voluntary License Schemes. The Licensor waives the right to
collect royalties, whether individually or, in the event that the
Licensor is a member of a collecting society that administers
voluntary licensing schemes, via that society, from any exercise
by You of the rights granted under this License.
The above rights may be exercised in all media and formats whether now
known or hereafter devised. The above rights include the right to make
such modifications as are technically necessary to exercise the rights in
other media and formats. Subject to Section 8(f), all rights not expressly
granted by Licensor are hereby reserved.
4. Restrictions. The license granted in Section 3 above is expressly made
subject to and limited by the following restrictions:
a. You may Distribute or Publicly Perform the Work only under the terms
of this License. You must include a copy of, or the Uniform Resource
Identifier (URI) for, this License with every copy of the Work You
Distribute or Publicly Perform. You may not offer or impose any terms
on the Work that restrict the terms of this License or the ability of
the recipient of the Work to exercise the rights granted to that
recipient under the terms of the License. You may not sublicense the
Work. You must keep intact all notices that refer to this License and
to the disclaimer of warranties with every copy of the Work You
Distribute or Publicly Perform. When You Distribute or Publicly
Perform the Work, You may not impose any effective technological
measures on the Work that restrict the ability of a recipient of the
Work from You to exercise the rights granted to that recipient under
the terms of the License. This Section 4(a) applies to the Work as
incorporated in a Collection, but this does not require the Collection
apart from the Work itself to be made subject to the terms of this
License. If You create a Collection, upon notice from any Licensor You
must, to the extent practicable, remove from the Collection any credit
as required by Section 4(b), as requested. If You create an
Adaptation, upon notice from any Licensor You must, to the extent
practicable, remove from the Adaptation any credit as required by
Section 4(b), as requested.
b. If You Distribute, or Publicly Perform the Work or any Adaptations or
Collections, You must, unless a request has been made pursuant to
Section 4(a), keep intact all copyright notices for the Work and
provide, reasonable to the medium or means You are utilizing: (i) the
name of the Original Author (or pseudonym, if applicable) if supplied,
and/or if the Original Author and/or Licensor designate another party
or parties (e.g., a sponsor institute, publishing entity, journal) for
attribution ("Attribution Parties") in Licensor's copyright notice,
terms of service or by other reasonable means, the name of such party
or parties; (ii) the title of the Work if supplied; (iii) to the
extent reasonably practicable, the URI, if any, that Licensor
specifies to be associated with the Work, unless such URI does not
refer to the copyright notice or licensing information for the Work;
and (iv) , consistent with Section 3(b), in the case of an Adaptation,
a credit identifying the use of the Work in the Adaptation (e.g.,
"French translation of the Work by Original Author," or "Screenplay
based on original Work by Original Author"). The credit required by
this Section 4 (b) may be implemented in any reasonable manner;
provided, however, that in the case of a Adaptation or Collection, at
a minimum such credit will appear, if a credit for all contributing
authors of the Adaptation or Collection appears, then as part of these
credits and in a manner at least as prominent as the credits for the
other contributing authors. For the avoidance of doubt, You may only
use the credit required by this Section for the purpose of attribution
in the manner set out above and, by exercising Your rights under this
License, You may not implicitly or explicitly assert or imply any
connection with, sponsorship or endorsement by the Original Author,
Licensor and/or Attribution Parties, as appropriate, of You or Your
use of the Work, without the separate, express prior written
permission of the Original Author, Licensor and/or Attribution
Parties.
c. Except as otherwise agreed in writing by the Licensor or as may be
otherwise permitted by applicable law, if You Reproduce, Distribute or
Publicly Perform the Work either by itself or as part of any
Adaptations or Collections, You must not distort, mutilate, modify or
take other derogatory action in relation to the Work which would be
prejudicial to the Original Author's honor or reputation. Licensor
agrees that in those jurisdictions (e.g. Japan), in which any exercise
of the right granted in Section 3(b) of this License (the right to
make Adaptations) would be deemed to be a distortion, mutilation,
modification or other derogatory action prejudicial to the Original
Author's honor and reputation, the Licensor will waive or not assert,
as appropriate, this Section, to the fullest extent permitted by the
applicable national law, to enable You to reasonably exercise Your
right under Section 3(b) of this License (right to make Adaptations)
but not otherwise.
5. Representations, Warranties and Disclaimer
UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING, LICENSOR
OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY
KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE,
INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTIBILITY,
FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF
LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS,
WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION
OF IMPLIED WARRANTIES, SO SUCH EXCLUSION MAY NOT APPLY TO YOU.
6. Limitation on Liability. EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE
LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR
ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES
ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS
BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
7. Termination
a. This License and the rights granted hereunder will terminate
automatically upon any breach by You of the terms of this License.
Individuals or entities who have received Adaptations or Collections
from You under this License, however, will not have their licenses
terminated provided such individuals or entities remain in full
compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will
survive any termination of this License.
b. Subject to the above terms and conditions, the license granted here is
perpetual (for the duration of the applicable copyright in the Work).
Notwithstanding the above, Licensor reserves the right to release the
Work under different license terms or to stop distributing the Work at
any time; provided, however that any such election will not serve to
withdraw this License (or any other license that has been, or is
required to be, granted under the terms of this License), and this
License will continue in full force and effect unless terminated as
stated above.
8. Miscellaneous
a. Each time You Distribute or Publicly Perform the Work or a Collection,
the Licensor offers to the recipient a license to the Work on the same
terms and conditions as the license granted to You under this License.
b. Each time You Distribute or Publicly Perform an Adaptation, Licensor
offers to the recipient a license to the original Work on the same
terms and conditions as the license granted to You under this License.
c. If any provision of this License is invalid or unenforceable under
applicable law, it shall not affect the validity or enforceability of
the remainder of the terms of this License, and without further action
by the parties to this agreement, such provision shall be reformed to
the minimum extent necessary to make such provision valid and
enforceable.
d. No term or provision of this License shall be deemed waived and no
breach consented to unless such waiver or consent shall be in writing
and signed by the party to be charged with such waiver or consent.
e. This License constitutes the entire agreement between the parties with
respect to the Work licensed here. There are no understandings,
agreements or representations with respect to the Work not specified
here. Licensor shall not be bound by any additional provisions that
may appear in any communication from You. This License may not be
modified without the mutual written agreement of the Licensor and You.
f. The rights granted under, and the subject matter referenced, in this
License were drafted utilizing the terminology of the Berne Convention
for the Protection of Literary and Artistic Works (as amended on
September 28, 1979), the Rome Convention of 1961, the WIPO Copyright
Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996
and the Universal Copyright Convention (as revised on July 24, 1971).
These rights and subject matter take effect in the relevant
jurisdiction in which the License terms are sought to be enforced
according to the corresponding provisions of the implementation of
those treaty provisions in the applicable national law. If the
standard suite of rights granted under applicable copyright law
includes additional rights not granted under this License, such
additional rights are deemed to be included in the License; this
License is not intended to restrict the license of any rights under
applicable law.
Creative Commons Notice
Creative Commons is not a party to this License, and makes no warranty
whatsoever in connection with the Work. Creative Commons will not be
liable to You or any party on any legal theory for any damages
whatsoever, including without limitation any general, special,
incidental or consequential damages arising in connection to this
license. Notwithstanding the foregoing two (2) sentences, if Creative
Commons has expressly identified itself as the Licensor hereunder, it
shall have all rights and obligations of Licensor.
Except for the limited purpose of indicating to the public that the
Work is licensed under the CCPL, Creative Commons does not authorize
the use by either party of the trademark "Creative Commons" or any
related trademark or logo of Creative Commons without the prior
written consent of Creative Commons. Any permitted use will be in
compliance with Creative Commons' then-current trademark usage
guidelines, as may be published on its website or otherwise made
available upon request from time to time. For the avoidance of doubt,
this trademark restriction does not form part of this License.
Creative Commons may be contacted at https://creativecommons.org/.
===============================================================================
The Go Programming Language
https://golang.org/LICENSE
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
===============================================================================
The Rust Programming Language
https://github.com/rust-lang/rust/blob/master/LICENSE-MIT
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
===============================================================================
The Rust Programming Language
https://github.com/rust-lang/rust/blob/master/LICENSE-APACHE
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/crossbeam-channel/README.md (vendored, new file, 88 lines)

@@ -0,0 +1,88 @@
# Crossbeam Channel
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)](
https://travis-ci.org/crossbeam-rs/crossbeam)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam-channel)
[![Cargo](https://img.shields.io/crates/v/crossbeam-channel.svg)](
https://crates.io/crates/crossbeam-channel)
[![Documentation](https://docs.rs/crossbeam-channel/badge.svg)](
https://docs.rs/crossbeam-channel)
[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)](
https://www.rust-lang.org)
This crate provides multi-producer multi-consumer channels for message passing.
It is an alternative to [`std::sync::mpsc`] with more features and better performance.
Some highlights:
* [`Sender`]s and [`Receiver`]s can be cloned and shared among threads.
* Two main kinds of channels are [`bounded`] and [`unbounded`].
* Convenient extra channels like [`after`], [`never`], and [`tick`].
* The [`select!`] macro can block on multiple channel operations.
* [`Select`] can select over a dynamically built list of channel operations.
* Channels use locks very sparingly for maximum [performance](benchmarks).
[`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html
[`Sender`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Sender.html
[`Receiver`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Receiver.html
[`bounded`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.bounded.html
[`unbounded`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.unbounded.html
[`after`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.after.html
[`never`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.never.html
[`tick`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/fn.tick.html
[`select!`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/macro.select.html
[`Select`]: https://docs.rs/crossbeam-channel/*/crossbeam_channel/struct.Select.html
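
As a quick illustration of the highlights above — my own sketch, not part of the vendored README, assuming crossbeam-channel 0.3:

```rust
extern crate crossbeam_channel;

use std::thread;
use crossbeam_channel::unbounded;

fn main() {
    // Both endpoints are Clone, so several producers and consumers
    // can share one channel (unlike std::sync::mpsc::Receiver).
    let (s, r) = unbounded();
    let (s2, r2) = (s.clone(), r.clone());

    let t1 = thread::spawn(move || s.send(1).unwrap());
    let t2 = thread::spawn(move || s2.send(2).unwrap());

    let sum = r.recv().unwrap() + r2.recv().unwrap();
    assert_eq!(sum, 3);

    t1.join().unwrap();
    t2.join().unwrap();
}
```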
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-channel = "0.3"
```
Next, add this to your crate:
```rust
#[macro_use]
extern crate crossbeam_channel;
```
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
#### Third party software
This product includes copies and modifications of software developed by third parties:
* [examples/matching.rs](examples/matching.rs) includes
[matching.go](http://www.nada.kth.se/~snilsson/concurrency/src/matching.go) by Stefan Nilsson,
licensed under Creative Commons Attribution 3.0 Unported License.
* [src/flavors/array.rs](src/flavors/array.rs) is based on
[Bounded MPMC queue](http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue)
by Dmitry Vyukov, licensed under the Simplified BSD License and the Apache License, Version 2.0.
* [tests/mpsc.rs](tests/mpsc.rs) includes modifications of code from The Rust Programming Language,
licensed under the MIT License and the Apache License, Version 2.0.
* [tests/golang.rs](tests/golang.rs) is based on code from The Go Programming Language, licensed
under the 3-Clause BSD License.
See the source code files for more details.
Copies of third party licenses can be found in [LICENSE-THIRD-PARTY](LICENSE-THIRD-PARTY).

third_party/rust/crossbeam-channel/examples/fibonacci.rs (vendored, new file, 27 lines)

@@ -0,0 +1,27 @@
//! An asynchronous fibonacci sequence generator.
extern crate crossbeam_channel;
use std::thread;
use crossbeam_channel::{bounded, Sender};
// Sends the Fibonacci sequence into the channel until it becomes disconnected.
fn fibonacci(sender: Sender<u64>) {
let (mut x, mut y) = (0, 1);
while sender.send(x).is_ok() {
let tmp = x;
x = y;
y = tmp + y;
}
}
fn main() {
let (s, r) = bounded(0);
thread::spawn(|| fibonacci(s));
// Print the first 20 Fibonacci numbers.
for num in r.iter().take(20) {
println!("{}", num);
}
}
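
One detail worth noting about the `bounded(0)` call in the example above: a zero-capacity channel is a rendezvous channel, so every `send` blocks until a matching `recv`. A minimal sketch of my own illustrating this:

```rust
extern crate crossbeam_channel;

use std::thread;
use crossbeam_channel::bounded;

fn main() {
    // Zero capacity: a send can only complete together with a receive.
    let (s, r) = bounded::<u64>(0);

    let t = thread::spawn(move || {
        // Blocks until the main thread calls recv().
        s.send(42).unwrap();
    });

    assert_eq!(r.recv().unwrap(), 42);
    t.join().unwrap();
}
```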

third_party/rust/crossbeam-channel/examples/matching.rs (vendored, new file, 75 lines)

@@ -0,0 +1,75 @@
//! Using `select!` to send and receive on the same channel at the same time.
//!
//! This example is based on the following program in Go.
//!
//! Source:
//! - https://web.archive.org/web/20171209034309/https://www.nada.kth.se/~snilsson/concurrency
//! - http://www.nada.kth.se/~snilsson/concurrency/src/matching.go
//!
//! Copyright & License:
//! - Stefan Nilsson
//! - Creative Commons Attribution 3.0 Unported License
//! - https://creativecommons.org/licenses/by/3.0/
//!
//! ```go
//! func main() {
//! people := []string{"Anna", "Bob", "Cody", "Dave", "Eva"}
//! match := make(chan string, 1) // Make room for one unmatched send.
//! wg := new(sync.WaitGroup)
//! for _, name := range people {
//! wg.Add(1)
//! go Seek(name, match, wg)
//! }
//! wg.Wait()
//! select {
//! case name := <-match:
//! fmt.Printf("No one received %ss message.\n", name)
//! default:
//! // There was no pending send operation.
//! }
//! }
//!
//! // Seek either sends or receives, whichever possible, a name on the match
//! // channel and notifies the wait group when done.
//! func Seek(name string, match chan string, wg *sync.WaitGroup) {
//! select {
//! case peer := <-match:
//! fmt.Printf("%s received a message from %s.\n", name, peer)
//! case match <- name:
//! // Wait for someone to receive my message.
//! }
//! wg.Done()
//! }
//! ```
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
use crossbeam_channel::bounded;
use crossbeam_utils::thread;
fn main() {
let people = vec!["Anna", "Bob", "Cody", "Dave", "Eva"];
let (s, r) = bounded(1); // Make room for one unmatched send.
// Either send my name into the channel or receive someone else's, whatever happens first.
let seek = |name, s, r| {
select! {
recv(r) -> peer => println!("{} received a message from {}.", name, peer.unwrap()),
send(s, name) -> _ => {}, // Wait for someone to receive my message.
}
};
thread::scope(|scope| {
for name in people {
let (s, r) = (s.clone(), r.clone());
scope.spawn(move |_| seek(name, s, r));
}
}).unwrap();
// Check if there is a pending send operation.
if let Ok(name) = r.try_recv() {
println!("No one received {}s message.", name);
}
}

third_party/rust/crossbeam-channel/examples/stopwatch.rs (vendored, new file, 58 lines)

@@ -0,0 +1,58 @@
//! Prints the elapsed time every 1 second and quits on Ctrl+C.
#[macro_use]
extern crate crossbeam_channel;
extern crate signal_hook;
use std::io;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{bounded, tick, Receiver};
use signal_hook::iterator::Signals;
use signal_hook::SIGINT;
// Creates a channel that gets a message every time `SIGINT` is signalled.
fn sigint_notifier() -> io::Result<Receiver<()>> {
let (s, r) = bounded(100);
let signals = Signals::new(&[SIGINT])?;
thread::spawn(move || {
for _ in signals.forever() {
if s.send(()).is_err() {
break;
}
}
});
Ok(r)
}
// Prints the elapsed time.
fn show(dur: Duration) {
println!(
"Elapsed: {}.{:03} sec",
dur.as_secs(),
dur.subsec_nanos() / 1_000_000
);
}
fn main() {
let start = Instant::now();
let update = tick(Duration::from_secs(1));
let ctrl_c = sigint_notifier().unwrap();
loop {
select! {
recv(update) -> _ => {
show(start.elapsed());
}
recv(ctrl_c) -> _ => {
println!();
println!("Goodbye!");
show(start.elapsed());
break;
}
}
}
}

third_party/rust/crossbeam-channel/src/channel.rs (vendored, new file, 1340 lines; diff not shown due to size)

third_party/rust/crossbeam-channel/src/context.rs (vendored, new file, 189 lines)

@@ -0,0 +1,189 @@
//! Thread-local context used in select.
use std::cell::Cell;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread::{self, Thread, ThreadId};
use std::time::Instant;
use crossbeam_utils::Backoff;
use select::Selected;
/// Thread-local context used in select.
#[derive(Clone)]
pub struct Context {
inner: Arc<Inner>,
}
/// Inner representation of `Context`.
struct Inner {
/// Selected operation.
select: AtomicUsize,
/// A slot into which another thread may store a pointer to its `Packet`.
packet: AtomicUsize,
/// Thread handle.
thread: Thread,
/// Thread id.
thread_id: ThreadId,
}
impl Context {
/// Creates a new context for the duration of the closure.
#[inline]
pub fn with<F, R>(f: F) -> R
where
F: FnOnce(&Context) -> R,
{
thread_local! {
/// Cached thread-local context.
static CONTEXT: Cell<Option<Context>> = Cell::new(Some(Context::new()));
}
let mut f = Some(f);
let mut f = move |cx: &Context| -> R {
let f = f.take().unwrap();
f(cx)
};
CONTEXT
.try_with(|cell| match cell.take() {
None => f(&Context::new()),
Some(cx) => {
cx.reset();
let res = f(&cx);
cell.set(Some(cx));
res
}
}).unwrap_or_else(|_| f(&Context::new()))
}
/// Creates a new `Context`.
#[cold]
fn new() -> Context {
Context {
inner: Arc::new(Inner {
select: AtomicUsize::new(Selected::Waiting.into()),
packet: AtomicUsize::new(0),
thread: thread::current(),
thread_id: thread::current().id(),
}),
}
}
/// Resets `select` and `packet`.
#[inline]
fn reset(&self) {
self.inner
.select
.store(Selected::Waiting.into(), Ordering::Release);
self.inner.packet.store(0, Ordering::Release);
}
/// Attempts to select an operation.
///
/// On failure, the previously selected operation is returned.
#[inline]
pub fn try_select(&self, select: Selected) -> Result<(), Selected> {
self.inner
.select
.compare_exchange(
Selected::Waiting.into(),
select.into(),
Ordering::AcqRel,
Ordering::Acquire,
)
.map(|_| ())
.map_err(|e| e.into())
}
/// Returns the selected operation.
#[inline]
pub fn selected(&self) -> Selected {
Selected::from(self.inner.select.load(Ordering::Acquire))
}
/// Stores a packet.
///
/// This method must be called after `try_select` succeeds and there is a packet to provide.
#[inline]
pub fn store_packet(&self, packet: usize) {
if packet != 0 {
self.inner.packet.store(packet, Ordering::Release);
}
}
/// Waits until a packet is provided and returns it.
#[inline]
pub fn wait_packet(&self) -> usize {
let backoff = Backoff::new();
loop {
let packet = self.inner.packet.load(Ordering::Acquire);
if packet != 0 {
return packet;
}
backoff.snooze();
}
}
/// Waits until an operation is selected and returns it.
///
/// If the deadline is reached, `Selected::Aborted` will be selected.
#[inline]
pub fn wait_until(&self, deadline: Option<Instant>) -> Selected {
// Spin for a short time, waiting until an operation is selected.
let backoff = Backoff::new();
loop {
let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
if sel != Selected::Waiting {
return sel;
}
if backoff.is_complete() {
break;
} else {
backoff.snooze();
}
}
loop {
// Check whether an operation has been selected.
let sel = Selected::from(self.inner.select.load(Ordering::Acquire));
if sel != Selected::Waiting {
return sel;
}
// If there's a deadline, park the current thread until the deadline is reached.
if let Some(end) = deadline {
let now = Instant::now();
if now < end {
thread::park_timeout(end - now);
} else {
// The deadline has been reached. Try aborting select.
return match self.try_select(Selected::Aborted) {
Ok(()) => Selected::Aborted,
Err(s) => s,
};
}
} else {
thread::park();
}
}
}
/// Unparks the thread this context belongs to.
#[inline]
pub fn unpark(&self) {
self.inner.thread.unpark();
}
/// Returns the id of the thread this context belongs to.
#[inline]
pub fn thread_id(&self) -> ThreadId {
self.inner.thread_id
}
}
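At its core, `Context` is a one-shot selection slot plus a park/unpark handshake between two threads. Here is a stripped-down standalone sketch of the same handshake using only the standard library; all names in it are illustrative, not taken from the vendored crate.

```rust
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// 0 plays the role of `Selected::Waiting`.
const WAITING: usize = 0;

fn main() {
    let select = Arc::new(AtomicUsize::new(WAITING));
    let waiter = thread::current();

    let s = select.clone();
    let helper = thread::spawn(move || {
        // "Select" operation #7 on behalf of the waiting thread, then wake it.
        let _ = s.compare_exchange(WAITING, 7, Ordering::AcqRel, Ordering::Acquire);
        waiter.unpark();
    });

    // Park until some operation is selected; the loop guards against
    // spurious wakeups, just like `wait_until` above.
    while select.load(Ordering::Acquire) == WAITING {
        thread::park();
    }
    assert_eq!(select.load(Ordering::Acquire), 7);

    helper.join().unwrap();
}
```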

third_party/rust/crossbeam-channel/src/counter.rs (new file)
@@ -0,0 +1,132 @@
//! Reference counter for channels.
use std::isize;
use std::ops;
use std::process;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
/// Reference counter internals.
struct Counter<C> {
/// The number of senders associated with the channel.
senders: AtomicUsize,
/// The number of receivers associated with the channel.
receivers: AtomicUsize,
/// If `true`, either the sending or receiving side has been dropped.
disconnected: AtomicBool,
/// The internal channel.
chan: C,
}
/// Wraps a channel into the reference counter.
pub fn new<C>(chan: C) -> (Sender<C>, Receiver<C>) {
let counter = Box::into_raw(Box::new(Counter {
senders: AtomicUsize::new(1),
receivers: AtomicUsize::new(1),
disconnected: AtomicBool::new(false),
chan,
}));
let s = Sender { counter };
let r = Receiver { counter };
(s, r)
}
/// The sending side.
pub struct Sender<C> {
counter: *mut Counter<C>,
}
impl<C> Sender<C> {
/// Returns the internal `Counter`.
fn counter(&self) -> &Counter<C> {
unsafe { &*self.counter }
}
/// Acquires another sender reference.
pub fn acquire(&self) -> Sender<C> {
let count = self.counter().senders.fetch_add(1, Ordering::Relaxed);
// Cloning senders and calling `mem::forget` on the clones could potentially overflow the
// counter. It's very difficult to recover sensibly from such degenerate scenarios so we
// just abort when the count becomes very large.
if count > isize::MAX as usize {
process::abort();
}
Sender {
counter: self.counter,
}
}
/// Releases the sender reference.
///
/// Function `f` will be called if this is the last sender reference.
pub unsafe fn release<F: FnOnce(&C)>(&self, f: F) {
if self.counter().senders.fetch_sub(1, Ordering::AcqRel) == 1 {
f(&self.counter().chan);
if self.counter().disconnected.swap(true, Ordering::AcqRel) {
drop(Box::from_raw(self.counter));
}
}
}
}
impl<C> ops::Deref for Sender<C> {
type Target = C;
fn deref(&self) -> &C {
&self.counter().chan
}
}
/// The receiving side.
pub struct Receiver<C> {
counter: *mut Counter<C>,
}
impl<C> Receiver<C> {
/// Returns the internal `Counter`.
fn counter(&self) -> &Counter<C> {
unsafe { &*self.counter }
}
/// Acquires another receiver reference.
pub fn acquire(&self) -> Receiver<C> {
let count = self.counter().receivers.fetch_add(1, Ordering::Relaxed);
// Cloning receivers and calling `mem::forget` on the clones could potentially overflow the
// counter. It's very difficult to recover sensibly from such degenerate scenarios so we
// just abort when the count becomes very large.
if count > isize::MAX as usize {
process::abort();
}
Receiver {
counter: self.counter,
}
}
/// Releases the receiver reference.
///
/// Function `f` will be called if this is the last receiver reference.
pub unsafe fn release<F: FnOnce(&C)>(&self, f: F) {
if self.counter().receivers.fetch_sub(1, Ordering::AcqRel) == 1 {
f(&self.counter().chan);
if self.counter().disconnected.swap(true, Ordering::AcqRel) {
drop(Box::from_raw(self.counter));
}
}
}
}
impl<C> ops::Deref for Receiver<C> {
type Target = C;
fn deref(&self) -> &C {
&self.counter().chan
}
}
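The subtle part above is that `disconnected` doubles as a last-one-out latch: the first side to fully release sees `swap(true)` return `false` and only marks the channel disconnected, while the second sees `true` and frees the allocation. A self-contained sketch of that handoff follows; the `release` function is illustrative only.

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

static DISCONNECTED: AtomicBool = AtomicBool::new(false);

// Stands in for `Sender::release`/`Receiver::release`; the println plays
// the role of `Box::from_raw(self.counter)` in the real code.
fn release(side: &str) {
    if DISCONNECTED.swap(true, Ordering::AcqRel) {
        println!("{} released last and frees the counter", side);
    } else {
        println!("{} released first and marks the channel disconnected", side);
    }
}

fn main() {
    let t = thread::spawn(|| release("receiver side"));
    release("sender side");
    t.join().unwrap();

    // Exactly one of the two paths "freed"; the flag is set either way.
    assert!(DISCONNECTED.load(Ordering::SeqCst));
}
```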

third_party/rust/crossbeam-channel/src/err.rs (new file)
@@ -0,0 +1,451 @@
use std::error;
use std::fmt;
/// An error returned from the [`send`] method.
///
/// The message could not be sent because the channel is disconnected.
///
/// The error contains the message so it can be recovered.
///
/// [`send`]: struct.Sender.html#method.send
#[derive(PartialEq, Eq, Clone, Copy)]
pub struct SendError<T>(pub T);
/// An error returned from the [`try_send`] method.
///
/// The error contains the message being sent so it can be recovered.
///
/// [`try_send`]: struct.Sender.html#method.try_send
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum TrySendError<T> {
/// The message could not be sent because the channel is full.
///
/// If this is a zero-capacity channel, then the error indicates that there was no receiver
/// available to receive the message at the time.
Full(T),
/// The message could not be sent because the channel is disconnected.
Disconnected(T),
}
/// An error returned from the [`send_timeout`] method.
///
/// The error contains the message being sent so it can be recovered.
///
/// [`send_timeout`]: struct.Sender.html#method.send_timeout
#[derive(PartialEq, Eq, Clone, Copy)]
pub enum SendTimeoutError<T> {
/// The message could not be sent because the channel is full and the operation timed out.
///
/// If this is a zero-capacity channel, then the error indicates that there was no receiver
/// available to receive the message and the operation timed out.
Timeout(T),
/// The message could not be sent because the channel is disconnected.
Disconnected(T),
}
/// An error returned from the [`recv`] method.
///
/// A message could not be received because the channel is empty and disconnected.
///
/// [`recv`]: struct.Receiver.html#method.recv
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct RecvError;
/// An error returned from the [`try_recv`] method.
///
/// [`try_recv`]: struct.Receiver.html#method.try_recv
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum TryRecvError {
/// A message could not be received because the channel is empty.
///
/// If this is a zero-capacity channel, then the error indicates that there was no sender
/// available to send a message at the time.
Empty,
/// The message could not be received because the channel is empty and disconnected.
Disconnected,
}
/// An error returned from the [`recv_timeout`] method.
///
/// [`recv_timeout`]: struct.Receiver.html#method.recv_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum RecvTimeoutError {
/// A message could not be received because the channel is empty and the operation timed out.
///
/// If this is a zero-capacity channel, then the error indicates that there was no sender
/// available to send a message and the operation timed out.
Timeout,
/// The message could not be received because the channel is empty and disconnected.
Disconnected,
}
/// An error returned from the [`try_select`] method.
///
/// Failed because none of the channel operations were ready.
///
/// [`try_select`]: struct.Select.html#method.try_select
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct TrySelectError;
/// An error returned from the [`select_timeout`] method.
///
/// Failed because none of the channel operations became ready before the timeout.
///
/// [`select_timeout`]: struct.Select.html#method.select_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct SelectTimeoutError;
/// An error returned from the [`try_ready`] method.
///
/// Failed because none of the channel operations were ready.
///
/// [`try_ready`]: struct.Select.html#method.try_ready
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct TryReadyError;
/// An error returned from the [`ready_timeout`] method.
///
/// Failed because none of the channel operations became ready before the timeout.
///
/// [`ready_timeout`]: struct.Select.html#method.ready_timeout
#[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub struct ReadyTimeoutError;
impl<T> fmt::Debug for SendError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"SendError(..)".fmt(f)
}
}
impl<T> fmt::Display for SendError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"sending on a disconnected channel".fmt(f)
}
}
impl<T: Send> error::Error for SendError<T> {
fn description(&self) -> &str {
"sending on a disconnected channel"
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl<T> SendError<T> {
/// Unwraps the message.
///
/// # Examples
///
/// ```
/// use crossbeam_channel::unbounded;
///
/// let (s, r) = unbounded();
/// drop(r);
///
/// if let Err(err) = s.send("foo") {
/// assert_eq!(err.into_inner(), "foo");
/// }
/// ```
pub fn into_inner(self) -> T {
self.0
}
}
impl<T> fmt::Debug for TrySendError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TrySendError::Full(..) => "Full(..)".fmt(f),
TrySendError::Disconnected(..) => "Disconnected(..)".fmt(f),
}
}
}
impl<T> fmt::Display for TrySendError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TrySendError::Full(..) => "sending on a full channel".fmt(f),
TrySendError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
}
}
}
impl<T: Send> error::Error for TrySendError<T> {
fn description(&self) -> &str {
match *self {
TrySendError::Full(..) => "sending on a full channel",
TrySendError::Disconnected(..) => "sending on a disconnected channel",
}
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl<T> From<SendError<T>> for TrySendError<T> {
fn from(err: SendError<T>) -> TrySendError<T> {
match err {
SendError(t) => TrySendError::Disconnected(t),
}
}
}
impl<T> TrySendError<T> {
/// Unwraps the message.
///
/// # Examples
///
/// ```
/// use crossbeam_channel::bounded;
///
/// let (s, r) = bounded(0);
///
/// if let Err(err) = s.try_send("foo") {
/// assert_eq!(err.into_inner(), "foo");
/// }
/// ```
pub fn into_inner(self) -> T {
match self {
TrySendError::Full(v) => v,
TrySendError::Disconnected(v) => v,
}
}
/// Returns `true` if the send operation failed because the channel is full.
pub fn is_full(&self) -> bool {
match self {
TrySendError::Full(_) => true,
_ => false,
}
}
/// Returns `true` if the send operation failed because the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
match self {
TrySendError::Disconnected(_) => true,
_ => false,
}
}
}
impl<T> fmt::Debug for SendTimeoutError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"SendTimeoutError(..)".fmt(f)
}
}
impl<T> fmt::Display for SendTimeoutError<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
SendTimeoutError::Timeout(..) => "timed out waiting on send operation".fmt(f),
SendTimeoutError::Disconnected(..) => "sending on a disconnected channel".fmt(f),
}
}
}
impl<T: Send> error::Error for SendTimeoutError<T> {
fn description(&self) -> &str {
"sending on an empty and disconnected channel"
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl<T> From<SendError<T>> for SendTimeoutError<T> {
fn from(err: SendError<T>) -> SendTimeoutError<T> {
match err {
SendError(e) => SendTimeoutError::Disconnected(e),
}
}
}
impl<T> SendTimeoutError<T> {
/// Unwraps the message.
///
/// # Examples
///
/// ```
/// use std::time::Duration;
/// use crossbeam_channel::unbounded;
///
/// let (s, r) = unbounded();
///
/// if let Err(err) = s.send_timeout("foo", Duration::from_secs(1)) {
/// assert_eq!(err.into_inner(), "foo");
/// }
/// ```
pub fn into_inner(self) -> T {
match self {
SendTimeoutError::Timeout(v) => v,
SendTimeoutError::Disconnected(v) => v,
}
}
/// Returns `true` if the send operation timed out.
pub fn is_timeout(&self) -> bool {
match self {
SendTimeoutError::Timeout(_) => true,
_ => false,
}
}
/// Returns `true` if the send operation failed because the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
match self {
SendTimeoutError::Disconnected(_) => true,
_ => false,
}
}
}
impl fmt::Display for RecvError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"receiving on an empty and disconnected channel".fmt(f)
}
}
impl error::Error for RecvError {
fn description(&self) -> &str {
"receiving on an empty and disconnected channel"
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl fmt::Display for TryRecvError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
TryRecvError::Empty => "receiving on an empty channel".fmt(f),
TryRecvError::Disconnected => "receiving on an empty and disconnected channel".fmt(f),
}
}
}
impl error::Error for TryRecvError {
fn description(&self) -> &str {
match *self {
TryRecvError::Empty => "receiving on an empty channel",
TryRecvError::Disconnected => "receiving on an empty and disconnected channel",
}
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl From<RecvError> for TryRecvError {
fn from(err: RecvError) -> TryRecvError {
match err {
RecvError => TryRecvError::Disconnected,
}
}
}
impl TryRecvError {
/// Returns `true` if the receive operation failed because the channel is empty.
pub fn is_empty(&self) -> bool {
match self {
TryRecvError::Empty => true,
_ => false,
}
}
/// Returns `true` if the receive operation failed because the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
match self {
TryRecvError::Disconnected => true,
_ => false,
}
}
}
impl fmt::Display for RecvTimeoutError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
RecvTimeoutError::Timeout => "timed out waiting on receive operation".fmt(f),
RecvTimeoutError::Disconnected => "channel is empty and disconnected".fmt(f),
}
}
}
impl error::Error for RecvTimeoutError {
fn description(&self) -> &str {
match *self {
RecvTimeoutError::Timeout => "timed out waiting on receive operation",
RecvTimeoutError::Disconnected => "channel is empty and disconnected",
}
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl From<RecvError> for RecvTimeoutError {
fn from(err: RecvError) -> RecvTimeoutError {
match err {
RecvError => RecvTimeoutError::Disconnected,
}
}
}
impl RecvTimeoutError {
/// Returns `true` if the receive operation timed out.
pub fn is_timeout(&self) -> bool {
match self {
RecvTimeoutError::Timeout => true,
_ => false,
}
}
/// Returns `true` if the receive operation failed because the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
match self {
RecvTimeoutError::Disconnected => true,
_ => false,
}
}
}
impl fmt::Display for TrySelectError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"all operations in select would block".fmt(f)
}
}
impl error::Error for TrySelectError {
fn description(&self) -> &str {
"all operations in select would block"
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
impl fmt::Display for SelectTimeoutError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
"timed out waiting on select".fmt(f)
}
}
impl error::Error for SelectTimeoutError {
fn description(&self) -> &str {
"timed out waiting on select"
}
fn cause(&self) -> Option<&error::Error> {
None
}
}
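As a usage sketch (not taken from the vendored tree), the predicate methods let callers branch on a failed send without destructuring the variant by hand:

```rust
extern crate crossbeam_channel;

use crossbeam_channel::bounded;

fn main() {
    let (s, r) = bounded(1);
    s.try_send(1).unwrap();

    // The one-slot buffer is full, so this fails with `Full`.
    match s.try_send(2) {
        Err(ref e) if e.is_full() => println!("full, retry later"),
        Err(ref e) if e.is_disconnected() => println!("receiver is gone"),
        _ => unreachable!(),
    }

    // Once the receiver is dropped, the error switches to `Disconnected`,
    // and `into_inner` recovers the unsent message.
    drop(r);
    if let Err(e) = s.try_send(3) {
        assert!(e.is_disconnected());
        assert_eq!(e.into_inner(), 3);
    }
}
```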

third_party/rust/crossbeam-channel/src/flavors/after.rs (new file)
@@ -0,0 +1,200 @@
//! Channel that delivers a message after a certain amount of time.
//!
//! Messages cannot be sent into this kind of channel; they are materialized on demand.
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::{Duration, Instant};
use context::Context;
use err::{RecvTimeoutError, TryRecvError};
use select::{Operation, SelectHandle, Token};
use utils;
/// Result of a receive operation.
pub type AfterToken = Option<Instant>;
/// Channel that delivers a message after a certain amount of time.
pub struct Channel {
/// The instant at which the message will be delivered.
delivery_time: Instant,
/// `true` if the message has been received.
received: AtomicBool,
}
impl Channel {
/// Creates a channel that delivers a message after a certain duration of time.
#[inline]
pub fn new(dur: Duration) -> Self {
Channel {
delivery_time: Instant::now() + dur,
received: AtomicBool::new(false),
}
}
/// Attempts to receive a message without blocking.
#[inline]
pub fn try_recv(&self) -> Result<Instant, TryRecvError> {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
// The message has already been received.
return Err(TryRecvError::Empty);
}
if Instant::now() < self.delivery_time {
// The message was not delivered yet.
return Err(TryRecvError::Empty);
}
// Try receiving the message if it is still available.
if !self.received.swap(true, Ordering::SeqCst) {
// Success! Return delivery time as the message.
Ok(self.delivery_time)
} else {
// The message was already received.
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
#[inline]
pub fn recv(&self, deadline: Option<Instant>) -> Result<Instant, RecvTimeoutError> {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
// The message has already been received.
utils::sleep_until(deadline);
return Err(RecvTimeoutError::Timeout);
}
// Wait until the message is received or the deadline is reached.
loop {
let now = Instant::now();
// Check if we can receive the next message.
if now >= self.delivery_time {
break;
}
// Check if the deadline has been reached.
if let Some(d) = deadline {
if now >= d {
return Err(RecvTimeoutError::Timeout);
}
thread::sleep(self.delivery_time.min(d) - now);
} else {
thread::sleep(self.delivery_time - now);
}
}
// Try receiving the message if it is still available.
if !self.received.swap(true, Ordering::SeqCst) {
// Success! Return the message, which is the instant at which it was delivered.
Ok(self.delivery_time)
} else {
// The message was already received. Block forever.
utils::sleep_until(None);
unreachable!()
}
}
/// Reads a message from the channel.
#[inline]
pub unsafe fn read(&self, token: &mut Token) -> Result<Instant, ()> {
token.after.ok_or(())
}
/// Returns `true` if the channel is empty.
#[inline]
pub fn is_empty(&self) -> bool {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
return true;
}
// If the delivery time hasn't been reached yet, the channel is empty.
if Instant::now() < self.delivery_time {
return true;
}
// The delivery time has been reached. The channel is empty only if the message has already
// been received.
self.received.load(Ordering::SeqCst)
}
/// Returns `true` if the channel is full.
#[inline]
pub fn is_full(&self) -> bool {
!self.is_empty()
}
/// Returns the number of messages in the channel.
#[inline]
pub fn len(&self) -> usize {
if self.is_empty() {
0
} else {
1
}
}
/// Returns the capacity of the channel.
#[inline]
pub fn capacity(&self) -> Option<usize> {
Some(1)
}
}
impl SelectHandle for Channel {
#[inline]
fn try_select(&self, token: &mut Token) -> bool {
match self.try_recv() {
Ok(msg) => {
token.after = Some(msg);
true
}
Err(TryRecvError::Disconnected) => {
token.after = None;
true
}
Err(TryRecvError::Empty) => false,
}
}
#[inline]
fn deadline(&self) -> Option<Instant> {
// We use relaxed ordering because this is just an optional optimistic check.
if self.received.load(Ordering::Relaxed) {
None
} else {
Some(self.delivery_time)
}
}
#[inline]
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unregister(&self, _oper: Operation) {}
#[inline]
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
#[inline]
fn is_ready(&self) -> bool {
!self.is_empty()
}
#[inline]
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unwatch(&self, _oper: Operation) {}
}
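This flavor backs the crate's public `after` constructor. A small sketch of the observable behavior (the 50 ms figure is arbitrary):

```rust
extern crate crossbeam_channel;

use std::time::{Duration, Instant};

use crossbeam_channel::after;

fn main() {
    let start = Instant::now();
    let r = after(Duration::from_millis(50));

    // Exactly one message is ever delivered, at or after the delivery time,
    // and the message itself is the delivery `Instant`.
    let delivered_at = r.recv().unwrap();
    assert!(delivered_at >= start);
    assert!(start.elapsed() >= Duration::from_millis(50));

    // Once consumed, the channel stays empty forever.
    assert!(r.try_recv().is_err());
}
```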

third_party/rust/crossbeam-channel/src/flavors/array.rs (new file)
@@ -0,0 +1,637 @@
//! Bounded channel based on a preallocated array.
//!
//! This flavor has a fixed, positive capacity.
//!
//! The implementation is based on Dmitry Vyukov's bounded MPMC queue.
//!
//! Source:
//! - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
//! - https://docs.google.com/document/d/1yIAYmbvL3JxOKOjuCyon7JhW4cSv1wy5hC0ApeGMV9s/pub
//!
//! Copyright & License:
//! - Copyright (c) 2010-2011 Dmitry Vyukov
//! - Simplified BSD License and Apache License, Version 2.0
//! - http://www.1024cores.net/home/code-license
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem;
use std::ptr;
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::time::Instant;
use crossbeam_utils::{Backoff, CachePadded};
use context::Context;
use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError};
use select::{Operation, SelectHandle, Selected, Token};
use waker::SyncWaker;
/// A slot in a channel.
struct Slot<T> {
/// The current stamp.
stamp: AtomicUsize,
/// The message in this slot.
msg: UnsafeCell<T>,
}
/// The token type for the array flavor.
pub struct ArrayToken {
/// Slot to read from or write to.
slot: *const u8,
/// Stamp to store into the slot after reading or writing.
stamp: usize,
}
impl Default for ArrayToken {
#[inline]
fn default() -> Self {
ArrayToken {
slot: ptr::null(),
stamp: 0,
}
}
}
/// Bounded channel based on a preallocated array.
pub struct Channel<T> {
/// The head of the channel.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit in the head is always zero.
///
/// Messages are popped from the head of the channel.
head: CachePadded<AtomicUsize>,
/// The tail of the channel.
///
/// This value is a "stamp" consisting of an index into the buffer, a mark bit, and a lap, but
/// packed into a single `usize`. The lower bits represent the index, while the upper bits
/// represent the lap. The mark bit indicates that the channel is disconnected.
///
/// Messages are pushed into the tail of the channel.
tail: CachePadded<AtomicUsize>,
/// The buffer holding slots.
buffer: *mut Slot<T>,
/// The channel capacity.
cap: usize,
/// A stamp with the value of `{ lap: 1, mark: 0, index: 0 }`.
one_lap: usize,
/// If this bit is set in the tail, that means the channel is disconnected.
mark_bit: usize,
/// Senders waiting while the channel is full.
senders: SyncWaker,
/// Receivers waiting while the channel is empty and not disconnected.
receivers: SyncWaker,
/// Indicates that dropping a `Channel<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Creates a bounded channel of capacity `cap`.
///
/// # Panics
///
/// Panics if the capacity is not in the range `1 ..= usize::max_value() / 4`.
pub fn with_capacity(cap: usize) -> Self {
assert!(cap > 0, "capacity must be positive");
// Make sure there are at least two most significant bits: one to encode laps and one more
// to indicate that the channel is disconnected. If we can't reserve two bits, then panic.
// In that case, the buffer is likely too large to allocate anyway.
let cap_limit = usize::max_value() / 4;
assert!(
cap <= cap_limit,
"channel capacity is too large: {} > {}",
cap,
cap_limit
);
// Compute constants `mark_bit` and `one_lap`.
let mark_bit = (cap + 1).next_power_of_two();
let one_lap = mark_bit * 2;
// Head is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let head = 0;
// Tail is initialized to `{ lap: 0, mark: 0, index: 0 }`.
let tail = 0;
// Allocate a buffer of `cap` slots.
let buffer = {
let mut v = Vec::<Slot<T>>::with_capacity(cap);
let ptr = v.as_mut_ptr();
mem::forget(v);
ptr
};
// Initialize stamps in the slots.
for i in 0..cap {
unsafe {
// Set the stamp to `{ lap: 0, mark: 0, index: i }`.
let slot = buffer.add(i);
ptr::write(&mut (*slot).stamp, AtomicUsize::new(i));
}
}
Channel {
buffer,
cap,
one_lap,
mark_bit,
head: CachePadded::new(AtomicUsize::new(head)),
tail: CachePadded::new(AtomicUsize::new(tail)),
senders: SyncWaker::new(),
receivers: SyncWaker::new(),
_marker: PhantomData,
}
}
/// Returns a receiver handle to the channel.
pub fn receiver(&self) -> Receiver<T> {
Receiver(self)
}
/// Returns a sender handle to the channel.
pub fn sender(&self) -> Sender<T> {
Sender(self)
}
/// Attempts to reserve a slot for sending a message.
fn start_send(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut tail = self.tail.load(Ordering::Relaxed);
loop {
// Check if the channel is disconnected.
if tail & self.mark_bit != 0 {
token.array.slot = ptr::null();
token.array.stamp = 0;
return true;
}
// Deconstruct the tail.
let index = tail & (self.mark_bit - 1);
let lap = tail & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the tail and the stamp match, we may attempt to push.
if tail == stamp {
let new_tail = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
tail + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the tail.
match self
.tail
.compare_exchange_weak(tail, new_tail, Ordering::SeqCst, Ordering::Relaxed)
{
Ok(_) => {
// Prepare the token for the follow-up call to `write`.
token.array.slot = slot as *const Slot<T> as *const u8;
token.array.stamp = tail + 1;
return true;
}
Err(t) => {
tail = t;
backoff.spin();
}
}
} else if stamp.wrapping_add(self.one_lap) == tail + 1 {
atomic::fence(Ordering::SeqCst);
let head = self.head.load(Ordering::Relaxed);
// If the head lags one lap behind the tail as well...
if head.wrapping_add(self.one_lap) == tail {
// ...then the channel is full.
return false;
}
backoff.spin();
tail = self.tail.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
tail = self.tail.load(Ordering::Relaxed);
}
}
}
/// Writes a message into the channel.
pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
// If there is no slot, the channel is disconnected.
if token.array.slot.is_null() {
return Err(msg);
}
let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
// Write the message into the slot and update the stamp.
slot.msg.get().write(msg);
slot.stamp.store(token.array.stamp, Ordering::Release);
// Wake a sleeping receiver.
self.receivers.notify();
Ok(())
}
/// Attempts to reserve a slot for receiving a message.
fn start_recv(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut head = self.head.load(Ordering::Relaxed);
loop {
// Deconstruct the head.
let index = head & (self.mark_bit - 1);
let lap = head & !(self.one_lap - 1);
// Inspect the corresponding slot.
let slot = unsafe { &*self.buffer.add(index) };
let stamp = slot.stamp.load(Ordering::Acquire);
// If the stamp is ahead of the head by 1, we may attempt to pop.
if head + 1 == stamp {
let new = if index + 1 < self.cap {
// Same lap, incremented index.
// Set to `{ lap: lap, mark: 0, index: index + 1 }`.
head + 1
} else {
// One lap forward, index wraps around to zero.
// Set to `{ lap: lap.wrapping_add(1), mark: 0, index: 0 }`.
lap.wrapping_add(self.one_lap)
};
// Try moving the head.
match self
.head
.compare_exchange_weak(head, new, Ordering::SeqCst, Ordering::Relaxed)
{
Ok(_) => {
// Prepare the token for the follow-up call to `read`.
token.array.slot = slot as *const Slot<T> as *const u8;
token.array.stamp = head.wrapping_add(self.one_lap);
return true;
}
Err(h) => {
head = h;
backoff.spin();
}
}
} else if stamp == head {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.load(Ordering::Relaxed);
// If the tail equals the head, that means the channel is empty.
if (tail & !self.mark_bit) == head {
// If the channel is disconnected...
if tail & self.mark_bit != 0 {
// ...then receive an error.
token.array.slot = ptr::null();
token.array.stamp = 0;
return true;
} else {
// Otherwise, the receive operation is not ready.
return false;
}
}
backoff.spin();
head = self.head.load(Ordering::Relaxed);
} else {
// Snooze because we need to wait for the stamp to get updated.
backoff.snooze();
head = self.head.load(Ordering::Relaxed);
}
}
}
/// Reads a message from the channel.
pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
if token.array.slot.is_null() {
// The channel is disconnected.
return Err(());
}
let slot: &Slot<T> = &*(token.array.slot as *const Slot<T>);
// Read the message from the slot and update the stamp.
let msg = slot.msg.get().read();
slot.stamp.store(token.array.stamp, Ordering::Release);
// Wake a sleeping sender.
self.senders.notify();
Ok(msg)
}
/// Attempts to send a message into the channel.
pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
let token = &mut Token::default();
if self.start_send(token) {
unsafe { self.write(token, msg).map_err(TrySendError::Disconnected) }
} else {
Err(TrySendError::Full(msg))
}
}
/// Sends a message into the channel.
pub fn send(&self, msg: T, deadline: Option<Instant>) -> Result<(), SendTimeoutError<T>> {
let token = &mut Token::default();
loop {
// Try sending a message several times.
let backoff = Backoff::new();
loop {
if self.start_send(token) {
let res = unsafe { self.write(token, msg) };
return res.map_err(SendTimeoutError::Disconnected);
}
if backoff.is_complete() {
break;
} else {
backoff.snooze();
}
}
Context::with(|cx| {
// Prepare for blocking until a receiver wakes us up.
let oper = Operation::hook(token);
self.senders.register(oper, cx);
// Has the channel become ready just now?
if !self.is_full() || self.is_disconnected() {
let _ = cx.try_select(Selected::Aborted);
}
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted | Selected::Disconnected => {
self.senders.unregister(oper).unwrap();
}
Selected::Operation(_) => {}
}
});
if let Some(d) = deadline {
if Instant::now() >= d {
return Err(SendTimeoutError::Timeout(msg));
}
}
}
}
/// Attempts to receive a message without blocking.
pub fn try_recv(&self) -> Result<T, TryRecvError> {
let token = &mut Token::default();
if self.start_recv(token) {
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
} else {
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
let token = &mut Token::default();
loop {
// Try receiving a message several times.
let backoff = Backoff::new();
loop {
if self.start_recv(token) {
let res = unsafe { self.read(token) };
return res.map_err(|_| RecvTimeoutError::Disconnected);
}
if backoff.is_complete() {
break;
} else {
backoff.snooze();
}
}
Context::with(|cx| {
// Prepare for blocking until a sender wakes us up.
let oper = Operation::hook(token);
self.receivers.register(oper, cx);
// Has the channel become ready just now?
if !self.is_empty() || self.is_disconnected() {
let _ = cx.try_select(Selected::Aborted);
}
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted | Selected::Disconnected => {
self.receivers.unregister(oper).unwrap();
// If the channel was disconnected, we still have to check for remaining
// messages.
}
Selected::Operation(_) => {}
}
});
if let Some(d) = deadline {
if Instant::now() >= d {
return Err(RecvTimeoutError::Timeout);
}
}
}
}
/// Returns the current number of messages inside the channel.
pub fn len(&self) -> usize {
loop {
// Load the tail, then load the head.
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// If the tail didn't change, we've got consistent values to work with.
if self.tail.load(Ordering::SeqCst) == tail {
let hix = head & (self.mark_bit - 1);
let tix = tail & (self.mark_bit - 1);
return if hix < tix {
tix - hix
} else if hix > tix {
self.cap - hix + tix
} else if (tail & !self.mark_bit) == head {
0
} else {
self.cap
};
}
}
}
/// Returns the capacity of the channel.
pub fn capacity(&self) -> Option<usize> {
Some(self.cap)
}
/// Disconnects the channel and wakes up all blocked receivers.
pub fn disconnect(&self) {
let tail = self.tail.fetch_or(self.mark_bit, Ordering::SeqCst);
if tail & self.mark_bit == 0 {
self.senders.disconnect();
self.receivers.disconnect();
}
}
/// Returns `true` if the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
self.tail.load(Ordering::SeqCst) & self.mark_bit != 0
}
/// Returns `true` if the channel is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.load(Ordering::SeqCst);
let tail = self.tail.load(Ordering::SeqCst);
// Is the tail equal to the head?
//
// Note: If the head changes just before we load the tail, that means there was a moment
// when the channel was not empty, so it is safe to just return `false`.
(tail & !self.mark_bit) == head
}
/// Returns `true` if the channel is full.
pub fn is_full(&self) -> bool {
let tail = self.tail.load(Ordering::SeqCst);
let head = self.head.load(Ordering::SeqCst);
// Is the head lagging one lap behind tail?
//
// Note: If the tail changes just before we load the head, that means there was a moment
// when the channel was not full, so it is safe to just return `false`.
head.wrapping_add(self.one_lap) == tail & !self.mark_bit
}
}
impl<T> Drop for Channel<T> {
fn drop(&mut self) {
// Get the index of the head.
let hix = self.head.load(Ordering::Relaxed) & (self.mark_bit - 1);
// Loop over all slots that hold a message and drop them.
for i in 0..self.len() {
// Compute the index of the next slot holding a message.
let index = if hix + i < self.cap {
hix + i
} else {
hix + i - self.cap
};
unsafe {
self.buffer.add(index).drop_in_place();
}
}
// Finally, deallocate the buffer, but don't run any destructors.
unsafe {
Vec::from_raw_parts(self.buffer, 0, self.cap);
}
}
}
/// Receiver handle to a channel.
pub struct Receiver<'a, T: 'a>(&'a Channel<T>);
/// Sender handle to a channel.
pub struct Sender<'a, T: 'a>(&'a Channel<T>);
impl<'a, T> SelectHandle for Receiver<'a, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_recv(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.register(oper, cx);
self.is_ready()
}
fn unregister(&self, oper: Operation) {
self.0.receivers.unregister(oper);
}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
!self.0.is_empty() || self.0.is_disconnected()
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.watch(oper, cx);
self.is_ready()
}
fn unwatch(&self, oper: Operation) {
self.0.receivers.unwatch(oper);
}
}
impl<'a, T> SelectHandle for Sender<'a, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_send(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
self.0.senders.register(oper, cx);
self.is_ready()
}
fn unregister(&self, oper: Operation) {
self.0.senders.unregister(oper);
}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
!self.0.is_full() || self.0.is_disconnected()
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
self.0.senders.watch(oper, cx);
self.is_ready()
}
fn unwatch(&self, oper: Operation) {
self.0.senders.unwatch(oper);
}
}
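The stamp packing is easiest to follow with concrete numbers. Below is a standalone worked example for `cap = 5`, mirroring the arithmetic in `with_capacity` and `start_send`; it is not part of the vendored file.

```rust
fn main() {
    let cap: usize = 5;

    // As in `with_capacity`: the mark bit is the first power of two above
    // `cap`, and one full lap is the next bit up.
    let mark_bit = (cap + 1).next_power_of_two(); // 8  = 0b0000_1000
    let one_lap = mark_bit * 2;                   // 16 = 0b0001_0000

    // Encode a tail stamp of `{ lap: 3, mark: 0, index: 2 }`.
    let tail = 3 * one_lap + 2; // 50 = 0b0011_0010

    // Deconstruct it the way `start_send` does.
    let index = tail & (mark_bit - 1);
    let lap = tail & !(one_lap - 1);

    assert_eq!(index, 2);
    assert_eq!(lap, 3 * one_lap);
    assert_eq!(tail & mark_bit, 0); // mark clear: not disconnected
}
```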

third_party/rust/crossbeam-channel/src/flavors/list.rs (new file)
@@ -0,0 +1,657 @@
//! Unbounded channel implemented as a linked list.
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::ptr;
use std::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
use std::time::Instant;
use crossbeam_utils::{Backoff, CachePadded};
use context::Context;
use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError};
use select::{Operation, SelectHandle, Selected, Token};
use waker::SyncWaker;
// TODO(stjepang): Once we bump the minimum required Rust version to 1.28 or newer, re-apply the
// following changes by @kleimkuhler:
//
// 1. https://github.com/crossbeam-rs/crossbeam-channel/pull/100
// 2. https://github.com/crossbeam-rs/crossbeam-channel/pull/101
// Bits indicating the state of a slot:
// * If a message has been written into the slot, `WRITE` is set.
// * If a message has been read from the slot, `READ` is set.
// * If the block is being destroyed, `DESTROY` is set.
const WRITE: usize = 1;
const READ: usize = 2;
const DESTROY: usize = 4;
// Each block covers one "lap" of indices.
const LAP: usize = 32;
// The maximum number of messages a block can hold.
const BLOCK_CAP: usize = LAP - 1;
// How many lower bits are reserved for metadata.
const SHIFT: usize = 1;
// Has two different purposes:
// * If set in head, indicates that the block is not the last one.
// * If set in tail, indicates that the channel is disconnected.
const MARK_BIT: usize = 1;
/// A slot in a block.
struct Slot<T> {
/// The message.
msg: UnsafeCell<ManuallyDrop<T>>,
/// The state of the slot.
state: AtomicUsize,
}
impl<T> Slot<T> {
/// Waits until a message is written into the slot.
fn wait_write(&self) {
let backoff = Backoff::new();
while self.state.load(Ordering::Acquire) & WRITE == 0 {
backoff.snooze();
}
}
}
/// A block in a linked list.
///
/// Each block in the list can hold up to `BLOCK_CAP` messages.
struct Block<T> {
/// The next block in the linked list.
next: AtomicPtr<Block<T>>,
/// Slots for messages.
slots: [Slot<T>; BLOCK_CAP],
}
impl<T> Block<T> {
/// Creates an empty block.
fn new() -> Block<T> {
unsafe { mem::zeroed() }
}
/// Waits until the next pointer is set.
fn wait_next(&self) -> *mut Block<T> {
let backoff = Backoff::new();
loop {
let next = self.next.load(Ordering::Acquire);
if !next.is_null() {
return next;
}
backoff.snooze();
}
}
/// Sets the `DESTROY` bit in slots starting from `start` and destroys the block.
unsafe fn destroy(this: *mut Block<T>, start: usize) {
// It is not necessary to set the `DESTROY` bit in the last slot because that slot has begun
// destruction of the block.
for i in start..BLOCK_CAP - 1 {
let slot = (*this).slots.get_unchecked(i);
// Mark the `DESTROY` bit if a thread is still using the slot.
if slot.state.load(Ordering::Acquire) & READ == 0
&& slot.state.fetch_or(DESTROY, Ordering::AcqRel) & READ == 0
{
// If a thread is still using the slot, it will continue destruction of the block.
return;
}
}
// No thread is using the block, now it is safe to destroy it.
drop(Box::from_raw(this));
}
}
/// A position in a channel.
#[derive(Debug)]
struct Position<T> {
/// The index in the channel.
index: AtomicUsize,
/// The block in the linked list.
block: AtomicPtr<Block<T>>,
}
/// The token type for the list flavor.
pub struct ListToken {
/// The block of slots.
block: *const u8,
/// The offset into the block.
offset: usize,
}
impl Default for ListToken {
#[inline]
fn default() -> Self {
ListToken {
block: ptr::null(),
offset: 0,
}
}
}
/// Unbounded channel implemented as a linked list.
///
/// Each message sent into the channel is assigned a sequence number, i.e. an index. Indices are
/// represented as numbers of type `usize` and wrap on overflow.
///
/// Consecutive messages are grouped into blocks in order to put less pressure on the allocator and
/// improve cache efficiency.
pub struct Channel<T> {
/// The head of the channel.
head: CachePadded<Position<T>>,
/// The tail of the channel.
tail: CachePadded<Position<T>>,
/// Receivers waiting while the channel is empty and not disconnected.
receivers: SyncWaker,
/// Indicates that dropping a `Channel<T>` may drop messages of type `T`.
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Creates a new unbounded channel.
pub fn new() -> Self {
Channel {
head: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
tail: CachePadded::new(Position {
block: AtomicPtr::new(ptr::null_mut()),
index: AtomicUsize::new(0),
}),
receivers: SyncWaker::new(),
_marker: PhantomData,
}
}
/// Returns a receiver handle to the channel.
pub fn receiver(&self) -> Receiver<T> {
Receiver(self)
}
/// Returns a sender handle to the channel.
pub fn sender(&self) -> Sender<T> {
Sender(self)
}
/// Attempts to reserve a slot for sending a message.
fn start_send(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut tail = self.tail.index.load(Ordering::Acquire);
let mut block = self.tail.block.load(Ordering::Acquire);
let mut next_block = None;
loop {
// Check if the channel is disconnected.
if tail & MARK_BIT != 0 {
token.list.block = ptr::null();
return true;
}
// Calculate the offset of the index into the block.
let offset = (tail >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
backoff.snooze();
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
// If we're going to have to install the next block, allocate it in advance in order to
// make the wait for other threads as short as possible.
if offset + 1 == BLOCK_CAP && next_block.is_none() {
next_block = Some(Box::new(Block::<T>::new()));
}
// If this is the first message to be sent into the channel, we need to allocate the
// first block and install it.
if block.is_null() {
let new = Box::into_raw(Box::new(Block::<T>::new()));
if self.tail.block.compare_and_swap(block, new, Ordering::Release) == block {
self.head.block.store(new, Ordering::Release);
block = new;
} else {
next_block = unsafe { Some(Box::from_raw(new)) };
tail = self.tail.index.load(Ordering::Acquire);
block = self.tail.block.load(Ordering::Acquire);
continue;
}
}
let new_tail = tail + (1 << SHIFT);
// Try advancing the tail forward.
match self.tail.index
.compare_exchange_weak(
tail,
new_tail,
Ordering::SeqCst,
Ordering::Acquire,
)
{
Ok(_) => unsafe {
// If we've reached the end of the block, install the next one.
if offset + 1 == BLOCK_CAP {
let next_block = Box::into_raw(next_block.unwrap());
self.tail.block.store(next_block, Ordering::Release);
self.tail.index.fetch_add(1 << SHIFT, Ordering::Release);
(*block).next.store(next_block, Ordering::Release);
}
token.list.block = block as *const u8;
token.list.offset = offset;
return true;
}
Err(t) => {
tail = t;
block = self.tail.block.load(Ordering::Acquire);
backoff.spin();
}
}
}
}
/// Writes a message into the channel.
pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
// If there is no slot, the channel is disconnected.
if token.list.block.is_null() {
return Err(msg);
}
// Write the message into the slot.
let block = token.list.block as *mut Block<T>;
let offset = token.list.offset;
let slot = (*block).slots.get_unchecked(offset);
slot.msg.get().write(ManuallyDrop::new(msg));
slot.state.fetch_or(WRITE, Ordering::Release);
// Wake a sleeping receiver.
self.receivers.notify();
Ok(())
}
/// Attempts to reserve a slot for receiving a message.
fn start_recv(&self, token: &mut Token) -> bool {
let backoff = Backoff::new();
let mut head = self.head.index.load(Ordering::Acquire);
let mut block = self.head.block.load(Ordering::Acquire);
loop {
// Calculate the offset of the index into the block.
let offset = (head >> SHIFT) % LAP;
// If we reached the end of the block, wait until the next one is installed.
if offset == BLOCK_CAP {
backoff.snooze();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
let mut new_head = head + (1 << SHIFT);
if new_head & MARK_BIT == 0 {
atomic::fence(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::Relaxed);
// If the tail equals the head, that means the channel is empty.
if head >> SHIFT == tail >> SHIFT {
// If the channel is disconnected...
if tail & MARK_BIT != 0 {
// ...then receive an error.
token.list.block = ptr::null();
return true;
} else {
// Otherwise, the receive operation is not ready.
return false;
}
}
// If head and tail are not in the same block, set `MARK_BIT` in head.
if (head >> SHIFT) / LAP != (tail >> SHIFT) / LAP {
new_head |= MARK_BIT;
}
}
// The block can be null here only if the first message is being sent into the channel.
// In that case, just wait until it gets initialized.
if block.is_null() {
backoff.snooze();
head = self.head.index.load(Ordering::Acquire);
block = self.head.block.load(Ordering::Acquire);
continue;
}
// Try moving the head index forward.
match self.head.index
.compare_exchange_weak(
head,
new_head,
Ordering::SeqCst,
Ordering::Acquire,
)
{
Ok(_) => unsafe {
// If we've reached the end of the block, move to the next one.
if offset + 1 == BLOCK_CAP {
let next = (*block).wait_next();
let mut next_index = (new_head & !MARK_BIT).wrapping_add(1 << SHIFT);
if !(*next).next.load(Ordering::Relaxed).is_null() {
next_index |= MARK_BIT;
}
self.head.block.store(next, Ordering::Release);
self.head.index.store(next_index, Ordering::Release);
}
token.list.block = block as *const u8;
token.list.offset = offset;
return true;
}
Err(h) => {
head = h;
block = self.head.block.load(Ordering::Acquire);
backoff.spin();
}
}
}
}
/// Reads a message from the channel.
pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
if token.list.block.is_null() {
// The channel is disconnected.
return Err(());
}
// Read the message.
let block = token.list.block as *mut Block<T>;
let offset = token.list.offset;
let slot = (*block).slots.get_unchecked(offset);
slot.wait_write();
let m = slot.msg.get().read();
let msg = ManuallyDrop::into_inner(m);
// Destroy the block if we've reached the end, or if another thread wanted to destroy but
// couldn't because we were busy reading from the slot.
if offset + 1 == BLOCK_CAP {
Block::destroy(block, 0);
} else if slot.state.fetch_or(READ, Ordering::AcqRel) & DESTROY != 0 {
Block::destroy(block, offset + 1);
}
Ok(msg)
}
/// Attempts to send a message into the channel.
pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
self.send(msg, None).map_err(|err| match err {
SendTimeoutError::Disconnected(msg) => TrySendError::Disconnected(msg),
SendTimeoutError::Timeout(_) => unreachable!(),
})
}
/// Sends a message into the channel.
pub fn send(&self, msg: T, _deadline: Option<Instant>) -> Result<(), SendTimeoutError<T>> {
let token = &mut Token::default();
assert!(self.start_send(token));
unsafe {
self.write(token, msg)
.map_err(SendTimeoutError::Disconnected)
}
}
/// Attempts to receive a message without blocking.
pub fn try_recv(&self) -> Result<T, TryRecvError> {
let token = &mut Token::default();
if self.start_recv(token) {
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
} else {
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
let token = &mut Token::default();
loop {
// Try receiving a message several times.
let backoff = Backoff::new();
loop {
if self.start_recv(token) {
unsafe {
return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
}
}
if backoff.is_complete() {
break;
} else {
backoff.snooze();
}
}
// Prepare for blocking until a sender wakes us up.
Context::with(|cx| {
let oper = Operation::hook(token);
self.receivers.register(oper, cx);
// Has the channel become ready just now?
if !self.is_empty() || self.is_disconnected() {
let _ = cx.try_select(Selected::Aborted);
}
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted | Selected::Disconnected => {
self.receivers.unregister(oper).unwrap();
// If the channel was disconnected, we still have to check for remaining
// messages.
}
Selected::Operation(_) => {}
}
});
if let Some(d) = deadline {
if Instant::now() >= d {
return Err(RecvTimeoutError::Timeout);
}
}
}
}
/// Returns the current number of messages inside the channel.
pub fn len(&self) -> usize {
loop {
// Load the tail index, then load the head index.
let mut tail = self.tail.index.load(Ordering::SeqCst);
let mut head = self.head.index.load(Ordering::SeqCst);
// If the tail index didn't change, we've got consistent indices to work with.
if self.tail.index.load(Ordering::SeqCst) == tail {
// Erase the lower bits.
tail &= !((1 << SHIFT) - 1);
head &= !((1 << SHIFT) - 1);
// Rotate indices so that head falls into the first block.
let lap = (head >> SHIFT) / LAP;
tail = tail.wrapping_sub((lap * LAP) << SHIFT);
head = head.wrapping_sub((lap * LAP) << SHIFT);
// Remove the lower bits.
tail >>= SHIFT;
head >>= SHIFT;
// Fix up indices if they fall onto block ends.
if head == BLOCK_CAP {
head = 0;
tail -= LAP;
}
if tail == BLOCK_CAP {
tail += 1;
}
// Return the difference minus the number of blocks between tail and head.
return tail - head - tail / LAP;
}
}
}
/// Returns the capacity of the channel.
pub fn capacity(&self) -> Option<usize> {
None
}
/// Disconnects the channel and wakes up all blocked receivers.
pub fn disconnect(&self) {
let tail = self.tail.index.fetch_or(MARK_BIT, Ordering::SeqCst);
if tail & MARK_BIT == 0 {
self.receivers.disconnect();
}
}
/// Returns `true` if the channel is disconnected.
pub fn is_disconnected(&self) -> bool {
self.tail.index.load(Ordering::SeqCst) & MARK_BIT != 0
}
/// Returns `true` if the channel is empty.
pub fn is_empty(&self) -> bool {
let head = self.head.index.load(Ordering::SeqCst);
let tail = self.tail.index.load(Ordering::SeqCst);
head >> SHIFT == tail >> SHIFT
}
/// Returns `true` if the channel is full.
pub fn is_full(&self) -> bool {
false
}
}
impl<T> Drop for Channel<T> {
fn drop(&mut self) {
let mut head = self.head.index.load(Ordering::Relaxed);
let mut tail = self.tail.index.load(Ordering::Relaxed);
let mut block = self.head.block.load(Ordering::Relaxed);
// Erase the lower bits.
head &= !((1 << SHIFT) - 1);
tail &= !((1 << SHIFT) - 1);
unsafe {
// Drop all messages between head and tail and deallocate the heap-allocated blocks.
while head != tail {
let offset = (head >> SHIFT) % LAP;
if offset < BLOCK_CAP {
// Drop the message in the slot.
let slot = (*block).slots.get_unchecked(offset);
ManuallyDrop::drop(&mut *(*slot).msg.get());
} else {
// Deallocate the block and move to the next one.
let next = (*block).next.load(Ordering::Relaxed);
drop(Box::from_raw(block));
block = next;
}
head = head.wrapping_add(1 << SHIFT);
}
// Deallocate the last remaining block.
if !block.is_null() {
drop(Box::from_raw(block));
}
}
}
}
/// Receiver handle to a channel.
pub struct Receiver<'a, T: 'a>(&'a Channel<T>);
/// Sender handle to a channel.
pub struct Sender<'a, T: 'a>(&'a Channel<T>);
impl<'a, T> SelectHandle for Receiver<'a, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_recv(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.register(oper, cx);
self.is_ready()
}
fn unregister(&self, oper: Operation) {
self.0.receivers.unregister(oper);
}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
!self.0.is_empty() || self.0.is_disconnected()
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
self.0.receivers.watch(oper, cx);
self.is_ready()
}
fn unwatch(&self, oper: Operation) {
self.0.receivers.unwatch(oper);
}
}
impl<'a, T> SelectHandle for Sender<'a, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_send(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
fn unregister(&self, _oper: Operation) {}
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
fn is_ready(&self) -> bool {
true
}
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
fn unwatch(&self, _oper: Operation) {}
}
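The index packing here is likewise clearer with numbers: an index counts messages in its upper bits, reserves `SHIFT` low bits for metadata, and maps onto a 31-slot block per lap. A standalone sketch mirroring the arithmetic in `start_recv` (not taken from the crate):

```rust
fn main() {
    const LAP: usize = 32;
    const BLOCK_CAP: usize = LAP - 1;
    const SHIFT: usize = 1;
    const MARK_BIT: usize = 1;

    // A head index pointing at the 70th message, with the mark bit set
    // (meaning: this block is not the last one in the list).
    let head: usize = (70 << SHIFT) | MARK_BIT;

    // Offset of the message inside its block, as computed in `start_recv`.
    let offset = (head >> SHIFT) % LAP;
    assert_eq!(offset, 6); // 70 % 32
    assert!(offset < BLOCK_CAP);

    assert_eq!(head & MARK_BIT, MARK_BIT);
}
```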

third_party/rust/crossbeam-channel/src/flavors/mod.rs (new file)
@@ -0,0 +1,17 @@
//! Channel flavors.
//!
//! There are six flavors:
//!
//! 1. `after` - Channel that delivers a message after a certain amount of time.
//! 2. `array` - Bounded channel based on a preallocated array.
//! 3. `list` - Unbounded channel implemented as a linked list.
//! 4. `never` - Channel that never delivers messages.
//! 5. `tick` - Channel that delivers messages periodically.
//! 6. `zero` - Zero-capacity channel.
pub mod after;
pub mod array;
pub mod list;
pub mod never;
pub mod tick;
pub mod zero;
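For orientation, each flavor sits behind one of the crate's public constructors; the mapping below is inferred from the modules above and the crate's documented API.

```rust
extern crate crossbeam_channel;

use std::time::Duration;

use crossbeam_channel::{after, bounded, never, tick, unbounded};

fn main() {
    let (_s, _r) = bounded::<i32>(4);       // `array`: preallocated buffer
    let (_s, _r) = bounded::<i32>(0);       // `zero`: rendezvous channel
    let (_s, _r) = unbounded::<i32>();      // `list`: linked list of blocks
    let _r = after(Duration::from_secs(1)); // `after`: one delayed message
    let _r = tick(Duration::from_secs(1));  // `tick`: periodic messages
    let _r = never::<i32>();                // `never`: no messages, ever
}
```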

third_party/rust/crossbeam-channel/src/flavors/never.rs (new file)
@@ -0,0 +1,110 @@
//! Channel that never delivers messages.
//!
//! Messages cannot be sent into this kind of channel.
use std::marker::PhantomData;
use std::time::Instant;
use context::Context;
use err::{RecvTimeoutError, TryRecvError};
use select::{Operation, SelectHandle, Token};
use utils;
/// This flavor doesn't need a token.
pub type NeverToken = ();
/// Channel that never delivers messages.
pub struct Channel<T> {
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Creates a channel that never delivers messages.
#[inline]
pub fn new() -> Self {
Channel {
_marker: PhantomData,
}
}
/// Attempts to receive a message without blocking.
#[inline]
pub fn try_recv(&self) -> Result<T, TryRecvError> {
Err(TryRecvError::Empty)
}
/// Receives a message from the channel.
#[inline]
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
utils::sleep_until(deadline);
Err(RecvTimeoutError::Timeout)
}
/// Reads a message from the channel.
#[inline]
pub unsafe fn read(&self, _token: &mut Token) -> Result<T, ()> {
Err(())
}
/// Returns `true` if the channel is empty.
#[inline]
pub fn is_empty(&self) -> bool {
true
}
/// Returns `true` if the channel is full.
#[inline]
pub fn is_full(&self) -> bool {
true
}
/// Returns the number of messages in the channel.
#[inline]
pub fn len(&self) -> usize {
0
}
/// Returns the capacity of the channel.
#[inline]
pub fn capacity(&self) -> Option<usize> {
Some(0)
}
}
impl<T> SelectHandle for Channel<T> {
#[inline]
fn try_select(&self, _token: &mut Token) -> bool {
false
}
#[inline]
fn deadline(&self) -> Option<Instant> {
None
}
#[inline]
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unregister(&self, _oper: Operation) {}
#[inline]
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
#[inline]
fn is_ready(&self) -> bool {
false
}
#[inline]
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unwatch(&self, _oper: Operation) {}
}
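
Because `is_ready` is always `false` and `recv` just sleeps until the deadline,
a `never()` receiver acts as a select arm that can never fire. A minimal usage
sketch (public API only) that uses this to disable an optional ticker:

```rust
#[macro_use]
extern crate crossbeam_channel;

use std::time::{Duration, Instant};
use crossbeam_channel::{never, tick, Receiver};

fn main() {
    // Swap in `never()` to disable the arm without restructuring `select!`.
    let enabled = false;
    let ticker: Receiver<Instant> = if enabled {
        tick(Duration::from_millis(10))
    } else {
        never()
    };

    select! {
        recv(ticker) -> _ => unreachable!("never() delivers no messages"),
        default(Duration::from_millis(50)) => println!("timed out, as expected"),
    }
}
```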

third_party/rust/crossbeam-channel/src/flavors/tick.rs (vendored, new file)

@@ -0,0 +1,173 @@
//! Channel that delivers messages periodically.
//!
//! Messages cannot be sent into this kind of channel; they are materialized on demand.
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_utils::atomic::AtomicCell;
use context::Context;
use err::{RecvTimeoutError, TryRecvError};
use select::{Operation, SelectHandle, Token};
/// Result of a receive operation.
pub type TickToken = Option<Instant>;
/// Channel that delivers messages periodically.
pub struct Channel {
/// The instant at which the next message will be delivered.
delivery_time: AtomicCell<Instant>,
/// The time interval in which messages get delivered.
duration: Duration,
}
impl Channel {
/// Creates a channel that delivers messages periodically.
#[inline]
pub fn new(dur: Duration) -> Self {
Channel {
delivery_time: AtomicCell::new(Instant::now() + dur),
duration: dur,
}
}
/// Attempts to receive a message without blocking.
#[inline]
pub fn try_recv(&self) -> Result<Instant, TryRecvError> {
loop {
let now = Instant::now();
let delivery_time = self.delivery_time.load();
if now < delivery_time {
return Err(TryRecvError::Empty);
}
if self
.delivery_time
.compare_exchange(delivery_time, now + self.duration)
.is_ok()
{
return Ok(delivery_time);
}
}
}
/// Receives a message from the channel.
#[inline]
pub fn recv(&self, deadline: Option<Instant>) -> Result<Instant, RecvTimeoutError> {
loop {
// Compute the time to sleep until the next message or the deadline.
let offset = {
let mut delivery_time = self.delivery_time.load();
let now = Instant::now();
// Check if we can receive the next message.
if now >= delivery_time
&& self
.delivery_time
.compare_exchange(delivery_time, now + self.duration)
.is_ok()
{
return Ok(delivery_time);
}
// Check if the operation deadline has been reached.
if let Some(d) = deadline {
if now >= d {
return Err(RecvTimeoutError::Timeout);
}
delivery_time.min(d) - now
} else {
delivery_time - now
}
};
thread::sleep(offset);
}
}
/// Reads a message from the channel.
#[inline]
pub unsafe fn read(&self, token: &mut Token) -> Result<Instant, ()> {
token.tick.ok_or(())
}
/// Returns `true` if the channel is empty.
#[inline]
pub fn is_empty(&self) -> bool {
Instant::now() < self.delivery_time.load()
}
/// Returns `true` if the channel is full.
#[inline]
pub fn is_full(&self) -> bool {
!self.is_empty()
}
/// Returns the number of messages in the channel.
#[inline]
pub fn len(&self) -> usize {
if self.is_empty() {
0
} else {
1
}
}
/// Returns the capacity of the channel.
#[inline]
pub fn capacity(&self) -> Option<usize> {
Some(1)
}
}
impl SelectHandle for Channel {
#[inline]
fn try_select(&self, token: &mut Token) -> bool {
match self.try_recv() {
Ok(msg) => {
token.tick = Some(msg);
true
}
Err(TryRecvError::Disconnected) => {
token.tick = None;
true
}
Err(TryRecvError::Empty) => false,
}
}
#[inline]
fn deadline(&self) -> Option<Instant> {
Some(self.delivery_time.load())
}
#[inline]
fn register(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unregister(&self, _oper: Operation) {}
#[inline]
fn accept(&self, token: &mut Token, _cx: &Context) -> bool {
self.try_select(token)
}
#[inline]
fn is_ready(&self) -> bool {
!self.is_empty()
}
#[inline]
fn watch(&self, _oper: Operation, _cx: &Context) -> bool {
self.is_ready()
}
#[inline]
fn unwatch(&self, _oper: Operation) {}
}
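
The message this flavor delivers is the `Instant` at which it became available,
and `try_recv` bumps `delivery_time` with a compare-exchange so that racing
receivers consume at most one message per period. A minimal sketch through the
public `tick` constructor:

```rust
extern crate crossbeam_channel;

use std::time::{Duration, Instant};
use crossbeam_channel::tick;

fn main() {
    let start = Instant::now();
    let ticker = tick(Duration::from_millis(50));

    for _ in 0..3 {
        // Blocks until the next delivery time; the message is the `Instant`
        // at which it became available.
        let at = ticker.recv().unwrap();
        println!("tick after {:?}", at.duration_since(start));
    }
}
```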

third_party/rust/crossbeam-channel/src/flavors/zero.rs (vendored, new file)

@@ -0,0 +1,461 @@
//! Zero-capacity channel.
//!
//! This kind of channel is also known as *rendezvous* channel.
use std::cell::UnsafeCell;
use std::marker::PhantomData;
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Instant;
use crossbeam_utils::Backoff;
use context::Context;
use err::{RecvTimeoutError, SendTimeoutError, TryRecvError, TrySendError};
use select::{Operation, SelectHandle, Selected, Token};
use utils::Mutex;
use waker::Waker;
/// A pointer to a packet.
pub type ZeroToken = usize;
/// A slot for passing one message from a sender to a receiver.
struct Packet<T> {
/// Equals `true` if the packet is allocated on the stack.
on_stack: bool,
/// Equals `true` once the packet is ready for reading or writing.
ready: AtomicBool,
/// The message.
msg: UnsafeCell<Option<T>>,
}
impl<T> Packet<T> {
/// Creates an empty packet on the stack.
fn empty_on_stack() -> Packet<T> {
Packet {
on_stack: true,
ready: AtomicBool::new(false),
msg: UnsafeCell::new(None),
}
}
/// Creates an empty packet on the heap.
fn empty_on_heap() -> Box<Packet<T>> {
Box::new(Packet {
on_stack: false,
ready: AtomicBool::new(false),
msg: UnsafeCell::new(None),
})
}
/// Creates a packet on the stack, containing a message.
fn message_on_stack(msg: T) -> Packet<T> {
Packet {
on_stack: true,
ready: AtomicBool::new(false),
msg: UnsafeCell::new(Some(msg)),
}
}
/// Waits until the packet becomes ready for reading or writing.
fn wait_ready(&self) {
let backoff = Backoff::new();
while !self.ready.load(Ordering::Acquire) {
backoff.snooze();
}
}
}
/// Inner representation of a zero-capacity channel.
struct Inner {
/// Senders waiting to pair up with a receive operation.
senders: Waker,
/// Receivers waiting to pair up with a send operation.
receivers: Waker,
/// Equals `true` when the channel is disconnected.
is_disconnected: bool,
}
/// Zero-capacity channel.
pub struct Channel<T> {
/// Inner representation of the channel.
inner: Mutex<Inner>,
/// Indicates that dropping a `Channel<T>` may drop values of type `T`.
_marker: PhantomData<T>,
}
impl<T> Channel<T> {
/// Constructs a new zero-capacity channel.
pub fn new() -> Self {
Channel {
inner: Mutex::new(Inner {
senders: Waker::new(),
receivers: Waker::new(),
is_disconnected: false,
}),
_marker: PhantomData,
}
}
/// Returns a receiver handle to the channel.
pub fn receiver(&self) -> Receiver<T> {
Receiver(self)
}
/// Returns a sender handle to the channel.
pub fn sender(&self) -> Sender<T> {
Sender(self)
}
/// Attempts to reserve a slot for sending a message.
fn start_send(&self, token: &mut Token) -> bool {
let mut inner = self.inner.lock();
// If there's a waiting receiver, pair up with it.
if let Some(operation) = inner.receivers.try_select() {
token.zero = operation.packet;
true
} else if inner.is_disconnected {
token.zero = 0;
true
} else {
false
}
}
/// Writes a message into the packet.
pub unsafe fn write(&self, token: &mut Token, msg: T) -> Result<(), T> {
// If there is no packet, the channel is disconnected.
if token.zero == 0 {
return Err(msg);
}
let packet = &*(token.zero as *const Packet<T>);
packet.msg.get().write(Some(msg));
packet.ready.store(true, Ordering::Release);
Ok(())
}
/// Attempts to pair up with a sender.
fn start_recv(&self, token: &mut Token) -> bool {
let mut inner = self.inner.lock();
// If there's a waiting sender, pair up with it.
if let Some(operation) = inner.senders.try_select() {
token.zero = operation.packet;
true
} else if inner.is_disconnected {
token.zero = 0;
true
} else {
false
}
}
/// Reads a message from the packet.
pub unsafe fn read(&self, token: &mut Token) -> Result<T, ()> {
// If there is no packet, the channel is disconnected.
if token.zero == 0 {
return Err(());
}
let packet = &*(token.zero as *const Packet<T>);
if packet.on_stack {
// The message has been in the packet from the beginning, so there is no need to wait
// for it. However, after reading the message, we need to set `ready` to `true` in
// order to signal that the packet can be destroyed.
let msg = packet.msg.get().replace(None).unwrap();
packet.ready.store(true, Ordering::Release);
Ok(msg)
} else {
// Wait until the message becomes available, then read it and destroy the
// heap-allocated packet.
packet.wait_ready();
let msg = packet.msg.get().replace(None).unwrap();
drop(Box::from_raw(packet as *const Packet<T> as *mut Packet<T>));
Ok(msg)
}
}
/// Attempts to send a message into the channel.
pub fn try_send(&self, msg: T) -> Result<(), TrySendError<T>> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting receiver, pair up with it.
if let Some(operation) = inner.receivers.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe {
self.write(token, msg).ok().unwrap();
}
Ok(())
} else if inner.is_disconnected {
Err(TrySendError::Disconnected(msg))
} else {
Err(TrySendError::Full(msg))
}
}
/// Sends a message into the channel.
pub fn send(&self, msg: T, deadline: Option<Instant>) -> Result<(), SendTimeoutError<T>> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting receiver, pair up with it.
if let Some(operation) = inner.receivers.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe {
self.write(token, msg).ok().unwrap();
}
return Ok(());
}
if inner.is_disconnected {
return Err(SendTimeoutError::Disconnected(msg));
}
Context::with(|cx| {
// Prepare for blocking until a receiver wakes us up.
let oper = Operation::hook(token);
let packet = Packet::<T>::message_on_stack(msg);
inner
.senders
.register_with_packet(oper, &packet as *const Packet<T> as usize, cx);
inner.receivers.notify();
drop(inner);
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted => {
self.inner.lock().senders.unregister(oper).unwrap();
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
Err(SendTimeoutError::Timeout(msg))
}
Selected::Disconnected => {
self.inner.lock().senders.unregister(oper).unwrap();
let msg = unsafe { packet.msg.get().replace(None).unwrap() };
Err(SendTimeoutError::Disconnected(msg))
}
Selected::Operation(_) => {
// Wait until the message is read, then drop the packet.
packet.wait_ready();
Ok(())
}
}
})
}
/// Attempts to receive a message without blocking.
pub fn try_recv(&self) -> Result<T, TryRecvError> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting sender, pair up with it.
if let Some(operation) = inner.senders.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe { self.read(token).map_err(|_| TryRecvError::Disconnected) }
} else if inner.is_disconnected {
Err(TryRecvError::Disconnected)
} else {
Err(TryRecvError::Empty)
}
}
/// Receives a message from the channel.
pub fn recv(&self, deadline: Option<Instant>) -> Result<T, RecvTimeoutError> {
let token = &mut Token::default();
let mut inner = self.inner.lock();
// If there's a waiting sender, pair up with it.
if let Some(operation) = inner.senders.try_select() {
token.zero = operation.packet;
drop(inner);
unsafe {
return self.read(token).map_err(|_| RecvTimeoutError::Disconnected);
}
}
if inner.is_disconnected {
return Err(RecvTimeoutError::Disconnected);
}
Context::with(|cx| {
// Prepare for blocking until a sender wakes us up.
let oper = Operation::hook(token);
let packet = Packet::<T>::empty_on_stack();
inner
.receivers
.register_with_packet(oper, &packet as *const Packet<T> as usize, cx);
inner.senders.notify();
drop(inner);
// Block the current thread.
let sel = cx.wait_until(deadline);
match sel {
Selected::Waiting => unreachable!(),
Selected::Aborted => {
self.inner.lock().receivers.unregister(oper).unwrap();
Err(RecvTimeoutError::Timeout)
}
Selected::Disconnected => {
self.inner.lock().receivers.unregister(oper).unwrap();
Err(RecvTimeoutError::Disconnected)
}
Selected::Operation(_) => {
// Wait until the message is provided, then read it.
packet.wait_ready();
unsafe { Ok(packet.msg.get().replace(None).unwrap()) }
}
}
})
}
/// Disconnects the channel and wakes up all blocked receivers.
pub fn disconnect(&self) {
let mut inner = self.inner.lock();
if !inner.is_disconnected {
inner.is_disconnected = true;
inner.senders.disconnect();
inner.receivers.disconnect();
}
}
/// Returns the current number of messages inside the channel.
pub fn len(&self) -> usize {
0
}
/// Returns the capacity of the channel.
pub fn capacity(&self) -> Option<usize> {
Some(0)
}
/// Returns `true` if the channel is empty.
pub fn is_empty(&self) -> bool {
true
}
/// Returns `true` if the channel is full.
pub fn is_full(&self) -> bool {
true
}
}
/// Receiver handle to a channel.
pub struct Receiver<'a, T: 'a>(&'a Channel<T>);
/// Sender handle to a channel.
pub struct Sender<'a, T: 'a>(&'a Channel<T>);
impl<'a, T> SelectHandle for Receiver<'a, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_recv(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
let packet = Box::into_raw(Packet::<T>::empty_on_heap());
let mut inner = self.0.inner.lock();
inner
.receivers
.register_with_packet(oper, packet as usize, cx);
inner.senders.notify();
inner.senders.can_select() || inner.is_disconnected
}
fn unregister(&self, oper: Operation) {
if let Some(operation) = self.0.inner.lock().receivers.unregister(oper) {
unsafe {
drop(Box::from_raw(operation.packet as *mut Packet<T>));
}
}
}
fn accept(&self, token: &mut Token, cx: &Context) -> bool {
token.zero = cx.wait_packet();
true
}
fn is_ready(&self) -> bool {
let inner = self.0.inner.lock();
inner.senders.can_select() || inner.is_disconnected
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
let mut inner = self.0.inner.lock();
inner.receivers.watch(oper, cx);
inner.senders.can_select() || inner.is_disconnected
}
fn unwatch(&self, oper: Operation) {
let mut inner = self.0.inner.lock();
inner.receivers.unwatch(oper);
}
}
impl<'a, T> SelectHandle for Sender<'a, T> {
fn try_select(&self, token: &mut Token) -> bool {
self.0.start_send(token)
}
fn deadline(&self) -> Option<Instant> {
None
}
fn register(&self, oper: Operation, cx: &Context) -> bool {
let packet = Box::into_raw(Packet::<T>::empty_on_heap());
let mut inner = self.0.inner.lock();
inner
.senders
.register_with_packet(oper, packet as usize, cx);
inner.receivers.notify();
inner.receivers.can_select() || inner.is_disconnected
}
fn unregister(&self, oper: Operation) {
if let Some(operation) = self.0.inner.lock().senders.unregister(oper) {
unsafe {
drop(Box::from_raw(operation.packet as *mut Packet<T>));
}
}
}
fn accept(&self, token: &mut Token, cx: &Context) -> bool {
token.zero = cx.wait_packet();
true
}
fn is_ready(&self) -> bool {
let inner = self.0.inner.lock();
inner.receivers.can_select() || inner.is_disconnected
}
fn watch(&self, oper: Operation, cx: &Context) -> bool {
let mut inner = self.0.inner.lock();
inner.senders.watch(oper, cx);
inner.receivers.can_select() || inner.is_disconnected
}
fn unwatch(&self, oper: Operation) {
let mut inner = self.0.inner.lock();
inner.senders.unwatch(oper);
}
}
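
From the public API this flavor is selected with `bounded(0)`: a send blocks
until it can rendezvous with a receive, handing the message over through a
stack-allocated `Packet` as in `send`/`recv` above. A minimal sketch:

```rust
extern crate crossbeam_channel;

use std::thread;
use crossbeam_channel::bounded;

fn main() {
    // Capacity 0 selects the zero (rendezvous) flavor: `send` blocks until
    // a receiver shows up on the other side.
    let (s, r) = bounded(0);

    let handle = thread::spawn(move || {
        s.send("ping").unwrap();
    });

    assert_eq!(r.recv(), Ok("ping"));
    handle.join().unwrap();
}
```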

third_party/rust/crossbeam-channel/src/lib.rs (vendored, new file)

@@ -0,0 +1,372 @@
//! Multi-producer multi-consumer channels for message passing.
//!
//! This crate is an alternative to [`std::sync::mpsc`] with more features and better performance.
//!
//! # Hello, world!
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! // Create a channel of unbounded capacity.
//! let (s, r) = unbounded();
//!
//! // Send a message into the channel.
//! s.send("Hello, world!").unwrap();
//!
//! // Receive the message from the channel.
//! assert_eq!(r.recv(), Ok("Hello, world!"));
//! ```
//!
//! # Channel types
//!
//! Channels can be created using two functions:
//!
//! * [`bounded`] creates a channel of bounded capacity, i.e. there is a limit to how many messages
//! it can hold at a time.
//!
//! * [`unbounded`] creates a channel of unbounded capacity, i.e. it can hold any number of
//! messages at a time.
//!
//! Both functions return a [`Sender`] and a [`Receiver`], which represent the two opposite sides
//! of a channel.
//!
//! Creating a bounded channel:
//!
//! ```
//! use crossbeam_channel::bounded;
//!
//! // Create a channel that can hold at most 5 messages at a time.
//! let (s, r) = bounded(5);
//!
//! // Can send only 5 messages without blocking.
//! for i in 0..5 {
//! s.send(i).unwrap();
//! }
//!
//! // Another call to `send` would block because the channel is full.
//! // s.send(5).unwrap();
//! ```
//!
//! Creating an unbounded channel:
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! // Create an unbounded channel.
//! let (s, r) = unbounded();
//!
//! // Can send any number of messages into the channel without blocking.
//! for i in 0..1000 {
//! s.send(i).unwrap();
//! }
//! ```
//!
//! A special case is the zero-capacity channel, which cannot hold any messages. Instead, send and
//! receive operations must appear at the same time in order to pair up and pass the message over:
//!
//! ```
//! use std::thread;
//! use crossbeam_channel::bounded;
//!
//! // Create a zero-capacity channel.
//! let (s, r) = bounded(0);
//!
//! // Sending blocks until a receive operation appears on the other side.
//! thread::spawn(move || s.send("Hi!").unwrap());
//!
//! // Receiving blocks until a send operation appears on the other side.
//! assert_eq!(r.recv(), Ok("Hi!"));
//! ```
//!
//! # Sharing channels
//!
//! Senders and receivers can be cloned and sent to other threads:
//!
//! ```
//! use std::thread;
//! use crossbeam_channel::bounded;
//!
//! let (s1, r1) = bounded(0);
//! let (s2, r2) = (s1.clone(), r1.clone());
//!
//! // Spawn a thread that receives a message and then sends one.
//! thread::spawn(move || {
//! r2.recv().unwrap();
//! s2.send(2).unwrap();
//! });
//!
//! // Send a message and then receive one.
//! s1.send(1).unwrap();
//! r1.recv().unwrap();
//! ```
//!
//! Note that cloning only creates a new handle to the same sending or receiving side. It does not
//! create a separate stream of messages in any way:
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! let (s1, r1) = unbounded();
//! let (s2, r2) = (s1.clone(), r1.clone());
//! let (s3, r3) = (s2.clone(), r2.clone());
//!
//! s1.send(10).unwrap();
//! s2.send(20).unwrap();
//! s3.send(30).unwrap();
//!
//! assert_eq!(r3.recv(), Ok(10));
//! assert_eq!(r1.recv(), Ok(20));
//! assert_eq!(r2.recv(), Ok(30));
//! ```
//!
//! It's also possible to share senders and receivers by reference:
//!
//! ```
//! # extern crate crossbeam_channel;
//! # extern crate crossbeam_utils;
//! # fn main() {
//! use std::thread;
//! use crossbeam_channel::bounded;
//! use crossbeam_utils::thread::scope;
//!
//! let (s, r) = bounded(0);
//!
//! scope(|scope| {
//! // Spawn a thread that receives a message and then sends one.
//! scope.spawn(|_| {
//! r.recv().unwrap();
//! s.send(2).unwrap();
//! });
//!
//! // Send a message and then receive one.
//! s.send(1).unwrap();
//! r.recv().unwrap();
//! }).unwrap();
//! # }
//! ```
//!
//! # Disconnection
//!
//! When all senders or all receivers associated with a channel get dropped, the channel becomes
//! disconnected. No more messages can be sent, but any remaining messages can still be received.
//! Send and receive operations on a disconnected channel never block.
//!
//! ```
//! use crossbeam_channel::{unbounded, RecvError};
//!
//! let (s, r) = unbounded();
//! s.send(1).unwrap();
//! s.send(2).unwrap();
//! s.send(3).unwrap();
//!
//! // The only sender is dropped, disconnecting the channel.
//! drop(s);
//!
//! // The remaining messages can be received.
//! assert_eq!(r.recv(), Ok(1));
//! assert_eq!(r.recv(), Ok(2));
//! assert_eq!(r.recv(), Ok(3));
//!
//! // There are no more messages in the channel.
//! assert!(r.is_empty());
//!
//! // Note that calling `r.recv()` does not block.
//! // Instead, `Err(RecvError)` is returned immediately.
//! assert_eq!(r.recv(), Err(RecvError));
//! ```
//!
//! # Blocking operations
//!
//! Send and receive operations come in three flavors:
//!
//! * Non-blocking (returns immediately with success or failure).
//! * Blocking (waits until the operation succeeds or the channel becomes disconnected).
//! * Blocking with a timeout (blocks only for a certain duration of time).
//!
//! A simple example showing the difference between non-blocking and blocking operations:
//!
//! ```
//! use crossbeam_channel::{bounded, RecvError, TryRecvError};
//!
//! let (s, r) = bounded(1);
//!
//! // Send a message into the channel.
//! s.send("foo").unwrap();
//!
//! // This call would block because the channel is full.
//! // s.send("bar").unwrap();
//!
//! // Receive the message.
//! assert_eq!(r.recv(), Ok("foo"));
//!
//! // This call would block because the channel is empty.
//! // r.recv();
//!
//! // Try receiving a message without blocking.
//! assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
//!
//! // Disconnect the channel.
//! drop(s);
//!
//! // This call doesn't block because the channel is now disconnected.
//! assert_eq!(r.recv(), Err(RecvError));
//! ```
//!
//! # Iteration
//!
//! Receivers can be used as iterators. For example, the [`iter`] method creates an iterator that
//! receives messages until the channel becomes empty and disconnected. Note that iteration may
//! block while waiting for the next message to arrive.
//!
//! ```
//! use std::thread;
//! use crossbeam_channel::unbounded;
//!
//! let (s, r) = unbounded();
//!
//! thread::spawn(move || {
//! s.send(1).unwrap();
//! s.send(2).unwrap();
//! s.send(3).unwrap();
//! drop(s); // Disconnect the channel.
//! });
//!
//! // Collect all messages from the channel.
//! // Note that the call to `collect` blocks until the sender is dropped.
//! let v: Vec<_> = r.iter().collect();
//!
//! assert_eq!(v, [1, 2, 3]);
//! ```
//!
//! A non-blocking iterator can be created using [`try_iter`], which receives all available
//! messages without blocking:
//!
//! ```
//! use crossbeam_channel::unbounded;
//!
//! let (s, r) = unbounded();
//! s.send(1).unwrap();
//! s.send(2).unwrap();
//! s.send(3).unwrap();
//! // No need to drop the sender.
//!
//! // Receive all messages currently in the channel.
//! let v: Vec<_> = r.try_iter().collect();
//!
//! assert_eq!(v, [1, 2, 3]);
//! ```
//!
//! # Selection
//!
//! The [`select!`] macro allows you to define a set of channel operations, wait until any one of
//! them becomes ready, and finally execute it. If multiple operations are ready at the same time,
//! a random one among them is selected.
//!
//! It is also possible to define a `default` case that gets executed if none of the operations are
//! ready, either right away or for a certain duration of time.
//!
//! An operation is considered to be ready if it doesn't have to block. Note that it is ready even
//! when it will simply return an error because the channel is disconnected.
//!
//! An example of receiving a message from two channels:
//!
//! ```
//! # #[macro_use]
//! # extern crate crossbeam_channel;
//! # fn main() {
//! use std::thread;
//! use std::time::Duration;
//! use crossbeam_channel::unbounded;
//!
//! let (s1, r1) = unbounded();
//! let (s2, r2) = unbounded();
//!
//! thread::spawn(move || s1.send(10).unwrap());
//! thread::spawn(move || s2.send(20).unwrap());
//!
//! // At most one of these two receive operations will be executed.
//! select! {
//! recv(r1) -> msg => assert_eq!(msg, Ok(10)),
//! recv(r2) -> msg => assert_eq!(msg, Ok(20)),
//! default(Duration::from_secs(1)) => println!("timed out"),
//! }
//! # }
//! ```
//!
//! If you need to select over a dynamically created list of channel operations, use [`Select`]
//! instead. The [`select!`] macro is just a convenience wrapper around [`Select`].
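//!
//! A minimal sketch of selecting over a dynamically built list of receivers
//! (same `Select` API as above, nothing beyond what this crate exports):
//!
//! ```
//! use crossbeam_channel::{unbounded, Select};
//!
//! let (s1, r1) = unbounded();
//! let (s2, r2) = unbounded();
//! s1.send(10).unwrap();
//!
//! // Add one `recv` operation per receiver; each call returns its index.
//! let receivers = vec![r1, r2];
//! let mut sel = Select::new();
//! for r in &receivers {
//!     sel.recv(r);
//! }
//!
//! // Block until some operation becomes ready, then complete it.
//! let oper = sel.select();
//! let index = oper.index();
//! assert_eq!(oper.recv(&receivers[index]), Ok(10));
//! # drop(s2);
//! ```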
//!
//! # Extra channels
//!
//! Three functions can create special kinds of channels, all of which return just a [`Receiver`]
//! handle:
//!
//! * [`after`] creates a channel that delivers a single message after a certain duration of time.
//! * [`tick`] creates a channel that delivers messages periodically.
//! * [`never`] creates a channel that never delivers messages.
//!
//! These channels are very efficient because messages get lazily generated on receive operations.
//!
//! An example that prints elapsed time every 50 milliseconds for the duration of 1 second:
//!
//! ```
//! # #[macro_use]
//! # extern crate crossbeam_channel;
//! # fn main() {
//! use std::time::{Duration, Instant};
//! use crossbeam_channel::{after, tick};
//!
//! let start = Instant::now();
//! let ticker = tick(Duration::from_millis(50));
//! let timeout = after(Duration::from_secs(1));
//!
//! loop {
//! select! {
//! recv(ticker) -> _ => println!("elapsed: {:?}", start.elapsed()),
//! recv(timeout) -> _ => break,
//! }
//! }
//! # }
//! ```
//!
//! [`std::sync::mpsc`]: https://doc.rust-lang.org/std/sync/mpsc/index.html
//! [`unbounded`]: fn.unbounded.html
//! [`bounded`]: fn.bounded.html
//! [`after`]: fn.after.html
//! [`tick`]: fn.tick.html
//! [`never`]: fn.never.html
//! [`send`]: struct.Sender.html#method.send
//! [`recv`]: struct.Receiver.html#method.recv
//! [`iter`]: struct.Receiver.html#method.iter
//! [`try_iter`]: struct.Receiver.html#method.try_iter
//! [`select!`]: macro.select.html
//! [`Select`]: struct.Select.html
//! [`Sender`]: struct.Sender.html
//! [`Receiver`]: struct.Receiver.html
#![warn(missing_docs)]
#![warn(missing_debug_implementations)]
extern crate crossbeam_utils;
extern crate smallvec;
mod channel;
mod context;
mod counter;
mod err;
mod flavors;
mod select;
mod select_macro;
mod utils;
mod waker;
pub use channel::{after, never, tick};
pub use channel::{bounded, unbounded};
pub use channel::{IntoIter, Iter, TryIter};
pub use channel::{Receiver, Sender};
pub use select::{Select, SelectedOperation};
pub use err::{RecvError, RecvTimeoutError, TryRecvError};
pub use err::{ReadyTimeoutError, SelectTimeoutError, TryReadyError, TrySelectError};
pub use err::{SendError, SendTimeoutError, TrySendError};

third_party/rust/crossbeam-channel/src/select.rs (vendored, new file, 1078 lines)

Diff not shown because of its large size.

third_party/rust/crossbeam-channel/src/select_macro.rs (vendored, new file, 1201 lines)

Diff not shown because of its large size.

third_party/rust/crossbeam-channel/src/utils.rs (vendored, new file)

@@ -0,0 +1,118 @@
//! Miscellaneous utilities.
use std::cell::{Cell, UnsafeCell};
use std::num::Wrapping;
use std::ops::{Deref, DerefMut};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_utils::Backoff;
/// Randomly shuffles a slice.
pub fn shuffle<T>(v: &mut [T]) {
let len = v.len();
if len <= 1 {
return;
}
thread_local! {
static RNG: Cell<Wrapping<u32>> = Cell::new(Wrapping(1406868647));
}
let _ = RNG.try_with(|rng| {
for i in 1..len {
// This is the 32-bit variant of Xorshift.
//
// Source: https://en.wikipedia.org/wiki/Xorshift
let mut x = rng.get();
x ^= x << 13;
x ^= x >> 17;
x ^= x << 5;
rng.set(x);
let x = x.0;
let n = i + 1;
// This is a fast alternative to `let j = x % n`.
//
// Author: Daniel Lemire
// Source: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
let j = ((x as u64).wrapping_mul(n as u64) >> 32) as u32 as usize;
v.swap(i, j);
}
});
}
/// Sleeps until the deadline, or forever if the deadline isn't specified.
pub fn sleep_until(deadline: Option<Instant>) {
loop {
match deadline {
None => thread::sleep(Duration::from_secs(1000)),
Some(d) => {
let now = Instant::now();
if now >= d {
break;
}
thread::sleep(d - now);
}
}
}
}
/// A simple spinlock-based mutex.
pub struct Mutex<T> {
flag: AtomicBool,
value: UnsafeCell<T>,
}
impl<T> Mutex<T> {
/// Returns a new mutex initialized with `value`.
pub fn new(value: T) -> Mutex<T> {
Mutex {
flag: AtomicBool::new(false),
value: UnsafeCell::new(value),
}
}
/// Locks the mutex.
pub fn lock(&self) -> MutexGuard<'_, T> {
let backoff = Backoff::new();
while self.flag.swap(true, Ordering::Acquire) {
backoff.snooze();
}
MutexGuard {
parent: self,
}
}
}
/// A guard holding a mutex locked.
pub struct MutexGuard<'a, T: 'a> {
parent: &'a Mutex<T>,
}
impl<'a, T> Drop for MutexGuard<'a, T> {
fn drop(&mut self) {
self.parent.flag.store(false, Ordering::Release);
}
}
impl<'a, T> Deref for MutexGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe {
&*self.parent.value.get()
}
}
}
impl<'a, T> DerefMut for MutexGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe {
&mut *self.parent.value.get()
}
}
}
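
The index reduction inside `shuffle` replaces `x % n` with a multiply-and-shift:
for a uniformly random 32-bit `x`, `(x * n) >> 32` lands (almost) uniformly in
`[0, n)`. A standalone sketch of just that step (`reduce` is an illustrative
name, not part of this crate):

```rust
// Maps a 32-bit value x into [0, n) without a division, as in `shuffle`.
fn reduce(x: u32, n: u32) -> u32 {
    ((x as u64).wrapping_mul(n as u64) >> 32) as u32
}

fn main() {
    let n = 10;
    for &x in &[0u32, 1 << 16, 0x8000_0000, u32::max_value()] {
        let j = reduce(x, n);
        assert!(j < n);
        println!("{:>10} -> {}", x, j);
    }
}
```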

third_party/rust/crossbeam-channel/src/waker.rs (vendored, new file)

@@ -0,0 +1,285 @@
//! Waking mechanism for threads blocked on channel operations.
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread::{self, ThreadId};
use context::Context;
use select::{Operation, Selected};
use utils::Mutex;
/// Represents a thread blocked on a specific channel operation.
pub struct Entry {
/// The operation.
pub oper: Operation,
/// Optional packet.
pub packet: usize,
/// Context associated with the thread owning this operation.
pub cx: Context,
}
/// A queue of threads blocked on channel operations.
///
/// This data structure is used by threads to register blocking operations and get woken up once
/// an operation becomes ready.
pub struct Waker {
/// A list of select operations.
selectors: Vec<Entry>,
/// A list of operations waiting to be ready.
observers: Vec<Entry>,
}
impl Waker {
/// Creates a new `Waker`.
#[inline]
pub fn new() -> Self {
Waker {
selectors: Vec::new(),
observers: Vec::new(),
}
}
/// Registers a select operation.
#[inline]
pub fn register(&mut self, oper: Operation, cx: &Context) {
self.register_with_packet(oper, 0, cx);
}
/// Registers a select operation and a packet.
#[inline]
pub fn register_with_packet(&mut self, oper: Operation, packet: usize, cx: &Context) {
self.selectors.push(Entry {
oper,
packet,
cx: cx.clone(),
});
}
/// Unregisters a select operation.
#[inline]
pub fn unregister(&mut self, oper: Operation) -> Option<Entry> {
if let Some((i, _)) = self
.selectors
.iter()
.enumerate()
.find(|&(_, entry)| entry.oper == oper)
{
let entry = self.selectors.remove(i);
Some(entry)
} else {
None
}
}
/// Attempts to find another thread's entry, select the operation, and wake it up.
#[inline]
pub fn try_select(&mut self) -> Option<Entry> {
let mut entry = None;
if !self.selectors.is_empty() {
let thread_id = current_thread_id();
for i in 0..self.selectors.len() {
// Does the entry belong to a different thread?
if self.selectors[i].cx.thread_id() != thread_id {
// Try selecting this operation.
let sel = Selected::Operation(self.selectors[i].oper);
let res = self.selectors[i].cx.try_select(sel);
if res.is_ok() {
// Provide the packet.
self.selectors[i].cx.store_packet(self.selectors[i].packet);
// Wake the thread up.
self.selectors[i].cx.unpark();
// Remove the entry from the queue to keep it clean and improve
// performance.
entry = Some(self.selectors.remove(i));
break;
}
}
}
}
entry
}
/// Returns `true` if there is an entry which can be selected by the current thread.
#[inline]
pub fn can_select(&self) -> bool {
if self.selectors.is_empty() {
false
} else {
let thread_id = current_thread_id();
self.selectors.iter().any(|entry| {
entry.cx.thread_id() != thread_id && entry.cx.selected() == Selected::Waiting
})
}
}
/// Registers an operation waiting to be ready.
#[inline]
pub fn watch(&mut self, oper: Operation, cx: &Context) {
self.observers.push(Entry {
oper,
packet: 0,
cx: cx.clone(),
});
}
/// Unregisters an operation waiting to be ready.
#[inline]
pub fn unwatch(&mut self, oper: Operation) {
self.observers.retain(|e| e.oper != oper);
}
/// Notifies all operations waiting to be ready.
#[inline]
pub fn notify(&mut self) {
for entry in self.observers.drain(..) {
if entry.cx.try_select(Selected::Operation(entry.oper)).is_ok() {
entry.cx.unpark();
}
}
}
/// Notifies all registered operations that the channel is disconnected.
#[inline]
pub fn disconnect(&mut self) {
for entry in self.selectors.iter() {
if entry.cx.try_select(Selected::Disconnected).is_ok() {
// Wake the thread up.
//
// Here we don't remove the entry from the queue. Registered threads must
// unregister from the waker by themselves. They might also want to recover the
// packet value and destroy it, if necessary.
entry.cx.unpark();
}
}
self.notify();
}
}
impl Drop for Waker {
#[inline]
fn drop(&mut self) {
debug_assert_eq!(self.selectors.len(), 0);
debug_assert_eq!(self.observers.len(), 0);
}
}
/// A waker that can be shared among threads without locking.
///
/// This is a simple wrapper around `Waker` that internally uses a mutex for synchronization.
pub struct SyncWaker {
/// The inner `Waker`.
inner: Mutex<Waker>,
/// `true` if the waker is empty.
is_empty: AtomicBool,
}
impl SyncWaker {
/// Creates a new `SyncWaker`.
#[inline]
pub fn new() -> Self {
SyncWaker {
inner: Mutex::new(Waker::new()),
is_empty: AtomicBool::new(false),
}
}
/// Registers the current thread with an operation.
#[inline]
pub fn register(&self, oper: Operation, cx: &Context) {
let mut inner = self.inner.lock();
inner.register(oper, cx);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
/// Unregisters an operation previously registered by the current thread.
#[inline]
pub fn unregister(&self, oper: Operation) -> Option<Entry> {
let mut inner = self.inner.lock();
let entry = inner.unregister(oper);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
entry
}
/// Attempts to find one thread (not the current one), select its operation, and wake it up.
#[inline]
pub fn notify(&self) {
if !self.is_empty.load(Ordering::SeqCst) {
let mut inner = self.inner.lock();
inner.try_select();
inner.notify();
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
}
/// Registers an operation waiting to be ready.
#[inline]
pub fn watch(&self, oper: Operation, cx: &Context) {
let mut inner = self.inner.lock();
inner.watch(oper, cx);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
/// Unregisters an operation waiting to be ready.
#[inline]
pub fn unwatch(&self, oper: Operation) {
let mut inner = self.inner.lock();
inner.unwatch(oper);
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
/// Notifies all threads that the channel is disconnected.
#[inline]
pub fn disconnect(&self) {
let mut inner = self.inner.lock();
inner.disconnect();
self.is_empty.store(
inner.selectors.is_empty() && inner.observers.is_empty(),
Ordering::SeqCst,
);
}
}
impl Drop for SyncWaker {
#[inline]
fn drop(&mut self) {
debug_assert_eq!(self.is_empty.load(Ordering::SeqCst), true);
}
}
/// Returns the id of the current thread.
#[inline]
fn current_thread_id() -> ThreadId {
thread_local! {
/// Cached thread-local id.
static THREAD_ID: ThreadId = thread::current().id();
}
THREAD_ID
.try_with(|id| *id)
.unwrap_or_else(|_| thread::current().id())
}
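
The register/notify protocol above reduces to: a blocked thread publishes an
entry, and a peer marks that entry selected and unparks the thread. A rough
std-only analogy of the handshake (these are not this crate's types):

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    // `false` plays the role of `Selected::Waiting`; flipping it to `true`
    // and waking the thread mirrors `try_select` followed by `unpark`.
    let pair = Arc::new((Mutex::new(false), Condvar::new()));
    let pair2 = Arc::clone(&pair);

    let waiter = thread::spawn(move || {
        let &(ref lock, ref cvar) = &*pair2;
        let mut selected = lock.lock().unwrap();
        while !*selected {
            selected = cvar.wait(selected).unwrap(); // registered and parked
        }
    });

    let &(ref lock, ref cvar) = &*pair;
    *lock.lock().unwrap() = true; // "select" the waiting operation
    cvar.notify_one();            // wake the registered thread
    waiter.join().unwrap();
}
```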

third_party/rust/crossbeam-channel/tests/after.rs (vendored, new file)

@@ -0,0 +1,335 @@
//! Tests for the after channel flavor.
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
extern crate rand;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{after, Select, TryRecvError};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn fire() {
let start = Instant::now();
let r = after(ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(100));
let fired = r.try_recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired >= ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
select! {
recv(r) -> _ => panic!(),
default => {}
}
select! {
recv(r) -> _ => panic!(),
recv(after(ms(200))) -> _ => {}
}
}
#[test]
fn capacity() {
const COUNT: usize = 10;
for i in 0..COUNT {
let r = after(ms(i as u64));
assert_eq!(r.capacity(), Some(1));
}
}
#[test]
fn len_empty_full() {
let r = after(ms(50));
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
thread::sleep(ms(100));
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), true);
r.try_recv().unwrap();
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let r = after(ms(200));
assert!(r.try_recv().is_err());
thread::sleep(ms(100));
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_ok());
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_err());
}
#[test]
fn recv() {
let start = Instant::now();
let r = after(ms(50));
let fired = r.recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired < fired - start);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn recv_timeout() {
let start = Instant::now();
let r = after(ms(200));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(100));
assert!(now - start <= ms(150));
let fired = r.recv_timeout(ms(200)).unwrap();
assert!(fired - start >= ms(200));
assert!(fired - start <= ms(250));
assert!(r.recv_timeout(ms(200)).is_err());
let now = Instant::now();
assert!(now - start >= ms(400));
assert!(now - start <= ms(450));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn recv_two() {
let r1 = after(ms(50));
let r2 = after(ms(50));
scope(|scope| {
scope.spawn(|_| {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
});
scope.spawn(|_| {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
});
}).unwrap();
}
#[test]
fn recv_race() {
select! {
recv(after(ms(50))) -> _ => {}
recv(after(ms(100))) -> _ => panic!(),
}
select! {
recv(after(ms(100))) -> _ => panic!(),
recv(after(ms(50))) -> _ => {}
}
}
#[test]
fn stress_default() {
const COUNT: usize = 10;
for _ in 0..COUNT {
select! {
recv(after(ms(0))) -> _ => {}
default => panic!(),
}
}
for _ in 0..COUNT {
select! {
recv(after(ms(100))) -> _ => panic!(),
default => {}
}
}
}
#[test]
fn select() {
const THREADS: usize = 4;
const COUNT: usize = 1000;
const TIMEOUT_MS: u64 = 100;
let v = (0..COUNT)
.map(|i| after(ms(i as u64 / TIMEOUT_MS / 2)))
.collect::<Vec<_>>();
let hits = AtomicUsize::new(0);
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let v: Vec<&_> = v.iter().collect();
loop {
let timeout = after(ms(TIMEOUT_MS));
let mut sel = Select::new();
for r in &v {
sel.recv(r);
}
let oper_timeout = sel.recv(&timeout);
let oper = sel.select();
match oper.index() {
i if i == oper_timeout => {
oper.recv(&timeout).unwrap();
break;
}
i => {
oper.recv(&v[i]).unwrap();
hits.fetch_add(1, Ordering::SeqCst);
}
}
}
});
}
}).unwrap();
assert_eq!(hits.load(Ordering::SeqCst), COUNT);
}
#[test]
fn ready() {
const THREADS: usize = 4;
const COUNT: usize = 1000;
const TIMEOUT_MS: u64 = 100;
let v = (0..COUNT)
.map(|i| after(ms(i as u64 / TIMEOUT_MS / 2)))
.collect::<Vec<_>>();
let hits = AtomicUsize::new(0);
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let v: Vec<&_> = v.iter().collect();
loop {
let timeout = after(ms(TIMEOUT_MS));
let mut sel = Select::new();
for r in &v {
sel.recv(r);
}
let oper_timeout = sel.recv(&timeout);
loop {
let i = sel.ready();
if i == oper_timeout {
timeout.try_recv().unwrap();
return;
} else if v[i].try_recv().is_ok() {
hits.fetch_add(1, Ordering::SeqCst);
break;
}
}
}
});
}
}).unwrap();
assert_eq!(hits.load(Ordering::SeqCst), COUNT);
}
#[test]
fn stress_clone() {
const RUNS: usize = 1000;
const THREADS: usize = 10;
const COUNT: usize = 50;
for i in 0..RUNS {
let r = after(ms(i as u64));
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let r = r.clone();
let _ = r.try_recv();
for _ in 0..COUNT {
drop(r.clone());
thread::yield_now();
}
});
}
}).unwrap();
}
}
#[test]
fn fairness() {
const COUNT: usize = 1000;
for &dur in &[0, 1] {
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(after(ms(dur))) -> _ => hits[0] += 1,
recv(after(ms(dur))) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 1000;
for &dur in &[0, 1] {
let mut hits = [0usize; 5];
for _ in 0..COUNT {
let r = after(ms(dur));
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}

third_party/rust/crossbeam-channel/tests/array.rs (vendored, new file)

@@ -0,0 +1,604 @@
//! Tests for the array channel flavor.
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
extern crate rand;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crossbeam_channel::bounded;
use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError};
use crossbeam_channel::{SendError, SendTimeoutError, TrySendError};
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
let (s, r) = bounded(1);
s.send(7).unwrap();
assert_eq!(r.try_recv(), Ok(7));
s.send(8).unwrap();
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
}
#[test]
fn capacity() {
for i in 1..10 {
let (s, r) = bounded::<()>(i);
assert_eq!(s.capacity(), Some(i));
assert_eq!(r.capacity(), Some(i));
}
}
#[test]
fn len_empty_full() {
let (s, r) = bounded(2);
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
s.send(()).unwrap();
assert_eq!(s.len(), 1);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), false);
s.send(()).unwrap();
assert_eq!(s.len(), 2);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), true);
assert_eq!(r.len(), 2);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), true);
r.recv().unwrap();
assert_eq!(s.len(), 1);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let (s, r) = bounded(100);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(1500));
assert_eq!(r.try_recv(), Ok(7));
thread::sleep(ms(500));
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
s.send(7).unwrap();
});
}).unwrap();
}
#[test]
fn recv() {
let (s, r) = bounded(100);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Ok(7));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(9));
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
s.send(8).unwrap();
s.send(9).unwrap();
});
}).unwrap();
}
#[test]
fn recv_timeout() {
let (s, r) = bounded::<i32>(100);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
assert_eq!(r.recv_timeout(ms(1000)), Ok(7));
assert_eq!(
r.recv_timeout(ms(1000)),
Err(RecvTimeoutError::Disconnected)
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
});
}).unwrap();
}
#[test]
fn try_send() {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.try_send(1), Ok(()));
assert_eq!(s.try_send(2), Err(TrySendError::Full(2)));
thread::sleep(ms(1500));
assert_eq!(s.try_send(3), Ok(()));
thread::sleep(ms(500));
assert_eq!(s.try_send(4), Err(TrySendError::Disconnected(4)));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
assert_eq!(r.try_recv(), Ok(1));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
assert_eq!(r.recv(), Ok(3));
});
}).unwrap();
}
#[test]
fn send() {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(|_| {
s.send(7).unwrap();
thread::sleep(ms(1000));
s.send(8).unwrap();
thread::sleep(ms(1000));
s.send(9).unwrap();
thread::sleep(ms(1000));
s.send(10).unwrap();
});
scope.spawn(|_| {
thread::sleep(ms(1500));
assert_eq!(r.recv(), Ok(7));
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.recv(), Ok(9));
});
}).unwrap();
}
#[test]
fn send_timeout() {
let (s, r) = bounded(2);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.send_timeout(1, ms(1000)), Ok(()));
assert_eq!(s.send_timeout(2, ms(1000)), Ok(()));
assert_eq!(
s.send_timeout(3, ms(500)),
Err(SendTimeoutError::Timeout(3))
);
thread::sleep(ms(1000));
assert_eq!(s.send_timeout(4, ms(1000)), Ok(()));
thread::sleep(ms(1000));
assert_eq!(s.send(5), Err(SendError(5)));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(1));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(2));
assert_eq!(r.recv(), Ok(4));
});
}).unwrap();
}
#[test]
fn send_after_disconnect() {
let (s, r) = bounded(100);
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(r);
assert_eq!(s.send(4), Err(SendError(4)));
assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5)));
assert_eq!(
s.send_timeout(6, ms(500)),
Err(SendTimeoutError::Disconnected(6))
);
}
#[test]
fn recv_after_disconnect() {
let (s, r) = bounded(100);
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(s);
assert_eq!(r.recv(), Ok(1));
assert_eq!(r.recv(), Ok(2));
assert_eq!(r.recv(), Ok(3));
assert_eq!(r.recv(), Err(RecvError));
}
#[test]
fn len() {
const COUNT: usize = 25_000;
const CAP: usize = 1000;
let (s, r) = bounded(CAP);
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
for _ in 0..CAP / 10 {
for i in 0..50 {
s.send(i).unwrap();
assert_eq!(s.len(), i + 1);
}
for i in 0..50 {
r.recv().unwrap();
assert_eq!(r.len(), 50 - i - 1);
}
}
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
for i in 0..CAP {
s.send(i).unwrap();
assert_eq!(s.len(), i + 1);
}
for _ in 0..CAP {
r.recv().unwrap();
}
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
let len = r.len();
assert!(len <= CAP);
}
});
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
let len = s.len();
assert!(len <= CAP);
}
});
}).unwrap();
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
}
#[test]
fn disconnect_wakes_sender() {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.send(()), Ok(()));
assert_eq!(s.send(()), Err(SendError(())));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(r);
});
}).unwrap();
}
#[test]
fn disconnect_wakes_receiver() {
let (s, r) = bounded::<()>(1);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(s);
});
}).unwrap();
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let (s, r) = bounded(3);
scope(|scope| {
scope.spawn(move |_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
}
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}).unwrap();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = bounded::<usize>(3);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = r.recv().unwrap();
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}
}).unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn stress_oneshot() {
const COUNT: usize = 10_000;
for _ in 0..COUNT {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(|_| r.recv().unwrap());
scope.spawn(|_| s.send(0).unwrap());
}).unwrap();
}
}
#[test]
fn stress_iter() {
const COUNT: usize = 100_000;
let (request_s, request_r) = bounded(1);
let (response_s, response_r) = bounded(1);
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == COUNT {
return;
}
}
request_s.send(()).unwrap();
}
});
for _ in request_r.iter() {
if response_s.send(1).is_err() {
break;
}
}
}).unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 100;
let (s, r) = bounded(2);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(()) = s.send_timeout(i, ms(10)) {
break;
}
}
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(x) = r.recv_timeout(ms(10)) {
assert_eq!(x, i);
break;
}
}
}
});
}).unwrap();
}
#[test]
fn drops() {
const RUNS: usize = 100;
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..RUNS {
let steps = rng.gen_range(0, 10_000);
let additional = rng.gen_range(0, 50);
DROPS.store(0, Ordering::SeqCst);
let (s, r) = bounded::<DropCounter>(50);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
r.recv().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..steps {
s.send(DropCounter).unwrap();
}
});
}).unwrap();
for _ in 0..additional {
s.send(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(s);
drop(r);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn linearizable() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = bounded(THREADS);
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
s.send(0).unwrap();
r.try_recv().unwrap();
}
});
}
}).unwrap();
}
#[test]
fn fairness() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded::<()>(COUNT);
let (s2, r2) = bounded::<()>(COUNT);
for _ in 0..COUNT {
s1.send(()).unwrap();
s2.send(()).unwrap();
}
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 10_000;
let (s, r) = bounded::<()>(COUNT);
for _ in 0..COUNT {
s.send(()).unwrap();
}
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn recv_in_send() {
let (s, _r) = bounded(1);
s.send(()).unwrap();
#[allow(unreachable_code)]
{
select! {
send(s, panic!()) -> _ => panic!(),
default => {}
}
}
let (s, r) = bounded(2);
s.send(()).unwrap();
select! {
send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {}
}
}

third_party/rust/crossbeam-channel/tests/golang.rs (vendored, new file, 1025 lines)

Diff not shown because of its large size.

third_party/rust/crossbeam-channel/tests/iter.rs (vendored, new file)

@@ -0,0 +1,110 @@
//! Tests for iteration over receivers.
extern crate crossbeam_channel;
extern crate crossbeam_utils;
use crossbeam_channel::unbounded;
use crossbeam_utils::thread::scope;
#[test]
fn nested_recv_iter() {
let (s, r) = unbounded::<i32>();
let (total_s, total_r) = unbounded::<i32>();
scope(|scope| {
scope.spawn(move |_| {
let mut acc = 0;
for x in r.iter() {
acc += x;
}
total_s.send(acc).unwrap();
});
s.send(3).unwrap();
s.send(1).unwrap();
s.send(2).unwrap();
drop(s);
assert_eq!(total_r.recv().unwrap(), 6);
}).unwrap();
}
#[test]
fn recv_iter_break() {
let (s, r) = unbounded::<i32>();
let (count_s, count_r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
for x in r.iter() {
if count >= 3 {
break;
} else {
count += x;
}
}
count_s.send(count).unwrap();
});
s.send(2).unwrap();
s.send(2).unwrap();
s.send(2).unwrap();
let _ = s.send(2);
drop(s);
assert_eq!(count_r.recv().unwrap(), 4);
}).unwrap();
}
#[test]
fn recv_try_iter() {
let (request_s, request_r) = unbounded();
let (response_s, response_r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == 6 {
return;
}
}
request_s.send(()).unwrap();
}
});
for _ in request_r.iter() {
if response_s.send(2).is_err() {
break;
}
}
}).unwrap();
}
#[test]
fn recv_into_iter_owned() {
let mut iter = {
let (s, r) = unbounded::<i32>();
s.send(1).unwrap();
s.send(2).unwrap();
r.into_iter()
};
assert_eq!(iter.next().unwrap(), 1);
assert_eq!(iter.next().unwrap(), 2);
assert_eq!(iter.next().is_none(), true);
}
#[test]
fn recv_into_iter_borrowed() {
let (s, r) = unbounded::<i32>();
s.send(1).unwrap();
s.send(2).unwrap();
drop(s);
let mut iter = (&r).into_iter();
assert_eq!(iter.next().unwrap(), 1);
assert_eq!(iter.next().unwrap(), 2);
assert_eq!(iter.next().is_none(), true);
}

third_party/rust/crossbeam-channel/tests/list.rs (vendored, new file)

@@ -0,0 +1,488 @@
//! Tests for the list channel flavor.
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
extern crate rand;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crossbeam_channel::unbounded;
use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError};
use crossbeam_channel::{SendError, SendTimeoutError, TrySendError};
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
let (s, r) = unbounded();
s.try_send(7).unwrap();
assert_eq!(r.try_recv(), Ok(7));
s.send(8).unwrap();
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
}
#[test]
fn capacity() {
let (s, r) = unbounded::<()>();
assert_eq!(s.capacity(), None);
assert_eq!(r.capacity(), None);
}
#[test]
fn len_empty_full() {
let (s, r) = unbounded();
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
s.send(()).unwrap();
assert_eq!(s.len(), 1);
assert_eq!(s.is_empty(), false);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), false);
r.recv().unwrap();
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), false);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(1500));
assert_eq!(r.try_recv(), Ok(7));
thread::sleep(ms(500));
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
s.send(7).unwrap();
});
}).unwrap();
}
#[test]
fn recv() {
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Ok(7));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(9));
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
s.send(8).unwrap();
s.send(9).unwrap();
});
}).unwrap();
}
#[test]
fn recv_timeout() {
let (s, r) = unbounded::<i32>();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
assert_eq!(r.recv_timeout(ms(1000)), Ok(7));
assert_eq!(
r.recv_timeout(ms(1000)),
Err(RecvTimeoutError::Disconnected)
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
});
}).unwrap();
}
#[test]
fn try_send() {
let (s, r) = unbounded();
for i in 0..1000 {
assert_eq!(s.try_send(i), Ok(()));
}
drop(r);
assert_eq!(s.try_send(777), Err(TrySendError::Disconnected(777)));
}
#[test]
fn send() {
let (s, r) = unbounded();
for i in 0..1000 {
assert_eq!(s.send(i), Ok(()));
}
drop(r);
assert_eq!(s.send(777), Err(SendError(777)));
}
#[test]
fn send_timeout() {
let (s, r) = unbounded();
for i in 0..1000 {
assert_eq!(s.send_timeout(i, ms(i as u64)), Ok(()));
}
drop(r);
assert_eq!(
s.send_timeout(777, ms(0)),
Err(SendTimeoutError::Disconnected(777))
);
}
#[test]
fn send_after_disconnect() {
let (s, r) = unbounded();
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(r);
assert_eq!(s.send(4), Err(SendError(4)));
assert_eq!(s.try_send(5), Err(TrySendError::Disconnected(5)));
assert_eq!(
s.send_timeout(6, ms(0)),
Err(SendTimeoutError::Disconnected(6))
);
}
#[test]
fn recv_after_disconnect() {
let (s, r) = unbounded();
s.send(1).unwrap();
s.send(2).unwrap();
s.send(3).unwrap();
drop(s);
assert_eq!(r.recv(), Ok(1));
assert_eq!(r.recv(), Ok(2));
assert_eq!(r.recv(), Ok(3));
assert_eq!(r.recv(), Err(RecvError));
}
#[test]
fn len() {
let (s, r) = unbounded();
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
for i in 0..50 {
s.send(i).unwrap();
assert_eq!(s.len(), i + 1);
}
for i in 0..50 {
r.recv().unwrap();
assert_eq!(r.len(), 50 - i - 1);
}
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
}
#[test]
fn disconnect_wakes_receiver() {
let (s, r) = unbounded::<()>();
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(s);
});
}).unwrap();
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
}
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}).unwrap();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = unbounded::<usize>();
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = r.recv().unwrap();
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}
}).unwrap();
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn stress_oneshot() {
const COUNT: usize = 10_000;
for _ in 0..COUNT {
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(|_| r.recv().unwrap());
scope.spawn(|_| s.send(0).unwrap());
}).unwrap();
}
}
#[test]
fn stress_iter() {
const COUNT: usize = 100_000;
let (request_s, request_r) = unbounded();
let (response_s, response_r) = unbounded();
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == COUNT {
return;
}
}
request_s.send(()).unwrap();
}
});
for _ in request_r.iter() {
if response_s.send(1).is_err() {
break;
}
}
}).unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 100;
let (s, r) = unbounded();
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
s.send(i).unwrap();
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(x) = r.recv_timeout(ms(10)) {
assert_eq!(x, i);
break;
}
}
}
});
}).unwrap();
}
#[test]
fn drops() {
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..100 {
let steps = rng.gen_range(0, 10_000);
let additional = rng.gen_range(0, 1000);
DROPS.store(0, Ordering::SeqCst);
let (s, r) = unbounded::<DropCounter>();
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
r.recv().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..steps {
s.send(DropCounter).unwrap();
}
});
}).unwrap();
for _ in 0..additional {
s.try_send(DropCounter).unwrap();
}
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(s);
drop(r);
assert_eq!(DROPS.load(Ordering::SeqCst), steps + additional);
}
}
#[test]
fn linearizable() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = unbounded();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
s.send(0).unwrap();
r.try_recv().unwrap();
}
});
}
}).unwrap();
}
#[test]
fn fairness() {
const COUNT: usize = 10_000;
let (s1, r1) = unbounded::<()>();
let (s2, r2) = unbounded::<()>();
for _ in 0..COUNT {
s1.send(()).unwrap();
s2.send(()).unwrap();
}
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 10_000;
let (s, r) = unbounded();
for _ in 0..COUNT {
s.send(()).unwrap();
}
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
#[test]
fn recv_in_send() {
let (s, r) = unbounded();
s.send(()).unwrap();
select! {
send(s, assert_eq!(r.recv(), Ok(()))) -> _ => {}
}
}
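
The `recv_in_send` test at the end of list.rs depends on a subtle property of the `select!` macro: the message expression of a `send` case is evaluated only once that case has actually been selected, so it may itself perform channel operations. A minimal sketch of that behavior (illustrative, not part of the vendored tests):

```rust
#[macro_use]
extern crate crossbeam_channel;

use crossbeam_channel::unbounded;

fn main() {
    let (s, r) = unbounded();
    s.send(1).unwrap();

    select! {
        // The message expression runs only after the `send` case is
        // chosen, so receiving from the same channel here is fine.
        send(s, r.recv().unwrap() + 1) -> res => res.unwrap(),
    }

    // The value computed at selection time is now in the channel.
    assert_eq!(r.recv(), Ok(2));
}
```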

third_party/rust/crossbeam-channel/tests/mpsc.rs (vendored, new file, 1948 lines)

Diff not shown because of its large size.

third_party/rust/crossbeam-channel/tests/never.rs (vendored, new file)

@@ -0,0 +1,99 @@
//! Tests for the never channel flavor.
#[macro_use]
extern crate crossbeam_channel;
extern crate rand;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{never, tick, unbounded};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
select! {
recv(never::<i32>()) -> _ => panic!(),
default => {}
}
}
#[test]
fn optional() {
let (s, r) = unbounded::<i32>();
s.send(1).unwrap();
s.send(2).unwrap();
let mut r = Some(&r);
select! {
recv(r.unwrap_or(&never())) -> _ => {}
default => panic!(),
}
r = None;
select! {
recv(r.unwrap_or(&never())) -> _ => panic!(),
default => {}
}
}
#[test]
fn tick_n() {
let mut r = tick(ms(100));
let mut step = 0;
loop {
select! {
recv(r) -> _ => step += 1,
default(ms(500)) => break,
}
if step == 10 {
r = never();
}
}
assert_eq!(step, 10);
}
#[test]
fn capacity() {
let r = never::<i32>();
assert_eq!(r.capacity(), Some(0));
}
#[test]
fn len_empty_full() {
let r = never::<i32>();
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), true);
}
#[test]
fn try_recv() {
let r = never::<i32>();
assert!(r.try_recv().is_err());
thread::sleep(ms(100));
assert!(r.try_recv().is_err());
}
#[test]
fn recv_timeout() {
let start = Instant::now();
let r = never::<i32>();
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(100));
assert!(now - start <= ms(150));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(200));
assert!(now - start <= ms(250));
}

third_party/rust/crossbeam-channel/tests/ready.rs (vendored, new file)

@@ -0,0 +1,822 @@
//! Tests for channel readiness using the `Select` struct.
extern crate crossbeam_channel;
extern crate crossbeam_utils;
use std::any::Any;
use std::cell::Cell;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{after, bounded, tick, unbounded};
use crossbeam_channel::{Receiver, Select, TryRecvError, TrySendError};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke1() {
let (s1, r1) = unbounded::<usize>();
let (s2, r2) = unbounded::<usize>();
s1.send(1).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert_eq!(sel.ready(), 0);
assert_eq!(r1.try_recv(), Ok(1));
s2.send(2).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert_eq!(sel.ready(), 1);
assert_eq!(r2.try_recv(), Ok(2));
}
#[test]
fn smoke2() {
let (_s1, r1) = unbounded::<i32>();
let (_s2, r2) = unbounded::<i32>();
let (_s3, r3) = unbounded::<i32>();
let (_s4, r4) = unbounded::<i32>();
let (s5, r5) = unbounded::<i32>();
s5.send(5).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&r3);
sel.recv(&r4);
sel.recv(&r5);
assert_eq!(sel.ready(), 4);
assert_eq!(r5.try_recv(), Ok(5));
}
#[test]
fn disconnected() {
let (s1, r1) = unbounded::<i32>();
let (s2, r2) = unbounded::<i32>();
scope(|scope| {
scope.spawn(|_| {
drop(s1);
thread::sleep(ms(500));
s2.send(5).unwrap();
});
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
r2.recv().unwrap();
}).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r1.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
drop(s2);
});
let mut sel = Select::new();
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r2.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
}).unwrap();
}
#[test]
fn default() {
let (s1, r1) = unbounded::<i32>();
let (s2, r2) = unbounded::<i32>();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert!(sel.try_ready().is_err());
drop(s1);
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.try_ready() {
Ok(0) => assert!(r1.try_recv().is_err()),
_ => panic!(),
}
s2.send(2).unwrap();
let mut sel = Select::new();
sel.recv(&r2);
match sel.try_ready() {
Ok(0) => assert_eq!(r2.try_recv(), Ok(2)),
_ => panic!(),
}
let mut sel = Select::new();
sel.recv(&r2);
assert!(sel.try_ready().is_err());
let mut sel = Select::new();
assert!(sel.try_ready().is_err());
}
#[test]
fn timeout() {
let (_s1, r1) = unbounded::<i32>();
let (s2, r2) = unbounded::<i32>();
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(1500));
s2.send(2).unwrap();
});
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
assert!(sel.ready_timeout(ms(1000)).is_err());
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(1) => assert_eq!(r2.try_recv(), Ok(2)),
_ => panic!(),
}
}).unwrap();
scope(|scope| {
let (s, r) = unbounded::<i32>();
scope.spawn(move |_| {
thread::sleep(ms(500));
drop(s);
});
let mut sel = Select::new();
assert!(sel.ready_timeout(ms(1000)).is_err());
let mut sel = Select::new();
sel.recv(&r);
match sel.try_ready() {
Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
}).unwrap();
}
#[test]
fn default_when_disconnected() {
let (_, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.recv(&r);
match sel.try_ready() {
Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
let (_, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.recv(&r);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
let (s, _) = bounded::<i32>(0);
let mut sel = Select::new();
sel.send(&s);
match sel.try_ready() {
Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))),
_ => panic!(),
}
let (s, _) = bounded::<i32>(0);
let mut sel = Select::new();
sel.send(&s);
match sel.ready_timeout(ms(1000)) {
Ok(0) => assert_eq!(s.try_send(0), Err(TrySendError::Disconnected(0))),
_ => panic!(),
}
}
#[test]
fn default_only() {
let start = Instant::now();
let mut sel = Select::new();
assert!(sel.try_ready().is_err());
let now = Instant::now();
assert!(now - start <= ms(50));
let start = Instant::now();
let mut sel = Select::new();
assert!(sel.ready_timeout(ms(500)).is_err());
let now = Instant::now();
assert!(now - start >= ms(450));
assert!(now - start <= ms(550));
}
#[test]
fn unblocks() {
let (s1, r1) = bounded::<i32>(0);
let (s2, r2) = bounded::<i32>(0);
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
s2.send(2).unwrap();
});
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready_timeout(ms(1000)) {
Ok(1) => assert_eq!(r2.try_recv(), Ok(2)),
_ => panic!(),
}
}).unwrap();
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
assert_eq!(r1.recv().unwrap(), 1);
});
let mut sel = Select::new();
let oper1 = sel.send(&s1);
let oper2 = sel.send(&s2);
let oper = sel.select_timeout(ms(1000));
match oper {
Err(_) => panic!(),
Ok(oper) => match oper.index() {
i if i == oper1 => oper.send(&s1, 1).unwrap(),
i if i == oper2 => panic!(),
_ => unreachable!(),
},
}
}).unwrap();
}
#[test]
fn both_ready() {
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(500));
s1.send(1).unwrap();
assert_eq!(r2.recv().unwrap(), 2);
});
for _ in 0..2 {
let mut sel = Select::new();
sel.recv(&r1);
sel.send(&s2);
match sel.ready() {
0 => assert_eq!(r1.try_recv(), Ok(1)),
1 => s2.try_send(2).unwrap(),
_ => panic!(),
}
}
}).unwrap();
}
#[test]
fn cloning1() {
scope(|scope| {
let (s1, r1) = unbounded::<i32>();
let (_s2, r2) = unbounded::<i32>();
let (s3, r3) = unbounded::<()>();
scope.spawn(move |_| {
r3.recv().unwrap();
drop(s1.clone());
assert!(r3.try_recv().is_err());
s1.send(1).unwrap();
r3.recv().unwrap();
});
s3.send(()).unwrap();
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready() {
0 => drop(r1.try_recv()),
1 => drop(r2.try_recv()),
_ => panic!(),
}
s3.send(()).unwrap();
}).unwrap();
}
#[test]
fn cloning2() {
let (s1, r1) = unbounded::<()>();
let (s2, r2) = unbounded::<()>();
let (_s3, _r3) = unbounded::<()>();
scope(|scope| {
scope.spawn(move |_| {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready() {
0 => panic!(),
1 => drop(r2.try_recv()),
_ => panic!(),
}
});
thread::sleep(ms(500));
drop(s1.clone());
s2.send(()).unwrap();
}).unwrap();
}
#[test]
fn preflight1() {
let (s, r) = unbounded();
s.send(()).unwrap();
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => drop(r.try_recv()),
_ => panic!(),
}
}
#[test]
fn preflight2() {
let (s, r) = unbounded();
drop(s.clone());
s.send(()).unwrap();
drop(s);
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => assert_eq!(r.try_recv(), Ok(())),
_ => panic!(),
}
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
}
#[test]
fn preflight3() {
let (s, r) = unbounded();
drop(s.clone());
s.send(()).unwrap();
drop(s);
r.recv().unwrap();
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected)),
_ => panic!(),
}
}
#[test]
fn duplicate_operations() {
let (s, r) = unbounded::<i32>();
let hit = vec![Cell::new(false); 4];
while hit.iter().map(|h| h.get()).any(|hit| !hit) {
let mut sel = Select::new();
sel.recv(&r);
sel.recv(&r);
sel.send(&s);
sel.send(&s);
match sel.ready() {
0 => {
assert!(r.try_recv().is_ok());
hit[0].set(true);
}
1 => {
assert!(r.try_recv().is_ok());
hit[1].set(true);
}
2 => {
assert!(s.try_send(0).is_ok());
hit[2].set(true);
}
3 => {
assert!(s.try_send(0).is_ok());
hit[3].set(true);
}
_ => panic!(),
}
}
}
#[test]
fn nesting() {
let (s, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.send(&s);
match sel.ready() {
0 => {
assert!(s.try_send(0).is_ok());
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => {
assert_eq!(r.try_recv(), Ok(0));
let mut sel = Select::new();
sel.send(&s);
match sel.ready() {
0 => {
assert!(s.try_send(1).is_ok());
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => {
assert_eq!(r.try_recv(), Ok(1));
}
_ => panic!(),
}
}
_ => panic!(),
}
}
_ => panic!(),
}
}
_ => panic!(),
}
}
#[test]
fn stress_recv() {
const COUNT: usize = 10_000;
let (s1, r1) = unbounded();
let (s2, r2) = bounded(5);
let (s3, r3) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
s1.send(i).unwrap();
r3.recv().unwrap();
s2.send(i).unwrap();
r3.recv().unwrap();
}
});
for i in 0..COUNT {
for _ in 0..2 {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
match sel.ready() {
0 => assert_eq!(r1.try_recv(), Ok(i)),
1 => assert_eq!(r2.try_recv(), Ok(i)),
_ => panic!(),
}
s3.send(()).unwrap();
}
}
}).unwrap();
}
#[test]
fn stress_send() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
let (s3, r3) = bounded(100);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
assert_eq!(r1.recv().unwrap(), i);
assert_eq!(r2.recv().unwrap(), i);
r3.recv().unwrap();
}
});
for i in 0..COUNT {
for _ in 0..2 {
let mut sel = Select::new();
sel.send(&s1);
sel.send(&s2);
match sel.ready() {
0 => assert!(s1.try_send(i).is_ok()),
1 => assert!(s2.try_send(i).is_ok()),
_ => panic!(),
}
}
s3.send(()).unwrap();
}
}).unwrap();
}
#[test]
fn stress_mixed() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded(0);
let (s2, r2) = bounded(0);
let (s3, r3) = bounded(100);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
s1.send(i).unwrap();
assert_eq!(r2.recv().unwrap(), i);
r3.recv().unwrap();
}
});
for i in 0..COUNT {
for _ in 0..2 {
let mut sel = Select::new();
sel.recv(&r1);
sel.send(&s2);
match sel.ready() {
0 => assert_eq!(r1.try_recv(), Ok(i)),
1 => assert!(s2.try_send(i).is_ok()),
_ => panic!(),
}
}
s3.send(()).unwrap();
}
}).unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 20;
let (s, r) = bounded(2);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(500));
}
let mut done = false;
while !done {
let mut sel = Select::new();
sel.send(&s);
match sel.ready_timeout(ms(100)) {
Err(_) => {}
Ok(0) => {
assert!(s.try_send(i).is_ok());
done = true;
},
Ok(_) => panic!(),
}
}
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(500));
}
let mut done = false;
while !done {
let mut sel = Select::new();
sel.recv(&r);
match sel.ready_timeout(ms(100)) {
Err(_) => {}
Ok(0) => {
assert_eq!(r.try_recv(), Ok(i));
done = true;
},
Ok(_) => panic!(),
}
}
}
});
}).unwrap();
}
#[test]
fn send_recv_same_channel() {
let (s, r) = bounded::<i32>(0);
let mut sel = Select::new();
sel.send(&s);
sel.recv(&r);
assert!(sel.ready_timeout(ms(100)).is_err());
let (s, r) = unbounded::<i32>();
let mut sel = Select::new();
sel.send(&s);
sel.recv(&r);
match sel.ready_timeout(ms(100)) {
Err(_) => panic!(),
Ok(0) => assert!(s.try_send(0).is_ok()),
Ok(_) => panic!(),
}
}
#[test]
fn channel_through_channel() {
const COUNT: usize = 1000;
type T = Box<Any + Send>;
for cap in 1..4 {
let (s, r) = bounded::<T>(cap);
scope(|scope| {
scope.spawn(move |_| {
let mut s = s;
for _ in 0..COUNT {
let (new_s, new_r) = bounded(cap);
let mut new_r: T = Box::new(Some(new_r));
{
let mut sel = Select::new();
sel.send(&s);
match sel.ready() {
0 => assert!(s.try_send(new_r).is_ok()),
_ => panic!(),
}
}
s = new_s;
}
});
scope.spawn(move |_| {
let mut r = r;
for _ in 0..COUNT {
let new = {
let mut sel = Select::new();
sel.recv(&r);
match sel.ready() {
0 => r
.try_recv()
.unwrap()
.downcast_mut::<Option<Receiver<T>>>()
.unwrap()
.take()
.unwrap(),
_ => panic!(),
}
};
r = new;
}
});
}).unwrap();
}
}
#[test]
fn fairness1() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded::<()>(COUNT);
let (s2, r2) = unbounded::<()>();
for _ in 0..COUNT {
s1.send(()).unwrap();
s2.send(()).unwrap();
}
let hits = vec![Cell::new(0usize); 4];
for _ in 0..COUNT {
let after = after(ms(0));
let tick = tick(ms(0));
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&after);
sel.recv(&tick);
match sel.ready() {
0 => {
r1.try_recv().unwrap();
hits[0].set(hits[0].get() + 1);
}
1 => {
r2.try_recv().unwrap();
hits[1].set(hits[1].get() + 1);
}
2 => {
after.try_recv().unwrap();
hits[2].set(hits[2].get() + 1);
}
3 => {
tick.try_recv().unwrap();
hits[3].set(hits[3].get() + 1);
}
_ => panic!(),
}
}
assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 2));
}
#[test]
fn fairness2() {
const COUNT: usize = 10_000;
let (s1, r1) = unbounded::<()>();
let (s2, r2) = bounded::<()>(1);
let (s3, r3) = bounded::<()>(0);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..COUNT {
let mut sel = Select::new();
let mut oper1 = None;
let mut oper2 = None;
if s1.is_empty() {
oper1 = Some(sel.send(&s1));
}
if s2.is_empty() {
oper2 = Some(sel.send(&s2));
}
let oper3 = sel.send(&s3);
let oper = sel.select();
match oper.index() {
i if Some(i) == oper1 => assert!(oper.send(&s1, ()).is_ok()),
i if Some(i) == oper2 => assert!(oper.send(&s2, ()).is_ok()),
i if i == oper3 => assert!(oper.send(&s3, ()).is_ok()),
_ => unreachable!(),
}
}
});
let hits = vec![Cell::new(0usize); 3];
for _ in 0..COUNT {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&r3);
loop {
match sel.ready() {
0 => {
if r1.try_recv().is_ok() {
hits[0].set(hits[0].get() + 1);
break;
}
}
1 => {
if r2.try_recv().is_ok() {
hits[1].set(hits[1].get() + 1);
break;
}
}
2 => {
if r3.try_recv().is_ok() {
hits[2].set(hits[2].get() + 1);
break;
}
}
_ => unreachable!(),
}
}
}
assert!(hits.iter().all(|x| x.get() >= COUNT / hits.len() / 10));
}).unwrap();
}
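
A pattern worth calling out in `fairness2` (and in `ready` in tick.rs further down): `Select::ready` only reports that an operation was ready, it does not reserve it, so the follow-up non-blocking call may fail if another thread wins the race, and the readiness check must then be retried. A minimal sketch of that retry loop, assuming the channel is never disconnected (error handling omitted):

```rust
extern crate crossbeam_channel;

use crossbeam_channel::{unbounded, Receiver, Select};

// `ready()` is only a hint: another thread may take the message between
// the readiness notification and our `try_recv`, so loop until the
// non-blocking operation actually succeeds.
fn recv_when_ready<T>(r: &Receiver<T>) -> T {
    loop {
        let mut sel = Select::new();
        sel.recv(r);
        match sel.ready() {
            0 => {
                if let Ok(msg) = r.try_recv() {
                    return msg;
                }
                // Lost the race; wait for readiness again.
            }
            _ => unreachable!(),
        }
    }
}

fn main() {
    let (s, r) = unbounded();
    s.send(7).unwrap();
    assert_eq!(recv_when_ready(&r), 7);
}
```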

third_party/rust/crossbeam-channel/tests/select.rs (vendored, new file, 1285 lines)

Diff not shown because of its large size.

third_party/rust/crossbeam-channel/tests/select_macro.rs (vendored, new file, 1416 lines)

Diff not shown because of its large size.

third_party/rust/crossbeam-channel/tests/thread_locals.rs (vendored, new file)

@@ -0,0 +1,53 @@
//! Tests that make sure accessing thread-locals while exiting the thread doesn't cause panics.
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
use std::thread;
use std::time::Duration;
use crossbeam_channel::unbounded;
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn use_while_exiting() {
struct Foo;
impl Drop for Foo {
fn drop(&mut self) {
// A blocking operation after the thread-locals have been dropped. This will attempt to
// use the thread-locals and must not panic.
let (_s, r) = unbounded::<()>();
select! {
recv(r) -> _ => {}
default(ms(100)) => {}
}
}
}
thread_local! {
static FOO: Foo = Foo;
}
let (s, r) = unbounded::<()>();
scope(|scope| {
scope.spawn(|_| {
// First initialize `FOO`, then the thread-locals related to crossbeam-channel.
FOO.with(|_| ());
r.recv().unwrap();
// At thread exit, thread-locals related to crossbeam-channel get dropped first and
// `FOO` is dropped last.
});
scope.spawn(|_| {
thread::sleep(ms(100));
s.send(()).unwrap();
});
}).unwrap();
}

third_party/rust/crossbeam-channel/tests/tick.rs (vendored, new file)

@@ -0,0 +1,350 @@
//! Tests for the tick channel flavor.
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
extern crate rand;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::{Duration, Instant};
use crossbeam_channel::{after, tick, Select, TryRecvError};
use crossbeam_utils::thread::scope;
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn fire() {
let start = Instant::now();
let r = tick(ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(100));
let fired = r.try_recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired >= ms(50));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
select! {
recv(r) -> _ => panic!(),
default => {}
}
select! {
recv(r) -> _ => {}
recv(tick(ms(200))) -> _ => panic!(),
}
}
#[test]
fn intervals() {
let start = Instant::now();
let r = tick(ms(50));
let t1 = r.recv().unwrap();
assert!(start + ms(50) <= t1);
assert!(start + ms(100) > t1);
thread::sleep(ms(300));
let t2 = r.try_recv().unwrap();
assert!(start + ms(100) <= t2);
assert!(start + ms(150) > t2);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
let t3 = r.recv().unwrap();
assert!(start + ms(400) <= t3);
assert!(start + ms(450) > t3);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn capacity() {
const COUNT: usize = 10;
for i in 0..COUNT {
let r = tick(ms(i as u64));
assert_eq!(r.capacity(), Some(1));
}
}
#[test]
fn len_empty_full() {
let r = tick(ms(50));
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
thread::sleep(ms(100));
assert_eq!(r.len(), 1);
assert_eq!(r.is_empty(), false);
assert_eq!(r.is_full(), true);
r.try_recv().unwrap();
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), false);
}
#[test]
fn try_recv() {
let r = tick(ms(200));
assert!(r.try_recv().is_err());
thread::sleep(ms(100));
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_ok());
assert!(r.try_recv().is_err());
thread::sleep(ms(200));
assert!(r.try_recv().is_ok());
assert!(r.try_recv().is_err());
}
#[test]
fn recv() {
let start = Instant::now();
let r = tick(ms(50));
let fired = r.recv().unwrap();
assert!(start < fired);
assert!(fired - start >= ms(50));
let now = Instant::now();
assert!(fired < now);
assert!(now - fired < fired - start);
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn recv_timeout() {
let start = Instant::now();
let r = tick(ms(200));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(100));
assert!(now - start <= ms(150));
let fired = r.recv_timeout(ms(200)).unwrap();
assert!(fired - start >= ms(200));
assert!(fired - start <= ms(250));
assert!(r.recv_timeout(ms(100)).is_err());
let now = Instant::now();
assert!(now - start >= ms(300));
assert!(now - start <= ms(350));
let fired = r.recv_timeout(ms(200)).unwrap();
assert!(fired - start >= ms(400));
assert!(fired - start <= ms(450));
}
#[test]
fn recv_two() {
let r1 = tick(ms(50));
let r2 = tick(ms(50));
scope(|scope| {
scope.spawn(|_| {
for _ in 0..10 {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
}
});
scope.spawn(|_| {
for _ in 0..10 {
select! {
recv(r1) -> _ => {}
recv(r2) -> _ => {}
}
}
});
}).unwrap();
}
#[test]
fn recv_race() {
select! {
recv(tick(ms(50))) -> _ => {}
recv(tick(ms(100))) -> _ => panic!(),
}
select! {
recv(tick(ms(100))) -> _ => panic!(),
recv(tick(ms(50))) -> _ => {}
}
}
#[test]
fn stress_default() {
const COUNT: usize = 10;
for _ in 0..COUNT {
select! {
recv(tick(ms(0))) -> _ => {}
default => panic!(),
}
}
for _ in 0..COUNT {
select! {
recv(tick(ms(100))) -> _ => panic!(),
default => {}
}
}
}
#[test]
fn select() {
const THREADS: usize = 4;
let hits = AtomicUsize::new(0);
let r1 = tick(ms(200));
let r2 = tick(ms(300));
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let timeout = after(ms(1100));
loop {
let mut sel = Select::new();
let oper1 = sel.recv(&r1);
let oper2 = sel.recv(&r2);
let oper3 = sel.recv(&timeout);
let oper = sel.select();
match oper.index() {
i if i == oper1 => {
oper.recv(&r1).unwrap();
hits.fetch_add(1, Ordering::SeqCst);
}
i if i == oper2 => {
oper.recv(&r2).unwrap();
hits.fetch_add(1, Ordering::SeqCst);
}
i if i == oper3 => {
oper.recv(&timeout).unwrap();
break;
}
_ => unreachable!(),
}
}
});
}
}).unwrap();
assert_eq!(hits.load(Ordering::SeqCst), 8);
}
#[test]
fn ready() {
const THREADS: usize = 4;
let hits = AtomicUsize::new(0);
let r1 = tick(ms(200));
let r2 = tick(ms(300));
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
let timeout = after(ms(1100));
'outer: loop {
let mut sel = Select::new();
sel.recv(&r1);
sel.recv(&r2);
sel.recv(&timeout);
loop {
match sel.ready() {
0 => {
if r1.try_recv().is_ok() {
hits.fetch_add(1, Ordering::SeqCst);
break;
}
}
1 => {
if r2.try_recv().is_ok() {
hits.fetch_add(1, Ordering::SeqCst);
break;
}
}
2 => {
if timeout.try_recv().is_ok() {
break 'outer;
}
}
_ => unreachable!(),
}
}
}
});
}
}).unwrap();
assert_eq!(hits.load(Ordering::SeqCst), 8);
}
#[test]
fn fairness() {
const COUNT: usize = 30;
for &dur in &[0, 1] {
let mut hits = [0usize; 2];
for _ in 0..COUNT {
let r1 = tick(ms(dur));
let r2 = tick(ms(dur));
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 30;
for &dur in &[0, 1] {
let mut hits = [0usize; 5];
for _ in 0..COUNT {
let r = tick(ms(dur));
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}
}

third_party/rust/crossbeam-channel/tests/zero.rs (vendored, new file)

@@ -0,0 +1,501 @@
//! Tests for the zero channel flavor.
#[macro_use]
extern crate crossbeam_channel;
extern crate crossbeam_utils;
extern crate rand;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::thread;
use std::time::Duration;
use crossbeam_channel::bounded;
use crossbeam_channel::{RecvError, RecvTimeoutError, TryRecvError};
use crossbeam_channel::{SendError, SendTimeoutError, TrySendError};
use crossbeam_utils::thread::scope;
use rand::{thread_rng, Rng};
fn ms(ms: u64) -> Duration {
Duration::from_millis(ms)
}
#[test]
fn smoke() {
let (s, r) = bounded(0);
assert_eq!(s.try_send(7), Err(TrySendError::Full(7)));
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
}
#[test]
fn capacity() {
let (s, r) = bounded::<()>(0);
assert_eq!(s.capacity(), Some(0));
assert_eq!(r.capacity(), Some(0));
}
#[test]
fn len_empty_full() {
let (s, r) = bounded(0);
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), true);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), true);
scope(|scope| {
scope.spawn(|_| s.send(0).unwrap());
scope.spawn(|_| r.recv().unwrap());
}).unwrap();
assert_eq!(s.len(), 0);
assert_eq!(s.is_empty(), true);
assert_eq!(s.is_full(), true);
assert_eq!(r.len(), 0);
assert_eq!(r.is_empty(), true);
assert_eq!(r.is_full(), true);
}
#[test]
fn try_recv() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.try_recv(), Err(TryRecvError::Empty));
thread::sleep(ms(1500));
assert_eq!(r.try_recv(), Ok(7));
thread::sleep(ms(500));
assert_eq!(r.try_recv(), Err(TryRecvError::Disconnected));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
s.send(7).unwrap();
});
}).unwrap();
}
#[test]
fn recv() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Ok(7));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(9));
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
s.send(8).unwrap();
s.send(9).unwrap();
});
}).unwrap();
}
#[test]
fn recv_timeout() {
let (s, r) = bounded::<i32>(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv_timeout(ms(1000)), Err(RecvTimeoutError::Timeout));
assert_eq!(r.recv_timeout(ms(1000)), Ok(7));
assert_eq!(
r.recv_timeout(ms(1000)),
Err(RecvTimeoutError::Disconnected)
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
s.send(7).unwrap();
});
}).unwrap();
}
#[test]
fn try_send() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.try_send(7), Err(TrySendError::Full(7)));
thread::sleep(ms(1500));
assert_eq!(s.try_send(8), Ok(()));
thread::sleep(ms(500));
assert_eq!(s.try_send(9), Err(TrySendError::Disconnected(9)));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
assert_eq!(r.recv(), Ok(8));
});
}).unwrap();
}
#[test]
fn send() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
s.send(7).unwrap();
thread::sleep(ms(1000));
s.send(8).unwrap();
thread::sleep(ms(1000));
s.send(9).unwrap();
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
assert_eq!(r.recv(), Ok(7));
assert_eq!(r.recv(), Ok(8));
assert_eq!(r.recv(), Ok(9));
});
}).unwrap();
}
#[test]
fn send_timeout() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(
s.send_timeout(7, ms(1000)),
Err(SendTimeoutError::Timeout(7))
);
assert_eq!(s.send_timeout(8, ms(1000)), Ok(()));
assert_eq!(
s.send_timeout(9, ms(1000)),
Err(SendTimeoutError::Disconnected(9))
);
});
scope.spawn(move |_| {
thread::sleep(ms(1500));
assert_eq!(r.recv(), Ok(8));
});
}).unwrap();
}
#[test]
fn len() {
const COUNT: usize = 25_000;
let (s, r) = bounded(0);
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
assert_eq!(r.len(), 0);
}
});
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
assert_eq!(s.len(), 0);
}
});
}).unwrap();
assert_eq!(s.len(), 0);
assert_eq!(r.len(), 0);
}
#[test]
fn disconnect_wakes_sender() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(s.send(()), Err(SendError(())));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(r);
});
}).unwrap();
}
#[test]
fn disconnect_wakes_receiver() {
let (s, r) = bounded::<()>(0);
scope(|scope| {
scope.spawn(move |_| {
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
thread::sleep(ms(1000));
drop(s);
});
}).unwrap();
}
#[test]
fn spsc() {
const COUNT: usize = 100_000;
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
for i in 0..COUNT {
assert_eq!(r.recv(), Ok(i));
}
assert_eq!(r.recv(), Err(RecvError));
});
scope.spawn(move |_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}).unwrap();
}
#[test]
fn mpmc() {
const COUNT: usize = 25_000;
const THREADS: usize = 4;
let (s, r) = bounded::<usize>(0);
let v = (0..COUNT).map(|_| AtomicUsize::new(0)).collect::<Vec<_>>();
scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|_| {
for _ in 0..COUNT {
let n = r.recv().unwrap();
v[n].fetch_add(1, Ordering::SeqCst);
}
});
}
for _ in 0..THREADS {
scope.spawn(|_| {
for i in 0..COUNT {
s.send(i).unwrap();
}
});
}
}).unwrap();
for c in v {
assert_eq!(c.load(Ordering::SeqCst), THREADS);
}
}
#[test]
fn stress_oneshot() {
const COUNT: usize = 10_000;
for _ in 0..COUNT {
let (s, r) = bounded(1);
scope(|scope| {
scope.spawn(|_| r.recv().unwrap());
scope.spawn(|_| s.send(0).unwrap());
}).unwrap();
}
}
#[test]
fn stress_iter() {
const COUNT: usize = 1000;
let (request_s, request_r) = bounded(0);
let (response_s, response_r) = bounded(0);
scope(|scope| {
scope.spawn(move |_| {
let mut count = 0;
loop {
for x in response_r.try_iter() {
count += x;
if count == COUNT {
return;
}
}
let _ = request_s.try_send(());
}
});
for _ in request_r.iter() {
if response_s.send(1).is_err() {
break;
}
}
}).unwrap();
}
#[test]
fn stress_timeout_two_threads() {
const COUNT: usize = 100;
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(()) = s.send_timeout(i, ms(10)) {
break;
}
}
}
});
scope.spawn(|_| {
for i in 0..COUNT {
if i % 2 == 0 {
thread::sleep(ms(50));
}
loop {
if let Ok(x) = r.recv_timeout(ms(10)) {
assert_eq!(x, i);
break;
}
}
}
});
}).unwrap();
}
#[test]
fn drops() {
static DROPS: AtomicUsize = AtomicUsize::new(0);
#[derive(Debug, PartialEq)]
struct DropCounter;
impl Drop for DropCounter {
fn drop(&mut self) {
DROPS.fetch_add(1, Ordering::SeqCst);
}
}
let mut rng = thread_rng();
for _ in 0..100 {
let steps = rng.gen_range(0, 3_000);
DROPS.store(0, Ordering::SeqCst);
let (s, r) = bounded::<DropCounter>(0);
scope(|scope| {
scope.spawn(|_| {
for _ in 0..steps {
r.recv().unwrap();
}
});
scope.spawn(|_| {
for _ in 0..steps {
s.send(DropCounter).unwrap();
}
});
}).unwrap();
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
drop(s);
drop(r);
assert_eq!(DROPS.load(Ordering::SeqCst), steps);
}
}
#[test]
fn fairness() {
const COUNT: usize = 10_000;
let (s1, r1) = bounded::<()>(0);
let (s2, r2) = bounded::<()>(0);
scope(|scope| {
scope.spawn(|_| {
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
recv(r1) -> _ => hits[0] += 1,
recv(r2) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
});
let mut hits = [0usize; 2];
for _ in 0..COUNT {
select! {
send(s1, ()) -> _ => hits[0] += 1,
send(s2, ()) -> _ => hits[1] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}).unwrap();
}
#[test]
fn fairness_duplicates() {
const COUNT: usize = 10_000;
let (s, r) = bounded::<()>(0);
scope(|scope| {
scope.spawn(|_| {
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
recv(r) -> _ => hits[0] += 1,
recv(r) -> _ => hits[1] += 1,
recv(r) -> _ => hits[2] += 1,
recv(r) -> _ => hits[3] += 1,
recv(r) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
});
let mut hits = [0usize; 5];
for _ in 0..COUNT {
select! {
send(s, ()) -> _ => hits[0] += 1,
send(s, ()) -> _ => hits[1] += 1,
send(s, ()) -> _ => hits[2] += 1,
send(s, ()) -> _ => hits[3] += 1,
send(s, ()) -> _ => hits[4] += 1,
}
}
assert!(hits.iter().all(|x| *x >= COUNT / hits.len() / 2));
}).unwrap();
}
#[test]
fn recv_in_send() {
let (s, r) = bounded(0);
scope(|scope| {
scope.spawn(|_| {
thread::sleep(ms(100));
r.recv()
});
scope.spawn(|_| {
thread::sleep(ms(500));
s.send(()).unwrap();
});
select! {
send(s, r.recv().unwrap()) -> _ => {}
}
}).unwrap();
}

third_party/rust/crossbeam-deque/.cargo-checksum.json (vendored)

@@ -1 +1 @@
{"files":{"CHANGELOG.md":"44023168ca8df497a6bf6145965d3eca080744dd0c1bb3f638d907451b9a47df","Cargo.toml":"777ef5e8132243b5096ce9e3f16cfd400d9216b0cf3f02ae3e1ecc0774f78de6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"27ce503b57a65de4b2a3da3bbacc0ade00230495cc5cc63d2fbbb565d999ac64","src/lib.rs":"d4fac3875f95541899fa7cb79bc0d83c706c81d548a60d6c5f1b99ef4ba2b51c"},"package":"fe8153ef04a7594ded05b427ffad46ddeaf22e63fd48d42b3e1e3bb4db07cae7"}
{"files":{"CHANGELOG.md":"8c3a2735652220d8601067e0fa560b8c1146671fefb41a78f1cd4d08a12fe697","Cargo.toml":"f221e962c757551c11c9e145b5ef6dc943a8d4670d2230e76c8a6642c2667952","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"b9257b4c9a56e01992ad1ea6dc65e2928cfb27475222e5ef521f9469b8870e48","src/lib.rs":"54d40e045740c2ed857e0b04992b4f718404ec0af48938a9ae33ae59f73e2d84","tests/fifo.rs":"54b78bdd4f53e217291451bef579cdbe571b290ddde7f40dce05c5160a0c2de8","tests/lifo.rs":"8393e2f8f689135d4e9a3ea2da39d665b6cbe55a53be2d07f83969506a8003a6"},"package":"05e44b8cf3e1a625844d1750e1f7820da46044ff6d28f4d43e455ba3e5bb2c13"}

third_party/rust/crossbeam-deque/CHANGELOG.md (vendored)

@@ -1,46 +1,71 @@
# Changelog
All notable changes to this project will be documented in this file.
# Version 0.6.3
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
- Bump `crossbeam-epoch` to `0.7`.
## [Unreleased]
# Version 0.6.2
## [0.3.1] - 2018-05-04
- Update `crossbeam-utils` to `0.6`.
### Added
- `Deque::capacity`
- `Deque::min_capacity`
- `Deque::shrink_to_fit`
# Version 0.6.1
### Changed
- Change a few `Relaxed` orderings to `Release` in order to fix false positives by tsan.
# Version 0.6.0
- Add `Stealer::steal_many` for batched stealing.
- Change the return type of `pop` to `Pop<T>` so that spinning can be handled manually.
# Version 0.5.2
- Update `crossbeam-utils` to `0.5.0`.
# Version 0.5.1
- Minor optimizations.
# Version 0.5.0
- Add two deque constructors: `fifo()` and `lifo()`.
- Update `rand` to `0.5.3`.
- Rename `Deque` to `Worker`.
- Return `Option<T>` from `Stealer::steal`.
- Remove methods `Deque::len` and `Stealer::len`.
- Remove method `Deque::stealer`.
- Remove method `Deque::steal`.
# Version 0.4.1
- Update `crossbeam-epoch` to `0.5.0`.
# Version 0.4.0
- Update `crossbeam-epoch` to `0.4.2`.
- Update `crossbeam-utils` to `0.4.0`.
- Require minimum Rust version 1.25.
# Version 0.3.1
- Add `Deque::capacity`.
- Add `Deque::min_capacity`.
- Add `Deque::shrink_to_fit`.
- Update `crossbeam-epoch` to `0.3.0`.
- Support Rust 1.20.
- Shrink the buffer in `Deque::push` if necessary.
## [0.3.0] - 2018-02-10
# Version 0.3.0
### Changed
- Update `crossbeam-epoch` to `0.4.0`.
- Drop support for Rust 1.13.
## [0.2.0] - 2018-02-10
# Version 0.2.0
### Changed
- Update `crossbeam-epoch` to `0.3.0`.
- Support Rust 1.13.
## [0.1.1] - 2017-11-29
# Version 0.1.1
### Changed
- Update `crossbeam-epoch` to `0.2.0`.
## 0.1.0 - 2017-11-26
### Added
- First implementation of the Chase-Lev deque.
# Version 0.1.0
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.3.1...HEAD
[0.3.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.3.0...v0.3.1
[0.3.0]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.2.0...v0.3.0
[0.2.0]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.2.0
[0.1.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.1.1
- First implementation of the Chase-Lev deque.
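
The 0.5.0 and 0.6.0 entries above describe the API that the vendored tests below exercise: `fifo()`/`lifo()` return a `Worker`/`Stealer` pair, `pop` returns a `Pop<T>` whose `Retry` variant leaves the spinning strategy to the caller, and `Stealer::steal_many` steals a batch of tasks into another worker. A minimal sketch (single-threaded, so the retry arms never actually fire):

```rust
extern crate crossbeam_deque as deque;

use deque::{Pop, Steal};

fn main() {
    let (w, s) = deque::fifo();
    for i in 0..4 {
        w.push(i);
    }

    // `Pop::Retry` signals a lost race; the caller decides how to retry.
    let popped = loop {
        match w.pop() {
            Pop::Data(x) => break Some(x),
            Pop::Empty => break None,
            Pop::Retry => {} // spin and try again
        }
    };
    assert_eq!(popped, Some(0)); // FIFO: the oldest task comes out first

    // `steal_many` moves a batch into a second worker, handing one task back.
    let (w2, _) = deque::fifo();
    if let Steal::Data(x) = s.steal_many(&w2) {
        assert_eq!(x, 1); // the next-oldest task
    }
}
```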

third_party/rust/crossbeam-deque/Cargo.toml (vendored)

@@ -12,22 +12,20 @@
[package]
name = "crossbeam-deque"
version = "0.3.1"
version = "0.6.3"
authors = ["The Crossbeam Project Developers"]
description = "Concurrent work-stealing deque"
homepage = "https://github.com/crossbeam-rs/crossbeam-deque"
homepage = "https://github.com/crossbeam-rs/crossbeam"
documentation = "https://docs.rs/crossbeam-deque"
readme = "README.md"
keywords = ["chase-lev", "lock-free", "scheduler", "scheduling"]
categories = ["algorithms", "concurrency", "data-structures"]
license = "MIT/Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam-deque"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.crossbeam-epoch]
version = "0.4.0"
version = "0.7"
[dependencies.crossbeam-utils]
version = "0.3"
version = "0.6"
[dev-dependencies.rand]
version = "0.4"
[badges.travis-ci]
repository = "crossbeam-rs/crossbeam-deque"
version = "0.6"

third_party/rust/crossbeam-deque/LICENSE-MIT (vendored)

@@ -1,5 +1,3 @@
Copyright (c) 2010 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the

third_party/rust/crossbeam-deque/README.md (vendored)

@@ -1,9 +1,18 @@
# Concurrent work-stealing deque
# Crossbeam Deque
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-deque.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-deque)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-deque)
[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)](https://crates.io/crates/crossbeam-deque)
[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)](https://docs.rs/crossbeam-deque)
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)](
https://travis-ci.org/crossbeam-rs/crossbeam)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam-deque)
[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)](
https://crates.io/crates/crossbeam-deque)
[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)](
https://docs.rs/crossbeam-deque)
[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)](
https://www.rust-lang.org)
This crate provides work-stealing deques, which are primarily intended for
building task schedulers.
## Usage
@@ -11,7 +20,7 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-deque = "0.3"
crossbeam-deque = "0.6"
```
Next, add this to your crate:
@@ -20,10 +29,23 @@ Next, add this to your crate:
extern crate crossbeam_deque;
```
The minimum required Rust version is 1.20.
## Compatibility
The minimum supported Rust version is 1.26.
This crate does not work in `no_std` environments.
## License
Licensed under the terms of MIT license and the Apache License (Version 2.0).
Licensed under either of
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.
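
The updated README stops at the `extern crate` line; for a concrete picture of the 0.6 API, here is a minimal end-to-end sketch grounded in the vendored tests below (a `Worker` owns the deque, and its `Stealer` handle can be sent to other threads):

```rust
extern crate crossbeam_deque as deque;

use std::thread;
use deque::Steal;

fn main() {
    // The worker end pushes tasks; the stealer end is a cheap handle
    // that other threads use to take work.
    let (worker, stealer) = deque::fifo();
    for i in 0..100usize {
        worker.push(i);
    }

    let handle = thread::spawn(move || {
        let mut sum = 0usize;
        loop {
            match stealer.steal() {
                Steal::Data(x) => sum += x,
                Steal::Empty => break,
                Steal::Retry => {} // contention with other stealers; retry
            }
        }
        sum
    });

    assert_eq!(handle.join().unwrap(), 4950); // 0 + 1 + ... + 99
}
```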

third_party/rust/crossbeam-deque/src/lib.rs (vendored, 1563 lines changed)

Diff not shown because of its large size.

third_party/rust/crossbeam-deque/tests/fifo.rs (vendored, new file)

@@ -0,0 +1,371 @@
extern crate crossbeam_deque as deque;
extern crate crossbeam_epoch as epoch;
extern crate rand;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use std::thread;
use deque::{Pop, Steal};
use rand::Rng;
#[test]
fn smoke() {
let (w, s) = deque::fifo::<i32>();
assert_eq!(w.pop(), Pop::Empty);
assert_eq!(s.steal(), Steal::Empty);
w.push(1);
assert_eq!(w.pop(), Pop::Data(1));
assert_eq!(w.pop(), Pop::Empty);
assert_eq!(s.steal(), Steal::Empty);
w.push(2);
assert_eq!(s.steal(), Steal::Data(2));
assert_eq!(s.steal(), Steal::Empty);
assert_eq!(w.pop(), Pop::Empty);
w.push(3);
w.push(4);
w.push(5);
assert_eq!(s.steal(), Steal::Data(3));
assert_eq!(s.steal(), Steal::Data(4));
assert_eq!(s.steal(), Steal::Data(5));
assert_eq!(s.steal(), Steal::Empty);
w.push(6);
w.push(7);
w.push(8);
w.push(9);
assert_eq!(w.pop(), Pop::Data(6));
assert_eq!(s.steal(), Steal::Data(7));
assert_eq!(w.pop(), Pop::Data(8));
assert_eq!(w.pop(), Pop::Data(9));
assert_eq!(w.pop(), Pop::Empty);
}
#[test]
fn steal_push() {
const STEPS: usize = 50_000;
let (w, s) = deque::fifo();
let t = thread::spawn(move || {
for i in 0..STEPS {
loop {
if let Steal::Data(v) = s.steal() {
assert_eq!(i, v);
break;
}
}
}
});
for i in 0..STEPS {
w.push(i);
}
t.join().unwrap();
}
#[test]
fn stampede() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
let (w, s) = deque::fifo();
for i in 0..COUNT {
w.push(Box::new(i + 1));
}
let remaining = Arc::new(AtomicUsize::new(COUNT));
let threads = (0..THREADS)
.map(|_| {
let s = s.clone();
let remaining = remaining.clone();
thread::spawn(move || {
let mut last = 0;
while remaining.load(SeqCst) > 0 {
if let Steal::Data(x) = s.steal() {
assert!(last < *x);
last = *x;
remaining.fetch_sub(1, SeqCst);
}
}
})
}).collect::<Vec<_>>();
let mut last = 0;
while remaining.load(SeqCst) > 0 {
loop {
match w.pop() {
Pop::Data(x) => {
assert!(last < *x);
last = *x;
remaining.fetch_sub(1, SeqCst);
break;
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
for t in threads {
t.join().unwrap();
}
}
fn run_stress() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
let (w, s) = deque::fifo();
let done = Arc::new(AtomicBool::new(false));
let hits = Arc::new(AtomicUsize::new(0));
let threads = (0..THREADS)
.map(|_| {
let s = s.clone();
let done = done.clone();
let hits = hits.clone();
thread::spawn(move || {
let (w2, _) = deque::fifo();
while !done.load(SeqCst) {
if let Steal::Data(_) = s.steal() {
hits.fetch_add(1, SeqCst);
}
if let Steal::Data(_) = s.steal_many(&w2) {
hits.fetch_add(1, SeqCst);
loop {
match w2.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
}
})
}).collect::<Vec<_>>();
let mut rng = rand::thread_rng();
let mut expected = 0;
while expected < COUNT {
if rng.gen_range(0, 3) == 0 {
loop {
match w.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
} else {
w.push(expected);
expected += 1;
}
}
while hits.load(SeqCst) < COUNT {
loop {
match w.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
done.store(true, SeqCst);
for t in threads {
t.join().unwrap();
}
}
#[test]
fn stress() {
run_stress();
}
#[test]
fn stress_pinned() {
let _guard = epoch::pin();
run_stress();
}
#[test]
fn no_starvation() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
let (w, s) = deque::fifo();
let done = Arc::new(AtomicBool::new(false));
let (threads, hits): (Vec<_>, Vec<_>) = (0..THREADS)
.map(|_| {
let s = s.clone();
let done = done.clone();
let hits = Arc::new(AtomicUsize::new(0));
let t = {
let hits = hits.clone();
thread::spawn(move || {
let (w2, _) = deque::fifo();
while !done.load(SeqCst) {
if let Steal::Data(_) = s.steal() {
hits.fetch_add(1, SeqCst);
}
if let Steal::Data(_) = s.steal_many(&w2) {
hits.fetch_add(1, SeqCst);
loop {
match w2.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
}
})
};
(t, hits)
}).unzip();
let mut rng = rand::thread_rng();
let mut my_hits = 0;
loop {
for i in 0..rng.gen_range(0, COUNT) {
if rng.gen_range(0, 3) == 0 && my_hits == 0 {
loop {
match w.pop() {
Pop::Data(_) => my_hits += 1,
Pop::Empty => break,
Pop::Retry => {}
}
}
} else {
w.push(i);
}
}
if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) {
break;
}
}
done.store(true, SeqCst);
for t in threads {
t.join().unwrap();
}
}
#[test]
fn destructors() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
const STEPS: usize = 1000;
struct Elem(usize, Arc<Mutex<Vec<usize>>>);
impl Drop for Elem {
fn drop(&mut self) {
self.1.lock().unwrap().push(self.0);
}
}
let (w, s) = deque::fifo();
let dropped = Arc::new(Mutex::new(Vec::new()));
let remaining = Arc::new(AtomicUsize::new(COUNT));
for i in 0..COUNT {
w.push(Elem(i, dropped.clone()));
}
let threads = (0..THREADS)
.map(|_| {
let remaining = remaining.clone();
let s = s.clone();
thread::spawn(move || {
let (w2, _) = deque::fifo();
let mut cnt = 0;
while cnt < STEPS {
if let Steal::Data(_) = s.steal() {
cnt += 1;
remaining.fetch_sub(1, SeqCst);
}
if let Steal::Data(_) = s.steal_many(&w2) {
cnt += 1;
remaining.fetch_sub(1, SeqCst);
loop {
match w2.pop() {
Pop::Data(_) => {
cnt += 1;
remaining.fetch_sub(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
}
})
}).collect::<Vec<_>>();
for _ in 0..STEPS {
loop {
match w.pop() {
Pop::Data(_) => {
remaining.fetch_sub(1, SeqCst);
break;
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
for t in threads {
t.join().unwrap();
}
let rem = remaining.load(SeqCst);
assert!(rem > 0);
{
let mut v = dropped.lock().unwrap();
assert_eq!(v.len(), COUNT - rem);
v.clear();
}
drop((w, s));
{
let mut v = dropped.lock().unwrap();
assert_eq!(v.len(), rem);
v.sort();
for pair in v.windows(2) {
assert_eq!(pair[0] + 1, pair[1]);
}
}
}

third_party/rust/crossbeam-deque/tests/lifo.rs (vendored, new file)

@@ -0,0 +1,371 @@
extern crate crossbeam_deque as deque;
extern crate crossbeam_epoch as epoch;
extern crate rand;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::atomic::{AtomicBool, AtomicUsize};
use std::sync::{Arc, Mutex};
use std::thread;
use deque::{Pop, Steal};
use rand::Rng;
#[test]
fn smoke() {
let (w, s) = deque::lifo::<i32>();
assert_eq!(w.pop(), Pop::Empty);
assert_eq!(s.steal(), Steal::Empty);
w.push(1);
assert_eq!(w.pop(), Pop::Data(1));
assert_eq!(w.pop(), Pop::Empty);
assert_eq!(s.steal(), Steal::Empty);
w.push(2);
assert_eq!(s.steal(), Steal::Data(2));
assert_eq!(s.steal(), Steal::Empty);
assert_eq!(w.pop(), Pop::Empty);
w.push(3);
w.push(4);
w.push(5);
assert_eq!(s.steal(), Steal::Data(3));
assert_eq!(s.steal(), Steal::Data(4));
assert_eq!(s.steal(), Steal::Data(5));
assert_eq!(s.steal(), Steal::Empty);
w.push(6);
w.push(7);
w.push(8);
w.push(9);
assert_eq!(w.pop(), Pop::Data(9));
assert_eq!(s.steal(), Steal::Data(6));
assert_eq!(w.pop(), Pop::Data(8));
assert_eq!(w.pop(), Pop::Data(7));
assert_eq!(w.pop(), Pop::Empty);
}
#[test]
fn steal_push() {
const STEPS: usize = 50_000;
let (w, s) = deque::lifo();
let t = thread::spawn(move || {
for i in 0..STEPS {
loop {
if let Steal::Data(v) = s.steal() {
assert_eq!(i, v);
break;
}
}
}
});
for i in 0..STEPS {
w.push(i);
}
t.join().unwrap();
}
#[test]
fn stampede() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
let (w, s) = deque::lifo();
for i in 0..COUNT {
w.push(Box::new(i + 1));
}
let remaining = Arc::new(AtomicUsize::new(COUNT));
let threads = (0..THREADS)
.map(|_| {
let s = s.clone();
let remaining = remaining.clone();
thread::spawn(move || {
let mut last = 0;
while remaining.load(SeqCst) > 0 {
if let Steal::Data(x) = s.steal() {
assert!(last < *x);
last = *x;
remaining.fetch_sub(1, SeqCst);
}
}
})
}).collect::<Vec<_>>();
let mut last = COUNT + 1;
while remaining.load(SeqCst) > 0 {
loop {
match w.pop() {
Pop::Data(x) => {
assert!(last > *x);
last = *x;
remaining.fetch_sub(1, SeqCst);
break;
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
for t in threads {
t.join().unwrap();
}
}
fn run_stress() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
let (w, s) = deque::lifo();
let done = Arc::new(AtomicBool::new(false));
let hits = Arc::new(AtomicUsize::new(0));
let threads = (0..THREADS)
.map(|_| {
let s = s.clone();
let done = done.clone();
let hits = hits.clone();
thread::spawn(move || {
let (w2, _) = deque::lifo();
while !done.load(SeqCst) {
if let Steal::Data(_) = s.steal() {
hits.fetch_add(1, SeqCst);
}
if let Steal::Data(_) = s.steal_many(&w2) {
hits.fetch_add(1, SeqCst);
loop {
match w2.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
}
})
}).collect::<Vec<_>>();
let mut rng = rand::thread_rng();
let mut expected = 0;
while expected < COUNT {
if rng.gen_range(0, 3) == 0 {
loop {
match w.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
} else {
w.push(expected);
expected += 1;
}
}
while hits.load(SeqCst) < COUNT {
loop {
match w.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
done.store(true, SeqCst);
for t in threads {
t.join().unwrap();
}
}
#[test]
fn stress() {
run_stress();
}
#[test]
fn stress_pinned() {
let _guard = epoch::pin();
run_stress();
}
#[test]
fn no_starvation() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
let (w, s) = deque::lifo();
let done = Arc::new(AtomicBool::new(false));
let (threads, hits): (Vec<_>, Vec<_>) = (0..THREADS)
.map(|_| {
let s = s.clone();
let done = done.clone();
let hits = Arc::new(AtomicUsize::new(0));
let t = {
let hits = hits.clone();
thread::spawn(move || {
let (w2, _) = deque::lifo();
while !done.load(SeqCst) {
if let Steal::Data(_) = s.steal() {
hits.fetch_add(1, SeqCst);
}
if let Steal::Data(_) = s.steal_many(&w2) {
hits.fetch_add(1, SeqCst);
loop {
match w2.pop() {
Pop::Data(_) => {
hits.fetch_add(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
}
})
};
(t, hits)
}).unzip();
let mut rng = rand::thread_rng();
let mut my_hits = 0;
loop {
for i in 0..rng.gen_range(0, COUNT) {
if rng.gen_range(0, 3) == 0 && my_hits == 0 {
loop {
match w.pop() {
Pop::Data(_) => my_hits += 1,
Pop::Empty => break,
Pop::Retry => {}
}
}
} else {
w.push(i);
}
}
if my_hits > 0 && hits.iter().all(|h| h.load(SeqCst) > 0) {
break;
}
}
done.store(true, SeqCst);
for t in threads {
t.join().unwrap();
}
}
#[test]
fn destructors() {
const THREADS: usize = 8;
const COUNT: usize = 50_000;
const STEPS: usize = 1000;
struct Elem(usize, Arc<Mutex<Vec<usize>>>);
impl Drop for Elem {
fn drop(&mut self) {
self.1.lock().unwrap().push(self.0);
}
}
let (w, s) = deque::lifo();
let dropped = Arc::new(Mutex::new(Vec::new()));
let remaining = Arc::new(AtomicUsize::new(COUNT));
for i in 0..COUNT {
w.push(Elem(i, dropped.clone()));
}
let threads = (0..THREADS)
.map(|_| {
let remaining = remaining.clone();
let s = s.clone();
thread::spawn(move || {
let (w2, _) = deque::lifo();
let mut cnt = 0;
while cnt < STEPS {
if let Steal::Data(_) = s.steal() {
cnt += 1;
remaining.fetch_sub(1, SeqCst);
}
if let Steal::Data(_) = s.steal_many(&w2) {
cnt += 1;
remaining.fetch_sub(1, SeqCst);
loop {
match w2.pop() {
Pop::Data(_) => {
cnt += 1;
remaining.fetch_sub(1, SeqCst);
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
}
})
}).collect::<Vec<_>>();
for _ in 0..STEPS {
loop {
match w.pop() {
Pop::Data(_) => {
remaining.fetch_sub(1, SeqCst);
break;
}
Pop::Empty => break,
Pop::Retry => {}
}
}
}
for t in threads {
t.join().unwrap();
}
let rem = remaining.load(SeqCst);
assert!(rem > 0);
{
let mut v = dropped.lock().unwrap();
assert_eq!(v.len(), COUNT - rem);
v.clear();
}
drop((w, s));
{
let mut v = dropped.lock().unwrap();
assert_eq!(v.len(), rem);
v.sort();
for pair in v.windows(2) {
assert_eq!(pair[0] + 1, pair[1]);
}
}
}

third_party/rust/crossbeam-epoch/.cargo-checksum.json (vendored)

@@ -1 +1 @@
{"files":{"CHANGELOG.md":"3f0652c2ad1fc46b10d22cc3a5ad5fd8b737746dd3f3bc20d1e2a90432391892","Cargo.toml":"dc814f5487179536504adc4c77cacd827cd09b20dc81f49d3257553843599fb9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"2721d525d6ea1309b5ce780d7748303ee24eecff074243f086bbf37768141efb","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"25ce494d162c4b730608e865894bda7fee6fdded5544f00b8882e482e39c12df","src/atomic.rs":"e9383337a4754c022a8d3c06372910299cb8318b620f26fe50347b244c4caee4","src/collector.rs":"0a068c19f67b094c52cd9e0e2cf4e6b7630cd6af810769cfebe4274631065e55","src/default.rs":"67c0e52f2ce85bc205e61a4f807848c0aab93dfcc034e8c460f7669694d4d43f","src/deferred.rs":"3e49824277fdc25a68498263a7ada67aca3977edef9545985f911ba42d7a2e61","src/epoch.rs":"47fb45f1cc07700473b25324dcdb00a086c5c145c69bed3eee6547552298fecf","src/guard.rs":"22c9d2a6c9a35e19f8d6da2cc69dc612226a1807e789291668f1ed85410dc351","src/internal.rs":"c2ee6dff11bb9a44afcff441fce04640da1bb070c778cedc9edf86c94b71aaf8","src/lib.rs":"325a7964f690d851006563341423ce69f9277db7e8bf21bb9139cdf22927f471","src/sync/list.rs":"abb9eae31f09d7c3692aed3c7ad7a3ad6d692992af891037db8eba50d1245f0c","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"0254d182f820c8c880c9a80747501eb2cb9d53aa8cb958c04beceb39abf86aa9"},"package":"2af0e75710d6181e234c8ecc79f14a97907850a541b13b0be1dd10992f2e4620"}
{"files":{"CHANGELOG.md":"07d12c10c21688eb540d75106930bc5789525b40e8fc5502b3ee1742b86488f9","Cargo.toml":"d5ab00475fd595eaa4093d015f3e9ea4105baf61b51834021cbfa594eb7da56a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"8d165ab5cada21a377029d1ad9f4f1e6e134460fb67a2132bc1c4fb161b79984","benches/defer.rs":"c4fcf1ebb596c1cde9661b64d6ea18e380b4ef56ae426d0c76edb5226cc591bb","benches/flush.rs":"773685fe76781e0d2d295af2153cf8a0320559306f2dab5bb407bfa94f8442c5","benches/pin.rs":"260ff2bc470b7e39075163a7eb70d4576b179873b460b6b58e37ddb5eac13d7e","examples/sanitize.rs":"487855c366f86e9fa8036a7ac8698fb318e03b01cbb7e8591cf9deff18c2cdc4","examples/treiber_stack.rs":"a0938c34bf1251ec2aa8768573758d6c190f93049eabe282fcecdb4dac2ac88d","src/atomic.rs":"3f882cb2b0df02e78e559aada5834f8c832a17fb0b17522c9eb70dee80670cd5","src/collector.rs":"bf9bf52a0b04f5f47f3ccce1a82804d173e5ec010f262d6767dc9f3e08fd5953","src/default.rs":"6afda8fd141ad594bed62baeb73f2e97c5ef33b051969a542bb908946fe39dd1","src/deferred.rs":"d5ace4be72e926cedb699cd19ae4076bbe87d795d650aa68f264106e6ff15bee","src/epoch.rs":"76dd63356d5bc52e741883d39abb636e4ccb04d20499fb2a0ce797bb81aa4e91","src/guard.rs":"1d90d690b02ee735263e845827f720be44faea871852731dd8444b92796f1539","src/internal.rs":"e48c7cfd1e7f764ee3af1e9bd69165641cfce65d9ff8a8c62233aa891cee1e81","src/lib.rs":"8633a59dd9a4d167dfb3b4470102fa6e70f6902f265f179f5bf522b9084ec6a6","src/sync/list.rs":"96f0acfa33197c6ba0711e1a7e21eab68faa811283544317b6844bf00f6be490","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"94f80d8163c9ecac875cd8dbe7120e9067c7fee19dee57a4ba2f0912822dcc5a"},"package":"04c9e3102cc2d69cd681412141b390abd55a362afc1540965dad0ad4d34280b4"}

third_party/rust/crossbeam-epoch/CHANGELOG.md (vendored)

@ -1,70 +1,76 @@
# Changelog
All notable changes to this project will be documented in this file.

# Version 0.7.1
- Add `Shared::deref_mut()`.
- Add a Treiber stack to examples.

# Version 0.7.0
- Remove `Guard::clone()`.
- Bump dependencies.

# Version 0.6.1
- Update `crossbeam-utils` to `0.6`.

# Version 0.6.0
- `defer` now requires `F: Send + 'static`.
- Bump the minimum Rust version to 1.26.
- Pinning while TLS is tearing down does not fail anymore.
- Rename `Handle` to `LocalHandle`.
- Add `defer_unchecked` and `defer_destroy`.
- Remove `Clone` impl for `LocalHandle`.

# Version 0.5.2
- Update `crossbeam-utils` to `0.5`.

# Version 0.5.1
- Fix compatibility with the latest Rust nightly.

# Version 0.5.0
- Update `crossbeam-utils` to `0.4`.
- Specify the minimum Rust version to `1.25.0`.

# Version 0.4.3
- Downgrade `crossbeam-utils` to `0.3` because it was a breaking change.

# Version 0.4.2
- Expose the `Pointer` trait.
- Warn missing docs and missing debug impls.
- Update `crossbeam-utils` to `0.4`.

# Version 0.4.1
- Add `Debug` impls for `Collector`, `Handle`, and `Guard`.
- Add `load_consume` to `Atomic`.
- Rename `Collector::handle` to `Collector::register`.
- Remove the `Send` implementation for `Handle` (this was a bug). Only
  `Collector`s can be shared among multiple threads, while `Handle`s and
  `Guard`s must stay within the thread in which they were created.

# Version 0.4.0
- Update dependencies.
- Remove support for Rust 1.13.

# Version 0.3.0
- Add support for Rust 1.13.
- Improve documentation for CAS.

# Version 0.2.0
- Add method `Owned::into_box`.
- Fix a use-after-free bug in `Local::finalize`.
- Fix an ordering bug in `Global::push_bag`.
- Fix a bug in calculating distance between epochs.
- Remove `impl<T> Into<Box<T>> for Owned<T>`.

# Version 0.1.0
- First version of the new epoch-based GC.

third_party/rust/crossbeam-epoch/Cargo.toml (vendored)

@ -12,16 +12,16 @@
[package]
name = "crossbeam-epoch"
version = "0.4.3"
version = "0.7.1"
authors = ["The Crossbeam Project Developers"]
description = "Epoch-based garbage collection"
homepage = "https://github.com/crossbeam-rs/crossbeam-epoch"
homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-epoch"
documentation = "https://docs.rs/crossbeam-epoch"
readme = "README.md"
keywords = ["lock-free", "rcu", "atomic", "garbage"]
categories = ["concurrency", "memory-management"]
categories = ["concurrency", "memory-management", "no-std"]
license = "MIT/Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam-epoch"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.arrayvec]
version = "0.4"
default-features = false
@ -30,7 +30,7 @@ default-features = false
version = "0.1"
[dependencies.crossbeam-utils]
version = "0.3"
version = "0.6"
default-features = false
[dependencies.lazy_static]
@ -44,10 +44,10 @@ version = "0.2"
version = "0.3"
default-features = false
[dev-dependencies.rand]
version = "0.4"
version = "0.6"
[features]
default = ["use_std"]
nightly = ["arrayvec/use_union"]
default = ["std"]
nightly = ["crossbeam-utils/nightly", "arrayvec/use_union"]
sanitize = []
use_std = ["lazy_static", "crossbeam-utils/use_std"]
std = ["crossbeam-utils/std", "lazy_static"]


@ -1,5 +1,3 @@
Copyright (c) 2010 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the

third_party/rust/crossbeam-epoch/README.md (vendored)

@ -1,15 +1,24 @@
# Epoch-based garbage collection
# Crossbeam Epoch
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-epoch.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-epoch)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-epoch)
[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](https://crates.io/crates/crossbeam-epoch)
[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](https://docs.rs/crossbeam-epoch)
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam.svg?branch=master)](
https://travis-ci.org/crossbeam-rs/crossbeam)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](
https://github.com/crossbeam-rs/crossbeam-epoch)
[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](
https://crates.io/crates/crossbeam-epoch)
[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](
https://docs.rs/crossbeam-epoch)
[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)](
https://www.rust-lang.org)
This crate provides epoch-based garbage collection for use in concurrent data structures.
This crate provides epoch-based garbage collection for building concurrent data structures.
If a thread removes a node from a concurrent data structure, other threads
may still have pointers to that node, so it cannot be immediately destructed.
Epoch GC allows deferring destruction until it becomes safe to do so.
When a thread removes an object from a concurrent data structure, other threads
may be still using pointers to it at the same time, so it cannot be destroyed
immediately. Epoch-based GC is an efficient mechanism for deferring destruction of
shared objects until no pointers to them can exist.
Everything in this crate except the global GC can be used in `no_std` + `alloc` environments.
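As a minimal sketch of the pattern described above (not part of the vendored README), using the `Atomic`, `Owned`, and `Guard::defer_destroy` APIs this 0.7 release introduces:

```rust
extern crate crossbeam_epoch as epoch;

use epoch::{Atomic, Owned};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(1234);

    // Pin the current thread so the collector knows it may hold pointers.
    let guard = &epoch::pin();

    // Swap in a new value; other pinned threads may still be reading `old`.
    let old = a.swap(Owned::new(5678).into_shared(guard), SeqCst, guard);

    unsafe {
        // Destruction is deferred until no pinned thread can observe `old`.
        guard.defer_destroy(old);
    }
}
```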
## Usage
@ -17,7 +26,7 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-epoch = "0.4"
crossbeam-epoch = "0.7"
```
Next, add this to your crate:
@ -28,6 +37,15 @@ extern crate crossbeam_epoch as epoch;
## License
Licensed under the terms of MIT license and the Apache License (Version 2.0).
Licensed under either of
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
* Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.


@ -6,7 +6,7 @@ extern crate test;
use epoch::Owned;
use test::Bencher;
use utils::scoped::scope;
use utils::thread::scope;
#[bench]
fn single_alloc_defer_free(b: &mut Bencher) {
@ -14,7 +14,7 @@ fn single_alloc_defer_free(b: &mut Bencher) {
let guard = &epoch::pin();
let p = Owned::new(1).into_shared(guard);
unsafe {
guard.defer(move || p.into_owned());
guard.defer_destroy(p);
}
});
}
@ -23,9 +23,7 @@ fn single_alloc_defer_free(b: &mut Bencher) {
fn single_defer(b: &mut Bencher) {
b.iter(|| {
let guard = &epoch::pin();
unsafe {
guard.defer(move || ());
}
guard.defer(move || ());
});
}
@ -37,17 +35,17 @@ fn multi_alloc_defer_free(b: &mut Bencher) {
b.iter(|| {
scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
s.spawn(|_| {
for _ in 0..STEPS {
let guard = &epoch::pin();
let p = Owned::new(1).into_shared(guard);
unsafe {
guard.defer(move || p.into_owned());
guard.defer_destroy(p);
}
}
});
}
});
}).unwrap();
});
}
@ -59,15 +57,13 @@ fn multi_defer(b: &mut Bencher) {
b.iter(|| {
scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
s.spawn(|_| {
for _ in 0..STEPS {
let guard = &epoch::pin();
unsafe {
guard.defer(move || ());
}
guard.defer(move || ());
}
});
}
});
}).unwrap();
});
}


@ -7,7 +7,7 @@ extern crate test;
use std::sync::Barrier;
use test::Bencher;
use utils::scoped::scope;
use utils::thread::scope;
#[bench]
fn single_flush(b: &mut Bencher) {
@ -18,7 +18,7 @@ fn single_flush(b: &mut Bencher) {
scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
s.spawn(|_| {
epoch::pin();
start.wait();
end.wait();
@ -28,7 +28,7 @@ fn single_flush(b: &mut Bencher) {
start.wait();
b.iter(|| epoch::pin().flush());
end.wait();
});
}).unwrap();
}
#[bench]
@ -39,13 +39,13 @@ fn multi_flush(b: &mut Bencher) {
b.iter(|| {
scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
s.spawn(|_| {
for _ in 0..STEPS {
let guard = &epoch::pin();
guard.flush();
}
});
}
});
}).unwrap();
});
}


@ -5,18 +5,13 @@ extern crate crossbeam_utils as utils;
extern crate test;
use test::Bencher;
use utils::scoped::scope;
use utils::thread::scope;
#[bench]
fn single_pin(b: &mut Bencher) {
b.iter(|| epoch::pin());
}
#[bench]
fn single_default_handle_pin(b: &mut Bencher) {
b.iter(|| epoch::default_handle().pin());
}
#[bench]
fn multi_pin(b: &mut Bencher) {
const THREADS: usize = 16;
@ -25,12 +20,12 @@ fn multi_pin(b: &mut Bencher) {
b.iter(|| {
scope(|s| {
for _ in 0..THREADS {
s.spawn(|| {
s.spawn(|_| {
for _ in 0..STEPS {
epoch::pin();
}
});
}
});
}).unwrap();
});
}


@ -1,16 +1,16 @@
extern crate crossbeam_epoch as epoch;
extern crate rand;
use std::sync::Arc;
use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
use std::time::{Duration, Instant};
use std::sync::Arc;
use std::thread;
use std::time::{Duration, Instant};
use epoch::{Atomic, Collector, Handle, Owned, Shared};
use epoch::{Atomic, Collector, LocalHandle, Owned, Shared};
use rand::Rng;
fn worker(a: Arc<Atomic<AtomicUsize>>, handle: Handle) -> usize {
fn worker(a: Arc<Atomic<AtomicUsize>>, handle: LocalHandle) -> usize {
let mut rng = rand::thread_rng();
let mut sum = 0;
@ -28,15 +28,13 @@ fn worker(a: Arc<Atomic<AtomicUsize>>, handle: Handle) -> usize {
let val = if rng.gen() {
let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard);
unsafe {
guard.defer(move || p.into_owned());
guard.defer_destroy(p);
guard.flush();
p.deref().load(Relaxed)
}
} else {
let p = a.load(Acquire, guard);
unsafe {
p.deref().fetch_add(sum, Relaxed)
}
unsafe { p.deref().fetch_add(sum, Relaxed) }
};
sum = sum.wrapping_add(val);
@ -56,15 +54,15 @@ fn main() {
let a = a.clone();
let c = collector.clone();
thread::spawn(move || worker(a, c.register()))
})
.collect::<Vec<_>>();
}).collect::<Vec<_>>();
for t in threads {
t.join().unwrap();
}
unsafe {
a.swap(Shared::null(), AcqRel, epoch::unprotected()).into_owned();
a.swap(Shared::null(), AcqRel, epoch::unprotected())
.into_owned();
}
}
}

third_party/rust/crossbeam-epoch/examples/treiber_stack.rs (vendored, new file)

@ -0,0 +1,109 @@
extern crate crossbeam_epoch as epoch;
extern crate crossbeam_utils as utils;

use std::mem::ManuallyDrop;
use std::ptr;
use std::sync::atomic::Ordering::{Acquire, Relaxed, Release};

use epoch::{Atomic, Owned};
use utils::thread::scope;

/// Treiber's lock-free stack.
///
/// Usable with any number of producers and consumers.
#[derive(Debug)]
pub struct TreiberStack<T> {
    head: Atomic<Node<T>>,
}

#[derive(Debug)]
struct Node<T> {
    data: ManuallyDrop<T>,
    next: Atomic<Node<T>>,
}

impl<T> TreiberStack<T> {
    /// Creates a new, empty stack.
    pub fn new() -> TreiberStack<T> {
        TreiberStack {
            head: Atomic::null(),
        }
    }

    /// Pushes a value on top of the stack.
    pub fn push(&self, t: T) {
        let mut n = Owned::new(Node {
            data: ManuallyDrop::new(t),
            next: Atomic::null(),
        });

        let guard = epoch::pin();

        loop {
            let head = self.head.load(Relaxed, &guard);
            n.next.store(head, Relaxed);

            match self.head.compare_and_set(head, n, Release, &guard) {
                Ok(_) => break,
                Err(e) => n = e.new,
            }
        }
    }

    /// Attempts to pop the top element from the stack.
    ///
    /// Returns `None` if the stack is empty.
    pub fn pop(&self) -> Option<T> {
        let guard = epoch::pin();
        loop {
            let head = self.head.load(Acquire, &guard);

            match unsafe { head.as_ref() } {
                Some(h) => {
                    let next = h.next.load(Relaxed, &guard);

                    if self
                        .head
                        .compare_and_set(head, next, Release, &guard)
                        .is_ok()
                    {
                        unsafe {
                            guard.defer_destroy(head);
                            return Some(ManuallyDrop::into_inner(ptr::read(&(*h).data)));
                        }
                    }
                }
                None => return None,
            }
        }
    }

    /// Returns `true` if the stack is empty.
    pub fn is_empty(&self) -> bool {
        let guard = epoch::pin();
        self.head.load(Acquire, &guard).is_null()
    }
}

impl<T> Drop for TreiberStack<T> {
    fn drop(&mut self) {
        while self.pop().is_some() {}
    }
}

fn main() {
    let stack = TreiberStack::new();

    scope(|scope| {
        for _ in 0..10 {
            scope.spawn(|_| {
                for i in 0..10_000 {
                    stack.push(i);
                    assert!(stack.pop().is_some());
                }
            });
        }
    }).unwrap();

    assert!(stack.pop().is_none());
}


@ -1,16 +1,15 @@
use alloc::boxed::Box;
use core::borrow::{Borrow, BorrowMut};
use core::cmp;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use core::ops::{Deref, DerefMut};
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use core::sync::atomic::Ordering;
use alloc::boxed::Box;
use core::ptr;
use core::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_utils::atomic::AtomicConsume;
use guard::Guard;
use crossbeam_utils::consume::AtomicConsume;
/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
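The helper's body is elided by this hunk; a sketch of the mapping the comment describes (the failure ordering of a compare-exchange may not be `Release` or `AcqRel`, nor stronger than the success ordering) might look like:

```rust
use std::sync::atomic::Ordering::{self, AcqRel, Acquire, Relaxed, Release, SeqCst};

fn strongest_failure_ordering(ord: Ordering) -> Ordering {
    match ord {
        Relaxed | Release => Relaxed, // no acquire half to preserve
        Acquire | AcqRel => Acquire,  // keep only the acquire half
        _ => SeqCst,                  // SeqCst stays SeqCst
    }
}
```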
@ -154,7 +153,7 @@ impl<T> Atomic<T> {
#[cfg(not(feature = "nightly"))]
pub fn null() -> Atomic<T> {
Self {
data: ATOMIC_USIZE_INIT,
data: AtomicUsize::new(0),
_marker: PhantomData,
}
}
@ -171,7 +170,7 @@ impl<T> Atomic<T> {
#[cfg(feature = "nightly")]
pub const fn null() -> Atomic<T> {
Self {
data: ATOMIC_USIZE_INIT,
data: AtomicUsize::new(0),
_marker: PhantomData,
}
}
@ -936,6 +935,46 @@ impl<'g, T> Shared<'g, T> {
&*self.as_raw()
}
/// Dereferences the pointer.
///
/// Returns a mutable reference to the pointee that is valid during the lifetime `'g`.
///
/// # Safety
///
/// * There is no guarantee that there are no more threads attempting to read/write from/to the
/// actual object at the same time.
///
/// The user must know that there are no concurrent accesses towards the object itself.
///
/// * Other than the above, all safety concerns of `deref()` applies here.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new(vec![1, 2, 3, 4]);
/// let guard = &epoch::pin();
///
/// let mut p = a.load(SeqCst, guard);
/// unsafe {
/// assert!(!p.is_null());
/// let b = p.deref_mut();
/// assert_eq!(b, &vec![1, 2, 3, 4]);
/// b.push(5);
/// assert_eq!(b, &vec![1, 2, 3, 4, 5]);
/// }
///
/// let p = a.load(SeqCst, guard);
/// unsafe {
/// assert_eq!(p.deref(), &vec![1, 2, 3, 4, 5]);
/// }
/// ```
pub unsafe fn deref_mut(&mut self) -> &'g mut T {
&mut *(self.as_raw() as *mut T)
}
/// Converts the pointer to a reference.
///
/// Returns `None` if the pointer is null, or else a reference to the object wrapped in `Some`.


@ -12,12 +12,11 @@
///
/// handle.pin().flush();
/// ```
use alloc::arc::Arc;
use alloc::sync::Arc;
use core::fmt;
use internal::{Global, Local};
use guard::Guard;
use internal::{Global, Local};
/// An epoch-based garbage collector.
pub struct Collector {
@ -30,11 +29,13 @@ unsafe impl Sync for Collector {}
impl Collector {
/// Creates a new collector.
pub fn new() -> Self {
Collector { global: Arc::new(Global::new()) }
Collector {
global: Arc::new(Global::new()),
}
}
/// Registers a new handle for the collector.
pub fn register(&self) -> Handle {
pub fn register(&self) -> LocalHandle {
Local::register(self)
}
}
@ -42,13 +43,15 @@ impl Collector {
impl Clone for Collector {
/// Creates another reference to the same garbage collector.
fn clone(&self) -> Self {
Collector { global: self.global.clone() }
Collector {
global: self.global.clone(),
}
}
}
impl fmt::Debug for Collector {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Collector").finish()
f.pad("Collector { .. }")
}
}
@ -61,11 +64,11 @@ impl PartialEq for Collector {
impl Eq for Collector {}
/// A handle to a garbage collector.
pub struct Handle {
pub struct LocalHandle {
pub(crate) local: *const Local,
}
impl Handle {
impl LocalHandle {
/// Pins the handle.
#[inline]
pub fn pin(&self) -> Guard {
@ -85,7 +88,7 @@ impl Handle {
}
}
impl Drop for Handle {
impl Drop for LocalHandle {
#[inline]
fn drop(&mut self) {
unsafe {
@ -94,29 +97,18 @@ impl Drop for Handle {
}
}
impl Clone for Handle {
#[inline]
fn clone(&self) -> Self {
unsafe {
Local::acquire_handle(&*self.local);
}
Handle { local: self.local }
}
}
impl fmt::Debug for Handle {
impl fmt::Debug for LocalHandle {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Handle").finish()
f.pad("LocalHandle { .. }")
}
}
#[cfg(test)]
mod tests {
use std::mem;
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering;
use std::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_utils::scoped;
use crossbeam_utils::thread;
use {Collector, Owned};
@ -151,7 +143,7 @@ mod tests {
let guard = &handle.pin();
unsafe {
let a = Owned::new(7).into_shared(guard);
guard.defer(move || a.into_owned());
guard.defer_destroy(a);
assert!(!(*(*guard.local).bag.get()).is_empty());
@ -172,7 +164,7 @@ mod tests {
unsafe {
for _ in 0..10 {
let a = Owned::new(7).into_shared(guard);
guard.defer(move || a.into_owned());
guard.defer_destroy(a);
}
assert!(!(*(*guard.local).bag.get()).is_empty());
}
@ -182,9 +174,9 @@ mod tests {
fn pin_holds_advance() {
let collector = Collector::new();
scoped::scope(|scope| {
thread::scope(|scope| {
for _ in 0..NUM_THREADS {
scope.spawn(|| {
scope.spawn(|_| {
let handle = collector.register();
for _ in 0..500_000 {
let guard = &handle.pin();
@ -197,13 +189,13 @@ mod tests {
}
});
}
})
}).unwrap();
}
#[test]
fn incremental() {
const COUNT: usize = 100_000;
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
@ -212,7 +204,7 @@ mod tests {
let guard = &handle.pin();
for _ in 0..COUNT {
let a = Owned::new(7i32).into_shared(guard);
guard.defer(move || {
guard.defer_unchecked(move || {
drop(a.into_owned());
DESTROYS.fetch_add(1, Ordering::Relaxed);
});
@ -236,7 +228,7 @@ mod tests {
#[test]
fn buffering() {
const COUNT: usize = 10;
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
@ -245,7 +237,7 @@ mod tests {
let guard = &handle.pin();
for _ in 0..COUNT {
let a = Owned::new(7i32).into_shared(guard);
guard.defer(move || {
guard.defer_unchecked(move || {
drop(a.into_owned());
DESTROYS.fetch_add(1, Ordering::Relaxed);
});
@ -269,7 +261,7 @@ mod tests {
#[test]
fn count_drops() {
const COUNT: usize = 100_000;
static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
static DROPS: AtomicUsize = AtomicUsize::new(0);
struct Elem(i32);
@ -287,7 +279,7 @@ mod tests {
for _ in 0..COUNT {
let a = Owned::new(Elem(7i32)).into_shared(guard);
guard.defer(move || a.into_owned());
guard.defer_destroy(a);
}
guard.flush();
}
@ -302,7 +294,7 @@ mod tests {
#[test]
fn count_destroy() {
const COUNT: usize = 100_000;
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
@ -312,7 +304,7 @@ mod tests {
for _ in 0..COUNT {
let a = Owned::new(7i32).into_shared(guard);
guard.defer(move || {
guard.defer_unchecked(move || {
drop(a.into_owned());
DESTROYS.fetch_add(1, Ordering::Relaxed);
});
@ -330,7 +322,7 @@ mod tests {
#[test]
fn drop_array() {
const COUNT: usize = 700;
static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
static DROPS: AtomicUsize = AtomicUsize::new(0);
struct Elem(i32);
@ -352,7 +344,9 @@ mod tests {
{
let a = Owned::new(v).into_shared(&guard);
unsafe { guard.defer(move || a.into_owned()); }
unsafe {
guard.defer_destroy(a);
}
guard.flush();
}
@ -366,7 +360,7 @@ mod tests {
#[test]
fn destroy_array() {
const COUNT: usize = 100_000;
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
static DESTROYS: AtomicUsize = AtomicUsize::new(0);
let collector = Collector::new();
let handle = collector.register();
@ -381,7 +375,7 @@ mod tests {
let ptr = v.as_mut_ptr() as usize;
let len = v.len();
guard.defer(move || {
guard.defer_unchecked(move || {
drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len));
DESTROYS.fetch_add(len, Ordering::Relaxed);
});
@ -401,7 +395,7 @@ mod tests {
fn stress() {
const THREADS: usize = 8;
const COUNT: usize = 100_000;
static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
static DROPS: AtomicUsize = AtomicUsize::new(0);
struct Elem(i32);
@ -413,20 +407,20 @@ mod tests {
let collector = Collector::new();
scoped::scope(|scope| {
thread::scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|| {
scope.spawn(|_| {
let handle = collector.register();
for _ in 0..COUNT {
let guard = &handle.pin();
unsafe {
let a = Owned::new(Elem(7i32)).into_shared(guard);
guard.defer(move || a.into_owned());
guard.defer_destroy(a);
}
}
});
}
});
}).unwrap();
let handle = collector.register();
while DROPS.load(Ordering::Relaxed) < COUNT * THREADS {


@ -4,7 +4,7 @@
//! is registered in the default collector. If initialized, the thread's participant will get
//! destructed on thread exit, which in turn unregisters the thread.
use collector::{Collector, Handle};
use collector::{Collector, LocalHandle};
use guard::Guard;
lazy_static! {
@ -14,33 +14,62 @@ lazy_static! {
thread_local! {
/// The per-thread participant for the default garbage collector.
static HANDLE: Handle = COLLECTOR.register();
static HANDLE: LocalHandle = COLLECTOR.register();
}
/// Pins the current thread.
#[inline]
pub fn pin() -> Guard {
// FIXME(jeehoonkang): thread-local storage may be destructed at the time `pin()` is called. For
// that case, we should use `HANDLE.try_with()` instead.
HANDLE.with(|handle| handle.pin())
with_handle(|handle| handle.pin())
}
/// Returns `true` if the current thread is pinned.
#[inline]
pub fn is_pinned() -> bool {
// FIXME(jeehoonkang): thread-local storage may be destructed at the time `pin()` is called. For
// that case, we should use `HANDLE.try_with()` instead.
HANDLE.with(|handle| handle.is_pinned())
with_handle(|handle| handle.is_pinned())
}
/// Returns the default handle associated with the current thread.
#[inline]
pub fn default_handle() -> Handle {
HANDLE.with(|handle| handle.clone())
}
/// Returns the default handle associated with the current thread.
#[inline]
/// Returns the default global collector.
pub fn default_collector() -> &'static Collector {
&COLLECTOR
}
#[inline]
fn with_handle<F, R>(mut f: F) -> R
where
F: FnMut(&LocalHandle) -> R,
{
HANDLE
.try_with(|h| f(h))
.unwrap_or_else(|_| f(&COLLECTOR.register()))
}
#[cfg(test)]
mod tests {
use crossbeam_utils::thread;
#[test]
fn pin_while_exiting() {
struct Foo;
impl Drop for Foo {
fn drop(&mut self) {
// Pin after `HANDLE` has been dropped. This must not panic.
super::pin();
}
}
thread_local! {
static FOO: Foo = Foo;
}
thread::scope(|scope| {
scope.spawn(|_| {
// Initialize `FOO` and then `HANDLE`.
FOO.with(|_| ());
super::pin();
// At thread exit, `HANDLE` gets dropped first and `FOO` second.
});
}).unwrap();
}
}


@ -1,8 +1,8 @@
use alloc::boxed::Box;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ptr;
use alloc::boxed::Box;
/// Number of words a piece of `Data` can hold.
///
@ -24,7 +24,7 @@ pub struct Deferred {
impl fmt::Debug for Deferred {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
write!(f, "Deferred {{ ... }}")
f.pad("Deferred { .. }")
}
}
@ -78,8 +78,8 @@ impl Deferred {
#[cfg(test)]
mod tests {
use std::cell::Cell;
use super::Deferred;
use std::cell::Cell;
#[test]
fn on_stack() {

third_party/rust/crossbeam-epoch/src/epoch.rs (vendored)

@ -46,13 +46,17 @@ impl Epoch {
/// Returns the same epoch, but marked as pinned.
#[inline]
pub fn pinned(self) -> Epoch {
Epoch { data: self.data | 1 }
Epoch {
data: self.data | 1,
}
}
/// Returns the same epoch, but marked as unpinned.
#[inline]
pub fn unpinned(self) -> Epoch {
Epoch { data: self.data & !1 }
Epoch {
data: self.data & !1,
}
}
/// Returns the successor epoch.
@ -60,7 +64,9 @@ impl Epoch {
/// The returned epoch will be marked as pinned only if the previous one was as well.
#[inline]
pub fn successor(self) -> Epoch {
Epoch { data: self.data.wrapping_add(2) }
Epoch {
data: self.data.wrapping_add(2),
}
}
}
@ -83,7 +89,9 @@ impl AtomicEpoch {
/// Loads a value from the atomic epoch.
#[inline]
pub fn load(&self, ord: Ordering) -> Epoch {
Epoch { data: self.data.load(ord) }
Epoch {
data: self.data.load(ord),
}
}
/// Stores a value into the atomic epoch.

third_party/rust/crossbeam-epoch/src/guard.rs (vendored)

@ -1,10 +1,10 @@
use core::fmt;
use core::ptr;
use core::mem;
use atomic::Shared;
use collector::Collector;
use deferred::Deferred;
use internal::Local;
use collector::Collector;
/// A guard that keeps the current thread pinned.
///
@ -64,15 +64,6 @@ use collector::Collector;
/// assert!(!epoch::is_pinned());
/// ```
///
/// The same can be achieved by cloning guards:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = guard1.clone();
/// ```
///
/// [`pin`]: fn.pin.html
pub struct Guard {
pub(crate) local: *const Local,
@ -87,9 +78,37 @@ impl Guard {
/// functions from both local and global caches may get executed in order to incrementally
/// clean up the caches as they fill up.
///
/// There is no guarantee when exactly `f` will be executed. The only guarantee is that won't
/// until all currently pinned threads get unpinned. In theory, `f` might never be deallocated,
/// but the epoch-based garbage collection will make an effort to execute it reasonably soon.
/// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
/// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
/// never run, but the epoch-based garbage collection will make an effort to execute it
/// reasonably soon.
///
/// If this method is called from an [`unprotected`] guard, the function will simply be
/// executed immediately.
///
/// [`unprotected`]: fn.unprotected.html
pub fn defer<F, R>(&self, f: F)
where
F: FnOnce() -> R,
F: Send + 'static,
{
unsafe {
self.defer_unchecked(f);
}
}
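A hedged usage sketch for the safe `defer` wrapper added above; the closure is `Send + 'static`, so whichever thread later collects garbage may run it:

```rust
extern crate crossbeam_epoch as epoch;

fn main() {
    let guard = epoch::pin();

    // Runs some time after all currently pinned threads become unpinned.
    guard.defer(move || println!("deferred cleanup ran"));

    // Optionally migrate locally cached deferred functions to the global queue.
    guard.flush();
}
```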
/// Stores a function so that it can be executed at some point after all currently pinned
/// threads get unpinned.
///
/// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
/// becomes full, some functions are moved into the global cache. At the same time, some
/// functions from both local and global caches may get executed in order to incrementally
/// clean up the caches as they fill up.
///
/// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
/// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
/// never run, but the epoch-based garbage collection will make an effort to execute it
/// reasonably soon.
///
/// If this method is called from an [`unprotected`] guard, the function will simply be
/// executed immediately.
@ -106,8 +125,8 @@ impl Guard {
/// let guard = &epoch::pin();
/// let message = "Hello!";
/// unsafe {
/// // ALWAYS use `move` when sending a closure into `defef`.
/// guard.defer(move || {
/// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
/// guard.defer_unchecked(move || {
/// println!("{}", message);
/// });
/// }
@ -122,7 +141,7 @@ impl Guard {
///
/// ```ignore
/// let shared = Owned::new(7i32).into_shared(guard);
/// guard.defer(Deferred::new(move || shared.into_owned())); // `Shared` is not `Send`!
/// guard.defer_unchecked(move || shared.into_owned()); // `Shared` is not `Send`!
/// ```
///
/// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
@ -137,7 +156,7 @@ impl Guard {
/// get dropped. This method can defer deallocation until all those threads get unpinned and
/// consequently drop all their references on the stack.
///
/// ```rust
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
@ -156,8 +175,8 @@ impl Guard {
/// // The object `p` is pointing to is now unreachable.
/// // Defer its deallocation until all currently pinned threads get unpinned.
/// unsafe {
/// // ALWAYS use `move` when sending a closure into `defer`.
/// guard.defer(move || {
/// // ALWAYS use `move` when sending a closure into `defer_unchecked`.
/// guard.defer_unchecked(move || {
/// println!("{} is now being deallocated.", p.deref());
/// // Now we have unique access to the object pointed to by `p` and can turn it
/// // into an `Owned`. Dropping the `Owned` will deallocate the object.
@ -168,7 +187,7 @@ impl Guard {
/// ```
///
/// [`unprotected`]: fn.unprotected.html
pub unsafe fn defer<F, R>(&self, f: F)
pub unsafe fn defer_unchecked<F, R>(&self, f: F)
where
F: FnOnce() -> R,
{
@ -177,6 +196,80 @@ impl Guard {
}
}
/// Stores a destructor for an object so that it can be deallocated and dropped at some point
/// after all currently pinned threads get unpinned.
///
/// This method first stores the destructor into the thread-local (or handle-local) cache. If
/// this cache becomes full, some destructors are moved into the global cache. At the same
/// time, some destructors from both local and global caches may get executed in order to
/// incrementally clean up the caches as they fill up.
///
/// There is no guarantee when exactly the destructor will be executed. The only guarantee is
/// that it won't be executed until all currently pinned threads get unpinned. In theory, the
/// destructor might never run, but the epoch-based garbage collection will make an effort to
/// execute it reasonably soon.
///
/// If this method is called from an [`unprotected`] guard, the destructor will simply be
/// executed immediately.
///
/// # Safety
///
/// The object must not be reachable by other threads anymore, otherwise it might still be in
/// use when the destructor runs.
///
/// Apart from that, keep in mind that another thread may execute the destructor, so the object
/// must be sendable to other threads.
///
/// We intentionally didn't require `T: Send`, because Rust's type system usually cannot prove
/// `T: Send` for typical use cases. For example, consider the following code snippet, which
/// exemplifies the typical use case of deferring the deallocation of a shared reference:
///
/// ```ignore
/// let shared = Owned::new(7i32).into_shared(guard);
/// guard.defer_destroy(shared); // `Shared` is not `Send`!
/// ```
///
/// While `Shared` is not `Send`, it's safe for another thread to call the destructor, because
/// it's called only after the grace period and `shared` is no longer shared with other
/// threads. But we don't expect type systems to prove this.
///
/// # Examples
///
/// When a heap-allocated object in a data structure becomes unreachable, it has to be
/// deallocated. However, the current thread and other threads may still be holding references
/// on the stack to that same object. Therefore it cannot be deallocated before those references
/// get dropped. This method can defer deallocation until all those threads get unpinned and
/// consequently drop all their references on the stack.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// let a = Atomic::new("foo");
///
/// // Now suppose that `a` is shared among multiple threads and concurrently
/// // accessed and modified...
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Steal the object currently stored in `a` and swap it with another one.
/// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
///
/// if !p.is_null() {
/// // The object `p` is pointing to is now unreachable.
/// // Defer its deallocation until all currently pinned threads get unpinned.
/// unsafe {
/// guard.defer_destroy(p);
/// }
/// }
/// ```
///
/// [`unprotected`]: fn.unprotected.html
pub unsafe fn defer_destroy<T>(&self, ptr: Shared<T>) {
self.defer_unchecked(move || ptr.into_owned());
}
/// Clears up the thread-local cache of deferred functions by executing them or moving into the
/// global cache.
///
@ -330,19 +423,9 @@ impl Drop for Guard {
}
}
impl Clone for Guard {
#[inline]
fn clone(&self) -> Guard {
match unsafe { self.local.as_ref() } {
None => Guard { local: ptr::null() },
Some(local) => local.pin(),
}
}
}
impl fmt::Debug for Guard {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Guard").finish()
f.pad("Guard { .. }")
}
}


@ -35,23 +35,23 @@
//! Ideally each instance of concurrent data structure may have its own queue that gets fully
//! destroyed as soon as the data structure gets dropped.
use alloc::boxed::Box;
use core::cell::{Cell, UnsafeCell};
use core::mem::{self, ManuallyDrop};
use core::num::Wrapping;
use core::ptr;
use core::sync::atomic;
use core::sync::atomic::Ordering;
use alloc::boxed::Box;
use crossbeam_utils::cache_padded::CachePadded;
use arrayvec::ArrayVec;
use crossbeam_utils::CachePadded;
use atomic::Owned;
use collector::{Handle, Collector};
use collector::{Collector, LocalHandle};
use deferred::Deferred;
use epoch::{AtomicEpoch, Epoch};
use guard::{unprotected, Guard};
use deferred::Deferred;
use sync::list::{List, Entry, IterError, IsElement};
use sync::list::{Entry, IsElement, IterError, List};
use sync::queue::Queue;
/// Maximum number of objects a bag can contain.
@ -184,8 +184,7 @@ impl Global {
match self.queue.try_pop_if(
&|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch),
guard,
)
{
) {
None => break,
Some(sealed_bag) => drop(sealed_bag),
}
@ -276,7 +275,7 @@ impl Local {
const PINNINGS_BETWEEN_COLLECT: usize = 128;
/// Registers a new `Local` in the provided `Global`.
pub fn register(collector: &Collector) -> Handle {
pub fn register(collector: &Collector) -> LocalHandle {
unsafe {
// Since we dereference no pointers in this block, it is safe to use `unprotected`.
@ -290,7 +289,9 @@ impl Local {
pin_count: Cell::new(Wrapping(0)),
}).into_shared(&unprotected());
collector.global.locals.insert(local, &unprotected());
Handle { local: local.as_raw() }
LocalHandle {
local: local.as_raw(),
}
}
}
@ -360,10 +361,20 @@ impl Local {
// instruction.
//
// Both instructions have the effect of a full barrier, but benchmarks have shown
// that the second one makes pinning faster in this particular case.
// that the second one makes pinning faster in this particular case. It is not
// clear that this is permitted by the C++ memory model (SC fences work very
// differently from SC accesses), but experimental evidence suggests that this
// works fine. Using inline assembly would be a viable (and correct) alternative,
// but alas, that is not possible on stable Rust.
let current = Epoch::starting();
let previous = self.epoch.compare_and_swap(current, new_epoch, Ordering::SeqCst);
let previous = self
.epoch
.compare_and_swap(current, new_epoch, Ordering::SeqCst);
debug_assert_eq!(current, previous, "participant was expected to be unpinned");
// We add a compiler fence to make it less likely for LLVM to do something wrong
// here. Formally, this is not enough to get rid of data races; practically,
// it should go a long way.
atomic::compiler_fence(Ordering::SeqCst);
} else {
self.epoch.store(new_epoch, Ordering::Relaxed);
atomic::fence(Ordering::SeqCst);
@ -406,7 +417,7 @@ impl Local {
// Update the local epoch only if there's only one guard.
if guard_count == 1 {
let epoch = self.epoch.load(Ordering::Relaxed);
let global_epoch = self.global().epoch.load(Ordering::Relaxed);
let global_epoch = self.global().epoch.load(Ordering::Relaxed).pinned();
// Update the local epoch only if the global epoch is greater than the local epoch.
if epoch != global_epoch {
@ -498,14 +509,13 @@ impl IsElement<Local> for Local {
#[cfg(test)]
mod tests {
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
use std::sync::atomic::Ordering;
use std::sync::atomic::{AtomicUsize, Ordering};
use super::*;
#[test]
fn check_defer() {
static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
static FLAG: AtomicUsize = AtomicUsize::new(0);
fn set() {
FLAG.store(42, Ordering::Relaxed);
}
@ -518,7 +528,7 @@ mod tests {
#[test]
fn check_bag() {
static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
static FLAG: AtomicUsize = AtomicUsize::new(0);
fn incr() {
FLAG.fetch_add(1, Ordering::Relaxed);
}

third_party/rust/crossbeam-epoch/src/lib.rs (vendored)

@ -54,34 +54,30 @@
//! [`pin`]: fn.pin.html
//! [`defer`]: fn.defer.html
#![cfg_attr(feature = "nightly", feature(const_fn))]
#![warn(missing_docs)]
#![warn(missing_debug_implementations)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "nightly", feature(alloc))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(feature = "nightly", feature(const_fn))]
#![warn(missing_docs, missing_debug_implementations)]
#[cfg(test)]
extern crate core;
#[cfg(all(not(test), feature = "use_std"))]
#[macro_use]
extern crate std;
extern crate cfg_if;
#[cfg(feature = "std")]
extern crate core;
// Use liballoc on nightly to avoid a dependency on libstd
#[cfg(feature = "nightly")]
extern crate alloc;
#[cfg(not(feature = "nightly"))]
mod alloc {
// Tweak the module layout to match the one in liballoc
extern crate std;
pub use self::std::boxed;
pub use self::std::sync as arc;
cfg_if! {
if #[cfg(feature = "nightly")] {
extern crate alloc;
} else {
mod alloc {
extern crate std;
pub use self::std::*;
}
}
}
extern crate arrayvec;
extern crate crossbeam_utils;
#[cfg(feature = "use_std")]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate memoffset;
#[macro_use]
@ -89,16 +85,22 @@ extern crate scopeguard;
mod atomic;
mod collector;
#[cfg(feature = "use_std")]
mod default;
mod deferred;
mod epoch;
mod guard;
mod internal;
mod sync;
pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared, Pointer};
pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Pointer, Shared};
pub use self::collector::{Collector, LocalHandle};
pub use self::guard::{unprotected, Guard};
#[cfg(feature = "use_std")]
pub use self::default::{default_collector, default_handle, is_pinned, pin};
pub use self::collector::{Collector, Handle};
cfg_if! {
if #[cfg(feature = "std")] {
#[macro_use]
extern crate lazy_static;
mod default;
pub use self::default::{default_collector, is_pinned, pin};
}
}


@ -6,7 +6,7 @@
use core::marker::PhantomData;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use {Atomic, Shared, Guard, unprotected};
use {unprotected, Atomic, Guard, Shared};
/// An entry in a linked list.
///
@ -131,7 +131,9 @@ pub enum IterError {
impl Default for Entry {
/// Returns the empty entry.
fn default() -> Self {
Self { next: Atomic::null() }
Self {
next: Atomic::null(),
}
}
}
@ -245,19 +247,17 @@ impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
// node leaves the list in an invalid state.
debug_assert!(self.curr.tag() == 0);
match self.pred.compare_and_set(
self.curr,
succ,
Acquire,
self.guard,
) {
match self
.pred
.compare_and_set(self.curr, succ, Acquire, self.guard)
{
Ok(_) => {
// We succeeded in unlinking this element from the list, so we have to
// schedule deallocation. Deferred drop is okay, because `list.delete()`
// can only be called if `T: 'static`.
unsafe {
let p = self.curr;
self.guard.defer(move || C::finalize(p.deref()));
self.guard.defer_unchecked(move || C::finalize(p.deref()));
}
// Move over the removed by only advancing `curr`, not `pred`.
@ -289,10 +289,10 @@ impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
#[cfg(test)]
mod tests {
use {Collector, Owned};
use crossbeam_utils::scoped;
use std::sync::Barrier;
use super::*;
use crossbeam_utils::thread;
use std::sync::Barrier;
use {Collector, Owned};
impl IsElement<Entry> for Entry {
fn entry_of(entry: &Entry) -> &Entry {
@ -396,29 +396,31 @@ mod tests {
let l: List<Entry> = List::new();
let b = Barrier::new(THREADS);
scoped::scope(|s| for _ in 0..THREADS {
s.spawn(|| {
b.wait();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|_| {
b.wait();
let handle = collector.register();
let guard: Guard = handle.pin();
let mut v = Vec::with_capacity(ITERS);
let handle = collector.register();
let guard: Guard = handle.pin();
let mut v = Vec::with_capacity(ITERS);
for _ in 0..ITERS {
let e = Owned::new(Entry::default()).into_shared(&guard);
v.push(e);
unsafe {
l.insert(e, &guard);
for _ in 0..ITERS {
let e = Owned::new(Entry::default()).into_shared(&guard);
v.push(e);
unsafe {
l.insert(e, &guard);
}
}
}
for e in v {
unsafe {
e.as_ref().unwrap().delete(&guard);
for e in v {
unsafe {
e.as_ref().unwrap().delete(&guard);
}
}
}
});
});
});
}
}).unwrap();
let handle = collector.register();
let guard = handle.pin();
@ -435,34 +437,36 @@ mod tests {
let l: List<Entry> = List::new();
let b = Barrier::new(THREADS);
scoped::scope(|s| for _ in 0..THREADS {
s.spawn(|| {
b.wait();
thread::scope(|s| {
for _ in 0..THREADS {
s.spawn(|_| {
b.wait();
let handle = collector.register();
let guard: Guard = handle.pin();
let mut v = Vec::with_capacity(ITERS);
let handle = collector.register();
let guard: Guard = handle.pin();
let mut v = Vec::with_capacity(ITERS);
for _ in 0..ITERS {
let e = Owned::new(Entry::default()).into_shared(&guard);
v.push(e);
unsafe {
l.insert(e, &guard);
for _ in 0..ITERS {
let e = Owned::new(Entry::default()).into_shared(&guard);
v.push(e);
unsafe {
l.insert(e, &guard);
}
}
}
let mut iter = l.iter(&guard);
for _ in 0..ITERS {
assert!(iter.next().is_some());
}
for e in v {
unsafe {
e.as_ref().unwrap().delete(&guard);
let mut iter = l.iter(&guard);
for _ in 0..ITERS {
assert!(iter.next().is_some());
}
}
});
});
for e in v {
unsafe {
e.as_ref().unwrap().delete(&guard);
}
}
});
}
}).unwrap();
let handle = collector.register();
let guard = handle.pin();


@ -9,7 +9,7 @@ use core::mem::{self, ManuallyDrop};
use core::ptr;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
use crossbeam_utils::cache_padded::CachePadded;
use crossbeam_utils::CachePadded;
use {unprotected, Atomic, Guard, Owned, Shared};
@ -39,7 +39,6 @@ struct Node<T> {
unsafe impl<T: Send> Sync for Queue<T> {}
unsafe impl<T: Send> Send for Queue<T> {}
impl<T> Queue<T> {
/// Create a new, empty queue.
pub fn new() -> Queue<T> {
@ -73,7 +72,8 @@ impl<T> Queue<T> {
false
} else {
// looks like the actual tail; attempt to link in `n`
let result = o.next
let result = o
.next
.compare_and_set(Shared::null(), new, Release, guard)
.is_ok();
if result {
@ -114,10 +114,9 @@ impl<T> Queue<T> {
self.head
.compare_and_set(head, next, Release, guard)
.map(|_| {
guard.defer(move || drop(head.into_owned()));
guard.defer_destroy(head);
Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
})
.map_err(|_| ())
}).map_err(|_| ())
},
None => Ok(None),
}
@ -139,10 +138,9 @@ impl<T> Queue<T> {
self.head
.compare_and_set(head, next, Release, guard)
.map(|_| {
guard.defer(move || drop(head.into_owned()));
guard.defer_destroy(head);
Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
})
.map_err(|_| ())
}).map_err(|_| ())
},
None | Some(_) => Ok(None),
}
@ -190,11 +188,10 @@ impl<T> Drop for Queue<T> {
}
}
#[cfg(test)]
mod test {
use super::*;
use crossbeam_utils::scoped;
use crossbeam_utils::thread;
use pin;
struct Queue<T> {
@ -203,7 +200,9 @@ mod test {
impl<T> Queue<T> {
pub fn new() -> Queue<T> {
Queue { queue: super::Queue::new() }
Queue {
queue: super::Queue::new(),
}
}
pub fn push(&self, t: T) {
@ -309,8 +308,8 @@ mod test {
let q: Queue<i64> = Queue::new();
assert!(q.is_empty());
scoped::scope(|scope| {
scope.spawn(|| {
thread::scope(|scope| {
scope.spawn(|_| {
let mut next = 0;
while next < CONC_COUNT {
@ -324,7 +323,7 @@ mod test {
for i in 0..CONC_COUNT {
q.push(i)
}
});
}).unwrap();
}
#[test]
@ -345,16 +344,18 @@ mod test {
let q: Queue<i64> = Queue::new();
assert!(q.is_empty());
let qr = &q;
scoped::scope(|scope| {
thread::scope(|scope| {
for i in 0..3 {
scope.spawn(move || recv(i, qr));
let q = &q;
scope.spawn(move |_| recv(i, q));
}
scope.spawn(|| for i in 0..CONC_COUNT {
q.push(i);
})
});
scope.spawn(|_| {
for i in 0..CONC_COUNT {
q.push(i);
}
});
}).unwrap();
}
#[test]
@ -367,41 +368,47 @@ mod test {
let q: Queue<LR> = Queue::new();
assert!(q.is_empty());
scoped::scope(|scope| for _t in 0..2 {
scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT {
q.push(LR::Left(i))
});
scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT {
q.push(LR::Right(i))
});
scope.spawn(|| {
let mut vl = vec![];
let mut vr = vec![];
for _i in 0..CONC_COUNT {
match q.try_pop() {
Some(LR::Left(x)) => vl.push(x),
Some(LR::Right(x)) => vr.push(x),
_ => {}
thread::scope(|scope| {
for _t in 0..2 {
scope.spawn(|_| {
for i in CONC_COUNT - 1..CONC_COUNT {
q.push(LR::Left(i))
}
});
scope.spawn(|_| {
for i in CONC_COUNT - 1..CONC_COUNT {
q.push(LR::Right(i))
}
});
scope.spawn(|_| {
let mut vl = vec![];
let mut vr = vec![];
for _i in 0..CONC_COUNT {
match q.try_pop() {
Some(LR::Left(x)) => vl.push(x),
Some(LR::Right(x)) => vr.push(x),
_ => {}
}
}
}
let mut vl2 = vl.clone();
let mut vr2 = vr.clone();
vl2.sort();
vr2.sort();
let mut vl2 = vl.clone();
let mut vr2 = vr.clone();
vl2.sort();
vr2.sort();
assert_eq!(vl, vl2);
assert_eq!(vr, vr2);
});
});
assert_eq!(vl, vl2);
assert_eq!(vr, vr2);
});
}
}).unwrap();
}
#[test]
fn push_pop_many_spsc() {
let q: Queue<i64> = Queue::new();
scoped::scope(|scope| {
scope.spawn(|| {
thread::scope(|scope| {
scope.spawn(|_| {
let mut next = 0;
while next < CONC_COUNT {
assert_eq!(q.pop(), next);
@ -412,7 +419,7 @@ mod test {
for i in 0..CONC_COUNT {
q.push(i)
}
});
}).unwrap();
assert!(q.is_empty());
}


@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"6b764c44d2f0ddb3a10101f738673685992bbd894152c0fc354d571f5115f85a","Cargo.toml":"48f3a37f7267b76120aa309e4e2d4e13df6e2994b5b2b402177640957dbcb18b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/consume.rs":"422c6006dca162a80d39f1abcf1fe26dae6d69772111b3e8824c7f9b335c3ec2","src/lib.rs":"81273b19bd30f6f20084ff01af1acedadcf9ac88db89137d59cb7ee24c226588","src/scoped.rs":"1b7eaaf1fd6033875e4e368e4318a93430bedeb6f68a11c10221ace0243cd83b"},"package":"d636a8b3bcc1b409d7ffd3facef8f21dcb4009626adbd0c5e6c4305c07253c7b"}


@ -1,59 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
## [Unreleased]
## [0.3.2] - 2018-03-12
### Fixed
- Mark `load_consume` with `#[inline]`.
## [0.3.1] - 2018-03-12
### Fixed
- `load_consume` on ARM and AArch64.
## [0.3.0] - 2018-03-11
### Added
- `join` for scoped thread API.
- `load_consume` for atomic load-consume memory ordering.
### Removed
- `AtomicOption`.
## [0.2.2] - 2018-01-14
### Added
- Support for Rust 1.12.1.
### Fixed
- Call `T::clone` when cloning a `CachePadded<T>`.
## [0.2.1] - 2017-11-26
### Added
- Add `use_std` feature.
## [0.2.0] - 2017-11-17
### Added
- Add `nightly` feature.
- Use `repr(align(64))` on `CachePadded` with the `nightly` feature.
- Implement `Drop` for `CachePadded<T>`.
- Implement `Clone` for `CachePadded<T>`.
- Implement `From<T>` for `CachePadded<T>`.
- Implement better `Debug` for `CachePadded<T>`.
- Write more tests.
- Add this changelog.
### Changed
- Change cache line length to 64 bytes.
### Removed
- Remove `ZerosValid`.
## 0.1.0 - 2017-08-27
### Added
- Old implementation of `CachePadded` from `crossbeam` version 0.3.0
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.1...HEAD
[0.2.1]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.0...v0.2.1
[0.2.0]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.1.0...v0.2.0


@ -1,29 +0,0 @@
# Utilities for concurrent programming
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-utils.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-utils)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-utils)
[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)](https://crates.io/crates/crossbeam-utils)
[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)](https://docs.rs/crossbeam-utils)
This crate provides utilities for concurrent programming.
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
crossbeam-utils = "0.2"
```
Next, add this to your crate:
```rust
extern crate crossbeam_utils;
```
## License
Licensed under the terms of MIT license and the Apache License (Version 2.0).
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.


@ -1,290 +0,0 @@
use core::fmt;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr;
cfg_if! {
if #[cfg(feature = "nightly")] {
// This trick allows us to support rustc 1.12.1, which does not support the
// #[repr(align(n))] syntax. Using the attribute makes the parser fail over.
// It is, however, okay to use it within a macro, since it would be parsed
// in a later stage, but that never occurs due to the cfg_if.
// TODO(Vtec234): remove this crap when we drop support for 1.12.
macro_rules! nightly_inner {
() => (
#[derive(Clone)]
#[repr(align(64))]
pub(crate) struct Inner<T> {
value: T,
}
)
}
nightly_inner!();
impl<T> Inner<T> {
pub(crate) fn new(t: T) -> Inner<T> {
Self {
value: t
}
}
}
impl<T> Deref for Inner<T> {
type Target = T;
fn deref(&self) -> &T {
&self.value
}
}
impl<T> DerefMut for Inner<T> {
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
} else {
use core::marker::PhantomData;
struct Inner<T> {
bytes: [u8; 64],
/// `[T; 0]` ensures alignment is at least that of `T`.
/// `PhantomData<T>` signals that `CachePadded<T>` contains a `T`.
_marker: ([T; 0], PhantomData<T>),
}
impl<T> Inner<T> {
fn new(t: T) -> Inner<T> {
assert!(mem::size_of::<T>() <= mem::size_of::<Self>());
assert!(mem::align_of::<T>() <= mem::align_of::<Self>());
unsafe {
let mut inner: Self = mem::uninitialized();
let p: *mut T = &mut *inner;
ptr::write(p, t);
inner
}
}
}
impl<T> Deref for Inner<T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*(self.bytes.as_ptr() as *const T) }
}
}
impl<T> DerefMut for Inner<T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *(self.bytes.as_ptr() as *mut T) }
}
}
impl<T> Drop for CachePadded<T> {
fn drop(&mut self) {
let p: *mut T = self.deref_mut();
unsafe {
ptr::drop_in_place(p);
}
}
}
impl<T: Clone> Clone for Inner<T> {
fn clone(&self) -> Inner<T> {
let val = self.deref().clone();
Self::new(val)
}
}
}
}
/// Pads `T` to the length of a cache line.
///
/// Sometimes concurrent programming requires a piece of data to be padded out to the size of a
/// cacheline to avoid "false sharing": cache lines being invalidated due to unrelated concurrent
/// activity. Use this type when you want to *avoid* cache locality.
///
/// At the moment, cache lines are assumed to be 64 bytes on all architectures.
///
/// # Size and alignment
///
/// By default, the size of `CachePadded<T>` is 64 bytes. If `T` is larger than that, then
/// `CachePadded::<T>::new` will panic. Alignment of `CachePadded<T>` is the same as that of `T`.
///
/// However, if the `nightly` feature is enabled, arbitrarily large types `T` can be stored inside
/// a `CachePadded<T>`. The size will then be a multiple of 64 at least the size of `T`, and the
/// alignment will be the maximum of 64 and the alignment of `T`.
pub struct CachePadded<T> {
inner: Inner<T>,
}
unsafe impl<T: Send> Send for CachePadded<T> {}
unsafe impl<T: Sync> Sync for CachePadded<T> {}
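This file is deleted in favor of the crossbeam-utils 0.6 layout, but the padding pattern it documents is unchanged; a minimal sketch using the `crossbeam_utils::CachePadded` path that the new code in this diff imports:

```rust
extern crate crossbeam_utils;

use crossbeam_utils::CachePadded;
use std::sync::atomic::{AtomicUsize, Ordering};

// Padding keeps the two counters on separate cache lines, so threads
// hammering `hits` do not invalidate the cache line holding `misses`.
struct Stats {
    hits: CachePadded<AtomicUsize>,
    misses: CachePadded<AtomicUsize>,
}

fn main() {
    let stats = Stats {
        hits: CachePadded::new(AtomicUsize::new(0)),
        misses: CachePadded::new(AtomicUsize::new(0)),
    };
    stats.hits.fetch_add(1, Ordering::Relaxed); // access goes through `Deref`
    assert_eq!(stats.hits.load(Ordering::Relaxed), 1);
}
```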
impl<T> CachePadded<T> {
/// Pads a value to the length of a cache line.
///
/// # Panics
///
/// If `nightly` is not enabled and `T` is larger than 64 bytes, this function will panic.
pub fn new(t: T) -> CachePadded<T> {
CachePadded::<T> { inner: Inner::new(t) }
}
}
impl<T> Deref for CachePadded<T> {
type Target = T;
fn deref(&self) -> &T {
self.inner.deref()
}
}
impl<T> DerefMut for CachePadded<T> {
fn deref_mut(&mut self) -> &mut T {
self.inner.deref_mut()
}
}
impl<T: Default> Default for CachePadded<T> {
fn default() -> Self {
Self::new(Default::default())
}
}
impl<T: Clone> Clone for CachePadded<T> {
fn clone(&self) -> Self {
CachePadded { inner: self.inner.clone() }
}
}
impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let inner: &T = &*self;
write!(f, "CachePadded {{ {:?} }}", inner)
}
}
impl<T> From<T> for CachePadded<T> {
fn from(t: T) -> Self {
CachePadded::new(t)
}
}
#[cfg(test)]
mod test {
use super::*;
use std::cell::Cell;
#[test]
fn store_u64() {
let x: CachePadded<u64> = CachePadded::new(17);
assert_eq!(*x, 17);
}
#[test]
fn store_pair() {
let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37));
assert_eq!(x.0, 17);
assert_eq!(x.1, 37);
}
#[test]
fn distance() {
let arr = [CachePadded::new(17u8), CachePadded::new(37u8)];
let a = &*arr[0] as *const u8;
let b = &*arr[1] as *const u8;
assert!(unsafe { a.offset(64) } <= b);
}
#[test]
fn different_sizes() {
CachePadded::new(17u8);
CachePadded::new(17u16);
CachePadded::new(17u32);
CachePadded::new([17u64; 0]);
CachePadded::new([17u64; 1]);
CachePadded::new([17u64; 2]);
CachePadded::new([17u64; 3]);
CachePadded::new([17u64; 4]);
CachePadded::new([17u64; 5]);
CachePadded::new([17u64; 6]);
CachePadded::new([17u64; 7]);
CachePadded::new([17u64; 8]);
}
cfg_if! {
if #[cfg(feature = "nightly")] {
#[test]
fn large() {
let a = [17u64; 9];
let b = CachePadded::new(a);
assert!(mem::size_of_val(&a) <= mem::size_of_val(&b));
}
} else {
#[test]
#[should_panic]
fn large() {
CachePadded::new([17u64; 9]);
}
}
}
#[test]
fn debug() {
assert_eq!(
format!("{:?}", CachePadded::new(17u64)),
"CachePadded { 17 }"
);
}
#[test]
fn drops() {
let count = Cell::new(0);
struct Foo<'a>(&'a Cell<usize>);
impl<'a> Drop for Foo<'a> {
fn drop(&mut self) {
self.0.set(self.0.get() + 1);
}
}
let a = CachePadded::new(Foo(&count));
let b = CachePadded::new(Foo(&count));
assert_eq!(count.get(), 0);
drop(a);
assert_eq!(count.get(), 1);
drop(b);
assert_eq!(count.get(), 2);
}
#[test]
fn clone() {
let a = CachePadded::new(17);
let b = a.clone();
assert_eq!(*a, *b);
}
#[test]
fn runs_custom_clone() {
let count = Cell::new(0);
struct Foo<'a>(&'a Cell<usize>);
impl<'a> Clone for Foo<'a> {
fn clone(&self) -> Foo<'a> {
self.0.set(self.0.get() + 1);
Foo::<'a>(self.0)
}
}
let a = CachePadded::new(Foo(&count));
a.clone();
assert_eq!(count.get(), 1);
}
}
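A minimal usage sketch, not part of the vendored file (the `crossbeam_utils::cache_padded` path follows the 0.3-era layout shown above): two counters padded so that writers on different threads do not invalidate each other's cache line.

```
extern crate crossbeam_utils;

use crossbeam_utils::cache_padded::CachePadded;
use std::sync::atomic::{AtomicUsize, Ordering};

struct Counters {
    hits: CachePadded<AtomicUsize>,
    misses: CachePadded<AtomicUsize>,
}

fn main() {
    let c = Counters {
        hits: CachePadded::new(AtomicUsize::new(0)),
        misses: CachePadded::new(AtomicUsize::new(0)),
    };
    // `CachePadded` derefs to the inner value, so the atomic API works as usual.
    c.hits.fetch_add(1, Ordering::Relaxed);
    // Each field occupies its own 64-byte block, so there is no false sharing.
    assert!(std::mem::size_of::<Counters>() >= 128);
    assert_eq!(c.hits.load(Ordering::Relaxed), 1);
}
```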

@@ -1,82 +0,0 @@
use core::sync::atomic::Ordering;
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
use core::sync::atomic::compiler_fence;
/// Trait which allows reading from an atomic type with "consume" ordering.
pub trait AtomicConsume {
/// Type returned by `load_consume`.
type Val;
/// Loads a value from the atomic using a "consume" memory ordering.
///
/// This is similar to the "acquire" ordering, except that an ordering is
/// only guaranteed with operations that "depend on" the result of the load.
/// However, consume loads are usually much faster than acquire loads on
/// architectures with a weak memory model, since they don't require memory
/// fence instructions.
///
/// The exact definition of "depend on" is a bit vague, but it works as you
/// would expect in practice, since a lot of software, especially the Linux
/// kernel, relies on this behavior.
///
/// This is currently only implemented on ARM and AArch64, where a fence
/// can be avoided. On other architectures this will fall back to a simple
/// `load(Ordering::Acquire)`.
fn load_consume(&self) -> Self::Val;
}
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
macro_rules! impl_consume {
() => {
#[inline]
fn load_consume(&self) -> Self::Val {
let result = self.load(Ordering::Relaxed);
compiler_fence(Ordering::Acquire);
result
}
};
}
#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
macro_rules! impl_consume {
() => {
#[inline]
fn load_consume(&self) -> Self::Val {
self.load(Ordering::Acquire)
}
};
}
macro_rules! impl_atomic {
($atomic:ident, $val:ty) => {
impl AtomicConsume for ::core::sync::atomic::$atomic {
type Val = $val;
impl_consume!();
}
};
}
impl_atomic!(AtomicBool, bool);
impl_atomic!(AtomicUsize, usize);
impl_atomic!(AtomicIsize, isize);
#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
impl_atomic!(AtomicU8, u8);
#[cfg(all(feature = "nightly", target_has_atomic = "8"))]
impl_atomic!(AtomicI8, i8);
#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
impl_atomic!(AtomicU16, u16);
#[cfg(all(feature = "nightly", target_has_atomic = "16"))]
impl_atomic!(AtomicI16, i16);
#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
impl_atomic!(AtomicU32, u32);
#[cfg(all(feature = "nightly", target_has_atomic = "32"))]
impl_atomic!(AtomicI32, i32);
#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
impl_atomic!(AtomicU64, u64);
#[cfg(all(feature = "nightly", target_has_atomic = "64"))]
impl_atomic!(AtomicI64, i64);
impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
type Val = *mut T;
impl_consume!();
}
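A brief sketch of `load_consume` in use; illustrative only, with the import path (`crossbeam_utils::consume`) assumed from the old module layout above.

```
extern crate crossbeam_utils;

use crossbeam_utils::consume::AtomicConsume;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let ready = AtomicUsize::new(0);
    ready.store(1, Ordering::Release);
    // On ARM/AArch64 this is a relaxed load plus a compiler fence; on other
    // architectures it falls back to an ordinary load(Ordering::Acquire).
    assert_eq!(ready.load_consume(), 1);
}
```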

@@ -1,14 +0,0 @@
#![cfg_attr(feature = "nightly",
feature(attr_literals, repr_align, cfg_target_has_atomic, integer_atomics))]
#![cfg_attr(not(feature = "use_std"), no_std)]
#[cfg(feature = "use_std")]
extern crate core;
#[macro_use]
extern crate cfg_if;
pub mod cache_padded;
#[cfg(feature = "use_std")]
pub mod scoped;
pub mod consume;

@@ -1,381 +0,0 @@
/// Scoped thread.
///
/// # Examples
///
/// A basic scoped thread:
///
/// ```
/// crossbeam_utils::scoped::scope(|scope| {
/// scope.spawn(|| {
/// println!("Hello from a scoped thread!");
/// });
/// });
/// ```
///
/// When writing concurrent Rust programs, you'll sometimes see a pattern like this, using
/// [`std::thread::spawn`][spawn]:
///
/// ```ignore
/// let array = [1, 2, 3];
/// let mut guards = vec![];
///
/// for i in &array {
/// let guard = std::thread::spawn(move || {
/// println!("element: {}", i);
/// });
///
/// guards.push(guard);
/// }
///
/// for guard in guards {
/// guard.join().unwrap();
/// }
/// ```
///
/// The basic pattern is:
///
/// 1. Iterate over some collection.
/// 2. Spin up a thread to operate on each part of the collection.
/// 3. Join all the threads.
///
/// However, this code actually gives an error:
///
/// ```text
/// error: `array` does not live long enough
/// for i in &array {
/// ^~~~~
/// in expansion of for loop expansion
/// note: expansion site
/// note: reference must be valid for the static lifetime...
/// note: ...but borrowed value is only valid for the block suffix following statement 0 at ...
/// let array = [1, 2, 3];
/// let mut guards = vec![];
///
/// for i in &array {
/// let guard = std::thread::spawn(move || {
/// println!("element: {}", i);
/// ...
/// error: aborting due to previous error
/// ```
///
/// Because [`std::thread::spawn`][spawn] doesn't know about this scope, it requires a
/// `'static` lifetime. One way of giving it a proper lifetime is to use an [`Arc`][arc]:
///
/// [arc]: http://doc.rust-lang.org/stable/std/sync/struct.Arc.html
///
/// ```
/// use std::sync::Arc;
///
/// let array = Arc::new([1, 2, 3]);
/// let mut guards = vec![];
///
/// for i in 0..array.len() {
/// let a = array.clone();
///
/// let guard = std::thread::spawn(move || {
/// println!("element: {}", a[i]);
/// });
///
/// guards.push(guard);
/// }
///
/// for guard in guards {
/// guard.join().unwrap();
/// }
/// ```
///
/// But this introduces unnecessary allocation, as `Arc<T>` puts its data on the heap, and we
/// also end up dealing with reference counts. We know that we're joining the threads before
/// our function returns, so just taking a reference _should_ be safe. Rust can't know that,
/// though.
///
/// Enter scoped threads. Here's our original example, using `spawn` from crossbeam rather
/// than from `std::thread`:
///
/// ```
/// let array = [1, 2, 3];
///
/// crossbeam_utils::scoped::scope(|scope| {
/// for i in &array {
/// scope.spawn(move || {
/// println!("element: {}", i);
/// });
/// }
/// });
/// ```
///
/// Much more straightforward.
// FIXME(jeehoonkang): maybe we should create a new crate for scoped threads.
use std::cell::RefCell;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ops::DerefMut;
use std::rc::Rc;
use std::thread;
use std::io;
#[doc(hidden)]
trait FnBox<T> {
fn call_box(self: Box<Self>) -> T;
}
impl<T, F: FnOnce() -> T> FnBox<T> for F {
fn call_box(self: Box<Self>) -> T {
(*self)()
}
}
/// Like `std::thread::spawn`, but without the closure bounds.
pub unsafe fn spawn_unsafe<'a, F>(f: F) -> thread::JoinHandle<()>
where
F: FnOnce() + Send + 'a,
{
let builder = thread::Builder::new();
builder_spawn_unsafe(builder, f).unwrap()
}
/// Like `std::thread::Builder::spawn`, but without the closure bounds.
pub unsafe fn builder_spawn_unsafe<'a, F>(
builder: thread::Builder,
f: F,
) -> io::Result<thread::JoinHandle<()>>
where
F: FnOnce() + Send + 'a,
{
let closure: Box<FnBox<()> + 'a> = Box::new(f);
let closure: Box<FnBox<()> + Send> = mem::transmute(closure);
builder.spawn(move || closure.call_box())
}
pub struct Scope<'a> {
/// The list of the deferred functions and thread join jobs.
dtors: RefCell<Option<DtorChain<'a, ()>>>,
// !Send + !Sync
_marker: PhantomData<*const ()>,
}
struct DtorChain<'a, T> {
dtor: Box<FnBox<T> + 'a>,
next: Option<Box<DtorChain<'a, T>>>,
}
impl<'a, T> DtorChain<'a, T> {
pub fn pop(chain: &mut Option<DtorChain<'a, T>>) -> Option<Box<FnBox<T> + 'a>> {
chain.take().map(|mut node| {
*chain = node.next.take().map(|b| *b);
node.dtor
})
}
}
struct JoinState<T> {
join_handle: thread::JoinHandle<()>,
result: usize,
_marker: PhantomData<T>,
}
impl<T: Send> JoinState<T> {
fn new(join_handle: thread::JoinHandle<()>, result: usize) -> JoinState<T> {
JoinState {
join_handle: join_handle,
result: result,
_marker: PhantomData,
}
}
fn join(self) -> thread::Result<T> {
let result = self.result;
self.join_handle.join().map(|_| {
unsafe { *Box::from_raw(result as *mut T) }
})
}
}
/// A handle to a scoped thread
pub struct ScopedJoinHandle<'a, T: 'a> {
// !Send + !Sync
inner: Rc<RefCell<Option<JoinState<T>>>>,
thread: thread::Thread,
_marker: PhantomData<&'a T>,
}
/// Create a new `scope`, for deferred destructors.
///
/// Scopes, in particular, support [*scoped thread spawning*](struct.Scope.html#method.spawn).
///
/// # Examples
///
/// Creating and using a scope:
///
/// ```
/// crossbeam_utils::scoped::scope(|scope| {
/// scope.defer(|| println!("Exiting scope"));
/// scope.spawn(|| println!("Running child thread in scope"))
/// });
/// // Prints messages in the reverse order written
/// ```
///
/// # Panics
///
/// `scoped::scope()` panics if a spawned thread panics but it is not joined inside the scope.
pub fn scope<'a, F, R>(f: F) -> R
where
F: FnOnce(&Scope<'a>) -> R,
{
let mut scope = Scope {
dtors: RefCell::new(None),
_marker: PhantomData,
};
let ret = f(&scope);
scope.drop_all();
ret
}
impl<'a> fmt::Debug for Scope<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Scope {{ ... }}")
}
}
impl<'a, T> fmt::Debug for ScopedJoinHandle<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "ScopedJoinHandle {{ ... }}")
}
}
impl<'a> Scope<'a> {
// This method is carefully written in a transactional style, so
// that it can be called directly and, if any dtor panics, can be
// resumed in the unwinding this causes. By initially running the
// method outside of any destructor, we avoid any leakage problems
// due to @rust-lang/rust#14875.
fn drop_all(&mut self) {
while let Some(dtor) = DtorChain::pop(&mut self.dtors.borrow_mut()) {
dtor.call_box();
}
}
/// Schedule code to be executed when exiting the scope.
///
/// This is akin to having a destructor on the stack, except that it is
/// *guaranteed* to be run. It is guaranteed that the function is called
/// after all the spawned threads are joined.
pub fn defer<F>(&self, f: F)
where
F: FnOnce() + 'a,
{
let mut dtors = self.dtors.borrow_mut();
*dtors = Some(DtorChain {
dtor: Box::new(f),
next: dtors.take().map(Box::new),
});
}
/// Create a scoped thread.
///
/// `spawn` is similar to the [`spawn`][spawn] function in Rust's standard library. The
/// difference is that this thread is scoped, meaning that it's guaranteed to terminate
/// before the current stack frame goes away, allowing you to reference the parent stack frame
/// directly. This is ensured by having the parent thread join on the child thread before the
/// scope exits.
///
/// [spawn]: http://doc.rust-lang.org/std/thread/fn.spawn.html
pub fn spawn<'s, F, T>(&'s self, f: F) -> ScopedJoinHandle<'a, T>
where
'a: 's,
F: FnOnce() -> T + Send + 'a,
T: Send + 'a,
{
self.builder().spawn(f).unwrap()
}
/// Generates the base configuration for spawning a scoped thread, from which configuration
/// methods can be chained.
pub fn builder<'s>(&'s self) -> ScopedThreadBuilder<'s, 'a> {
ScopedThreadBuilder {
scope: self,
builder: thread::Builder::new(),
}
}
}
/// Scoped thread configuration. Provides detailed control over the properties and behavior of new
/// scoped threads.
pub struct ScopedThreadBuilder<'s, 'a: 's> {
scope: &'s Scope<'a>,
builder: thread::Builder,
}
impl<'s, 'a: 's> ScopedThreadBuilder<'s, 'a> {
/// Names the thread-to-be. Currently the name is used for identification only in panic
/// messages.
pub fn name(mut self, name: String) -> ScopedThreadBuilder<'s, 'a> {
self.builder = self.builder.name(name);
self
}
/// Sets the size of the stack for the new thread.
pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'s, 'a> {
self.builder = self.builder.stack_size(size);
self
}
/// Spawns a new thread, and returns a join handle for it.
pub fn spawn<F, T>(self, f: F) -> io::Result<ScopedJoinHandle<'a, T>>
where
F: FnOnce() -> T + Send + 'a,
T: Send + 'a,
{
// The `Box` constructed below is written only by the spawned thread,
// and read by the current thread only after the spawned thread is
// joined (`JoinState::join()`). Thus there are no data races.
let result = Box::into_raw(Box::<T>::new(unsafe { mem::uninitialized() })) as usize;
let join_handle = try!(unsafe {
builder_spawn_unsafe(self.builder, move || {
let mut result = Box::from_raw(result as *mut T);
*result = f();
mem::forget(result);
})
});
let thread = join_handle.thread().clone();
let join_state = JoinState::<T>::new(join_handle, result);
let deferred_handle = Rc::new(RefCell::new(Some(join_state)));
let my_handle = deferred_handle.clone();
self.scope.defer(move || {
let state = mem::replace(deferred_handle.borrow_mut().deref_mut(), None);
if let Some(state) = state {
state.join().unwrap();
}
});
Ok(ScopedJoinHandle {
inner: my_handle,
thread: thread,
_marker: PhantomData,
})
}
}
impl<'a, T: Send + 'a> ScopedJoinHandle<'a, T> {
/// Join the scoped thread, returning the result it produced.
pub fn join(self) -> thread::Result<T> {
let state = mem::replace(self.inner.borrow_mut().deref_mut(), None);
state.unwrap().join()
}
/// Get the underlying thread handle.
pub fn thread(&self) -> &thread::Thread {
&self.thread
}
}
impl<'a> Drop for Scope<'a> {
fn drop(&mut self) {
self.drop_all()
}
}
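A sketch of retrieving a value through the join handle of the (since removed) `scoped` API above; the closure may borrow `data` because the scope joins the thread before returning. Illustrative only.

```
extern crate crossbeam_utils;

fn main() {
    let data = vec![1, 2, 3];
    let sum = crossbeam_utils::scoped::scope(|scope| {
        let handle = scope.spawn(|| data.iter().sum::<i32>());
        handle.join().unwrap()
    });
    assert_eq!(sum, 6);
}
```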

@@ -1 +1 @@
{"files":{"CHANGELOG.md":"632777ff129c108f4b3a445f99604f98565345570f0688f68d0d906b6e62a713","Cargo.toml":"5a3bf61003b6175a0de19ad055afb9f99bd09af200910c5de9dcdfce8fd21fc2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"fbf8251ad672babe9db82a62ef76d06854c96ee0195fd3d043be222a96b428ec","benches/atomic_cell.rs":"9e80d3c120df4e6e766ed4fa3df3ed1be5256f6e6cd96a1ced71bedab291bf7f","src/atomic/atomic_cell.rs":"bdb746cabb68eb12f1ddd531838e1732403d42f98f7bd5ce187a982e2bae051b","src/atomic/consume.rs":"bfdc7e2d8370a5a3bb1699b6214347c359d66fcc92a2d1345a513676ac91d821","src/atomic/mod.rs":"d37c5edec55b31dacf0f97f28cee8f91b06c551f04582e70aea6aea91390aa25","src/cache_padded.rs":"95b10657b4e50316d2213894e195c61602ff0c6655cc965301de1584fb7d61c7","src/lib.rs":"2459c04964ec61aff53201e941b3086c903bb3c651825efb4978ff5173478781","src/sync/mod.rs":"d37870028432ad0c3c7fa82d0ee99b3f4bdd9a8410a0a4255a80fe7cc8bdcf38","src/sync/parker.rs":"55324bbea5b7c6838a0f8467a5b8a5dbd5526c8e1c7fd4f6d64dad1ab19f9be9","src/thread.rs":"c01d49383e773fedac9abec3cfb008caec13f959a78f3ddcbf282f32ebecadd7","tests/atomic_cell.rs":"690f516c7e827b18adec5da1c3249ebb26ff674c5887d863ddc94fe1600b9c28","tests/cache_padded.rs":"02235757a554279dae5053d46314a765059ec036c63a05336353994c2aa344d1","tests/parker.rs":"996212c084286567638919c27d46a250a5d592d8e1a97c1e6a4d7e10c060e4dd","tests/thread.rs":"0d86998085a8aace79e5b3dae61aa8bd864492f44aafcce6ec85778954f55809"},"package":"41ee4864f4797060e52044376f7d107429ce1fb43460021b126424b7180ee21a"}
{"files":{"CHANGELOG.md":"e58bfef23e76d04b244941fd4ecdb35837a1a6f1370bf4596cc0280193c9a4f9","Cargo.toml":"2d4d20231a89e61fa6d1d83ad853b274e71d243c992eda5a9de0c9e8ca428ba5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"63ba61fd2e75aa90572476eda5246fc766846af40d31e0bdccbf763d9f0799ba","benches/atomic_cell.rs":"ada69698def9d4eab485a6e0da235aaac001efe49a6b0d6f5c5be381a645310f","src/atomic/atomic_cell.rs":"97a9ec7ac2625ee0a951b984a419fbeab62173ed9c23cab47dfc13ed25e8ee6c","src/atomic/consume.rs":"bfdc7e2d8370a5a3bb1699b6214347c359d66fcc92a2d1345a513676ac91d821","src/atomic/mod.rs":"404eacae422012f3628cb44262df73a5891fe02a17ab345b832e3062982b5a20","src/backoff.rs":"029fede365eaa3408c7359cf868303120903976304aee546aeedcb80085568d5","src/cache_padded.rs":"95b10657b4e50316d2213894e195c61602ff0c6655cc965301de1584fb7d61c7","src/lib.rs":"957df3bd2875147aa1b939fc47f1a8a72719748e9001f27dba2f3589e27a73b4","src/sync/mod.rs":"4c8ad6ec4601f212791b0b531b46ee5decec2f1d14746aa7f2c18e36c609cd8e","src/sync/parker.rs":"55324bbea5b7c6838a0f8467a5b8a5dbd5526c8e1c7fd4f6d64dad1ab19f9be9","src/sync/sharded_lock.rs":"7a401ba621233732c26cf49324748269359d7bc5dc27e0ec26c9493e9a5ec97d","src/sync/wait_group.rs":"21708bbd46daa98e9f788765a9a4ef3b087a8d1e97a6e9406b4a960c95e44ca0","src/thread.rs":"384e3c6e6db565e752169223205991f1eadb1258b1d416758172a40a6c9bd645","tests/atomic_cell.rs":"690f516c7e827b18adec5da1c3249ebb26ff674c5887d863ddc94fe1600b9c28","tests/cache_padded.rs":"02235757a554279dae5053d46314a765059ec036c63a05336353994c2aa344d1","tests/parker.rs":"996212c084286567638919c27d46a250a5d592d8e1a97c1e6a4d7e10c060e4dd","tests/sharded_lock.rs":"1e2e8a355b74d89569873fbba7772235bc64d13a7209ee673f368f4fe6f70c65","tests/thread.rs":"0d86998085a8aace79e5b3dae61aa8bd864492f44aafcce6ec85778954f55809","tests/wait_group.rs":"e3d5168581fb511b760f4249ca487b919cffc60ac2b4610a78db99899772dd5b"},"package":"f8306fcef4a7b563b76b7dd949ca48f52bc1141aa067d2ea09565f3e2652aa5c"}

third_party/rust/crossbeam-utils/CHANGELOG.md (vendored)

@@ -1,3 +1,13 @@
# Version 0.6.5
- Rename `Backoff::is_complete()` to `Backoff::is_completed()`.
# Version 0.6.4
- Add `WaitGroup`, `ShardedLock`, and `Backoff`.
- Add `fetch_*` methods for `AtomicCell<i128>` and `AtomicCell<u128>`.
- Expand documentation.
# Version 0.6.3
- Add `AtomicCell`.

third_party/rust/crossbeam-utils/Cargo.toml (vendored)

@@ -12,20 +12,26 @@
[package]
name = "crossbeam-utils"
version = "0.6.3"
version = "0.6.5"
authors = ["The Crossbeam Project Developers"]
description = "Utilities for concurrent programming"
homepage = "https://github.com/crossbeam-rs/crossbeam"
homepage = "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils"
documentation = "https://docs.rs/crossbeam-utils"
readme = "README.md"
keywords = ["scoped", "thread", "atomic", "cache"]
categories = ["algorithms", "concurrency", "data-structures"]
categories = ["algorithms", "concurrency", "data-structures", "no-std"]
license = "MIT/Apache-2.0"
repository = "https://github.com/crossbeam-rs/crossbeam"
[dependencies.cfg-if]
version = "0.1"
[dependencies.lazy_static]
version = "1.1.0"
optional = true
[dev-dependencies.rand]
version = "0.6"
[features]
default = ["std"]
nightly = []
std = []
std = ["lazy_static"]

third_party/rust/crossbeam-utils/README.md (vendored)

@@ -11,11 +11,35 @@ https://docs.rs/crossbeam-utils)
[![Rust 1.26+](https://img.shields.io/badge/rust-1.26+-lightgray.svg)](
https://www.rust-lang.org)
This crate provides miscellaneous utilities for concurrent programming:
This crate provides miscellaneous tools for concurrent programming:
* `AtomicConsume` allows reading from primitive atomic types with "consume" ordering.
* `CachePadded<T>` pads and aligns a value to the length of a cache line.
* `scope()` can spawn threads that borrow local variables from the stack.
#### Atomics
* [`AtomicCell`], a thread-safe mutable memory location.<sup>(\*)</sup>
* [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.<sup>(\*)</sup>
#### Thread synchronization
* [`Parker`], a thread parking primitive.
* [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads.
* [`WaitGroup`], for synchronizing the beginning or end of some computation.
#### Utilities
* [`Backoff`], for exponential backoff in spin loops.<sup>(\*)</sup>
* [`CachePadded`], for padding and aligning a value to the length of a cache line.<sup>(\*)</sup>
* [`scope`], for spawning threads that borrow local variables from the stack.
*Features marked with <sup>(\*)</sup> can be used in `no_std` environments.*
[`AtomicCell`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/struct.AtomicCell.html
[`AtomicConsume`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/atomic/trait.AtomicConsume.html
[`Parker`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.Parker.html
[`ShardedLock`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.ShardedLock.html
[`WaitGroup`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/sync/struct.WaitGroup.html
[`Backoff`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.Backoff.html
[`CachePadded`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/struct.CachePadded.html
[`scope`]: https://docs.rs/crossbeam-utils/*/crossbeam_utils/thread/fn.scope.html
## Usage
@@ -32,15 +56,6 @@ Next, add this to your crate:
extern crate crossbeam_utils;
```
## Compatibility
The minimum supported Rust version is 1.26.
Features available in `no_std` environments:
* `AtomicConsume`
* `CachePadded<T>`
## License
Licensed under either of
@@ -50,7 +65,7 @@ Licensed under either of
at your option.
### Contribution
#### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be

third_party/rust/crossbeam-utils/benches/atomic_cell.rs (vendored; executable file → normal file)

@@ -51,18 +51,20 @@ fn concurrent_load_u8(b: &mut test::Bencher) {
thread::scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|| loop {
start.wait();
scope.spawn(|_| {
loop {
start.wait();
let mut sum = 0;
for _ in 0..STEPS {
sum += a.load();
}
test::black_box(sum);
let mut sum = 0;
for _ in 0..STEPS {
sum += a.load();
}
test::black_box(sum);
end.wait();
if exit.load() {
break;
end.wait();
if exit.load() {
break;
}
}
});
}
@@ -124,18 +126,20 @@ fn concurrent_load_usize(b: &mut test::Bencher) {
thread::scope(|scope| {
for _ in 0..THREADS {
scope.spawn(|| loop {
start.wait();
scope.spawn(|_| {
loop {
start.wait();
let mut sum = 0;
for _ in 0..STEPS {
sum += a.load();
}
test::black_box(sum);
let mut sum = 0;
for _ in 0..STEPS {
sum += a.load();
}
test::black_box(sum);
end.wait();
if exit.load() {
break;
end.wait();
if exit.load() {
break;
}
}
});
}

@@ -5,6 +5,8 @@ use core::ptr;
use core::slice;
use core::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
use Backoff;
/// A thread-safe mutable memory location.
///
/// This type is equivalent to [`Cell`], except it can also be shared among multiple threads.
@@ -464,18 +466,24 @@ macro_rules! impl_arithmetic {
}
}
};
($t:ty, $size:tt, $atomic:ty, $example:tt) => {
#[cfg(target_has_atomic = $size)]
impl_arithmetic!($t, $atomic, $example);
};
}
cfg_if! {
if #[cfg(feature = "nightly")] {
impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
impl_arithmetic!(u8, "8", atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, "8", atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
impl_arithmetic!(u16, "16", atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
impl_arithmetic!(i16, "16", atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
impl_arithmetic!(u32, "32", atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
impl_arithmetic!(i32, "32", atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
impl_arithmetic!(u64, "64", atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
impl_arithmetic!(i64, "64", atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
impl_arithmetic!(u128, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, "let a = AtomicCell::new(7i128);");
} else {
impl_arithmetic!(u8, "let a = AtomicCell::new(7u8);");
impl_arithmetic!(i8, "let a = AtomicCell::new(7i8);");
@@ -485,6 +493,8 @@ cfg_if! {
impl_arithmetic!(i32, "let a = AtomicCell::new(7i32);");
impl_arithmetic!(u64, "let a = AtomicCell::new(7u64);");
impl_arithmetic!(i64, "let a = AtomicCell::new(7i64);");
impl_arithmetic!(u128, "let a = AtomicCell::new(7u128);");
impl_arithmetic!(i128, "let a = AtomicCell::new(7i128);");
}
}
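For reference, a sketch of the arithmetic methods the macro above generates, as seen by a user of the crate (crate paths assumed from the 0.6 layout; illustrative only):

```
extern crate crossbeam_utils;

use crossbeam_utils::atomic::AtomicCell;

fn main() {
    let a = AtomicCell::new(7u64);
    // Like the std atomics, fetch_add returns the previous value.
    assert_eq!(a.fetch_add(3), 7);
    assert_eq!(a.load(), 10);
}
```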
@@ -629,8 +639,7 @@ impl Lock {
/// Grabs the lock for writing.
#[inline]
fn write(&'static self) -> WriteGuard {
let mut step = 0usize;
let backoff = Backoff::new();
loop {
let previous = self.state.swap(1, Ordering::Acquire);
@@ -643,17 +652,7 @@
};
}
if step < 10 {
atomic::spin_loop_hint();
} else {
#[cfg(not(feature = "std"))]
atomic::spin_loop_hint();
#[cfg(feature = "std")]
::std::thread::yield_now();
}
step = step.wrapping_add(1);
backoff.snooze();
}
}
}
@@ -696,7 +695,28 @@ impl Drop for WriteGuard {
#[inline]
#[must_use]
fn lock(addr: usize) -> &'static Lock {
// The number of locks is prime.
// The number of locks is a prime number because we want to make sure `addr % LEN` gets
// dispersed across all locks.
//
// Note that addresses are always aligned to some power of 2, depending on type `T` in
// `AtomicCell<T>`. If `LEN` was an even number, then `addr % LEN` would be an even number,
// too, which means only half of the locks would get utilized!
//
// It is also possible for addresses to accidentally get aligned to a number that is not a
// power of 2. Consider this example:
//
// ```
// #[repr(C)]
// struct Foo {
// a: AtomicCell<u8>,
// b: u8,
// c: u8,
// }
// ```
//
// Now, if we have a slice of type `&[Foo]`, it is possible that field `a` in all items gets
// stored at addresses that are multiples of 3. It'd be too bad if `LEN` was divisible by 3.
// In order to protect from such cases, we simply choose a large prime number for `LEN`.
const LEN: usize = 97;
const L: Lock = Lock {
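A standalone sketch, not part of the crate, of the dispersion argument in the comment above: with an even modulus, 8-byte-aligned addresses reach only a fraction of the slots, while a prime modulus spreads them across all of them.

```
use std::collections::HashSet;

fn main() {
    // 64 addresses of 8-byte-aligned values, as in a slice of AtomicCell<u64>.
    let addrs: Vec<usize> = (0..64).map(|i| 0x1000 + i * 8).collect();
    let even: HashSet<usize> = addrs.iter().map(|a| a % 96).collect();
    let prime: HashSet<usize> = addrs.iter().map(|a| a % 97).collect();
    assert_eq!(even.len(), 12); // only 12 of 96 slots are ever used
    assert_eq!(prime.len(), 64); // all 64 addresses get distinct slots
}
```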

@@ -1,4 +1,4 @@
//! Additional utilities for atomics.
//! Atomic types.
mod atomic_cell;
mod consume;

third_party/rust/crossbeam-utils/src/backoff.rs (vendored, new file)

@@ -0,0 +1,294 @@
use core::cell::Cell;
use core::fmt;
use core::sync::atomic;
const SPIN_LIMIT: u32 = 6;
const YIELD_LIMIT: u32 = 10;
/// Performs exponential backoff in spin loops.
///
/// Backing off in spin loops reduces contention and improves overall performance.
///
/// This primitive can execute *YIELD* and *PAUSE* instructions, yield the current thread to the OS
/// scheduler, and tell when it is a good time to block the thread using a different synchronization
/// mechanism. Each step of the backoff procedure takes roughly twice as long as the previous
/// step.
///
/// # Examples
///
/// Backing off in a lock-free loop:
///
/// ```
/// use crossbeam_utils::Backoff;
/// use std::sync::atomic::AtomicUsize;
/// use std::sync::atomic::Ordering::SeqCst;
///
/// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize {
/// let backoff = Backoff::new();
/// loop {
/// let val = a.load(SeqCst);
/// if a.compare_and_swap(val, val.wrapping_mul(b), SeqCst) == val {
/// return val;
/// }
/// backoff.spin();
/// }
/// }
/// ```
///
/// Waiting for an [`AtomicBool`] to become `true`:
///
/// ```
/// use crossbeam_utils::Backoff;
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering::SeqCst;
///
/// fn spin_wait(ready: &AtomicBool) {
/// let backoff = Backoff::new();
/// while !ready.load(SeqCst) {
/// backoff.snooze();
/// }
/// }
/// ```
///
/// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait.
/// Note that whoever sets the atomic variable to `true` must notify the parked thread by calling
/// [`unpark()`]:
///
/// ```
/// use crossbeam_utils::Backoff;
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering::SeqCst;
/// use std::thread;
///
/// fn blocking_wait(ready: &AtomicBool) {
/// let backoff = Backoff::new();
/// while !ready.load(SeqCst) {
/// if backoff.is_completed() {
/// thread::park();
/// } else {
/// backoff.snooze();
/// }
/// }
/// }
/// ```
///
/// [`is_completed`]: struct.Backoff.html#method.is_completed
/// [`std::thread::park()`]: https://doc.rust-lang.org/std/thread/fn.park.html
/// [`Condvar`]: https://doc.rust-lang.org/std/sync/struct.Condvar.html
/// [`AtomicBool`]: https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html
/// [`unpark()`]: https://doc.rust-lang.org/std/thread/struct.Thread.html#method.unpark
pub struct Backoff {
step: Cell<u32>,
}
impl Backoff {
/// Creates a new `Backoff`.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::Backoff;
///
/// let backoff = Backoff::new();
/// ```
#[inline]
pub fn new() -> Self {
Backoff {
step: Cell::new(0),
}
}
/// Resets the `Backoff`.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::Backoff;
///
/// let backoff = Backoff::new();
/// backoff.reset();
/// ```
#[inline]
pub fn reset(&self) {
self.step.set(0);
}
/// Backs off in a lock-free loop.
///
/// This method should be used when we need to retry an operation because another thread made
/// progress.
///
/// The processor may yield using the *YIELD* or *PAUSE* instruction.
///
/// # Examples
///
/// Backing off in a lock-free loop:
///
/// ```
/// use crossbeam_utils::Backoff;
/// use std::sync::atomic::AtomicUsize;
/// use std::sync::atomic::Ordering::SeqCst;
///
/// fn fetch_mul(a: &AtomicUsize, b: usize) -> usize {
/// let backoff = Backoff::new();
/// loop {
/// let val = a.load(SeqCst);
/// if a.compare_and_swap(val, val.wrapping_mul(b), SeqCst) == val {
/// return val;
/// }
/// backoff.spin();
/// }
/// }
///
/// let a = AtomicUsize::new(7);
/// assert_eq!(fetch_mul(&a, 8), 7);
/// assert_eq!(a.load(SeqCst), 56);
/// ```
#[inline]
pub fn spin(&self) {
for _ in 0..1 << self.step.get().min(SPIN_LIMIT) {
atomic::spin_loop_hint();
}
if self.step.get() <= SPIN_LIMIT {
self.step.set(self.step.get() + 1);
}
}
/// Backs off in a blocking loop.
///
/// This method should be used when we need to wait for another thread to make progress.
///
/// The processor may yield using the *YIELD* or *PAUSE* instruction and the current thread
/// may yield by giving up a timeslice to the OS scheduler.
///
/// In `#[no_std]` environments, this method is equivalent to [`spin`].
///
/// If possible, use [`is_completed`] to check when it is advised to stop using backoff and
/// block the current thread using a different synchronization mechanism instead.
///
/// [`spin`]: struct.Backoff.html#method.spin
/// [`is_completed`]: struct.Backoff.html#method.is_completed
///
/// # Examples
///
/// Waiting for an [`AtomicBool`] to become `true`:
///
/// ```
/// use crossbeam_utils::Backoff;
/// use std::sync::Arc;
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering::SeqCst;
/// use std::thread;
/// use std::time::Duration;
///
/// fn spin_wait(ready: &AtomicBool) {
/// let backoff = Backoff::new();
/// while !ready.load(SeqCst) {
/// backoff.snooze();
/// }
/// }
///
/// let ready = Arc::new(AtomicBool::new(false));
/// let ready2 = ready.clone();
///
/// thread::spawn(move || {
/// thread::sleep(Duration::from_millis(100));
/// ready2.store(true, SeqCst);
/// });
///
/// assert_eq!(ready.load(SeqCst), false);
/// spin_wait(&ready);
/// assert_eq!(ready.load(SeqCst), true);
/// ```
///
/// [`AtomicBool`]: https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html
#[inline]
pub fn snooze(&self) {
if self.step.get() <= SPIN_LIMIT {
for _ in 0..1 << self.step.get() {
atomic::spin_loop_hint();
}
} else {
#[cfg(not(feature = "std"))]
for _ in 0..1 << self.step.get() {
atomic::spin_loop_hint();
}
#[cfg(feature = "std")]
::std::thread::yield_now();
}
if self.step.get() <= YIELD_LIMIT {
self.step.set(self.step.get() + 1);
}
}
/// Returns `true` if exponential backoff has completed and blocking the thread is advised.
///
/// # Examples
///
/// Waiting for an [`AtomicBool`] to become `true` and parking the thread after a long wait:
///
/// ```
/// use crossbeam_utils::Backoff;
/// use std::sync::Arc;
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering::SeqCst;
/// use std::thread;
/// use std::time::Duration;
///
/// fn blocking_wait(ready: &AtomicBool) {
/// let backoff = Backoff::new();
/// while !ready.load(SeqCst) {
/// if backoff.is_completed() {
/// thread::park();
/// } else {
/// backoff.snooze();
/// }
/// }
/// }
///
/// let ready = Arc::new(AtomicBool::new(false));
/// let ready2 = ready.clone();
/// let waiter = thread::current();
///
/// thread::spawn(move || {
/// thread::sleep(Duration::from_millis(100));
/// ready2.store(true, SeqCst);
/// waiter.unpark();
/// });
///
/// assert_eq!(ready.load(SeqCst), false);
/// blocking_wait(&ready);
/// assert_eq!(ready.load(SeqCst), true);
/// ```
///
/// [`AtomicBool`]: https://doc.rust-lang.org/std/sync/atomic/struct.AtomicBool.html
#[inline]
pub fn is_completed(&self) -> bool {
self.step.get() > YIELD_LIMIT
}
#[inline]
#[doc(hidden)]
#[deprecated(note = "use `is_completed` instead")]
pub fn is_complete(&self) -> bool {
self.is_completed()
}
}
impl fmt::Debug for Backoff {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Backoff")
.field("step", &self.step)
.field("is_completed", &self.is_completed())
.finish()
}
}
impl Default for Backoff {
fn default() -> Backoff {
Backoff::new()
}
}
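A hypothetical trace, not part of the crate, of the schedule `snooze` follows given the constants above: steps 0 through SPIN_LIMIT spin 2^step times, later steps yield to the OS scheduler, and once the step counter passes YIELD_LIMIT, `is_completed()` advises blocking instead.

```
fn main() {
    let (spin_limit, yield_limit) = (6u32, 10u32); // mirrors SPIN_LIMIT/YIELD_LIMIT
    for step in 0..=yield_limit {
        if step <= spin_limit {
            println!("step {:2}: spin {} time(s)", step, 1u32 << step);
        } else {
            println!("step {:2}: yield_now()", step);
        }
    }
    println!("after that: is_completed() == true, so park or use a Condvar");
}
```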

third_party/rust/crossbeam-utils/src/lib.rs (vendored)

@@ -1,11 +1,36 @@
//! Utilities for concurrent programming.
//! Miscellaneous tools for concurrent programming.
//!
//! ## Atomics
//!
//! * [`AtomicCell`], a thread-safe mutable memory location.
//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
//!
//! ## Thread synchronization
//!
//! * [`Parker`], a thread parking primitive.
//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads.
//! * [`WaitGroup`], for synchronizing the beginning or end of some computation.
//!
//! ## Utilities
//!
//! * [`Backoff`], for exponential backoff in spin loops.
//! * [`CachePadded`], for padding and aligning a value to the length of a cache line.
//! * [`scope`], for spawning threads that borrow local variables from the stack.
//!
//! [`AtomicCell`]: atomic/struct.AtomicCell.html
//! [`AtomicConsume`]: atomic/trait.AtomicConsume.html
//! [`Parker`]: sync/struct.Parker.html
//! [`ShardedLock`]: sync/struct.ShardedLock.html
//! [`WaitGroup`]: sync/struct.WaitGroup.html
//! [`Backoff`]: struct.Backoff.html
//! [`CachePadded`]: struct.CachePadded.html
//! [`scope`]: thread/fn.scope.html
#![warn(missing_docs)]
#![warn(missing_debug_implementations)]
#![cfg_attr(not(feature = "std"), no_std)]
#![cfg_attr(feature = "nightly", feature(alloc))]
#![cfg_attr(feature = "nightly", feature(cfg_target_has_atomic))]
#![cfg_attr(feature = "nightly", feature(integer_atomics))]
#[macro_use]
extern crate cfg_if;
@@ -28,8 +53,14 @@ pub mod atomic;
mod cache_padded;
pub use cache_padded::CachePadded;
mod backoff;
pub use backoff::Backoff;
cfg_if! {
if #[cfg(feature = "std")] {
#[macro_use]
extern crate lazy_static;
pub mod sync;
pub mod thread;
}

@@ -1,5 +1,17 @@
//! Synchronization tools.
//! Thread synchronization primitives.
//!
//! * [`Parker`], a thread parking primitive.
//! * [`ShardedLock`], a sharded reader-writer lock with fast concurrent reads.
//! * [`WaitGroup`], for synchronizing the beginning or end of some computation.
//!
//! [`Parker`]: struct.Parker.html
//! [`ShardedLock`]: struct.ShardedLock.html
//! [`WaitGroup`]: struct.WaitGroup.html
mod parker;
mod sharded_lock;
mod wait_group;
pub use self::sharded_lock::{ShardedLock, ShardedLockReadGuard, ShardedLockWriteGuard};
pub use self::parker::{Parker, Unparker};
pub use self::wait_group::WaitGroup;

third_party/rust/crossbeam-utils/src/sync/sharded_lock.rs (vendored, new file)

@@ -0,0 +1,600 @@
use std::cell::UnsafeCell;
use std::collections::HashMap;
use std::fmt;
use std::marker::PhantomData;
use std::mem;
use std::ops::{Deref, DerefMut};
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::sync::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use std::sync::{LockResult, PoisonError, TryLockError, TryLockResult};
use std::thread::{self, ThreadId};
use CachePadded;
/// The number of shards per sharded lock. Must be a power of two.
const NUM_SHARDS: usize = 8;
/// A shard containing a single reader-writer lock.
struct Shard {
/// The inner reader-writer lock.
lock: RwLock<()>,
/// The write-guard keeping this shard locked.
///
/// Write operations will lock each shard and store the guard here. These guards get dropped at
/// the same time the big guard is dropped.
write_guard: UnsafeCell<Option<RwLockWriteGuard<'static, ()>>>,
}
/// A sharded reader-writer lock.
///
/// This lock is equivalent to [`RwLock`], except read operations are faster and write operations
/// are slower.
///
/// A `ShardedLock` is internally made of a list of *shards*, each being a [`RwLock`] occupying a
/// single cache line. Read operations will pick one of the shards depending on the current thread
/// and lock it. Write operations need to lock all shards in succession.
///
/// By splitting the lock into shards, concurrent read operations will in most cases choose
/// different shards and thus update different cache lines, which is good for scalability. However,
/// write operations need to do more work and are therefore slower than usual.
///
/// The priority policy of the lock is dependent on the underlying operating system's
/// implementation, and this type does not guarantee that any particular policy will be used.
///
/// # Poisoning
///
/// A `ShardedLock`, like [`RwLock`], will become poisoned on a panic. Note that it may only be
/// poisoned if a panic occurs while a write operation is in progress. If a panic occurs in any
/// read operation, the lock will not be poisoned.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(5);
///
/// // Any number of read locks can be held at once.
/// {
/// let r1 = lock.read().unwrap();
/// let r2 = lock.read().unwrap();
/// assert_eq!(*r1, 5);
/// assert_eq!(*r2, 5);
/// } // Read locks are dropped at this point.
///
/// // However, only one write lock may be held.
/// {
/// let mut w = lock.write().unwrap();
/// *w += 1;
/// assert_eq!(*w, 6);
/// } // Write lock is dropped here.
/// ```
///
/// [`RwLock`]: https://doc.rust-lang.org/std/sync/struct.RwLock.html
pub struct ShardedLock<T: ?Sized> {
/// A list of locks protecting the internal data.
shards: Box<[CachePadded<Shard>]>,
/// The internal data.
value: UnsafeCell<T>,
}
unsafe impl<T: ?Sized + Send> Send for ShardedLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for ShardedLock<T> {}
impl<T: ?Sized> UnwindSafe for ShardedLock<T> {}
impl<T: ?Sized> RefUnwindSafe for ShardedLock<T> {}
impl<T> ShardedLock<T> {
/// Creates a new sharded reader-writer lock.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(5);
/// ```
pub fn new(value: T) -> ShardedLock<T> {
ShardedLock {
shards: (0..NUM_SHARDS)
.map(|_| CachePadded::new(Shard {
lock: RwLock::new(()),
write_guard: UnsafeCell::new(None),
}))
.collect::<Vec<_>>()
.into_boxed_slice(),
value: UnsafeCell::new(value),
}
}
/// Consumes this lock, returning the underlying data.
///
/// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
/// operation panics.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(String::new());
/// {
/// let mut s = lock.write().unwrap();
/// *s = "modified".to_owned();
/// }
/// assert_eq!(lock.into_inner().unwrap(), "modified");
/// ```
pub fn into_inner(self) -> LockResult<T> {
let is_poisoned = self.is_poisoned();
let inner = self.value.into_inner();
if is_poisoned {
Err(PoisonError::new(inner))
} else {
Ok(inner)
}
}
}
impl<T: ?Sized> ShardedLock<T> {
/// Returns `true` if the lock is poisoned.
///
/// If another thread can still access the lock, it may become poisoned at any time. A `false`
/// result should not be trusted without additional synchronization.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
/// use std::sync::Arc;
/// use std::thread;
///
/// let lock = Arc::new(ShardedLock::new(0));
/// let c_lock = lock.clone();
///
/// let _ = thread::spawn(move || {
/// let _lock = c_lock.write().unwrap();
/// panic!(); // the lock gets poisoned
/// }).join();
/// assert_eq!(lock.is_poisoned(), true);
/// ```
pub fn is_poisoned(&self) -> bool {
self.shards[0].lock.is_poisoned()
}
/// Returns a mutable reference to the underlying data.
///
/// Since this call borrows the lock mutably, no actual locking needs to take place.
///
/// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
/// operation panics.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let mut lock = ShardedLock::new(0);
/// *lock.get_mut().unwrap() = 10;
/// assert_eq!(*lock.read().unwrap(), 10);
/// ```
pub fn get_mut(&mut self) -> LockResult<&mut T> {
let is_poisoned = self.is_poisoned();
let inner = unsafe { &mut *self.value.get() };
if is_poisoned {
Err(PoisonError::new(inner))
} else {
Ok(inner)
}
}
/// Attempts to acquire this lock with shared read access.
///
/// If the access could not be granted at this time, an error is returned. Otherwise, a guard
/// is returned which will release the shared access when it is dropped. This method does not
/// provide any guarantees with respect to the ordering of whether contentious readers or
/// writers will acquire the lock first.
///
/// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
/// operation panics.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(1);
///
/// match lock.try_read() {
/// Ok(n) => assert_eq!(*n, 1),
/// Err(_) => unreachable!(),
/// };
/// ```
pub fn try_read(&self) -> TryLockResult<ShardedLockReadGuard<T>> {
// Take the current thread index and map it to a shard index. Thread indices will tend to
// distribute shards among threads equally, thus reducing contention due to read-locking.
let current_index = current_index().unwrap_or(0);
let shard_index = current_index & (self.shards.len() - 1);
match self.shards[shard_index].lock.try_read() {
Ok(guard) => Ok(ShardedLockReadGuard {
lock: self,
_guard: guard,
_marker: PhantomData,
}),
Err(TryLockError::Poisoned(err)) => {
let guard = ShardedLockReadGuard {
lock: self,
_guard: err.into_inner(),
_marker: PhantomData,
};
Err(TryLockError::Poisoned(PoisonError::new(guard)))
},
Err(TryLockError::WouldBlock) => Err(TryLockError::WouldBlock),
}
}
/// Locks with shared read access, blocking the current thread until it can be acquired.
///
/// The calling thread will be blocked until there are no more writers which hold the lock.
/// There may be other readers currently inside the lock when this method returns. This method
/// does not provide any guarantees with respect to the ordering of whether contentious readers
/// or writers will acquire the lock first.
///
/// Returns a guard which will release the shared access when dropped.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
/// use std::sync::Arc;
/// use std::thread;
///
/// let lock = Arc::new(ShardedLock::new(1));
/// let c_lock = lock.clone();
///
/// let n = lock.read().unwrap();
/// assert_eq!(*n, 1);
///
/// thread::spawn(move || {
/// let r = c_lock.read();
/// assert!(r.is_ok());
/// }).join().unwrap();
/// ```
pub fn read(&self) -> LockResult<ShardedLockReadGuard<T>> {
// Take the current thread index and map it to a shard index. Thread indices will tend to
// distribute shards among threads equally, thus reducing contention due to read-locking.
let current_index = current_index().unwrap_or(0);
let shard_index = current_index & (self.shards.len() - 1);
match self.shards[shard_index].lock.read() {
Ok(guard) => Ok(ShardedLockReadGuard {
lock: self,
_guard: guard,
_marker: PhantomData,
}),
Err(err) => Err(PoisonError::new(ShardedLockReadGuard {
lock: self,
_guard: err.into_inner(),
_marker: PhantomData,
})),
}
}
/// Attempts to acquire this lock with exclusive write access.
///
/// If the access could not be granted at this time, an error is returned. Otherwise, a guard
/// is returned which will release the exclusive access when it is dropped. This method does
/// not provide any guarantees with respect to the ordering of whether contentious readers or
/// writers will acquire the lock first.
///
/// This method will return an error if the lock is poisoned. A lock gets poisoned when a write
/// operation panics.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(1);
///
/// let n = lock.read().unwrap();
/// assert_eq!(*n, 1);
///
/// assert!(lock.try_write().is_err());
/// ```
pub fn try_write(&self) -> TryLockResult<ShardedLockWriteGuard<T>> {
let mut poisoned = false;
let mut blocked = None;
// Write-lock each shard in succession.
for (i, shard) in self.shards.iter().enumerate() {
let guard = match shard.lock.try_write() {
Ok(guard) => guard,
Err(TryLockError::Poisoned(err)) => {
poisoned = true;
err.into_inner()
},
Err(TryLockError::WouldBlock) => {
blocked = Some(i);
break;
}
};
// Store the guard into the shard.
unsafe {
let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
let dest: *mut _ = shard.write_guard.get();
*dest = Some(guard);
}
}
if let Some(i) = blocked {
// Unlock the shards in reverse order of locking.
for shard in self.shards[0..i].iter().rev() {
unsafe {
let dest: *mut _ = shard.write_guard.get();
let guard = mem::replace(&mut *dest, None);
drop(guard);
}
}
Err(TryLockError::WouldBlock)
} else if poisoned {
let guard = ShardedLockWriteGuard {
lock: self,
_marker: PhantomData,
};
Err(TryLockError::Poisoned(PoisonError::new(guard)))
} else {
Ok(ShardedLockWriteGuard {
lock: self,
_marker: PhantomData,
})
}
}
/// Locks with exclusive write access, blocking the current thread until it can be acquired.
///
/// The calling thread will be blocked until there are no other writers or readers which hold
/// the lock. This method does not provide any guarantees with respect to the ordering of
/// whether contentious readers or writers will acquire the lock first.
///
/// Returns a guard which will release the exclusive access when dropped.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::ShardedLock;
///
/// let lock = ShardedLock::new(1);
///
/// let mut n = lock.write().unwrap();
/// *n = 2;
///
/// assert!(lock.try_read().is_err());
/// ```
pub fn write(&self) -> LockResult<ShardedLockWriteGuard<T>> {
let mut poisoned = false;
// Write-lock each shard in succession.
for shard in self.shards.iter() {
let guard = match shard.lock.write() {
Ok(guard) => guard,
Err(err) => {
poisoned = true;
err.into_inner()
}
};
// Store the guard into the shard.
unsafe {
let guard: RwLockWriteGuard<'_, ()> = guard;
let guard: RwLockWriteGuard<'static, ()> = mem::transmute(guard);
let dest: *mut _ = shard.write_guard.get();
*dest = Some(guard);
}
}
if poisoned {
Err(PoisonError::new(ShardedLockWriteGuard {
lock: self,
_marker: PhantomData,
}))
} else {
Ok(ShardedLockWriteGuard {
lock: self,
_marker: PhantomData,
})
}
}
}
impl<T: ?Sized + fmt::Debug> fmt::Debug for ShardedLock<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.try_read() {
Ok(guard) => f.debug_struct("ShardedLock").field("data", &&*guard).finish(),
Err(TryLockError::Poisoned(err)) => {
f.debug_struct("ShardedLock").field("data", &&**err.get_ref()).finish()
},
Err(TryLockError::WouldBlock) => {
struct LockedPlaceholder;
impl fmt::Debug for LockedPlaceholder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("<locked>")
}
}
f.debug_struct("ShardedLock").field("data", &LockedPlaceholder).finish()
}
}
}
}
impl<T: Default> Default for ShardedLock<T> {
fn default() -> ShardedLock<T> {
ShardedLock::new(Default::default())
}
}
impl<T> From<T> for ShardedLock<T> {
fn from(t: T) -> Self {
ShardedLock::new(t)
}
}
/// A guard used to release the shared read access of a [`ShardedLock`] when dropped.
///
/// [`ShardedLock`]: struct.ShardedLock.html
pub struct ShardedLockReadGuard<'a, T: ?Sized + 'a> {
lock: &'a ShardedLock<T>,
_guard: RwLockReadGuard<'a, ()>,
_marker: PhantomData<RwLockReadGuard<'a, T>>,
}
unsafe impl<'a, T: ?Sized + Sync> Sync for ShardedLockReadGuard<'a, T> {}
impl<'a, T: ?Sized> Deref for ShardedLockReadGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.lock.value.get() }
}
}
impl<'a, T: fmt::Debug> fmt::Debug for ShardedLockReadGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ShardedLockReadGuard")
.field("lock", &self.lock)
.finish()
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for ShardedLockReadGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
/// A guard used to release the exclusive write access of a [`ShardedLock`] when dropped.
///
/// [`ShardedLock`]: struct.ShardedLock.html
pub struct ShardedLockWriteGuard<'a, T: ?Sized + 'a> {
lock: &'a ShardedLock<T>,
_marker: PhantomData<RwLockWriteGuard<'a, T>>,
}
unsafe impl<'a, T: ?Sized + Sync> Sync for ShardedLockWriteGuard<'a, T> {}
impl<'a, T: ?Sized> Drop for ShardedLockWriteGuard<'a, T> {
fn drop(&mut self) {
// Unlock the shards in reverse order of locking.
for shard in self.lock.shards.iter().rev() {
unsafe {
let dest: *mut _ = shard.write_guard.get();
let guard = mem::replace(&mut *dest, None);
drop(guard);
}
}
}
}
impl<'a, T: fmt::Debug> fmt::Debug for ShardedLockWriteGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("ShardedLockWriteGuard")
.field("lock", &self.lock)
.finish()
}
}
impl<'a, T: ?Sized + fmt::Display> fmt::Display for ShardedLockWriteGuard<'a, T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
(**self).fmt(f)
}
}
impl<'a, T: ?Sized> Deref for ShardedLockWriteGuard<'a, T> {
type Target = T;
fn deref(&self) -> &T {
unsafe { &*self.lock.value.get() }
}
}
impl<'a, T: ?Sized> DerefMut for ShardedLockWriteGuard<'a, T> {
fn deref_mut(&mut self) -> &mut T {
unsafe { &mut *self.lock.value.get() }
}
}
/// Returns a `usize` that identifies the current thread.
///
/// Each thread is associated with an 'index'. While there are no particular guarantees, indices
/// usually tend to be consecutive numbers between 0 and the number of running threads.
///
/// Since this function accesses TLS, `None` might be returned if the current thread's TLS is
/// tearing down.
#[inline]
fn current_index() -> Option<usize> {
REGISTRATION.try_with(|reg| reg.index).ok()
}
/// The global registry keeping track of registered threads and indices.
struct ThreadIndices {
/// Mapping from `ThreadId` to thread index.
mapping: HashMap<ThreadId, usize>,
/// A list of free indices.
free_list: Vec<usize>,
/// The next index to allocate if the free list is empty.
next_index: usize,
}
lazy_static! {
static ref THREAD_INDICES: Mutex<ThreadIndices> = Mutex::new(ThreadIndices {
mapping: HashMap::new(),
free_list: Vec::new(),
next_index: 0,
});
}
/// A registration of a thread with an index.
///
/// When dropped, unregisters the thread and frees the reserved index.
struct Registration {
index: usize,
thread_id: ThreadId,
}
impl Drop for Registration {
fn drop(&mut self) {
let mut indices = THREAD_INDICES.lock().unwrap();
indices.mapping.remove(&self.thread_id);
indices.free_list.push(self.index);
}
}
thread_local! {
static REGISTRATION: Registration = {
let thread_id = thread::current().id();
let mut indices = THREAD_INDICES.lock().unwrap();
let index = match indices.free_list.pop() {
Some(i) => i,
None => {
let i = indices.next_index;
indices.next_index += 1;
i
}
};
indices.mapping.insert(thread_id, index);
Registration {
index,
thread_id,
}
};
}
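A short usage sketch, assuming the crate's own `thread::scope` re-export under the `std` feature: concurrent readers each read-lock only the shard chosen by their thread index, so they rarely contend.

```
extern crate crossbeam_utils;

use crossbeam_utils::sync::ShardedLock;
use crossbeam_utils::thread;

fn main() {
    let lock = ShardedLock::new(vec![1, 2, 3]);
    thread::scope(|s| {
        for _ in 0..4 {
            s.spawn(|_| {
                let v = lock.read().unwrap(); // locks one shard, not all eight
                assert_eq!(v.len(), 3);
            });
        }
    })
    .unwrap();
}
```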

third_party/rust/crossbeam-utils/src/sync/wait_group.rs (vendored, new file)

@@ -0,0 +1,139 @@
use std::fmt;
use std::sync::{Arc, Condvar, Mutex};
/// Enables threads to synchronize the beginning or end of some computation.
///
/// # Wait groups vs barriers
///
/// `WaitGroup` is very similar to [`Barrier`], but there are a few differences:
///
/// * `Barrier` needs to know the number of threads at construction, while `WaitGroup` is cloned to
/// register more threads.
///
/// * A `Barrier` can be reused even after all threads have synchronized, while a `WaitGroup`
/// synchronizes threads only once.
///
/// * All threads wait for others to reach the `Barrier`. With `WaitGroup`, each thread can choose
/// to either wait for other threads or to continue without blocking.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::WaitGroup;
/// use std::thread;
///
/// // Create a new wait group.
/// let wg = WaitGroup::new();
///
/// for _ in 0..4 {
/// // Create another reference to the wait group.
/// let wg = wg.clone();
///
/// thread::spawn(move || {
/// // Do some work.
///
/// // Drop the reference to the wait group.
/// drop(wg);
/// });
/// }
///
/// // Block until all threads have finished their work.
/// wg.wait();
/// ```
///
/// [`Barrier`]: https://doc.rust-lang.org/std/sync/struct.Barrier.html
pub struct WaitGroup {
inner: Arc<Inner>,
}
/// Inner state of a `WaitGroup`.
struct Inner {
cvar: Condvar,
count: Mutex<usize>,
}
impl WaitGroup {
/// Creates a new wait group and returns the single reference to it.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::WaitGroup;
///
/// let wg = WaitGroup::new();
/// ```
pub fn new() -> WaitGroup {
WaitGroup {
inner: Arc::new(Inner {
cvar: Condvar::new(),
count: Mutex::new(1),
}),
}
}
/// Drops this reference and waits until all other references are dropped.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::sync::WaitGroup;
/// use std::thread;
///
/// let wg = WaitGroup::new();
///
/// thread::spawn({
/// let wg = wg.clone();
/// move || {
/// // Block until both threads have reached `wait()`.
/// wg.wait();
/// }
/// });
///
/// // Block until both threads have reached `wait()`.
/// wg.wait();
/// ```
pub fn wait(self) {
if *self.inner.count.lock().unwrap() == 1 {
return;
}
let inner = self.inner.clone();
drop(self);
let mut count = inner.count.lock().unwrap();
while *count > 0 {
count = inner.cvar.wait(count).unwrap();
}
}
}
impl Drop for WaitGroup {
fn drop(&mut self) {
let mut count = self.inner.count.lock().unwrap();
*count -= 1;
if *count == 0 {
self.inner.cvar.notify_all();
}
}
}
impl Clone for WaitGroup {
fn clone(&self) -> WaitGroup {
let mut count = self.inner.count.lock().unwrap();
*count += 1;
WaitGroup {
inner: self.inner.clone(),
}
}
}
impl fmt::Debug for WaitGroup {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let count: &usize = &*self.inner.count.lock().unwrap();
f.debug_struct("WaitGroup")
.field("count", count)
.finish()
}
}
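A sketch of the "continue without blocking" difference from `Barrier` noted in the docs above: the workers rendezvous through `wait()`, while the owning thread merely drops its clone. Illustrative only.

```
extern crate crossbeam_utils;

use crossbeam_utils::sync::WaitGroup;
use std::thread;

fn main() {
    let wg = WaitGroup::new();
    let handles: Vec<_> = (0..3)
        .map(|i| {
            let wg = wg.clone();
            thread::spawn(move || {
                println!("worker {} ready", i);
                wg.wait(); // blocks until every other reference is dropped or waiting
                println!("worker {} running", i);
            })
        })
        .collect();
    drop(wg); // the owner just unregisters; it does not block
    for h in handles {
        h.join().unwrap();
    }
}
```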

third_party/rust/crossbeam-utils/src/thread.rs (vendored)

@@ -11,9 +11,9 @@
//! "Carol".to_string(),
//! ];
//!
//! thread::scope(|scope| {
//! thread::scope(|s| {
//! for person in &people {
//! scope.spawn(move |_| {
//! s.spawn(move |_| {
//! println!("Hello, {}!", person);
//! });
//! }
@@ -74,10 +74,43 @@
//! `'static` lifetime because the borrow checker cannot be sure when the thread will complete.
//!
//! A scope creates a clear boundary between variables outside the scope and threads inside the
//! scope. Whenever a scope spawns a thread, it promises to join the thread before the scope ends.
//! scope. Whenever a scope spawns a thread, it promises to join the thread before the scope ends.
//! This way we guarantee to the borrow checker that scoped threads only live within the scope and
//! can safely access variables outside it.
//!
+//! # Nesting scoped threads
+//!
+//! Sometimes scoped threads need to spawn more threads within the same scope. This is a little
+//! tricky because argument `s` lives *inside* the invocation of `thread::scope()` and as such
+//! cannot be borrowed by scoped threads:
+//!
+//! ```ignore
+//! use crossbeam_utils::thread;
+//!
+//! thread::scope(|s| {
+//!     s.spawn(|_| {
+//!         // Not going to compile because we're trying to borrow `s`,
+//!         // which lives *inside* the scope! :(
+//!         s.spawn(|_| println!("nested thread"));
+//!     });
+//! });
+//! ```
+//!
+//! Fortunately, there is a solution. Every scoped thread is passed a reference to its scope as an
+//! argument, which can be used for spawning nested threads:
+//!
+//! ```ignore
+//! use crossbeam_utils::thread;
+//!
+//! thread::scope(|s| {
+//!     // Note the `|s|` here.
+//!     s.spawn(|s| {
+//!         // Yay, this works because we're using a fresh argument `s`! :)
+//!         s.spawn(|_| println!("nested thread"));
+//!     });
+//! });
+//! ```
+//!
//! [`std::thread::spawn`]: https://doc.rust-lang.org/std/thread/fn.spawn.html
use std::fmt;
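Both snippets in the new module documentation are marked `ignore`; for reference, a compiling version of the nested-spawn pattern they describe looks like this (a standalone sketch against the crossbeam-utils 0.6 API vendored here, not part of the diff):

```rust
extern crate crossbeam_utils;

use crossbeam_utils::thread;

fn main() {
    thread::scope(|s| {
        // The child closure receives its own `&Scope` argument, so the
        // nested spawn borrows that argument instead of the outer `s`.
        s.spawn(|s| {
            s.spawn(|_| println!("nested thread"));
        });
    })
    .unwrap();
}
```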
@@ -85,39 +118,42 @@ use std::io;
use std::marker::PhantomData;
use std::mem;
use std::panic;
-use std::sync::{Arc, Mutex, mpsc};
+use std::sync::{Arc, Mutex};
use std::thread;
+use sync::WaitGroup;
type SharedVec<T> = Arc<Mutex<Vec<T>>>;
type SharedOption<T> = Arc<Mutex<Option<T>>>;
-/// Creates a new `Scope` for [*scoped thread spawning*](struct.Scope.html#method.spawn).
+/// Creates a new scope for spawning threads.
///
-/// No matter what happens, before the `Scope` is dropped, it is guaranteed that all the unjoined
-/// spawned scoped threads are joined.
///
-/// `thread::scope()` returns `Ok(())` if all the unjoined spawned threads did not panic. It returns
-/// `Err(e)` if one of them panics with `e`. If many of them panic, it is still guaranteed that all
-/// the threads are joined, and `thread::scope()` returns `Err(e)` with `e` from a panicking thread.
+/// All child threads that haven't been manually joined will be automatically joined just before
+/// this function invocation ends. If all joined threads have successfully completed, `Ok` is
+/// returned with the return value of `f`. If any of the joined threads has panicked, an `Err` is
+/// returned containing errors from panicked threads.
///
/// # Examples
///
/// Creating and using a scope:
///
/// ```
-/// crossbeam_utils::thread::scope(|scope| {
-///     scope.spawn(|_| println!("Exiting scope"));
-///     scope.spawn(|_| println!("Running child thread in scope"));
+/// use crossbeam_utils::thread;
+///
+/// let var = vec![1, 2, 3];
+///
+/// thread::scope(|s| {
+///     s.spawn(|_| {
+///         println!("A child thread borrowing `var`: {:?}", var);
+///     });
/// }).unwrap();
/// ```
pub fn scope<'env, F, R>(f: F) -> thread::Result<R>
where
F: FnOnce(&Scope<'env>) -> R,
{
-let (tx, rx) = mpsc::channel();
+let wg = WaitGroup::new();
let scope = Scope::<'env> {
handles: SharedVec::default(),
-chan: tx,
+wait_group: wg.clone(),
_marker: PhantomData,
};
@@ -125,8 +161,8 @@ where
let result = panic::catch_unwind(panic::AssertUnwindSafe(|| f(&scope)));
// Wait until all nested scopes are dropped.
-drop(scope.chan);
-let _ = rx.recv();
+drop(scope.wait_group);
+wg.wait();
// Join all remaining spawned threads.
let panics: Vec<_> = {
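The switch from an `mpsc` channel to a `WaitGroup` above preserves the semantics: both idioms block until every clone held by a subscope has been dropped. A standalone sketch (not from the diff) of the old channel-based trick:

```rust
use std::sync::mpsc;
use std::thread;

fn main() {
    let (tx, rx) = mpsc::channel::<()>();

    for _ in 0..4 {
        let tx = tx.clone();
        thread::spawn(move || {
            // ... do some work ...
            drop(tx); // each owner drops its sender when done
        });
    }

    drop(tx); // drop the original sender too
    // `recv` blocks until every sender is gone, then returns `Err`.
    assert!(rx.recv().is_err());
}
```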
@@ -163,7 +199,7 @@ pub struct Scope<'env> {
handles: SharedVec<SharedOption<thread::JoinHandle<()>>>,
/// Used to wait until all subscopes are dropped.
-chan: mpsc::Sender<()>,
+wait_group: WaitGroup,
/// Borrows data with invariant lifetime `'env`.
_marker: PhantomData<&'env mut &'env ()>,
@@ -172,14 +208,35 @@ pub struct Scope<'env> {
unsafe impl<'env> Sync for Scope<'env> {}
impl<'env> Scope<'env> {
-/// Create a scoped thread.
+/// Spawns a scoped thread.
///
-/// `spawn` is similar to the [`spawn`] function in Rust's standard library. The difference is
-/// that this thread is scoped, meaning that it's guaranteed to terminate before the current
-/// stack frame goes away, allowing you to reference the parent stack frame directly. This is
-/// ensured by having the parent thread join on the child thread before the scope exits.
+/// This method is similar to the [`spawn`] function in Rust's standard library. The difference
+/// is that this thread is scoped, meaning it's guaranteed to terminate before the scope exits,
+/// allowing it to reference variables outside the scope.
///
/// The scoped thread is passed a reference to this scope as an argument, which can be used for
/// spawning nested threads.
///
/// The returned handle can be used to manually join the thread before the scope exits.
///
/// [`spawn`]: https://doc.rust-lang.org/std/thread/fn.spawn.html
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// let handle = s.spawn(|_| {
/// println!("A child thread is running");
/// 42
/// });
///
/// // Join the thread and retrieve its result.
/// let res = handle.join().unwrap();
/// assert_eq!(res, 42);
/// }).unwrap();
/// ```
pub fn spawn<'scope, F, T>(&'scope self, f: F) -> ScopedJoinHandle<'scope, T>
where
F: FnOnce(&Scope<'env>) -> T,
@@ -189,8 +246,20 @@ impl<'env> Scope<'env> {
self.builder().spawn(f).unwrap()
}
-/// Generates the base configuration for spawning a scoped thread, from which configuration
-/// methods can be chained.
+/// Creates a builder that can configure a thread before spawning.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// s.builder()
/// .spawn(|_| println!("A child thread is running"))
/// .unwrap();
/// }).unwrap();
/// ```
pub fn builder<'scope>(&'scope self) -> ScopedThreadBuilder<'scope, 'env> {
ScopedThreadBuilder {
scope: self,
@@ -205,8 +274,39 @@ impl<'env> fmt::Debug for Scope<'env> {
}
}
-/// Scoped thread configuration. Provides detailed control over the properties and behavior of new
-/// scoped threads.
+/// Configures the properties of a new thread.
///
/// The two configurable properties are:
///
/// - [`name`]: Specifies an [associated name for the thread][naming-threads].
/// - [`stack_size`]: Specifies the [desired stack size for the thread][stack-size].
///
/// The [`spawn`] method will take ownership of the builder and return an [`io::Result`] of the
/// thread handle with the given configuration.
///
/// The [`Scope::spawn`] method uses a builder with default configuration and unwraps its return
/// value. You may want to use this builder when you want to recover from a failure to launch a
/// thread.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// s.builder()
/// .spawn(|_| println!("Running a child thread"))
/// .unwrap();
/// }).unwrap();
/// ```
///
/// [`name`]: struct.ScopedThreadBuilder.html#method.name
/// [`stack_size`]: struct.ScopedThreadBuilder.html#method.stack_size
/// [`spawn`]: struct.ScopedThreadBuilder.html#method.spawn
/// [`Scope::spawn`]: struct.Scope.html#method.spawn
/// [`io::Result`]: https://doc.rust-lang.org/std/io/type.Result.html
/// [naming-threads]: https://doc.rust-lang.org/std/thread/index.html#naming-threads
/// [stack-size]: https://doc.rust-lang.org/std/thread/index.html#stack-size
#[derive(Debug)]
pub struct ScopedThreadBuilder<'scope, 'env: 'scope> {
scope: &'scope Scope<'env>,
@@ -214,20 +314,77 @@ pub struct ScopedThreadBuilder<'scope, 'env: 'scope> {
}
impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> {
-/// Names the thread-to-be. Currently the name is used for identification only in panic
-/// messages.
+/// Sets the name for the new thread.
///
/// The name must not contain null bytes. For more information about named threads, see
/// [here][naming-threads].
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
/// use std::thread::current;
///
/// thread::scope(|s| {
/// s.builder()
/// .name("my thread".to_string())
/// .spawn(|_| assert_eq!(current().name(), Some("my thread")))
/// .unwrap();
/// }).unwrap();
/// ```
///
/// [naming-threads]: https://doc.rust-lang.org/std/thread/index.html#naming-threads
pub fn name(mut self, name: String) -> ScopedThreadBuilder<'scope, 'env> {
self.builder = self.builder.name(name);
self
}
/// Sets the size of the stack for the new thread.
///
/// The stack size is measured in bytes.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// s.builder()
/// .stack_size(32 * 1024)
/// .spawn(|_| println!("Running a child thread"))
/// .unwrap();
/// }).unwrap();
/// ```
pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'scope, 'env> {
self.builder = self.builder.stack_size(size);
self
}
-/// Spawns a new thread, and returns a join handle for it.
+/// Spawns a scoped thread with this configuration.
///
/// The scoped thread is passed a reference to this scope as an argument, which can be used for
/// spawning nested threads.
///
/// The returned handle can be used to manually join the thread before the scope exits.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// let handle = s.builder()
/// .spawn(|_| {
/// println!("A child thread is running");
/// 42
/// })
/// .unwrap();
///
/// // Join the thread and retrieve its result.
/// let res = handle.join().unwrap();
/// assert_eq!(res, 42);
/// }).unwrap();
/// ```
pub fn spawn<F, T>(self, f: F) -> io::Result<ScopedJoinHandle<'scope, T>>
where
F: FnOnce(&Scope<'env>) -> T,
@@ -244,7 +401,7 @@ impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> {
// A clone of the scope that will be moved into the new thread.
let scope = Scope::<'env> {
handles: Arc::clone(&self.scope.handles),
-chan: self.scope.chan.clone(),
+wait_group: self.scope.wait_group.clone(),
_marker: PhantomData,
};
@@ -294,7 +451,7 @@ impl<'scope, 'env> ScopedThreadBuilder<'scope, 'env> {
unsafe impl<'scope, T> Send for ScopedJoinHandle<'scope, T> {}
unsafe impl<'scope, T> Sync for ScopedJoinHandle<'scope, T> {}
-/// A handle to a scoped thread
+/// A handle that can be used to join its scoped thread.
pub struct ScopedJoinHandle<'scope, T> {
/// A join handle to the spawned thread.
handle: SharedOption<thread::JoinHandle<()>>,
@@ -310,17 +467,33 @@ pub struct ScopedJoinHandle<'scope, T> {
}
impl<'scope, T> ScopedJoinHandle<'scope, T> {
-/// Waits for the associated thread to finish.
+/// Waits for the thread to finish and returns its result.
///
-/// If the child thread panics, [`Err`] is returned with the parameter given to [`panic`].
-///
-/// [`Err`]: https://doc.rust-lang.org/std/result/enum.Result.html#variant.Err
-/// [`panic`]: https://doc.rust-lang.org/std/macro.panic.html
+/// If the child thread panics, an error is returned.
///
/// # Panics
///
/// This function may panic on some platforms if a thread attempts to join itself or otherwise
/// may create a deadlock with joining threads.
///
/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// let handle1 = s.spawn(|_| println!("I'm a happy thread :)"));
/// let handle2 = s.spawn(|_| panic!("I'm a sad thread :("));
///
/// // Join the first thread and verify that it succeeded.
/// let res = handle1.join();
/// assert!(res.is_ok());
///
/// // Join the second thread and verify that it panicked.
/// let res = handle2.join();
/// assert!(res.is_err());
/// }).unwrap();
/// ```
pub fn join(self) -> thread::Result<T> {
// Take out the handle. The handle will surely be available because the root scope waits
// for nested scopes before joining remaining threads.
@@ -332,9 +505,18 @@ impl<'scope, T> ScopedJoinHandle<'scope, T> {
.map(|()| self.result.lock().unwrap().take().unwrap())
}
-/// Gets the underlying [`std::thread::Thread`] handle.
+/// Returns a handle to the underlying thread.
///
-/// [`std::thread::Thread`]: https://doc.rust-lang.org/std/thread/struct.Thread.html
+/// # Examples
///
/// ```
/// use crossbeam_utils::thread;
///
/// thread::scope(|s| {
/// let handle = s.spawn(|_| println!("A child thread is running"));
/// println!("The child thread ID: {:?}", handle.thread().id());
/// }).unwrap();
/// ```
pub fn thread(&self) -> &thread::Thread {
&self.thread
}
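For context, the result-passing that `join` performs through `SharedOption` (the `Arc<Mutex<Option<T>>>` alias defined near the top of this file) reduces to the following pattern; a simplified standalone sketch, not the vendored code itself:

```rust
use std::sync::{Arc, Mutex};
use std::thread;

fn main() {
    // The child writes its result into a shared Option, and the joiner
    // takes it out once the thread has finished.
    let result: Arc<Mutex<Option<i32>>> = Arc::new(Mutex::new(None));
    let result2 = Arc::clone(&result);

    let handle = thread::spawn(move || {
        *result2.lock().unwrap() = Some(42);
    });

    handle.join().unwrap();
    assert_eq!(result.lock().unwrap().take(), Some(42));
}
```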

third_party/rust/crossbeam-utils/tests/sharded_lock.rs (vendored, new file)

@@ -0,0 +1,245 @@
extern crate crossbeam_utils;
extern crate rand;
use std::sync::mpsc::channel;
use std::thread;
use std::sync::{Arc, TryLockError};
use std::sync::atomic::{AtomicUsize, Ordering};
use crossbeam_utils::sync::ShardedLock;
use rand::Rng;
#[derive(Eq, PartialEq, Debug)]
struct NonCopy(i32);

#[test]
fn smoke() {
    let l = ShardedLock::new(());
    drop(l.read().unwrap());
    drop(l.write().unwrap());
    drop((l.read().unwrap(), l.read().unwrap()));
    drop(l.write().unwrap());
}

#[test]
fn frob() {
    const N: u32 = 10;
    const M: usize = 1000;

    let r = Arc::new(ShardedLock::new(()));

    let (tx, rx) = channel::<()>();
    for _ in 0..N {
        let tx = tx.clone();
        let r = r.clone();
        thread::spawn(move || {
            let mut rng = rand::thread_rng();
            for _ in 0..M {
                if rng.gen_bool(1.0 / (N as f64)) {
                    drop(r.write().unwrap());
                } else {
                    drop(r.read().unwrap());
                }
            }
            drop(tx);
        });
    }
    drop(tx);
    let _ = rx.recv();
}

#[test]
fn arc_poison_wr() {
    let arc = Arc::new(ShardedLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.write().unwrap();
        panic!();
    }).join();
    assert!(arc.read().is_err());
}

#[test]
fn arc_poison_ww() {
    let arc = Arc::new(ShardedLock::new(1));
    assert!(!arc.is_poisoned());
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.write().unwrap();
        panic!();
    }).join();
    assert!(arc.write().is_err());
    assert!(arc.is_poisoned());
}

#[test]
fn arc_no_poison_rr() {
    let arc = Arc::new(ShardedLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.read().unwrap();
        panic!();
    }).join();
    let lock = arc.read().unwrap();
    assert_eq!(*lock, 1);
}

#[test]
fn arc_no_poison_sl() {
    let arc = Arc::new(ShardedLock::new(1));
    let arc2 = arc.clone();
    let _: Result<(), _> = thread::spawn(move || {
        let _lock = arc2.read().unwrap();
        panic!()
    }).join();
    let lock = arc.write().unwrap();
    assert_eq!(*lock, 1);
}

#[test]
fn arc() {
    let arc = Arc::new(ShardedLock::new(0));
    let arc2 = arc.clone();
    let (tx, rx) = channel();

    thread::spawn(move || {
        let mut lock = arc2.write().unwrap();
        for _ in 0..10 {
            let tmp = *lock;
            *lock = -1;
            thread::yield_now();
            *lock = tmp + 1;
        }
        tx.send(()).unwrap();
    });

    // Readers try to catch the writer in the act
    let mut children = Vec::new();
    for _ in 0..5 {
        let arc3 = arc.clone();
        children.push(thread::spawn(move || {
            let lock = arc3.read().unwrap();
            assert!(*lock >= 0);
        }));
    }

    // Wait for children to pass their asserts
    for r in children {
        assert!(r.join().is_ok());
    }

    // Wait for writer to finish
    rx.recv().unwrap();
    let lock = arc.read().unwrap();
    assert_eq!(*lock, 10);
}

#[test]
fn arc_access_in_unwind() {
    let arc = Arc::new(ShardedLock::new(1));
    let arc2 = arc.clone();
    let _ = thread::spawn(move || -> () {
        struct Unwinder {
            i: Arc<ShardedLock<isize>>,
        }
        impl Drop for Unwinder {
            fn drop(&mut self) {
                let mut lock = self.i.write().unwrap();
                *lock += 1;
            }
        }
        let _u = Unwinder { i: arc2 };
        panic!();
    }).join();
    let lock = arc.read().unwrap();
    assert_eq!(*lock, 2);
}

#[test]
fn unsized_type() {
    let sl: &ShardedLock<[i32]> = &ShardedLock::new([1, 2, 3]);
    {
        let b = &mut *sl.write().unwrap();
        b[0] = 4;
        b[2] = 5;
    }
    let comp: &[i32] = &[4, 2, 5];
    assert_eq!(&*sl.read().unwrap(), comp);
}

#[test]
fn try_write() {
    let lock = ShardedLock::new(0isize);
    let read_guard = lock.read().unwrap();

    let write_result = lock.try_write();
    match write_result {
        Err(TryLockError::WouldBlock) => (),
        Ok(_) => assert!(false, "try_write should not succeed while read_guard is in scope"),
        Err(_) => assert!(false, "unexpected error"),
    }

    drop(read_guard);
}

#[test]
fn test_into_inner() {
    let m = ShardedLock::new(NonCopy(10));
    assert_eq!(m.into_inner().unwrap(), NonCopy(10));
}

#[test]
fn test_into_inner_drop() {
    struct Foo(Arc<AtomicUsize>);
    impl Drop for Foo {
        fn drop(&mut self) {
            self.0.fetch_add(1, Ordering::SeqCst);
        }
    }

    let num_drops = Arc::new(AtomicUsize::new(0));
    let m = ShardedLock::new(Foo(num_drops.clone()));
    assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    {
        let _inner = m.into_inner().unwrap();
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
    }
    assert_eq!(num_drops.load(Ordering::SeqCst), 1);
}

#[test]
fn test_into_inner_poison() {
    let m = Arc::new(ShardedLock::new(NonCopy(10)));
    let m2 = m.clone();
    let _ = thread::spawn(move || {
        let _lock = m2.write().unwrap();
        panic!("test panic in inner thread to poison ShardedLock");
    }).join();

    assert!(m.is_poisoned());
    match Arc::try_unwrap(m).unwrap().into_inner() {
        Err(e) => assert_eq!(e.into_inner(), NonCopy(10)),
        Ok(x) => panic!("into_inner of poisoned ShardedLock is Ok: {:?}", x),
    }
}

#[test]
fn test_get_mut() {
    let mut m = ShardedLock::new(NonCopy(10));
    *m.get_mut().unwrap() = NonCopy(20);
    assert_eq!(m.into_inner().unwrap(), NonCopy(20));
}

#[test]
fn test_get_mut_poison() {
    let m = Arc::new(ShardedLock::new(NonCopy(10)));
    let m2 = m.clone();
    let _ = thread::spawn(move || {
        let _lock = m2.write().unwrap();
        panic!("test panic in inner thread to poison ShardedLock");
    }).join();

    assert!(m.is_poisoned());
    match Arc::try_unwrap(m).unwrap().get_mut() {
        Err(e) => assert_eq!(*e.into_inner(), NonCopy(10)),
        Ok(x) => panic!("get_mut of poisoned ShardedLock is Ok: {:?}", x),
    }
}
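The tests above exercise an API that mirrors `std::sync::RwLock`. A minimal usage sketch, using only the methods the tests themselves call:

```rust
extern crate crossbeam_utils;

use crossbeam_utils::sync::ShardedLock;

fn main() {
    let lock = ShardedLock::new(5);

    // Any number of readers may hold guards at the same time.
    {
        let r1 = lock.read().unwrap();
        let r2 = lock.read().unwrap();
        assert_eq!(*r1 + *r2, 10);
    }

    // A writer is exclusive, as with `RwLock`.
    *lock.write().unwrap() += 1;
    assert_eq!(*lock.read().unwrap(), 6);
}
```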

third_party/rust/crossbeam-utils/tests/wait_group.rs (vendored, new file)

@@ -0,0 +1,66 @@
extern crate crossbeam_utils;
use std::sync::mpsc;
use std::thread;
use std::time::Duration;
use crossbeam_utils::sync::WaitGroup;
const THREADS: usize = 10;
#[test]
fn wait() {
    let wg = WaitGroup::new();
    let (tx, rx) = mpsc::channel();

    for _ in 0..THREADS {
        let wg = wg.clone();
        let tx = tx.clone();

        thread::spawn(move || {
            wg.wait();
            tx.send(()).unwrap();
        });
    }

    thread::sleep(Duration::from_millis(100));

    // At this point, all spawned threads should be blocked, so we shouldn't get anything from the
    // channel.
    assert!(rx.try_recv().is_err());

    wg.wait();

    // Now, the wait group is cleared and we should receive messages.
    for _ in 0..THREADS {
        rx.recv().unwrap();
    }
}

#[test]
fn wait_and_drop() {
    let wg = WaitGroup::new();
    let (tx, rx) = mpsc::channel();

    for _ in 0..THREADS {
        let wg = wg.clone();
        let tx = tx.clone();

        thread::spawn(move || {
            thread::sleep(Duration::from_millis(100));
            tx.send(()).unwrap();
            drop(wg);
        });
    }

    // At this point, all spawned threads should be sleeping, so we shouldn't get anything from the
    // channel.
    assert!(rx.try_recv().is_err());

    wg.wait();

    // Now, the wait group is cleared and we should receive messages.
    for _ in 0..THREADS {
        rx.try_recv().unwrap();
    }
}

third_party/rust/headers-core/.cargo-checksum.json (vendored, new file)

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"9f5a2dae8c85e4b6df7deba5841a7d8a159f3edbf2e578241168fd8a6b12f490","LICENSE":"ee0f7b3693a4878ac89b5c34674a3f1208dc6dd96e201e8c1f65f55873ec38d8","README.md":"957e16f30d33c262cdbc2eb7d13e6c11314f36ae0351935621a9ff0df078f005","src/lib.rs":"3ee38d6c82a1357be8add774892e4862582fa628950c0f43b7405d4e0866cfe7"},"package":"967131279aaa9f7c20c7205b45a391638a83ab118e6509b2d0ccbe08de044237"}

third_party/rust/headers-core/Cargo.toml (vendored, new file)

@@ -0,0 +1,27 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "headers-core"
version = "0.1.1"
authors = ["Sean McArthur <sean@seanmonstar.com>"]
description = "typed HTTP headers core trait"
homepage = "https://hyper.rs"
readme = "README.md"
keywords = ["http", "headers", "hyper", "hyperium"]
license = "MIT"
repository = "https://github.com/hyperium/headers"
[dependencies.bytes]
version = "0.4"
[dependencies.http]
version = "0.1.15"

third_party/rust/headers-core/LICENSE (vendored, new file)

@@ -0,0 +1,20 @@
Copyright (c) 2014-2019 Sean McArthur

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

third_party/rust/headers-core/README.md (vendored, new file)

@@ -0,0 +1,3 @@
# Typed HTTP Headers: core `Header` trait
WIP

third_party/rust/headers-core/src/lib.rs (vendored, new file)

@@ -0,0 +1,69 @@
#![deny(missing_docs)]
#![deny(missing_debug_implementations)]
#![cfg_attr(test, deny(warnings))]
#![doc(html_root_url = "https://docs.rs/headers-core/0.1.1")]
//! # headers-core
//!
//! This is the core crate of the typed HTTP headers system, providing only
//! the relevant traits. All actual header implementations are in other crates.
extern crate bytes;
extern crate http;
pub use http::header::{self, HeaderName, HeaderValue};
use std::fmt::{self, Display, Formatter};
use std::error;
/// A trait for any object that will represent a header field and value.
///
/// This trait represents the construction and identification of headers,
/// and contains trait-object unsafe methods.
pub trait Header {
    /// The name of this header.
    fn name() -> &'static HeaderName;

    /// Decode this type from an iterator of `HeaderValue`s.
    fn decode<'i, I>(values: &mut I) -> Result<Self, Error>
    where
        Self: Sized,
        I: Iterator<Item = &'i HeaderValue>;

    /// Encode this type to a `HeaderMap`.
    ///
    /// This function should be infallible. Any errors converting to a
    /// `HeaderValue` should have been caught when parsing or constructing
    /// this value.
    fn encode<E: Extend<HeaderValue>>(&self, values: &mut E);
}

/// Errors trying to decode a header.
#[derive(Debug)]
pub struct Error {
    kind: Kind,
}

#[derive(Debug)]
enum Kind {
    Invalid,
}

impl Error {
    /// Create an 'invalid' Error.
    pub fn invalid() -> Error {
        Error {
            kind: Kind::Invalid,
        }
    }
}

impl Display for Error {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        match &self.kind {
            Kind::Invalid => f.write_str("invalid HTTP header"),
        }
    }
}

impl error::Error for Error {}
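To illustrate the trait, here is a hypothetical hand-written implementation for the standard `Host` header (a sketch, not part of the crate; a real implementation would parse and validate the value rather than store it verbatim):

```rust
extern crate headers_core;

use headers_core::{Error, Header, HeaderName, HeaderValue};

// Hypothetical typed header that keeps the raw value as-is.
struct Host(HeaderValue);

impl Header for Host {
    fn name() -> &'static HeaderName {
        // `headers_core::header` is the re-export of `http::header` above.
        &headers_core::header::HOST
    }

    fn decode<'i, I>(values: &mut I) -> Result<Self, Error>
    where
        I: Iterator<Item = &'i HeaderValue>,
    {
        // Take the first value; fail with `Error::invalid` if none is present.
        values.next().cloned().map(Host).ok_or_else(Error::invalid)
    }

    fn encode<E: Extend<HeaderValue>>(&self, values: &mut E) {
        values.extend(::std::iter::once(self.0.clone()));
    }
}

fn main() {}
```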

third_party/rust/headers-derive/.cargo-checksum.json (vendored, new file)

@@ -0,0 +1 @@
{"files":{"Cargo.toml":"bc1cd05985f5bb52b5814c54a342ebf71b6ec426992b36fc1ebf8818490d7d4e","LICENSE":"ee0f7b3693a4878ac89b5c34674a3f1208dc6dd96e201e8c1f65f55873ec38d8","README.md":"cc68b692e7270588fc0e758b58803bdec852add5100e16d2141184f508f16faf","src/lib.rs":"ecda704721fb2a53655d0ec6fa0a1e724fd65d493ec7bc15fce12bbfd3f5a781"},"package":"97c462e8066bca4f0968ddf8d12de64c40f2c2187b3b9a2fa994d06e8ad444a9"}

third_party/rust/headers-derive/Cargo.toml (vendored, new file)

@@ -0,0 +1,33 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "headers-derive"
version = "0.1.0"
authors = ["Sean McArthur <sean@seanmonstar.com>"]
description = "derive(Header)"
homepage = "https://hyper.rs"
readme = "README.md"
license = "MIT"
repository = "https://github.com/hyperium/headers"
[lib]
name = "headers_derive"
proc-macro = true
[dependencies.proc-macro2]
version = "0.4"
[dependencies.quote]
version = "0.6"
[dependencies.syn]
version = "0.15"

third_party/rust/headers-derive/LICENSE (vendored, new file)

@@ -0,0 +1,20 @@
Copyright (c) 2014-2019 Sean McArthur

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

third_party/rust/headers-derive/README.md (vendored, new file)

@@ -0,0 +1,3 @@
# Internal derive(Header) macro for `headers` crate
Doesn't work outside the `headers` crate, nothing to see here.

third_party/rust/headers-derive/src/lib.rs (vendored, new file)

@@ -0,0 +1,242 @@
#![recursion_limit="128"]
extern crate proc_macro;
extern crate proc_macro2;
#[macro_use]
extern crate quote;
extern crate syn;
use proc_macro::TokenStream;
use proc_macro2::Span;
use syn::{Data, Fields, Ident, Lit, Meta, NestedMeta};
#[proc_macro_derive(Header, attributes(header))]
pub fn derive_header(input: TokenStream) -> TokenStream {
    let ast = syn::parse(input).unwrap();
    impl_header(&ast).into()
}

fn impl_header(ast: &syn::DeriveInput) -> proc_macro2::TokenStream {
    let fns = match impl_fns(ast) {
        Ok(fns) => fns,
        Err(msg) => {
            return quote! {
                compile_error!(#msg);
            }.into();
        }
    };

    let decode = fns.decode;
    let encode = fns.encode;

    let ty = &ast.ident;
    let hname = fns.name.unwrap_or_else(|| {
        to_header_name(&ty.to_string())
    });
    let hname_ident = Ident::new(&hname, Span::call_site());
    let dummy_const = Ident::new(&format!("_IMPL_HEADER_FOR_{}", hname), Span::call_site());

    let impl_block = quote! {
        impl __hc::Header for #ty {
            fn name() -> &'static __hc::HeaderName {
                &__hc::header::#hname_ident
            }

            fn decode<'i, I>(values: &mut I) -> Result<Self, __hc::Error>
            where
                I: Iterator<Item = &'i __hc::HeaderValue>,
            {
                #decode
            }

            fn encode<E: Extend<__hc::HeaderValue>>(&self, values: &mut E) {
                #encode
            }
        }
    };

    quote! {
        const #dummy_const: () = {
            extern crate headers_core as __hc;

            #impl_block
        };
    }
}

struct Fns {
    encode: proc_macro2::TokenStream,
    decode: proc_macro2::TokenStream,
    name: Option<String>,
}

fn impl_fns(ast: &syn::DeriveInput) -> Result<Fns, String> {
    let ty = &ast.ident;

    // Only structs are allowed...
    let st = match ast.data {
        Data::Struct(ref st) => st,
        _ => {
            return Err("derive(Header) only works on structs".into())
        }
    };

    // Check attributes for `#[header(...)]` that may influence the code
    // that is generated...
    //let mut is_csv = false;
    let mut name = None;

    for attr in &ast.attrs {
        if attr.path.segments.len() != 1 {
            continue;
        }
        if attr.path.segments[0].ident != "header" {
            continue;
        }

        match attr.interpret_meta() {
            Some(Meta::List(list)) => {
                for meta in &list.nested {
                    match meta {
                        /*
                        To be conservative, this attribute is disabled...
                        NestedMeta::Meta(Meta::Word(ref word)) if word == "csv" => {
                            is_csv = true;
                        },
                        */
                        NestedMeta::Meta(Meta::NameValue(ref kv)) if kv.ident == "name_const" => {
                            if name.is_some() {
                                return Err("repeated 'name_const' option in #[header] attribute".into());
                            }
                            name = match kv.lit {
                                Lit::Str(ref s) => Some(s.value()),
                                _ => {
                                    return Err("illegal literal in #[header(name_const = ..)] attribute".into());
                                }
                            };
                        }
                        _ => {
                            return Err("illegal option in #[header(..)] attribute".into())
                        }
                    }
                }
            },
            Some(Meta::NameValue(_)) => {
                return Err("illegal #[header = ..] attribute".into())
            },
            Some(Meta::Word(_)) => {
                return Err("empty #[header] attributes do nothing".into())
            },
            None => {
                // TODO stringify attribute to return better error
                return Err("illegal #[header ??] attribute".into())
            }
        }
    }

    let decode_res = quote! {
        ::util::TryFromValues::try_from_values(values)
    };

    let (decode, encode_name) = match st.fields {
        Fields::Named(ref fields) => {
            if fields.named.len() != 1 {
                return Err("derive(Header) doesn't support multiple fields".into());
            }
            let field = fields
                .named
                .iter()
                .next()
                .expect("just checked for len() == 1");
            let field_name = field.ident.as_ref().unwrap();
            let decode = quote! {
                #decode_res
                    .map(|inner| #ty {
                        #field_name: inner,
                    })
            };
            let encode_name = Ident::new(&field_name.to_string(), Span::call_site());
            (decode, Value::Named(encode_name))
        },
        Fields::Unnamed(ref fields) => {
            if fields.unnamed.len() != 1 {
                return Err("derive(Header) doesn't support multiple fields".into());
            }
            let decode = quote! {
                #decode_res
                    .map(#ty)
            };
            (decode, Value::Unnamed)
        },
        Fields::Unit => {
            return Err("derive(Header) doesn't support unit structs".into())
        }
    };

    // csv attr disabled for now
    let encode = /*if is_csv {
        let field = if let Value::Named(field) = encode_name {
            quote! {
                (&(self.0).#field)
            }
        } else {
            quote! {
                (&(self.0).0)
            }
        };
        quote! {
            struct __HeaderFmt<'hfmt>(&'hfmt #ty);

            impl<'hfmt> ::std::fmt::Display for __HeaderFmt<'hfmt> {
                fn fmt(&self, hfmt: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
                    __hc::encode::comma_delimited(hfmt, (#field).into_iter())
                }
            }

            values.append_fmt(&__HeaderFmt(self));
        }
    } else*/ {
        let field = if let Value::Named(field) = encode_name {
            quote! {
                (&self.#field)
            }
        } else {
            quote! {
                (&self.0)
            }
        };
        quote! {
            values.extend(::std::iter::once((#field).into()));
        }
    };

    Ok(Fns {
        decode,
        encode,
        name,
    })
}

fn to_header_name(ty_name: &str) -> String {
    let mut out = String::new();
    let mut first = true;
    for c in ty_name.chars() {
        if first {
            out.push(c.to_ascii_uppercase());
            first = false;
        } else {
            if c.is_uppercase() {
                out.push('_');
            }
            out.push(c.to_ascii_uppercase());
        }
    }
    out
}

enum Value {
    Named(Ident),
    Unnamed,
}
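The `to_header_name` mapping above can be pinned down with a small unit test (hypothetical, not in the vendored crate; it would have to live inside this crate since the function is private): CamelCase type names map to the SHOUTY_SNAKE_CASE constant names used in `http::header`.

```rust
#[test]
fn to_header_name_upper_snakes_camel_case() {
    assert_eq!(to_header_name("Host"), "HOST");
    assert_eq!(to_header_name("ContentLength"), "CONTENT_LENGTH");
    assert_eq!(to_header_name("XRequestId"), "X_REQUEST_ID");
}
```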

third_party/rust/headers/.cargo-checksum.json (vendored, new file)

(File diff not shown because one or more lines are too long.)

(Some files were not shown because too many files changed in this diff.)