Mirror of https://github.com/mozilla/gecko-dev.git
No bug - Revendor rust dependencies
--HG-- rename : third_party/rust/rayon/tests/compile-fail-unstable/scope_join_bad.rs => third_party/rust/rayon/tests/compile-fail/scope_join_bad.rs rename : third_party/rust/rayon/tests/run-pass-unstable/scope_join.rs => third_party/rust/rayon/tests/run-pass/scope_join.rs
This commit is contained in:
Parent
5248783910
Commit
0822921f3b
File diffs are hidden because one or more lines are too long.
@@ -0,0 +1,2 @@
target
Cargo.lock
@@ -0,0 +1,37 @@
language: rust

matrix:
  include:
    - os: linux
      rust: 1.10.0
      script: cargo test
rust:
  - stable
  - beta
  - nightly
sudo: false
before_script:
  - pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
script:
  - export CARGO_TARGET_DIR=`pwd`/target
  - cargo build
  - cargo build --no-default-features
  - cargo test
  - cargo test --no-default-features --features use_std
  - cargo test --manifest-path futures-cpupool/Cargo.toml
  - cargo test --manifest-path futures-cpupool/Cargo.toml --no-default-features

  - cargo doc --no-deps
  - cargo doc --no-deps --manifest-path futures-cpupool/Cargo.toml
after_success:
  - travis-cargo --only nightly doc-upload
env:
  global:
    - secure: "iwVcMVIF7ZSY82fK5UyyUvVvJxMSYrbZawh1+4Oi8pvOdYq1gptcDoOC8jxWwCwrNF1b+/85n+jlEUngEqqSmV5PjAbWPjoc+u4Zn7CRi1AlxoUlvHPiQm4vM4Mkkd6GsqoIZttCeedU9m/w0nQ18uUtK8uD6vr2FVdcMnUnkYQAxuGOowGLrwidukzfBXMCu/JrwKMIbt61knAFiI/KJknu0h1mRrhpeF/sQ3tJFzRRcQeFJkbfwDzltMpPo1hq5D3HI4ONjYi/qO2pwUhDk4umfp9cLW9MS8rQvptxJTQmWemHi+f2/U4ld6a0URL6kEuMkt/EbH0A74eFtlicfRs44dX9MlWoqbLypnC3ymqmHcpwcwNA3HmZyg800MTuU+BPK41HIPdO9tPpxjHEiqvNDknH7qs+YBnis0eH7DHJgEjXq651PjW7pm+rnHPwsj+OzKE1YBNxBQZZDkS3VnZJz+O4tVsOzc3IOz0e+lf7VVuI17C9haj117nKp3umC4MVBA0S8RfreFgqpyDeY2zwcqOr0YOlEGGRl0vyWP8Qcxx12kQ7+doLolt6Kxda4uO0hKRmIF6+qki1T+L7v8BOGOtCncz4f7IX48eQ7+Wu0OtglRn45qAa3CxjUuW6xX3KSNH66PCXV0Jtp8Ga2SSevX2wtbbFu9f+9R+PQY4="

notifications:
  email:
    on_success: never
os:
  - linux
  - osx
@@ -0,0 +1,29 @@
[package]
name = "futures"
version = "0.1.13"
authors = ["Alex Crichton <alex@alexcrichton.com>"]
license = "MIT/Apache-2.0"
readme = "README.md"
keywords = ["futures", "async", "future"]
repository = "https://github.com/alexcrichton/futures-rs"
homepage = "https://github.com/alexcrichton/futures-rs"
documentation = "https://docs.rs/futures"
description = """
An implementation of futures and streams featuring zero allocations,
composability, and iterator-like interfaces.
"""
categories = ["asynchronous"]

[badges]
travis-ci = { repository = "alexcrichton/futures-rs" }
appveyor = { repository = "alexcrichton/futures-rs" }

[dependencies]

[features]
use_std = []
with-deprecated = []
default = ["use_std", "with-deprecated"]

[workspace]
members = ["futures-cpupool"]
@@ -0,0 +1,99 @@
# FAQ

A collection of some commonly asked questions, with responses! If you find any
of these unsatisfactory feel free to ping me (@alexcrichton) on github,
acrichto on IRC, or just by email!

### Why both `Item` and `Error` associated types?

An alternative design of the `Future` trait would be to only have one associated
type, `Item`, and then most futures would resolve to `Result<T, E>`. Since
futures are intended as the fundamental support for async I/O, however, errors
end up encoded in almost all futures anyway. By encoding an
error type in the future as well we're able to provide convenient combinators
like `and_then` which automatically propagate errors, as well as combinators
like `join` which can act differently depending on whether a future resolves to
an error or not.
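
A minimal illustration of the difference (a sketch using this crate's `ok`/`err`
constructors and the blocking `wait` method, so it is for demonstration only):

```rust
extern crate futures;

use futures::future::*;

fn main() {
    // `and_then` only runs its closure on success; an error short-circuits.
    let f = ok::<u32, u32>(1).and_then(|x| ok(x + 1));
    assert_eq!(f.wait(), Ok(2));

    let g = err::<u32, u32>(7).and_then(|x| ok(x + 1));
    assert_eq!(g.wait(), Err(7));

    // `join` waits for both futures, but resolves early if either errors.
    let h = ok::<u32, u32>(1).join(err::<u32, u32>(2));
    assert_eq!(h.wait(), Err(2));
}
```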

### Do futures work with multiple event loops?

Yes! Futures are designed to source events from any location, including multiple
event loops. All of the basic combinators will work on any number of event loops
across any number of threads.

### What if I have CPU intensive work?

The documentation of the `Future::poll` function says that it's supposed to
"return quickly"; what if I have work that doesn't return quickly? In this case
it's intended that this work will run on a dedicated pool of threads intended
for this sort of work, and a future to the returned value is used to represent
its completion.

A proof-of-concept method of doing this is the `futures-cpupool` crate in this
repository, where you can execute work on a thread pool and receive a future to
the value generated. This future is then composable with `and_then`, for
example, to mesh in with the rest of a future's computation.
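
For example, a sketch along these lines (the specific `futures-cpupool` calls
here, `CpuPool::new_num_cpus` and `spawn_fn`, are assumed from the version of
that crate vendored alongside this one):

```rust
extern crate futures;
extern crate futures_cpupool;

use futures::Future;
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new_num_cpus();

    // Run the expensive computation on the pool; `spawn_fn` returns a future
    // for its result, which composes with `map`, `and_then`, and friends.
    let heavy = pool.spawn_fn(|| {
        let sum = (0..1_000_000u64).fold(0, |acc, x| acc + x);
        Ok::<u64, ()>(sum)
    });

    let doubled = heavy.map(|n| n * 2);
    println!("{:?}", doubled.wait());
}
```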

### How do I call `poll`?

In general it's not recommended to call `poll` unless you're implementing
another `poll` function. If you need to poll a future, however, you can use
`task::spawn` followed by the `poll_future` method on `Spawn<T>`.

### How do I return a future?

Returning a future is like returning an iterator in Rust today. It's not the
easiest thing to do and you frequently need to resort to `Box` with a trait
object. Thankfully though [`impl Trait`] is just around the corner and will
allow returning these types unboxed in the future.

[`impl Trait`]: https://github.com/rust-lang/rust/issues/34511

For now though the cost of boxing shouldn't actually be that high. A future
computation can be constructed *without boxing* and only the final step actually
places a `Box` around the entire future. In that sense you're only paying the
allocation at the very end, not for any of the intermediate futures.

More information can be found [in the tutorial][return-future].

[return-future]: https://github.com/alexcrichton/futures-rs/blob/master/TUTORIAL.md#returning-futures
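
A sketch of what that looks like today (the function name and its error type
are invented for illustration): the combinator chain is built unboxed, and a
single `Box` is added once at the end.

```rust
extern crate futures;

use futures::future::{ok, err, Future};

// Only the finished chain is boxed; the intermediate `map`/`and_then`
// futures live on the stack while the chain is being assembled.
fn even_or_error(input: u32) -> Box<Future<Item = u32, Error = String>> {
    let f = ok::<u32, String>(input)
        .map(|x| x + 1)
        .and_then(|x| {
            if x % 2 == 0 { ok(x) } else { err(format!("{} is odd", x)) }
        });
    Box::new(f)
}

fn main() {
    assert_eq!(even_or_error(1).wait(), Ok(2));
    assert!(even_or_error(2).wait().is_err());
}
```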

### Does it work on Windows?

Yes! This library builds on top of mio, which works on Windows.

### What version of Rust should I use?

Rust 1.10 or later.

### Is it on crates.io?

Not yet! A few names are reserved, but crates cannot have dependencies from a
git repository. Right now we depend on the master branch of `mio`, and crates
will be published once that's on crates.io as well!

### Does this implement tail call optimization?

One aspect of many existing futures libraries is whether or not a tail call
optimization is implemented. The exact meaning of this varies from framework to
framework, but it typically boils down to whether common patterns can be
implemented in such a way that prevents blowing the stack if the system is
overloaded for a moment or leaking memory for the entire lifetime of a
future/server.

For the former case, blowing the stack, this typically arises because loops are
often implemented through recursion with futures. This recursion can end up
proceeding too quickly if the "loop" makes lots of turns very quickly. At this
time neither the `Future` nor `Stream` traits handle tail call optimizations in
this case, but combinators and patterns are provided to avoid recursion. For
example a `Stream` implements `fold`, `for_each`, etc. These combinators can
often be used to implement an asynchronous loop to avoid recursion, and they all
execute in constant stack space. Note that we're very interested in exploring
more generalized loop combinators, so PRs are always welcome!
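
As one concrete illustration, the `loop_fn` combinator in this crate drives an
asynchronous loop in constant stack space by rebuilding a future on every turn
instead of recursing (a minimal sketch; the counting logic is made up):

```rust
extern crate futures;

use futures::future::{ok, loop_fn, Future, FutureResult, Loop};

fn main() {
    // Each turn produces a fresh future; nothing recurses, so the stack stays flat.
    let count_to_ten = loop_fn(0u32, |n| -> FutureResult<Loop<u32, u32>, ()> {
        if n < 10 {
            ok(Loop::Continue(n + 1))
        } else {
            ok(Loop::Break(n))
        }
    });
    assert_eq!(count_to_ten.wait(), Ok(10));
}
```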

For the latter case, leaking memory, this can happen when a future accidentally
"remembers" all of its previous states even though it will never use them again.
This can also arise through recursion or otherwise manufacturing futures of
infinite length. Like above, however, these also tend to show up in situations
that would otherwise be expressed with a loop, so the same solutions should
apply there regardless.
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,25 @@
Copyright (c) 2016 Alex Crichton

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,51 @@
# futures-rs

This library is an implementation of **zero-cost futures** in Rust.

[![Build Status](https://travis-ci.org/alexcrichton/futures-rs.svg?branch=master)](https://travis-ci.org/alexcrichton/futures-rs)
[![Build status](https://ci.appveyor.com/api/projects/status/yl5w3ittk4kggfsh?svg=true)](https://ci.appveyor.com/project/alexcrichton/futures-rs)
[![Crates.io](https://img.shields.io/crates/v/futures.svg?maxAge=2592000)](https://crates.io/crates/futures)

[Documentation](https://docs.rs/futures)

[Tutorial](https://tokio.rs/docs/getting-started/futures/)

## Usage

First, add this to your `Cargo.toml`:

```toml
[dependencies]
futures = "0.1.9"
```

Next, add this to your crate:

```rust
extern crate futures;

use futures::Future;
```

For more information about how you can use futures with async I/O you can take a
look at [https://tokio.rs](https://tokio.rs), which is an introduction to both
the Tokio stack and futures.
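
As a tiny self-contained taste of the combinators (note that `wait` blocks the
calling thread, so it is only useful for examples and tests):

```rust
extern crate futures;

use futures::future::{ok, Future};

fn main() {
    let answer = ok::<u32, ()>(6)
        .map(|x| x * 7)
        .and_then(|x| ok(x));
    assert_eq!(answer.wait(), Ok(42));
}
```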

### Feature `use_std`

`futures-rs` works without the standard library, such as in bare metal environments.
However, it has a significantly reduced API surface. To use `futures-rs` in
a `#[no_std]` environment, use:

```toml
[dependencies]
futures = { version = "0.1", default-features = false }
```

# License

`futures-rs` is primarily distributed under the terms of both the MIT license and
the Apache License (Version 2.0), with portions covered by various BSD-like
licenses.

See LICENSE-APACHE and LICENSE-MIT for details.
@@ -0,0 +1,19 @@
environment:
  matrix:
  - TARGET: x86_64-pc-windows-msvc
install:
  - set PATH=C:\Program Files\Git\mingw64\bin;%PATH%
  - curl -sSf -o rustup-init.exe https://win.rustup.rs/
  - rustup-init.exe -y --default-host %TARGET%
  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
  - rustc -V
  - cargo -V

build: false

test_script:
  - cargo build
  - cargo build --no-default-features
  - cargo test
  - cargo test --no-default-features --features use_std
  - cargo test --manifest-path futures-cpupool/Cargo.toml
@@ -0,0 +1,10 @@
//! Executors
//!
//! This module contains tools for managing the raw execution of futures,
//! which is needed when building *executors* (places where futures can run).
//!
//! More information about executors can be [found online at tokio.rs][online].
//!
//! [online]: https://tokio.rs/docs/going-deeper/tasks/

pub use task_impl::{Spawn, spawn, Unpark, Executor, Run};
@@ -0,0 +1,38 @@
use {Future, IntoFuture, Poll};
use super::chain::Chain;

/// Future for the `and_then` combinator, chaining a computation onto the end of
/// another future which completes successfully.
///
/// This is created by the `Future::and_then` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct AndThen<A, B, F> where A: Future, B: IntoFuture {
    state: Chain<A, B::Future, F>,
}

pub fn new<A, B, F>(future: A, f: F) -> AndThen<A, B, F>
    where A: Future,
          B: IntoFuture,
{
    AndThen {
        state: Chain::new(future, f),
    }
}

impl<A, B, F> Future for AndThen<A, B, F>
    where A: Future,
          B: IntoFuture<Error=A::Error>,
          F: FnOnce(A::Item) -> B,
{
    type Item = B::Item;
    type Error = B::Error;

    fn poll(&mut self) -> Poll<B::Item, B::Error> {
        self.state.poll(|result, f| {
            result.map(|e| {
                Err(f(e).into_future())
            })
        })
    }
}
@@ -0,0 +1,51 @@
use std::prelude::v1::*;
use std::any::Any;
use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe};

use {Future, Poll, Async};

/// Future for the `catch_unwind` combinator.
///
/// This is created by the `Future::catch_unwind` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct CatchUnwind<F> where F: Future {
    future: Option<F>,
}

pub fn new<F>(future: F) -> CatchUnwind<F>
    where F: Future + UnwindSafe,
{
    CatchUnwind {
        future: Some(future),
    }
}

impl<F> Future for CatchUnwind<F>
    where F: Future + UnwindSafe,
{
    type Item = Result<F::Item, F::Error>;
    type Error = Box<Any + Send>;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        let mut future = self.future.take().expect("cannot poll twice");
        let (res, future) = try!(catch_unwind(|| (future.poll(), future)));
        match res {
            Ok(Async::NotReady) => {
                self.future = Some(future);
                Ok(Async::NotReady)
            }
            Ok(Async::Ready(t)) => Ok(Async::Ready(Ok(t))),
            Err(e) => Ok(Async::Ready(Err(e))),
        }
    }
}

impl<F: Future> Future for AssertUnwindSafe<F> {
    type Item = F::Item;
    type Error = F::Error;

    fn poll(&mut self) -> Poll<F::Item, F::Error> {
        self.0.poll()
    }
}
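A hedged usage sketch for the combinator above (not part of the vendored file):
`lazy` and the blocking `wait` call come from this same crate, and
`AssertUnwindSafe` is the standard-library wrapper this file implements
`Future` for.

```rust
extern crate futures;

use std::panic::AssertUnwindSafe;
use futures::future::{lazy, Future, FutureResult};

fn main() {
    // The closure panics when the future is first polled.
    let f = lazy(|| -> FutureResult<u32, u32> { panic!("boom") });

    // `catch_unwind` surfaces the panic as the future's error value
    // (a `Box<Any + Send>` holding the panic payload).
    let res = AssertUnwindSafe(f).catch_unwind().wait();
    assert!(res.is_err());
}
```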
@@ -0,0 +1,48 @@
use core::mem;

use {Future, Poll, Async};

#[derive(Debug)]
pub enum Chain<A, B, C> where A: Future {
    First(A, C),
    Second(B),
    Done,
}

impl<A, B, C> Chain<A, B, C>
    where A: Future,
          B: Future,
{
    pub fn new(a: A, c: C) -> Chain<A, B, C> {
        Chain::First(a, c)
    }

    pub fn poll<F>(&mut self, f: F) -> Poll<B::Item, B::Error>
        where F: FnOnce(Result<A::Item, A::Error>, C)
                        -> Result<Result<B::Item, B>, B::Error>,
    {
        let a_result = match *self {
            Chain::First(ref mut a, _) => {
                match a.poll() {
                    Ok(Async::NotReady) => return Ok(Async::NotReady),
                    Ok(Async::Ready(t)) => Ok(t),
                    Err(e) => Err(e),
                }
            }
            Chain::Second(ref mut b) => return b.poll(),
            Chain::Done => panic!("cannot poll a chained future twice"),
        };
        let data = match mem::replace(self, Chain::Done) {
            Chain::First(_, c) => c,
            _ => panic!(),
        };
        match try!(f(a_result, data)) {
            Ok(e) => Ok(Async::Ready(e)),
            Err(mut b) => {
                let ret = b.poll();
                *self = Chain::Second(b);
                ret
            }
        }
    }
}
@@ -0,0 +1,39 @@
use {Future, Poll};

/// Combines two different futures yielding the same item and error
/// types into a single type.
#[derive(Debug)]
pub enum Either<A, B> {
    /// First branch of the type
    A(A),
    /// Second branch of the type
    B(B),
}

impl<T, A, B> Either<(T, A), (T, B)> {
    /// Splits out the homogeneous type from an either of tuples.
    ///
    /// This method is typically useful when combined with the `Future::select2`
    /// combinator.
    pub fn split(self) -> (T, Either<A, B>) {
        match self {
            Either::A((a, b)) => (a, Either::A(b)),
            Either::B((a, b)) => (a, Either::B(b)),
        }
    }
}

impl<A, B> Future for Either<A, B>
    where A: Future,
          B: Future<Item = A::Item, Error = A::Error>
{
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<A::Item, A::Error> {
        match *self {
            Either::A(ref mut a) => a.poll(),
            Either::B(ref mut b) => b.poll(),
        }
    }
}
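A small usage sketch for `Either` (values invented for the example): the two
branches have different concrete future types but share `Item` and `Error`,
which is exactly the mismatch `Either` papers over.

```rust
extern crate futures;

use futures::future::{ok, Either, Future};

fn main() {
    let use_fast_path = true;

    let fast = ok::<u32, ()>(1);
    let slow = ok::<u32, ()>(10).map(|x| x + 1);

    // `fast` and `slow` are different types, so an `if` could not return
    // them directly; wrapping them in Either gives both arms one type.
    let f = if use_fast_path { Either::A(fast) } else { Either::B(slow) };
    assert_eq!(f.wait(), Ok(1));
}
```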
@@ -0,0 +1,31 @@
//! Definition of the Empty combinator, a future that's never ready.

use core::marker;

use {Future, Poll, Async};

/// A future which is never resolved.
///
/// This future can be created with the `empty` function.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct Empty<T, E> {
    _data: marker::PhantomData<(T, E)>,
}

/// Creates a future which never resolves, representing a computation that never
/// finishes.
///
/// The returned future will forever return `Async::NotReady`.
pub fn empty<T, E>() -> Empty<T, E> {
    Empty { _data: marker::PhantomData }
}

impl<T, E> Future for Empty<T, E> {
    type Item = T;
    type Error = E;

    fn poll(&mut self) -> Poll<T, E> {
        Ok(Async::NotReady)
    }
}
@@ -0,0 +1,49 @@
use {Future, IntoFuture, Poll};
use core::fmt;
use super::chain::Chain;

/// Future for the `flatten` combinator, flattening a future-of-a-future to get just
/// the result of the final future.
///
/// This is created by the `Future::flatten` method.
#[must_use = "futures do nothing unless polled"]
pub struct Flatten<A> where A: Future, A::Item: IntoFuture {
    state: Chain<A, <A::Item as IntoFuture>::Future, ()>,
}

impl<A> fmt::Debug for Flatten<A>
    where A: Future + fmt::Debug,
          A::Item: IntoFuture,
          <<A as IntoFuture>::Item as IntoFuture>::Future: fmt::Debug,
{
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("Flatten")
            .field("state", &self.state)
            .finish()
    }
}

pub fn new<A>(future: A) -> Flatten<A>
    where A: Future,
          A::Item: IntoFuture,
{
    Flatten {
        state: Chain::new(future, ()),
    }
}

impl<A> Future for Flatten<A>
    where A: Future,
          A::Item: IntoFuture,
          <<A as Future>::Item as IntoFuture>::Error: From<<A as Future>::Error>
{
    type Item = <<A as Future>::Item as IntoFuture>::Item;
    type Error = <<A as Future>::Item as IntoFuture>::Error;

    fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
        self.state.poll(|a, ()| {
            let future = try!(a).into_future();
            Ok(Err(future))
        })
    }
}
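A quick illustrative use of the combinator above (constructors from this same
crate; `wait` blocks and is only for the example):

```rust
extern crate futures;

use futures::future::{ok, Future};

fn main() {
    // A future whose item is itself a future; `flatten` collapses the two
    // layers so we get the inner result directly.
    let nested = ok::<_, u32>(ok::<u32, u32>(3));
    assert_eq!(nested.flatten().wait(), Ok(3));
}
```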
@ -0,0 +1,99 @@
|
|||
use {Async, Future, Poll};
|
||||
use core::fmt;
|
||||
use stream::Stream;
|
||||
|
||||
/// Future for the `flatten_stream` combinator, flattening a
|
||||
/// future-of-a-stream to get just the result of the final stream as a stream.
|
||||
///
|
||||
/// This is created by the `Future::flatten_stream` method.
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct FlattenStream<F>
|
||||
where F: Future,
|
||||
<F as Future>::Item: Stream<Error=F::Error>,
|
||||
{
|
||||
state: State<F>
|
||||
}
|
||||
|
||||
impl<F> fmt::Debug for FlattenStream<F>
|
||||
where F: Future + fmt::Debug,
|
||||
<F as Future>::Item: Stream<Error=F::Error> + fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("FlattenStream")
|
||||
.field("state", &self.state)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new<F>(f: F) -> FlattenStream<F>
|
||||
where F: Future,
|
||||
<F as Future>::Item: Stream<Error=F::Error>,
|
||||
{
|
||||
FlattenStream {
|
||||
state: State::Future(f)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum State<F>
|
||||
where F: Future,
|
||||
<F as Future>::Item: Stream<Error=F::Error>,
|
||||
{
|
||||
// future is not yet called or called and not ready
|
||||
Future(F),
|
||||
// future resolved to Stream
|
||||
Stream(F::Item),
|
||||
// EOF after future resolved to error
|
||||
Eof,
|
||||
// after EOF after future resolved to error
|
||||
Done,
|
||||
}
|
||||
|
||||
impl<F> Stream for FlattenStream<F>
|
||||
where F: Future,
|
||||
<F as Future>::Item: Stream<Error=F::Error>,
|
||||
{
|
||||
type Item = <F::Item as Stream>::Item;
|
||||
type Error = <F::Item as Stream>::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
loop {
|
||||
let (next_state, ret_opt) = match self.state {
|
||||
State::Future(ref mut f) => {
|
||||
match f.poll() {
|
||||
Ok(Async::NotReady) => {
|
||||
// State is not changed, early return.
|
||||
return Ok(Async::NotReady)
|
||||
},
|
||||
Ok(Async::Ready(stream)) => {
|
||||
// Future resolved to stream.
|
||||
// We do not return, but poll that
|
||||
// stream in the next loop iteration.
|
||||
(State::Stream(stream), None)
|
||||
}
|
||||
Err(e) => {
|
||||
(State::Eof, Some(Err(e)))
|
||||
}
|
||||
}
|
||||
}
|
||||
State::Stream(ref mut s) => {
|
||||
// Just forward call to the stream,
|
||||
// do not track its state.
|
||||
return s.poll();
|
||||
}
|
||||
State::Eof => {
|
||||
(State::Done, Some(Ok(Async::Ready(None))))
|
||||
}
|
||||
State::Done => {
|
||||
panic!("poll called after eof");
|
||||
}
|
||||
};
|
||||
|
||||
self.state = next_state;
|
||||
if let Some(ret) = ret_opt {
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@@ -0,0 +1,35 @@
use core::marker::PhantomData;

use {Future, Poll, Async};

/// Future for the `from_err` combinator, changing the error type of a future.
///
/// This is created by the `Future::from_err` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct FromErr<A, E> where A: Future {
    future: A,
    f: PhantomData<E>
}

pub fn new<A, E>(future: A) -> FromErr<A, E>
    where A: Future
{
    FromErr {
        future: future,
        f: PhantomData
    }
}

impl<A: Future, E: From<A::Error>> Future for FromErr<A, E> {
    type Item = A::Item;
    type Error = E;

    fn poll(&mut self) -> Poll<A::Item, E> {
        let e = match self.future.poll() {
            Ok(Async::NotReady) => return Ok(Async::NotReady),
            other => other,
        };
        e.map_err(From::from)
    }
}
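A sketch of how this combinator is typically used (the `AppError` type below is
invented for the example): an application error type with a `From` impl for a
lower-level error can be slotted in with `from_err`.

```rust
extern crate futures;

use std::io;
use futures::future::{err, Future};

#[derive(Debug)]
enum AppError {
    Io(io::Error),
}

impl From<io::Error> for AppError {
    fn from(e: io::Error) -> AppError {
        AppError::Io(e)
    }
}

fn main() {
    let f = err::<u32, io::Error>(io::Error::new(io::ErrorKind::Other, "boom"));

    // `from_err` converts the io::Error into AppError via the From impl.
    let g = f.from_err::<AppError>();
    assert!(g.wait().is_err());
}
```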
@@ -0,0 +1,37 @@
use {Future, Poll, Async};

/// A future which "fuses" a future once it's been resolved.
///
/// Normally futures can behave unpredictably once they're used after a future
/// has been resolved, but `Fuse` is always defined to return `Async::NotReady`
/// from `poll` after it has resolved successfully or returned an error.
///
/// This is created by the `Future::fuse` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct Fuse<A: Future> {
    future: Option<A>,
}

pub fn new<A: Future>(f: A) -> Fuse<A> {
    Fuse {
        future: Some(f),
    }
}

impl<A: Future> Future for Fuse<A> {
    type Item = A::Item;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<A::Item, A::Error> {
        let res = self.future.as_mut().map(|f| f.poll());
        match res.unwrap_or(Ok(Async::NotReady)) {
            res @ Ok(Async::Ready(_)) |
            res @ Err(_) => {
                self.future = None;
                res
            }
            Ok(Async::NotReady) => Ok(Async::NotReady)
        }
    }
}
@@ -0,0 +1,36 @@
use {Async, Poll};
use Future;
use stream::Stream;

/// Future that forwards one element from the underlying future
/// (whether it is success or error) and emits EOF after that.
#[derive(Debug)]
pub struct IntoStream<F: Future> {
    future: Option<F>
}

pub fn new<F: Future>(future: F) -> IntoStream<F> {
    IntoStream {
        future: Some(future)
    }
}

impl<F: Future> Stream for IntoStream<F> {
    type Item = F::Item;
    type Error = F::Error;

    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        let ret = match self.future {
            None => return Ok(Async::Ready(None)),
            Some(ref mut future) => {
                match future.poll() {
                    Ok(Async::NotReady) => return Ok(Async::NotReady),
                    Err(e) => Err(e),
                    Ok(Async::Ready(r)) => Ok(r),
                }
            }
        };
        self.future = None;
        ret.map(|r| Async::Ready(Some(r)))
    }
}
@ -0,0 +1,172 @@
|
|||
#![allow(non_snake_case)]
|
||||
|
||||
use core::fmt;
|
||||
use core::mem;
|
||||
|
||||
use {Future, Poll, IntoFuture, Async};
|
||||
|
||||
macro_rules! generate {
|
||||
($(
|
||||
$(#[$doc:meta])*
|
||||
($Join:ident, $new:ident, <A, $($B:ident),*>),
|
||||
)*) => ($(
|
||||
$(#[$doc])*
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct $Join<A, $($B),*>
|
||||
where A: Future,
|
||||
$($B: Future<Error=A::Error>),*
|
||||
{
|
||||
a: MaybeDone<A>,
|
||||
$($B: MaybeDone<$B>,)*
|
||||
}
|
||||
|
||||
impl<A, $($B),*> fmt::Debug for $Join<A, $($B),*>
|
||||
where A: Future + fmt::Debug,
|
||||
A::Item: fmt::Debug,
|
||||
$(
|
||||
$B: Future<Error=A::Error> + fmt::Debug,
|
||||
$B::Item: fmt::Debug
|
||||
),*
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct(stringify!($Join))
|
||||
.field("a", &self.a)
|
||||
$(.field(stringify!($B), &self.$B))*
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn $new<A, $($B),*>(a: A, $($B: $B),*) -> $Join<A, $($B),*>
|
||||
where A: Future,
|
||||
$($B: Future<Error=A::Error>),*
|
||||
{
|
||||
$Join {
|
||||
a: MaybeDone::NotYet(a),
|
||||
$($B: MaybeDone::NotYet($B)),*
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, $($B),*> $Join<A, $($B),*>
|
||||
where A: Future,
|
||||
$($B: Future<Error=A::Error>),*
|
||||
{
|
||||
fn erase(&mut self) {
|
||||
self.a = MaybeDone::Gone;
|
||||
$(self.$B = MaybeDone::Gone;)*
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, $($B),*> Future for $Join<A, $($B),*>
|
||||
where A: Future,
|
||||
$($B: Future<Error=A::Error>),*
|
||||
{
|
||||
type Item = (A::Item, $($B::Item),*);
|
||||
type Error = A::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let mut all_done = match self.a.poll() {
|
||||
Ok(done) => done,
|
||||
Err(e) => {
|
||||
self.erase();
|
||||
return Err(e)
|
||||
}
|
||||
};
|
||||
$(
|
||||
all_done = match self.$B.poll() {
|
||||
Ok(done) => all_done && done,
|
||||
Err(e) => {
|
||||
self.erase();
|
||||
return Err(e)
|
||||
}
|
||||
};
|
||||
)*
|
||||
|
||||
if all_done {
|
||||
Ok(Async::Ready((self.a.take(), $(self.$B.take()),*)))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, $($B),*> IntoFuture for (A, $($B),*)
|
||||
where A: IntoFuture,
|
||||
$(
|
||||
$B: IntoFuture<Error=A::Error>
|
||||
),*
|
||||
{
|
||||
type Future = $Join<A::Future, $($B::Future),*>;
|
||||
type Item = (A::Item, $($B::Item),*);
|
||||
type Error = A::Error;
|
||||
|
||||
fn into_future(self) -> Self::Future {
|
||||
match self {
|
||||
(a, $($B),+) => {
|
||||
$new(
|
||||
IntoFuture::into_future(a),
|
||||
$(IntoFuture::into_future($B)),+
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
)*)
|
||||
}
|
||||
|
||||
generate! {
|
||||
/// Future for the `join` combinator, waiting for two futures to
|
||||
/// complete.
|
||||
///
|
||||
/// This is created by the `Future::join` method.
|
||||
(Join, new, <A, B>),
|
||||
|
||||
/// Future for the `join3` combinator, waiting for three futures to
|
||||
/// complete.
|
||||
///
|
||||
/// This is created by the `Future::join3` method.
|
||||
(Join3, new3, <A, B, C>),
|
||||
|
||||
/// Future for the `join4` combinator, waiting for four futures to
|
||||
/// complete.
|
||||
///
|
||||
/// This is created by the `Future::join4` method.
|
||||
(Join4, new4, <A, B, C, D>),
|
||||
|
||||
/// Future for the `join5` combinator, waiting for five futures to
|
||||
/// complete.
|
||||
///
|
||||
/// This is created by the `Future::join5` method.
|
||||
(Join5, new5, <A, B, C, D, E>),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum MaybeDone<A: Future> {
|
||||
NotYet(A),
|
||||
Done(A::Item),
|
||||
Gone,
|
||||
}
|
||||
|
||||
impl<A: Future> MaybeDone<A> {
|
||||
fn poll(&mut self) -> Result<bool, A::Error> {
|
||||
let res = match *self {
|
||||
MaybeDone::NotYet(ref mut a) => try!(a.poll()),
|
||||
MaybeDone::Done(_) => return Ok(true),
|
||||
MaybeDone::Gone => panic!("cannot poll Join twice"),
|
||||
};
|
||||
match res {
|
||||
Async::Ready(res) => {
|
||||
*self = MaybeDone::Done(res);
|
||||
Ok(true)
|
||||
}
|
||||
Async::NotReady => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn take(&mut self) -> A::Item {
|
||||
match mem::replace(self, MaybeDone::Gone) {
|
||||
MaybeDone::Done(a) => a,
|
||||
_ => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
//! Definition of the JoinAll combinator, waiting for all of a list of futures
|
||||
//! to finish.
|
||||
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
|
||||
use {Future, IntoFuture, Poll, Async};
|
||||
|
||||
#[derive(Debug)]
|
||||
enum ElemState<T> where T: Future {
|
||||
Pending(T),
|
||||
Done(T::Item),
|
||||
}
|
||||
|
||||
/// A future which takes a list of futures and resolves with a vector of the
|
||||
/// completed values.
|
||||
///
|
||||
/// This future is created with the `join_all` method.
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct JoinAll<I>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture,
|
||||
{
|
||||
elems: Vec<ElemState<<I::Item as IntoFuture>::Future>>,
|
||||
}
|
||||
|
||||
impl<I> fmt::Debug for JoinAll<I>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture,
|
||||
<<I as IntoIterator>::Item as IntoFuture>::Future: fmt::Debug,
|
||||
<<I as IntoIterator>::Item as IntoFuture>::Item: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("JoinAll")
|
||||
.field("elems", &self.elems)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a future which represents a collection of the results of the futures
|
||||
/// given.
|
||||
///
|
||||
/// The returned future will drive execution for all of its underlying futures,
|
||||
/// collecting the results into a destination `Vec<T>`. If any future returns
|
||||
/// an error then all other futures will be canceled and an error will be
|
||||
/// returned immediately. If all futures complete successfully, however, then
|
||||
/// the returned future will succeed with a `Vec` of all the successful results.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let f = join_all(vec![
|
||||
/// ok::<u32, u32>(1),
|
||||
/// ok::<u32, u32>(2),
|
||||
/// ok::<u32, u32>(3),
|
||||
/// ]);
|
||||
/// let f = f.map(|x| {
|
||||
/// assert_eq!(x, [1, 2, 3]);
|
||||
/// });
|
||||
///
|
||||
/// let f = join_all(vec![
|
||||
/// ok::<u32, u32>(1).boxed(),
|
||||
/// err::<u32, u32>(2).boxed(),
|
||||
/// ok::<u32, u32>(3).boxed(),
|
||||
/// ]);
|
||||
/// let f = f.then(|x| {
|
||||
/// assert_eq!(x, Err(2));
|
||||
/// x
|
||||
/// });
|
||||
/// ```
|
||||
pub fn join_all<I>(i: I) -> JoinAll<I>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture,
|
||||
{
|
||||
let elems = i.into_iter().map(|f| {
|
||||
ElemState::Pending(f.into_future())
|
||||
}).collect();
|
||||
JoinAll { elems: elems }
|
||||
}
|
||||
|
||||
impl<I> Future for JoinAll<I>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture,
|
||||
{
|
||||
type Item = Vec<<I::Item as IntoFuture>::Item>;
|
||||
type Error = <I::Item as IntoFuture>::Error;
|
||||
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let mut all_done = true;
|
||||
|
||||
for idx in 0 .. self.elems.len() {
|
||||
let done_val = match &mut self.elems[idx] {
|
||||
&mut ElemState::Pending(ref mut t) => {
|
||||
match t.poll() {
|
||||
Ok(Async::Ready(v)) => Ok(v),
|
||||
Ok(Async::NotReady) => {
|
||||
all_done = false;
|
||||
continue
|
||||
}
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
&mut ElemState::Done(ref mut _v) => continue,
|
||||
};
|
||||
|
||||
match done_val {
|
||||
Ok(v) => self.elems[idx] = ElemState::Done(v),
|
||||
Err(e) => {
|
||||
// On completion drop all our associated resources
|
||||
// ASAP.
|
||||
self.elems = Vec::new();
|
||||
return Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if all_done {
|
||||
let elems = mem::replace(&mut self.elems, Vec::new());
|
||||
let result = elems.into_iter().map(|e| {
|
||||
match e {
|
||||
ElemState::Done(t) => t,
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}).collect();
|
||||
Ok(Async::Ready(result))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,84 @@
|
|||
//! Definition of the Lazy combinator, deferring execution of a function until
|
||||
//! the future is polled.
|
||||
|
||||
use core::mem;
|
||||
|
||||
use {Future, IntoFuture, Poll};
|
||||
|
||||
/// A future which defers creation of the actual future until a callback is
|
||||
/// scheduled.
|
||||
///
|
||||
/// This is created by the `lazy` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Lazy<F, R: IntoFuture> {
|
||||
inner: _Lazy<F, R::Future>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum _Lazy<F, R> {
|
||||
First(F),
|
||||
Second(R),
|
||||
Moved,
|
||||
}
|
||||
|
||||
/// Creates a new future which will eventually be the same as the one created
|
||||
/// by the closure provided.
|
||||
///
|
||||
/// The provided closure is only run once the future has a callback scheduled
|
||||
/// on it, otherwise the callback never runs. Once run, however, this future is
|
||||
/// the same as the one the closure creates.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let a = lazy(|| ok::<u32, u32>(1));
|
||||
///
|
||||
/// let b = lazy(|| -> FutureResult<u32, u32> {
|
||||
/// panic!("oh no!")
|
||||
/// });
|
||||
/// drop(b); // closure is never run
|
||||
/// ```
|
||||
pub fn lazy<F, R>(f: F) -> Lazy<F, R>
|
||||
where F: FnOnce() -> R,
|
||||
R: IntoFuture
|
||||
{
|
||||
Lazy {
|
||||
inner: _Lazy::First(f),
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, R> Lazy<F, R>
|
||||
where F: FnOnce() -> R,
|
||||
R: IntoFuture,
|
||||
{
|
||||
fn get(&mut self) -> &mut R::Future {
|
||||
match self.inner {
|
||||
_Lazy::First(_) => {}
|
||||
_Lazy::Second(ref mut f) => return f,
|
||||
_Lazy::Moved => panic!(), // can only happen if `f()` panics
|
||||
}
|
||||
match mem::replace(&mut self.inner, _Lazy::Moved) {
|
||||
_Lazy::First(f) => self.inner = _Lazy::Second(f().into_future()),
|
||||
_ => panic!(), // we already found First
|
||||
}
|
||||
match self.inner {
|
||||
_Lazy::Second(ref mut f) => f,
|
||||
_ => panic!(), // we just stored Second
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F, R> Future for Lazy<F, R>
|
||||
where F: FnOnce() -> R,
|
||||
R: IntoFuture,
|
||||
{
|
||||
type Item = R::Item;
|
||||
type Error = R::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<R::Item, R::Error> {
|
||||
self.get().poll()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,99 @@
|
|||
//! Definition of the `LoopFn` combinator, implementing `Future` loops.
|
||||
|
||||
use {Async, Future, IntoFuture, Poll};
|
||||
|
||||
/// The status of a `loop_fn` loop.
|
||||
#[derive(Debug)]
|
||||
pub enum Loop<T, S> {
|
||||
/// Indicates that the loop has completed with output `T`.
|
||||
Break(T),
|
||||
|
||||
/// Indicates that the loop function should be called again with input
|
||||
/// state `S`.
|
||||
Continue(S),
|
||||
}
|
||||
|
||||
/// A future implementing a tail-recursive loop.
|
||||
///
|
||||
/// Created by the `loop_fn` function.
|
||||
#[derive(Debug)]
|
||||
pub struct LoopFn<A, F> where A: IntoFuture {
|
||||
future: A::Future,
|
||||
func: F,
|
||||
}
|
||||
|
||||
/// Creates a new future implementing a tail-recursive loop.
|
||||
///
|
||||
/// The loop function is immediately called with `initial_state` and should
|
||||
/// return a value that can be converted to a future. On successful completion,
|
||||
/// this future should output a `Loop<T, S>` to indicate the status of the
|
||||
/// loop.
|
||||
///
|
||||
/// `Loop::Break(T)` halts the loop and completes the future with output `T`.
|
||||
///
|
||||
/// `Loop::Continue(S)` reinvokes the loop function with state `S`. The returned
|
||||
/// future will be subsequently polled for a new `Loop<T, S>` value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::{ok, loop_fn, Future, FutureResult, Loop};
|
||||
/// use std::io::Error;
|
||||
///
|
||||
/// struct Client {
|
||||
/// ping_count: u8,
|
||||
/// }
|
||||
///
|
||||
/// impl Client {
|
||||
/// fn new() -> Self {
|
||||
/// Client { ping_count: 0 }
|
||||
/// }
|
||||
///
|
||||
/// fn send_ping(self) -> FutureResult<Self, Error> {
|
||||
/// ok(Client { ping_count: self.ping_count + 1 })
|
||||
/// }
|
||||
///
|
||||
/// fn receive_pong(self) -> FutureResult<(Self, bool), Error> {
|
||||
/// let done = self.ping_count >= 5;
|
||||
/// ok((self, done))
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let ping_til_done = loop_fn(Client::new(), |client| {
|
||||
/// client.send_ping()
|
||||
/// .and_then(|client| client.receive_pong())
|
||||
/// .and_then(|(client, done)| {
|
||||
/// if done {
|
||||
/// Ok(Loop::Break(client))
|
||||
/// } else {
|
||||
/// Ok(Loop::Continue(client))
|
||||
/// }
|
||||
/// })
|
||||
/// });
|
||||
/// ```
|
||||
pub fn loop_fn<S, T, A, F>(initial_state: S, mut func: F) -> LoopFn<A, F>
|
||||
where F: FnMut(S) -> A,
|
||||
A: IntoFuture<Item = Loop<T, S>>,
|
||||
{
|
||||
LoopFn {
|
||||
future: func(initial_state).into_future(),
|
||||
func: func,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, T, A, F> Future for LoopFn<A, F>
|
||||
where F: FnMut(S) -> A,
|
||||
A: IntoFuture<Item = Loop<T, S>>,
|
||||
{
|
||||
type Item = T;
|
||||
type Error = A::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
loop {
|
||||
match try_ready!(self.future.poll()) {
|
||||
Loop::Break(x) => return Ok(Async::Ready(x)),
|
||||
Loop::Continue(s) => self.future = (self.func)(s).into_future(),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,38 @@
use {Future, Poll, Async};

/// Future for the `map` combinator, changing the type of a future.
///
/// This is created by the `Future::map` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct Map<A, F> where A: Future {
    future: A,
    f: Option<F>,
}

pub fn new<A, F>(future: A, f: F) -> Map<A, F>
    where A: Future,
{
    Map {
        future: future,
        f: Some(f),
    }
}

impl<U, A, F> Future for Map<A, F>
    where A: Future,
          F: FnOnce(A::Item) -> U,
{
    type Item = U;
    type Error = A::Error;

    fn poll(&mut self) -> Poll<U, A::Error> {
        let e = match self.future.poll() {
            Ok(Async::NotReady) => return Ok(Async::NotReady),
            Ok(Async::Ready(e)) => Ok(e),
            Err(e) => Err(e),
        };
        e.map(self.f.take().expect("cannot poll Map twice"))
            .map(Async::Ready)
    }
}
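A one-line usage sketch (constructors and the blocking `wait` call are from
this same crate):

```rust
extern crate futures;

use futures::future::{ok, Future};

fn main() {
    // `map` transforms the success value; the error type is untouched.
    let f = ok::<u32, ()>(5).map(|x| x.to_string());
    assert_eq!(f.wait(), Ok("5".to_string()));
}
```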
@@ -0,0 +1,36 @@
use {Future, Poll, Async};

/// Future for the `map_err` combinator, changing the error type of a future.
///
/// This is created by the `Future::map_err` method.
#[derive(Debug)]
#[must_use = "futures do nothing unless polled"]
pub struct MapErr<A, F> where A: Future {
    future: A,
    f: Option<F>,
}

pub fn new<A, F>(future: A, f: F) -> MapErr<A, F>
    where A: Future
{
    MapErr {
        future: future,
        f: Some(f),
    }
}

impl<U, A, F> Future for MapErr<A, F>
    where A: Future,
          F: FnOnce(A::Error) -> U,
{
    type Item = A::Item;
    type Error = U;

    fn poll(&mut self) -> Poll<A::Item, U> {
        let e = match self.future.poll() {
            Ok(Async::NotReady) => return Ok(Async::NotReady),
            other => other,
        };
        e.map_err(self.f.take().expect("cannot poll MapErr twice"))
    }
}
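And the mirror-image sketch for `map_err`, transforming only the error value:

```rust
extern crate futures;

use futures::future::{err, Future};

fn main() {
    let f = err::<(), u32>(5).map_err(|e| format!("code {}", e));
    assert_eq!(f.wait(), Err("code 5".to_string()));
}
```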
@ -0,0 +1,959 @@
|
|||
//! Futures
|
||||
//!
|
||||
//! This module contains the `Future` trait and a number of adaptors for this
|
||||
//! trait. See the crate docs, and the docs for `Future`, for full detail.
|
||||
|
||||
use core::result;
|
||||
|
||||
// Primitive futures
|
||||
mod empty;
|
||||
mod lazy;
|
||||
mod poll_fn;
|
||||
#[path = "result.rs"]
|
||||
mod result_;
|
||||
mod loop_fn;
|
||||
mod option;
|
||||
pub use self::empty::{empty, Empty};
|
||||
pub use self::lazy::{lazy, Lazy};
|
||||
pub use self::poll_fn::{poll_fn, PollFn};
|
||||
pub use self::result_::{result, ok, err, FutureResult};
|
||||
pub use self::loop_fn::{loop_fn, Loop, LoopFn};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use `ok` instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::{ok as finished, Ok as Finished};
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use `err` instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::{err as failed, Err as Failed};
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use `result` instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::{result as done, FutureResult as Done};
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::{FutureResult as Ok};
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.7", note = "use `FutureResult` instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::{FutureResult as Err};
|
||||
|
||||
// combinators
|
||||
mod and_then;
|
||||
mod flatten;
|
||||
mod flatten_stream;
|
||||
mod fuse;
|
||||
mod into_stream;
|
||||
mod join;
|
||||
mod map;
|
||||
mod map_err;
|
||||
mod from_err;
|
||||
mod or_else;
|
||||
mod select;
|
||||
mod select2;
|
||||
mod then;
|
||||
mod either;
|
||||
|
||||
// impl details
|
||||
mod chain;
|
||||
|
||||
pub use self::and_then::AndThen;
|
||||
pub use self::flatten::Flatten;
|
||||
pub use self::flatten_stream::FlattenStream;
|
||||
pub use self::fuse::Fuse;
|
||||
pub use self::into_stream::IntoStream;
|
||||
pub use self::join::{Join, Join3, Join4, Join5};
|
||||
pub use self::map::Map;
|
||||
pub use self::map_err::MapErr;
|
||||
pub use self::from_err::FromErr;
|
||||
pub use self::or_else::OrElse;
|
||||
pub use self::select::{Select, SelectNext};
|
||||
pub use self::select2::Select2;
|
||||
pub use self::then::Then;
|
||||
pub use self::either::Either;
|
||||
|
||||
if_std! {
|
||||
mod catch_unwind;
|
||||
mod join_all;
|
||||
mod select_all;
|
||||
mod select_ok;
|
||||
mod shared;
|
||||
pub use self::catch_unwind::CatchUnwind;
|
||||
pub use self::join_all::{join_all, JoinAll};
|
||||
pub use self::select_all::{SelectAll, SelectAllNext, select_all};
|
||||
pub use self::select_ok::{SelectOk, select_ok};
|
||||
pub use self::shared::{Shared, SharedItem, SharedError};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use join_all instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::join_all::join_all as collect;
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use JoinAll instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::join_all::JoinAll as Collect;
|
||||
|
||||
/// A type alias for `Box<Future + Send>`
|
||||
pub type BoxFuture<T, E> = ::std::boxed::Box<Future<Item = T, Error = E> + Send>;
|
||||
|
||||
impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> {
|
||||
type Item = F::Item;
|
||||
type Error = F::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
(**self).poll()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
use {Poll, stream};
|
||||
|
||||
/// Trait for types which are a placeholder of a value that may become
|
||||
/// available at some later point in time.
|
||||
///
|
||||
/// In addition to the documentation here you can also find more information
|
||||
/// about futures [online] at [https://tokio.rs](https://tokio.rs)
|
||||
///
|
||||
/// [online]: https://tokio.rs/docs/getting-started/futures/
|
||||
///
|
||||
/// Futures are used to provide a sentinel through which a value can be
|
||||
/// referenced. They crucially allow chaining and composing operations through
|
||||
/// consumption which allows expressing entire trees of computation as one
|
||||
/// sentinel value.
|
||||
///
|
||||
/// The ergonomics and implementation of the `Future` trait are very similar to
|
||||
/// the `Iterator` trait in that there is just one method you need
|
||||
/// to implement, but you get a whole lot of others for free as a result.
|
||||
///
|
||||
/// # The `poll` method
|
||||
///
|
||||
/// The core method of future, `poll`, is used to attempt to generate the value
|
||||
/// of a `Future`. This method *does not block* but is allowed to inform the
|
||||
/// caller that the value is not ready yet. Implementations of `poll` may
|
||||
/// themselves do work to generate the value, but it's guaranteed that this will
|
||||
/// never block the calling thread.
|
||||
///
|
||||
/// A key aspect of this method is that if the value is not yet available the
|
||||
/// current task is scheduled to receive a notification when it's later ready to
|
||||
/// be made available. This follows what's typically known as a "readiness" or
|
||||
/// "pull" model where values are pulled out of futures on demand, and
|
||||
/// otherwise a task is notified when a value might be ready to get pulled out.
|
||||
///
|
||||
/// The `poll` method is not intended to be called in general, but rather is
|
||||
/// typically called in the context of a "task" which drives a future to
|
||||
/// completion. For more information on this see the `task` module.
|
||||
///
|
||||
/// More information about the details of `poll` and the nitty-gritty of tasks
|
||||
/// can be [found online at tokio.rs][poll-dox].
|
||||
///
|
||||
/// [poll-dox]: https://tokio.rs/docs/going-deeper/futures-model/
|
||||
///
|
||||
/// # Combinators
|
||||
///
|
||||
/// Like iterators, futures provide a large number of combinators to work with
|
||||
/// futures to express computations in a much more natural method than
|
||||
/// scheduling a number of callbacks. For example the `map` method can change
|
||||
/// a `Future<Item=T>` to a `Future<Item=U>` or an `and_then` combinator could
|
||||
/// create a future after the first one is done and only be resolved when the
|
||||
/// second is done.
|
||||
///
|
||||
/// Combinators act very similarly to the methods on the `Iterator` trait itself
|
||||
/// or those on `Option` and `Result`. Like with iterators, the combinators are
|
||||
/// zero-cost and don't impose any extra layers of indirection you wouldn't
|
||||
/// otherwise have to write down.
|
||||
///
|
||||
/// More information about combinators can be found [on tokio.rs].
|
||||
///
|
||||
/// [on tokio.rs]: https://tokio.rs/docs/going-deeper/futures-mechanics/
|
||||
pub trait Future {
|
||||
/// The type of value that this future will resolve with if it is
|
||||
/// successful.
|
||||
type Item;
|
||||
|
||||
/// The type of error that this future will resolve with if it fails in a
|
||||
/// normal fashion.
|
||||
type Error;
|
||||
|
||||
/// Query this future to see if its value has become available, registering
|
||||
/// interest if it is not.
|
||||
///
|
||||
/// This function will check the internal state of the future and assess
|
||||
/// whether the value is ready to be produced. Implementors of this function
|
||||
/// should ensure that a call to this **never blocks** as event loops may
|
||||
/// not work properly otherwise.
|
||||
///
|
||||
/// When a future is not ready yet, the `Async::NotReady` value will be
|
||||
/// returned. In this situation the future will *also* register interest of
|
||||
/// the current task in the value being produced. This is done by calling
|
||||
/// `task::park` to retrieve a handle to the current `Task`. When the future
|
||||
/// is then ready to make progress (e.g. it should be `poll`ed again) the
|
||||
/// `unpark` method is called on the `Task`.
|
||||
///
|
||||
/// More information about the details of `poll` and the nitty-gritty of
|
||||
/// tasks can be [found online at tokio.rs][poll-dox].
|
||||
///
|
||||
/// [poll-dox]: https://tokio.rs/docs/going-deeper/futures-model/
|
||||
///
|
||||
/// # Runtime characteristics
|
||||
///
|
||||
/// This function, `poll`, is the primary method for 'making progress'
|
||||
/// within a tree of futures. For example this method will be called
|
||||
/// repeatedly as the internal state machine makes its various transitions.
|
||||
/// Executors are responsible for ensuring that this function is called in
|
||||
/// the right location (e.g. always on an I/O thread or not). Unless it is
|
||||
/// otherwise arranged to be so, it should be ensured that **implementations
|
||||
/// of this function finish very quickly**.
|
||||
///
|
||||
/// Returning quickly prevents unnecessarily clogging up threads and/or
|
||||
/// event loops while a `poll` function call, for example, takes up compute
|
||||
/// resources to perform some expensive computation. If it is known ahead
|
||||
/// of time that a call to `poll` may end up taking awhile, the work should
|
||||
/// be offloaded to a thread pool (or something similar) to ensure that
|
||||
/// `poll` can return quickly.
|
||||
///
|
||||
/// Note that the `poll` function is typically not called repeatedly in a
|
||||
/// loop for futures, but only whenever the future itself is ready. If
|
||||
/// you're familiar with the `poll(2)` or `select(2)` syscalls on Unix
|
||||
/// it's worth noting that futures typically do *not* suffer the same
|
||||
/// problems of "all wakeups must poll all events". Futures have enough
|
||||
/// support for only polling futures which cause a wakeup.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// This function returns `Async::NotReady` if the future is not ready yet,
|
||||
/// `Err` if the future is finished but resolved to an error, or
|
||||
/// `Async::Ready` with the result of this future if it's finished
|
||||
/// successfully. Once a future has finished it is considered a contract
|
||||
/// error to continue polling the future.
|
||||
///
|
||||
/// If `NotReady` is returned, then the future will internally register
|
||||
/// interest in the value being produced for the current task (through
|
||||
/// `task::park`). In other words, the current task will receive a
|
||||
/// notification (through the `unpark` method) once the value is ready to be
|
||||
/// produced or the future can make progress.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Once a future has completed (returned `Ready` or `Err` from `poll`),
|
||||
/// then any future calls to `poll` may panic, block forever, or otherwise
|
||||
/// cause wrong behavior. The `Future` trait itself provides no guarantees
|
||||
/// about the behavior of `poll` after a future has completed.
|
||||
///
|
||||
/// Callers who may call `poll` too many times may want to consider using
|
||||
/// the `fuse` adaptor which defines the behavior of `poll`, but comes with
|
||||
/// a little bit of extra cost.
|
||||
///
|
||||
/// Additionally, calls to `poll` must always be made from within the
|
||||
/// context of a task. If a current task is not set then this method will
|
||||
/// likely panic.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This future may have failed to finish the computation, in which case
|
||||
/// the `Err` variant will be returned with an appropriate payload of an
|
||||
/// error.
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error>;
|
||||
|
||||
/// Block the current thread until this future is resolved.
|
||||
///
|
||||
/// This method will consume ownership of this future, driving it to
|
||||
/// completion via `poll` and blocking the current thread while it's waiting
|
||||
/// for the value to become available. Once the future is resolved the
|
||||
/// result of this future is returned.
|
||||
///
|
||||
/// > **Note:** This method is not appropriate to call on event loops or
|
||||
/// > similar I/O situations because it will prevent the event
|
||||
/// > loop from making progress (this blocks the thread). This
|
||||
/// > method should only be called when it's guaranteed that the
|
||||
/// > blocking work associated with this future will be completed
|
||||
/// > by another thread.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function does not attempt to catch panics. If the `poll` function
|
||||
/// of this future panics, panics will be propagated to the caller.
|
||||
#[cfg(feature = "use_std")]
|
||||
fn wait(self) -> result::Result<Self::Item, Self::Error>
|
||||
where Self: Sized
|
||||
{
|
||||
::executor::spawn(self).wait_future()
|
||||
}
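    // A usage sketch for `wait` (illustrative only; it blocks the calling
    // thread until the future resolves):
    //
    //     use futures::future::ok;
    //
    //     let value = ok::<u32, u32>(3).wait();
    //     assert_eq!(Ok(3), value);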
|
||||
|
||||
/// Convenience function for turning this future into a trait object which
|
||||
/// is also `Send`.
|
||||
///
|
||||
/// This simply avoids the need to write `Box::new` and can often help with
|
||||
/// type inference as well by always returning a trait object. Note that
|
||||
/// this method requires the `Send` bound and returns a `BoxFuture`, which
|
||||
/// also encodes this. If you'd like to create a `Box<Future>` without the
|
||||
/// `Send` bound, then the `Box::new` function can be used instead.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let a: BoxFuture<i32, i32> = result(Ok(1)).boxed();
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
fn boxed(self) -> BoxFuture<Self::Item, Self::Error>
|
||||
where Self: Sized + Send + 'static
|
||||
{
|
||||
::std::boxed::Box::new(self)
|
||||
}
|
||||
|
||||
/// Map this future's result to a different type, returning a new future of
|
||||
/// the resulting type.
|
||||
///
|
||||
/// This function is similar to the `Option::map` or `Iterator::map` where
|
||||
/// it will change the type of the underlying future. This is useful to
|
||||
/// chain along a computation once a future has been resolved.
|
||||
///
|
||||
/// The closure provided will only be called if this future is resolved
|
||||
/// successfully. If this future returns an error, panics, or is dropped,
|
||||
/// then the closure provided will never be invoked.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it, similar to the existing `map` methods in the
|
||||
/// standard library.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_1.map(|x| x + 3);
|
||||
/// ```
|
||||
fn map<F, U>(self, f: F) -> Map<Self, F>
|
||||
where F: FnOnce(Self::Item) -> U,
|
||||
Self: Sized,
|
||||
{
|
||||
assert_future::<U, Self::Error, _>(map::new(self, f))
|
||||
}
|
||||
|
||||
/// Map this future's error to a different error, returning a new future.
|
||||
///
|
||||
/// This function is similar to the `Result::map_err` where it will change
|
||||
/// the error type of the underlying future. This is useful for example to
|
||||
/// ensure that futures have the same error type when used with combinators
|
||||
/// like `select` and `join`.
|
||||
///
|
||||
/// The closure provided will only be called if this future is resolved
|
||||
/// with an error. If this future returns a success, panics, or is
|
||||
/// dropped, then the closure provided will never be invoked.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_4 = future_of_err_1.map_err(|x| x + 3);
|
||||
/// ```
|
||||
fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
|
||||
where F: FnOnce(Self::Error) -> E,
|
||||
Self: Sized,
|
||||
{
|
||||
assert_future::<Self::Item, E, _>(map_err::new(self, f))
|
||||
}
|
||||
|
||||
|
||||
|
||||
/// Map this future's error to any error implementing `From` for
|
||||
/// this future's `Error`, returning a new future.
|
||||
///
|
||||
/// This function does for futures what `try!` does for `Result`,
|
||||
/// by letting the compiler infer the type of the resulting error.
|
||||
/// Just as `map_err` above, this is useful for example to ensure
|
||||
/// that futures have the same error type when used with
|
||||
/// combinators like `select` and `join`.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_4 = future_of_err_1.from_err::<u32>();
|
||||
/// ```
|
||||
fn from_err<E:From<Self::Error>>(self) -> FromErr<Self, E>
|
||||
where Self: Sized,
|
||||
{
|
||||
assert_future::<Self::Item, E, _>(from_err::new(self))
|
||||
}
|
||||
|
||||
/// Chain on a computation for when a future finished, passing the result of
|
||||
/// the future to the provided closure `f`.
|
||||
///
|
||||
/// This function can be used to ensure a computation runs regardless of
|
||||
/// the conclusion of the future. The closure provided will be yielded a
|
||||
/// `Result` once the future is complete.
|
||||
///
|
||||
/// The returned value of the closure must implement the `IntoFuture` trait
|
||||
/// and can represent some more work to be done before the composed future
|
||||
/// is finished. Note that the `Result` type implements the `IntoFuture`
|
||||
/// trait so it is possible to simply alter the `Result` yielded to the
|
||||
/// closure and return it.
|
||||
///
|
||||
/// If this future is dropped or panics then the closure `f` will not be
|
||||
/// run.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_1.then(|x| {
|
||||
/// x.map(|y| y + 3)
|
||||
/// });
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_err_1.then(|x| {
|
||||
/// match x {
|
||||
/// Ok(_) => panic!("expected an error"),
|
||||
/// Err(y) => ok::<u32, u32>(y + 3),
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
fn then<F, B>(self, f: F) -> Then<Self, B, F>
|
||||
where F: FnOnce(result::Result<Self::Item, Self::Error>) -> B,
|
||||
B: IntoFuture,
|
||||
Self: Sized,
|
||||
{
|
||||
assert_future::<B::Item, B::Error, _>(then::new(self, f))
|
||||
}
|
||||
|
||||
/// Execute another future after this one has resolved successfully.
|
||||
///
|
||||
/// This function can be used to chain two futures together and ensure that
|
||||
/// the final future isn't resolved until both have finished. The closure
|
||||
/// provided is yielded the successful result of this future and returns
|
||||
/// another value which can be converted into a future.
|
||||
///
|
||||
/// Note that because `Result` implements the `IntoFuture` trait this method
|
||||
/// can also be useful for chaining fallible and serial computations onto
|
||||
/// the end of one future.
|
||||
///
|
||||
/// If this future is dropped, panics, or completes with an error then the
|
||||
/// provided closure `f` is never called.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_1.and_then(|x| {
|
||||
/// Ok(x + 3)
|
||||
/// });
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// future_of_err_1.and_then(|_| -> FutureResult<u32, u32> {
|
||||
/// panic!("should not be called in case of an error");
|
||||
/// });
|
||||
/// ```
|
||||
fn and_then<F, B>(self, f: F) -> AndThen<Self, B, F>
|
||||
where F: FnOnce(Self::Item) -> B,
|
||||
B: IntoFuture<Error = Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
assert_future::<B::Item, Self::Error, _>(and_then::new(self, f))
|
||||
}
|
||||
|
||||
/// Execute another future if this one resolves with an error.
|
||||
///
|
||||
/// Return a future that passes along this future's value if it succeeds,
|
||||
/// and otherwise passes the error to the closure `f` and waits for the
|
||||
/// future it returns. The closure may also simply return a value that can
|
||||
/// be converted into a future.
|
||||
///
|
||||
/// Note that because `Result` implements the `IntoFuture` trait this method
|
||||
/// can also be useful for chaining together fallback computations, where
|
||||
/// when one fails, the next is attempted.
|
||||
///
|
||||
/// If this future is dropped, panics, or completes successfully then the
|
||||
/// provided closure `f` is never called.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
|
||||
/// Ok(x + 3)
|
||||
/// });
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// future_of_1.or_else(|_| -> FutureResult<u32, u32> {
|
||||
/// panic!("should not be called in case of success");
|
||||
/// });
|
||||
/// ```
|
||||
fn or_else<F, B>(self, f: F) -> OrElse<Self, B, F>
|
||||
where F: FnOnce(Self::Error) -> B,
|
||||
B: IntoFuture<Item = Self::Item>,
|
||||
Self: Sized,
|
||||
{
|
||||
assert_future::<Self::Item, B::Error, _>(or_else::new(self, f))
|
||||
}
|
||||
|
||||
/// Waits for either one of two futures to complete.
|
||||
///
|
||||
/// This function will return a new future which awaits for either this or
|
||||
/// the `other` future to complete. The returned future will finish with
|
||||
/// both the value resolved and a future representing the completion of the
|
||||
/// other work. Both futures must have the same item and error type.
|
||||
///
|
||||
/// Note that this function consumes the receiving futures and returns a
|
||||
/// wrapped version of them.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// // A poor-man's join implemented on top of select
|
||||
///
|
||||
/// fn join<A>(a: A, b: A) -> BoxFuture<(u32, u32), u32>
|
||||
/// where A: Future<Item = u32, Error = u32> + Send + 'static,
|
||||
/// {
|
||||
/// a.select(b).then(|res| {
|
||||
/// match res {
|
||||
/// Ok((a, b)) => b.map(move |b| (a, b)).boxed(),
|
||||
/// Err((a, _)) => err(a).boxed(),
|
||||
/// }
|
||||
/// }).boxed()
|
||||
/// }
|
||||
/// ```
|
||||
fn select<B>(self, other: B) -> Select<Self, B::Future>
|
||||
where B: IntoFuture<Item=Self::Item, Error=Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
let f = select::new(self, other.into_future());
|
||||
assert_future::<(Self::Item, SelectNext<Self, B::Future>),
|
||||
(Self::Error, SelectNext<Self, B::Future>), _>(f)
|
||||
}
|
||||
|
||||
/// Waits for either one of two differently-typed futures to complete.
|
||||
///
|
||||
/// This function will return a new future which awaits for either this or
|
||||
/// the `other` future to complete. The returned future will finish with
|
||||
/// both the value resolved and a future representing the completion of the
|
||||
/// other work.
|
||||
///
|
||||
/// Note that this function consumes the receiving futures and returns a
|
||||
/// wrapped version of them.
|
||||
///
|
||||
/// Also note that if both this and the second future have the same
|
||||
/// success/error type you can use the `Either::split` method to
|
||||
/// conveniently extract out the value at the end.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// // A poor-man's join implemented on top of select2
|
||||
///
|
||||
/// fn join<A, B, E>(a: A, b: B) -> BoxFuture<(A::Item, B::Item), E>
|
||||
/// where A: Future<Error = E> + Send + 'static,
|
||||
/// B: Future<Error = E> + Send + 'static,
|
||||
/// A::Item: Send, B::Item: Send, E: Send + 'static,
|
||||
/// {
|
||||
/// a.select2(b).then(|res| {
|
||||
/// match res {
|
||||
/// Ok(Either::A((x, b))) => b.map(move |y| (x, y)).boxed(),
|
||||
/// Ok(Either::B((y, a))) => a.map(move |x| (x, y)).boxed(),
|
||||
/// Err(Either::A((e, _))) => err(e).boxed(),
|
||||
/// Err(Either::B((e, _))) => err(e).boxed(),
|
||||
/// }
|
||||
/// }).boxed()
|
||||
/// }
|
||||
/// ```
|
||||
fn select2<B>(self, other: B) -> Select2<Self, B::Future>
|
||||
where B: IntoFuture, Self: Sized
|
||||
{
|
||||
select2::new(self, other.into_future())
|
||||
}
|
||||
|
||||
/// Joins the result of two futures, waiting for them both to complete.
|
||||
///
|
||||
/// This function will return a new future which awaits both this and the
|
||||
/// `other` future to complete. The returned future will finish with a tuple
|
||||
/// of both results.
|
||||
///
|
||||
/// Both futures must have the same error type, and if either finishes with
|
||||
/// an error then the other will be dropped and that error will be
|
||||
/// returned.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let a = ok::<u32, u32>(1);
|
||||
/// let b = ok::<u32, u32>(2);
|
||||
/// let pair = a.join(b);
|
||||
///
|
||||
/// pair.map(|(a, b)| {
|
||||
/// assert_eq!(a, 1);
|
||||
/// assert_eq!(b, 2);
|
||||
/// });
|
||||
/// ```
|
||||
fn join<B>(self, other: B) -> Join<Self, B::Future>
|
||||
where B: IntoFuture<Error=Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
let f = join::new(self, other.into_future());
|
||||
assert_future::<(Self::Item, B::Item), Self::Error, _>(f)
|
||||
}
|
||||
|
||||
/// Same as `join`, but with more futures.
|
||||
fn join3<B, C>(self, b: B, c: C) -> Join3<Self, B::Future, C::Future>
|
||||
where B: IntoFuture<Error=Self::Error>,
|
||||
C: IntoFuture<Error=Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
join::new3(self, b.into_future(), c.into_future())
|
||||
}
|
||||
|
||||
/// Same as `join`, but with more futures.
|
||||
fn join4<B, C, D>(self, b: B, c: C, d: D)
|
||||
-> Join4<Self, B::Future, C::Future, D::Future>
|
||||
where B: IntoFuture<Error=Self::Error>,
|
||||
C: IntoFuture<Error=Self::Error>,
|
||||
D: IntoFuture<Error=Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
join::new4(self, b.into_future(), c.into_future(), d.into_future())
|
||||
}
|
||||
|
||||
/// Same as `join`, but with more futures.
|
||||
fn join5<B, C, D, E>(self, b: B, c: C, d: D, e: E)
|
||||
-> Join5<Self, B::Future, C::Future, D::Future, E::Future>
|
||||
where B: IntoFuture<Error=Self::Error>,
|
||||
C: IntoFuture<Error=Self::Error>,
|
||||
D: IntoFuture<Error=Self::Error>,
|
||||
E: IntoFuture<Error=Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
join::new5(self, b.into_future(), c.into_future(), d.into_future(),
|
||||
e.into_future())
|
||||
}
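    // A usage sketch for the `join3`/`join4`/`join5` family (illustrative
    // only):
    //
    //     use futures::future::*;
    //
    //     let future = ok::<u32, u32>(1).join3(ok(2), ok(3));
    //     assert_eq!(Ok((1, 2, 3)), future.wait());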
|
||||
|
||||
/// Convert this future into a single element stream.
|
||||
///
|
||||
/// The returned stream contains a single success value if this future
|
||||
/// resolves successfully, or a single error if it resolves to an error.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::{Stream, Async};
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future = ok::<_, bool>(17);
|
||||
/// let mut stream = future.into_stream();
|
||||
/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
///
|
||||
/// let future = err::<bool, _>(19);
|
||||
/// let mut stream = future.into_stream();
|
||||
/// assert_eq!(Err(19), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
fn into_stream(self) -> IntoStream<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
into_stream::new(self)
|
||||
}
|
||||
|
||||
/// Flatten the execution of this future when the successful result of this
|
||||
/// future is itself another future.
|
||||
///
|
||||
/// This can be useful when combining futures together to flatten the
|
||||
/// computation out to the final result. This method can only be called
|
||||
/// when the successful result of this future itself implements the
|
||||
/// `IntoFuture` trait and the error can be created from this future's error
|
||||
/// type.
|
||||
///
|
||||
/// This method is roughly equivalent to `self.and_then(|x| x)`.
|
||||
///
|
||||
/// Note that this function consumes the receiving future and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_a_future = ok::<_, u32>(ok::<u32, u32>(1));
|
||||
/// let future_of_1 = future_of_a_future.flatten();
|
||||
/// ```
|
||||
fn flatten(self) -> Flatten<Self>
|
||||
where Self::Item: IntoFuture,
|
||||
<<Self as Future>::Item as IntoFuture>::Error:
|
||||
From<<Self as Future>::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
let f = flatten::new(self);
|
||||
assert_future::<<<Self as Future>::Item as IntoFuture>::Item,
|
||||
<<Self as Future>::Item as IntoFuture>::Error,
|
||||
_>(f)
|
||||
}
|
||||
|
||||
/// Flatten the execution of this future when the successful result of this
|
||||
/// future is a stream.
|
||||
///
|
||||
/// This can be useful when stream initialization is deferred, and it is
|
||||
/// convenient to work with that stream as if it were available at the
|
||||
/// call site.
|
||||
///
|
||||
/// Note that this function consumes this future and returns a wrapped
|
||||
/// version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::{self, Stream};
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let stream_items = vec![Ok(17), Err(true), Ok(19)];
|
||||
/// let future_of_a_stream = ok::<_, bool>(stream::iter(stream_items));
|
||||
///
|
||||
/// let stream = future_of_a_stream.flatten_stream();
|
||||
///
|
||||
/// let mut iter = stream.wait();
|
||||
/// assert_eq!(Ok(17), iter.next().unwrap());
|
||||
/// assert_eq!(Err(true), iter.next().unwrap());
|
||||
/// assert_eq!(Ok(19), iter.next().unwrap());
|
||||
/// assert_eq!(None, iter.next());
|
||||
/// ```
|
||||
fn flatten_stream(self) -> FlattenStream<Self>
|
||||
where <Self as Future>::Item: stream::Stream<Error=Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
flatten_stream::new(self)
|
||||
}
|
||||
|
||||
/// Fuse a future such that `poll` will never again be called once it has
|
||||
/// completed.
|
||||
///
|
||||
/// Currently once a future has returned `Ready` or `Err` from
|
||||
/// `poll` any further calls could exhibit bad behavior such as blocking
|
||||
/// forever, panicking, never returning, etc. If it is known that `poll`
|
||||
/// may be called too often then this method can be used to ensure that it
|
||||
/// has defined semantics.
|
||||
///
|
||||
/// Once a future has been `fuse`d and it returns a completion from `poll`,
|
||||
/// then it will forever return `NotReady` from `poll` again (never
|
||||
/// resolve). This, unlike the trait's `poll` method, is guaranteed.
|
||||
///
|
||||
/// This combinator will drop this future as soon as it's been completed to
|
||||
/// ensure resources are reclaimed as soon as possible.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::Async;
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let mut future = ok::<i32, u32>(2);
|
||||
/// assert_eq!(future.poll(), Ok(Async::Ready(2)));
|
||||
///
|
||||
/// // Normally, a call such as this would panic:
|
||||
/// //future.poll();
|
||||
///
|
||||
/// // This, however, is guaranteed to not panic
|
||||
/// let mut future = ok::<i32, u32>(2).fuse();
|
||||
/// assert_eq!(future.poll(), Ok(Async::Ready(2)));
|
||||
/// assert_eq!(future.poll(), Ok(Async::NotReady));
|
||||
/// ```
|
||||
fn fuse(self) -> Fuse<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
let f = fuse::new(self);
|
||||
assert_future::<Self::Item, Self::Error, _>(f)
|
||||
}
|
||||
|
||||
/// Catches unwinding panics while polling the future.
|
||||
///
|
||||
/// In general, panics within a future can propagate all the way out to the
|
||||
/// task level. This combinator makes it possible to halt unwinding within
|
||||
/// the future itself. It's most commonly used within task executors. It's
|
||||
/// not recommended to use this for error handling.
|
||||
///
|
||||
/// Note that this method requires the `UnwindSafe` bound from the standard
|
||||
/// library. This isn't always applied automatically, and the standard
|
||||
/// library provides an `AssertUnwindSafe` wrapper type to apply it
|
||||
/// after the fact. To assist with using this method, the `Future` trait is also
|
||||
/// implemented for `AssertUnwindSafe<F>` where `F` implements `Future`.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let mut future = ok::<i32, u32>(2);
|
||||
/// assert!(future.catch_unwind().wait().is_ok());
|
||||
///
|
||||
/// let mut future = lazy(|| -> FutureResult<i32, u32> {
|
||||
/// panic!();
|
||||
/// ok::<i32, u32>(2)
|
||||
/// });
|
||||
/// assert!(future.catch_unwind().wait().is_err());
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
fn catch_unwind(self) -> CatchUnwind<Self>
|
||||
where Self: Sized + ::std::panic::UnwindSafe
|
||||
{
|
||||
catch_unwind::new(self)
|
||||
}
|
||||
|
||||
/// Create a cloneable handle to this future where all handles will resolve
|
||||
/// to the same result.
|
||||
///
|
||||
/// The `shared()` method provides a means to convert any future into a
|
||||
/// cloneable future. It enables a future to be polled by multiple threads.
|
||||
///
|
||||
/// The returned `Shared` future resolves successfully with
|
||||
/// `SharedItem<Self::Item>` or erroneously with `SharedError<Self::Error>`.
|
||||
/// Both `SharedItem` and `SharedError` implement `Deref` to allow shared
|
||||
/// access to the underlying result. Ownership of `Self::Item` and
|
||||
/// `Self::Error` cannot currently be reclaimed.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future = ok::<_, bool>(6);
|
||||
/// let shared1 = future.shared();
|
||||
/// let shared2 = shared1.clone();
|
||||
/// assert_eq!(6, *shared1.wait().unwrap());
|
||||
/// assert_eq!(6, *shared2.wait().unwrap());
|
||||
/// ```
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future = ok::<_, bool>(6);
|
||||
/// let shared1 = future.shared();
|
||||
/// let shared2 = shared1.clone();
|
||||
/// let join_handle = thread::spawn(move || {
|
||||
/// assert_eq!(6, *shared2.wait().unwrap());
|
||||
/// });
|
||||
/// assert_eq!(6, *shared1.wait().unwrap());
|
||||
/// join_handle.join().unwrap();
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
fn shared(self) -> Shared<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
shared::new(self)
|
||||
}
|
||||
}
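// A minimal sketch of a hand-written `Future` implementation (illustrative
// only; `ReadyOnPoll` is a hypothetical type and not part of this crate).
// The value is available immediately, so `poll` never returns `NotReady`
// and never needs to arrange a task notification.
#[allow(dead_code)]
struct ReadyOnPoll(Option<u32>);

impl Future for ReadyOnPoll {
    type Item = u32;
    type Error = ();

    fn poll(&mut self) -> Poll<u32, ()> {
        // Yield the stored value exactly once, mirroring the contract that a
        // finished future should not be polled again.
        let value = self.0.take().expect("cannot poll ReadyOnPoll twice");
        Ok(::Async::Ready(value))
    }
}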
|
||||
|
||||
impl<'a, F: ?Sized + Future> Future for &'a mut F {
|
||||
type Item = F::Item;
|
||||
type Error = F::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
(**self).poll()
|
||||
}
|
||||
}
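// A minimal sketch of why the blanket impl above is useful (illustrative
// only; both helpers are hypothetical). Because `&mut F` is itself a
// `Future`, a generic function that takes a future by value can be handed a
// mutable borrow, letting the caller keep ownership of the original future.
#[allow(dead_code)]
fn poll_once<F: Future>(mut f: F) -> Poll<F::Item, F::Error> {
    f.poll()
}

#[allow(dead_code)]
fn poll_without_consuming<F: Future>(f: &mut F) -> Poll<F::Item, F::Error> {
    poll_once(f)
}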
|
||||
|
||||
// Just a helper function to ensure the futures we're returning all have the
|
||||
// right implementations.
|
||||
fn assert_future<A, B, F>(t: F) -> F
|
||||
where F: Future<Item=A, Error=B>,
|
||||
{
|
||||
t
|
||||
}
|
||||
|
||||
/// Class of types which can be converted into a future.
|
||||
///
|
||||
/// This trait is very similar to the `IntoIterator` trait and is intended to be
|
||||
/// used in a very similar fashion.
|
||||
pub trait IntoFuture {
|
||||
/// The future that this type can be converted into.
|
||||
type Future: Future<Item=Self::Item, Error=Self::Error>;
|
||||
|
||||
/// The item that the future may resolve with.
|
||||
type Item;
|
||||
/// The error that the future may resolve with.
|
||||
type Error;
|
||||
|
||||
/// Consumes this object and produces a future.
|
||||
fn into_future(self) -> Self::Future;
|
||||
}
|
||||
|
||||
impl<F: Future> IntoFuture for F {
|
||||
type Future = F;
|
||||
type Item = F::Item;
|
||||
type Error = F::Error;
|
||||
|
||||
fn into_future(self) -> F {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> IntoFuture for result::Result<T, E> {
|
||||
type Future = FutureResult<T, E>;
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn into_future(self) -> FutureResult<T, E> {
|
||||
result(self)
|
||||
}
|
||||
}
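// A minimal sketch of `IntoFuture` in action (illustrative only; the helper
// is hypothetical). Because `Result` implements `IntoFuture`, a plain
// `Result` converts into an immediately-ready future.
#[allow(dead_code)]
fn result_into_future(r: result::Result<u32, u32>) -> FutureResult<u32, u32> {
    r.into_future()
}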
|
||||
|
||||
/// Asynchronous conversion from a type `T`.
|
||||
///
|
||||
/// This trait is analogous to `std::convert::From`, adapted to asynchronous
|
||||
/// computation.
|
||||
pub trait FutureFrom<T>: Sized {
|
||||
/// The future for the conversion.
|
||||
type Future: Future<Item=Self, Error=Self::Error>;
|
||||
|
||||
/// Possible errors during conversion.
|
||||
type Error;
|
||||
|
||||
/// Consume the given value, beginning the conversion.
|
||||
fn future_from(T) -> Self::Future;
|
||||
}
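// A minimal sketch of implementing `FutureFrom` (illustrative only;
// `Fahrenheit` and `Celsius` are hypothetical types, not part of this
// crate). The conversion itself is synchronous, so an immediately-ready
// future is returned.
#[allow(dead_code)]
struct Fahrenheit(f64);
#[allow(dead_code)]
struct Celsius(f64);

impl FutureFrom<Fahrenheit> for Celsius {
    type Future = FutureResult<Celsius, ()>;
    type Error = ();

    fn future_from(f: Fahrenheit) -> Self::Future {
        ok(Celsius((f.0 - 32.0) * 5.0 / 9.0))
    }
}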
|
|
@ -0,0 +1,15 @@
|
|||
//! Definition of the `Option` (optional step) combinator
|
||||
|
||||
use {Future, Poll, Async};
|
||||
|
||||
impl<F, T, E> Future for Option<F> where F: Future<Item=T, Error=E> {
|
||||
type Item = Option<T>;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
match *self {
|
||||
None => Ok(Async::Ready(None)),
|
||||
Some(ref mut x) => x.poll().map(|x| x.map(Some)),
|
||||
}
|
||||
}
|
||||
}
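// A minimal sketch of this impl's behavior (illustrative only; the helper is
// hypothetical): a `None` is immediately ready with `Ok(None)`, while a
// `Some(future)` yields `Some` of the inner future's item once it resolves.
#[allow(dead_code)]
fn poll_optional<F: Future>(opt: &mut Option<F>) -> Poll<Option<F::Item>, F::Error> {
    opt.poll()
}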
|
|
@ -0,0 +1,39 @@
|
|||
use {Future, IntoFuture, Poll};
|
||||
use super::chain::Chain;
|
||||
|
||||
/// Future for the `or_else` combinator, chaining a computation onto the end of
|
||||
/// a future which fails with an error.
|
||||
///
|
||||
/// This is created by the `Future::or_else` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct OrElse<A, B, F> where A: Future, B: IntoFuture {
|
||||
state: Chain<A, B::Future, F>,
|
||||
}
|
||||
|
||||
pub fn new<A, B, F>(future: A, f: F) -> OrElse<A, B, F>
|
||||
where A: Future,
|
||||
B: IntoFuture<Item=A::Item>,
|
||||
{
|
||||
OrElse {
|
||||
state: Chain::new(future, f),
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B, F> Future for OrElse<A, B, F>
|
||||
where A: Future,
|
||||
B: IntoFuture<Item=A::Item>,
|
||||
F: FnOnce(A::Error) -> B,
|
||||
{
|
||||
type Item = B::Item;
|
||||
type Error = B::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<B::Item, B::Error> {
|
||||
self.state.poll(|a, f| {
|
||||
match a {
|
||||
Ok(item) => Ok(Ok(item)),
|
||||
Err(e) => Ok(Err(f(e).into_future()))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
//! Definition of the `PollFn` adapter combinator
|
||||
|
||||
use {Future, Poll};
|
||||
|
||||
/// A future which adapts a function returning `Poll`.
|
||||
///
|
||||
/// Created by the `poll_fn` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct PollFn<F> {
|
||||
inner: F,
|
||||
}
|
||||
|
||||
/// Creates a new future wrapping around a function returning `Poll`.
|
||||
///
|
||||
/// Polling the returned future delegates to the wrapped function.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::poll_fn;
|
||||
/// use futures::{Async, Poll};
|
||||
///
|
||||
/// fn read_line() -> Poll<String, std::io::Error> {
|
||||
/// Ok(Async::Ready("Hello, World!".into()))
|
||||
/// }
|
||||
///
|
||||
/// let read_future = poll_fn(read_line);
|
||||
/// ```
|
||||
pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
|
||||
where F: FnMut() -> ::Poll<T, E>
|
||||
{
|
||||
PollFn { inner: f }
|
||||
}
|
||||
|
||||
impl<T, E, F> Future for PollFn<F>
|
||||
where F: FnMut() -> Poll<T, E>
|
||||
{
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, E> {
|
||||
(self.inner)()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,75 @@
|
|||
//! Definition of the `Result` (immediately finished) combinator
|
||||
|
||||
use core::result;
|
||||
|
||||
use {Future, Poll, Async};
|
||||
|
||||
/// A future representing a value that is immediately ready.
|
||||
///
|
||||
/// Created by the `result` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
// TODO: rename this to `Result` on the next major version
|
||||
pub struct FutureResult<T, E> {
|
||||
inner: Option<result::Result<T, E>>,
|
||||
}
|
||||
|
||||
/// Creates a new "leaf future" which will resolve with the given result.
|
||||
///
|
||||
/// The returned future represents a computation which is finished immediately.
|
||||
/// This can be useful with the `finished` and `failed` base future types to
|
||||
/// convert an immediate value to a future to interoperate elsewhere.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_1 = result::<u32, u32>(Ok(1));
|
||||
/// let future_of_err_2 = result::<u32, u32>(Err(2));
|
||||
/// ```
|
||||
pub fn result<T, E>(r: result::Result<T, E>) -> FutureResult<T, E> {
|
||||
FutureResult { inner: Some(r) }
|
||||
}
|
||||
|
||||
/// Creates a "leaf future" from an immediate value of a finished and
|
||||
/// successful computation.
|
||||
///
|
||||
/// The returned future is similar to `result` where it will immediately run a
|
||||
/// scheduled callback with the provided value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// ```
|
||||
pub fn ok<T, E>(t: T) -> FutureResult<T, E> {
|
||||
result(Ok(t))
|
||||
}
|
||||
|
||||
/// Creates a "leaf future" from an immediate value of a failed computation.
|
||||
///
|
||||
/// The returned future is similar to `result` where it will immediately run a
|
||||
/// scheduled callback with the provided value.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// ```
|
||||
pub fn err<T, E>(e: E) -> FutureResult<T, E> {
|
||||
result(Err(e))
|
||||
}
|
||||
|
||||
impl<T, E> Future for FutureResult<T, E> {
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, E> {
|
||||
self.inner.take().expect("cannot poll Result twice").map(Async::Ready)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,86 @@
|
|||
use {Future, Poll, Async};
|
||||
|
||||
/// Future for the `select` combinator, waiting for one of two futures to
|
||||
/// complete.
|
||||
///
|
||||
/// This is created by the `Future::select` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Select<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
|
||||
inner: Option<(A, B)>,
|
||||
}
|
||||
|
||||
/// Future yielded as the second result in a `Select` future.
|
||||
///
|
||||
/// This sentinel future represents the future which did not finish first in
|
||||
/// the `select`; polling it drives that remaining future to completion.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct SelectNext<A, B> where A: Future, B: Future<Item=A::Item, Error=A::Error> {
|
||||
inner: OneOf<A, B>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum OneOf<A, B> where A: Future, B: Future {
|
||||
A(A),
|
||||
B(B),
|
||||
}
|
||||
|
||||
pub fn new<A, B>(a: A, b: B) -> Select<A, B>
|
||||
where A: Future,
|
||||
B: Future<Item=A::Item, Error=A::Error>
|
||||
{
|
||||
Select {
|
||||
inner: Some((a, b)),
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> Future for Select<A, B>
|
||||
where A: Future,
|
||||
B: Future<Item=A::Item, Error=A::Error>,
|
||||
{
|
||||
type Item = (A::Item, SelectNext<A, B>);
|
||||
type Error = (A::Error, SelectNext<A, B>);
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let (ret, is_a) = match self.inner {
|
||||
Some((ref mut a, ref mut b)) => {
|
||||
match a.poll() {
|
||||
Err(a) => (Err(a), true),
|
||||
Ok(Async::Ready(a)) => (Ok(a), true),
|
||||
Ok(Async::NotReady) => {
|
||||
match b.poll() {
|
||||
Err(a) => (Err(a), false),
|
||||
Ok(Async::Ready(a)) => (Ok(a), false),
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None => panic!("cannot poll select twice"),
|
||||
};
|
||||
|
||||
let (a, b) = self.inner.take().unwrap();
|
||||
let next = if is_a {OneOf::B(b)} else {OneOf::A(a)};
|
||||
let next = SelectNext { inner: next };
|
||||
match ret {
|
||||
Ok(a) => Ok(Async::Ready((a, next))),
|
||||
Err(e) => Err((e, next)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> Future for SelectNext<A, B>
|
||||
where A: Future,
|
||||
B: Future<Item=A::Item, Error=A::Error>,
|
||||
{
|
||||
type Item = A::Item;
|
||||
type Error = A::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.inner {
|
||||
OneOf::A(ref mut a) => a.poll(),
|
||||
OneOf::B(ref mut b) => b.poll(),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
use {Future, Poll, Async};
|
||||
use future::Either;
|
||||
|
||||
/// Future for the `select2` combinator, waiting for one of two differently-typed
|
||||
/// futures to complete.
|
||||
///
|
||||
/// This is created by the `Future::select2` method.
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
#[derive(Debug)]
|
||||
pub struct Select2<A, B> {
|
||||
inner: Option<(A, B)>,
|
||||
}
|
||||
|
||||
pub fn new<A, B>(a: A, b: B) -> Select2<A, B> {
|
||||
Select2 { inner: Some((a, b)) }
|
||||
}
|
||||
|
||||
impl<A, B> Future for Select2<A, B> where A: Future, B: Future {
|
||||
type Item = Either<(A::Item, B), (B::Item, A)>;
|
||||
type Error = Either<(A::Error, B), (B::Error, A)>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let (mut a, mut b) = self.inner.take().expect("cannot poll Select2 twice");
|
||||
match a.poll() {
|
||||
Err(e) => Err(Either::A((e, b))),
|
||||
Ok(Async::Ready(x)) => Ok(Async::Ready((Either::A((x, b))))),
|
||||
Ok(Async::NotReady) => match b.poll() {
|
||||
Err(e) => Err(Either::B((e, a))),
|
||||
Ok(Async::Ready(x)) => Ok(Async::Ready((Either::B((x, a))))),
|
||||
Ok(Async::NotReady) => {
|
||||
self.inner = Some((a, b));
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
//! Definition of the `SelectAll` combinator, finding the first future in a list that
|
||||
//! finishes.
|
||||
|
||||
use std::mem;
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use {Future, IntoFuture, Poll, Async};
|
||||
|
||||
/// Future for the `select_all` combinator, waiting for one of any of a list of
|
||||
/// futures to complete.
|
||||
///
|
||||
/// This is created by the `select_all` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct SelectAll<A> where A: Future {
|
||||
inner: Vec<A>,
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
pub type SelectAllNext<A> = A;
|
||||
|
||||
/// Creates a new future which will select over a list of futures.
|
||||
///
|
||||
/// The returned future will wait for any future within `iter` to be ready. Upon
|
||||
/// completion or failure the item resolved will be returned, along with the
|
||||
/// index of the future that was ready and the list of all the remaining
|
||||
/// futures.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if the iterator specified contains no items.
|
||||
pub fn select_all<I>(iter: I) -> SelectAll<<I::Item as IntoFuture>::Future>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture,
|
||||
{
|
||||
let ret = SelectAll {
|
||||
inner: iter.into_iter()
|
||||
.map(|a| a.into_future())
|
||||
.collect(),
|
||||
};
|
||||
assert!(ret.inner.len() > 0);
|
||||
ret
|
||||
}
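// A usage sketch (illustrative only; this helper is hypothetical and not
// part of the crate). The first ready future wins, and its item is returned
// together with its index and the remaining futures.
#[allow(dead_code)]
fn select_all_sketch() {
    let futures = vec![
        ::future::ok::<u32, u32>(1),
        ::future::ok::<u32, u32>(2),
    ];
    let (item, index, rest) = select_all(futures).wait().unwrap();
    assert_eq!((item, index, rest.len()), (1, 0, 1));
}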
|
||||
|
||||
impl<A> Future for SelectAll<A>
|
||||
where A: Future,
|
||||
{
|
||||
type Item = (A::Item, usize, Vec<A>);
|
||||
type Error = (A::Error, usize, Vec<A>);
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
|
||||
match f.poll() {
|
||||
Ok(Async::NotReady) => None,
|
||||
Ok(Async::Ready(e)) => Some((i, Ok(e))),
|
||||
Err(e) => Some((i, Err(e))),
|
||||
}
|
||||
}).next();
|
||||
match item {
|
||||
Some((idx, res)) => {
|
||||
self.inner.remove(idx);
|
||||
let rest = mem::replace(&mut self.inner, Vec::new());
|
||||
match res {
|
||||
Ok(e) => Ok(Async::Ready((e, idx, rest))),
|
||||
Err(e) => Err((e, idx, rest)),
|
||||
}
|
||||
}
|
||||
None => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
//! Definition of the `SelectOk` combinator, finding the first successful future
|
||||
//! in a list.
|
||||
|
||||
use std::mem;
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use {Future, IntoFuture, Poll, Async};
|
||||
|
||||
/// Future for the `select_ok` combinator, waiting for one of any of a list of
|
||||
/// futures to successfully complete. Unlike `select_all`, this future ignores all
|
||||
/// but the last error, if there are any.
|
||||
///
|
||||
/// This is created by the `select_ok` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct SelectOk<A> where A: Future {
|
||||
inner: Vec<A>,
|
||||
}
|
||||
|
||||
/// Creates a new future which will select the first successful future over a list of futures.
|
||||
///
|
||||
/// The returned future will wait for any future within `iter` to be ready and Ok. Unlike
|
||||
/// `select_all`, this will only return the first successful completion, or the last
|
||||
/// failure. This is useful in contexts where any success is desired and failures
|
||||
/// are ignored, unless all the futures fail.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if the iterator specified contains no items.
|
||||
pub fn select_ok<I>(iter: I) -> SelectOk<<I::Item as IntoFuture>::Future>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture,
|
||||
{
|
||||
let ret = SelectOk {
|
||||
inner: iter.into_iter()
|
||||
.map(|a| a.into_future())
|
||||
.collect(),
|
||||
};
|
||||
assert!(ret.inner.len() > 0);
|
||||
ret
|
||||
}
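// A usage sketch (illustrative only; this helper is hypothetical and not
// part of the crate). Errors are discarded as long as some future can still
// succeed, so the single `err` below is ignored in favour of the `ok`.
#[allow(dead_code)]
fn select_ok_sketch() {
    let futures = vec![
        ::future::err::<u32, u32>(1),
        ::future::ok::<u32, u32>(2),
    ];
    let (item, rest) = select_ok(futures).wait().unwrap();
    assert_eq!(item, 2);
    assert!(rest.is_empty());
}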
|
||||
|
||||
impl<A> Future for SelectOk<A> where A: Future {
|
||||
type Item = (A::Item, Vec<A>);
|
||||
type Error = A::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
// loop until we've either exhausted all errors, a success was hit, or nothing is ready
|
||||
loop {
|
||||
let item = self.inner.iter_mut().enumerate().filter_map(|(i, f)| {
|
||||
match f.poll() {
|
||||
Ok(Async::NotReady) => None,
|
||||
Ok(Async::Ready(e)) => Some((i, Ok(e))),
|
||||
Err(e) => Some((i, Err(e))),
|
||||
}
|
||||
}).next();
|
||||
|
||||
match item {
|
||||
Some((idx, res)) => {
|
||||
// always remove Ok or Err, if it's not the last Err continue looping
|
||||
drop(self.inner.remove(idx));
|
||||
match res {
|
||||
Ok(e) => {
|
||||
let rest = mem::replace(&mut self.inner, Vec::new());
|
||||
return Ok(Async::Ready((e, rest)))
|
||||
},
|
||||
Err(e) => {
|
||||
if self.inner.is_empty() {
|
||||
return Err(e)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
None => {
|
||||
// based on the filter above, nothing is ready, return
|
||||
return Ok(Async::NotReady)
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,279 @@
|
|||
//! Definition of the Shared combinator, a future that is cloneable,
|
||||
//! and can be polled in multiple threads.
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! ```
|
||||
//! use futures::future::*;
|
||||
//!
|
||||
//! let future = ok::<_, bool>(6);
|
||||
//! let shared1 = future.shared();
|
||||
//! let shared2 = shared1.clone();
|
||||
//! assert_eq!(6, *shared1.wait().unwrap());
|
||||
//! assert_eq!(6, *shared2.wait().unwrap());
|
||||
//! ```
|
||||
|
||||
use {Future, Poll, Async};
|
||||
use executor::{self, Spawn, Unpark};
|
||||
use task::{self, Task};
|
||||
|
||||
use std::{fmt, mem, ops};
|
||||
use std::cell::UnsafeCell;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A future that is cloneable and can be polled in multiple threads.
|
||||
/// Use the `Future::shared()` method to convert any future into a `Shared` future.
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Shared<F: Future> {
|
||||
inner: Arc<Inner<F>>,
|
||||
waiter: usize,
|
||||
}
|
||||
|
||||
impl<F> fmt::Debug for Shared<F>
|
||||
where F: Future + fmt::Debug,
|
||||
F::Item: fmt::Debug,
|
||||
F::Error: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("Shared")
|
||||
.field("inner", &self.inner)
|
||||
.field("waiter", &self.waiter)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
struct Inner<F: Future> {
|
||||
next_clone_id: AtomicUsize,
|
||||
future: UnsafeCell<Option<Spawn<F>>>,
|
||||
result: UnsafeCell<Option<Result<SharedItem<F::Item>, SharedError<F::Error>>>>,
|
||||
unparker: Arc<Unparker>,
|
||||
}
|
||||
|
||||
struct Unparker {
|
||||
state: AtomicUsize,
|
||||
waiters: Mutex<HashMap<usize, Task>>,
|
||||
}
|
||||
|
||||
// State machine for the shared future, stored in `Unparker::state`:
const IDLE: usize = 0;      // nobody is currently polling the inner future
|
||||
const POLLING: usize = 1;   // one task is polling the inner future
|
||||
const REPOLL: usize = 2;    // a wakeup arrived mid-poll; poll again afterwards
|
||||
const COMPLETE: usize = 3;  // the result has been stored in `Inner::result`
|
||||
const POISONED: usize = 4;  // the inner future panicked while being polled
|
||||
|
||||
pub fn new<F: Future>(future: F) -> Shared<F> {
|
||||
Shared {
|
||||
inner: Arc::new(Inner {
|
||||
next_clone_id: AtomicUsize::new(1),
|
||||
unparker: Arc::new(Unparker {
|
||||
state: AtomicUsize::new(IDLE),
|
||||
waiters: Mutex::new(HashMap::new()),
|
||||
}),
|
||||
future: UnsafeCell::new(Some(executor::spawn(future))),
|
||||
result: UnsafeCell::new(None),
|
||||
}),
|
||||
waiter: 0,
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Shared<F> where F: Future {
|
||||
// TODO: make this private
|
||||
#[deprecated(since = "0.1.12", note = "use `Future::shared` instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[doc(hidden)]
|
||||
pub fn new(future: F) -> Self {
|
||||
new(future)
|
||||
}
|
||||
|
||||
/// If any clone of this `Shared` has completed execution, returns its result immediately
|
||||
/// without blocking. Otherwise, returns None without triggering the work represented by
|
||||
/// this `Shared`.
|
||||
pub fn peek(&self) -> Option<Result<SharedItem<F::Item>, SharedError<F::Error>>> {
|
||||
match self.inner.unparker.state.load(SeqCst) {
|
||||
COMPLETE => {
|
||||
Some(unsafe { self.clone_result() })
|
||||
}
|
||||
POISONED => panic!("inner future panicked during poll"),
|
||||
_ => None,
|
||||
}
|
||||
}
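    // A usage sketch for `peek` (illustrative only):
    //
    //     use futures::future::*;
    //
    //     let shared = ok::<u32, u32>(6).shared();
    //     assert!(shared.peek().is_none());   // nothing has polled it yet
    //     assert_eq!(6, *shared.clone().wait().unwrap());
    //     assert_eq!(6, *shared.peek().unwrap().unwrap());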
|
||||
|
||||
fn set_waiter(&mut self) {
|
||||
let mut waiters = self.inner.unparker.waiters.lock().unwrap();
|
||||
waiters.insert(self.waiter, task::park());
|
||||
}
|
||||
|
||||
unsafe fn clone_result(&self) -> Result<SharedItem<F::Item>, SharedError<F::Error>> {
|
||||
match *self.inner.result.get() {
|
||||
Some(Ok(ref item)) => Ok(SharedItem { item: item.item.clone() }),
|
||||
Some(Err(ref e)) => Err(SharedError { error: e.error.clone() }),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
fn complete(&self) {
|
||||
unsafe { *self.inner.future.get() = None };
|
||||
self.inner.unparker.state.store(COMPLETE, SeqCst);
|
||||
self.inner.unparker.unpark();
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Future for Shared<F>
|
||||
where F: Future
|
||||
{
|
||||
type Item = SharedItem<F::Item>;
|
||||
type Error = SharedError<F::Error>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
self.set_waiter();
|
||||
|
||||
match self.inner.unparker.state.compare_and_swap(IDLE, POLLING, SeqCst) {
|
||||
IDLE => {
|
||||
// Lock acquired, fall through
|
||||
}
|
||||
POLLING | REPOLL => {
|
||||
// Another task is currently polling, at this point we just want
|
||||
// to ensure that our task handle is currently registered
|
||||
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
COMPLETE => {
|
||||
return unsafe { self.clone_result().map(Async::Ready) };
|
||||
}
|
||||
POISONED => panic!("inner future panicked during poll"),
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
loop {
|
||||
struct Reset<'a>(&'a AtomicUsize);
|
||||
|
||||
impl<'a> Drop for Reset<'a> {
|
||||
fn drop(&mut self) {
|
||||
use std::thread;
|
||||
|
||||
if thread::panicking() {
|
||||
self.0.store(POISONED, SeqCst);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let _reset = Reset(&self.inner.unparker.state);
|
||||
|
||||
// Get a handle to the unparker
|
||||
let unpark: Arc<Unpark> = self.inner.unparker.clone();
|
||||
|
||||
// Poll the future
|
||||
match unsafe { (*self.inner.future.get()).as_mut().unwrap().poll_future(unpark) } {
|
||||
Ok(Async::NotReady) => {
|
||||
// Not ready, try to release the handle
|
||||
match self.inner.unparker.state.compare_and_swap(POLLING, IDLE, SeqCst) {
|
||||
POLLING => {
|
||||
// Success
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
REPOLL => {
|
||||
// Gotta poll again!
|
||||
let prev = self.inner.unparker.state.swap(POLLING, SeqCst);
|
||||
assert_eq!(prev, REPOLL);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
}
|
||||
Ok(Async::Ready(i)) => {
|
||||
unsafe {
|
||||
(*self.inner.result.get()) = Some(Ok(SharedItem { item: Arc::new(i) }));
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
unsafe {
|
||||
(*self.inner.result.get()) = Some(Err(SharedError { error: Arc::new(e) }));
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.complete();
|
||||
unsafe { self.clone_result().map(Async::Ready) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Clone for Shared<F> where F: Future {
|
||||
fn clone(&self) -> Self {
|
||||
let next_clone_id = self.inner.next_clone_id.fetch_add(1, SeqCst);
|
||||
|
||||
Shared {
|
||||
inner: self.inner.clone(),
|
||||
waiter: next_clone_id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Drop for Shared<F> where F: Future {
|
||||
fn drop(&mut self) {
|
||||
let mut waiters = self.inner.unparker.waiters.lock().unwrap();
|
||||
waiters.remove(&self.waiter);
|
||||
}
|
||||
}
|
||||
|
||||
impl Unpark for Unparker {
|
||||
fn unpark(&self) {
|
||||
self.state.compare_and_swap(POLLING, REPOLL, SeqCst);
|
||||
|
||||
let waiters = mem::replace(&mut *self.waiters.lock().unwrap(), HashMap::new());
|
||||
|
||||
for (_, waiter) in waiters {
|
||||
waiter.unpark();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<F: Future> Sync for Inner<F> {}
|
||||
unsafe impl<F: Future> Send for Inner<F> {}
|
||||
|
||||
impl<F> fmt::Debug for Inner<F>
|
||||
where F: Future + fmt::Debug,
|
||||
F::Item: fmt::Debug,
|
||||
F::Error: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("Inner")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapped item of the original future that is clonable and implements Deref
|
||||
/// for ease of use.
|
||||
#[derive(Debug)]
|
||||
pub struct SharedItem<T> {
|
||||
item: Arc<T>,
|
||||
}
|
||||
|
||||
impl<T> ops::Deref for SharedItem<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.item.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapped error of the original future that is clonable and implements Deref
|
||||
/// for ease of use.
|
||||
#[derive(Debug)]
|
||||
pub struct SharedError<E> {
|
||||
error: Arc<E>,
|
||||
}
|
||||
|
||||
impl<E> ops::Deref for SharedError<E> {
|
||||
type Target = E;
|
||||
|
||||
fn deref(&self) -> &E {
|
||||
&self.error.as_ref()
|
||||
}
|
||||
}
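// A minimal usage sketch (illustrative only, not from the upstream source):
// every clone of a `Shared` future resolves to the same `SharedItem`, which
// derefs to the inner value. Assumes the `Future::shared` combinator that
// constructs this type, `future::done`, and the default `use_std` feature.
#[cfg(feature = "use_std")]
#[test]
fn shared_clones_see_the_same_value() {
    use future::done;

    let original = done::<u32, ()>(Ok(5)).shared();
    let first = original.clone();
    let second = original.clone();
    assert_eq!(*first.wait().unwrap(), 5);
    assert_eq!(*second.wait().unwrap(), 5);
}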
|
|
@ -0,0 +1,36 @@
|
|||
use {Future, IntoFuture, Poll};
|
||||
use super::chain::Chain;
|
||||
|
||||
/// Future for the `then` combinator, chaining computations on the end of
|
||||
/// another future regardless of its outcome.
|
||||
///
|
||||
/// This is created by the `Future::then` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Then<A, B, F> where A: Future, B: IntoFuture {
|
||||
state: Chain<A, B::Future, F>,
|
||||
}
|
||||
|
||||
pub fn new<A, B, F>(future: A, f: F) -> Then<A, B, F>
|
||||
where A: Future,
|
||||
B: IntoFuture,
|
||||
{
|
||||
Then {
|
||||
state: Chain::new(future, f),
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B, F> Future for Then<A, B, F>
|
||||
where A: Future,
|
||||
B: IntoFuture,
|
||||
F: FnOnce(Result<A::Item, A::Error>) -> B,
|
||||
{
|
||||
type Item = B::Item;
|
||||
type Error = B::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<B::Item, B::Error> {
|
||||
self.state.poll(|a, f| {
|
||||
Ok(Err(f(a).into_future()))
|
||||
})
|
||||
}
|
||||
}
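// A minimal usage sketch (illustrative only, not from the upstream source):
// `then` hands the closure the full `Result` of the first future, so it can
// recover from an error as well as transform a success. Assumes `future::done`
// from this crate and the default `use_std` feature for `wait`.
#[cfg(feature = "use_std")]
#[test]
fn then_can_recover_from_an_error() {
    use future::done;

    let recovered = done::<u32, &str>(Err("boom"))
        .then(|result| -> Result<u32, &str> {
            match result {
                Ok(value) => Ok(value),
                Err(_) => Ok(0),
            }
        });
    assert_eq!(recovered.wait(), Ok(0));
}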
|
|
@ -0,0 +1,238 @@
|
|||
//! Zero-cost Futures in Rust
|
||||
//!
|
||||
//! This library is an implementation of futures in Rust which aims to provide
|
||||
//! a robust implementation of handling asynchronous computations, ergonomic
|
||||
//! composition and usage, and zero-cost abstractions over what would otherwise
|
||||
//! be written by hand.
|
||||
//!
|
||||
//! Futures are a concept for an object which is a proxy for another value that
|
||||
//! may not be ready yet. For example issuing an HTTP request may return a
|
||||
//! future for the HTTP response, as it probably hasn't arrived yet. With an
|
||||
//! object representing a value that will eventually be available, futures allow
|
||||
//! for powerful composition of tasks through basic combinators that can perform
|
||||
//! operations like chaining computations, changing the types of futures, or
|
||||
//! waiting for two futures to complete at the same time.
|
||||
//!
|
||||
//! You can find extensive tutorials and documentation at [https://tokio.rs]
|
||||
//! for both this crate (asynchronous programming in general) as well as the
|
||||
//! Tokio stack to perform async I/O with.
|
||||
//!
|
||||
//! [https://tokio.rs]: https://tokio.rs
|
||||
//!
|
||||
//! ## Installation
|
||||
//!
|
||||
//! Add this to your `Cargo.toml`:
|
||||
//!
|
||||
//! ```toml
|
||||
//! [dependencies]
|
||||
//! futures = "0.1"
|
||||
//! ```
|
||||
//!
|
||||
//! ## Examples
|
||||
//!
|
||||
//! Let's take a look at a few examples of how futures might be used:
|
||||
//!
|
||||
//! ```
|
||||
//! extern crate futures;
|
||||
//!
|
||||
//! use std::io;
|
||||
//! use std::time::Duration;
|
||||
//! use futures::future::{Future, Map};
|
||||
//!
|
||||
//! // A future is actually a trait implementation, so we can generically take a
|
||||
//! // future of any integer and return back a future that will resolve to that
|
||||
//! // value plus 10 more.
|
||||
//! //
|
||||
//! // Note here that like iterators, we're returning the `Map` combinator in
|
||||
//! // the futures crate, not a boxed abstraction. This is a zero-cost
|
||||
//! // construction of a future.
|
||||
//! fn add_ten<F>(future: F) -> Map<F, fn(i32) -> i32>
|
||||
//! where F: Future<Item=i32>,
|
||||
//! {
|
||||
//! fn add(a: i32) -> i32 { a + 10 }
|
||||
//! future.map(add)
|
||||
//! }
|
||||
//!
|
||||
//! // Not only can we modify one future, but we can even compose them together!
|
||||
//! // Here we have a function which takes two futures as input, and returns a
|
||||
//! // future that will calculate the sum of their two values.
|
||||
//! //
|
||||
//! // Above we saw a direct return value of the `Map` combinator, but
|
||||
//! // performance isn't always critical and sometimes it's more ergonomic to
|
||||
//! // return a trait object like we do here. Note though that there's only one
|
||||
//! // allocation here, not any for the intermediate futures.
|
||||
//! fn add<'a, A, B>(a: A, b: B) -> Box<Future<Item=i32, Error=A::Error> + 'a>
|
||||
//! where A: Future<Item=i32> + 'a,
|
||||
//! B: Future<Item=i32, Error=A::Error> + 'a,
|
||||
//! {
|
||||
//! Box::new(a.join(b).map(|(a, b)| a + b))
|
||||
//! }
|
||||
//!
|
||||
//! // Futures also allow chaining computations together, starting another after
|
||||
//! // the previous finishes. Here we wait for the first computation to finish,
|
||||
//! // and then decide what to do depending on the result.
|
||||
//! fn download_timeout(url: &str,
|
||||
//! timeout_dur: Duration)
|
||||
//! -> Box<Future<Item=Vec<u8>, Error=io::Error>> {
|
||||
//! use std::io;
|
||||
//! use std::net::{SocketAddr, TcpStream};
|
||||
//!
|
||||
//! type IoFuture<T> = Box<Future<Item=T, Error=io::Error>>;
|
||||
//!
|
||||
//! // First thing to do is we need to resolve our URL to an address. This
|
||||
//! // will likely perform a DNS lookup which may take some time.
|
||||
//! let addr = resolve(url);
|
||||
//!
|
||||
//! // After we acquire the address, we next want to open up a TCP
|
||||
//! // connection.
|
||||
//! let tcp = addr.and_then(|addr| connect(&addr));
|
||||
//!
|
||||
//! // After the TCP connection is established and ready to go, we're off to
|
||||
//! // the races!
|
||||
//! let data = tcp.and_then(|conn| download(conn));
|
||||
//!
|
||||
//! // That all might take awhile, though, so let's not wait too long for it
|
||||
//! // to all come back. The `select` combinator here returns a future which
|
||||
//! // resolves to the first value that's ready plus the next future.
|
||||
//! //
|
||||
//! // Note we can also use the `then` combinator which is similar to
|
||||
//! // `and_then` above except that it receives the result of the
|
||||
//! // computation, not just the successful value.
|
||||
//! //
|
||||
//! // Again note that all the above calls to `and_then` and the below calls
|
||||
//! // to `map` and such require no allocations. We only ever allocate once
|
||||
//! // we hit the `Box::new()` call at the end here, which means we've built
|
||||
//! // up a relatively involved computation with only one box, and even that
|
||||
//! // was optional!
|
||||
//!
|
||||
//! let data = data.map(Ok);
|
||||
//! let timeout = timeout(timeout_dur).map(Err);
|
||||
//!
|
||||
//! let ret = data.select(timeout).then(|result| {
|
||||
//! match result {
|
||||
//! // One future succeeded, and it was the one which was
|
||||
//! // downloading data from the connection.
|
||||
//! Ok((Ok(data), _other_future)) => Ok(data),
|
||||
//!
|
||||
//! // The timeout fired, and otherwise no error was found, so
|
||||
//! // we translate this to an error.
|
||||
//! Ok((Err(_timeout), _other_future)) => {
|
||||
//! Err(io::Error::new(io::ErrorKind::Other, "timeout"))
|
||||
//! }
|
||||
//!
|
||||
//! // A normal I/O error happened, so we pass that on through.
|
||||
//! Err((e, _other_future)) => Err(e),
|
||||
//! }
|
||||
//! });
|
||||
//! return Box::new(ret);
|
||||
//!
|
||||
//! fn resolve(url: &str) -> IoFuture<SocketAddr> {
|
||||
//! // ...
|
||||
//! # panic!("unimplemented");
|
||||
//! }
|
||||
//!
|
||||
//! fn connect(hostname: &SocketAddr) -> IoFuture<TcpStream> {
|
||||
//! // ...
|
||||
//! # panic!("unimplemented");
|
||||
//! }
|
||||
//!
|
||||
//! fn download(stream: TcpStream) -> IoFuture<Vec<u8>> {
|
||||
//! // ...
|
||||
//! # panic!("unimplemented");
|
||||
//! }
|
||||
//!
|
||||
//! fn timeout(dur: Duration) -> IoFuture<()> {
|
||||
//! // ...
|
||||
//! # panic!("unimplemented");
|
||||
//! }
|
||||
//! }
|
||||
//! # fn main() {}
|
||||
//! ```
|
||||
//!
|
||||
//! Some more information can also be found in the [README] for now, but
|
||||
//! otherwise feel free to jump in to the docs below!
|
||||
//!
|
||||
//! [README]: https://github.com/alexcrichton/futures-rs#futures-rs
|
||||
|
||||
#![no_std]
|
||||
#![deny(missing_docs, missing_debug_implementations)]
|
||||
#![doc(html_root_url = "https://docs.rs/futures/0.1")]
|
||||
|
||||
#[macro_use]
|
||||
#[cfg(feature = "use_std")]
|
||||
extern crate std;
|
||||
|
||||
macro_rules! if_std {
|
||||
($($i:item)*) => ($(
|
||||
#[cfg(feature = "use_std")]
|
||||
$i
|
||||
)*)
|
||||
}
|
||||
|
||||
#[macro_use]
|
||||
mod poll;
|
||||
pub use poll::{Poll, Async, AsyncSink, StartSend};
|
||||
|
||||
pub mod future;
|
||||
pub use future::{Future, IntoFuture};
|
||||
|
||||
pub mod stream;
|
||||
pub use stream::Stream;
|
||||
|
||||
pub mod sink;
|
||||
pub use sink::Sink;
|
||||
|
||||
#[deprecated(since = "0.1.4", note = "import through the future module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[doc(hidden)]
|
||||
pub use future::{done, empty, failed, finished, lazy};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[deprecated(since = "0.1.4", note = "import through the future module instead")]
|
||||
pub use future::{
|
||||
Done, Empty, Failed, Finished, Lazy, AndThen, Flatten, FlattenStream, Fuse, IntoStream,
|
||||
Join, Join3, Join4, Join5, Map, MapErr, OrElse, Select,
|
||||
SelectNext, Then
|
||||
};
|
||||
|
||||
if_std! {
|
||||
mod lock;
|
||||
mod task_impl;
|
||||
mod stack;
|
||||
|
||||
pub mod task;
|
||||
pub mod executor;
|
||||
pub mod sync;
|
||||
pub mod unsync;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use sync::oneshot::channel instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use sync::oneshot::channel as oneshot;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use sync::oneshot::Receiver instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use sync::oneshot::Receiver as Oneshot;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use sync::oneshot::Sender instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use sync::oneshot::Sender as Complete;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use sync::oneshot::Canceled instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use sync::oneshot::Canceled;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the future module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use future::{BoxFuture, collect, select_all, select_ok};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the future module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use future::{SelectAll, SelectAllNext, Collect, SelectOk};
|
||||
}
|
|
@ -0,0 +1,107 @@
|
|||
//! A "mutex" which only supports `try_lock`
|
||||
//!
|
||||
//! In a futures library the eventual call into an event loop should be the
//! only thing that ever blocks. To help with that, this module provides a fast
//! user-space lock that only supports a `try_lock` operation.
|
||||
|
||||
extern crate core;
|
||||
|
||||
use self::core::cell::UnsafeCell;
|
||||
use self::core::ops::{Deref, DerefMut};
|
||||
use self::core::sync::atomic::Ordering::SeqCst;
|
||||
use self::core::sync::atomic::AtomicBool;
|
||||
|
||||
/// A "mutex" around a value, similar to `std::sync::Mutex<T>`.
|
||||
///
|
||||
/// This lock only supports the `try_lock` operation, however, and does not
|
||||
/// implement poisoning.
|
||||
#[derive(Debug)]
|
||||
pub struct Lock<T> {
|
||||
locked: AtomicBool,
|
||||
data: UnsafeCell<T>,
|
||||
}
|
||||
|
||||
/// Sentinel representing an acquired lock through which the data can be
|
||||
/// accessed.
|
||||
pub struct TryLock<'a, T: 'a> {
|
||||
__ptr: &'a Lock<T>,
|
||||
}
|
||||
|
||||
// The `Lock` structure is basically just a `Mutex<T>`, and these two impls are
|
||||
// intended to mirror the standard library's corresponding impls for `Mutex<T>`.
|
||||
//
|
||||
// If a `T` is sendable across threads, so is the lock, and `T` must be sendable
|
||||
// across threads to be `Sync` because it allows mutable access from multiple
|
||||
// threads.
|
||||
unsafe impl<T: Send> Send for Lock<T> {}
|
||||
unsafe impl<T: Send> Sync for Lock<T> {}
|
||||
|
||||
impl<T> Lock<T> {
|
||||
/// Creates a new lock around the given value.
|
||||
pub fn new(t: T) -> Lock<T> {
|
||||
Lock {
|
||||
locked: AtomicBool::new(false),
|
||||
data: UnsafeCell::new(t),
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to acquire this lock, returning whether the lock was acquired or
|
||||
/// not.
|
||||
///
|
||||
/// If `Some` is returned then the data this lock protects can be accessed
|
||||
/// through the sentinel. This sentinel allows both mutable and immutable
|
||||
/// access.
|
||||
///
|
||||
/// If `None` is returned then the lock is already locked, either elsewhere
|
||||
/// on this thread or on another thread.
|
||||
pub fn try_lock(&self) -> Option<TryLock<T>> {
|
||||
if !self.locked.swap(true, SeqCst) {
|
||||
Some(TryLock { __ptr: self })
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Deref for TryLock<'a, T> {
|
||||
type Target = T;
|
||||
fn deref(&self) -> &T {
|
||||
// The existence of `TryLock` represents that we own the lock, so we
|
||||
// can safely access the data here.
|
||||
unsafe { &*self.__ptr.data.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> DerefMut for TryLock<'a, T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
// The existence of `TryLock` represents that we own the lock, so we
|
||||
// can safely access the data here.
|
||||
//
|
||||
// Additionally, we're the *only* `TryLock` in existence so mutable
|
||||
// access should be ok.
|
||||
unsafe { &mut *self.__ptr.data.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Drop for TryLock<'a, T> {
|
||||
fn drop(&mut self) {
|
||||
self.__ptr.locked.store(false, SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::Lock;
|
||||
|
||||
#[test]
|
||||
fn smoke() {
|
||||
let a = Lock::new(1);
|
||||
let mut a1 = a.try_lock().unwrap();
|
||||
assert!(a.try_lock().is_none());
|
||||
assert_eq!(*a1, 1);
|
||||
*a1 = 2;
|
||||
drop(a1);
|
||||
assert_eq!(*a.try_lock().unwrap(), 2);
|
||||
assert_eq!(*a.try_lock().unwrap(), 2);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,95 @@
|
|||
/// A macro for extracting the successful type of a `Poll<T, E>`.
|
||||
///
|
||||
/// This macro bakes in propagation of both errors and `NotReady` signals by
|
||||
/// returning early.
|
||||
#[macro_export]
|
||||
macro_rules! try_ready {
|
||||
($e:expr) => (match $e {
|
||||
Ok($crate::Async::Ready(t)) => t,
|
||||
Ok($crate::Async::NotReady) => return Ok($crate::Async::NotReady),
|
||||
Err(e) => return Err(From::from(e)),
|
||||
})
|
||||
}
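// A minimal usage sketch (illustrative only, not from the upstream source):
// a wrapper future whose `poll` uses `try_ready!` to forward both errors and
// `NotReady` from an inner future, so its own logic only runs once a value is
// actually ready. Assumes `future::done` from this crate and the default
// `use_std` feature for `wait`.
#[cfg(feature = "use_std")]
#[test]
fn try_ready_forwards_not_ready_and_errors() {
    use {future, Future};

    struct Double<F>(F);

    impl<F: Future<Item = u32>> Future for Double<F> {
        type Item = u32;
        type Error = F::Error;

        fn poll(&mut self) -> Poll<u32, F::Error> {
            // Returns early with `Err(..)` or `Ok(Async::NotReady)` whenever
            // the inner future is not finished yet.
            let value = try_ready!(self.0.poll());
            Ok(Async::Ready(value * 2))
        }
    }

    let doubled = Double(future::done::<u32, ()>(Ok(21)));
    assert_eq!(doubled.wait(), Ok(42));
}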
|
||||
|
||||
/// Return type of the `Future::poll` method, indicating whether a future's value
|
||||
/// is ready or not.
|
||||
///
|
||||
/// * `Ok(Async::Ready(t))` means that a future has successfully resolved
|
||||
/// * `Ok(Async::NotReady)` means that a future is not ready to complete yet
|
||||
/// * `Err(e)` means that a future has completed with the given failure
|
||||
pub type Poll<T, E> = Result<Async<T>, E>;
|
||||
|
||||
/// Return type of future, indicating whether a value is ready or not.
|
||||
#[derive(Copy, Clone, Debug, PartialEq)]
|
||||
pub enum Async<T> {
|
||||
/// Represents that a value is immediately ready.
|
||||
Ready(T),
|
||||
|
||||
/// Represents that a value is not ready yet, but may be so later.
|
||||
NotReady,
|
||||
}
|
||||
|
||||
impl<T> Async<T> {
|
||||
/// Change the success type of this `Async` value with the closure provided
|
||||
pub fn map<F, U>(self, f: F) -> Async<U>
|
||||
where F: FnOnce(T) -> U
|
||||
{
|
||||
match self {
|
||||
Async::Ready(t) => Async::Ready(f(t)),
|
||||
Async::NotReady => Async::NotReady,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether this is `Async::Ready`
|
||||
pub fn is_ready(&self) -> bool {
|
||||
match *self {
|
||||
Async::Ready(_) => true,
|
||||
Async::NotReady => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether this is `Async::NotReady`
|
||||
pub fn is_not_ready(&self) -> bool {
|
||||
!self.is_ready()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<T> for Async<T> {
|
||||
fn from(t: T) -> Async<T> {
|
||||
Async::Ready(t)
|
||||
}
|
||||
}
|
||||
|
||||
/// The result of an asynchronous attempt to send a value to a sink.
|
||||
#[derive(Copy, Clone, Debug, PartialEq)]
|
||||
pub enum AsyncSink<T> {
|
||||
/// The `start_send` attempt succeeded, so the sending process has
|
||||
/// *started*; you must use `Sink::poll_complete` to drive the send
|
||||
/// to completion.
|
||||
Ready,
|
||||
|
||||
/// The `start_send` attempt failed due to the sink being full. The value
|
||||
/// being sent is returned, and the current `Task` will be automatically
|
||||
/// notified again once the sink has room.
|
||||
NotReady(T),
|
||||
}
|
||||
|
||||
impl<T> AsyncSink<T> {
|
||||
/// Returns whether this is `AsyncSink::Ready`
|
||||
pub fn is_ready(&self) -> bool {
|
||||
match *self {
|
||||
AsyncSink::Ready => true,
|
||||
AsyncSink::NotReady(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether this is `AsyncSink::NotReady`
|
||||
pub fn is_not_ready(&self) -> bool {
|
||||
!self.is_ready()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Return type of the `Sink::start_send` method, indicating the outcome of a
|
||||
/// send attempt. See `AsyncSink` for more details.
|
||||
pub type StartSend<T, E> = Result<AsyncSink<T>, E>;
|
|
@ -0,0 +1,91 @@
|
|||
use std::collections::VecDeque;
|
||||
|
||||
use {Poll, Async};
|
||||
use {StartSend, AsyncSink};
|
||||
use sink::Sink;
|
||||
use stream::Stream;
|
||||
|
||||
/// Sink for the `Sink::buffer` combinator, which buffers up to some fixed
|
||||
/// number of values when the underlying sink is unable to accept them.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "sinks do nothing unless polled"]
|
||||
pub struct Buffer<S: Sink> {
|
||||
sink: S,
|
||||
buf: VecDeque<S::SinkItem>,
|
||||
|
||||
// Track capacity separately from the `VecDeque`, which may be rounded up
|
||||
cap: usize,
|
||||
}
|
||||
|
||||
pub fn new<S: Sink>(sink: S, amt: usize) -> Buffer<S> {
|
||||
Buffer {
|
||||
sink: sink,
|
||||
buf: VecDeque::with_capacity(amt),
|
||||
cap: amt,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Buffer<S> {
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.sink
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.sink
|
||||
}
|
||||
|
||||
fn try_empty_buffer(&mut self) -> Poll<(), S::SinkError> {
|
||||
while let Some(item) = self.buf.pop_front() {
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink.start_send(item)) {
|
||||
self.buf.push_front(item);
|
||||
|
||||
// ensure that we attempt to complete any pushes we've started
|
||||
try!(self.sink.poll_complete());
|
||||
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Stream from the underlying sink
|
||||
impl<S> Stream for Buffer<S> where S: Sink + Stream {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.sink.poll()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Sink for Buffer<S> {
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
try!(self.try_empty_buffer());
|
||||
if self.buf.len() > self.cap {
|
||||
return Ok(AsyncSink::NotReady(item));
|
||||
}
|
||||
self.buf.push_back(item);
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
try_ready!(self.try_empty_buffer());
|
||||
debug_assert!(self.buf.is_empty());
|
||||
self.sink.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
if self.buf.len() > 0 {
|
||||
try_ready!(self.try_empty_buffer());
|
||||
}
|
||||
assert_eq!(self.buf.len(), 0);
|
||||
self.sink.close()
|
||||
}
|
||||
}
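// A minimal usage sketch (illustrative only, not from the upstream source):
// `Vec<T>` implements `Sink` (see sink/mod.rs), so it can act as the inner
// sink here; driving a `send` to completion moves the item through the buffer
// into the underlying vector.
#[cfg(feature = "use_std")]
#[test]
fn buffered_send_reaches_the_inner_sink() {
    use std::prelude::v1::*;
    use Future;

    let buffered = Vec::<i32>::new().buffer(4);
    let buffered = buffered.send(7).wait().expect("send failed");
    assert_eq!(buffered.get_ref(), &vec![7]);
}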
|
|
@ -0,0 +1,41 @@
|
|||
use {Poll, Async, Future};
|
||||
use sink::Sink;
|
||||
|
||||
/// Future for the `Sink::flush` combinator, which polls the sink until all data
|
||||
/// has been flushed.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Flush<S> {
|
||||
sink: Option<S>,
|
||||
}
|
||||
|
||||
pub fn new<S: Sink>(sink: S) -> Flush<S> {
|
||||
Flush { sink: Some(sink) }
|
||||
}
|
||||
|
||||
impl<S: Sink> Flush<S> {
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
self.sink.as_ref().expect("Attempted `Flush::get_ref` after the flush completed")
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.sink.as_mut().expect("Attempted `Flush::get_mut` after the flush completed")
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Future for Flush<S> {
|
||||
type Item = S;
|
||||
type Error = S::SinkError;
|
||||
|
||||
fn poll(&mut self) -> Poll<S, S::SinkError> {
|
||||
let mut sink = self.sink.take().expect("Attempted to poll Flush after it completed");
|
||||
if try!(sink.poll_complete()).is_ready() {
|
||||
Ok(Async::Ready(sink))
|
||||
} else {
|
||||
self.sink = Some(sink);
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
use core::marker::PhantomData;
|
||||
|
||||
use {Sink, Poll, StartSend};
|
||||
|
||||
/// A sink combinator to change the error type of a sink.
|
||||
///
|
||||
/// This is created by the `Sink::from_err` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct SinkFromErr<S, E> where S: Sink {
|
||||
sink: S,
|
||||
f: PhantomData<E>
|
||||
}
|
||||
|
||||
pub fn new<S, E>(sink: S) -> SinkFromErr<S, E>
|
||||
where S: Sink
|
||||
{
|
||||
SinkFromErr {
|
||||
sink: sink,
|
||||
f: PhantomData
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> Sink for SinkFromErr<S, E>
|
||||
where S: Sink,
|
||||
E: From<S::SinkError>
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = E;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
self.sink.start_send(item).map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.sink.poll_complete().map_err(|e| e.into())
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.sink.close().map_err(|e| e.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> where S: Sink {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.sink.poll()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,35 @@
|
|||
use sink::Sink;
|
||||
|
||||
use {Poll, StartSend};
|
||||
|
||||
/// Sink for the `Sink::sink_map_err` combinator.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "sinks do nothing unless polled"]
|
||||
pub struct SinkMapErr<S, F> {
|
||||
sink: S,
|
||||
f: Option<F>,
|
||||
}
|
||||
|
||||
pub fn new<S, F>(s: S, f: F) -> SinkMapErr<S, F> {
|
||||
SinkMapErr { sink: s, f: Some(f) }
|
||||
}
|
||||
|
||||
impl<S, F, E> Sink for SinkMapErr<S, F>
|
||||
where S: Sink,
|
||||
F: FnOnce(S::SinkError) -> E,
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = E;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
self.sink.start_send(item).map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.sink.poll_complete().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.sink.close().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,433 @@
|
|||
//! Asynchronous sinks
|
||||
//!
|
||||
//! This module contains the `Sink` trait, along with a number of adapter types
|
||||
//! for it. An overview is available in the documentation for the trait itself.
|
||||
//!
|
||||
//! You can find more information/tutorials about streams [online at
|
||||
//! https://tokio.rs][online]
|
||||
//!
|
||||
//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
|
||||
|
||||
use {IntoFuture, Poll, StartSend};
|
||||
use stream::Stream;
|
||||
|
||||
mod with;
|
||||
// mod with_map;
|
||||
// mod with_filter;
|
||||
// mod with_filter_map;
|
||||
mod flush;
|
||||
mod from_err;
|
||||
mod send;
|
||||
mod send_all;
|
||||
mod map_err;
|
||||
|
||||
if_std! {
|
||||
mod buffer;
|
||||
mod wait;
|
||||
|
||||
pub use self::buffer::Buffer;
|
||||
pub use self::wait::Wait;
|
||||
|
||||
// TODO: consider expanding this via e.g. FromIterator
|
||||
impl<T> Sink for ::std::vec::Vec<T> {
|
||||
type SinkItem = T;
|
||||
type SinkError = (); // Change this to ! once it stabilizes
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem)
|
||||
-> StartSend<Self::SinkItem, Self::SinkError>
|
||||
{
|
||||
self.push(item);
|
||||
Ok(::AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
Ok(::Async::Ready(()))
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
Ok(::Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
/// A type alias for `Box<Sink + Send>`
|
||||
pub type BoxSink<T, E> = ::std::boxed::Box<Sink<SinkItem = T, SinkError = E> +
|
||||
::core::marker::Send>;
|
||||
|
||||
impl<S: ?Sized + Sink> Sink for ::std::boxed::Box<S> {
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem)
|
||||
-> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
(**self).start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
(**self).poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
(**self).close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub use self::with::With;
|
||||
pub use self::flush::Flush;
|
||||
pub use self::send::Send;
|
||||
pub use self::send_all::SendAll;
|
||||
pub use self::map_err::SinkMapErr;
|
||||
pub use self::from_err::SinkFromErr;
|
||||
|
||||
/// A `Sink` is a value into which other values can be sent, asynchronously.
|
||||
///
|
||||
/// Basic examples of sinks include the sending side of:
|
||||
///
|
||||
/// - Channels
|
||||
/// - Sockets
|
||||
/// - Pipes
|
||||
///
|
||||
/// In addition to such "primitive" sinks, it's typical to layer additional
|
||||
/// functionality, such as buffering, on top of an existing sink.
|
||||
///
|
||||
/// Sending to a sink is "asynchronous" in the sense that the value may not be
|
||||
/// sent in its entirety immediately. Instead, values are sent in a two-phase
|
||||
/// way: first by initiating a send, and then by polling for completion. This
|
||||
/// two-phase setup is analogous to buffered writing in synchronous code, where
|
||||
/// writes often succeed immediately, but internally are buffered and are
|
||||
/// *actually* written only upon flushing.
|
||||
///
|
||||
/// In addition, the `Sink` may be *full*, in which case it is not even possible
|
||||
/// to start the sending process.
|
||||
///
|
||||
/// As with `Future` and `Stream`, the `Sink` trait is built from a few core
|
||||
/// required methods, and a host of default methods for working in a
|
||||
/// higher-level way. The `Sink::send_all` combinator is of particular
|
||||
/// importance: you can use it to send an entire stream to a sink, which is
|
||||
/// the simplest way to ultimately consume a sink.
|
||||
///
|
||||
/// You can find more information/tutorials about streams [online at
|
||||
/// https://tokio.rs][online]
|
||||
///
|
||||
/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
|
||||
pub trait Sink {
|
||||
/// The type of value that the sink accepts.
|
||||
type SinkItem;
|
||||
|
||||
/// The type of value produced by the sink when an error occurs.
|
||||
type SinkError;
|
||||
|
||||
/// Begin the process of sending a value to the sink.
|
||||
///
|
||||
/// As the name suggests, this method only *begins* the process of sending
|
||||
/// the item. If the sink employs buffering, the item isn't fully processed
|
||||
/// until the buffer is fully flushed. Since sinks are designed to work with
|
||||
/// asynchronous I/O, the process of actually writing out the data to an
|
||||
/// underlying object takes place asynchronously. **You *must* use
|
||||
/// `poll_complete` in order to drive completion of a send**. In particular,
|
||||
/// `start_send` does not begin the flushing process.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// This method returns `AsyncSink::Ready` if the sink was able to start
|
||||
/// sending `item`. In that case, you *must* ensure that you call
|
||||
/// `poll_complete` to process the sent item to completion. Note, however,
|
||||
/// that several calls to `start_send` can be made prior to calling
|
||||
/// `poll_complete`, which will work on completing all pending items.
|
||||
///
|
||||
/// The method returns `AsyncSink::NotReady` if the sink was unable to begin
|
||||
/// sending, usually due to being full. The sink must have attempted to
|
||||
/// complete processing any outstanding requests (equivalent to
|
||||
/// `poll_complete`) before yielding this result. The current task will be
|
||||
/// automatically scheduled for notification when the sink may be ready to
|
||||
/// receive new values.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// If the sink encounters an error other than being temporarily full, it
|
||||
/// uses the `Err` variant to signal that error. In most cases, such errors
|
||||
/// mean that the sink will permanently be unable to receive items.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method may panic in a few situations, depending on the specific
|
||||
/// sink:
|
||||
///
|
||||
/// - It is called outside of the context of a task.
|
||||
/// - A previous call to `start_send` or `poll_complete` yielded an error.
|
||||
fn start_send(&mut self, item: Self::SinkItem)
|
||||
-> StartSend<Self::SinkItem, Self::SinkError>;
|
||||
|
||||
/// Flush all output from this sink, if necessary.
|
||||
///
|
||||
/// Some sinks may buffer intermediate data as an optimization to improve
|
||||
/// throughput. In other words, if a sink has a corresponding receiver then
|
||||
/// a successful `start_send` above may not guarantee that the value is
|
||||
/// actually ready to be received by the receiver. This function is intended
|
||||
/// to be used to ensure that values do indeed make their way to the
|
||||
/// receiver.
|
||||
///
|
||||
/// This function will attempt to process any pending requests on behalf of
|
||||
/// the sink and drive it to completion.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// Returns `Ok(Async::Ready(()))` when no buffered items remain. If this
|
||||
/// value is returned then it is guaranteed that all values previously sent
/// via `start_send` have been flushed and are available to a listening
|
||||
/// receiver.
|
||||
///
|
||||
/// Returns `Ok(Async::NotReady)` if there is more work left to do, in which
|
||||
/// case the current task is scheduled to wake up when more progress may be
|
||||
/// possible.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns `Err` if the sink encounters an error while processing one of
|
||||
/// its pending requests. Due to the buffered nature of requests, it is not
|
||||
/// generally possible to correlate the error with a particular request. As
|
||||
/// with `start_send`, these errors are generally "fatal" for continued use
|
||||
/// of the sink.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method may panic in a few situations, depending on the specific sink:
|
||||
///
|
||||
/// - It is called outside of the context of a task.
|
||||
/// - A previous call to `start_send` or `poll_complete` yielded an error.
|
||||
///
|
||||
/// # Compatibility notes
|
||||
///
|
||||
/// The name of this method may be slightly misleading as the original
|
||||
/// intention was to have this method be more general than just flushing
|
||||
/// requests. Over time though it was decided to trim back the ambitions of
|
||||
/// this method to what it's always done, just flushing.
|
||||
///
|
||||
/// In the 0.2 release series of futures this method will be renamed to
|
||||
/// `poll_flush`. For 0.1, however, the breaking change is not happening
|
||||
/// yet.
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError>;
|
||||
|
||||
/// A method to indicate that no more values will ever be pushed into this
|
||||
/// sink.
|
||||
///
|
||||
/// This method is used to indicate that a sink will no longer even be given
|
||||
/// another value by the caller. That is, the `start_send` method above will
|
||||
/// be called no longer (nor `poll_complete`). This method is intended to
|
||||
/// model "graceful shutdown" in various protocols where the intent to shut
|
||||
/// down is followed by a little more blocking work.
|
||||
///
|
||||
/// Callers of this function should work with it in a similar fashion to
|
||||
/// `poll_complete`. Once called it may return `NotReady` which indicates
|
||||
/// that more external work needs to happen to make progress. The current
|
||||
/// task will be scheduled to receive a notification in such an event,
|
||||
/// however.
|
||||
///
|
||||
/// Note that this function will imply `poll_complete` above. That is, if a
|
||||
/// sink has buffered data, then it'll be flushed out during a `close`
|
||||
/// operation. It is not necessary to have `poll_complete` return `Ready`
|
||||
/// before a `close` is called. Once a `close` is called, though,
|
||||
/// `poll_complete` cannot be called.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// This function, like `poll_complete`, returns a `Poll`. The value is
|
||||
/// `Ready` once the close operation has completed. At that point it should
|
||||
/// be safe to drop the sink and deallocate associated resources.
|
||||
///
|
||||
/// If the value returned is `NotReady` then the sink is not yet closed and
|
||||
/// work needs to be done to close it. The work has been scheduled and the
|
||||
/// current task will receive a notification when it's next ready to call
|
||||
/// this method again.
|
||||
///
|
||||
/// Finally, this function may also return an error.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This function will return an `Err` if any operation along the way during
|
||||
/// the close operation fails. An error typically is fatal for a sink and is
|
||||
/// unable to be recovered from, but in specific situations this may not
|
||||
/// always be true.
|
||||
///
|
||||
/// Note that it's also typically an error to call `start_send` or
|
||||
/// `poll_complete` after the `close` function is called. This method will
|
||||
/// *initiate* a close, and continuing to send values after that (or attempting
|
||||
/// to flush) may result in strange behavior, panics, errors, etc. Once this
|
||||
/// method is called, it must be the only method called on this `Sink`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method may panic or cause panics if:
|
||||
///
|
||||
/// * It is called outside the context of a future's task
|
||||
/// * It is called and then `start_send` or `poll_complete` is called
|
||||
///
|
||||
/// # Compatibility notes
|
||||
///
|
||||
/// Note that this function is currently by default a provided function,
|
||||
/// defaulted to calling `poll_complete` above. This function was added
|
||||
/// in the 0.1 series of the crate as a backwards-compatible addition. It
|
||||
/// is intended that in the 0.2 series the method will no longer be a
|
||||
/// default method.
|
||||
///
|
||||
/// It is highly recommended to consider this method a required method and
|
||||
/// to implement it whenever you implement `Sink` locally. It is especially
|
||||
/// crucial to be sure to close inner sinks, if applicable.
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.poll_complete()
|
||||
}
|
||||
|
||||
/// dox (you should see the above, not this)
|
||||
#[cfg(not(feature = "with-deprecated"))]
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError>;
|
||||
|
||||
/// Creates a new object which will produce a synchronous sink.
|
||||
///
|
||||
/// The sink returned does **not** implement the `Sink` trait, and instead
|
||||
/// only has two methods: `send` and `flush`. These two methods correspond
|
||||
/// to `start_send` and `poll_complete` above, except that they are executed in a
|
||||
/// blocking fashion.
|
||||
#[cfg(feature = "use_std")]
|
||||
fn wait(self) -> Wait<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
wait::new(self)
|
||||
}
|
||||
|
||||
/// Composes a function *in front of* the sink.
|
||||
///
|
||||
/// This adapter produces a new sink that passes each value through the
|
||||
/// given function `f` before sending it to `self`.
|
||||
///
|
||||
/// To process each value, `f` produces a *future*, which is then polled to
|
||||
/// completion before passing its result down to the underlying sink. If the
|
||||
/// future produces an error, that error is returned by the new sink.
|
||||
///
|
||||
/// Note that this function consumes the given sink, returning a wrapped
|
||||
/// version, much like `Iterator::map`.
|
||||
fn with<U, F, Fut>(self, f: F) -> With<Self, U, F, Fut>
|
||||
where F: FnMut(U) -> Fut,
|
||||
Fut: IntoFuture<Item = Self::SinkItem>,
|
||||
Fut::Error: From<Self::SinkError>,
|
||||
Self: Sized
|
||||
{
|
||||
with::new(self, f)
|
||||
}
|
||||
|
||||
/*
|
||||
fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F>
|
||||
where F: FnMut(U) -> Self::SinkItem,
|
||||
Self: Sized;
|
||||
|
||||
fn with_filter<F>(self, f: F) -> WithFilter<Self, F>
|
||||
where F: FnMut(Self::SinkItem) -> bool,
|
||||
Self: Sized;
|
||||
|
||||
fn with_filter_map<U, F>(self, f: F) -> WithFilterMap<Self, U, F>
|
||||
where F: FnMut(U) -> Option<Self::SinkItem>,
|
||||
Self: Sized;
|
||||
*/
|
||||
|
||||
/// Transforms the error returned by the sink.
|
||||
fn sink_map_err<F, E>(self, f: F) -> SinkMapErr<Self, F>
|
||||
where F: FnOnce(Self::SinkError) -> E,
|
||||
Self: Sized,
|
||||
{
|
||||
map_err::new(self, f)
|
||||
}
|
||||
|
||||
/// Map this sink's error to any error implementing `From` for this sink's
|
||||
/// `Error`, returning a new sink.
|
||||
///
|
||||
/// If you want to map the errors of a `Sink + Stream`, use `.sink_from_err().from_err()`.
|
||||
fn sink_from_err<E: From<Self::SinkError>>(self) -> from_err::SinkFromErr<Self, E>
|
||||
where Self: Sized,
|
||||
{
|
||||
from_err::new(self)
|
||||
}
|
||||
|
||||
|
||||
/// Adds a fixed-size buffer to the current sink.
|
||||
///
|
||||
/// The resulting sink will buffer up to `amt` items when the underlying
|
||||
/// sink is unwilling to accept additional items. Calling `poll_complete` on
|
||||
/// the buffered sink will attempt to both empty the buffer and complete
|
||||
/// processing on the underlying sink.
|
||||
///
|
||||
/// Note that this function consumes the given sink, returning a wrapped
|
||||
/// version, much like `Iterator::map`.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
#[cfg(feature = "use_std")]
|
||||
fn buffer(self, amt: usize) -> Buffer<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
buffer::new(self, amt)
|
||||
}
|
||||
|
||||
/// A future that completes when the sink has finished processing all
|
||||
/// pending requests.
|
||||
///
|
||||
/// The sink itself is returned after flushing is complete; this adapter is
|
||||
/// intended to be used when you want to stop sending to the sink until
|
||||
/// all current requests are processed.
|
||||
fn flush(self) -> Flush<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
flush::new(self)
|
||||
}
|
||||
|
||||
/// A future that completes after the given item has been fully processed
|
||||
/// into the sink, including flushing.
|
||||
///
|
||||
/// Note that, **because of the flushing requirement, it is usually better
|
||||
/// to batch together items to send via `send_all`, rather than flushing
|
||||
/// between each item.**
|
||||
///
|
||||
/// On completion, the sink is returned.
|
||||
fn send(self, item: Self::SinkItem) -> Send<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
send::new(self, item)
|
||||
}
|
||||
|
||||
/// A future that completes after the given stream has been fully processed
|
||||
/// into the sink, including flushing.
|
||||
///
|
||||
/// This future will drive the stream to keep producing items until it is
|
||||
/// exhausted, sending each item to the sink. It will complete once both the
|
||||
/// stream is exhausted, and the sink has fully processed and flushed all of
|
||||
/// the items sent to it.
|
||||
///
|
||||
/// Doing `sink.send_all(stream)` is roughly equivalent to
|
||||
/// `stream.forward(sink)`.
|
||||
///
|
||||
/// On completion, the pair `(sink, source)` is returned.
|
||||
fn send_all<S>(self, stream: S) -> SendAll<Self, S>
|
||||
where S: Stream<Item = Self::SinkItem>,
|
||||
Self::SinkError: From<S::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
send_all::new(self, stream)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, S: ?Sized + Sink> Sink for &'a mut S {
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem)
|
||||
-> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
(**self).start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
(**self).poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
(**self).close()
|
||||
}
|
||||
}
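// A minimal usage sketch (illustrative only, not from the upstream source) of
// the two-phase send protocol described above, driven by hand against the
// `Vec<T>` sink defined earlier in this module. `Vec`'s implementation never
// blocks and never parks a task, so `start_send` and `poll_complete` can
// safely be called outside a task here.
#[cfg(feature = "use_std")]
#[test]
fn two_phase_send_by_hand() {
    use std::prelude::v1::*;
    use {Async, AsyncSink};

    let mut sink: Vec<i32> = Vec::new();

    // Phase one: hand the value to the sink.
    assert_eq!(sink.start_send(1), Ok(AsyncSink::Ready));
    // Phase two: flush until the sink reports completion.
    assert_eq!(sink.poll_complete(), Ok(Async::Ready(())));
    assert_eq!(sink, vec![1]);
}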
|
|
@ -0,0 +1,59 @@
|
|||
use {Poll, Async, Future, AsyncSink};
|
||||
use sink::Sink;
|
||||
|
||||
/// Future for the `Sink::send` combinator, which sends a value to a sink and
|
||||
/// then waits until the sink has fully flushed.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Send<S: Sink> {
|
||||
sink: Option<S>,
|
||||
item: Option<S::SinkItem>,
|
||||
}
|
||||
|
||||
pub fn new<S: Sink>(sink: S, item: S::SinkItem) -> Send<S> {
|
||||
Send {
|
||||
sink: Some(sink),
|
||||
item: Some(item),
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Send<S> {
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
self.sink.as_ref().take().expect("Attempted Send::get_ref after completion")
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.sink.as_mut().take().expect("Attempted Send::get_mut after completion")
|
||||
}
|
||||
|
||||
fn sink_mut(&mut self) -> &mut S {
|
||||
self.sink.as_mut().take().expect("Attempted to poll Send after completion")
|
||||
}
|
||||
|
||||
fn take_sink(&mut self) -> S {
|
||||
self.sink.take().expect("Attempted to poll Send after completion")
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Future for Send<S> {
|
||||
type Item = S;
|
||||
type Error = S::SinkError;
|
||||
|
||||
fn poll(&mut self) -> Poll<S, S::SinkError> {
|
||||
if let Some(item) = self.item.take() {
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
|
||||
self.item = Some(item);
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
// we're done sending the item, but want to block on flushing the
|
||||
// sink
|
||||
try_ready!(self.sink_mut().poll_complete());
|
||||
|
||||
// now everything's emptied, so return the sink for further use
|
||||
return Ok(Async::Ready(self.take_sink()))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
use {Poll, Async, Future, AsyncSink};
|
||||
use stream::{Stream, Fuse};
|
||||
use sink::Sink;
|
||||
|
||||
/// Future for the `Sink::send_all` combinator, which sends a stream of values
|
||||
/// to a sink and then waits until the sink has fully flushed those values.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct SendAll<T, U: Stream> {
|
||||
sink: Option<T>,
|
||||
stream: Option<Fuse<U>>,
|
||||
buffered: Option<U::Item>,
|
||||
}
|
||||
|
||||
pub fn new<T, U>(sink: T, stream: U) -> SendAll<T, U>
|
||||
where T: Sink,
|
||||
U: Stream<Item = T::SinkItem>,
|
||||
T::SinkError: From<U::Error>,
|
||||
{
|
||||
SendAll {
|
||||
sink: Some(sink),
|
||||
stream: Some(stream.fuse()),
|
||||
buffered: None,
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> SendAll<T, U>
|
||||
where T: Sink,
|
||||
U: Stream<Item = T::SinkItem>,
|
||||
T::SinkError: From<U::Error>,
|
||||
{
|
||||
fn sink_mut(&mut self) -> &mut T {
|
||||
self.sink.as_mut().take().expect("Attempted to poll SendAll after completion")
|
||||
}
|
||||
|
||||
fn stream_mut(&mut self) -> &mut Fuse<U> {
|
||||
self.stream.as_mut().take()
|
||||
.expect("Attempted to poll SendAll after completion")
|
||||
}
|
||||
|
||||
fn take_result(&mut self) -> (T, U) {
|
||||
let sink = self.sink.take()
|
||||
.expect("Attempted to poll Forward after completion");
|
||||
let fuse = self.stream.take()
|
||||
.expect("Attempted to poll Forward after completion");
|
||||
return (sink, fuse.into_inner());
|
||||
}
|
||||
|
||||
fn try_start_send(&mut self, item: U::Item) -> Poll<(), T::SinkError> {
|
||||
debug_assert!(self.buffered.is_none());
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
|
||||
self.buffered = Some(item);
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Future for SendAll<T, U>
|
||||
where T: Sink,
|
||||
U: Stream<Item = T::SinkItem>,
|
||||
T::SinkError: From<U::Error>,
|
||||
{
|
||||
type Item = (T, U);
|
||||
type Error = T::SinkError;
|
||||
|
||||
fn poll(&mut self) -> Poll<(T, U), T::SinkError> {
|
||||
// If we've got an item buffered already, we need to write it to the
|
||||
// sink before we can do anything else
|
||||
if let Some(item) = self.buffered.take() {
|
||||
try_ready!(self.try_start_send(item))
|
||||
}
|
||||
|
||||
loop {
|
||||
match try!(self.stream_mut().poll()) {
|
||||
Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
|
||||
Async::Ready(None) => {
|
||||
try_ready!(self.sink_mut().close());
|
||||
return Ok(Async::Ready(self.take_result()))
|
||||
}
|
||||
Async::NotReady => {
|
||||
try_ready!(self.sink_mut().poll_complete());
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
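// A minimal usage sketch (illustrative only, not from the upstream source):
// `send_all` drains a whole stream into a sink and hands both back once
// everything has been flushed and the sink has been closed. Assumes
// `stream::iter` from this crate for building a stream out of an iterator of
// `Result`s.
#[cfg(feature = "use_std")]
#[test]
fn send_all_drains_the_stream() {
    use std::prelude::v1::*;
    use stream;

    let source = stream::iter(vec![Ok::<i32, ()>(1), Ok(2), Ok(3)].into_iter());
    let (sink, _source) = Vec::<i32>::new()
        .send_all(source)
        .wait()
        .expect("send_all failed");
    assert_eq!(sink, vec![1, 2, 3]);
}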
|
|
@ -0,0 +1,50 @@
|
|||
use sink::Sink;
|
||||
use executor;
|
||||
|
||||
/// A sink combinator which converts an asynchronous sink to a **blocking
|
||||
/// sink**.
|
||||
///
|
||||
/// Created by the `Sink::wait` method, this adapter transforms any sink into a
|
||||
/// blocking version. This is implemented by blocking the current thread when a
|
||||
/// sink is otherwise unable to make progress.
|
||||
#[must_use = "sinks do nothing unless used"]
|
||||
#[derive(Debug)]
|
||||
pub struct Wait<S> {
|
||||
sink: executor::Spawn<S>,
|
||||
}
|
||||
|
||||
pub fn new<S: Sink>(s: S) -> Wait<S> {
|
||||
Wait {
|
||||
sink: executor::spawn(s),
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Wait<S> {
|
||||
/// Sends a value to this sink, blocking the current thread until it's able
|
||||
/// to do so.
|
||||
///
|
||||
/// This function will take the `value` provided and call the underlying
|
||||
/// sink's `start_send` function until it's ready to accept the value. If
|
||||
/// the function returns `NotReady` then the current thread is blocked
|
||||
/// until it is otherwise ready to accept the value.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// If `Ok(())` is returned then the `value` provided was successfully sent
|
||||
/// along the sink, and if `Err(e)` is returned then an error occurred
|
||||
/// which prevented the value from being sent.
|
||||
pub fn send(&mut self, value: S::SinkItem) -> Result<(), S::SinkError> {
|
||||
self.sink.wait_send(value)
|
||||
}
|
||||
|
||||
/// Flushes any buffered data in this sink, blocking the current thread
|
||||
/// until it's entirely flushed.
|
||||
///
|
||||
/// This function will call the underlying sink's `poll_complete` method
|
||||
/// until it returns that it's ready to proceed. If the method returns
|
||||
/// `NotReady` the current thread will be blocked until it's otherwise
|
||||
/// ready to proceed.
|
||||
pub fn flush(&mut self) -> Result<(), S::SinkError> {
|
||||
self.sink.wait_flush()
|
||||
}
|
||||
}
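// A minimal usage sketch (illustrative only, not from the upstream source):
// `Sink::wait` produces a blocking wrapper whose `send` and `flush` only
// return once the wrapped sink has accepted and flushed the value. With
// `Vec<T>` as the sink neither call ever actually blocks.
#[cfg(feature = "use_std")]
#[test]
fn blocking_send_and_flush() {
    use std::prelude::v1::*;

    let mut blocking = Vec::<i32>::new().wait();
    blocking.send(1).expect("send failed");
    blocking.flush().expect("flush failed");
}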
|
|
@ -0,0 +1,145 @@
|
|||
use core::mem;
|
||||
use core::marker::PhantomData;
|
||||
|
||||
use {IntoFuture, Future, Poll, Async, StartSend, AsyncSink};
|
||||
use sink::Sink;
|
||||
use stream::Stream;
|
||||
|
||||
/// Sink for the `Sink::with` combinator, chaining a computation to run *prior*
|
||||
/// to pushing a value into the underlying sink.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "sinks do nothing unless polled"]
|
||||
pub struct With<S, U, F, Fut>
|
||||
where S: Sink,
|
||||
F: FnMut(U) -> Fut,
|
||||
Fut: IntoFuture,
|
||||
{
|
||||
sink: S,
|
||||
f: F,
|
||||
state: State<Fut::Future, S::SinkItem>,
|
||||
_phantom: PhantomData<fn(U)>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum State<Fut, T> {
|
||||
Empty,
|
||||
Process(Fut),
|
||||
Buffered(T),
|
||||
}
|
||||
|
||||
impl<Fut, T> State<Fut, T> {
|
||||
fn is_empty(&self) -> bool {
|
||||
if let State::Empty = *self {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new<S, U, F, Fut>(sink: S, f: F) -> With<S, U, F, Fut>
|
||||
where S: Sink,
|
||||
F: FnMut(U) -> Fut,
|
||||
Fut: IntoFuture<Item = S::SinkItem>,
|
||||
Fut::Error: From<S::SinkError>,
|
||||
{
|
||||
With {
|
||||
state: State::Empty,
|
||||
sink: sink,
|
||||
f: f,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Stream from the underlying sink
|
||||
impl<S, U, F, Fut> Stream for With<S, U, F, Fut>
|
||||
where S: Stream + Sink,
|
||||
F: FnMut(U) -> Fut,
|
||||
Fut: IntoFuture
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.sink.poll()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, U, F, Fut> With<S, U, F, Fut>
|
||||
where S: Sink,
|
||||
F: FnMut(U) -> Fut,
|
||||
Fut: IntoFuture<Item = S::SinkItem>,
|
||||
Fut::Error: From<S::SinkError>,
|
||||
{
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.sink
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.sink
|
||||
}
|
||||
|
||||
fn poll(&mut self) -> Poll<(), Fut::Error> {
|
||||
loop {
|
||||
match mem::replace(&mut self.state, State::Empty) {
|
||||
State::Empty => break,
|
||||
State::Process(mut fut) => {
|
||||
match try!(fut.poll()) {
|
||||
Async::Ready(item) => {
|
||||
self.state = State::Buffered(item);
|
||||
}
|
||||
Async::NotReady => {
|
||||
self.state = State::Process(fut);
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
State::Buffered(item) => {
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink.start_send(item)) {
|
||||
self.state = State::Buffered(item);
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if self.state.is_empty() {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, U, F, Fut> Sink for With<S, U, F, Fut>
|
||||
where S: Sink,
|
||||
F: FnMut(U) -> Fut,
|
||||
Fut: IntoFuture<Item = S::SinkItem>,
|
||||
Fut::Error: From<S::SinkError>,
|
||||
{
|
||||
type SinkItem = U;
|
||||
type SinkError = Fut::Error;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Fut::Error> {
|
||||
if try!(self.poll()).is_not_ready() {
|
||||
return Ok(AsyncSink::NotReady(item))
|
||||
}
|
||||
self.state = State::Process((self.f)(item).into_future());
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Fut::Error> {
|
||||
// poll ourselves first, to push data downward
|
||||
let me_ready = try!(self.poll());
|
||||
// always propagate `poll_complete` downward to attempt to make progress
|
||||
try_ready!(self.sink.poll_complete());
|
||||
Ok(me_ready)
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Fut::Error> {
|
||||
try_ready!(self.poll());
|
||||
Ok(try!(self.sink.close()))
|
||||
}
|
||||
}
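// A minimal usage sketch (illustrative only, not from the upstream source):
// `with` runs every incoming value through the closure first; the closure
// returns a `Result`, which implements `IntoFuture`, and only the resolved
// value reaches the wrapped sink.
#[cfg(feature = "use_std")]
#[test]
fn with_maps_values_before_the_sink() {
    use std::prelude::v1::*;

    let lengths = Vec::<i32>::new()
        .with(|word: String| -> Result<i32, ()> { Ok(word.len() as i32) });
    let lengths = lengths.send("hello".to_string()).wait().expect("send failed");
    assert_eq!(lengths.get_ref(), &vec![5]);
}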
|
|
@ -0,0 +1,140 @@
|
|||
//! A lock-free stack which supports concurrent pushes and a concurrent call to
|
||||
//! drain the entire stack all at once.
|
||||
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::atomic::AtomicPtr;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
|
||||
use task::EventSet;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Stack<T> {
|
||||
head: AtomicPtr<Node<T>>,
|
||||
}
|
||||
|
||||
struct Node<T> {
|
||||
data: T,
|
||||
next: *mut Node<T>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Drain<T> {
|
||||
head: *mut Node<T>,
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for Drain<T> {}
|
||||
unsafe impl<T: Sync> Sync for Drain<T> {}
|
||||
|
||||
impl<T> Stack<T> {
|
||||
pub fn new() -> Stack<T> {
|
||||
Stack {
|
||||
head: AtomicPtr::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push(&self, data: T) {
|
||||
let mut node = Box::new(Node { data: data, next: ptr::null_mut() });
|
||||
let mut head = self.head.load(SeqCst);
|
||||
loop {
|
||||
node.next = head;
|
||||
match self.head.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
|
||||
Ok(_) => {
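// The raw pointer just stored in `self.head` now owns the allocation;
// skip the `Box` destructor so the node isn't freed here (a later
// `Drain` rebuilds the `Box` with `Box::from_raw`).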
|
||||
mem::forget(node);
|
||||
return
|
||||
}
|
||||
Err(cur) => head = cur,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn drain(&self) -> Drain<T> {
|
||||
Drain {
|
||||
head: self.head.swap(ptr::null_mut(), SeqCst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Stack<T> {
|
||||
fn drop(&mut self) {
|
||||
self.drain();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Iterator for Drain<T> {
|
||||
type Item = T;
|
||||
|
||||
fn next(&mut self) -> Option<T> {
|
||||
if self.head.is_null() {
|
||||
return None
|
||||
}
|
||||
unsafe {
|
||||
let node = Box::from_raw(self.head);
|
||||
self.head = node.next;
|
||||
return Some(node.data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Drain<T> {
|
||||
fn drop(&mut self) {
|
||||
for item in self.by_ref() {
|
||||
drop(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::prelude::v1::*;
|
||||
use std::rc::Rc;
|
||||
use std::cell::Cell;
|
||||
|
||||
use super::Stack;
|
||||
|
||||
struct Set(Rc<Cell<usize>>, usize);
|
||||
|
||||
impl Drop for Set {
|
||||
fn drop(&mut self) {
|
||||
self.0.set(self.1);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple() {
|
||||
let s = Stack::new();
|
||||
s.push(1);
|
||||
s.push(2);
|
||||
s.push(4);
|
||||
assert_eq!(s.drain().collect::<Vec<_>>(), vec![4, 2, 1]);
|
||||
s.push(5);
|
||||
assert_eq!(s.drain().collect::<Vec<_>>(), vec![5]);
|
||||
assert_eq!(s.drain().collect::<Vec<_>>(), vec![]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drain_drops() {
|
||||
let data = Rc::new(Cell::new(0));
|
||||
let s = Stack::new();
|
||||
s.push(Set(data.clone(), 1));
|
||||
drop(s.drain());
|
||||
assert_eq!(data.get(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drop_drops() {
|
||||
let data = Rc::new(Cell::new(0));
|
||||
let s = Stack::new();
|
||||
s.push(Set(data.clone(), 1));
|
||||
drop(s);
|
||||
assert_eq!(data.get(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
impl EventSet for Stack<usize> {
|
||||
fn insert(&self, id: usize) {
|
||||
self.push(id);
|
||||
}
|
||||
}
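
The `Stack` type above is private to the crate, so it cannot be exercised from outside; the sketch below is a hypothetical extra unit test written in the style of the module's own `tests` block, and it assumes it lives inside this `stack` module. It illustrates the intended usage: any number of threads may `push` concurrently, and a single `drain` takes the whole stack at once.

```rust
// Hypothetical additional test; assumes it sits inside this `stack` module.
#[test]
fn concurrent_pushes() {
    use std::sync::Arc;
    use std::thread;

    let stack = Arc::new(Stack::new());
    // Pushes are lock-free, so several threads may push at the same time.
    let handles: Vec<_> = (0..4)
        .map(|i| {
            let stack = stack.clone();
            thread::spawn(move || stack.push(i))
        })
        .collect();
    for handle in handles {
        handle.join().unwrap();
    }

    // A single `drain` atomically takes everything pushed so far.
    let mut drained: Vec<usize> = stack.drain().collect();
    drained.sort();
    assert_eq!(drained, vec![0, 1, 2, 3]);
}
```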
|
|
@ -0,0 +1,79 @@
|
|||
use {IntoFuture, Future, Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which chains a computation onto values produced by a
|
||||
/// stream.
|
||||
///
|
||||
/// This structure is produced by the `Stream::and_then` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct AndThen<S, F, U>
|
||||
where U: IntoFuture,
|
||||
{
|
||||
stream: S,
|
||||
future: Option<U::Future>,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F, U>(s: S, f: F) -> AndThen<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> U,
|
||||
U: IntoFuture<Error=S::Error>,
|
||||
{
|
||||
AndThen {
|
||||
stream: s,
|
||||
future: None,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F, U: IntoFuture> ::sink::Sink for AndThen<S, F, U>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> Stream for AndThen<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> U,
|
||||
U: IntoFuture<Error=S::Error>,
|
||||
{
|
||||
type Item = U::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<U::Item>, S::Error> {
|
||||
if self.future.is_none() {
|
||||
let item = match try_ready!(self.stream.poll()) {
|
||||
None => return Ok(Async::Ready(None)),
|
||||
Some(e) => e,
|
||||
};
|
||||
self.future = Some((self.f)(item).into_future());
|
||||
}
|
||||
assert!(self.future.is_some());
|
||||
match self.future.as_mut().unwrap().poll() {
|
||||
Ok(Async::Ready(e)) => {
|
||||
self.future = None;
|
||||
Ok(Async::Ready(Some(e)))
|
||||
}
|
||||
Err(e) => {
|
||||
self.future = None;
|
||||
Err(e)
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
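
As a usage sketch (not part of the vendored diff itself), this is how `Stream::and_then` behaves against the public futures 0.1 API: each item is fed through a closure whose return value is converted into a future, and a plain `Result` suffices for a synchronous step. The default `use_std` feature is assumed so that `wait` is available.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    // The closure returns a `Result`, which implements `IntoFuture`.
    let doubled = stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3)])
        .and_then(|x| Ok(x * 2))
        .collect() // future resolving to a Vec of all items
        .wait()
        .unwrap();
    assert_eq!(doubled, vec![2, 4, 6]);
}
```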
|
|
@ -0,0 +1,180 @@
|
|||
use std::prelude::v1::*;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::sync::Arc;
|
||||
|
||||
use task::{self, UnparkEvent};
|
||||
|
||||
use {Async, IntoFuture, Poll, Future};
|
||||
use stream::{Stream, Fuse};
|
||||
use stack::{Stack, Drain};
|
||||
|
||||
/// An adaptor for a stream of futures to execute the futures concurrently, if
|
||||
/// possible, delivering results as they become available.
|
||||
///
|
||||
/// This adaptor will buffer up a list of pending futures, and then return their
|
||||
/// results in the order that they complete. This is created by the
|
||||
/// `Stream::buffer_unordered` method.
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct BufferUnordered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture,
|
||||
{
|
||||
stream: Fuse<S>,
|
||||
|
||||
// A slab of futures that are being executed. Each slot in this vector is
|
||||
// either an active future or a pointer to the next empty slot. This is used
|
||||
// to get O(1) deallocation in the slab and O(1) allocation.
|
||||
//
|
||||
// The `next_future` field is the next slot in the `futures` array that's a
|
||||
// `Slot::Next` variant. If it points to the end of the array then the array
|
||||
// is full.
|
||||
futures: Vec<Slot<<S::Item as IntoFuture>::Future>>,
|
||||
next_future: usize,
|
||||
|
||||
// A list of events that will get pushed onto concurrently by our many
|
||||
// futures. This is filled in and used with the `with_unpark_event`
|
||||
    // function. The `pending` list holds the events from the last time we drained
|
||||
// our stack.
|
||||
stack: Arc<Stack<usize>>,
|
||||
pending: Drain<usize>,
|
||||
|
||||
// Number of active futures running in the `futures` slab
|
||||
active: usize,
|
||||
}
|
||||
|
||||
impl<S> fmt::Debug for BufferUnordered<S>
|
||||
where S: Stream + fmt::Debug,
|
||||
S::Item: IntoFuture,
|
||||
<<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("BufferUnordered")
|
||||
.field("stream", &self.stream)
|
||||
.field("futures", &self.futures)
|
||||
.field("next_future", &self.next_future)
|
||||
.field("stack", &self.stack)
|
||||
.field("pending", &self.pending)
|
||||
.field("active", &self.active)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Slot<T> {
|
||||
Next(usize),
|
||||
Data(T),
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, amt: usize) -> BufferUnordered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
BufferUnordered {
|
||||
stream: super::fuse::new(s),
|
||||
futures: (0..amt).map(|i| Slot::Next(i + 1)).collect(),
|
||||
next_future: 0,
|
||||
pending: Stack::new().drain(),
|
||||
stack: Arc::new(Stack::new()),
|
||||
active: 0,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> BufferUnordered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
fn poll_pending(&mut self)
|
||||
-> Option<Poll<Option<<S::Item as IntoFuture>::Item>,
|
||||
S::Error>> {
|
||||
while let Some(idx) = self.pending.next() {
|
||||
let result = match self.futures[idx] {
|
||||
Slot::Data(ref mut f) => {
|
||||
let event = UnparkEvent::new(self.stack.clone(), idx);
|
||||
match task::with_unpark_event(event, || f.poll()) {
|
||||
Ok(Async::NotReady) => continue,
|
||||
Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
},
|
||||
Slot::Next(_) => continue,
|
||||
};
|
||||
self.active -= 1;
|
||||
self.futures[idx] = Slot::Next(self.next_future);
|
||||
self.next_future = idx;
|
||||
return Some(result)
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for BufferUnordered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
type Item = <S::Item as IntoFuture>::Item;
|
||||
type Error = <S as Stream>::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
// First up, try to spawn off as many futures as possible by filling up
|
||||
// our slab of futures.
|
||||
while self.next_future < self.futures.len() {
|
||||
let future = match try!(self.stream.poll()) {
|
||||
Async::Ready(Some(s)) => s.into_future(),
|
||||
Async::Ready(None) |
|
||||
Async::NotReady => break,
|
||||
};
|
||||
self.active += 1;
|
||||
self.stack.push(self.next_future);
|
||||
match mem::replace(&mut self.futures[self.next_future],
|
||||
Slot::Data(future)) {
|
||||
Slot::Next(next) => self.next_future = next,
|
||||
Slot::Data(_) => panic!(),
|
||||
}
|
||||
}
|
||||
|
||||
// Next, see if our list of `pending` events from last time has any
|
||||
// items, and if so process them here.
|
||||
if let Some(ret) = self.poll_pending() {
|
||||
return ret
|
||||
}
|
||||
|
||||
// And finally, take a look at our stack of events, attempting to
|
||||
// process all of those.
|
||||
assert!(self.pending.next().is_none());
|
||||
self.pending = self.stack.drain();
|
||||
if let Some(ret) = self.poll_pending() {
|
||||
return ret
|
||||
}
|
||||
|
||||
// If we've gotten this far then there's no events for us to process and
|
||||
// nothing was ready, so figure out if we're not done yet or if we've
|
||||
// reached the end.
|
||||
Ok(if self.active > 0 || !self.stream.is_done() {
|
||||
Async::NotReady
|
||||
} else {
|
||||
Async::Ready(None)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for BufferUnordered<S>
|
||||
where S: ::sink::Sink + Stream,
|
||||
S::Item: IntoFuture,
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
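
A small usage sketch of `buffer_unordered`, assuming the public futures 0.1 API (`stream::iter`, `future::ok`, the default `use_std` feature): the stream's items are themselves futures, at most `amt` of them are polled concurrently, and results are yielded as each future finishes rather than in submission order.

```rust
extern crate futures;

use futures::{future, Future};
use futures::stream::{self, Stream};

fn main() {
    // A stream whose items are futures; here every future is already ready.
    let jobs = vec![
        Ok::<_, ()>(future::ok::<u32, ()>(1)),
        Ok(future::ok(2)),
        Ok(future::ok(3)),
    ];

    // Poll at most two of the futures at a time.
    let results = stream::iter(jobs)
        .buffer_unordered(2)
        .collect()
        .wait()
        .unwrap();

    // Completion order is not guaranteed, so check the contents, not the order.
    assert_eq!(results.iter().sum::<u32>(), 6);
}
```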
|
|
@ -0,0 +1,142 @@
|
|||
use std::prelude::v1::*;
|
||||
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
|
||||
use {Async, IntoFuture, Poll, Future};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
/// An adaptor for a stream of futures to execute the futures concurrently, if
|
||||
/// possible.
|
||||
///
|
||||
/// This adaptor will buffer up a list of pending futures, and then return their
|
||||
/// results in the order that they were pulled out of the original stream. This
|
||||
/// is created by the `Stream::buffered` method.
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Buffered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture,
|
||||
{
|
||||
stream: Fuse<S>,
|
||||
futures: Vec<State<<S::Item as IntoFuture>::Future>>,
|
||||
cur: usize,
|
||||
}
|
||||
|
||||
impl<S> fmt::Debug for Buffered<S>
|
||||
where S: Stream + fmt::Debug,
|
||||
S::Item: IntoFuture,
|
||||
<<S as Stream>::Item as IntoFuture>::Future: fmt::Debug,
|
||||
<<S as Stream>::Item as IntoFuture>::Item: fmt::Debug,
|
||||
<<S as Stream>::Item as IntoFuture>::Error: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("Stream")
|
||||
.field("stream", &self.stream)
|
||||
.field("futures", &self.futures)
|
||||
.field("cur", &self.cur)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum State<S: Future> {
|
||||
Empty,
|
||||
Running(S),
|
||||
Finished(Result<S::Item, S::Error>),
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, amt: usize) -> Buffered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
Buffered {
|
||||
stream: super::fuse::new(s),
|
||||
futures: (0..amt).map(|_| State::Empty).collect(),
|
||||
cur: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Buffered<S>
|
||||
where S: ::sink::Sink + Stream,
|
||||
S::Item: IntoFuture,
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for Buffered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
type Item = <S::Item as IntoFuture>::Item;
|
||||
type Error = <S as Stream>::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
// First, try to fill in all the futures
|
||||
for i in 0..self.futures.len() {
|
||||
let mut idx = self.cur + i;
|
||||
if idx >= self.futures.len() {
|
||||
idx -= self.futures.len();
|
||||
}
|
||||
|
||||
if let State::Empty = self.futures[idx] {
|
||||
match try!(self.stream.poll()) {
|
||||
Async::Ready(Some(future)) => {
|
||||
let future = future.into_future();
|
||||
self.futures[idx] = State::Running(future);
|
||||
}
|
||||
Async::Ready(None) => break,
|
||||
Async::NotReady => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, try and step all the futures forward
|
||||
for future in self.futures.iter_mut() {
|
||||
let result = match *future {
|
||||
State::Running(ref mut s) => {
|
||||
match s.poll() {
|
||||
Ok(Async::NotReady) => continue,
|
||||
Ok(Async::Ready(e)) => Ok(e),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
_ => continue,
|
||||
};
|
||||
*future = State::Finished(result);
|
||||
}
|
||||
|
||||
// Check to see if our current future is done.
|
||||
if let State::Finished(_) = self.futures[self.cur] {
|
||||
let r = match mem::replace(&mut self.futures[self.cur], State::Empty) {
|
||||
State::Finished(r) => r,
|
||||
_ => panic!(),
|
||||
};
|
||||
self.cur += 1;
|
||||
if self.cur >= self.futures.len() {
|
||||
self.cur = 0;
|
||||
}
|
||||
return Ok(Async::Ready(Some(try!(r))))
|
||||
}
|
||||
|
||||
if self.stream.is_done() {
|
||||
if let State::Empty = self.futures[self.cur] {
|
||||
return Ok(Async::Ready(None))
|
||||
}
|
||||
}
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
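
For contrast with `buffer_unordered`, a sketch of `buffered` under the same assumptions (public futures 0.1 API, default features): up to `amt` futures run at a time, but results come back in the order the futures were pulled from the stream.

```rust
extern crate futures;

use futures::{future, Future};
use futures::stream::{self, Stream};

fn main() {
    let jobs = vec![
        Ok::<_, ()>(future::ok::<u32, ()>(1)),
        Ok(future::ok(2)),
        Ok(future::ok(3)),
    ];

    // Up to two futures are in flight, yet the output order is preserved.
    let results = stream::iter(jobs)
        .buffered(2)
        .collect()
        .wait()
        .unwrap();
    assert_eq!(results, vec![1, 2, 3]);
}
```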
|
|
@ -0,0 +1,71 @@
|
|||
use std::prelude::v1::*;
|
||||
use std::any::Any;
|
||||
use std::panic::{catch_unwind, UnwindSafe, AssertUnwindSafe};
|
||||
use std::mem;
|
||||
|
||||
use super::super::{Poll, Async};
|
||||
use super::Stream;
|
||||
|
||||
/// Stream for the `catch_unwind` combinator.
|
||||
///
|
||||
/// This is created by the `Stream::catch_unwind` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct CatchUnwind<S> where S: Stream {
|
||||
state: CatchUnwindState<S>,
|
||||
}
|
||||
|
||||
pub fn new<S>(stream: S) -> CatchUnwind<S>
|
||||
where S: Stream + UnwindSafe,
|
||||
{
|
||||
CatchUnwind {
|
||||
state: CatchUnwindState::Stream(stream),
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum CatchUnwindState<S> {
|
||||
Stream(S),
|
||||
Eof,
|
||||
Done,
|
||||
}
|
||||
|
||||
impl<S> Stream for CatchUnwind<S>
|
||||
where S: Stream + UnwindSafe,
|
||||
{
|
||||
type Item = Result<S::Item, S::Error>;
|
||||
type Error = Box<Any + Send>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
let mut stream = match mem::replace(&mut self.state, CatchUnwindState::Eof) {
|
||||
CatchUnwindState::Done => panic!("cannot poll after eof"),
|
||||
CatchUnwindState::Eof => {
|
||||
self.state = CatchUnwindState::Done;
|
||||
return Ok(Async::Ready(None));
|
||||
}
|
||||
CatchUnwindState::Stream(stream) => stream,
|
||||
};
|
||||
let res = catch_unwind(|| (stream.poll(), stream));
|
||||
match res {
|
||||
Err(e) => Err(e), // and state is already Eof
|
||||
Ok((poll, stream)) => {
|
||||
self.state = CatchUnwindState::Stream(stream);
|
||||
match poll {
|
||||
Err(e) => Ok(Async::Ready(Some(Err(e)))),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Ok(Async::Ready(Some(r))) => Ok(Async::Ready(Some(Ok(r)))),
|
||||
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Stream for AssertUnwindSafe<S> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.0.poll()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
use core::mem;
|
||||
|
||||
use stream::Stream;
|
||||
use {Async, Poll};
|
||||
|
||||
|
||||
/// State of chain stream.
|
||||
#[derive(Debug)]
|
||||
enum State<S1, S2> {
|
||||
/// Emitting elements of the first stream
|
||||
First(S1, S2),
|
||||
/// Emitting elements of the second stream
|
||||
Second(S2),
|
||||
/// Temporary value to replace first with second
|
||||
Temp,
|
||||
}
|
||||
|
||||
/// An adapter for chaining the output of two streams.
|
||||
///
|
||||
/// The resulting stream produces items from the first stream and then
|
||||
/// from the second stream.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Chain<S1, S2> {
|
||||
state: State<S1, S2>
|
||||
}
|
||||
|
||||
pub fn new<S1, S2>(s1: S1, s2: S2) -> Chain<S1, S2>
|
||||
where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>,
|
||||
{
|
||||
Chain { state: State::First(s1, s2) }
|
||||
}
|
||||
|
||||
impl<S1, S2> Stream for Chain<S1, S2>
|
||||
where S1: Stream, S2: Stream<Item=S1::Item, Error=S1::Error>,
|
||||
{
|
||||
type Item = S1::Item;
|
||||
type Error = S1::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
loop {
|
||||
match self.state {
|
||||
State::First(ref mut s1, ref _s2) => match s1.poll() {
|
||||
Ok(Async::Ready(None)) => (), // roll
|
||||
x => return x,
|
||||
},
|
||||
State::Second(ref mut s2) => return s2.poll(),
|
||||
State::Temp => unreachable!(),
|
||||
}
|
||||
|
||||
self.state = match mem::replace(&mut self.state, State::Temp) {
|
||||
State::First(_s1, s2) => State::Second(s2),
|
||||
_ => unreachable!(),
|
||||
};
|
||||
}
|
||||
}
|
||||
}
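
A usage sketch of chaining, assuming the `chain` adapter is exposed as a method on `Stream` in this version (the re-export of `Chain` in `mod.rs` suggests it is): every item of the first stream is yielded before any item of the second.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    let first = stream::iter(vec![Ok::<u32, ()>(1), Ok(2)]);
    let second = stream::iter(vec![Ok::<u32, ()>(3)]);

    // Items of `first` come out first, then the items of `second`.
    let all = first.chain(second).collect().wait().unwrap();
    assert_eq!(all, vec![1, 2, 3]);
}
```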
|
|
@ -0,0 +1,114 @@
|
|||
#![cfg(feature = "with-deprecated")]
|
||||
#![deprecated(since = "0.1.4", note = "use sync::mpsc::channel instead")]
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::any::Any;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
|
||||
use {Poll, Async, Stream, Future, Sink};
|
||||
use sink::Send;
|
||||
use sync::mpsc;
|
||||
|
||||
/// Creates an in-memory channel implementation of the `Stream` trait.
|
||||
///
|
||||
/// This method creates a concrete implementation of the `Stream` trait which
|
||||
/// can be used to send values across threads in a streaming fashion. This
|
||||
/// channel is unique in that it implements back pressure to ensure that the
|
||||
/// sender never outpaces the receiver. The `Sender::send` method will only
|
||||
/// allow sending one message and the next message can only be sent once the
|
||||
/// first was consumed.
|
||||
///
|
||||
/// The `Receiver` returned implements the `Stream` trait and has access to any
|
||||
/// number of the associated combinators for transforming the result.
|
||||
pub fn channel<T, E>() -> (Sender<T, E>, Receiver<T, E>) {
|
||||
let (tx, rx) = mpsc::channel(0);
|
||||
(Sender { inner: tx }, Receiver { inner: rx })
|
||||
}
|
||||
|
||||
/// The transmission end of a channel which is used to send values.
|
||||
///
|
||||
/// This is created by the `channel` method in the `stream` module.
|
||||
#[derive(Debug)]
|
||||
pub struct Sender<T, E> {
|
||||
inner: mpsc::Sender<Result<T, E>>,
|
||||
}
|
||||
|
||||
/// The receiving end of a channel which implements the `Stream` trait.
|
||||
///
|
||||
/// This is a concrete implementation of a stream which can be used to represent
|
||||
/// a stream of values being computed elsewhere. This is created by the
|
||||
/// `channel` method in the `stream` module.
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
#[derive(Debug)]
|
||||
pub struct Receiver<T, E> {
|
||||
inner: mpsc::Receiver<Result<T, E>>,
|
||||
}
|
||||
|
||||
/// Error type for sending, used when the receiving end of the channel is dropped
|
||||
pub struct SendError<T, E>(Result<T, E>);
|
||||
|
||||
/// Future returned by `Sender::send`.
|
||||
#[derive(Debug)]
|
||||
pub struct FutureSender<T, E> {
|
||||
inner: Send<mpsc::Sender<Result<T, E>>>,
|
||||
}
|
||||
|
||||
impl<T, E> fmt::Debug for SendError<T, E> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_tuple("SendError")
|
||||
.field(&"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> fmt::Display for SendError<T, E> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "send failed because receiver is gone")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Error for SendError<T, E>
|
||||
where T: Any, E: Any
|
||||
{
|
||||
fn description(&self) -> &str {
|
||||
"send failed because receiver is gone"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<T, E> Stream for Receiver<T, E> {
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
match self.inner.poll().expect("cannot fail") {
|
||||
Async::Ready(Some(Ok(e))) => Ok(Async::Ready(Some(e))),
|
||||
Async::Ready(Some(Err(e))) => Err(e),
|
||||
Async::Ready(None) => Ok(Async::Ready(None)),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Sender<T, E> {
|
||||
/// Sends a new value along this channel to the receiver.
|
||||
///
|
||||
/// This method consumes the sender and returns a future which will resolve
|
||||
/// to the sender again when the value sent has been consumed.
|
||||
pub fn send(self, t: Result<T, E>) -> FutureSender<T, E> {
|
||||
FutureSender { inner: self.inner.send(t) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Future for FutureSender<T, E> {
|
||||
type Item = Sender<T, E>;
|
||||
type Error = SendError<T, E>;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
match self.inner.poll() {
|
||||
Ok(a) => Ok(a.map(|a| Sender { inner: a })),
|
||||
Err(e) => Err(SendError(e.into_inner())),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,112 @@
|
|||
use std::mem;
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use {Async, Poll};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
/// An adaptor that chunks up elements in a vector.
|
||||
///
|
||||
/// This adaptor will buffer up a list of items in the stream and pass on the
|
||||
/// vector used for buffering when a specified capacity has been reached. This
|
||||
/// is created by the `Stream::chunks` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Chunks<S>
|
||||
where S: Stream
|
||||
{
|
||||
items: Vec<S::Item>,
|
||||
err: Option<S::Error>,
|
||||
stream: Fuse<S>
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, capacity: usize) -> Chunks<S>
|
||||
where S: Stream
|
||||
{
|
||||
assert!(capacity > 0);
|
||||
|
||||
Chunks {
|
||||
items: Vec::with_capacity(capacity),
|
||||
err: None,
|
||||
stream: super::fuse::new(s),
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Chunks<S>
|
||||
where S: ::sink::Sink + Stream
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<S> Chunks<S> where S: Stream {
|
||||
fn take(&mut self) -> Vec<S::Item> {
|
||||
let cap = self.items.capacity();
|
||||
mem::replace(&mut self.items, Vec::with_capacity(cap))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for Chunks<S>
|
||||
where S: Stream
|
||||
{
|
||||
type Item = Vec<<S as Stream>::Item>;
|
||||
type Error = <S as Stream>::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if let Some(err) = self.err.take() {
|
||||
return Err(err)
|
||||
}
|
||||
|
||||
let cap = self.items.capacity();
|
||||
loop {
|
||||
match self.stream.poll() {
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
|
||||
// Push the item into the buffer and check whether it is full.
|
||||
// If so, replace our buffer with a new and empty one and return
|
||||
// the full one.
|
||||
Ok(Async::Ready(Some(item))) => {
|
||||
self.items.push(item);
|
||||
if self.items.len() >= cap {
|
||||
return Ok(Some(self.take()).into())
|
||||
}
|
||||
}
|
||||
|
||||
// Since the underlying stream ran out of values, return what we
|
||||
// have buffered, if we have anything.
|
||||
Ok(Async::Ready(None)) => {
|
||||
return if self.items.len() > 0 {
|
||||
let full_buf = mem::replace(&mut self.items, Vec::new());
|
||||
Ok(Some(full_buf).into())
|
||||
} else {
|
||||
Ok(Async::Ready(None))
|
||||
}
|
||||
}
|
||||
|
||||
// If we've got buffered items be sure to return them first,
|
||||
// we'll defer our error for later.
|
||||
Err(e) => {
|
||||
if self.items.len() == 0 {
|
||||
return Err(e)
|
||||
} else {
|
||||
self.err = Some(e);
|
||||
return Ok(Some(self.take()).into())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
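
A sketch of `chunks` against the public futures 0.1 API (default features assumed): items are buffered into vectors of the requested capacity, and the final chunk may be shorter when the stream ends early.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    let chunked = stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3), Ok(4), Ok(5)])
        .chunks(2) // buffer up to 2 items per emitted vector
        .collect()
        .wait()
        .unwrap();

    // The trailing chunk holds whatever was left when the stream finished.
    assert_eq!(chunked, vec![vec![1, 2], vec![3, 4], vec![5]]);
}
```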
|
|
@ -0,0 +1,52 @@
|
|||
use std::prelude::v1::*;
|
||||
|
||||
use std::mem;
|
||||
|
||||
use {Future, Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A future which collects all of the values of a stream into a vector.
|
||||
///
|
||||
/// This future is created by the `Stream::collect` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Collect<S> where S: Stream {
|
||||
stream: S,
|
||||
items: Vec<S::Item>,
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S) -> Collect<S>
|
||||
where S: Stream,
|
||||
{
|
||||
Collect {
|
||||
stream: s,
|
||||
items: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Collect<S> {
|
||||
fn finish(&mut self) -> Vec<S::Item> {
|
||||
mem::replace(&mut self.items, Vec::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Future for Collect<S>
|
||||
where S: Stream,
|
||||
{
|
||||
type Item = Vec<S::Item>;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Vec<S::Item>, S::Error> {
|
||||
loop {
|
||||
match self.stream.poll() {
|
||||
Ok(Async::Ready(Some(e))) => self.items.push(e),
|
||||
Ok(Async::Ready(None)) => return Ok(Async::Ready(self.finish())),
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(e) => {
|
||||
self.finish();
|
||||
return Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
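
A minimal sketch of `collect`, assuming the public futures 0.1 API: a finite stream is turned into a single future that resolves to a `Vec` of all its items.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    // `collect` gathers every item; the resulting future yields one Vec.
    let items = stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3)])
        .collect()
        .wait()
        .unwrap();
    assert_eq!(items, vec![1, 2, 3]);
}
```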
|
|
@ -0,0 +1,81 @@
|
|||
use core::mem;
|
||||
|
||||
use {Poll, Async};
|
||||
use future::Future;
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator to concatenate the results of a stream into the first
|
||||
/// yielded item.
|
||||
///
|
||||
/// This structure is produced by the `Stream::concat` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Concat<S>
|
||||
where S: Stream,
|
||||
{
|
||||
stream: S,
|
||||
extend: Inner<S::Item>,
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S) -> Concat<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
{
|
||||
Concat {
|
||||
stream: s,
|
||||
extend: Inner::First,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Future for Concat<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
loop {
|
||||
match self.stream.poll() {
|
||||
Ok(Async::Ready(Some(i))) => {
|
||||
match self.extend {
|
||||
Inner::First => {
|
||||
self.extend = Inner::Extending(i);
|
||||
},
|
||||
Inner::Extending(ref mut e) => {
|
||||
e.extend(i);
|
||||
},
|
||||
Inner::Done => unreachable!(),
|
||||
}
|
||||
},
|
||||
Ok(Async::Ready(None)) => return Ok(Async::Ready(expect(self.extend.take()))),
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(e) => {
|
||||
self.extend.take();
|
||||
return Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Inner<E> {
|
||||
First,
|
||||
Extending(E),
|
||||
Done,
|
||||
}
|
||||
|
||||
impl<E> Inner<E> {
|
||||
fn take(&mut self) -> Option<E> {
|
||||
match mem::replace(self, Inner::Done) {
|
||||
Inner::Extending(e) => Some(e),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn expect<T>(opt: Option<T>) -> T {
|
||||
opt.expect("cannot poll Concat again")
|
||||
}
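
A sketch of `concat`, assuming the public futures 0.1 API: each item must be a collection that can be extended by the following items (here `Vec<u32>`), and everything is folded into the first item that the stream yields.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    // Each stream item is a Vec; later Vecs are appended onto the first one.
    // (As implemented above, a stream with no items panics when the result
    // is taken, since there is nothing to return.)
    let joined = stream::iter(vec![
            Ok::<_, ()>(vec![1u32, 2]),
            Ok(vec![3]),
            Ok(vec![4, 5]),
        ])
        .concat()
        .wait()
        .unwrap();
    assert_eq!(joined, vec![1, 2, 3, 4, 5]);
}
```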
|
|
@ -0,0 +1,29 @@
|
|||
use core::marker;
|
||||
|
||||
use stream::Stream;
|
||||
use {Poll, Async};
|
||||
|
||||
/// A stream which contains no elements.
|
||||
///
|
||||
/// This stream can be created with the `stream::empty` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Empty<T, E> {
|
||||
_data: marker::PhantomData<(T, E)>,
|
||||
}
|
||||
|
||||
/// Creates a stream which contains no elements.
|
||||
///
|
||||
/// The returned stream will always return `Ready(None)` when polled.
|
||||
pub fn empty<T, E>() -> Empty<T, E> {
|
||||
Empty { _data: marker::PhantomData }
|
||||
}
|
||||
|
||||
impl<T, E> Stream for Empty<T, E> {
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
Ok(Async::Ready(None))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,64 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator used to filter the results of a stream and only yield
|
||||
/// some values.
|
||||
///
|
||||
/// This structure is produced by the `Stream::filter` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Filter<S, F> {
|
||||
stream: S,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F>(s: S, f: F) -> Filter<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(&S::Item) -> bool,
|
||||
{
|
||||
Filter {
|
||||
stream: s,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for Filter<S, F>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F> Stream for Filter<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(&S::Item) -> bool,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
loop {
|
||||
match try_ready!(self.stream.poll()) {
|
||||
Some(e) => {
|
||||
if (self.f)(&e) {
|
||||
return Ok(Async::Ready(Some(e)))
|
||||
}
|
||||
}
|
||||
None => return Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
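
A short sketch of `filter`, assuming the public futures 0.1 API: only items for which the predicate returns `true` are passed through.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    // The predicate receives a reference to each item.
    let evens = stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3), Ok(4)])
        .filter(|x| x % 2 == 0)
        .collect()
        .wait()
        .unwrap();
    assert_eq!(evens, vec![2, 4]);
}
```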
|
|
@ -0,0 +1,64 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A combinator used to filter the results of a stream and simultaneously map
|
||||
/// them to a different type.
|
||||
///
|
||||
/// This structure is returned by the `Stream::filter_map` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct FilterMap<S, F> {
|
||||
stream: S,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F, B>(s: S, f: F) -> FilterMap<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> Option<B>,
|
||||
{
|
||||
FilterMap {
|
||||
stream: s,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for FilterMap<S, F>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, B> Stream for FilterMap<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> Option<B>,
|
||||
{
|
||||
type Item = B;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<B>, S::Error> {
|
||||
loop {
|
||||
match try_ready!(self.stream.poll()) {
|
||||
Some(e) => {
|
||||
if let Some(e) = (self.f)(e) {
|
||||
return Ok(Async::Ready(Some(e)))
|
||||
}
|
||||
}
|
||||
None => return Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
use {Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A combinator used to flatten a stream-of-streams into one long stream of
|
||||
/// elements.
|
||||
///
|
||||
/// This combinator is created by the `Stream::flatten` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Flatten<S>
|
||||
where S: Stream,
|
||||
{
|
||||
stream: S,
|
||||
next: Option<S::Item>,
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S) -> Flatten<S>
|
||||
where S: Stream,
|
||||
S::Item: Stream,
|
||||
<S::Item as Stream>::Error: From<S::Error>,
|
||||
{
|
||||
Flatten {
|
||||
stream: s,
|
||||
next: None,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Flatten<S>
|
||||
where S: ::sink::Sink + Stream
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for Flatten<S>
|
||||
where S: Stream,
|
||||
S::Item: Stream,
|
||||
<S::Item as Stream>::Error: From<S::Error>,
|
||||
{
|
||||
type Item = <S::Item as Stream>::Item;
|
||||
type Error = <S::Item as Stream>::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
loop {
|
||||
if self.next.is_none() {
|
||||
match try_ready!(self.stream.poll()) {
|
||||
Some(e) => self.next = Some(e),
|
||||
None => return Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
assert!(self.next.is_some());
|
||||
match self.next.as_mut().unwrap().poll() {
|
||||
Ok(Async::Ready(None)) => self.next = None,
|
||||
other => return other,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
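
A sketch of `flatten`, assuming the public futures 0.1 API: a stream whose items are themselves streams is flattened into one long stream, draining each inner stream in turn.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    let inner_a = stream::iter(vec![Ok::<u32, ()>(1), Ok(2)]);
    let inner_b = stream::iter(vec![Ok::<u32, ()>(3)]);

    // The outer stream yields the inner streams; `flatten` joins their items.
    let flat = stream::iter(vec![Ok::<_, ()>(inner_a), Ok(inner_b)])
        .flatten()
        .collect()
        .wait()
        .unwrap();
    assert_eq!(flat, vec![1, 2, 3]);
}
```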
|
|
@ -0,0 +1,81 @@
|
|||
use core::mem;
|
||||
|
||||
use {Future, Poll, IntoFuture, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A future used to collect all the results of a stream into one generic type.
|
||||
///
|
||||
/// This future is returned by the `Stream::fold` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Fold<S, F, Fut, T> where Fut: IntoFuture {
|
||||
stream: S,
|
||||
f: F,
|
||||
state: State<T, Fut::Future>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum State<T, F> where F: Future {
|
||||
/// Placeholder state when doing work
|
||||
Empty,
|
||||
|
||||
/// Ready to process the next stream item; current accumulator is the `T`
|
||||
Ready(T),
|
||||
|
||||
    /// Working on a future that processes the previous stream item
|
||||
Processing(F),
|
||||
}
|
||||
|
||||
pub fn new<S, F, Fut, T>(s: S, f: F, t: T) -> Fold<S, F, Fut, T>
|
||||
where S: Stream,
|
||||
F: FnMut(T, S::Item) -> Fut,
|
||||
Fut: IntoFuture<Item = T>,
|
||||
S::Error: From<Fut::Error>,
|
||||
{
|
||||
Fold {
|
||||
stream: s,
|
||||
f: f,
|
||||
state: State::Ready(t),
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
|
||||
where S: Stream,
|
||||
F: FnMut(T, S::Item) -> Fut,
|
||||
Fut: IntoFuture<Item = T>,
|
||||
S::Error: From<Fut::Error>,
|
||||
{
|
||||
type Item = T;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, S::Error> {
|
||||
loop {
|
||||
match mem::replace(&mut self.state, State::Empty) {
|
||||
State::Empty => panic!("cannot poll Fold twice"),
|
||||
State::Ready(state) => {
|
||||
match try!(self.stream.poll()) {
|
||||
Async::Ready(Some(e)) => {
|
||||
let future = (self.f)(state, e);
|
||||
let future = future.into_future();
|
||||
self.state = State::Processing(future);
|
||||
}
|
||||
Async::Ready(None) => return Ok(Async::Ready(state)),
|
||||
Async::NotReady => {
|
||||
self.state = State::Ready(state);
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
State::Processing(mut fut) => {
|
||||
match try!(fut.poll()) {
|
||||
Async::Ready(state) => self.state = State::Ready(state),
|
||||
Async::NotReady => {
|
||||
self.state = State::Processing(fut);
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
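
A sketch of `fold`, assuming the public futures 0.1 API: the closure returns something convertible into a future, so updating the accumulator may itself be asynchronous; a plain `Result` covers the synchronous case.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    // Sum the stream; each accumulator update is an immediately-ready future.
    let sum = stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3)])
        .fold(0u32, |acc, x| Ok::<u32, ()>(acc + x))
        .wait()
        .unwrap();
    assert_eq!(sum, 6);
}
```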
|
|
@ -0,0 +1,51 @@
|
|||
use {Async, Future, IntoFuture, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which executes a unit closure over each item on a
|
||||
/// stream.
|
||||
///
|
||||
/// This structure is returned by the `Stream::for_each` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct ForEach<S, F, U> where U: IntoFuture {
|
||||
stream: S,
|
||||
f: F,
|
||||
fut: Option<U::Future>,
|
||||
}
|
||||
|
||||
pub fn new<S, F, U>(s: S, f: F) -> ForEach<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> U,
|
||||
U: IntoFuture<Item = (), Error = S::Error>,
|
||||
{
|
||||
ForEach {
|
||||
stream: s,
|
||||
f: f,
|
||||
fut: None,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> Future for ForEach<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> U,
|
||||
U: IntoFuture<Item= (), Error = S::Error>,
|
||||
{
|
||||
type Item = ();
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<(), S::Error> {
|
||||
loop {
|
||||
if let Some(mut fut) = self.fut.take() {
|
||||
if try!(fut.poll()).is_not_ready() {
|
||||
self.fut = Some(fut);
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
|
||||
match try_ready!(self.stream.poll()) {
|
||||
Some(e) => self.fut = Some((self.f)(e).into_future()),
|
||||
None => return Ok(Async::Ready(())),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
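
A sketch of `for_each`, assuming the public futures 0.1 API: the closure must return a future of `()` whose error matches the stream's error type, and a bare `Ok(())` satisfies that through `IntoFuture`.

```rust
extern crate futures;

use futures::Future;
use futures::stream::{self, Stream};

fn main() {
    // Runs the closure once per item; the whole thing is a future of ().
    stream::iter(vec![Ok::<u32, ()>(1), Ok(2), Ok(3)])
        .for_each(|x| {
            println!("got item: {}", x);
            Ok(())
        })
        .wait()
        .unwrap();
}
```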
|
|
@ -0,0 +1,90 @@
|
|||
use {Poll, Async, Future, AsyncSink};
|
||||
use stream::{Stream, Fuse};
|
||||
use sink::Sink;
|
||||
|
||||
/// Future for the `Stream::forward` combinator, which sends a stream of values
|
||||
/// to a sink and then waits until the sink has fully flushed those values.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Forward<T: Stream, U> {
|
||||
sink: Option<U>,
|
||||
stream: Option<Fuse<T>>,
|
||||
buffered: Option<T::Item>,
|
||||
}
|
||||
|
||||
|
||||
pub fn new<T, U>(stream: T, sink: U) -> Forward<T, U>
|
||||
where U: Sink<SinkItem=T::Item>,
|
||||
T: Stream,
|
||||
T::Error: From<U::SinkError>,
|
||||
{
|
||||
Forward {
|
||||
sink: Some(sink),
|
||||
stream: Some(stream.fuse()),
|
||||
buffered: None,
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Forward<T, U>
|
||||
where U: Sink<SinkItem=T::Item>,
|
||||
T: Stream,
|
||||
T::Error: From<U::SinkError>,
|
||||
{
|
||||
fn sink_mut(&mut self) -> &mut U {
|
||||
self.sink.as_mut().take()
|
||||
.expect("Attempted to poll Forward after completion")
|
||||
}
|
||||
|
||||
fn stream_mut(&mut self) -> &mut Fuse<T> {
|
||||
self.stream.as_mut().take()
|
||||
.expect("Attempted to poll Forward after completion")
|
||||
}
|
||||
|
||||
fn take_result(&mut self) -> (T, U) {
|
||||
let sink = self.sink.take()
|
||||
.expect("Attempted to poll Forward after completion");
|
||||
let fuse = self.stream.take()
|
||||
.expect("Attempted to poll Forward after completion");
|
||||
return (fuse.into_inner(), sink)
|
||||
}
|
||||
|
||||
fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> {
|
||||
debug_assert!(self.buffered.is_none());
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
|
||||
self.buffered = Some(item);
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Future for Forward<T, U>
|
||||
where U: Sink<SinkItem=T::Item>,
|
||||
T: Stream,
|
||||
T::Error: From<U::SinkError>,
|
||||
{
|
||||
type Item = (T, U);
|
||||
type Error = T::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<(T, U), T::Error> {
|
||||
// If we've got an item buffered already, we need to write it to the
|
||||
// sink before we can do anything else
|
||||
if let Some(item) = self.buffered.take() {
|
||||
try_ready!(self.try_start_send(item))
|
||||
}
|
||||
|
||||
loop {
|
||||
match try!(self.stream_mut().poll()) {
|
||||
Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
|
||||
Async::Ready(None) => {
|
||||
try_ready!(self.sink_mut().close());
|
||||
return Ok(Async::Ready(self.take_result()))
|
||||
}
|
||||
Async::NotReady => {
|
||||
try_ready!(self.sink_mut().poll_complete());
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
use core::marker::PhantomData;
|
||||
use poll::Poll;
|
||||
use Async;
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator to change the error type of a stream.
|
||||
///
|
||||
/// This is created by the `Stream::from_err` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct FromErr<S, E> where S: Stream {
|
||||
stream: S,
|
||||
f: PhantomData<E>
|
||||
}
|
||||
|
||||
pub fn new<S, E>(stream: S) -> FromErr<S, E>
|
||||
where S: Stream
|
||||
{
|
||||
FromErr {
|
||||
stream: stream,
|
||||
f: PhantomData
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream, E: From<S::Error>> Stream for FromErr<S, E> {
|
||||
type Item = S::Item;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, E> {
|
||||
let e = match self.stream.poll() {
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
other => other,
|
||||
};
|
||||
e.map_err(From::from)
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S: Stream + ::sink::Sink, E> ::sink::Sink for FromErr<S, E> {
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> ::StartSend<Self::SinkItem, Self::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,71 @@
|
|||
use {Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream which "fuse"s a stream once it's terminated.
|
||||
///
|
||||
/// Normally streams can behave unpredictably when used after they have already
|
||||
/// finished, but `Fuse` continues to return `None` from `poll` forever when
|
||||
/// finished.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Fuse<S> {
|
||||
stream: S,
|
||||
done: bool,
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Fuse<S>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new<S: Stream>(s: S) -> Fuse<S> {
|
||||
Fuse { stream: s, done: false }
|
||||
}
|
||||
|
||||
impl<S: Stream> Stream for Fuse<S> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
if self.done {
|
||||
Ok(Async::Ready(None))
|
||||
} else {
|
||||
let r = self.stream.poll();
|
||||
if let Ok(Async::Ready(None)) = r {
|
||||
self.done = true;
|
||||
}
|
||||
r
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Fuse<S> {
|
||||
/// Returns whether the underlying stream has finished or not.
|
||||
///
|
||||
/// If this method returns `true`, then all future calls to poll are
|
||||
/// guaranteed to return `None`. If this returns `false`, then the
|
||||
/// underlying stream is still in use.
|
||||
pub fn is_done(&self) -> bool {
|
||||
self.done
|
||||
}
|
||||
|
||||
/// Recover original stream
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
use {Future, Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A combinator used to temporarily convert a stream into a future.
|
||||
///
|
||||
/// This future is returned by the `Stream::into_future` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct StreamFuture<S> {
|
||||
stream: Option<S>,
|
||||
}
|
||||
|
||||
pub fn new<S: Stream>(s: S) -> StreamFuture<S> {
|
||||
StreamFuture { stream: Some(s) }
|
||||
}
|
||||
|
||||
impl<S: Stream> Future for StreamFuture<S> {
|
||||
type Item = (Option<S::Item>, S);
|
||||
type Error = (S::Error, S);
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let item = {
|
||||
let s = self.stream.as_mut().expect("polling StreamFuture twice");
|
||||
match s.poll() {
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Ok(Async::Ready(e)) => Ok(e),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
};
|
||||
let stream = self.stream.take().unwrap();
|
||||
match item {
|
||||
Ok(e) => Ok(Async::Ready((e, stream))),
|
||||
Err(e) => Err((e, stream)),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,107 @@
|
|||
use future::{Future, IntoFuture};
|
||||
use stream::Stream;
|
||||
use poll::Poll;
|
||||
use Async;
|
||||
use stack::{Stack, Drain};
|
||||
use std::sync::Arc;
|
||||
use task::{self, UnparkEvent};
|
||||
|
||||
use std::prelude::v1::*;
|
||||
|
||||
/// An adaptor for a stream of futures to execute the futures concurrently, if
|
||||
/// possible, delivering results as they become available.
|
||||
///
|
||||
/// This adaptor will return their results in the order that they complete.
|
||||
/// This is created by the `futures_unordered` function.
|
||||
///
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct FuturesUnordered<F>
|
||||
where F: Future
|
||||
{
|
||||
futures: Vec<Option<F>>,
|
||||
stack: Arc<Stack<usize>>,
|
||||
pending: Option<Drain<usize>>,
|
||||
active: usize,
|
||||
}
|
||||
|
||||
/// Converts a list of futures into a `Stream` of results from the futures.
|
||||
///
|
||||
/// This function will take a list of futures (e.g. a vector, an iterator,
|
||||
/// etc), and return a stream. The stream will yield items as they become
|
||||
/// available on the futures internally, in the order that they become
|
||||
/// available. This function is similar to `buffer_unordered` in that it may
|
||||
/// return items in a different order than in the list specified.
|
||||
pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture
|
||||
{
|
||||
let futures = futures.into_iter()
|
||||
.map(IntoFuture::into_future)
|
||||
.map(Some)
|
||||
.collect::<Vec<_>>();
|
||||
let stack = Arc::new(Stack::new());
|
||||
for i in 0..futures.len() {
|
||||
stack.push(i);
|
||||
}
|
||||
FuturesUnordered {
|
||||
active: futures.len(),
|
||||
futures: futures,
|
||||
pending: None,
|
||||
stack: stack,
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> FuturesUnordered<F>
|
||||
where F: Future
|
||||
{
|
||||
fn poll_pending(&mut self, mut drain: Drain<usize>)
|
||||
-> Option<Poll<Option<F::Item>, F::Error>> {
|
||||
while let Some(id) = drain.next() {
|
||||
// If this future was already done just skip the notification
|
||||
if self.futures[id].is_none() {
|
||||
continue
|
||||
}
|
||||
let event = UnparkEvent::new(self.stack.clone(), id);
|
||||
let ret = match task::with_unpark_event(event, || {
|
||||
self.futures[id]
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.poll()
|
||||
}) {
|
||||
Ok(Async::NotReady) => continue,
|
||||
Ok(Async::Ready(val)) => Ok(Async::Ready(Some(val))),
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
self.pending = Some(drain);
|
||||
self.active -= 1;
|
||||
self.futures[id] = None;
|
||||
return Some(ret)
|
||||
}
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Stream for FuturesUnordered<F>
|
||||
where F: Future
|
||||
{
|
||||
type Item = F::Item;
|
||||
type Error = F::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if self.active == 0 {
|
||||
return Ok(Async::Ready(None))
|
||||
}
|
||||
if let Some(drain) = self.pending.take() {
|
||||
if let Some(ret) = self.poll_pending(drain) {
|
||||
return ret
|
||||
}
|
||||
}
|
||||
let drain = self.stack.drain();
|
||||
if let Some(ret) = self.poll_pending(drain) {
|
||||
return ret
|
||||
}
|
||||
assert!(self.active > 0);
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
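
A sketch of `futures_unordered`, assuming the public futures 0.1 API: a plain collection of futures becomes a stream of their results, delivered as the futures complete.

```rust
extern crate futures;

use futures::{future, Future};
use futures::stream::{self, Stream};

fn main() {
    // Any IntoIterator of futures works; here they are all ready immediately.
    let futures = vec![
        future::ok::<u32, ()>(1),
        future::ok(2),
        future::ok(3),
    ];

    let mut results = stream::futures_unordered(futures)
        .collect()
        .wait()
        .unwrap();

    // Completion order is unspecified, so sort before comparing.
    results.sort();
    assert_eq!(results, vec![1, 2, 3]);
}
```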
|
|
@ -0,0 +1,49 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream which is just a shim over an underlying instance of `Iterator`.
|
||||
///
|
||||
/// This stream will never block and is always ready.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Iter<I> {
|
||||
iter: I,
|
||||
}
|
||||
|
||||
/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
|
||||
/// to yield the next value.
|
||||
///
|
||||
/// Iterators in Rust don't express the ability to block, so this adapter simply
|
||||
/// always calls `iter.next()` and returns that.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let mut stream = stream::iter(vec![Ok(17), Err(false), Ok(19)]);
|
||||
/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
|
||||
/// assert_eq!(Err(false), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
|
||||
where J: IntoIterator<Item=Result<T, E>>,
|
||||
{
|
||||
Iter {
|
||||
iter: i.into_iter(),
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, T, E> Stream for Iter<I>
|
||||
where I: Iterator<Item=Result<T, E>>,
|
||||
{
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
match self.iter.next() {
|
||||
Some(Ok(e)) => Ok(Async::Ready(Some(e))),
|
||||
Some(Err(e)) => Err(e),
|
||||
None => Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,56 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which will change the type of a stream from one
|
||||
/// type to another.
|
||||
///
|
||||
/// This is produced by the `Stream::map` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Map<S, F> {
|
||||
stream: S,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F, U>(s: S, f: F) -> Map<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> U,
|
||||
{
|
||||
Map {
|
||||
stream: s,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for Map<S, F>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> Stream for Map<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Item) -> U,
|
||||
{
|
||||
type Item = U;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<U>, S::Error> {
|
||||
let option = try_ready!(self.stream.poll());
|
||||
Ok(Async::Ready(option.map(&mut self.f)))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,55 @@
|
|||
use Poll;
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which will change the error type of a stream from one
|
||||
/// type to another.
|
||||
///
|
||||
/// This is produced by the `Stream::map_err` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct MapErr<S, F> {
|
||||
stream: S,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F, U>(s: S, f: F) -> MapErr<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Error) -> U,
|
||||
{
|
||||
MapErr {
|
||||
stream: s,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for MapErr<S, F>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> Stream for MapErr<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Error) -> U,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = U;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, U> {
|
||||
self.stream.poll().map_err(&mut self.f)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
use {Poll, Async};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
/// An adapter for merging the output of two streams.
|
||||
///
|
||||
/// The merged stream produces items from one or both of the underlying
|
||||
/// streams as they become available. Errors, however, are not merged: you
|
||||
/// get at most one error at a time.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Merge<S1, S2: Stream> {
|
||||
stream1: Fuse<S1>,
|
||||
stream2: Fuse<S2>,
|
||||
queued_error: Option<S2::Error>,
|
||||
}
|
||||
|
||||
pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Merge<S1, S2>
|
||||
where S1: Stream, S2: Stream<Error = S1::Error>
|
||||
{
|
||||
Merge {
|
||||
stream1: stream1.fuse(),
|
||||
stream2: stream2.fuse(),
|
||||
queued_error: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// An item returned from a merge stream, which represents an item from one or
|
||||
/// both of the underlying streams.
|
||||
#[derive(Debug)]
|
||||
pub enum MergedItem<I1, I2> {
|
||||
/// An item from the first stream
|
||||
First(I1),
|
||||
/// An item from the second stream
|
||||
Second(I2),
|
||||
/// Items from both streams
|
||||
Both(I1, I2),
|
||||
}
|
||||
|
||||
impl<S1, S2> Stream for Merge<S1, S2>
|
||||
where S1: Stream, S2: Stream<Error = S1::Error>
|
||||
{
|
||||
type Item = MergedItem<S1::Item, S2::Item>;
|
||||
type Error = S1::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if let Some(e) = self.queued_error.take() {
|
||||
return Err(e)
|
||||
}
|
||||
|
||||
match try!(self.stream1.poll()) {
|
||||
Async::NotReady => {
|
||||
match try_ready!(self.stream2.poll()) {
|
||||
Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
|
||||
None => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
Async::Ready(None) => {
|
||||
match try_ready!(self.stream2.poll()) {
|
||||
Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
|
||||
None => Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
Async::Ready(Some(item1)) => {
|
||||
match self.stream2.poll() {
|
||||
Err(e) => {
|
||||
self.queued_error = Some(e);
|
||||
Ok(Async::Ready(Some(MergedItem::First(item1))))
|
||||
}
|
||||
Ok(Async::NotReady) | Ok(Async::Ready(None)) => {
|
||||
Ok(Async::Ready(Some(MergedItem::First(item1))))
|
||||
}
|
||||
Ok(Async::Ready(Some(item2))) => {
|
||||
Ok(Async::Ready(Some(MergedItem::Both(item1, item2))))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
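
A sketch of merging, assuming the `merge` adapter is exposed as a method on `Stream` in this version (the `Merge`/`MergedItem` re-exports in `mod.rs` suggest it is): each poll can surface an item from one stream, from the other, or from both at once.

```rust
extern crate futures;

use futures::stream::{self, Stream, MergedItem};

fn main() {
    let a = stream::iter(vec![Ok::<u32, ()>(1), Ok(2)]);
    let b = stream::iter(vec![Ok::<u32, ()>(10)]);

    // `wait` turns the merged stream into a blocking iterator of results.
    for item in a.merge(b).wait() {
        match item.unwrap() {
            MergedItem::First(x) => println!("only the first stream: {}", x),
            MergedItem::Second(y) => println!("only the second stream: {}", y),
            MergedItem::Both(x, y) => println!("both streams: {} and {}", x, y),
        }
    }
}
```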
|
|
@ -0,0 +1,993 @@
|
|||
//! Asynchronous streams
|
||||
//!
|
||||
//! This module contains the `Stream` trait and a number of adaptors for this
|
||||
//! trait. This trait is very similar to the `Iterator` trait in the standard
|
||||
//! library except that it expresses the concept of blocking as well. A stream
|
||||
//! here is a sequence of values which may take some amount of time
|
||||
//! in between to produce.
|
||||
//!
|
||||
//! A stream may request that it is blocked between values while the next value
|
||||
//! is calculated, and provides a way to get notified once the next value is
|
||||
//! ready as well.
|
||||
//!
|
||||
//! You can find more information/tutorials about streams [online at
|
||||
//! https://tokio.rs][online]
|
||||
//!
|
||||
//! [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
|
||||
|
||||
use {IntoFuture, Poll};
|
||||
|
||||
mod iter;
|
||||
pub use self::iter::{iter, Iter};
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::Iter as IterStream;
|
||||
|
||||
mod repeat;
|
||||
pub use self::repeat::{repeat, Repeat};
|
||||
|
||||
mod and_then;
|
||||
mod chain;
|
||||
mod concat;
|
||||
mod empty;
|
||||
mod filter;
|
||||
mod filter_map;
|
||||
mod flatten;
|
||||
mod fold;
|
||||
mod for_each;
|
||||
mod from_err;
|
||||
mod fuse;
|
||||
mod future;
|
||||
mod map;
|
||||
mod map_err;
|
||||
mod merge;
|
||||
mod once;
|
||||
mod or_else;
|
||||
mod peek;
|
||||
mod select;
|
||||
mod skip;
|
||||
mod skip_while;
|
||||
mod take;
|
||||
mod take_while;
|
||||
mod then;
|
||||
mod unfold;
|
||||
mod zip;
|
||||
mod forward;
|
||||
pub use self::and_then::AndThen;
|
||||
pub use self::chain::Chain;
|
||||
pub use self::concat::Concat;
|
||||
pub use self::empty::{Empty, empty};
|
||||
pub use self::filter::Filter;
|
||||
pub use self::filter_map::FilterMap;
|
||||
pub use self::flatten::Flatten;
|
||||
pub use self::fold::Fold;
|
||||
pub use self::for_each::ForEach;
|
||||
pub use self::from_err::FromErr;
|
||||
pub use self::fuse::Fuse;
|
||||
pub use self::future::StreamFuture;
|
||||
pub use self::map::Map;
|
||||
pub use self::map_err::MapErr;
|
||||
pub use self::merge::{Merge, MergedItem};
|
||||
pub use self::once::{Once, once};
|
||||
pub use self::or_else::OrElse;
|
||||
pub use self::peek::Peekable;
|
||||
pub use self::select::Select;
|
||||
pub use self::skip::Skip;
|
||||
pub use self::skip_while::SkipWhile;
|
||||
pub use self::take::Take;
|
||||
pub use self::take_while::TakeWhile;
|
||||
pub use self::then::Then;
|
||||
pub use self::unfold::{Unfold, unfold};
|
||||
pub use self::zip::Zip;
|
||||
pub use self::forward::Forward;
|
||||
use sink::{Sink};
|
||||
|
||||
if_std! {
|
||||
use std;
|
||||
|
||||
mod buffered;
|
||||
mod buffer_unordered;
|
||||
mod catch_unwind;
|
||||
mod chunks;
|
||||
mod collect;
|
||||
mod wait;
|
||||
mod channel;
|
||||
mod split;
|
||||
mod futures_unordered;
|
||||
pub use self::buffered::Buffered;
|
||||
pub use self::buffer_unordered::BufferUnordered;
|
||||
pub use self::catch_unwind::CatchUnwind;
|
||||
pub use self::chunks::Chunks;
|
||||
pub use self::collect::Collect;
|
||||
pub use self::wait::Wait;
|
||||
pub use self::split::{SplitStream, SplitSink};
|
||||
pub use self::futures_unordered::{futures_unordered, FuturesUnordered};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[allow(deprecated)]
|
||||
pub use self::channel::{channel, Sender, Receiver, FutureSender, SendError};
|
||||
|
||||
/// A type alias for `Box<Stream + Send>`
|
||||
pub type BoxStream<T, E> = ::std::boxed::Box<Stream<Item = T, Error = E> + Send>;
|
||||
|
||||
impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
(**self).poll()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A stream of values, not all of which may have been produced yet.
|
||||
///
|
||||
/// `Stream` is a trait to represent any source of sequential events or items
|
||||
/// which acts like an iterator but long periods of time may pass between
|
||||
/// items. Like `Future` the methods of `Stream` never block and it is thus
|
||||
/// suitable for programming in an asynchronous fashion. This trait is very
|
||||
/// similar to the `Iterator` trait in the standard library where `Some` is
|
||||
/// used to signal elements of the stream and `None` is used to indicate that
|
||||
/// the stream is finished.
|
||||
///
|
||||
/// Like futures a stream has basic combinators to transform the stream, perform
|
||||
/// more work on each item, etc.
|
||||
///
|
||||
/// You can find more information/tutorials about streams [online at
|
||||
/// https://tokio.rs][online]
|
||||
///
|
||||
/// [online]: https://tokio.rs/docs/getting-started/streams-and-sinks/
|
||||
///
|
||||
/// # Streams as Futures
|
||||
///
|
||||
/// Any instance of `Stream` can also be viewed as a `Future` where the resolved
|
||||
/// value is the next item in the stream along with the rest of the stream. The
|
||||
/// `into_future` adaptor can be used here to convert any stream into a future
|
||||
/// for use with other future methods like `join` and `select`.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Streams, like futures, can also model errors in their computation. All
|
||||
/// streams have an associated `Error` type like with futures. Currently as of
|
||||
/// the 0.1 release of this library an error on a stream **does not terminate
|
||||
/// the stream**. That is, after one error is received, another error may be
|
||||
/// received from the same stream (it's valid to keep polling).
|
||||
///
|
||||
/// This property of streams, however, is [being considered] for change in 0.2
|
||||
/// where an error on a stream is similar to `None`, it terminates the stream
|
||||
/// entirely. If one of these use cases suits you perfectly and not the other,
|
||||
/// please feel welcome to comment on [the issue][being considered]!
|
||||
///
|
||||
/// [being considered]: https://github.com/alexcrichton/futures-rs/issues/206
|
||||
pub trait Stream {
|
||||
/// The type of item this stream will yield on success.
|
||||
type Item;
|
||||
|
||||
/// The type of error this stream may generate.
|
||||
type Error;
|
||||
|
||||
/// Attempt to pull out the next value of this stream, returning `None` if
|
||||
/// the stream is finished.
|
||||
///
|
||||
/// This method, like `Future::poll`, is the sole method of pulling out a
|
||||
/// value from a stream. This method must also typically be run within the
|
||||
/// context of a task, and implementors of this trait must ensure that
|
||||
/// implementations of this method do not block, as it may cause consumers
|
||||
/// to behave badly.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// If `NotReady` is returned then this stream's next value is not ready
|
||||
/// yet and implementations will ensure that the current task will be
|
||||
/// notified when the next value may be ready. If `Some` is returned then
|
||||
/// the returned value represents the next value on the stream. `Err`
|
||||
/// indicates an error happened, while `Ok` indicates whether there was a
|
||||
/// new item on the stream or whether the stream has terminated.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Once a stream is finished, that is `Ready(None)` has been returned,
|
||||
/// further calls to `poll` may result in a panic or other "bad behavior".
|
||||
/// If this is difficult to guard against then the `fuse` adapter can be
|
||||
/// used to ensure that `poll` always has well-defined semantics.
|
||||
// TODO: more here
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error>;
|
||||
|
||||
// TODO: should there also be a method like `poll` but doesn't return an
|
||||
// item? basically just says "please make more progress internally"
|
||||
// seems crucial for buffering to actually make any sense.
|
||||
|
||||
/// Creates an iterator which blocks the current thread until each item of
|
||||
/// this stream is resolved.
|
||||
///
|
||||
/// This method will consume ownership of this stream, returning an
|
||||
/// implementation of a standard iterator. This iterator will *block the
|
||||
/// current thread* on each call to `next` if the item in the stream isn't
|
||||
/// ready yet.
|
||||
///
|
||||
/// > **Note:** This method is not appropriate to call on event loops or
|
||||
/// > similar I/O situations because it will prevent the event
|
||||
/// > loop from making progress (this blocks the thread). This
|
||||
/// > method should only be called when it's guaranteed that the
|
||||
/// > blocking work associated with this stream will be completed
|
||||
/// > by another thread.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// The returned iterator does not attempt to catch panics. If the `poll`
|
||||
/// function panics, panics will be propagated to the caller of `next`.
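///
/// # Examples
///
/// An illustrative sketch (added, not from the upstream docs): draining an
/// in-memory `stream::iter` through the blocking iterator.
///
/// ```
/// use futures::stream::{self, Stream};
///
/// let stream = stream::iter::<_, _, ()>(vec![Ok(1), Ok(2)]);
/// let mut iter = stream.wait();
/// assert_eq!(iter.next(), Some(Ok(1)));
/// assert_eq!(iter.next(), Some(Ok(2)));
/// assert_eq!(iter.next(), None);
/// ```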
|
||||
#[cfg(feature = "use_std")]
|
||||
fn wait(self) -> Wait<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
wait::new(self)
|
||||
}
|
||||
|
||||
/// Convenience function for turning this stream into a trait object.
|
||||
///
|
||||
/// This simply avoids the need to write `Box::new` and can often help with
|
||||
/// type inference as well by always returning a trait object. Note that
|
||||
/// this method requires the `Send` bound and returns a `BoxStream`, which
|
||||
/// also encodes this. If you'd like to create a `Box<Stream>` without the
|
||||
/// `Send` bound, then the `Box::new` function can be used instead.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel(1);
|
||||
/// let a: BoxStream<i32, ()> = rx.boxed();
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
fn boxed(self) -> BoxStream<Self::Item, Self::Error>
|
||||
where Self: Sized + Send + 'static,
|
||||
{
|
||||
::std::boxed::Box::new(self)
|
||||
}
|
||||
|
||||
/// Converts this stream into a `Future`.
|
||||
///
|
||||
/// A stream can be viewed as a future which will resolve to a pair containing
|
||||
/// the next element of the stream plus the remaining stream. If the stream
|
||||
/// terminates, then the next element is `None` and the remaining stream is
|
||||
/// still passed back, to allow reclamation of its resources.
|
||||
///
|
||||
/// The returned future can be used to compose streams and futures together by
|
||||
/// placing everything into the "world of futures".
|
||||
fn into_future(self) -> StreamFuture<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
future::new(self)
|
||||
}
|
||||
|
||||
/// Converts a stream of type `T` to a stream of type `U`.
|
||||
///
|
||||
/// The provided closure is executed over all elements of this stream as
|
||||
/// they are made available, and the callback will be executed inline with
|
||||
/// calls to `poll`.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it, similar to the existing `map` methods in the
|
||||
/// standard library.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
/// let rx = rx.map(|x| x + 3);
|
||||
/// ```
|
||||
fn map<U, F>(self, f: F) -> Map<Self, F>
|
||||
where F: FnMut(Self::Item) -> U,
|
||||
Self: Sized
|
||||
{
|
||||
map::new(self, f)
|
||||
}
|
||||
|
||||
/// Converts a stream of error type `T` to a stream of error type `U`.
|
||||
///
|
||||
/// The provided closure is executed over all errors of this stream as
|
||||
/// they are made available, and the callback will be executed inline with
|
||||
/// calls to `poll`.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it, similar to the existing `map_err` methods in the
|
||||
/// standard library.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
/// let rx = rx.map_err(|()| 3);
|
||||
/// ```
|
||||
fn map_err<U, F>(self, f: F) -> MapErr<Self, F>
|
||||
where F: FnMut(Self::Error) -> U,
|
||||
Self: Sized
|
||||
{
|
||||
map_err::new(self, f)
|
||||
}
|
||||
|
||||
/// Filters the values produced by this stream according to the provided
|
||||
/// predicate.
|
||||
///
|
||||
/// As values of this stream are made available, the provided predicate will
|
||||
/// be run against them. If the predicate returns `true` then the stream
|
||||
/// will yield the value, but if the predicate returns `false` then the
|
||||
/// value will be discarded and the next value will be produced.
|
||||
///
|
||||
/// All errors are passed through without filtering in this combinator.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it, similar to the existing `filter` methods in the
|
||||
/// standard library.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
/// let evens = rx.filter(|x| x % 2 == 0);
|
||||
/// ```
|
||||
fn filter<F>(self, f: F) -> Filter<Self, F>
|
||||
where F: FnMut(&Self::Item) -> bool,
|
||||
Self: Sized
|
||||
{
|
||||
filter::new(self, f)
|
||||
}
|
||||
|
||||
/// Filters the values produced by this stream while simultaneously mapping
|
||||
/// them to a different type.
|
||||
///
|
||||
/// As values of this stream are made available, the provided function will
|
||||
/// be run on them. If the predicate returns `Some(e)` then the stream will
|
||||
/// yield the value `e`, but if the predicate returns `None` then the next
|
||||
/// value will be produced.
|
||||
///
|
||||
/// All errors are passed through without filtering in this combinator.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it, similar to the existing `filter_map` methods in the
|
||||
/// standard library.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
/// let evens_plus_one = rx.filter_map(|x| {
|
||||
/// if x % 2 == 0 {
|
||||
/// Some(x + 1)
|
||||
/// } else {
|
||||
/// None
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
fn filter_map<F, B>(self, f: F) -> FilterMap<Self, F>
|
||||
where F: FnMut(Self::Item) -> Option<B>,
|
||||
Self: Sized
|
||||
{
|
||||
filter_map::new(self, f)
|
||||
}
|
||||
|
||||
/// Chain on a computation for when a value is ready, passing the resulting
|
||||
/// item to the provided closure `f`.
|
||||
///
|
||||
/// This function can be used to ensure a computation runs regardless of
|
||||
/// the next value on the stream. The closure provided will be yielded a
|
||||
/// `Result` once a value is ready, and the returned future will then be run
|
||||
/// to completion to produce the next value on this stream.
|
||||
///
|
||||
/// The returned value of the closure must implement the `IntoFuture` trait
|
||||
/// and can represent some more work to be done before the composed stream
|
||||
/// is finished. Note that the `Result` type implements the `IntoFuture`
|
||||
/// trait so it is possible to simply alter the `Result` yielded to the
|
||||
/// closure and return it.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
///
|
||||
/// let rx = rx.then(|result| {
|
||||
/// match result {
|
||||
/// Ok(e) => Ok(e + 3),
|
||||
/// Err(()) => Err(4),
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
fn then<F, U>(self, f: F) -> Then<Self, F, U>
|
||||
where F: FnMut(Result<Self::Item, Self::Error>) -> U,
|
||||
U: IntoFuture,
|
||||
Self: Sized
|
||||
{
|
||||
then::new(self, f)
|
||||
}
|
||||
|
||||
/// Chain on a computation for when a value is ready, passing the successful
|
||||
/// results to the provided closure `f`.
|
||||
///
|
||||
/// This function can be used to run a unit of work when the next successful
|
||||
/// value on a stream is ready. The closure provided will be yielded a value
|
||||
/// when ready, and the returned future will then be run to completion to
|
||||
/// produce the next value on this stream.
|
||||
///
|
||||
/// Any errors produced by this stream will not be passed to the closure,
|
||||
/// and will be passed through.
|
||||
///
|
||||
/// The returned value of the closure must implement the `IntoFuture` trait
|
||||
/// and can represent some more work to be done before the composed stream
|
||||
/// is finished. Note that the `Result` type implements the `IntoFuture`
|
||||
/// trait so it is possible to simply alter the `Result` yielded to the
|
||||
/// closure and return it.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
///
|
||||
/// let rx = rx.and_then(|result| {
|
||||
/// if result % 2 == 0 {
|
||||
/// Ok(result)
|
||||
/// } else {
|
||||
/// Err(())
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
fn and_then<F, U>(self, f: F) -> AndThen<Self, F, U>
|
||||
where F: FnMut(Self::Item) -> U,
|
||||
U: IntoFuture<Error = Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
and_then::new(self, f)
|
||||
}
|
||||
|
||||
/// Chain on a computation for when an error happens, passing the
|
||||
/// erroneous result to the provided closure `f`.
|
||||
///
|
||||
/// This function can be used to run a unit of work and attempt to recover from
|
||||
/// an error if one happens. The closure provided will be yielded an error
|
||||
/// when one appears, and the returned future will then be run to completion
|
||||
/// to produce the next value on this stream.
|
||||
///
|
||||
/// Any successful values produced by this stream will not be passed to the
|
||||
/// closure, and will be passed through.
|
||||
///
|
||||
/// The returned value of the closure must implement the `IntoFuture` trait
|
||||
/// and can represent some more work to be done before the composed stream
|
||||
/// is finished. Note that the `Result` type implements the `IntoFuture`
|
||||
/// trait so it is possible to simply alter the `Result` yielded to the
|
||||
/// closure and return it.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it.
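///
/// # Examples
///
/// An added sketch: recovering from every error by mapping it back into a
/// successful item (the numbers are arbitrary).
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::future::{ok, Future};
///
/// let stream = stream::iter::<_, i32, i32>(vec![Ok(1), Err(2), Ok(3)]);
/// let recovered = stream.or_else(|e| ok::<i32, ()>(e * 10)).collect();
/// assert_eq!(recovered.wait(), Ok(vec![1, 20, 3]));
/// ```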
|
||||
fn or_else<F, U>(self, f: F) -> OrElse<Self, F, U>
|
||||
where F: FnMut(Self::Error) -> U,
|
||||
U: IntoFuture<Item = Self::Item>,
|
||||
Self: Sized
|
||||
{
|
||||
or_else::new(self, f)
|
||||
}
|
||||
|
||||
/// Collect all of the values of this stream into a vector, returning a
|
||||
/// future representing the result of that computation.
|
||||
///
|
||||
/// This combinator will collect all successful results of this stream and
|
||||
/// collect them into a `Vec<Self::Item>`. If an error happens then all
|
||||
/// collected elements will be dropped and the error will be returned.
|
||||
///
|
||||
/// The returned future will be resolved whenever an error happens or when
|
||||
/// the stream returns `Ok(None)`.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::{Stream, Future, Sink};
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (mut tx, rx) = mpsc::channel(1);
|
||||
///
|
||||
/// thread::spawn(|| {
|
||||
/// for i in (0..5).rev() {
|
||||
/// tx = tx.send(i + 1).wait().unwrap();
|
||||
/// }
|
||||
/// });
|
||||
///
|
||||
/// let mut result = rx.collect();
|
||||
/// assert_eq!(result.wait(), Ok(vec![5, 4, 3, 2, 1]));
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
fn collect(self) -> Collect<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
collect::new(self)
|
||||
}
|
||||
|
||||
/// Concatenate all results of a stream into a single extendable
|
||||
/// destination, returning a future representing the end result.
|
||||
///
|
||||
/// This combinator will extend the first item with the contents
|
||||
/// of all the successful results of the stream. If an error
|
||||
/// occurs, all the results will be dropped and the error will be
|
||||
/// returned.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::{Future, Sink, Stream};
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (mut tx, rx) = mpsc::channel(1);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// for i in (0..3).rev() {
|
||||
/// let n = i * 3;
|
||||
/// tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
|
||||
/// }
|
||||
/// });
|
||||
/// let result = rx.concat();
|
||||
/// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
|
||||
/// ```
|
||||
fn concat(self) -> Concat<Self>
|
||||
where Self: Sized,
|
||||
Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
{
|
||||
concat::new(self)
|
||||
}
|
||||
|
||||
/// Execute an accumulating computation over a stream, collecting all the
|
||||
/// values into one final result.
|
||||
///
|
||||
/// This combinator will collect all successful results of this stream
|
||||
/// according to the closure provided. The initial state is also provided to
|
||||
/// this method and then is returned again by each execution of the closure.
|
||||
/// Once the entire stream has been exhausted the returned future will
|
||||
/// resolve to this value.
|
||||
///
|
||||
/// If an error happens then collected state will be dropped and the error
|
||||
/// will be returned.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::{self, Stream};
|
||||
/// use futures::future::{ok, Future};
|
||||
///
|
||||
/// let number_stream = stream::iter::<_, _, ()>((0..6).map(Ok));
|
||||
/// let sum = number_stream.fold(0, |a, b| ok(a + b));
|
||||
/// assert_eq!(sum.wait(), Ok(15));
|
||||
/// ```
|
||||
fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
|
||||
where F: FnMut(T, Self::Item) -> Fut,
|
||||
Fut: IntoFuture<Item = T>,
|
||||
Self::Error: From<Fut::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
fold::new(self, f, init)
|
||||
}
|
||||
|
||||
/// Flattens a stream of streams into just one continuous stream.
|
||||
///
|
||||
/// If this stream's elements are themselves streams then this combinator
|
||||
/// will flatten out the entire stream to one long chain of elements. Any
|
||||
/// errors are passed through without looking at them, but otherwise each
|
||||
/// individual stream will get exhausted before moving on to the next.
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::{Future, Stream, Poll, Sink};
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (tx1, rx1) = mpsc::channel::<i32>(1);
|
||||
/// let (tx2, rx2) = mpsc::channel::<i32>(1);
|
||||
/// let (tx3, rx3) = mpsc::channel(1);
|
||||
///
|
||||
/// thread::spawn(|| {
|
||||
/// tx1.send(1).wait().unwrap()
|
||||
/// .send(2).wait().unwrap();
|
||||
/// });
|
||||
/// thread::spawn(|| {
|
||||
/// tx2.send(3).wait().unwrap()
|
||||
/// .send(4).wait().unwrap();
|
||||
/// });
|
||||
/// thread::spawn(|| {
|
||||
/// tx3.send(rx1).wait().unwrap()
|
||||
/// .send(rx2).wait().unwrap();
|
||||
/// });
|
||||
///
|
||||
/// let mut result = rx3.flatten().collect();
|
||||
/// assert_eq!(result.wait(), Ok(vec![1, 2, 3, 4]));
|
||||
/// ```
|
||||
fn flatten(self) -> Flatten<Self>
|
||||
where Self::Item: Stream,
|
||||
<Self::Item as Stream>::Error: From<Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
flatten::new(self)
|
||||
}
|
||||
|
||||
/// Skip elements on this stream while the predicate provided resolves to
|
||||
/// `true`.
|
||||
///
|
||||
/// This function, like `Iterator::skip_while`, will skip elements on the
|
||||
/// stream until the `predicate` resolves to `false`. Once one element
|
||||
/// returns false all future elements will be returned from the underlying
|
||||
/// stream.
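///
/// # Examples
///
/// An added sketch using an immediately-ready predicate future:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::future::{ok, Future};
///
/// let stream = stream::iter::<_, _, ()>((1..6).map(Ok));
/// let skipped = stream.skip_while(|x| ok(*x < 3)).collect();
/// assert_eq!(skipped.wait(), Ok(vec![3, 4, 5]));
/// ```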
|
||||
fn skip_while<P, R>(self, pred: P) -> SkipWhile<Self, P, R>
|
||||
where P: FnMut(&Self::Item) -> R,
|
||||
R: IntoFuture<Item=bool, Error=Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
skip_while::new(self, pred)
|
||||
}
|
||||
|
||||
/// Take elements from this stream while the predicate provided resolves to
|
||||
/// `true`.
|
||||
///
|
||||
/// This function, like `Iterator::take_while`, will take elements from the
|
||||
/// stream until the `predicate` resolves to `false`. Once one element
|
||||
/// returns false it will always return that the stream is done.
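///
/// # Examples
///
/// An added sketch mirroring the `skip_while` example:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::future::{ok, Future};
///
/// let stream = stream::iter::<_, _, ()>((1..6).map(Ok));
/// let taken = stream.take_while(|x| ok(*x < 3)).collect();
/// assert_eq!(taken.wait(), Ok(vec![1, 2]));
/// ```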
|
||||
fn take_while<P, R>(self, pred: P) -> TakeWhile<Self, P, R>
|
||||
where P: FnMut(&Self::Item) -> R,
|
||||
R: IntoFuture<Item=bool, Error=Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
take_while::new(self, pred)
|
||||
}
|
||||
|
||||
/// Runs this stream to completion, executing the provided closure for each
|
||||
/// element on the stream.
|
||||
///
|
||||
/// The closure provided will be called for each item this stream resolves
|
||||
/// to successfully, producing a future. That future will then be executed
|
||||
/// to completion before moving on to the next item.
|
||||
///
|
||||
/// The returned value is a `Future` where the `Item` type is `()` and
|
||||
/// errors are otherwise threaded through. Any error on the stream or in the
|
||||
/// closure will cause iteration to be halted immediately and the future
|
||||
/// will resolve to that error.
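///
/// # Examples
///
/// An added sketch: printing each item and finishing with `Ok(())`.
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::future::{ok, Future};
///
/// let stream = stream::iter::<_, _, ()>((0..3).map(Ok));
/// let fut = stream.for_each(|x| {
///     println!("got: {}", x);
///     ok(())
/// });
/// assert_eq!(fut.wait(), Ok(()));
/// ```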
|
||||
fn for_each<F, U>(self, f: F) -> ForEach<Self, F, U>
|
||||
where F: FnMut(Self::Item) -> U,
|
||||
U: IntoFuture<Item=(), Error = Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
for_each::new(self, f)
|
||||
}
|
||||
|
||||
/// Map this stream's error to any error implementing `From` for
|
||||
/// this stream's `Error`, returning a new stream.
|
||||
///
|
||||
/// This function does for streams what `try!` does for `Result`,
|
||||
/// by letting the compiler infer the type of the resulting error.
|
||||
/// Just as `map_err` above, this is useful for example to ensure
|
||||
/// that streams have the same error type when used with
|
||||
/// combinators.
|
||||
///
|
||||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it.
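///
/// # Examples
///
/// An added sketch widening a `u8` error into a `u64` one via `From`:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::Future;
///
/// let stream = stream::iter::<_, _, u8>(vec![Ok(17)]);
/// let widened = stream.from_err::<u64>();
/// assert_eq!(widened.collect().wait(), Ok(vec![17]));
/// ```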
|
||||
fn from_err<E: From<Self::Error>>(self) -> FromErr<Self, E>
|
||||
where Self: Sized,
|
||||
{
|
||||
from_err::new(self)
|
||||
}
|
||||
|
||||
/// Creates a new stream of at most `amt` items of the underlying stream.
|
||||
///
|
||||
/// Once `amt` items have been yielded from this stream then it will always
|
||||
/// return that the stream is done.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Any errors yielded from underlying stream, before the desired amount of
|
||||
/// items is reached, are passed through and do not affect the total number
|
||||
/// of items taken.
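///
/// # Examples
///
/// An added sketch:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::Future;
///
/// let stream = stream::iter::<_, _, ()>((1..6).map(Ok));
/// assert_eq!(stream.take(2).collect().wait(), Ok(vec![1, 2]));
/// ```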
|
||||
fn take(self, amt: u64) -> Take<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
take::new(self, amt)
|
||||
}
|
||||
|
||||
/// Creates a new stream which skips `amt` items of the underlying stream.
|
||||
///
|
||||
/// Once `amt` items have been skipped from this stream then it will always
|
||||
/// return the remaining items on this stream.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// All errors yielded from underlying stream are passed through and do not
|
||||
/// affect the total number of items skipped.
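///
/// # Examples
///
/// An added sketch, the counterpart of the `take` example:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::Future;
///
/// let stream = stream::iter::<_, _, ()>((1..6).map(Ok));
/// assert_eq!(stream.skip(3).collect().wait(), Ok(vec![4, 5]));
/// ```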
|
||||
fn skip(self, amt: u64) -> Skip<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
skip::new(self, amt)
|
||||
}
|
||||
|
||||
/// Fuse a stream such that `poll` will never again be called once it has
|
||||
/// finished.
|
||||
///
|
||||
/// Currently once a stream has returned `None` from `poll` any further
|
||||
/// calls could exhibit bad behavior such as blocking forever, panicking, or
|
||||
/// never returning. If it is known that `poll` may be called after the stream has
|
||||
/// already finished, then this method can be used to ensure that it has
|
||||
/// defined semantics.
|
||||
///
|
||||
/// Once a stream has been `fuse`d and it finishes, then it will forever
|
||||
/// return `None` from `poll`. This, unlike the trait's `poll` method,
|
||||
/// is guaranteed.
|
||||
///
|
||||
/// Also note that as soon as this stream returns `None` it will be dropped
|
||||
/// to reclaim resources associated with it.
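///
/// # Examples
///
/// An added sketch: a fused stream keeps reporting `None` after it finishes.
///
/// ```
/// use futures::{Async, Stream};
/// use futures::stream;
///
/// let mut stream = stream::iter::<_, _, ()>(vec![Ok(1)]).fuse();
/// assert_eq!(stream.poll(), Ok(Async::Ready(Some(1))));
/// assert_eq!(stream.poll(), Ok(Async::Ready(None)));
/// assert_eq!(stream.poll(), Ok(Async::Ready(None)));
/// ```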
|
||||
fn fuse(self) -> Fuse<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
fuse::new(self)
|
||||
}
|
||||
|
||||
/// Catches unwinding panics while polling the stream.
|
||||
///
|
||||
/// A caught panic (if any) will be the last element of the resulting stream.
|
||||
///
|
||||
/// In general, panics within a stream can propagate all the way out to the
|
||||
/// task level. This combinator makes it possible to halt unwinding within
|
||||
/// the stream itself. It's most commonly used within task executors. This
|
||||
/// method should not be used for error handling.
|
||||
///
|
||||
/// Note that this method requires the `UnwindSafe` bound from the standard
|
||||
/// library. This isn't always applied automatically, and the standard
|
||||
/// library provides an `AssertUnwindSafe` wrapper type to apply it
|
||||
/// after the fact. To assist using this method, the `Stream` trait is also
|
||||
/// implemented for `AssertUnwindSafe<S>` where `S` implements `Stream`.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::stream;
|
||||
/// use futures::stream::Stream;
|
||||
///
|
||||
/// let stream = stream::iter::<_, Option<i32>, bool>(vec![
|
||||
/// Some(10), None, Some(11)].into_iter().map(Ok));
|
||||
/// // panic on second element
|
||||
/// let stream_panicking = stream.map(|o| o.unwrap());
|
||||
/// let mut iter = stream_panicking.catch_unwind().wait();
|
||||
///
|
||||
/// assert_eq!(Ok(10), iter.next().unwrap().ok().unwrap());
|
||||
/// assert!(iter.next().unwrap().is_err());
|
||||
/// assert!(iter.next().is_none());
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
fn catch_unwind(self) -> CatchUnwind<Self>
|
||||
where Self: Sized + std::panic::UnwindSafe
|
||||
{
|
||||
catch_unwind::new(self)
|
||||
}
|
||||
|
||||
/// An adaptor for creating a buffered list of pending futures.
|
||||
///
|
||||
/// If this stream's item can be converted into a future, then this adaptor
|
||||
/// will buffer up to at most `amt` futures and then return results in the
|
||||
/// same order as the underlying stream. No more than `amt` futures will be
|
||||
/// buffered at any point in time, and less than `amt` may also be buffered
|
||||
/// depending on the state of each future.
|
||||
///
|
||||
/// The returned stream will be a stream of each future's result, with
|
||||
/// errors passed through whenever they occur.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
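///
/// # Examples
///
/// An added sketch where the stream's items are already-completed futures
/// (`FutureResult` from `futures::future`, as returned by `ok`):
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::future::{ok, Future, FutureResult};
///
/// let futs = stream::iter::<_, FutureResult<i32, ()>, ()>(
///     vec![Ok(ok(1)), Ok(ok(2)), Ok(ok(3))]);
/// assert_eq!(futs.buffered(2).collect().wait(), Ok(vec![1, 2, 3]));
/// ```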
|
||||
#[cfg(feature = "use_std")]
|
||||
fn buffered(self, amt: usize) -> Buffered<Self>
|
||||
where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
buffered::new(self, amt)
|
||||
}
|
||||
|
||||
/// An adaptor for creating a buffered list of pending futures (unordered).
|
||||
///
|
||||
/// If this stream's item can be converted into a future, then this adaptor
|
||||
/// will buffer up to `amt` futures and then return results in the order
|
||||
/// in which they complete. No more than `amt` futures will be buffered at
|
||||
/// any point in time, and less than `amt` may also be buffered depending on
|
||||
/// the state of each future.
|
||||
///
|
||||
/// The returned stream will be a stream of each future's result, with
|
||||
/// errors passed through whenever they occur.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
#[cfg(feature = "use_std")]
|
||||
fn buffer_unordered(self, amt: usize) -> BufferUnordered<Self>
|
||||
where Self::Item: IntoFuture<Error = <Self as Stream>::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
buffer_unordered::new(self, amt)
|
||||
}
|
||||
|
||||
/// An adapter for merging the output of two streams.
|
||||
///
|
||||
/// The merged stream produces items from one or both of the underlying
|
||||
/// streams as they become available. Errors, however, are not merged: you
|
||||
/// get at most one error at a time.
|
||||
fn merge<S>(self, other: S) -> Merge<Self, S>
|
||||
where S: Stream<Error = Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
merge::new(self, other)
|
||||
}
|
||||
|
||||
/// An adapter for zipping two streams together.
|
||||
///
|
||||
/// The zipped stream waits for both streams to produce an item, and then
|
||||
/// returns that pair. If an error happens, then that error will be returned
|
||||
/// immediately. If either stream ends then the zipped stream will also end.
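///
/// # Examples
///
/// An added sketch pairing up two in-memory streams:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::Future;
///
/// let a = stream::iter::<_, _, ()>(vec![Ok(1), Ok(2), Ok(3)]);
/// let b = stream::iter::<_, _, ()>(vec![Ok('a'), Ok('b')]);
/// assert_eq!(a.zip(b).collect().wait(), Ok(vec![(1, 'a'), (2, 'b')]));
/// ```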
|
||||
fn zip<S>(self, other: S) -> Zip<Self, S>
|
||||
where S: Stream<Error = Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
zip::new(self, other)
|
||||
}
|
||||
|
||||
/// Adapter for chaining two streams.
|
||||
///
|
||||
/// The resulting stream emits elements from the first stream, and when
|
||||
/// the first stream reaches the end, emits the elements from the second stream.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::stream;
|
||||
/// use futures::stream::Stream;
|
||||
///
|
||||
/// let stream1 = stream::iter(vec![Ok(10), Err(false)]);
|
||||
/// let stream2 = stream::iter(vec![Err(true), Ok(20)]);
|
||||
/// let mut chain = stream1.chain(stream2).wait();
|
||||
///
|
||||
/// assert_eq!(Some(Ok(10)), chain.next());
|
||||
/// assert_eq!(Some(Err(false)), chain.next());
|
||||
/// assert_eq!(Some(Err(true)), chain.next());
|
||||
/// assert_eq!(Some(Ok(20)), chain.next());
|
||||
/// assert_eq!(None, chain.next());
|
||||
/// ```
|
||||
fn chain<S>(self, other: S) -> Chain<Self, S>
|
||||
where S: Stream<Item = Self::Item, Error = Self::Error>,
|
||||
Self: Sized
|
||||
{
|
||||
chain::new(self, other)
|
||||
}
|
||||
|
||||
/// Creates a new stream which exposes a `peek` method.
|
||||
///
|
||||
/// Calling `peek` returns a reference to the next item in the stream.
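///
/// # Examples
///
/// An added sketch: peeking does not consume the item, a later `poll` does.
///
/// ```
/// use futures::{Async, Stream};
/// use futures::stream;
///
/// let mut stream = stream::iter::<_, _, ()>(vec![Ok(10), Ok(20)]).peekable();
/// assert_eq!(stream.peek(), Ok(Async::Ready(Some(&10))));
/// assert_eq!(stream.poll(), Ok(Async::Ready(Some(10))));
/// ```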
|
||||
fn peekable(self) -> Peekable<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
peek::new(self)
|
||||
}
|
||||
|
||||
/// An adaptor for chunking up items of the stream inside a vector.
|
||||
///
|
||||
/// This combinator will attempt to pull items from this stream and buffer
|
||||
/// them into a local vector. At most `capacity` items will get buffered
|
||||
/// before they're yielded from the returned stream.
|
||||
///
|
||||
/// Note that the vectors returned from this iterator may not always have
|
||||
/// `capacity` elements. If the underlying stream ended and only a partial
|
||||
/// vector was created, it'll be returned. Additionally if an error happens
|
||||
/// from the underlying stream then the currently buffered items will be
|
||||
/// yielded.
|
||||
///
|
||||
/// Errors are passed through the stream unbuffered.
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method will panic if `capacity` is zero.
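///
/// # Examples
///
/// An added sketch showing the trailing partial chunk:
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::Future;
///
/// let stream = stream::iter::<_, _, ()>((0..5).map(Ok));
/// let chunks = stream.chunks(2).collect();
/// assert_eq!(chunks.wait(), Ok(vec![vec![0, 1], vec![2, 3], vec![4]]));
/// ```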
|
||||
#[cfg(feature = "use_std")]
|
||||
fn chunks(self, capacity: usize) -> Chunks<Self>
|
||||
where Self: Sized
|
||||
{
|
||||
chunks::new(self, capacity)
|
||||
}
|
||||
|
||||
/// Creates a stream that selects the next element from either this stream
|
||||
/// or the provided one, whichever is ready first.
|
||||
///
|
||||
/// This combinator will attempt to pull items from both streams. Each
|
||||
/// stream will be polled in a round-robin fashion, and whenever a stream is
|
||||
/// ready to yield an item that item is yielded.
|
||||
///
|
||||
/// The `select` function is similar to `merge` except that it requires both
|
||||
/// streams to have the same item and error types.
|
||||
///
|
||||
/// Errors are passed through from either stream.
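///
/// # Examples
///
/// An added sketch: with two always-ready streams the round-robin polling
/// simply interleaves their items.
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::Future;
///
/// let a = stream::iter::<_, _, ()>(vec![Ok(1), Ok(2)]);
/// let b = stream::iter::<_, _, ()>(vec![Ok(10), Ok(20)]);
/// assert_eq!(a.select(b).collect().wait(), Ok(vec![1, 10, 2, 20]));
/// ```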
|
||||
fn select<S>(self, other: S) -> Select<Self, S>
|
||||
where S: Stream<Item = Self::Item, Error = Self::Error>,
|
||||
Self: Sized,
|
||||
{
|
||||
select::new(self, other)
|
||||
}
|
||||
|
||||
/// A future that completes after the given stream has been fully processed
|
||||
/// into the sink, including flushing.
|
||||
///
|
||||
/// This future will drive the stream to keep producing items until it is
|
||||
/// exhausted, sending each item to the sink. It will complete once both the
|
||||
/// stream is exhausted, and the sink has fully processed and flushed all of
|
||||
/// the items sent to it.
|
||||
///
|
||||
/// Doing `stream.forward(sink)` is roughly equivalent to
|
||||
/// `sink.send_all(stream)`.
|
||||
///
|
||||
/// On completion, the pair `(stream, sink)` is returned.
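///
/// # Examples
///
/// An added sketch driving an in-memory stream into an `mpsc` sender; the
/// source stream is given the sink's error type so that the
/// `Self::Error: From<S::SinkError>` bound holds trivially.
///
/// ```
/// use futures::stream::{self, Stream};
/// use futures::sync::mpsc;
/// use futures::Future;
///
/// let (tx, rx) = mpsc::channel(8);
/// let source = stream::iter::<_, _, mpsc::SendError<i32>>((1..4).map(Ok));
/// let (_source, tx) = source.forward(tx).wait().unwrap();
/// drop(tx); // close the channel so the receiver below terminates
/// assert_eq!(rx.collect().wait(), Ok(vec![1, 2, 3]));
/// ```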
|
||||
fn forward<S>(self, sink: S) -> Forward<Self, S>
|
||||
where S: Sink<SinkItem = Self::Item>,
|
||||
Self::Error: From<S::SinkError>,
|
||||
Self: Sized
|
||||
{
|
||||
forward::new(self, sink)
|
||||
}
|
||||
|
||||
/// Splits this `Stream + Sink` object into separate `Stream` and `Sink`
|
||||
/// objects.
|
||||
///
|
||||
/// This can be useful when you want to split ownership between tasks, or
|
||||
/// allow direct interaction between the two objects (e.g. via
|
||||
/// `Sink::send_all`).
|
||||
///
|
||||
/// This method is only available when the `use_std` feature of this
|
||||
/// library is activated, and it is activated by default.
|
||||
#[cfg(feature = "use_std")]
|
||||
fn split(self) -> (SplitSink<Self>, SplitStream<Self>)
|
||||
where Self: super::sink::Sink + Sized
|
||||
{
|
||||
split::split(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
(**self).poll()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,34 @@
|
|||
use core;
|
||||
|
||||
use Poll;
|
||||
use stream;
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream which emits a single element and then EOF.
|
||||
///
|
||||
/// This stream will never block and is always ready.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Once<T, E>(stream::Iter<core::iter::Once<Result<T, E>>>);
|
||||
|
||||
/// Creates a stream of a single element.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let mut stream = stream::once::<(), _>(Err(17));
|
||||
/// assert_eq!(Err(17), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> {
|
||||
Once(stream::iter(core::iter::once(item)))
|
||||
}
|
||||
|
||||
impl<T, E> Stream for Once<T, E> {
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
self.0.poll()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,80 @@
|
|||
use {IntoFuture, Future, Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which chains a computation onto errors produced by a
|
||||
/// stream.
|
||||
///
|
||||
/// This structure is produced by the `Stream::or_else` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct OrElse<S, F, U>
|
||||
where U: IntoFuture,
|
||||
{
|
||||
stream: S,
|
||||
future: Option<U::Future>,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F, U>(s: S, f: F) -> OrElse<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Error) -> U,
|
||||
U: IntoFuture<Item=S::Item>,
|
||||
{
|
||||
OrElse {
|
||||
stream: s,
|
||||
future: None,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F, U> ::sink::Sink for OrElse<S, F, U>
|
||||
where S: ::sink::Sink, U: IntoFuture
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> Stream for OrElse<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(S::Error) -> U,
|
||||
U: IntoFuture<Item=S::Item>,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = U::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, U::Error> {
|
||||
if self.future.is_none() {
|
||||
let item = match self.stream.poll() {
|
||||
Ok(Async::Ready(e)) => return Ok(Async::Ready(e)),
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(e) => e,
|
||||
};
|
||||
self.future = Some((self.f)(item).into_future());
|
||||
}
|
||||
assert!(self.future.is_some());
|
||||
match self.future.as_mut().unwrap().poll() {
|
||||
Ok(Async::Ready(e)) => {
|
||||
self.future = None;
|
||||
Ok(Async::Ready(Some(e)))
|
||||
}
|
||||
Err(e) => {
|
||||
self.future = None;
|
||||
Err(e)
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,74 @@
|
|||
use {Async, Poll};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
/// A `Stream` that implements a `peek` method.
|
||||
///
|
||||
/// The `peek` method can be used to retrieve a reference
|
||||
/// to the next `Stream::Item` if available. A subsequent
|
||||
/// call to `poll` will return the owned item.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Peekable<S: Stream> {
|
||||
stream: Fuse<S>,
|
||||
peeked: Option<S::Item>,
|
||||
}
|
||||
|
||||
|
||||
pub fn new<S: Stream>(stream: S) -> Peekable<S> {
|
||||
Peekable {
|
||||
stream: stream.fuse(),
|
||||
peeked: None
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Peekable<S>
|
||||
where S: ::sink::Sink + Stream
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Stream for Peekable<S> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if let Some(item) = self.peeked.take() {
|
||||
return Ok(Async::Ready(Some(item)))
|
||||
}
|
||||
self.stream.poll()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<S: Stream> Peekable<S> {
|
||||
/// Peek retrieves a reference to the next item in the stream.
|
||||
///
|
||||
/// This method polls the underlying stream and returns either a reference
|
||||
/// to the next item if the stream is ready or passes through any errors.
|
||||
pub fn peek(&mut self) -> Poll<Option<&S::Item>, S::Error> {
|
||||
if self.peeked.is_some() {
|
||||
return Ok(Async::Ready(self.peeked.as_ref()))
|
||||
}
|
||||
match try_ready!(self.poll()) {
|
||||
None => Ok(Async::Ready(None)),
|
||||
Some(item) => {
|
||||
self.peeked = Some(item);
|
||||
Ok(Async::Ready(self.peeked.as_ref()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
use core::marker;
|
||||
|
||||
|
||||
use stream::Stream;
|
||||
|
||||
use {Async, Poll};
|
||||
|
||||
|
||||
/// Stream that produces the same element repeatedly.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Repeat<T, E>
|
||||
where T: Clone
|
||||
{
|
||||
item: T,
|
||||
error: marker::PhantomData<E>,
|
||||
}
|
||||
|
||||
/// Create a stream which produces the same item repeatedly.
|
||||
///
|
||||
/// The stream never produces an error or EOF.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let mut stream = stream::repeat::<_, bool>(10);
|
||||
/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(Some(10))), stream.poll());
|
||||
/// ```
|
||||
pub fn repeat<T, E>(item: T) -> Repeat<T, E>
|
||||
where T: Clone
|
||||
{
|
||||
Repeat {
|
||||
item: item,
|
||||
error: marker::PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Stream for Repeat<T, E>
|
||||
where T: Clone
|
||||
{
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
Ok(Async::Ready(Some(self.item.clone())))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,65 @@
|
|||
use {Poll, Async};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
/// An adapter for merging the output of two streams.
|
||||
///
|
||||
/// The merged stream produces items from either of the underlying streams as
|
||||
/// they become available, and the streams are polled in a round-robin fashion.
|
||||
/// Errors, however, are not merged: you get at most one error at a time.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Select<S1, S2> {
|
||||
stream1: Fuse<S1>,
|
||||
stream2: Fuse<S2>,
|
||||
flag: bool,
|
||||
}
|
||||
|
||||
pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Select<S1, S2>
|
||||
where S1: Stream,
|
||||
S2: Stream<Item = S1::Item, Error = S1::Error>
|
||||
{
|
||||
Select {
|
||||
stream1: stream1.fuse(),
|
||||
stream2: stream2.fuse(),
|
||||
flag: false,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S1, S2> Stream for Select<S1, S2>
|
||||
where S1: Stream,
|
||||
S2: Stream<Item = S1::Item, Error = S1::Error>
|
||||
{
|
||||
type Item = S1::Item;
|
||||
type Error = S1::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S1::Item>, S1::Error> {
|
||||
let (a, b) = if self.flag {
|
||||
(&mut self.stream2 as &mut Stream<Item=_, Error=_>,
|
||||
&mut self.stream1 as &mut Stream<Item=_, Error=_>)
|
||||
} else {
|
||||
(&mut self.stream1 as &mut Stream<Item=_, Error=_>,
|
||||
&mut self.stream2 as &mut Stream<Item=_, Error=_>)
|
||||
};
|
||||
self.flag = !self.flag;
|
||||
|
||||
let a_done = match try!(a.poll()) {
|
||||
Async::Ready(Some(item)) => return Ok(Some(item).into()),
|
||||
Async::Ready(None) => true,
|
||||
Async::NotReady => false,
|
||||
};
|
||||
|
||||
match try!(b.poll()) {
|
||||
Async::Ready(Some(item)) => {
|
||||
// If the other stream isn't finished yet, give them a chance to
|
||||
// go first next time as we pulled something off `b`.
|
||||
if !a_done {
|
||||
self.flag = !self.flag;
|
||||
}
|
||||
return Ok(Some(item).into())
|
||||
}
|
||||
Async::Ready(None) if a_done => Ok(None.into()),
|
||||
Async::Ready(None) => Ok(Async::NotReady),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,59 @@
|
|||
use {Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which skips a number of elements before continuing.
|
||||
///
|
||||
/// This structure is produced by the `Stream::skip` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Skip<S> {
|
||||
stream: S,
|
||||
remaining: u64,
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, amt: u64) -> Skip<S>
|
||||
where S: Stream,
|
||||
{
|
||||
Skip {
|
||||
stream: s,
|
||||
remaining: amt,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Skip<S>
|
||||
where S: ::sink::Sink
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for Skip<S>
|
||||
where S: Stream,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
while self.remaining > 0 {
|
||||
match try_ready!(self.stream.poll()) {
|
||||
Some(_) => self.remaining -= 1,
|
||||
None => return Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
|
||||
self.stream.poll()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
use {Async, Poll, IntoFuture, Future};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which skips elements of a stream while a predicate
|
||||
/// holds.
|
||||
///
|
||||
/// This structure is produced by the `Stream::skip_while` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
|
||||
stream: S,
|
||||
pred: P,
|
||||
pending: Option<(R::Future, S::Item)>,
|
||||
done_skipping: bool,
|
||||
}
|
||||
|
||||
pub fn new<S, P, R>(s: S, p: P) -> SkipWhile<S, P, R>
|
||||
where S: Stream,
|
||||
P: FnMut(&S::Item) -> R,
|
||||
R: IntoFuture<Item=bool, Error=S::Error>,
|
||||
{
|
||||
SkipWhile {
|
||||
stream: s,
|
||||
pred: p,
|
||||
pending: None,
|
||||
done_skipping: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, P, R> ::sink::Sink for SkipWhile<S, P, R>
|
||||
where S: ::sink::Sink + Stream, R: IntoFuture
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, P, R> Stream for SkipWhile<S, P, R>
|
||||
where S: Stream,
|
||||
P: FnMut(&S::Item) -> R,
|
||||
R: IntoFuture<Item=bool, Error=S::Error>,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
if self.done_skipping {
|
||||
return self.stream.poll();
|
||||
}
|
||||
|
||||
loop {
|
||||
if self.pending.is_none() {
|
||||
let item = match try_ready!(self.stream.poll()) {
|
||||
Some(e) => e,
|
||||
None => return Ok(Async::Ready(None)),
|
||||
};
|
||||
self.pending = Some(((self.pred)(&item).into_future(), item));
|
||||
}
|
||||
|
||||
assert!(self.pending.is_some());
|
||||
match self.pending.as_mut().unwrap().0.poll() {
|
||||
Ok(Async::Ready(true)) => self.pending = None,
|
||||
Ok(Async::Ready(false)) => {
|
||||
let (_, item) = self.pending.take().unwrap();
|
||||
self.done_skipping = true;
|
||||
return Ok(Async::Ready(Some(item)))
|
||||
}
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(e) => {
|
||||
self.pending = None;
|
||||
return Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,57 @@
|
|||
use {StartSend, Sink, Stream, Poll, Async, AsyncSink};
|
||||
use sync::BiLock;
|
||||
|
||||
/// A `Stream` part of the split pair
|
||||
#[derive(Debug)]
|
||||
pub struct SplitStream<S>(BiLock<S>);
|
||||
|
||||
impl<S: Stream> Stream for SplitStream<S> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
match self.0.poll_lock() {
|
||||
Async::Ready(mut inner) => inner.poll(),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A `Sink` part of the split pair
|
||||
#[derive(Debug)]
|
||||
pub struct SplitSink<S>(BiLock<S>);
|
||||
|
||||
impl<S: Sink> Sink for SplitSink<S> {
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem)
|
||||
-> StartSend<S::SinkItem, S::SinkError>
|
||||
{
|
||||
match self.0.poll_lock() {
|
||||
Async::Ready(mut inner) => inner.start_send(item),
|
||||
Async::NotReady => Ok(AsyncSink::NotReady(item)),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
match self.0.poll_lock() {
|
||||
Async::Ready(mut inner) => inner.poll_complete(),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
match self.0.poll_lock() {
|
||||
Async::Ready(mut inner) => inner.close(),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn split<S: Stream + Sink>(s: S) -> (SplitSink<S>, SplitStream<S>) {
|
||||
let (a, b) = BiLock::new(s);
|
||||
let read = SplitStream(a);
|
||||
let write = SplitSink(b);
|
||||
(write, read)
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which returns a maximum number of elements.
|
||||
///
|
||||
/// This structure is produced by the `Stream::take` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Take<S> {
|
||||
stream: S,
|
||||
remaining: u64,
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, amt: u64) -> Take<S>
|
||||
where S: Stream,
|
||||
{
|
||||
Take {
|
||||
stream: s,
|
||||
remaining: amt,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Take<S>
|
||||
where S: ::sink::Sink + Stream
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for Take<S>
|
||||
where S: Stream,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
if self.remaining == 0 {
|
||||
Ok(Async::Ready(None))
|
||||
} else {
|
||||
let next = try_ready!(self.stream.poll());
|
||||
match next {
|
||||
Some(_) => self.remaining -= 1,
|
||||
None => self.remaining = 0,
|
||||
}
|
||||
Ok(Async::Ready(next))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
use {Async, Poll, IntoFuture, Future};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which takes elements from a stream while a predicate
|
||||
/// holds.
|
||||
///
|
||||
/// This structure is produced by the `Stream::take_while` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
|
||||
stream: S,
|
||||
pred: P,
|
||||
pending: Option<(R::Future, S::Item)>,
|
||||
done_taking: bool,
|
||||
}
|
||||
|
||||
pub fn new<S, P, R>(s: S, p: P) -> TakeWhile<S, P, R>
|
||||
where S: Stream,
|
||||
P: FnMut(&S::Item) -> R,
|
||||
R: IntoFuture<Item=bool, Error=S::Error>,
|
||||
{
|
||||
TakeWhile {
|
||||
stream: s,
|
||||
pred: p,
|
||||
pending: None,
|
||||
done_taking: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, P, R> ::sink::Sink for TakeWhile<S, P, R>
|
||||
where S: ::sink::Sink + Stream, R: IntoFuture
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, P, R> Stream for TakeWhile<S, P, R>
|
||||
where S: Stream,
|
||||
P: FnMut(&S::Item) -> R,
|
||||
R: IntoFuture<Item=bool, Error=S::Error>,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
if self.done_taking {
|
||||
return Ok(Async::Ready(None));
|
||||
}
|
||||
|
||||
if self.pending.is_none() {
|
||||
let item = match try_ready!(self.stream.poll()) {
|
||||
Some(e) => e,
|
||||
None => return Ok(Async::Ready(None)),
|
||||
};
|
||||
self.pending = Some(((self.pred)(&item).into_future(), item));
|
||||
}
|
||||
|
||||
assert!(self.pending.is_some());
|
||||
match self.pending.as_mut().unwrap().0.poll() {
|
||||
Ok(Async::Ready(true)) => {
|
||||
let (_, item) = self.pending.take().unwrap();
|
||||
Ok(Async::Ready(Some(item)))
|
||||
},
|
||||
Ok(Async::Ready(false)) => {
|
||||
self.done_taking = true;
|
||||
Ok(Async::Ready(None))
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(e) => {
|
||||
self.pending = None;
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
use {Async, IntoFuture, Future, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream combinator which chains a computation onto each item produced by a
|
||||
/// stream.
|
||||
///
|
||||
/// This structure is produced by the `Stream::then` method.
|
||||
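///
/// # Example
///
/// A small illustrative doctest (not from the original docs); unlike `map`,
/// the closure passed to `then` sees both items and errors:
///
/// ```rust
/// use futures::stream::{self, Stream};
/// use futures::future::{self, Future};
///
/// // Turn both the item and the error of the underlying stream into items.
/// let recovered = stream::iter(vec![Ok::<u32, u32>(1), Err(2)].into_iter())
///     .then(|res| match res {
///         Ok(v) => future::ok::<u32, ()>(v * 10),
///         Err(e) => future::ok(e),
///     });
/// assert_eq!(recovered.collect().wait(), Ok(vec![10, 2]));
/// ```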
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Then<S, F, U>
|
||||
where U: IntoFuture,
|
||||
{
|
||||
stream: S,
|
||||
future: Option<U::Future>,
|
||||
f: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F, U>(s: S, f: F) -> Then<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(Result<S::Item, S::Error>) -> U,
|
||||
U: IntoFuture,
|
||||
{
|
||||
Then {
|
||||
stream: s,
|
||||
future: None,
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F, U> ::sink::Sink for Then<S, F, U>
|
||||
where S: ::sink::Sink, U: IntoFuture,
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> Stream for Then<S, F, U>
|
||||
where S: Stream,
|
||||
F: FnMut(Result<S::Item, S::Error>) -> U,
|
||||
U: IntoFuture,
|
||||
{
|
||||
type Item = U::Item;
|
||||
type Error = U::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<U::Item>, U::Error> {
|
||||
if self.future.is_none() {
|
||||
let item = match self.stream.poll() {
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Ok(Async::Ready(None)) => return Ok(Async::Ready(None)),
|
||||
Ok(Async::Ready(Some(e))) => Ok(e),
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
self.future = Some((self.f)(item).into_future());
|
||||
}
|
||||
assert!(self.future.is_some());
|
||||
match self.future.as_mut().unwrap().poll() {
|
||||
Ok(Async::Ready(e)) => {
|
||||
self.future = None;
|
||||
Ok(Async::Ready(Some(e)))
|
||||
}
|
||||
Err(e) => {
|
||||
self.future = None;
|
||||
Err(e)
|
||||
}
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,114 @@
|
|||
use core::mem;
|
||||
|
||||
use {Future, IntoFuture, Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// Creates a `Stream` from a seed and a closure returning a `Future`.
|
||||
///
|
||||
/// This function is the dual of the `Stream::fold()` adapter: while
|
||||
/// `Stream::fold()` reduces a `Stream` to a single value, `unfold()` creates a
|
||||
/// `Stream` from a seed value.
|
||||
///
|
||||
/// `unfold()` will call the provided closure with the provided seed, then wait
|
||||
/// for the returned `Future` to complete with `(a, b)`. It will then yield the
|
||||
/// value `a`, and use `b` as the next internal state.
|
||||
///
|
||||
/// If the closure returns `None` instead of `Some(Future)`, then the `unfold()`
|
||||
/// will stop producing items and return `Ok(Async::Ready(None))` in future
|
||||
/// calls to `poll()`.
|
||||
///
|
||||
/// In case of error generated by the returned `Future`, the error will be
|
||||
/// returned by the `Stream`. The `Stream` will then yield
|
||||
/// `Ok(Async::Ready(None))` in future calls to `poll()`.
|
||||
///
|
||||
/// This function can typically be used when wanting to go from the "world of
|
||||
/// futures" to the "world of streams": the provided closure can build a
|
||||
/// `Future` using other library functions working on futures, and `unfold()`
|
||||
/// will turn it into a `Stream` by repeating the operation.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::stream::{self, Stream};
|
||||
/// use futures::future::{self, Future};
|
||||
///
|
||||
/// let mut stream = stream::unfold(0, |state| {
|
||||
/// if state <= 2 {
|
||||
/// let next_state = state + 1;
|
||||
/// let yielded = state * 2;
|
||||
/// let fut = future::ok::<_, u32>((yielded, next_state));
|
||||
/// Some(fut)
|
||||
/// } else {
|
||||
/// None
|
||||
/// }
|
||||
/// });
|
||||
///
|
||||
/// let result = stream.collect().wait();
|
||||
/// assert_eq!(result, Ok(vec![0, 2, 4]));
|
||||
/// ```
|
||||
pub fn unfold<T, F, Fut, It>(init: T, f: F) -> Unfold<T, F, Fut>
|
||||
where F: FnMut(T) -> Option<Fut>,
|
||||
Fut: IntoFuture<Item = (It, T)>,
|
||||
{
|
||||
Unfold {
|
||||
f: f,
|
||||
state: State::Ready(init),
|
||||
}
|
||||
}
|
||||
|
||||
/// A stream which creates futures, polls them and returns their results
|
||||
///
|
||||
/// This stream is returned by the `futures::stream::unfold` method
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Unfold<T, F, Fut> where Fut: IntoFuture {
|
||||
f: F,
|
||||
state: State<T, Fut::Future>,
|
||||
}
|
||||
|
||||
impl <T, F, Fut, It> Stream for Unfold<T, F, Fut>
|
||||
where F: FnMut(T) -> Option<Fut>,
|
||||
Fut: IntoFuture<Item = (It, T)>,
|
||||
{
|
||||
type Item = It;
|
||||
type Error = Fut::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<It>, Fut::Error> {
|
||||
loop {
|
||||
match mem::replace(&mut self.state, State::Empty) {
|
||||
// State::Empty may happen if the future returned an error
|
||||
State::Empty => { return Ok(Async::Ready(None)); }
|
||||
State::Ready(state) => {
|
||||
match (self.f)(state) {
|
||||
Some(fut) => { self.state = State::Processing(fut.into_future()); }
|
||||
None => { return Ok(Async::Ready(None)); }
|
||||
}
|
||||
}
|
||||
State::Processing(mut fut) => {
|
||||
match try!(fut.poll()) {
|
||||
Async::Ready((item, next_state)) => {
|
||||
self.state = State::Ready(next_state);
|
||||
return Ok(Async::Ready(Some(item)));
|
||||
}
|
||||
Async::NotReady => {
|
||||
self.state = State::Processing(fut);
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum State<T, F> where F: Future {
|
||||
/// Placeholder state when doing work, or when the returned Future generated an error
|
||||
Empty,
|
||||
|
||||
/// Ready to generate new future; current internal state is the `T`
|
||||
Ready(T),
|
||||
|
||||
/// Working on a future generated previously
|
||||
Processing(F),
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
use stream::Stream;
|
||||
use executor;
|
||||
|
||||
/// A stream combinator which converts an asynchronous stream to a **blocking
|
||||
/// iterator**.
|
||||
///
|
||||
/// Created by the `Stream::wait` method, this function transforms any stream
|
||||
/// into a standard iterator. This is implemented by blocking the current thread
|
||||
/// while items on the underlying stream aren't ready yet.
|
||||
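///
/// # Example
///
/// A minimal illustrative doctest (not part of the original docs):
///
/// ```rust
/// use futures::stream::{self, Stream};
///
/// // Block on each item in turn; each `next()` yields a `Result`.
/// let mut iter = stream::iter(vec![Ok::<u32, ()>(1), Ok(2)].into_iter()).wait();
/// assert_eq!(iter.next(), Some(Ok(1)));
/// assert_eq!(iter.next(), Some(Ok(2)));
/// assert_eq!(iter.next(), None);
/// ```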
#[must_use = "iterators do nothing unless advanced"]
|
||||
#[derive(Debug)]
|
||||
pub struct Wait<S> {
|
||||
stream: executor::Spawn<S>,
|
||||
}
|
||||
|
||||
pub fn new<S: Stream>(s: S) -> Wait<S> {
|
||||
Wait {
|
||||
stream: executor::spawn(s),
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Iterator for Wait<S> {
|
||||
type Item = Result<S::Item, S::Error>;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.stream.wait_stream()
|
||||
}
|
||||
}
|
|
@ -0,0 +1,61 @@
|
|||
use {Async, Poll};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
/// An adapter for merging the output of two streams.
|
||||
///
|
||||
/// The merged stream produces items from one or both of the underlying
|
||||
/// streams as they become available. Errors, however, are not merged: you
|
||||
/// get at most one error at a time.
|
||||
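///
/// # Example
///
/// A small illustrative doctest (not from the original docs); the zipped
/// stream ends when the shorter input ends:
///
/// ```rust
/// use futures::stream::{self, Stream};
/// use futures::future::Future;
///
/// let a = stream::iter(vec![Ok::<u32, ()>(1), Ok(2)].into_iter());
/// let b = stream::iter(vec![Ok::<u32, ()>(10), Ok(20), Ok(30)].into_iter());
/// assert_eq!(a.zip(b).collect().wait(), Ok(vec![(1, 10), (2, 20)]));
/// ```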
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Zip<S1: Stream, S2: Stream> {
|
||||
stream1: Fuse<S1>,
|
||||
stream2: Fuse<S2>,
|
||||
queued1: Option<S1::Item>,
|
||||
queued2: Option<S2::Item>,
|
||||
}
|
||||
|
||||
pub fn new<S1, S2>(stream1: S1, stream2: S2) -> Zip<S1, S2>
|
||||
where S1: Stream, S2: Stream<Error = S1::Error>
|
||||
{
|
||||
Zip {
|
||||
stream1: stream1.fuse(),
|
||||
stream2: stream2.fuse(),
|
||||
queued1: None,
|
||||
queued2: None,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S1, S2> Stream for Zip<S1, S2>
|
||||
where S1: Stream, S2: Stream<Error = S1::Error>
|
||||
{
|
||||
type Item = (S1::Item, S2::Item);
|
||||
type Error = S1::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if self.queued1.is_none() {
|
||||
match try!(self.stream1.poll()) {
|
||||
Async::NotReady => {}
|
||||
Async::Ready(Some(item1)) => self.queued1 = Some(item1),
|
||||
Async::Ready(None) => {}
|
||||
}
|
||||
}
|
||||
if self.queued2.is_none() {
|
||||
match try!(self.stream2.poll()) {
|
||||
Async::NotReady => {}
|
||||
Async::Ready(Some(item2)) => self.queued2 = Some(item2),
|
||||
Async::Ready(None) => {}
|
||||
}
|
||||
}
|
||||
|
||||
if self.queued1.is_some() && self.queued2.is_some() {
|
||||
let pair = (self.queued1.take().unwrap(),
|
||||
self.queued2.take().unwrap());
|
||||
Ok(Async::Ready(Some(pair)))
|
||||
} else if self.stream1.is_done() || self.stream2.is_done() {
|
||||
Ok(Async::Ready(None))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,248 @@
|
|||
use std::boxed::Box;
|
||||
use std::cell::UnsafeCell;
|
||||
use std::mem;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
|
||||
use {Async, Future, Poll};
|
||||
use task::{self, Task};
|
||||
|
||||
/// A type of futures-powered synchronization primitive which is a mutex between
|
||||
/// two possible owners.
|
||||
///
|
||||
/// This primitive is not as generic as a full-blown mutex but is sufficient for
|
||||
/// many use cases where there are only two possible owners of a resource. The
|
||||
/// implementation of `BiLock` can be more optimized for just the two possible
|
||||
/// owners.
|
||||
///
|
||||
/// Note that it's possible to use this lock through a poll-style interface with
|
||||
/// the `poll_lock` method but you can also use it as a future with the `lock`
|
||||
/// method that consumes a `BiLock` and returns a future that will resolve when
|
||||
/// it's locked.
|
||||
///
|
||||
/// A `BiLock` is typically used for "split" operations where data which serves
|
||||
/// two purposes wants to be split into two to be worked with separately. For
|
||||
/// example a TCP stream could be both a reader and a writer or a framing layer
|
||||
/// could be both a stream and a sink for messages. A `BiLock` enables splitting
|
||||
/// these two and then using each independently in a futures-powered fashion.
|
||||
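///
/// # Example
///
/// A minimal sketch of creating the two halves (illustrative only):
///
/// ```rust
/// use futures::sync::BiLock;
///
/// // Both halves refer to the same protected value; each can be sent to a
/// // different task and locked independently via `poll_lock` or `lock`.
/// let (a, b) = BiLock::new(vec![1, 2, 3]);
/// drop((a, b));
/// ```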
#[derive(Debug)]
|
||||
pub struct BiLock<T> {
|
||||
inner: Arc<Inner<T>>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Inner<T> {
|
||||
state: AtomicUsize,
|
||||
inner: UnsafeCell<T>,
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for Inner<T> {}
|
||||
unsafe impl<T: Send> Sync for Inner<T> {}
|
||||
|
||||
impl<T> BiLock<T> {
|
||||
/// Creates a new `BiLock` protecting the provided data.
|
||||
///
|
||||
/// Two handles to the lock are returned, and these are the only two handles
|
||||
/// that will ever be available to the lock. These can then be sent to separate
|
||||
/// tasks to be managed there.
|
||||
pub fn new(t: T) -> (BiLock<T>, BiLock<T>) {
|
||||
let inner = Arc::new(Inner {
|
||||
state: AtomicUsize::new(0),
|
||||
inner: UnsafeCell::new(t),
|
||||
});
|
||||
|
||||
(BiLock { inner: inner.clone() }, BiLock { inner: inner })
|
||||
}
|
||||
|
||||
/// Attempt to acquire this lock, returning `NotReady` if it can't be
|
||||
/// acquired.
|
||||
///
|
||||
/// This function will acquire the lock in a nonblocking fashion, returning
|
||||
/// immediately if the lock is already held. If the lock is successfully
|
||||
/// acquired then `Async::Ready` is returned with a value that represents
|
||||
/// the locked value (and can be used to access the protected data). The
|
||||
/// lock is unlocked when the returned `BiLockGuard` is dropped.
|
||||
///
|
||||
/// If the lock is already held then this function will return
|
||||
/// `Async::NotReady`. In this case the current task will also be scheduled
|
||||
/// to receive a notification when the lock would otherwise become
|
||||
/// available.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if called outside the context of a future's
|
||||
/// task.
|
||||
pub fn poll_lock(&self) -> Async<BiLockGuard<T>> {
|
||||
loop {
|
||||
match self.inner.state.swap(1, SeqCst) {
|
||||
// Woohoo, we grabbed the lock!
|
||||
0 => return Async::Ready(BiLockGuard { inner: self }),
|
||||
|
||||
// Oops, someone else has locked the lock
|
||||
1 => {}
|
||||
|
||||
// A task was previously blocked on this lock, likely our task,
|
||||
// so we need to update that task.
|
||||
n => unsafe {
|
||||
drop(Box::from_raw(n as *mut Task));
|
||||
}
|
||||
}
|
||||
|
||||
let me = Box::new(task::park());
|
||||
let me = Box::into_raw(me) as usize;
|
||||
|
||||
match self.inner.state.compare_exchange(1, me, SeqCst, SeqCst) {
|
||||
// The lock is still locked, but we've now parked ourselves, so
|
||||
// just report that we're scheduled to receive a notification.
|
||||
Ok(_) => return Async::NotReady,
|
||||
|
||||
// Oops, looks like the lock was unlocked after our swap above
|
||||
// and before the compare_exchange. Deallocate what we just
|
||||
// allocated and go through the loop again.
|
||||
Err(0) => unsafe {
|
||||
drop(Box::from_raw(me as *mut Task));
|
||||
},
|
||||
|
||||
// The top of this loop set the previous state to 1, so if we
|
||||
// failed the CAS above then it's because the previous value was
|
||||
// *not* zero or one. This indicates that a task was blocked,
|
||||
// but we're trying to acquire the lock and there's only one
|
||||
// other reference of the lock, so it should be impossible for
|
||||
// that task to ever block itself.
|
||||
Err(n) => panic!("invalid state: {}", n),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Perform a "blocking lock" of this lock, consuming this lock handle and
|
||||
/// returning a future to the acquired lock.
|
||||
///
|
||||
/// This function consumes the `BiLock<T>` and returns a sentinel future,
|
||||
/// `BiLockAcquire<T>`. The returned future will resolve to
|
||||
/// `BiLockAcquired<T>` which represents a locked lock similarly to
|
||||
/// `BiLockGuard<T>`.
|
||||
///
|
||||
/// Note that the returned future will never resolve to an error.
|
||||
pub fn lock(self) -> BiLockAcquire<T> {
|
||||
BiLockAcquire {
|
||||
inner: self,
|
||||
}
|
||||
}
|
||||
|
||||
fn unlock(&self) {
|
||||
match self.inner.state.swap(0, SeqCst) {
|
||||
// We've locked the lock, so it shouldn't be possible for us to see an
|
||||
// unlocked lock.
|
||||
0 => panic!("invalid unlocked state"),
|
||||
|
||||
// Ok, no one else tried to get the lock, we're done.
|
||||
1 => {}
|
||||
|
||||
// Another task has parked themselves on this lock, let's wake them
|
||||
// up as it's now their turn.
|
||||
n => unsafe {
|
||||
Box::from_raw(n as *mut Task).unpark();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Inner<T> {
|
||||
fn drop(&mut self) {
|
||||
assert_eq!(self.state.load(SeqCst), 0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returned RAII guard from the `poll_lock` method.
|
||||
///
|
||||
/// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
|
||||
/// implementing `Deref` and `DerefMut` to `T`. When dropped, the lock will be
|
||||
/// unlocked.
|
||||
#[derive(Debug)]
|
||||
pub struct BiLockGuard<'a, T: 'a> {
|
||||
inner: &'a BiLock<T>,
|
||||
}
|
||||
|
||||
impl<'a, T> Deref for BiLockGuard<'a, T> {
|
||||
type Target = T;
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { &*self.inner.inner.inner.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> DerefMut for BiLockGuard<'a, T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { &mut *self.inner.inner.inner.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Drop for BiLockGuard<'a, T> {
|
||||
fn drop(&mut self) {
|
||||
self.inner.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
/// Future returned by `BiLock::lock` which will resolve when the lock is
|
||||
/// acquired.
|
||||
#[derive(Debug)]
|
||||
pub struct BiLockAcquire<T> {
|
||||
inner: BiLock<T>,
|
||||
}
|
||||
|
||||
impl<T> Future for BiLockAcquire<T> {
|
||||
type Item = BiLockAcquired<T>;
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<BiLockAcquired<T>, ()> {
|
||||
match self.inner.poll_lock() {
|
||||
Async::Ready(r) => {
|
||||
mem::forget(r);
|
||||
Ok(BiLockAcquired {
|
||||
inner: BiLock { inner: self.inner.inner.clone() },
|
||||
}.into())
|
||||
}
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Resolved value of the `BiLockAcquire<T>` future.
|
||||
///
|
||||
/// This value, like `BiLockGuard<T>`, is a sentinel to the value `T` through
|
||||
/// implementations of `Deref` and `DerefMut`. When dropped will unlock the
|
||||
/// lock, and the original unlocked `BiLock<T>` can be recovered through the
|
||||
/// `unlock` method.
|
||||
#[derive(Debug)]
|
||||
pub struct BiLockAcquired<T> {
|
||||
inner: BiLock<T>,
|
||||
}
|
||||
|
||||
impl<T> BiLockAcquired<T> {
|
||||
/// Recovers the original `BiLock<T>`, unlocking this lock.
|
||||
pub fn unlock(self) -> BiLock<T> {
|
||||
// note that the unlock is performed in `Drop`, so we don't do anything
|
||||
// here other than creating a new handle to return.
|
||||
BiLock { inner: self.inner.inner.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for BiLockAcquired<T> {
|
||||
type Target = T;
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { &*self.inner.inner.inner.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for BiLockAcquired<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { &mut *self.inner.inner.inner.get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for BiLockAcquired<T> {
|
||||
fn drop(&mut self) {
|
||||
self.inner.unlock();
|
||||
}
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
//! Future-aware synchronization
|
||||
//!
|
||||
//! This module, which is modeled after `std::sync`, contains user-space
|
||||
//! synchronization tools that work with futures, streams and sinks. In
|
||||
//! particular, these synchronizers do *not* block physical OS threads, but
|
||||
//! instead work at the task level.
|
||||
//!
|
||||
//! More information and examples of how to use these synchronization primitives
|
||||
//! can be found [online at tokio.rs].
|
||||
//!
|
||||
//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper/synchronization/
|
||||
|
||||
pub mod oneshot;
|
||||
pub mod mpsc;
|
||||
mod bilock;
|
||||
|
||||
pub use self::bilock::{BiLock, BiLockGuard, BiLockAcquire, BiLockAcquired};
|
|
@ -0,0 +1,872 @@
|
|||
//! A multi-producer, single-consumer, futures-aware, FIFO queue with back pressure.
|
||||
//!
|
||||
//! A channel can be used as a communication primitive between tasks running on
|
||||
//! `futures-rs` executors. Channel creation provides `Receiver` and `Sender`
|
||||
//! handles. `Receiver` implements `Stream` and allows a task to read values
|
||||
//! out of the channel. If there is no message to read from the channel, the
|
||||
//! current task will be notified when a new value is sent. `Sender` implements
|
||||
//! the `Sink` trait and allows a task to send messages into the channel. If
|
||||
//! the channel is at capacity, then send will be rejected and the task will be
|
||||
//! notified when additional capacity is available.
|
||||
//!
|
||||
//! # Disconnection
|
||||
//!
|
||||
//! When all `Sender` handles have been dropped, it is no longer possible to
|
||||
//! send values into the channel. This is considered the termination event of
|
||||
//! the stream. As such, `Receiver::poll` will return `Ok(Ready(None))`.
|
||||
//!
|
||||
//! If the receiver handle is dropped, then messages can no longer be read out
|
||||
//! of the channel. In this case, a `send` will result in an error.
|
||||
//!
|
||||
//! # Clean Shutdown
|
||||
//!
|
||||
//! If the `Receiver` is simply dropped, then it is possible for there to be
|
||||
//! messages still in the channel that will not be processed. As such, it is
|
||||
//! usually desirable to perform a "clean" shutdown. To do this, the receiver
|
||||
//! will first call `close`, which will prevent any further messages from being sent
|
||||
//! into the channel. Then, the receiver consumes the channel to completion, at
|
||||
//! which point the receiver can be dropped.
|
||||
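//!
//! A minimal sketch of such a shutdown (illustrative only, not part of the
//! original documentation):
//!
//! ```rust
//! use futures::Stream;
//! use futures::sync::mpsc;
//!
//! let (tx, mut rx) = mpsc::channel::<u32>(8);
//! rx.close();                 // no further messages can be sent
//! drop(tx);
//! // Drain whatever is already buffered; then the receiver can be dropped.
//! let drained: Vec<_> = rx.wait().collect();
//! assert!(drained.is_empty());
//! ```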
|
||||
// At the core, the channel uses an atomic FIFO queue for message passing. This
|
||||
// queue is used as the primary coordination primitive. In order to enforce
|
||||
// capacity limits and handle back pressure, a secondary FIFO queue is used to
|
||||
// send parked task handles.
|
||||
//
|
||||
// The general idea is that the channel is created with a `buffer` size of `n`.
|
||||
// The channel capacity is `n + num-senders`. Each sender gets one "guaranteed"
|
||||
// slot to hold a message. This allows `Sender` to know for a fact that a send
|
||||
// will succeed *before* starting to do the actual work of sending the value.
|
||||
// Since most of this work is lock-free, once the work starts, it is impossible
|
||||
// to safely revert.
|
||||
//
|
||||
// If the sender is unable to process a send operation, then the current
|
||||
// task is parked and the handle is sent on the parked task queue.
|
||||
//
|
||||
// Note that the implementation guarantees that the channel capacity will never
|
||||
// exceed the configured limit, however there is no *strict* guarantee that the
|
||||
// receiver will wake up a parked task *immediately* when a slot becomes
|
||||
// available. However, it will almost always unpark a task when a slot becomes
|
||||
// available and it is *guaranteed* that a sender will be unparked when the
|
||||
// message that caused the sender to become parked is read out of the channel.
|
||||
//
|
||||
// The steps for sending a message are roughly:
|
||||
//
|
||||
// 1) Increment the channel message count
|
||||
// 2) If the channel is at capacity, push the task handle onto the wait queue
|
||||
// 3) Push the message onto the message queue.
|
||||
//
|
||||
// The steps for receiving a message are roughly:
|
||||
//
|
||||
// 1) Pop a message from the message queue
|
||||
// 2) Pop a task handle from the wait queue
|
||||
// 3) Decrement the channel message count.
|
||||
//
|
||||
// It's important for the order of operations on lock-free structures to happen
|
||||
// in reverse order between the sender and receiver. This makes the message
|
||||
// queue the primary coordination structure and establishes the necessary
|
||||
// happens-before semantics required for the acquire / release semantics used
|
||||
// by the queue structure.
|
||||
|
||||
use std::fmt;
|
||||
use std::error::Error;
|
||||
use std::any::Any;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
use std::usize;
|
||||
|
||||
use sync::mpsc::queue::{Queue, PopResult};
|
||||
use task::{self, Task};
|
||||
use {Async, AsyncSink, Poll, StartSend, Sink, Stream};
|
||||
|
||||
mod queue;
|
||||
|
||||
/// The transmission end of a channel which is used to send values.
|
||||
///
|
||||
/// This is created by the `channel` method.
|
||||
#[derive(Debug)]
|
||||
pub struct Sender<T> {
|
||||
// Channel state shared between the sender and receiver.
|
||||
inner: Arc<Inner<T>>,
|
||||
|
||||
// Handle to the task that is blocked on this sender. This handle is sent
|
||||
// to the receiver half in order to be notified when the sender becomes
|
||||
// unblocked.
|
||||
sender_task: SenderTask,
|
||||
|
||||
// True if the sender might be blocked. This is an optimization to avoid
|
||||
// having to lock the mutex most of the time.
|
||||
maybe_parked: bool,
|
||||
}
|
||||
|
||||
/// The transmission end of a channel which is used to send values.
|
||||
///
|
||||
/// This is created by the `unbounded` method.
|
||||
#[derive(Debug)]
|
||||
pub struct UnboundedSender<T>(Sender<T>);
|
||||
|
||||
fn _assert_kinds() {
|
||||
fn _assert_send<T: Send>() {}
|
||||
fn _assert_sync<T: Sync>() {}
|
||||
fn _assert_clone<T: Clone>() {}
|
||||
_assert_send::<UnboundedSender<u32>>();
|
||||
_assert_sync::<UnboundedSender<u32>>();
|
||||
_assert_clone::<UnboundedSender<u32>>();
|
||||
}
|
||||
|
||||
|
||||
/// The receiving end of a channel which implements the `Stream` trait.
|
||||
///
|
||||
/// This is a concrete implementation of a stream which can be used to represent
|
||||
/// a stream of values being computed elsewhere. This is created by the
|
||||
/// `channel` method.
|
||||
#[derive(Debug)]
|
||||
pub struct Receiver<T> {
|
||||
inner: Arc<Inner<T>>,
|
||||
}
|
||||
|
||||
/// The receiving end of a channel which implements the `Stream` trait.
|
||||
///
|
||||
/// This is a concrete implementation of a stream which can be used to represent
|
||||
/// a stream of values being computed elsewhere. This is created by the
|
||||
/// `unbounded` method.
|
||||
#[derive(Debug)]
|
||||
pub struct UnboundedReceiver<T>(Receiver<T>);
|
||||
|
||||
/// Error type for sending, used when the receiving end of a channel is
|
||||
/// dropped
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct SendError<T>(T);
|
||||
|
||||
impl<T> fmt::Debug for SendError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_tuple("SendError")
|
||||
.field(&"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Display for SendError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "send failed because receiver is gone")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Any> Error for SendError<T>
|
||||
{
|
||||
fn description(&self) -> &str {
|
||||
"send failed because receiver is gone"
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> SendError<T> {
|
||||
/// Returns the message that was attempted to be sent but failed.
|
||||
pub fn into_inner(self) -> T {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Inner<T> {
|
||||
// Max buffer size of the channel. If `None` then the channel is unbounded.
|
||||
buffer: Option<usize>,
|
||||
|
||||
// Internal channel state. Consists of the number of messages stored in the
|
||||
// channel as well as a flag signalling that the channel is closed.
|
||||
state: AtomicUsize,
|
||||
|
||||
// Atomic, FIFO queue used to send messages to the receiver
|
||||
message_queue: Queue<Option<T>>,
|
||||
|
||||
// Atomic, FIFO queue used to send parked task handles to the receiver.
|
||||
parked_queue: Queue<SenderTask>,
|
||||
|
||||
// Number of senders in existence
|
||||
num_senders: AtomicUsize,
|
||||
|
||||
// Handle to the receiver's task.
|
||||
recv_task: Mutex<ReceiverTask>,
|
||||
}
|
||||
|
||||
// Struct representation of `Inner::state`.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
struct State {
|
||||
// `true` when the channel is open
|
||||
is_open: bool,
|
||||
|
||||
// Number of messages in the channel
|
||||
num_messages: usize,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ReceiverTask {
|
||||
unparked: bool,
|
||||
task: Option<Task>,
|
||||
}
|
||||
|
||||
// Returned from Receiver::try_park()
|
||||
enum TryPark {
|
||||
Parked,
|
||||
Closed,
|
||||
NotEmpty,
|
||||
}
|
||||
|
||||
// The `is_open` flag is stored in the left-most bit of `Inner::state`
|
||||
const OPEN_MASK: usize = 1 << 31;
|
||||
|
||||
// When a new channel is created, it is created in the open state with no
|
||||
// pending messages.
|
||||
const INIT_STATE: usize = OPEN_MASK;
|
||||
|
||||
// The maximum number of messages that a channel can track is `usize::MAX >> 1`
|
||||
const MAX_CAPACITY: usize = !(OPEN_MASK);
|
||||
|
||||
// The maximum requested buffer size must be less than the maximum capacity of
|
||||
// a channel. This is because each sender gets a guaranteed slot.
|
||||
const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
|
||||
|
||||
// Sent to the consumer to wake up blocked producers
|
||||
type SenderTask = Arc<Mutex<Option<Task>>>;
|
||||
|
||||
/// Creates an in-memory channel implementation of the `Stream` trait with
|
||||
/// bounded capacity.
|
||||
///
|
||||
/// This method creates a concrete implementation of the `Stream` trait which
|
||||
/// can be used to send values across threads in a streaming fashion. This
|
||||
/// channel is unique in that it implements back pressure to ensure that the
|
||||
/// sender never outpaces the receiver. The channel capacity is equal to
|
||||
/// `buffer + num-senders`. In other words, each sender gets a guaranteed slot
|
||||
/// in the channel capacity, and on top of that there are `buffer` "first come,
|
||||
/// first serve" slots available to all senders.
|
||||
///
|
||||
/// The `Receiver` returned implements the `Stream` trait and has access to any
|
||||
/// number of the associated combinators for transforming the result.
|
||||
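///
/// # Example
///
/// A minimal sketch (added for illustration; it assumes `Sink::send` and
/// `Stream::wait` as provided elsewhere in this crate):
///
/// ```rust
/// use futures::{Future, Sink, Stream};
/// use futures::sync::mpsc;
///
/// let (tx, rx) = mpsc::channel::<i32>(1);
/// // `send` consumes the sender and yields it back once the message is queued.
/// let tx = tx.send(1).wait().unwrap();
/// drop(tx);
/// assert_eq!(rx.wait().collect::<Vec<_>>(), vec![Ok(1)]);
/// ```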
pub fn channel<T>(buffer: usize) -> (Sender<T>, Receiver<T>) {
|
||||
// Check that the requested buffer size does not exceed the maximum buffer
|
||||
// size permitted by the system.
|
||||
assert!(buffer < MAX_BUFFER, "requested buffer size too large");
|
||||
channel2(Some(buffer))
|
||||
}
|
||||
|
||||
/// Creates an in-memory channel implementation of the `Stream` trait with
|
||||
/// unbounded capacity.
|
||||
///
|
||||
/// This method creates a concrete implementation of the `Stream` trait which
|
||||
/// can be used to send values across threads in a streaming fashion. A `send`
|
||||
/// on this channel will always succeed as long as the receive half has not
|
||||
/// been closed. If the receiver falls behind, messages will be buffered
|
||||
/// internally.
|
||||
///
|
||||
/// **Note** that the amount of available system memory is an implicit bound on
|
||||
/// the channel. Using an `unbounded` channel can cause the
|
||||
/// process to run out of memory. In this case, the process will be aborted.
|
||||
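///
/// # Example
///
/// A brief sketch (illustrative only):
///
/// ```rust
/// use futures::Stream;
/// use futures::sync::mpsc;
///
/// let (tx, rx) = mpsc::unbounded::<u32>();
/// tx.send(1).unwrap();   // never blocks: capacity is unbounded
/// tx.send(2).unwrap();
/// drop(tx);
/// assert_eq!(rx.wait().collect::<Vec<_>>(), vec![Ok(1), Ok(2)]);
/// ```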
pub fn unbounded<T>() -> (UnboundedSender<T>, UnboundedReceiver<T>) {
|
||||
let (tx, rx) = channel2(None);
|
||||
(UnboundedSender(tx), UnboundedReceiver(rx))
|
||||
}
|
||||
|
||||
fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
|
||||
let inner = Arc::new(Inner {
|
||||
buffer: buffer,
|
||||
state: AtomicUsize::new(INIT_STATE),
|
||||
message_queue: Queue::new(),
|
||||
parked_queue: Queue::new(),
|
||||
num_senders: AtomicUsize::new(1),
|
||||
recv_task: Mutex::new(ReceiverTask {
|
||||
unparked: false,
|
||||
task: None,
|
||||
}),
|
||||
});
|
||||
|
||||
let tx = Sender {
|
||||
inner: inner.clone(),
|
||||
sender_task: Arc::new(Mutex::new(None)),
|
||||
maybe_parked: false,
|
||||
};
|
||||
|
||||
let rx = Receiver {
|
||||
inner: inner,
|
||||
};
|
||||
|
||||
(tx, rx)
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* ===== impl Sender =====
|
||||
*
|
||||
*/
|
||||
|
||||
impl<T> Sender<T> {
|
||||
// Do the send without failing
|
||||
fn do_send(&mut self, msg: Option<T>, can_park: bool) -> Result<(), SendError<T>> {
|
||||
// First, increment the number of messages contained by the channel.
|
||||
// This operation will also atomically determine if the sender task
|
||||
// should be parked.
|
||||
//
|
||||
// None is returned in the case that the channel has been closed by the
|
||||
// receiver. This happens when `Receiver::close` is called or the
|
||||
// receiver is dropped.
|
||||
let park_self = match self.inc_num_messages(msg.is_none()) {
|
||||
Some(park_self) => park_self,
|
||||
None => {
|
||||
// The receiver has closed the channel. Only abort if actually
|
||||
// sending a message. It is important that the stream
|
||||
// termination (None) is always sent. This technically means
|
||||
// that it is possible for the queue to contain the following
|
||||
// number of messages:
|
||||
//
|
||||
// num-senders + buffer + 1
|
||||
//
|
||||
if let Some(msg) = msg {
|
||||
return Err(SendError(msg));
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// If the channel has reached capacity, then the sender task needs to
|
||||
// be parked. This will send the task handle on the parked task queue.
|
||||
//
|
||||
// However, when `do_send` is called while dropping the `Sender`,
|
||||
// `task::park()` can't be called safely. In this case, in order to
|
||||
// maintain internal consistency, a blank message is pushed onto the
|
||||
// parked task queue.
|
||||
if park_self {
|
||||
self.park(can_park);
|
||||
}
|
||||
|
||||
self.queue_push_and_signal(msg);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Do the send without parking current task.
|
||||
//
|
||||
// To be called from unbounded sender.
|
||||
fn do_send_nb(&self, msg: T) -> Result<(), SendError<T>> {
|
||||
match self.inc_num_messages(false) {
|
||||
Some(park_self) => assert!(!park_self),
|
||||
None => return Err(SendError(msg)),
|
||||
};
|
||||
|
||||
self.queue_push_and_signal(Some(msg));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Push message to the queue and signal to the receiver
|
||||
fn queue_push_and_signal(&self, msg: Option<T>) {
|
||||
// Push the message onto the message queue
|
||||
self.inner.message_queue.push(msg);
|
||||
|
||||
// Signal to the receiver that a message has been enqueued. If the
|
||||
// receiver is parked, this will unpark the task.
|
||||
self.signal();
|
||||
}
|
||||
|
||||
// Increment the number of queued messages. Returns whether the sender should
|
||||
// block.
|
||||
fn inc_num_messages(&self, close: bool) -> Option<bool> {
|
||||
let mut curr = self.inner.state.load(SeqCst);
|
||||
|
||||
loop {
|
||||
let mut state = decode_state(curr);
|
||||
|
||||
// The receiver end closed the channel.
|
||||
if !state.is_open {
|
||||
return None;
|
||||
}
|
||||
|
||||
// This is probably never hit? Odds are the process will run out of
|
||||
// memory first. It may be worth returning something else in this
|
||||
// case?
|
||||
assert!(state.num_messages < MAX_CAPACITY, "buffer space exhausted; \
|
||||
sending this message would overflow the state");
|
||||
|
||||
state.num_messages += 1;
|
||||
|
||||
// The channel is closed by all sender handles being dropped.
|
||||
if close {
|
||||
state.is_open = false;
|
||||
}
|
||||
|
||||
let next = encode_state(&state);
|
||||
match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
|
||||
Ok(_) => {
|
||||
// Block if the current number of pending messages has exceeded
|
||||
// the configured buffer size
|
||||
let park_self = match self.inner.buffer {
|
||||
Some(buffer) => state.num_messages > buffer,
|
||||
None => false,
|
||||
};
|
||||
|
||||
return Some(park_self)
|
||||
}
|
||||
Err(actual) => curr = actual,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Signal to the receiver task that a message has been enqueued
|
||||
fn signal(&self) {
|
||||
// TODO
|
||||
// This logic can probably be improved by guarding the lock with an
|
||||
// atomic.
|
||||
//
|
||||
// Do this step first so that the lock is dropped when
|
||||
// `unpark` is called
|
||||
let task = {
|
||||
let mut recv_task = self.inner.recv_task.lock().unwrap();
|
||||
|
||||
// If the receiver has already been unparked, then there is nothing
|
||||
// more to do
|
||||
if recv_task.unparked {
|
||||
return;
|
||||
}
|
||||
|
||||
// Setting this flag enables the receiving end to detect that
|
||||
// an unpark event happened in order to avoid unnecessarily
|
||||
// parking.
|
||||
recv_task.unparked = true;
|
||||
recv_task.task.take()
|
||||
};
|
||||
|
||||
if let Some(task) = task {
|
||||
task.unpark();
|
||||
}
|
||||
}
|
||||
|
||||
fn park(&mut self, can_park: bool) {
|
||||
// TODO: clean up internal state if task::park fails
|
||||
|
||||
let task = if can_park {
|
||||
Some(task::park())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
*self.sender_task.lock().unwrap() = task;
|
||||
|
||||
// Send handle over queue
|
||||
let t = self.sender_task.clone();
|
||||
self.inner.parked_queue.push(t);
|
||||
|
||||
// Check to make sure we weren't closed after we sent our task on the
|
||||
// queue
|
||||
let state = decode_state(self.inner.state.load(SeqCst));
|
||||
self.maybe_parked = state.is_open;
|
||||
}
|
||||
|
||||
fn poll_unparked(&mut self) -> Async<()> {
|
||||
// First check the `maybe_parked` variable. This avoids acquiring the
|
||||
// lock in most cases
|
||||
if self.maybe_parked {
|
||||
// Get a lock on the task handle
|
||||
let mut task = self.sender_task.lock().unwrap();
|
||||
|
||||
if task.is_none() {
|
||||
self.maybe_parked = false;
|
||||
return Async::Ready(())
|
||||
}
|
||||
|
||||
// At this point, an unpark request is pending, so there will be an
|
||||
// unpark sometime in the future. We just need to make sure that
|
||||
// the correct task will be notified.
|
||||
//
|
||||
// Update the task in case the `Sender` has been moved to another
|
||||
// task
|
||||
*task = Some(task::park());
|
||||
|
||||
Async::NotReady
|
||||
} else {
|
||||
Async::Ready(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Sink for Sender<T> {
|
||||
type SinkItem = T;
|
||||
type SinkError = SendError<T>;
|
||||
|
||||
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
|
||||
// If the sender is currently blocked, reject the message before doing
|
||||
// any work.
|
||||
if !self.poll_unparked().is_ready() {
|
||||
return Ok(AsyncSink::NotReady(msg));
|
||||
}
|
||||
|
||||
// The channel has capacity to accept the message, so send it.
|
||||
try!(self.do_send(Some(msg), true));
|
||||
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), SendError<T>> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> UnboundedSender<T> {
|
||||
/// Sends the provided message along this channel.
|
||||
///
|
||||
/// This is an unbounded sender, so this function differs from `Sink::send`
|
||||
/// by ensuring the return type reflects that the channel is always ready to
|
||||
/// receive messages.
|
||||
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
|
||||
self.0.do_send_nb(msg)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Sink for UnboundedSender<T> {
|
||||
type SinkItem = T;
|
||||
type SinkError = SendError<T>;
|
||||
|
||||
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
|
||||
self.0.start_send(msg)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
|
||||
self.0.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), SendError<T>> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> Sink for &'a UnboundedSender<T> {
|
||||
type SinkItem = T;
|
||||
type SinkError = SendError<T>;
|
||||
|
||||
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
|
||||
try!(self.0.do_send_nb(msg));
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), SendError<T>> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), SendError<T>> {
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Clone for UnboundedSender<T> {
|
||||
fn clone(&self) -> UnboundedSender<T> {
|
||||
UnboundedSender(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<T> Clone for Sender<T> {
|
||||
fn clone(&self) -> Sender<T> {
|
||||
// Since this atomic op isn't actually guarding any memory and we don't
|
||||
// care about any orderings besides the ordering on the single atomic
|
||||
// variable, a relaxed ordering is acceptable.
|
||||
let mut curr = self.inner.num_senders.load(SeqCst);
|
||||
|
||||
loop {
|
||||
// If the maximum number of senders has been reached, then fail
|
||||
if curr == self.inner.max_senders() {
|
||||
panic!("cannot clone `Sender` -- too many outstanding senders");
|
||||
}
|
||||
|
||||
debug_assert!(curr < self.inner.max_senders());
|
||||
|
||||
let next = curr + 1;
|
||||
let actual = self.inner.num_senders.compare_and_swap(curr, next, SeqCst);
|
||||
|
||||
// The ABA problem doesn't matter here. We only care that the
|
||||
// number of senders never exceeds the maximum.
|
||||
if actual == curr {
|
||||
return Sender {
|
||||
inner: self.inner.clone(),
|
||||
sender_task: Arc::new(Mutex::new(None)),
|
||||
maybe_parked: false,
|
||||
};
|
||||
}
|
||||
|
||||
curr = actual;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Sender<T> {
|
||||
fn drop(&mut self) {
|
||||
// Ordering between variables doesn't matter here
|
||||
let prev = self.inner.num_senders.fetch_sub(1, SeqCst);
|
||||
|
||||
if prev == 1 {
|
||||
let _ = self.do_send(None, false);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* ===== impl Receiver =====
|
||||
*
|
||||
*/
|
||||
|
||||
impl<T> Receiver<T> {
|
||||
/// Closes the receiving half
|
||||
///
|
||||
/// This prevents any further messages from being sent on the channel while
|
||||
/// still enabling the receiver to drain messages that are buffered.
|
||||
pub fn close(&mut self) {
|
||||
let mut curr = self.inner.state.load(SeqCst);
|
||||
|
||||
loop {
|
||||
let mut state = decode_state(curr);
|
||||
|
||||
if !state.is_open {
|
||||
break
|
||||
}
|
||||
|
||||
state.is_open = false;
|
||||
|
||||
let next = encode_state(&state);
|
||||
match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
|
||||
Ok(_) => break,
|
||||
Err(actual) => curr = actual,
|
||||
}
|
||||
}
|
||||
|
||||
// Wake up any threads waiting as they'll see that we've closed the
|
||||
// channel and will continue on their merry way.
|
||||
loop {
|
||||
match unsafe { self.inner.parked_queue.pop() } {
|
||||
PopResult::Data(task) => {
|
||||
let task = task.lock().unwrap().take();
|
||||
if let Some(task) = task {
|
||||
task.unpark();
|
||||
}
|
||||
}
|
||||
PopResult::Empty => break,
|
||||
PopResult::Inconsistent => thread::yield_now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn next_message(&mut self) -> Async<Option<T>> {
|
||||
// Pop off a message
|
||||
loop {
|
||||
match unsafe { self.inner.message_queue.pop() } {
|
||||
PopResult::Data(msg) => {
|
||||
return Async::Ready(msg);
|
||||
}
|
||||
PopResult::Empty => {
|
||||
// The queue is empty, return NotReady
|
||||
return Async::NotReady;
|
||||
}
|
||||
PopResult::Inconsistent => {
|
||||
// Inconsistent means that there will be a message to pop
|
||||
// in a short time. This branch can only be reached if
|
||||
// values are being produced from another thread, so there
|
||||
// are a few ways that we can deal with this:
|
||||
//
|
||||
// 1) Spin
|
||||
// 2) thread::yield_now()
|
||||
// 3) task::park().unwrap() & return NotReady
|
||||
//
|
||||
// For now, thread::yield_now() is used, but it would
|
||||
// probably be better to spin a few times then yield.
|
||||
thread::yield_now();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Unpark a single task handle if there is one pending in the parked queue
|
||||
fn unpark_one(&mut self) {
|
||||
loop {
|
||||
match unsafe { self.inner.parked_queue.pop() } {
|
||||
PopResult::Data(task) => {
|
||||
// Do this step first so that the lock is dropped when
|
||||
// `unpark` is called
|
||||
let task = task.lock().unwrap().take();
|
||||
|
||||
if let Some(task) = task {
|
||||
task.unpark();
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
PopResult::Empty => {
|
||||
// Queue empty, no task to wake up.
|
||||
return;
|
||||
}
|
||||
PopResult::Inconsistent => {
|
||||
// Same as above
|
||||
thread::yield_now();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Try to park the receiver task
|
||||
fn try_park(&self) -> TryPark {
|
||||
let curr = self.inner.state.load(SeqCst);
|
||||
let state = decode_state(curr);
|
||||
|
||||
// If the channel is closed, then there is no need to park.
|
||||
if !state.is_open && state.num_messages == 0 {
|
||||
return TryPark::Closed;
|
||||
}
|
||||
|
||||
// First, track the task in the `recv_task` slot
|
||||
let mut recv_task = self.inner.recv_task.lock().unwrap();
|
||||
|
||||
if recv_task.unparked {
|
||||
// Consume the `unpark` signal without actually parking
|
||||
recv_task.unparked = false;
|
||||
return TryPark::NotEmpty;
|
||||
}
|
||||
|
||||
recv_task.task = Some(task::park());
|
||||
TryPark::Parked
|
||||
}
|
||||
|
||||
fn dec_num_messages(&self) {
|
||||
let mut curr = self.inner.state.load(SeqCst);
|
||||
|
||||
loop {
|
||||
let mut state = decode_state(curr);
|
||||
|
||||
state.num_messages -= 1;
|
||||
|
||||
let next = encode_state(&state);
|
||||
match self.inner.state.compare_exchange(curr, next, SeqCst, SeqCst) {
|
||||
Ok(_) => break,
|
||||
Err(actual) => curr = actual,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Stream for Receiver<T> {
|
||||
type Item = T;
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, ()> {
|
||||
loop {
|
||||
// Try to read a message off of the message queue.
|
||||
let msg = match self.next_message() {
|
||||
Async::Ready(msg) => msg,
|
||||
Async::NotReady => {
|
||||
// There are no messages to read, in this case, attempt to
|
||||
// park. The act of parking will verify that the channel is
|
||||
// still empty after the park operation has completed.
|
||||
match self.try_park() {
|
||||
TryPark::Parked => {
|
||||
// The task was parked, and the channel is still
|
||||
// empty, return NotReady.
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
TryPark::Closed => {
|
||||
// The channel is closed, there will be no further
|
||||
// messages.
|
||||
return Ok(Async::Ready(None));
|
||||
}
|
||||
TryPark::NotEmpty => {
|
||||
// A message has been sent while attempting to
|
||||
// park. Loop again, the next iteration is
|
||||
// guaranteed to get the message.
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// If there are any parked task handles in the parked queue, pop
|
||||
// one and unpark it.
|
||||
self.unpark_one();
|
||||
|
||||
// Decrement number of messages
|
||||
self.dec_num_messages();
|
||||
|
||||
// Return the message
|
||||
return Ok(Async::Ready(msg));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Receiver<T> {
|
||||
fn drop(&mut self) {
|
||||
// Drain the channel of all pending messages
|
||||
self.close();
|
||||
while self.next_message().is_ready() {
|
||||
// ...
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> UnboundedReceiver<T> {
|
||||
/// Closes the receiving half
|
||||
///
|
||||
/// This prevents any further messages from being sent on the channel while
|
||||
/// still enabling the receiver to drain messages that are buffered.
|
||||
pub fn close(&mut self) {
|
||||
self.0.close();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Stream for UnboundedReceiver<T> {
|
||||
type Item = T;
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, ()> {
|
||||
self.0.poll()
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* ===== impl Inner =====
|
||||
*
|
||||
*/
|
||||
|
||||
impl<T> Inner<T> {
|
||||
// The return value is such that the total number of messages that can be
|
||||
// enqueued into the channel will never exceed MAX_CAPACITY
|
||||
fn max_senders(&self) -> usize {
|
||||
match self.buffer {
|
||||
Some(buffer) => MAX_CAPACITY - buffer,
|
||||
None => MAX_BUFFER,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for Inner<T> {}
|
||||
unsafe impl<T: Send> Sync for Inner<T> {}
|
||||
|
||||
/*
|
||||
*
|
||||
* ===== Helpers =====
|
||||
*
|
||||
*/
|
||||
|
||||
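// For example (illustrative only): an open channel holding three messages is
// encoded as `OPEN_MASK | 3`, and `decode_state(OPEN_MASK | 3)` yields
// `State { is_open: true, num_messages: 3 }`; `encode_state` is the inverse.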
fn decode_state(num: usize) -> State {
|
||||
State {
|
||||
is_open: num & OPEN_MASK == OPEN_MASK,
|
||||
num_messages: num & MAX_CAPACITY,
|
||||
}
|
||||
}
|
||||
|
||||
fn encode_state(state: &State) -> usize {
|
||||
let mut num = state.num_messages;
|
||||
|
||||
if state.is_open {
|
||||
num |= OPEN_MASK;
|
||||
}
|
||||
|
||||
num
|
||||
}
|
|
@ -0,0 +1,151 @@
|
|||
/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved.
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright notice,
|
||||
* this list of conditions and the following disclaimer.
|
||||
*
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED
|
||||
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
|
||||
* SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
||||
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* The views and conclusions contained in the software and documentation are
|
||||
* those of the authors and should not be interpreted as representing official
|
||||
* policies, either expressed or implied, of Dmitry Vyukov.
|
||||
*/
|
||||
|
||||
//! A mostly lock-free multi-producer, single consumer queue.
|
||||
//!
|
||||
//! This module contains an implementation of a concurrent MPSC queue. This
|
||||
//! queue can be used to share data between threads, and is also used as the
|
||||
//! building block of channels in rust.
|
||||
//!
|
||||
//! Note that the current implementation of this queue has a caveat in its `pop`
|
||||
//! method; see that method's documentation for more information. Due to this
|
||||
//! caveat, this queue may not be appropriate for all use-cases.
|
||||
|
||||
// http://www.1024cores.net/home/lock-free-algorithms
|
||||
// /queues/non-intrusive-mpsc-node-based-queue
|
||||
|
||||
// NOTE: this implementation is lifted from the standard library and only
|
||||
// slightly modified
|
||||
|
||||
pub use self::PopResult::*;
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use std::cell::UnsafeCell;
|
||||
use std::ptr;
|
||||
use std::sync::atomic::{AtomicPtr, Ordering};
|
||||
|
||||
/// A result of the `pop` function.
|
||||
pub enum PopResult<T> {
|
||||
/// Some data has been popped
|
||||
Data(T),
|
||||
/// The queue is empty
|
||||
Empty,
|
||||
/// The queue is in an inconsistent state. Popping data should succeed, but
|
||||
/// some pushers have yet to make enough progress in order to allow a pop to
|
||||
/// succeed. It is recommended that a pop() occur "in the near future" in
|
||||
/// order to see if the sender has made progress or not
|
||||
Inconsistent,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Node<T> {
|
||||
next: AtomicPtr<Node<T>>,
|
||||
value: Option<T>,
|
||||
}
|
||||
|
||||
/// The multi-producer single-consumer structure. This is not cloneable, but it
|
||||
/// may be safely shared so long as it is guaranteed that there is only one
|
||||
/// popper at a time (many pushers are allowed).
|
||||
#[derive(Debug)]
|
||||
pub struct Queue<T> {
|
||||
head: AtomicPtr<Node<T>>,
|
||||
tail: UnsafeCell<*mut Node<T>>,
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for Queue<T> { }
|
||||
unsafe impl<T: Send> Sync for Queue<T> { }
|
||||
|
||||
impl<T> Node<T> {
|
||||
unsafe fn new(v: Option<T>) -> *mut Node<T> {
|
||||
Box::into_raw(Box::new(Node {
|
||||
next: AtomicPtr::new(ptr::null_mut()),
|
||||
value: v,
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Queue<T> {
|
||||
/// Creates a new queue that is safe to share among multiple producers and
|
||||
/// one consumer.
|
||||
pub fn new() -> Queue<T> {
|
||||
let stub = unsafe { Node::new(None) };
|
||||
Queue {
|
||||
head: AtomicPtr::new(stub),
|
||||
tail: UnsafeCell::new(stub),
|
||||
}
|
||||
}
|
||||
|
||||
/// Pushes a new value onto this queue.
|
||||
pub fn push(&self, t: T) {
|
||||
unsafe {
|
||||
let n = Node::new(Some(t));
|
||||
let prev = self.head.swap(n, Ordering::AcqRel);
|
||||
(*prev).next.store(n, Ordering::Release);
|
||||
}
|
||||
}
|
||||
|
||||
/// Pops some data from this queue.
|
||||
///
|
||||
/// Note that the current implementation means that this function cannot
|
||||
/// return `Option<T>`. It is possible for this queue to be in an
|
||||
/// inconsistent state where many pushes have succeeded and completely
|
||||
/// finished, but pops cannot return `Some(t)`. This inconsistent state
|
||||
/// happens when a pusher is pre-empted at an inopportune moment.
|
||||
///
|
||||
/// This inconsistent state means that this queue does indeed have data, but
|
||||
/// it is not currently accessible.
|
||||
///
|
||||
/// This function is unsafe because only one thread can call it at a time.
|
||||
pub unsafe fn pop(&self) -> PopResult<T> {
|
||||
let tail = *self.tail.get();
|
||||
let next = (*tail).next.load(Ordering::Acquire);
|
||||
|
||||
if !next.is_null() {
|
||||
*self.tail.get() = next;
|
||||
assert!((*tail).value.is_none());
|
||||
assert!((*next).value.is_some());
|
||||
let ret = (*next).value.take().unwrap();
|
||||
drop(Box::from_raw(tail));
|
||||
return Data(ret);
|
||||
}
|
||||
|
||||
if self.head.load(Ordering::Acquire) == tail {Empty} else {Inconsistent}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Queue<T> {
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
let mut cur = *self.tail.get();
|
||||
while !cur.is_null() {
|
||||
let next = (*cur).next.load(Ordering::Relaxed);
|
||||
drop(Box::from_raw(cur));
|
||||
cur = next;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,341 @@
|
|||
//! A one-shot, futures-aware channel
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
|
||||
use {Future, Poll, Async};
|
||||
use lock::Lock;
|
||||
use task::{self, Task};
|
||||
|
||||
/// A future representing the completion of a computation happening elsewhere in
|
||||
/// memory.
|
||||
///
|
||||
/// This is created by the `oneshot::channel` function.
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
#[derive(Debug)]
|
||||
pub struct Receiver<T> {
|
||||
inner: Arc<Inner<T>>,
|
||||
}
|
||||
|
||||
/// Represents the completion half of a oneshot through which the result of a
|
||||
/// computation is signaled.
|
||||
///
|
||||
/// This is created by the `oneshot::channel` function.
|
||||
#[derive(Debug)]
|
||||
pub struct Sender<T> {
|
||||
inner: Arc<Inner<T>>,
|
||||
}
|
||||
|
||||
/// Internal state of the `Receiver`/`Sender` pair above. This is all used as
|
||||
/// the internal synchronization between the two for send/recv operations.
|
||||
#[derive(Debug)]
|
||||
struct Inner<T> {
|
||||
/// Indicates whether this oneshot is complete yet. This is filled in both
|
||||
/// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it
|
||||
/// appropriately.
|
||||
///
|
||||
/// For `Receiver`, if this is `true`, then it's guaranteed that `data` is
|
||||
/// unlocked and ready to be inspected.
|
||||
///
|
||||
/// For `Sender` if this is `true` then the oneshot has gone away and it
|
||||
/// can return ready from `poll_cancel`.
|
||||
complete: AtomicBool,
|
||||
|
||||
/// The actual data being transferred as part of this `Receiver`. This is
|
||||
/// filled in by `Sender::complete` and read by `Receiver::poll`.
|
||||
///
|
||||
/// Note that this is protected by `Lock`, but it is in theory safe to
|
||||
/// replace with an `UnsafeCell` as it's actually protected by `complete`
|
||||
/// above. I wouldn't recommend doing this, however, unless someone is
|
||||
/// supremely confident in the various atomic orderings here and there.
|
||||
data: Lock<Option<T>>,
|
||||
|
||||
/// Field to store the task which is blocked in `Receiver::poll`.
|
||||
///
|
||||
/// This is filled in when a oneshot is polled but not ready yet. Note that
|
||||
/// the `Lock` here, unlike in `data` above, is important to resolve races.
|
||||
/// Both the `Receiver` and the `Sender` halves understand that if they
|
||||
/// can't acquire the lock then some important interference is happening.
|
||||
rx_task: Lock<Option<Task>>,
|
||||
|
||||
/// Like `rx_task` above, except for the task blocked in
|
||||
/// `Sender::poll_cancel`. Additionally, `Lock` cannot be `UnsafeCell`.
|
||||
tx_task: Lock<Option<Task>>,
|
||||
}
|
||||
|
||||
/// Creates a new futures-aware, one-shot channel.
|
||||
///
|
||||
/// This function is similar to Rust's channels found in the standard library.
|
||||
/// Two halves are returned, the first of which is a `Sender` handle, used to
|
||||
/// signal the end of a computation and provide its value. The second half is a
|
||||
/// `Receiver` which implements the `Future` trait, resolving to the value that
|
||||
/// was given to the `Sender` handle.
|
||||
///
|
||||
/// Each half can be separately owned and sent across threads/tasks.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
/// use futures::sync::oneshot;
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let (c, p) = oneshot::channel::<i32>();
|
||||
///
|
||||
/// thread::spawn(|| {
|
||||
/// p.map(|i| {
|
||||
/// println!("got: {}", i);
|
||||
/// }).wait();
|
||||
/// });
|
||||
///
|
||||
/// c.send(3).unwrap();
|
||||
/// ```
|
||||
pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
|
||||
let inner = Arc::new(Inner {
|
||||
complete: AtomicBool::new(false),
|
||||
data: Lock::new(None),
|
||||
rx_task: Lock::new(None),
|
||||
tx_task: Lock::new(None),
|
||||
});
|
||||
let receiver = Receiver {
|
||||
inner: inner.clone(),
|
||||
};
|
||||
let sender = Sender {
|
||||
inner: inner,
|
||||
};
|
||||
(sender, receiver)
|
||||
}
|
||||
|
||||
impl<T> Sender<T> {
|
||||
#[deprecated(note = "renamed to `send`", since = "0.1.11")]
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub fn complete(self, t: T) {
|
||||
drop(self.send(t));
|
||||
}
|
||||
|
||||
/// Completes this oneshot with a successful result.
|
||||
///
|
||||
/// This function will consume `self` and indicate to the other end, the
|
||||
/// `Receiver`, that the value provided is the result of the computation this
|
||||
/// represents.
|
||||
///
|
||||
/// If the value is successfully enqueued for the remote end to receive,
|
||||
/// then `Ok(())` is returned. If the receiving end was deallocated before
|
||||
/// this function was called, however, then `Err` is returned with the value
|
||||
/// provided.
|
||||
pub fn send(self, t: T) -> Result<(), T> {
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
return Err(t)
|
||||
}
|
||||
|
||||
// Note that this lock acquisition should always succeed as it can only
|
||||
// interfere with `poll` in `Receiver` which is only called when the
|
||||
// `complete` flag is true, which we're setting here.
|
||||
let mut slot = self.inner.data.try_lock().unwrap();
|
||||
assert!(slot.is_none());
|
||||
*slot = Some(t);
|
||||
drop(slot);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Polls this `Sender` half to detect whether the `Receiver` this has
|
||||
/// paired with has gone away.
|
||||
///
|
||||
/// This function can be used to learn about when the `Receiver` (consumer)
|
||||
/// half has gone away and nothing will be able to receive a message sent
|
||||
/// from `complete`.
|
||||
///
|
||||
/// Like `Future::poll`, this function will panic if it's not called from
|
||||
/// within the context of a task. In other words, this should only ever be
|
||||
/// called from inside another future.
|
||||
///
|
||||
/// If `Ready` is returned then it means that the `Receiver` has disappeared
|
||||
/// and the result this `Sender` would otherwise produce should no longer
|
||||
/// be produced.
|
||||
///
|
||||
/// If `NotReady` is returned then the `Receiver` is still alive and may be
|
||||
/// able to receive a message if sent. The current task, however, is
|
||||
/// scheduled to receive a notification if the corresponding `Receiver` goes
|
||||
/// away.
|
||||
pub fn poll_cancel(&mut self) -> Poll<(), ()> {
|
||||
// Fast path up first, just read the flag and see if our other half is
|
||||
// gone. This flag is set both in our destructor and the oneshot
|
||||
// destructor, but our destructor hasn't run yet so if it's set then the
|
||||
// oneshot is gone.
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
return Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
// If our other half is not gone then we need to park our current task
|
||||
// and move it into the `notify_cancel` slot to get notified when it's
|
||||
// actually gone.
|
||||
//
|
||||
// If `try_lock` fails, then the `Receiver` is in the process of using
|
||||
// it, so we can deduce that it's now in the process of going away and
|
||||
// hence we're canceled. If it succeeds then we just store our handle.
|
||||
//
|
||||
// Crucially we then check `oneshot_gone` *again* before we return.
|
||||
// While we were storing our handle inside `notify_cancel` the `Receiver`
|
||||
// may have been dropped. The first thing it does is set the flag, and
|
||||
// if it fails to acquire the lock it assumes that we'll see the flag
|
||||
// later on. So... we then try to see the flag later on!
|
||||
let handle = task::park();
|
||||
match self.inner.tx_task.try_lock() {
|
||||
Some(mut p) => *p = Some(handle),
|
||||
None => return Ok(Async::Ready(())),
|
||||
}
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
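// Illustrative sketch (not part of the original source): a producer future that
// resolves once the paired `Receiver` has gone away, by forwarding to
// `poll_cancel`. The `WaitForCancel` name is hypothetical.
#[cfg(test)]
#[allow(dead_code)]
mod poll_cancel_sketch {
    use {Future, Poll};
    use super::Sender;

    struct WaitForCancel(Sender<u32>);

    impl Future for WaitForCancel {
        type Item = ();
        type Error = ();

        fn poll(&mut self) -> Poll<(), ()> {
            // Parks the current task until the `Receiver` is dropped.
            self.0.poll_cancel()
        }
    }
}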
|
||||
|
||||
impl<T> Drop for Sender<T> {
|
||||
fn drop(&mut self) {
|
||||
// Flag that we're a completed `Sender` and try to wake up a receiver.
|
||||
// Whether or not we actually stored any data will get picked up and
|
||||
// translated to either an item or cancellation.
|
||||
//
|
||||
// Note that if we fail to acquire the `rx_task` lock then that means
|
||||
// we're in one of two situations:
|
||||
//
|
||||
// 1. The receiver is trying to block in `poll`
|
||||
// 2. The receiver is being dropped
|
||||
//
|
||||
// In the first case it'll check the `complete` flag after it's done
|
||||
// blocking to see if it succeeded. In the latter case we don't need to
|
||||
// wake up anyone anyway. So in both cases it's ok to ignore the `None`
|
||||
// case of `try_lock` and bail out.
|
||||
//
|
||||
// The first case crucially depends on `Lock` using `SeqCst` ordering
|
||||
// under the hood. If it instead used `Release` / `Acquire` ordering,
|
||||
// then it would not necessarily synchronize with `inner.complete`
|
||||
// and deadlock might be possible, as was observed in
|
||||
// https://github.com/alexcrichton/futures-rs/pull/219.
|
||||
self.inner.complete.store(true, SeqCst);
|
||||
if let Some(mut slot) = self.inner.rx_task.try_lock() {
|
||||
if let Some(task) = slot.take() {
|
||||
drop(slot);
|
||||
task.unpark();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error returned from a `Receiver<T>` whenever the corresponding `Sender<T>`
|
||||
/// is dropped.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub struct Canceled;
|
||||
|
||||
impl fmt::Display for Canceled {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "oneshot canceled")
|
||||
}
|
||||
}
|
||||
|
||||
impl Error for Canceled {
|
||||
fn description(&self) -> &str {
|
||||
"oneshot canceled"
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Receiver<T> {
|
||||
/// Gracefully close this receiver, preventing sending any future messages.
|
||||
///
|
||||
/// Any `send` operation which happens after this method returns is
|
||||
/// guaranteed to fail. Once this method is called the normal `poll` method
|
||||
/// can be used to determine whether a message was actually sent or not. If
|
||||
/// `Canceled` is returned from `poll` then no message was sent.
|
||||
pub fn close(&mut self) {
|
||||
// Flag our completion and then attempt to wake up the sender if it's
|
||||
// blocked. See comments in `drop` below for more info
|
||||
self.inner.complete.store(true, SeqCst);
|
||||
if let Some(mut handle) = self.inner.tx_task.try_lock() {
|
||||
if let Some(task) = handle.take() {
|
||||
drop(handle);
|
||||
task.unpark()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
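// Illustrative sketch (not part of the original source): once `close` has been
// called, a `send` fails and, if nothing was delivered, the receiver resolves
// to `Err(Canceled)`.
#[cfg(test)]
mod close_sketch {
    use Future;

    #[test]
    fn close_then_send_fails() {
        let (tx, mut rx) = super::channel::<u32>();
        rx.close();
        assert!(tx.send(1).is_err());            // guaranteed to fail after close
        assert_eq!(rx.wait(), Err(super::Canceled));
    }
}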
|
||||
|
||||
impl<T> Future for Receiver<T> {
|
||||
type Item = T;
|
||||
type Error = Canceled;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, Canceled> {
|
||||
let mut done = false;
|
||||
|
||||
// Check to see if some data has arrived. If it hasn't then we need to
|
||||
// block our task.
|
||||
//
|
||||
// Note that the acquisition of the `rx_task` lock might fail below, but
|
||||
// the only situation where this can happen is during `Sender::drop`
|
||||
// when we are indeed completed already. If that's happening then we
|
||||
// know we're completed so keep going.
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
done = true;
|
||||
} else {
|
||||
let task = task::park();
|
||||
match self.inner.rx_task.try_lock() {
|
||||
Some(mut slot) => *slot = Some(task),
|
||||
None => done = true,
|
||||
}
|
||||
}
|
||||
|
||||
// If we're `done` via one of the paths above, then look at the data and
|
||||
// figure out what the answer is. If, however, we stored `rx_task`
|
||||
// successfully above we need to check again if we're completed in case
|
||||
// a message was sent while `rx_task` was locked and couldn't notify us
|
||||
// otherwise.
|
||||
//
|
||||
// If we're not done, and we're not complete, though, then we've
|
||||
// successfully blocked our task and we return `NotReady`.
|
||||
if done || self.inner.complete.load(SeqCst) {
|
||||
match self.inner.data.try_lock().unwrap().take() {
|
||||
Some(data) => Ok(data.into()),
|
||||
None => Err(Canceled),
|
||||
}
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Receiver<T> {
|
||||
fn drop(&mut self) {
|
||||
// Indicate to the `Sender` that we're done, so any future calls to
|
||||
// `poll_cancel` are weeded out.
|
||||
self.inner.complete.store(true, SeqCst);
|
||||
|
||||
// If we've blocked a task then there's no need for it to stick around,
|
||||
// so we need to drop it. If this lock acquisition fails, though, then
|
||||
// it's just because our `Sender` is trying to take the task, so we
|
||||
// let them take care of that.
|
||||
if let Some(mut slot) = self.inner.rx_task.try_lock() {
|
||||
let task = slot.take();
|
||||
drop(slot);
|
||||
drop(task);
|
||||
}
|
||||
|
||||
// Finally, if our `Sender` wants to get notified of us going away, it
|
||||
// would have stored something in `tx_task`. Here we try to peel that
|
||||
// out and unpark it.
|
||||
//
|
||||
// Note that the `try_lock` here may fail, but only if the `Sender` is
|
||||
// in the process of filling in the task. If that happens then we
|
||||
// already flagged `complete` and they'll pick that up above.
|
||||
if let Some(mut handle) = self.inner.tx_task.try_lock() {
|
||||
if let Some(task) = handle.take() {
|
||||
drop(handle);
|
||||
task.unpark()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,40 @@
|
|||
//! Tasks used to drive a future computation
|
||||
//!
|
||||
//! It's intended that, over time, a particular operation (such as servicing an HTTP
|
||||
//! request) will involve many futures. This entire operation, however, can be
|
||||
//! thought of as one unit, as the entire result is essentially just moving
|
||||
//! through one large state machine.
|
||||
//!
|
||||
//! A "task" is the unit of abstraction for what is driving this state machine
|
||||
//! and tree of futures forward. A task is used to poll futures and schedule
|
||||
//! futures with, and has utilities for sharing data between tasks and handles
|
||||
//! for notifying when a future is ready. Each task also has its own set of
|
||||
//! task-local data generated by `task_local!`.
|
||||
//!
|
||||
//! Note that libraries typically should not manage tasks themselves, but rather
|
||||
//! leave that to event loops and other "executors" (see the `executor` module),
|
||||
//! or by using the `wait` method to create and execute a task directly on the
|
||||
//! current thread.
|
||||
//!
|
||||
//! More information about the task model can be found [online at tokio.rs].
|
||||
//!
|
||||
//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper/futures-model/
|
||||
//!
|
||||
//! ## Functions
|
||||
//!
|
||||
//! There is an important bare function in this module: `park`. The `park`
|
||||
//! function is similar to the standard library's `thread::park` function in that it
|
||||
//! returns a handle to wake up a task at a later date (via an `unpark` method).
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the executor module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use task_impl::{Spawn, spawn, Unpark, Executor, Run};
|
||||
|
||||
pub use task_impl::{Task, LocalKey, park, with_unpark_event, UnparkEvent, EventSet};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the executor module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[allow(deprecated)]
|
||||
pub use task_impl::TaskRc;
|
|
@ -0,0 +1,129 @@
|
|||
use std::prelude::v1::*;
|
||||
|
||||
use std::any::TypeId;
|
||||
use std::cell::RefCell;
|
||||
use std::hash::{BuildHasherDefault, Hasher};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// A macro to create a `static` of type `LocalKey`
|
||||
///
|
||||
/// This macro is intentionally similar to the `thread_local!`, and creates a
|
||||
/// `static` which has a `with` method to access the data on a task.
|
||||
///
|
||||
/// The data associated with each task local is per-task, so different tasks
|
||||
/// will contain different values.
|
||||
#[macro_export]
|
||||
macro_rules! task_local {
|
||||
(static $NAME:ident: $t:ty = $e:expr) => (
|
||||
static $NAME: $crate::task::LocalKey<$t> = {
|
||||
fn __init() -> $t { $e }
|
||||
fn __key() -> ::std::any::TypeId {
|
||||
struct __A;
|
||||
::std::any::TypeId::of::<__A>()
|
||||
}
|
||||
$crate::task::LocalKey {
|
||||
__init: __init,
|
||||
__key: __key,
|
||||
}
|
||||
};
|
||||
)
|
||||
}
|
||||
|
||||
pub type LocalMap = RefCell<HashMap<TypeId,
|
||||
Box<Opaque>,
|
||||
BuildHasherDefault<IdHasher>>>;
|
||||
|
||||
pub fn local_map() -> LocalMap {
|
||||
RefCell::new(HashMap::default())
|
||||
}
|
||||
|
||||
pub trait Opaque: Send {}
|
||||
impl<T: Send> Opaque for T {}
|
||||
|
||||
/// A key for task-local data stored in a future's task.
|
||||
///
|
||||
/// This type is generated by the `task_local!` macro and performs very
|
||||
/// similarly to the `thread_local!` macro and `std::thread::LocalKey` types.
|
||||
/// Data associated with a `LocalKey<T>` is stored inside of a future's task,
|
||||
/// and the data is destroyed when the future is completed and the task is
|
||||
/// destroyed.
|
||||
///
|
||||
/// Task-local data can migrate between threads and hence requires a `Send`
|
||||
/// bound. Additionally, task-local data also requires the `'static` bound to
|
||||
/// ensure it lives long enough. When a key is accessed for the first time the
|
||||
/// task's data is initialized with the initialization expression provided to
|
||||
/// the macro.
|
||||
#[derive(Debug)]
|
||||
pub struct LocalKey<T> {
|
||||
// "private" fields which have to be public to get around macro hygiene, not
|
||||
// included in the stability story for this type. Can change at any time.
|
||||
#[doc(hidden)]
|
||||
pub __key: fn() -> TypeId,
|
||||
#[doc(hidden)]
|
||||
pub __init: fn() -> T,
|
||||
}
|
||||
|
||||
pub struct IdHasher {
|
||||
id: u64,
|
||||
}
|
||||
|
||||
impl Default for IdHasher {
|
||||
fn default() -> IdHasher {
|
||||
IdHasher { id: 0 }
|
||||
}
|
||||
}
|
||||
|
||||
impl Hasher for IdHasher {
|
||||
fn write(&mut self, _bytes: &[u8]) {
|
||||
// TODO: need to do something sensible
|
||||
panic!("can only hash u64");
|
||||
}
|
||||
|
||||
fn write_u64(&mut self, u: u64) {
|
||||
self.id = u;
|
||||
}
|
||||
|
||||
fn finish(&self) -> u64 {
|
||||
self.id
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Send + 'static> LocalKey<T> {
|
||||
/// Access this task-local key, running the provided closure with a
|
||||
/// reference to the value.
|
||||
///
|
||||
/// This function will access this task-local key to retrieve the data
|
||||
/// associated with the current task and this key. If this is the first time
|
||||
/// this key has been accessed on this task, then the key will be
|
||||
/// initialized with the initialization expression provided at the time the
|
||||
/// `task_local!` macro was called.
|
||||
///
|
||||
/// The provided closure will be provided a shared reference to the
|
||||
/// underlying data associated with this task-local-key. The data itself is
|
||||
/// stored inside of the current task.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function can possibly panic for a number of reasons:
|
||||
///
|
||||
/// * If there is not a current task.
|
||||
/// * If the initialization expression is run and it panics.
/// * If the closure provided panics.
|
||||
pub fn with<F, R>(&'static self, f: F) -> R
|
||||
where F: FnOnce(&T) -> R
|
||||
{
|
||||
let key = (self.__key)();
|
||||
super::with(|task| {
|
||||
let raw_pointer = {
|
||||
let mut data = task.map.borrow_mut();
|
||||
let entry = data.entry(key).or_insert_with(|| {
|
||||
Box::new((self.__init)())
|
||||
});
|
||||
&**entry as *const Opaque as *const T
|
||||
};
|
||||
unsafe {
|
||||
f(&*raw_pointer)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
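// Illustrative sketch (not part of the original source): declaring a task-local
// with `task_local!` and reading it from inside a running task. `future::lazy`
// is used only to provide a task context; the `HITS` key is hypothetical.
#[cfg(test)]
mod task_local_sketch {
    use std::cell::Cell;

    use Future;
    use future::lazy;

    task_local!(static HITS: Cell<u32> = Cell::new(0));

    #[test]
    fn initialized_once_per_task() {
        let seen = lazy(|| {
            // The first access initializes the value for this task; later
            // accesses observe the same storage.
            HITS.with(|c| c.set(c.get() + 1));
            Ok::<u32, ()>(HITS.with(|c| c.get()))
        }).wait().unwrap();
        assert_eq!(seen, 1);
    }
}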
|
|
@ -0,0 +1,604 @@
|
|||
use std::prelude::v1::*;
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{Ordering, AtomicBool, AtomicUsize, ATOMIC_USIZE_INIT};
|
||||
use std::thread;
|
||||
|
||||
use {Poll, Future, Async, Stream, Sink, StartSend, AsyncSink};
|
||||
use future::BoxFuture;
|
||||
|
||||
mod unpark_mutex;
|
||||
use self::unpark_mutex::UnparkMutex;
|
||||
|
||||
mod task_rc;
|
||||
mod data;
|
||||
#[allow(deprecated)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::task_rc::TaskRc;
|
||||
pub use self::data::LocalKey;
|
||||
|
||||
struct BorrowedTask<'a> {
|
||||
id: usize,
|
||||
unpark: &'a Arc<Unpark>,
|
||||
map: &'a data::LocalMap,
|
||||
events: Events,
|
||||
}
|
||||
|
||||
thread_local!(static CURRENT_TASK: Cell<*const BorrowedTask<'static>> = {
|
||||
Cell::new(0 as *const _)
|
||||
});
|
||||
|
||||
fn fresh_task_id() -> usize {
|
||||
// TODO: this assert is a real bummer, need to figure out how to reuse
|
||||
// old IDs that are no longer in use.
|
||||
static NEXT_ID: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
let id = NEXT_ID.fetch_add(1, Ordering::Relaxed);
|
||||
assert!(id < usize::max_value() / 2,
|
||||
"too many previous tasks have been allocated");
|
||||
id
|
||||
}
|
||||
|
||||
fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
|
||||
where F: FnOnce() -> R
|
||||
{
|
||||
struct Reset(*const BorrowedTask<'static>);
|
||||
impl Drop for Reset {
|
||||
fn drop(&mut self) {
|
||||
CURRENT_TASK.with(|c| c.set(self.0));
|
||||
}
|
||||
}
|
||||
|
||||
CURRENT_TASK.with(move |c| {
|
||||
let _reset = Reset(c.get());
|
||||
let task = unsafe {
|
||||
mem::transmute::<&BorrowedTask<'a>,
|
||||
*const BorrowedTask<'static>>(task)
|
||||
};
|
||||
c.set(task);
|
||||
f()
|
||||
})
|
||||
}
|
||||
|
||||
fn with<F: FnOnce(&BorrowedTask) -> R, R>(f: F) -> R {
|
||||
let task = CURRENT_TASK.with(|c| c.get());
|
||||
assert!(!task.is_null(), "no Task is currently running");
|
||||
unsafe {
|
||||
f(&*task)
|
||||
}
|
||||
}
|
||||
|
||||
/// A handle to a "task", which represents a single lightweight "thread" of
|
||||
/// execution driving a future to completion.
|
||||
///
|
||||
/// In general, futures are composed into large units of work, which are then
|
||||
/// spawned as tasks onto an *executor*. The executor is responsible for polling
|
||||
/// the future as notifications arrive, until the future terminates.
|
||||
///
|
||||
/// This is obtained by the `task::park` function.
|
||||
#[derive(Clone)]
|
||||
pub struct Task {
|
||||
id: usize,
|
||||
unpark: Arc<Unpark>,
|
||||
events: Events,
|
||||
}
|
||||
|
||||
fn _assert_kinds() {
|
||||
fn _assert_send<T: Send>() {}
|
||||
_assert_send::<Task>();
|
||||
}
|
||||
|
||||
/// Returns a handle to the current task to call `unpark` at a later date.
|
||||
///
|
||||
/// This function is similar to the standard library's `thread::park` function
|
||||
/// except that it won't block the current thread but rather the current future
|
||||
/// that is being executed.
|
||||
///
|
||||
/// The returned handle implements the `Send` and `'static` bounds and may also
|
||||
/// be cheaply cloned. This is useful for squirreling away the handle into a
|
||||
/// location which is then later signaled that a future can make progress.
|
||||
///
|
||||
/// Implementations of the `Future` trait typically use this function if they
|
||||
/// would otherwise perform a blocking operation. When something isn't ready
|
||||
/// yet, this `park` function is called to acquire a handle to the current
|
||||
/// task, and then the future arranges it such that when the blocking operation
|
||||
/// otherwise finishes (perhaps in the background) it will `unpark` the returned
|
||||
/// handle.
|
||||
///
|
||||
/// It's sometimes necessary to pass extra information to the task when
|
||||
/// unparking it, so that the task knows something about *why* it was woken. See
|
||||
/// the `with_unpark_event` function for details on how to do this.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if a task is not currently being executed. That
|
||||
/// is, this method can be dangerous to call outside of an implementation of
|
||||
/// `poll`.
|
||||
pub fn park() -> Task {
|
||||
with(|task| {
|
||||
Task {
|
||||
id: task.id,
|
||||
events: task.events.clone(),
|
||||
unpark: task.unpark.clone(),
|
||||
}
|
||||
})
|
||||
}
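// Illustrative sketch (not part of the original source): the usual pattern a
// `Future` implementation follows around `park`, with a worker thread flipping
// a flag and unparking the stored handle. `WakeLater` and `Shared` are
// hypothetical names.
#[cfg(test)]
mod park_sketch {
    use std::sync::{Arc, Mutex};
    use std::thread;

    use {Future, Poll, Async};
    use super::{park, Task};

    struct Shared { done: bool, task: Option<Task> }
    struct WakeLater(Arc<Mutex<Shared>>);

    impl Future for WakeLater {
        type Item = ();
        type Error = ();

        fn poll(&mut self) -> Poll<(), ()> {
            let mut s = self.0.lock().unwrap();
            if s.done {
                Ok(Async::Ready(()))
            } else {
                // Not ready yet: stash a handle so the worker can wake us later.
                s.task = Some(park());
                Ok(Async::NotReady)
            }
        }
    }

    #[test]
    fn unparked_from_another_thread() {
        let shared = Arc::new(Mutex::new(Shared { done: false, task: None }));
        let worker = shared.clone();
        thread::spawn(move || {
            let mut s = worker.lock().unwrap();
            s.done = true;
            if let Some(task) = s.task.take() {
                task.unpark();
            }
        });
        WakeLater(shared).wait().unwrap();
    }
}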
|
||||
|
||||
impl Task {
|
||||
/// Indicate that the task should attempt to poll its future in a timely
|
||||
/// fashion.
|
||||
///
|
||||
/// It's typically guaranteed that, for each call to `unpark`, `poll` will
|
||||
/// be called at least once subsequently (unless the task has terminated).
|
||||
/// If the task is currently polling its future when `unpark` is called, it
|
||||
/// must poll the future *again* afterwards, ensuring that all relevant
|
||||
/// events are eventually observed by the future.
|
||||
pub fn unpark(&self) {
|
||||
self.events.trigger();
|
||||
self.unpark.unpark();
|
||||
}
|
||||
|
||||
/// Returns `true` when called from within the context of the task. In
|
||||
/// other words, the task is currently running on the thread calling the
|
||||
/// function.
|
||||
pub fn is_current(&self) -> bool {
|
||||
with(|current| current.id == self.id)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Task {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Task")
|
||||
.field("id", &self.id)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// For the duration of the given callback, add an "unpark event" to be
|
||||
/// triggered when the task handle is used to unpark the task.
|
||||
///
|
||||
/// Unpark events are used to pass information about what event caused a task to
|
||||
/// be unparked. In some cases, tasks are waiting on a large number of possible
|
||||
/// events, and need precise information about the wakeup to avoid extraneous
|
||||
/// polling.
|
||||
///
|
||||
/// Every `Task` handle comes with a set of unpark events which will fire when
|
||||
/// `unpark` is called. When fired, these events insert an identifier into a
|
||||
/// concurrent set, which the task can read from to determine what events
|
||||
/// occurred.
|
||||
///
|
||||
/// This function immediately invokes the closure, `f`, but arranges things so
|
||||
/// that `task::park` will produce a `Task` handle that includes the given
|
||||
/// unpark event.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if a task is not currently being executed. That
|
||||
/// is, this method can be dangerous to call outside of an implementation of
|
||||
/// `poll`.
|
||||
pub fn with_unpark_event<F, R>(event: UnparkEvent, f: F) -> R
|
||||
where F: FnOnce() -> R
|
||||
{
|
||||
with(|task| {
|
||||
let new_task = BorrowedTask {
|
||||
id: task.id,
|
||||
unpark: task.unpark,
|
||||
events: task.events.with_event(event),
|
||||
map: task.map,
|
||||
};
|
||||
set(&new_task, f)
|
||||
})
|
||||
}
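// Illustrative sketch (not part of the original source): recording *which*
// child caused a wakeup by pairing `with_unpark_event` with a small `EventSet`
// implementation. `ReadySet` and `poll_child` are hypothetical names, and the
// helper must only be called while a task is running.
#[cfg(test)]
#[allow(dead_code)]
mod unpark_event_sketch {
    use std::sync::{Arc, Mutex};

    use super::{park, with_unpark_event, EventSet, UnparkEvent};

    struct ReadySet(Mutex<Vec<usize>>);

    impl EventSet for ReadySet {
        fn insert(&self, id: usize) {
            self.0.lock().unwrap().push(id);
        }
    }

    fn poll_child(index: usize, set: &Arc<ReadySet>) {
        let event = UnparkEvent::new(set.clone() as Arc<EventSet>, index);
        with_unpark_event(event, || {
            // Any handle taken here carries the event: a later `unpark` on it
            // inserts `index` into `set` before waking the task.
            let _handle = park();
        });
    }
}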
|
||||
|
||||
#[derive(Clone)]
|
||||
/// A set insertion to trigger upon `unpark`.
|
||||
///
|
||||
/// Unpark events are used to communicate information about *why* an unpark
|
||||
/// occurred, in particular populating sets with event identifiers so that the
|
||||
/// unparked task can avoid extraneous polling. See `with_unpark_event` for
|
||||
/// more.
|
||||
pub struct UnparkEvent {
|
||||
set: Arc<EventSet>,
|
||||
item: usize,
|
||||
}
|
||||
|
||||
impl UnparkEvent {
|
||||
/// Construct an unpark event that will insert `id` into `set` when
|
||||
/// triggered.
|
||||
pub fn new(set: Arc<EventSet>, id: usize) -> UnparkEvent {
|
||||
UnparkEvent {
|
||||
set: set,
|
||||
item: id,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for UnparkEvent {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("UnparkEvent")
|
||||
.field("set", &"...")
|
||||
.field("item", &self.item)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A concurrent set which allows for the insertion of `usize` values.
|
||||
///
|
||||
/// `EventSet`s are used to communicate precise information about the event(s)
|
||||
/// that triggered a task notification. See `task::with_unpark_event` for details.
|
||||
pub trait EventSet: Send + Sync + 'static {
|
||||
/// Insert the given ID into the set
|
||||
fn insert(&self, id: usize);
|
||||
}
|
||||
|
||||
// A collection of UnparkEvents to trigger on `unpark`
|
||||
#[derive(Clone)]
|
||||
enum Events {
|
||||
Zero,
|
||||
One(UnparkEvent),
|
||||
Lots(Vec<UnparkEvent>),
|
||||
}
|
||||
|
||||
impl Events {
|
||||
fn new() -> Events {
|
||||
Events::Zero
|
||||
}
|
||||
|
||||
fn trigger(&self) {
|
||||
match *self {
|
||||
Events::Zero => {}
|
||||
Events::One(ref event) => event.set.insert(event.item),
|
||||
Events::Lots(ref list) => {
|
||||
for event in list {
|
||||
event.set.insert(event.item);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn with_event(&self, event: UnparkEvent) -> Events {
|
||||
let mut list = match *self {
|
||||
Events::Zero => return Events::One(event),
|
||||
Events::One(ref event) => vec![event.clone()],
|
||||
Events::Lots(ref list) => list.clone(),
|
||||
};
|
||||
list.push(event);
|
||||
Events::Lots(list)
|
||||
}
|
||||
}
|
||||
|
||||
/// Representation of a spawned future/stream.
|
||||
///
|
||||
/// This object is returned by the `spawn` function in this module. This
|
||||
/// represents a "fused task and future", storing all necessary pieces of a task
|
||||
/// and owning the top-level future that's being driven as well.
|
||||
///
|
||||
/// A `Spawn` can be polled for completion, or the current thread's execution
/// can be blocked indefinitely until a notification arrives. This can be used
/// with either futures or streams, with different methods being available on
/// `Spawn` depending on which is used.
|
||||
pub struct Spawn<T> {
|
||||
obj: T,
|
||||
id: usize,
|
||||
data: data::LocalMap,
|
||||
}
|
||||
|
||||
/// Spawns a new future, returning the fused future and task.
|
||||
///
|
||||
/// This function is the termination endpoint for running futures. This method
|
||||
/// will conceptually allocate a new task to run the given object, which is
|
||||
/// normally either a `Future` or `Stream`.
|
||||
///
|
||||
/// This function is similar to the `thread::spawn` function but does not
|
||||
/// attempt to run code in the background. The future will not make progress
|
||||
/// until the methods on `Spawn` are called in turn.
|
||||
pub fn spawn<T>(obj: T) -> Spawn<T> {
|
||||
Spawn {
|
||||
obj: obj,
|
||||
id: fresh_task_id(),
|
||||
data: data::local_map(),
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Spawn<T> {
|
||||
/// Get a shared reference to the object the Spawn is wrapping.
|
||||
pub fn get_ref(&self) -> &T {
|
||||
&self.obj
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the object the Spawn is wrapping.
|
||||
pub fn get_mut(&mut self) -> &mut T {
|
||||
&mut self.obj
|
||||
}
|
||||
|
||||
/// Consume the Spawn, returning its inner object
|
||||
pub fn into_inner(self) -> T {
|
||||
self.obj
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future> Spawn<F> {
|
||||
/// Polls the internal future, scheduling notifications to be sent to the
|
||||
/// `unpark` argument.
|
||||
///
|
||||
/// This method will poll the internal future, testing if it's completed
|
||||
/// yet. The `unpark` argument is used as a sink for notifications sent to
|
||||
/// this future. That is, while the future is being polled, any call to
|
||||
/// `task::park()` will return a handle that contains the `unpark`
|
||||
/// specified.
|
||||
///
|
||||
/// If this function returns `NotReady`, then the `unpark` should have been
|
||||
/// scheduled to receive a notification when poll can be called again.
|
||||
/// Otherwise if `Ready` or `Err` is returned, the `Spawn` task can be
|
||||
/// safely destroyed.
|
||||
pub fn poll_future(&mut self, unpark: Arc<Unpark>) -> Poll<F::Item, F::Error> {
|
||||
self.enter(&unpark, |f| f.poll())
|
||||
}
|
||||
|
||||
/// Waits for the internal future to complete, blocking this thread's
|
||||
/// execution until it does.
|
||||
///
|
||||
/// This function will call `poll_future` in a loop, waiting for the future
|
||||
/// to complete. When a future cannot make progress it will use
|
||||
/// `thread::park` to block the current thread.
|
||||
pub fn wait_future(&mut self) -> Result<F::Item, F::Error> {
|
||||
let unpark = Arc::new(ThreadUnpark::new(thread::current()));
|
||||
loop {
|
||||
match try!(self.poll_future(unpark.clone())) {
|
||||
Async::NotReady => unpark.park(),
|
||||
Async::Ready(e) => return Ok(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A specialized function to request running a future to completion on the
|
||||
/// specified executor.
|
||||
///
|
||||
/// This function only works for futures whose item and error types are `()`
|
||||
/// and also implement the `Send` and `'static` bounds. This will submit
|
||||
/// units of work (instances of `Run`) to the `exec` argument provided, as
|
||||
/// necessary to drive the future to completion.
|
||||
///
|
||||
/// When the future would block, it's arranged that when the future is again
|
||||
/// ready it will submit another unit of work to the `exec` provided. This
|
||||
/// will happen in a loop until the future has completed.
|
||||
///
|
||||
/// This method is not appropriate for all futures, and other kinds of
|
||||
/// executors typically provide a similar function with perhaps relaxed
|
||||
/// bounds as well.
|
||||
pub fn execute(self, exec: Arc<Executor>)
|
||||
where F: Future<Item=(), Error=()> + Send + 'static,
|
||||
{
|
||||
exec.clone().execute(Run {
|
||||
// Ideally this method would be defined directly on
|
||||
// `Spawn<BoxFuture<(), ()>>` so we wouldn't have to box here and
|
||||
// it'd be more explicit, but unfortunately that currently has a
|
||||
// link error on nightly: rust-lang/rust#36155
|
||||
spawn: Spawn {
|
||||
id: self.id,
|
||||
data: self.data,
|
||||
obj: self.obj.boxed(),
|
||||
},
|
||||
inner: Arc::new(Inner {
|
||||
exec: exec,
|
||||
mutex: UnparkMutex::new()
|
||||
}),
|
||||
})
|
||||
}
|
||||
}
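// Illustrative sketch (not part of the original source): driving a future by
// hand with `spawn` plus a no-op `Unpark`, and the blocking shortcut
// `wait_future`. Assumes the crate's `future::ok` constructor; `NoopUnpark`
// is a hypothetical name.
#[cfg(test)]
mod spawn_future_sketch {
    use std::sync::Arc;

    use Async;
    use future::ok;
    use super::{spawn, Unpark};

    struct NoopUnpark;

    impl Unpark for NoopUnpark {
        fn unpark(&self) {}
    }

    #[test]
    fn drive_by_hand() {
        let mut task = spawn(ok::<u32, ()>(42));
        let unpark = Arc::new(NoopUnpark) as Arc<Unpark>;
        // An already-ready future completes on the first poll; otherwise the
        // `unpark` handle would be notified when it's worth polling again.
        match task.poll_future(unpark) {
            Ok(Async::Ready(v)) => assert_eq!(v, 42),
            _ => panic!("expected the future to be ready immediately"),
        }

        // Or simply block the current thread until completion.
        assert_eq!(spawn(ok::<u32, ()>(7)).wait_future(), Ok(7));
    }
}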
|
||||
|
||||
impl<S: Stream> Spawn<S> {
|
||||
/// Like `poll_future`, except polls the underlying stream.
|
||||
pub fn poll_stream(&mut self, unpark: Arc<Unpark>)
|
||||
-> Poll<Option<S::Item>, S::Error> {
|
||||
self.enter(&unpark, |stream| stream.poll())
|
||||
}
|
||||
|
||||
/// Like `wait_future`, except only waits for the next element to arrive on
|
||||
/// the underlying stream.
|
||||
pub fn wait_stream(&mut self) -> Option<Result<S::Item, S::Error>> {
|
||||
let unpark = Arc::new(ThreadUnpark::new(thread::current()));
|
||||
loop {
|
||||
match self.poll_stream(unpark.clone()) {
|
||||
Ok(Async::NotReady) => unpark.park(),
|
||||
Ok(Async::Ready(Some(e))) => return Some(Ok(e)),
|
||||
Ok(Async::Ready(None)) => return None,
|
||||
Err(e) => return Some(Err(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Spawn<S> {
|
||||
/// Invokes the underlying `start_send` method with this task in place.
|
||||
///
|
||||
/// If the underlying operation returns `NotReady` then the `unpark` value
|
||||
/// passed in will receive a notification when the operation is ready to be
|
||||
/// attempted again.
|
||||
pub fn start_send(&mut self, value: S::SinkItem, unpark: &Arc<Unpark>)
|
||||
-> StartSend<S::SinkItem, S::SinkError> {
|
||||
self.enter(unpark, |sink| sink.start_send(value))
|
||||
}
|
||||
|
||||
/// Invokes the underlying `poll_complete` method with this task in place.
|
||||
///
|
||||
/// If the underlying operation returns `NotReady` then the `unpark` value
|
||||
/// passed in will receive a notification when the operation is ready to be
|
||||
/// attempted again.
|
||||
pub fn poll_flush(&mut self, unpark: &Arc<Unpark>)
|
||||
-> Poll<(), S::SinkError> {
|
||||
self.enter(unpark, |sink| sink.poll_complete())
|
||||
}
|
||||
|
||||
/// Blocks the current thread until it's able to send `value` on this sink.
|
||||
///
|
||||
/// This function will send the `value` on the sink that this task wraps. If
|
||||
/// the sink is not ready to send the value yet then the current thread will
|
||||
/// be blocked until it's able to send the value.
|
||||
pub fn wait_send(&mut self, mut value: S::SinkItem)
|
||||
-> Result<(), S::SinkError> {
|
||||
let unpark = Arc::new(ThreadUnpark::new(thread::current()));
|
||||
let unpark2 = unpark.clone() as Arc<Unpark>;
|
||||
loop {
|
||||
value = match try!(self.start_send(value, &unpark2)) {
|
||||
AsyncSink::NotReady(v) => v,
|
||||
AsyncSink::Ready => return Ok(()),
|
||||
};
|
||||
unpark.park();
|
||||
}
|
||||
}
|
||||
|
||||
/// Blocks the current thread until it's able to flush this sink.
|
||||
///
|
||||
/// This function will call the underlying sink's `poll_complete` method
|
||||
/// until it returns that it's ready, proxying out errors upwards to the
|
||||
/// caller if one occurs.
|
||||
///
|
||||
/// The thread will be blocked until `poll_complete` returns that it's
|
||||
/// ready.
|
||||
pub fn wait_flush(&mut self) -> Result<(), S::SinkError> {
|
||||
let unpark = Arc::new(ThreadUnpark::new(thread::current()));
|
||||
let unpark2 = unpark.clone() as Arc<Unpark>;
|
||||
loop {
|
||||
if try!(self.poll_flush(&unpark2)).is_ready() {
|
||||
return Ok(())
|
||||
}
|
||||
unpark.park();
|
||||
}
|
||||
}
|
||||
}
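// Illustrative sketch (not part of the original source): blocking sends and
// flushes against a sink, here the crate's own `sync::mpsc` channel (assuming
// its `channel(buffer)` constructor and that its `Sender` implements `Sink`).
#[cfg(test)]
mod spawn_sink_sketch {
    use sync::mpsc;
    use super::spawn;

    #[test]
    fn send_then_flush() {
        let (tx, rx) = mpsc::channel::<u32>(1);
        let mut sink = spawn(tx);
        sink.wait_send(7).unwrap();      // blocks until the sink accepts the item
        sink.wait_flush().unwrap();      // blocks until buffered items are flushed
        drop(sink);                      // dropping the sender terminates the stream

        let mut stream = spawn(rx);
        assert_eq!(stream.wait_stream(), Some(Ok(7)));
        assert_eq!(stream.wait_stream(), None);
    }
}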
|
||||
|
||||
impl<T> Spawn<T> {
|
||||
fn enter<F, R>(&mut self, unpark: &Arc<Unpark>, f: F) -> R
|
||||
where F: FnOnce(&mut T) -> R
|
||||
{
|
||||
let task = BorrowedTask {
|
||||
id: self.id,
|
||||
unpark: unpark,
|
||||
events: Events::new(),
|
||||
map: &self.data,
|
||||
};
|
||||
let obj = &mut self.obj;
|
||||
set(&task, || f(obj))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for Spawn<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Spawn")
|
||||
.field("obj", &self.obj)
|
||||
.field("id", &self.id)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait which represents a sink of notifications that a future is ready to
|
||||
/// make progress.
|
||||
///
|
||||
/// This trait is provided as an argument to the `Spawn::poll_future` and
|
||||
/// `Spawn::poll_stream` functions. It's transitively used as part of the
|
||||
/// `Task::unpark` method to internally deliver notifications of readiness of a
|
||||
/// future to move forward.
|
||||
pub trait Unpark: Send + Sync {
|
||||
/// Indicates that an associated future and/or task are ready to make
|
||||
/// progress.
|
||||
///
|
||||
/// Typically this means that the receiver of the notification should
|
||||
/// arrange for the future to get poll'd in a prompt fashion.
|
||||
fn unpark(&self);
|
||||
}
|
||||
|
||||
/// A trait representing requests to poll futures.
|
||||
///
|
||||
/// This trait is an argument to `Spawn::execute`, which is used to run a
|
||||
/// future to completion. An executor will receive requests to run a future and
|
||||
/// an executor is responsible for ensuring that happens in a timely fashion.
|
||||
pub trait Executor: Send + Sync + 'static {
|
||||
/// Requests that `Run` is executed soon on the given executor.
|
||||
fn execute(&self, r: Run);
|
||||
}
|
||||
|
||||
struct ThreadUnpark {
|
||||
thread: thread::Thread,
|
||||
ready: AtomicBool,
|
||||
}
|
||||
|
||||
impl ThreadUnpark {
|
||||
fn new(thread: thread::Thread) -> ThreadUnpark {
|
||||
ThreadUnpark {
|
||||
thread: thread,
|
||||
ready: AtomicBool::new(false),
|
||||
}
|
||||
}
|
||||
|
||||
fn park(&self) {
|
||||
if !self.ready.swap(false, Ordering::SeqCst) {
|
||||
thread::park();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Unpark for ThreadUnpark {
|
||||
fn unpark(&self) {
|
||||
self.ready.store(true, Ordering::SeqCst);
|
||||
self.thread.unpark()
|
||||
}
|
||||
}
|
||||
|
||||
/// Units of work submitted to an `Executor`, currently only created
|
||||
/// internally.
|
||||
pub struct Run {
|
||||
spawn: Spawn<BoxFuture<(), ()>>,
|
||||
inner: Arc<Inner>,
|
||||
}
|
||||
|
||||
struct Inner {
|
||||
mutex: UnparkMutex<Run>,
|
||||
exec: Arc<Executor>,
|
||||
}
|
||||
|
||||
impl Run {
|
||||
/// Actually run the task (invoking `poll` on its future) on the current
|
||||
/// thread.
|
||||
pub fn run(self) {
|
||||
let Run { mut spawn, inner } = self;
|
||||
|
||||
// SAFETY: the ownership of this `Run` object is evidence that
|
||||
// we are in the `POLLING`/`REPOLL` state for the mutex.
|
||||
unsafe {
|
||||
inner.mutex.start_poll();
|
||||
|
||||
loop {
|
||||
match spawn.poll_future(inner.clone()) {
|
||||
Ok(Async::NotReady) => {}
|
||||
Ok(Async::Ready(())) |
|
||||
Err(()) => return inner.mutex.complete(),
|
||||
}
|
||||
let run = Run { spawn: spawn, inner: inner.clone() };
|
||||
match inner.mutex.wait(run) {
|
||||
Ok(()) => return, // we've waited
|
||||
Err(r) => spawn = r.spawn, // someone's notified us
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Run {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Run")
|
||||
.field("contents", &"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Unpark for Inner {
|
||||
fn unpark(&self) {
|
||||
match self.mutex.notify() {
|
||||
Ok(run) => self.exec.execute(run),
|
||||
Err(()) => {}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,131 @@
|
|||
#![cfg(feature = "with-deprecated")]
|
||||
#![allow(deprecated)]
|
||||
#![deprecated(since = "0.1.4",
|
||||
note = "replaced with `BiLock` in many cases, otherwise slated \
|
||||
for removal due to confusion")]
|
||||
|
||||
use std::prelude::v1::*;
|
||||
use std::sync::Arc;
|
||||
use std::cell::UnsafeCell;
|
||||
|
||||
// One critical piece of this module's contents are the `TaskRc<A>` handles.
|
||||
// The purpose of this is to conceptually be able to store data in a task,
|
||||
// allowing it to be accessed within multiple futures at once. For example if
|
||||
// you have some concurrent futures working, they may all want mutable access to
|
||||
// some data. We already know that when the futures are being poll'ed that we're
|
||||
// entirely synchronized (aka `&mut Task`), so you shouldn't require an
|
||||
// `Arc<Mutex<T>>` to share as the synchronization isn't necessary!
|
||||
//
|
||||
// So the idea here is that you insert data into a task via `Task::insert`, and
|
||||
// a handle to that data is then returned to you. That handle can later get
|
||||
// presented to the task itself to actually retrieve the underlying data. The
|
||||
// invariant is that the data can only ever be accessed with the task present,
|
||||
// and the lifetime of the actual data returned is connected to the lifetime of
|
||||
// the task itself.
|
||||
//
|
||||
// Conceptually I at least like to think of this as "dynamically adding more
|
||||
// struct fields to a `Task`". Each call to insert creates a new "name" for the
|
||||
// struct field, a `TaskRc<A>`, and then you can access the fields of a struct
|
||||
// with the struct itself (`Task`) as well as the name of the field
|
||||
// (`TaskRc<A>`). If that analogy doesn't make sense then oh well, it at least
|
||||
// helped me!
|
||||
//
|
||||
// So anyway, we do some interesting trickery here to actually get it to work.
|
||||
// Each `TaskRc<A>` handle stores `Arc<UnsafeCell<A>>`. So it turns out, we're
|
||||
// not even adding data to the `Task`! Each `TaskRc<A>` contains a reference
|
||||
// to this `Arc`, and `TaskRc` handles can be cloned which just bumps the
|
||||
// reference count on the `Arc` itself.
|
||||
//
|
||||
// As before, though, you can present the `Arc` to a `Task` and if they
|
||||
// originated from the same place you're allowed safe access to the internals.
|
||||
// We allow both shared and mutable access without the `Sync` bound on the data,
|
||||
// crucially noting that a `Task` itself is not `Sync`.
|
||||
//
|
||||
// So hopefully I've convinced you of this point that the `get` and `get_mut`
|
||||
// methods below are indeed safe. The data is always valid as it's stored in an
|
||||
// `Arc`, and access is only allowed with the proof of the associated `Task`.
|
||||
// One thing you might be asking yourself though is what exactly is this "proof
|
||||
// of a task"? Right now it's a `usize` corresponding to the `Task`'s
|
||||
// `TaskHandle` arc allocation.
|
||||
//
|
||||
// Wait a minute, isn't that the ABA problem! That is, we create a task A, add
|
||||
// some data to it, destroy task A, do some work, create a task B, and then ask
|
||||
// to get the data from task B. In this case though the point of the
|
||||
// `task_inner` "proof" field is simply that there's some non-`Sync` token
|
||||
// proving that you can get access to the data. So while weird, this case should
|
||||
// still be safe, as the data's not stored in the task itself.
|
||||
|
||||
/// A reference to a piece of data that's accessible only within a specific
|
||||
/// `Task`.
|
||||
///
|
||||
/// This data is `Send` even when `A` is not `Sync`, because the data stored
|
||||
/// within is accessed in a single-threaded way. The thread accessing it may
|
||||
/// change over time, if the task migrates, so `A` must be `Send`.
|
||||
#[derive(Debug)]
|
||||
pub struct TaskRc<A> {
|
||||
task_id: usize,
|
||||
ptr: Arc<UnsafeCell<A>>,
|
||||
}
|
||||
|
||||
// for safety here, see docs at the top of this module
|
||||
unsafe impl<A: Send> Send for TaskRc<A> {}
|
||||
unsafe impl<A: Sync> Sync for TaskRc<A> {}
|
||||
|
||||
impl<A> TaskRc<A> {
|
||||
/// Inserts a new piece of task-local data into this task, returning a
|
||||
/// reference to it.
|
||||
///
|
||||
/// Ownership of the data will be transferred to the task, and the data will
|
||||
/// be destroyed when the task itself is destroyed. The returned value can
|
||||
/// be passed to the `with` method to get a reference back to the original
|
||||
/// data.
|
||||
///
|
||||
/// Note that the returned handle is cloneable and copyable and can be sent
|
||||
/// to other futures which will be associated with the same task. All
|
||||
/// futures will then have access to this data when passed the reference
|
||||
/// back.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if a task is not currently running.
|
||||
pub fn new(a: A) -> TaskRc<A> {
|
||||
super::with(|task| {
|
||||
TaskRc {
|
||||
task_id: task.id,
|
||||
ptr: Arc::new(UnsafeCell::new(a)),
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Operate with a reference to the underlying data.
|
||||
///
|
||||
/// This method should be passed a handle previously returned by
|
||||
/// `Task::insert`. That handle, when passed back into this method, will
|
||||
/// retrieve a reference to the original data.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method will panic if a task is not currently running or if `self`
|
||||
/// does not belong to the task that is currently running. That is, if
|
||||
/// another task generated the `data` handle passed in, this method will
|
||||
/// panic.
|
||||
pub fn with<F, R>(&self, f: F) -> R
|
||||
where F: FnOnce(&A) -> R
|
||||
{
|
||||
// for safety here, see docs at the top of this module
|
||||
super::with(|task| {
|
||||
assert!(self.task_id == task.id,
|
||||
"TaskRc being accessed on task it does not belong to");
|
||||
f(unsafe { &*self.ptr.get() })
|
||||
})
|
||||
}
|
||||
}
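// Illustrative sketch (not part of the original source): creating a `TaskRc`
// and reading it back, all within one running task (provided here by
// `future::lazy`).
#[cfg(test)]
mod task_rc_sketch {
    use Future;
    use future::lazy;

    use super::TaskRc;

    #[test]
    fn shared_within_one_task() {
        lazy(|| {
            let rc = TaskRc::new(vec![1, 2, 3]);
            let copy = rc.clone();                    // cheap handle to the same data
            let sum: i32 = copy.with(|v| v.iter().sum());
            assert_eq!(sum, 6);
            Ok::<(), ()>(())
        }).wait().unwrap();
    }
}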
|
||||
|
||||
impl<A> Clone for TaskRc<A> {
|
||||
fn clone(&self) -> TaskRc<A> {
|
||||
TaskRc {
|
||||
task_id: self.task_id,
|
||||
ptr: self.ptr.clone(),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,144 @@
|
|||
use std::cell::UnsafeCell;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
|
||||
/// A "lock" around data `D`, which employs a *helping* strategy.
|
||||
///
|
||||
/// Used to ensure that concurrent `unpark` invocations lead to (1) `poll` being
|
||||
/// invoked on only a single thread at a time, and (2) `poll` being invoked at least
|
||||
/// once after each `unpark` (unless the future has completed).
|
||||
pub struct UnparkMutex<D> {
|
||||
// The state of task execution (state machine described below)
|
||||
status: AtomicUsize,
|
||||
|
||||
// The actual task data, accessible only in the POLLING state
|
||||
inner: UnsafeCell<Option<D>>,
|
||||
}
|
||||
|
||||
// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
|
||||
// acquisition failure, the current lockholder performs the desired work --
|
||||
// re-polling.
|
||||
//
|
||||
// As such, these impls mirror those for `Mutex<D>`. In particular, a reference
|
||||
// to `UnparkMutex` can be used to gain `&mut` access to the inner data, which
|
||||
// must therefore be `Send`.
|
||||
unsafe impl<D: Send> Send for UnparkMutex<D> {}
|
||||
unsafe impl<D: Send> Sync for UnparkMutex<D> {}
|
||||
|
||||
// There are four possible task states, listed below with their possible
|
||||
// transitions:
|
||||
|
||||
// The task is blocked, waiting on an event
|
||||
const WAITING: usize = 0; // --> POLLING
|
||||
|
||||
// The task is actively being polled by a thread; arrival of additional events
|
||||
// of interest should move it to the REPOLL state
|
||||
const POLLING: usize = 1; // --> WAITING, REPOLL, or COMPLETE
|
||||
|
||||
// The task is actively being polled, but will need to be re-polled upon
|
||||
// completion to ensure that all events were observed.
|
||||
const REPOLL: usize = 2; // --> POLLING
|
||||
|
||||
// The task has finished executing (either successfully or with an error/panic)
|
||||
const COMPLETE: usize = 3; // No transitions out
|
||||
|
||||
impl<D> UnparkMutex<D> {
|
||||
pub fn new() -> UnparkMutex<D> {
|
||||
UnparkMutex {
|
||||
status: AtomicUsize::new(WAITING),
|
||||
inner: UnsafeCell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempt to "notify" the mutex that a poll should occur.
|
||||
///
|
||||
/// An `Ok` result indicates that the `POLLING` state has been entered, and
|
||||
/// the caller can proceed to poll the future. An `Err` result indicates
|
||||
/// that polling is not necessary (because the task is finished or the
|
||||
/// polling has been delegated).
|
||||
pub fn notify(&self) -> Result<D, ()> {
|
||||
let mut status = self.status.load(SeqCst);
|
||||
loop {
|
||||
match status {
|
||||
// The task is idle, so try to run it immediately.
|
||||
WAITING => {
|
||||
match self.status.compare_exchange(WAITING, POLLING,
|
||||
SeqCst, SeqCst) {
|
||||
Ok(_) => {
|
||||
let data = unsafe {
|
||||
// SAFETY: we've ensured mutual exclusion via
|
||||
// the status protocol; we are the only thread
|
||||
// that has transitioned to the POLLING state,
|
||||
// and we won't transition back to WAITING until
|
||||
// the lock is "released" by this thread. See
|
||||
// the protocol diagram above.
|
||||
(*self.inner.get()).take().unwrap()
|
||||
};
|
||||
return Ok(data);
|
||||
}
|
||||
Err(cur) => status = cur,
|
||||
}
|
||||
}
|
||||
|
||||
// The task is being polled, so we need to record that it should
|
||||
// be *repolled* when complete.
|
||||
POLLING => {
|
||||
match self.status.compare_exchange(POLLING, REPOLL,
|
||||
SeqCst, SeqCst) {
|
||||
Ok(_) => return Err(()),
|
||||
Err(cur) => status = cur,
|
||||
}
|
||||
}
|
||||
|
||||
// The task is already scheduled for polling, or is complete, so
|
||||
// we've got nothing to do.
|
||||
_ => return Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Alert the mutex that polling is about to begin, clearing any accumulated
|
||||
/// re-poll requests.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Callable only from the `POLLING`/`REPOLL` states, i.e. between
|
||||
/// successful calls to `notify` and `wait`/`complete`.
|
||||
pub unsafe fn start_poll(&self) {
|
||||
self.status.store(POLLING, SeqCst);
|
||||
}
|
||||
|
||||
/// Alert the mutex that polling completed with NotReady.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Callable only from the `POLLING`/`REPOLL` states, i.e. between
|
||||
/// successful calls to `notify` and `wait`/`complete`.
|
||||
pub unsafe fn wait(&self, data: D) -> Result<(), D> {
|
||||
*self.inner.get() = Some(data);
|
||||
|
||||
match self.status.compare_exchange(POLLING, WAITING, SeqCst, SeqCst) {
|
||||
// no unparks came in while we were running
|
||||
Ok(_) => Ok(()),
|
||||
|
||||
// guaranteed to be in REPOLL state; just clobber the
|
||||
// state and run again.
|
||||
Err(status) => {
|
||||
assert_eq!(status, REPOLL);
|
||||
self.status.store(POLLING, SeqCst);
|
||||
Err((*self.inner.get()).take().unwrap())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Alert the mutex that the task has completed execution and should not be
|
||||
/// notified again.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// Callable only from the `POLLING`/`REPOLL` states, i.e. between
|
||||
/// successful calls to `notify` and `wait`/`complete`.
|
||||
pub unsafe fn complete(&self) {
|
||||
self.status.store(COMPLETE, SeqCst);
|
||||
}
|
||||
}
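// Illustrative sketch (not part of the original source): one round of the
// notify -> start_poll -> wait/complete protocol described above, with a plain
// `String` standing in for the task data. The initial `start_poll` mirrors how
// `Spawn::execute` seeds a freshly created mutex.
#[cfg(test)]
mod protocol_sketch {
    use super::UnparkMutex;

    #[test]
    fn one_round_trip() {
        let mutex: UnparkMutex<String> = UnparkMutex::new();

        // Park some data so that a later `notify` has something to hand out.
        unsafe {
            mutex.start_poll();
            mutex.wait("task data".to_string()).unwrap();
        }

        // A wakeup arrives: we win the right to poll and get the data back.
        let data = mutex.notify().unwrap();
        assert_eq!(data, "task data");

        // Polling finished for good; later notifications are ignored.
        unsafe {
            mutex.start_poll();
            mutex.complete();
        }
        assert!(mutex.notify().is_err());
    }
}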