Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1443988 - P2: Update futures and futures-cpupool crates. r=mbrubeck
* futures: 0.1.13 -> 0.1.18
* futures-cpupool: 0.1.5 -> 0.1.8

MozReview-Commit-ID: LDYFHxBfQMU

--HG--
extra : rebase_source : f1693246c545da9dcf32a5ae72fb023c9d565061
Parent
c0770d8852
Commit
431554b5cf
@ -56,7 +56,7 @@ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cubeb 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",

@ -76,8 +76,8 @@ dependencies = [
"audioipc 0.2.1",
"cubeb-backend 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"foreign-types 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"tokio-core 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",

@ -92,7 +92,7 @@ dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
"cubeb 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)",
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
"lazycell 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",

@ -644,15 +644,15 @@ dependencies = [
[[package]]
name = "futures"
-version = "0.1.13"
+version = "0.1.18"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "futures-cpupool"
-version = "0.1.5"
+version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]

@ -1839,7 +1839,7 @@ version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.9 (registry+https://github.com/rust-lang/crates.io-index)",

@ -1854,7 +1854,7 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]

@ -1864,7 +1864,7 @@ version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bytes 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
-"futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
+"futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
"iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.33 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2279,8 +2279,8 @@ dependencies = [
|
|||
"checksum fs2 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9ab76cfd2aaa59b7bf6688ad9ba15bbae64bff97f04ea02144cfd3443e5c2866"
|
||||
"checksum fuchsia-zircon 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f6c0581a4e363262e52b87f59ee2afe3415361c6ec35e665924eb08afe8ff159"
|
||||
"checksum fuchsia-zircon-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "43f3795b4bae048dc6123a6b972cadde2e676f9ded08aef6bb77f5f157684a82"
|
||||
"checksum futures 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)" = "55f0008e13fc853f79ea8fc86e931486860d4c4c156cdffb59fa5f7fa833660a"
|
||||
"checksum futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "a283c84501e92cade5ea673a2a7ca44f71f209ccdd302a3e0896f50083d2c5ff"
|
||||
"checksum futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "0bab5b5e94f5c31fc764ba5dd9ad16568aae5d4825538c01d6bca680c9bf94a7"
|
||||
"checksum futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"
|
||||
"checksum fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c"
|
||||
"checksum gcc 0.3.54 (registry+https://github.com/rust-lang/crates.io-index)" = "5e33ec290da0d127825013597dbdfc28bee4964690c7ce1166cbc2a7bd08b1bb"
|
||||
"checksum gdi32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0912515a8ff24ba900422ecda800b52f4016a56251922d397c576bf92c690518"
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"07c97c2816b3cc41857a0cbbb5109f2a7ef2bd81131a3f4f3621f438a1eb7561","README.md":"09c5f4bacff34b3f7e1969f5b9590c062a8aabac7c2442944eab1d2fc1301373","src/lib.rs":"a368e87ed6f93552ba12391cd765d0b0b34b9fe42617a2c1f6a5ce81a0c5de11","tests/smoke.rs":"3e237fc14d19775026f6cff45d73de6bb6b4db6699ce8ab4972ed85165200ec2"},"package":"a283c84501e92cade5ea673a2a7ca44f71f209ccdd302a3e0896f50083d2c5ff"}
|
||||
{"files":{"Cargo.toml":"d65d12c309bb5af442353ceb79339c2d426b1ed643f5eddee14ad22637225ca2","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"69036b033e4bb951821964dbc3d9b1efe6913a6e36d9c1f206de4035a1a85cc4","README.md":"09c5f4bacff34b3f7e1969f5b9590c062a8aabac7c2442944eab1d2fc1301373","src/lib.rs":"2bffe7435a2c13028978955882338fbb9df3644f725a7e9d27b5f1495e3e9f90","tests/smoke.rs":"4c07aad02b0dd17f4723f3be1abbe320629b9e0756c885b44cbc1268141668f1"},"package":"ab90cde24b3319636588d0c35fe03b1333857621051837ed769faefb4c2162e4"}
|
|
@ -1,24 +1,31 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "futures-cpupool"
|
||||
version = "0.1.5"
|
||||
version = "0.1.8"
|
||||
authors = ["Alex Crichton <alex@alexcrichton.com>"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/alexcrichton/futures-rs"
|
||||
description = "An implementation of thread pools which hand out futures to the results of the\ncomputation on the threads themselves.\n"
|
||||
homepage = "https://github.com/alexcrichton/futures-rs"
|
||||
documentation = "https://docs.rs/futures-cpupool"
|
||||
description = """
|
||||
An implementation of thread pools which hand out futures to the results of the
|
||||
computation on the threads themselves.
|
||||
"""
|
||||
|
||||
[dependencies]
|
||||
num_cpus = "1.0"
|
||||
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/alexcrichton/futures-rs"
|
||||
[dependencies.futures]
|
||||
path = ".."
|
||||
version = "0.1"
|
||||
default-features = false
|
||||
features = ["use_std"]
|
||||
default-features = false
|
||||
|
||||
[dependencies.num_cpus]
|
||||
version = "1.0"
|
||||
|
||||
[features]
|
||||
default = ["with-deprecated"]
|
||||
|
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2016 Alex Crichton
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@ -13,8 +13,8 @@
//! use futures::Future;
//! use futures_cpupool::CpuPool;
//!
-//! # fn long_running_future(a: u32) -> futures::future::BoxFuture<u32, ()> {
-//! # futures::future::result(Ok(a)).boxed()
+//! # fn long_running_future(a: u32) -> Box<futures::future::Future<Item = u32, Error = ()> + Send> {
+//! # Box::new(futures::future::result(Ok(a)))
//! # }
//! # fn main() {
//!
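The hunk above moves the crate docs off the deprecated `BoxFuture`/`.boxed()` helpers and onto plain boxed trait objects. A minimal standalone sketch of the same pattern, assuming `futures = "0.1"` in Cargo.toml (not part of the diff; the function name is illustrative):

```rust
// Returning a boxed future with the futures 0.1 API:
// `Box::new(...)` replaces the deprecated `.boxed()` combinator.
extern crate futures;

use futures::Future;
use futures::future;

fn long_running_future(a: u32) -> Box<Future<Item = u32, Error = ()> + Send> {
    Box::new(future::result(Ok(a)))
}

fn main() {
    // Box<Future> itself implements Future, so combinators keep working.
    let answer = long_running_future(21).map(|n| n * 2).wait().unwrap();
    assert_eq!(answer, 42);
}
```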
@ -35,6 +35,7 @@
|
|||
//! ```
|
||||
|
||||
#![deny(missing_docs)]
|
||||
#![deny(missing_debug_implementations)]
|
||||
|
||||
extern crate futures;
|
||||
extern crate num_cpus;
|
||||
|
@ -44,11 +45,12 @@ use std::sync::{Arc, Mutex};
|
|||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use std::fmt;
|
||||
|
||||
use futures::{IntoFuture, Future, Poll, Async};
|
||||
use futures::future::lazy;
|
||||
use futures::future::{lazy, Executor, ExecuteError};
|
||||
use futures::sync::oneshot::{channel, Sender, Receiver};
|
||||
use futures::executor::{self, Run, Executor};
|
||||
use futures::executor::{self, Run, Executor as OldExecutor};
|
||||
|
||||
/// A thread pool intended to run CPU intensive work.
|
||||
///
|
||||
|
@ -78,6 +80,7 @@ pub struct CpuPool {
|
|||
/// of CPUs on the host. But you can change it until you call `create()`.
|
||||
pub struct Builder {
|
||||
pool_size: usize,
|
||||
stack_size: usize,
|
||||
name_prefix: Option<String>,
|
||||
after_start: Option<Arc<Fn() + Send + Sync>>,
|
||||
before_stop: Option<Arc<Fn() + Send + Sync>>,
|
||||
|
@ -89,20 +92,31 @@ struct MySender<F, T> {
|
|||
keep_running_flag: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
fn _assert() {
|
||||
fn _assert_send<T: Send>() {}
|
||||
fn _assert_sync<T: Sync>() {}
|
||||
_assert_send::<CpuPool>();
|
||||
_assert_sync::<CpuPool>();
|
||||
}
|
||||
trait AssertSendSync: Send + Sync {}
|
||||
impl AssertSendSync for CpuPool {}
|
||||
|
||||
struct Inner {
|
||||
tx: Mutex<mpsc::Sender<Message>>,
|
||||
rx: Mutex<mpsc::Receiver<Message>>,
|
||||
cnt: AtomicUsize,
|
||||
size: usize,
|
||||
after_start: Option<Arc<Fn() + Send + Sync>>,
|
||||
before_stop: Option<Arc<Fn() + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl fmt::Debug for CpuPool {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("CpuPool")
|
||||
.field("size", &self.inner.size)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Builder {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Builder")
|
||||
.field("pool_size", &self.pool_size)
|
||||
.field("name_prefix", &self.name_prefix)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// The type of future returned from the `CpuPool::spawn` function, which
|
||||
|
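The hunk above replaces the old `_assert()` helper functions with a marker-trait assertion (`trait AssertSendSync: Send + Sync` plus an impl for `CpuPool`). A small standalone sketch of that compile-time check, with a hypothetical type standing in for `CpuPool`:

```rust
use std::sync::Arc;

// Stand-in for CpuPool: a type we expect to be usable from many threads.
struct MyPool {
    shared: Arc<Vec<u32>>,
}

// If `MyPool` ever stops being Send + Sync (for example, it gains an `Rc`
// field), this impl no longer satisfies the supertrait bounds and the
// crate fails to compile.
trait AssertSendSync: Send + Sync {}
impl AssertSendSync for MyPool {}

fn main() {
    let pool = MyPool { shared: Arc::new(vec![1, 2, 3]) };
    println!("shared length: {}", pool.shared.len());
}
```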
@ -111,6 +125,7 @@ struct Inner {
|
|||
/// This future will resolve in the same way as the underlying future, and it
|
||||
/// will propagate panics.
|
||||
#[must_use]
|
||||
#[derive(Debug)]
|
||||
pub struct CpuFuture<T, E> {
|
||||
inner: Receiver<thread::Result<Result<T, E>>>,
|
||||
keep_running_flag: Arc<AtomicBool>,
|
||||
|
@ -129,8 +144,13 @@ impl CpuPool {
|
|||
/// thread pool.
|
||||
///
|
||||
/// This is a shortcut for:
|
||||
///
|
||||
/// ```rust
|
||||
/// # use futures_cpupool::{Builder, CpuPool};
|
||||
/// #
|
||||
/// # fn new(size: usize) -> CpuPool {
|
||||
/// Builder::new().pool_size(size).create()
|
||||
/// # }
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
|
@ -144,8 +164,13 @@ impl CpuPool {
|
|||
/// of CPUs on the host.
|
||||
///
|
||||
/// This is a shortcut for:
|
||||
///
|
||||
/// ```rust
|
||||
/// # use futures_cpupool::{Builder, CpuPool};
|
||||
/// #
|
||||
/// # fn new_num_cpus() -> CpuPool {
|
||||
/// Builder::new().create()
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn new_num_cpus() -> CpuPool {
|
||||
Builder::new().create()
|
||||
|
@ -178,7 +203,7 @@ impl CpuPool {
|
|||
{
|
||||
let (tx, rx) = channel();
|
||||
let keep_running_flag = Arc::new(AtomicBool::new(false));
|
||||
// AssertUnwindSafe is used here becuase `Send + 'static` is basically
|
||||
// AssertUnwindSafe is used here because `Send + 'static` is basically
|
||||
// an alias for an implementation of the `UnwindSafe` trait but we can't
|
||||
// express that in the standard library right now.
|
||||
let sender = MySender {
|
||||
|
@ -210,13 +235,22 @@ impl CpuPool {
|
|||
}
|
||||
}
|
||||
|
||||
impl<F> Executor<F> for CpuPool
|
||||
where F: Future<Item = (), Error = ()> + Send + 'static,
|
||||
{
|
||||
fn execute(&self, future: F) -> Result<(), ExecuteError<F>> {
|
||||
executor::spawn(future).execute(self.inner.clone());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl Inner {
|
||||
fn send(&self, msg: Message) {
|
||||
self.tx.lock().unwrap().send(msg).unwrap();
|
||||
}
|
||||
|
||||
fn work(&self) {
|
||||
self.after_start.as_ref().map(|fun| fun());
|
||||
fn work(&self, after_start: Option<Arc<Fn() + Send + Sync>>, before_stop: Option<Arc<Fn() + Send + Sync>>) {
|
||||
after_start.map(|fun| fun());
|
||||
loop {
|
||||
let msg = self.rx.lock().unwrap().recv().unwrap();
|
||||
match msg {
|
||||
|
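The new `impl<F> Executor<F> for CpuPool` above lets the pool be used anywhere that only needs the generic `futures::future::Executor` trait. A hedged usage sketch, not taken from the patch itself: `execute` is fire-and-forget, so a oneshot channel is used to observe the result from the main thread.

```rust
extern crate futures;
extern crate futures_cpupool;

use futures::Future;
use futures::future::{self, Executor};
use futures::sync::oneshot;
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new(2);
    let (tx, rx) = oneshot::channel();

    // `execute` accepts any Future<Item = (), Error = ()> + Send + 'static
    // and returns immediately once the future is queued on the pool.
    pool.execute(future::lazy(move || {
        let _ = tx.send(6 * 7);
        Ok::<(), ()>(())
    }))
    .unwrap();

    assert_eq!(rx.wait().unwrap(), 42);
}
```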
@ -224,7 +258,7 @@ impl Inner {
|
|||
Message::Close => break,
|
||||
}
|
||||
}
|
||||
self.before_stop.as_ref().map(|fun| fun());
|
||||
before_stop.map(|fun| fun());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -245,7 +279,7 @@ impl Drop for CpuPool {
|
|||
}
|
||||
}
|
||||
|
||||
impl Executor for Inner {
|
||||
impl OldExecutor for Inner {
|
||||
fn execute(&self, run: Run) {
|
||||
self.send(Message::Run(run))
|
||||
}
|
||||
|
@ -267,7 +301,7 @@ impl<T: Send + 'static, E: Send + 'static> Future for CpuFuture<T, E> {
|
|||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, E> {
|
||||
match self.inner.poll().expect("shouldn't be canceled") {
|
||||
match self.inner.poll().expect("cannot poll CpuFuture twice") {
|
||||
Async::Ready(Ok(Ok(e))) => Ok(e.into()),
|
||||
Async::Ready(Ok(Err(e))) => Err(e),
|
||||
Async::Ready(Err(e)) => panic::resume_unwind(e),
|
||||
|
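The `poll` change above keeps the documented behavior that a panic on a worker thread is re-raised in the caller via `panic::resume_unwind`. A sketch of what that looks like from the user's side (illustrative; the closure and message are made up):

```rust
extern crate futures;
extern crate futures_cpupool;

use std::panic;

use futures::Future;
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new(1);

    // The spawned closure panics on the worker thread...
    let fut = pool.spawn_fn(|| -> Result<(), ()> { panic!("boom") });

    // ...and the panic is resumed when the CpuFuture is waited on.
    let outcome = panic::catch_unwind(panic::AssertUnwindSafe(|| fut.wait()));
    assert!(outcome.is_err());
}
```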
@ -307,6 +341,7 @@ impl Builder {
|
|||
pub fn new() -> Builder {
|
||||
Builder {
|
||||
pool_size: num_cpus::get(),
|
||||
stack_size: 0,
|
||||
name_prefix: None,
|
||||
after_start: None,
|
||||
before_stop: None,
|
||||
|
@ -321,6 +356,12 @@ impl Builder {
|
|||
self
|
||||
}
|
||||
|
||||
/// Set stack size of threads in the pool.
|
||||
pub fn stack_size(&mut self, stack_size: usize) -> &mut Self {
|
||||
self.stack_size = stack_size;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set thread name prefix of a future CpuPool
|
||||
///
|
||||
/// Thread name prefix is used for generating thread names. For example, if prefix is
|
||||
|
@ -331,9 +372,11 @@ impl Builder {
|
|||
}
|
||||
|
||||
/// Execute function `f` right after each thread is started but before
|
||||
/// running any jobs on it
|
||||
/// running any jobs on it.
|
||||
///
|
||||
/// This is initially intended for bookkeeping and monitoring uses
|
||||
/// This is initially intended for bookkeeping and monitoring uses.
|
||||
/// The `f` will be deconstructed after the `builder` is deconstructed
|
||||
/// and all threads in the pool has executed it.
|
||||
pub fn after_start<F>(&mut self, f: F) -> &mut Self
|
||||
where F: Fn() + Send + Sync + 'static
|
||||
{
|
||||
|
@ -341,9 +384,11 @@ impl Builder {
|
|||
self
|
||||
}
|
||||
|
||||
/// Execute function `f` before each worker thread stops
|
||||
/// Execute function `f` before each worker thread stops.
|
||||
///
|
||||
/// This is initially intended for bookkeeping and monitoring uses
|
||||
/// This is initially intended for bookkeeping and monitoring uses.
|
||||
/// The `f` will be deconstructed after the `builder` is deconstructed
|
||||
/// and all threads in the pool has executed it.
|
||||
pub fn before_stop<F>(&mut self, f: F) -> &mut Self
|
||||
where F: Fn() + Send + Sync + 'static
|
||||
{
|
||||
|
@ -364,21 +409,42 @@ impl Builder {
|
|||
rx: Mutex::new(rx),
|
||||
cnt: AtomicUsize::new(1),
|
||||
size: self.pool_size,
|
||||
after_start: self.after_start.clone(),
|
||||
before_stop: self.before_stop.clone(),
|
||||
}),
|
||||
};
|
||||
assert!(self.pool_size > 0);
|
||||
|
||||
for counter in 0..self.pool_size {
|
||||
let inner = pool.inner.clone();
|
||||
let after_start = self.after_start.clone();
|
||||
let before_stop = self.before_stop.clone();
|
||||
let mut thread_builder = thread::Builder::new();
|
||||
if let Some(ref name_prefix) = self.name_prefix {
|
||||
thread_builder = thread_builder.name(format!("{}{}", name_prefix, counter));
|
||||
}
|
||||
thread_builder.spawn(move || inner.work()).unwrap();
|
||||
if self.stack_size > 0 {
|
||||
thread_builder = thread_builder.stack_size(self.stack_size);
|
||||
}
|
||||
thread_builder.spawn(move || inner.work(after_start, before_stop)).unwrap();
|
||||
}
|
||||
|
||||
return pool
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::mpsc;
|
||||
|
||||
#[test]
|
||||
fn test_drop_after_start() {
|
||||
let (tx, rx) = mpsc::sync_channel(2);
|
||||
let _cpu_pool = Builder::new()
|
||||
.pool_size(2)
|
||||
.after_start(move || tx.send(1).unwrap()).create();
|
||||
|
||||
// After Builder is deconstructed, the tx should be droped
|
||||
// so that we can use rx as an iterator.
|
||||
let count = rx.into_iter().count();
|
||||
assert_eq!(count, 2);
|
||||
}
|
||||
}
|
||||
|
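The `create()` changes above wire the new `stack_size` setting into `thread::Builder` and pass the `after_start`/`before_stop` hooks directly to each worker. A sketch of configuring a pool through `Builder` with these options (pool size, stack size, and name prefix are arbitrary example values):

```rust
extern crate futures;
extern crate futures_cpupool;

use futures::Future;
use futures_cpupool::Builder;

fn main() {
    let pool = Builder::new()
        .pool_size(4)
        .stack_size(4 * 1024 * 1024) // new knob in futures-cpupool 0.1.8
        .name_prefix("audio-worker-")
        .after_start(|| println!("worker up"))
        .before_stop(|| println!("worker down"))
        .create();

    // Run a small CPU-bound closure on the pool and wait for the result.
    let answer = pool.spawn_fn(|| Ok::<u32, ()>(6 * 7)).wait().unwrap();
    assert_eq!(answer, 42);
}
```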
|
|
@ -5,11 +5,11 @@ use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};
|
|||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
use futures::future::{Future, BoxFuture};
|
||||
use futures::future::Future;
|
||||
use futures_cpupool::{CpuPool, Builder};
|
||||
|
||||
fn done<T: Send + 'static>(t: T) -> BoxFuture<T, ()> {
|
||||
futures::future::ok(t).boxed()
|
||||
fn done<T: Send + 'static>(t: T) -> Box<Future<Item = T, Error = ()> + Send> {
|
||||
Box::new(futures::future::ok(t))
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -2,18 +2,20 @@ language: rust
|
|||
|
||||
matrix:
|
||||
include:
|
||||
- os: osx
|
||||
- rust: stable
|
||||
- rust: beta
|
||||
- rust: nightly
|
||||
env: BENCH=1
|
||||
before_script:
|
||||
- pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
|
||||
after_success:
|
||||
- travis-cargo doc-upload
|
||||
- os: linux
|
||||
rust: 1.10.0
|
||||
rust: 1.15.0
|
||||
script: cargo test
|
||||
rust:
|
||||
- stable
|
||||
- beta
|
||||
- nightly
|
||||
sudo: false
|
||||
before_script:
|
||||
- pip install 'travis-cargo<0.2' --user && export PATH=$HOME/.local/bin:$PATH
|
||||
script:
|
||||
- export CARGO_TARGET_DIR=`pwd`/target
|
||||
- cargo build
|
||||
- cargo build --no-default-features
|
||||
- cargo test
|
||||
|
@ -23,8 +25,7 @@ script:
|
|||
|
||||
- cargo doc --no-deps
|
||||
- cargo doc --no-deps --manifest-path futures-cpupool/Cargo.toml
|
||||
after_success:
|
||||
- travis-cargo --only nightly doc-upload
|
||||
- if [ "$BENCH" = "1" ]; then cargo bench; fi
|
||||
env:
|
||||
global:
|
||||
- secure: "iwVcMVIF7ZSY82fK5UyyUvVvJxMSYrbZawh1+4Oi8pvOdYq1gptcDoOC8jxWwCwrNF1b+/85n+jlEUngEqqSmV5PjAbWPjoc+u4Zn7CRi1AlxoUlvHPiQm4vM4Mkkd6GsqoIZttCeedU9m/w0nQ18uUtK8uD6vr2FVdcMnUnkYQAxuGOowGLrwidukzfBXMCu/JrwKMIbt61knAFiI/KJknu0h1mRrhpeF/sQ3tJFzRRcQeFJkbfwDzltMpPo1hq5D3HI4ONjYi/qO2pwUhDk4umfp9cLW9MS8rQvptxJTQmWemHi+f2/U4ld6a0URL6kEuMkt/EbH0A74eFtlicfRs44dX9MlWoqbLypnC3ymqmHcpwcwNA3HmZyg800MTuU+BPK41HIPdO9tPpxjHEiqvNDknH7qs+YBnis0eH7DHJgEjXq651PjW7pm+rnHPwsj+OzKE1YBNxBQZZDkS3VnZJz+O4tVsOzc3IOz0e+lf7VVuI17C9haj117nKp3umC4MVBA0S8RfreFgqpyDeY2zwcqOr0YOlEGGRl0vyWP8Qcxx12kQ7+doLolt6Kxda4uO0hKRmIF6+qki1T+L7v8BOGOtCncz4f7IX48eQ7+Wu0OtglRn45qAa3CxjUuW6xX3KSNH66PCXV0Jtp8Ga2SSevX2wtbbFu9f+9R+PQY4="
|
||||
|
@ -32,6 +33,3 @@ env:
|
|||
notifications:
|
||||
email:
|
||||
on_success: never
|
||||
os:
|
||||
- linux
|
||||
- osx
|
||||
|
|
|
@ -0,0 +1,289 @@
|
|||
# 0.1.17 - 2017-10-31
|
||||
|
||||
* Add a `close` method on `sink::Wait`
|
||||
* Undeprecate `stream::iter` as `stream::iter_result`
|
||||
* Improve performance of wait-related methods
|
||||
* Tweak buffered sinks with a 0 capacity to forward directly to the underlying
|
||||
sink.
|
||||
* Add `FromIterator` implementation for `FuturesOrdered` and `FuturesUnordered`.
|
||||
|
||||
# 0.1.16 - 2017-09-15
|
||||
|
||||
* A `prelude` module has been added to glob import from and pick up a whole
|
||||
bunch of useful types
|
||||
* `sync::mpsc::Sender::poll_ready` has been added as an API
|
||||
* `sync::mpsc::Sender::try_send` has been added as an API
|
||||
|
||||
# 0.1.15 - 2017-08-24
|
||||
|
||||
* Improve performance of `BiLock` methods
|
||||
* Implement `Clone` for `FutureResult`
|
||||
* Forward `Stream` trait through `SinkMapErr`
|
||||
* Add `stream::futures_ordered` next to `futures_unordered`
|
||||
* Reimplement `Stream::buffered` on top of `stream::futures_ordered` (much more
|
||||
efficient at scale).
|
||||
* Add a `with_notify` function for abstractions which previously required
|
||||
`UnparkEvent`.
|
||||
* Add `get_ref`/`get_mut`/`into_inner` functions for stream take/skip methods
|
||||
* Add a `Clone` implementation for `SharedItem` and `SharedError`
|
||||
* Add a `mpsc::spawn` function to spawn a `Stream` into an `Executor`
|
||||
* Add a `reunite` function for `BiLock` and the split stream/sink types to
|
||||
rejoin two halves and reclaim the original item.
|
||||
* Add `stream::poll_fn` to behave similarly to `future::poll_fn`
|
||||
* Add `Sink::with_flat_map` like `Iterator::flat_map`
|
||||
* Bump the minimum Rust version to 1.13.0
|
||||
* Expose `AtomicTask` in the public API for managing synchronization around task
|
||||
notifications.
|
||||
* Unify the `Canceled` type of the `sync` and `unsync` modules.
|
||||
* Deprecate the `boxed` methods. These methods have caused more confusion than
|
||||
they've solved historically, so it's recommended to use a local extension
|
||||
trait or a local helper instead of the trait-based methods.
|
||||
* Deprecate the `Stream::merge` method as it's less ergonomic than `select`.
|
||||
* Add `oneshot::Sender::is_canceled` to test if a oneshot is canceled off a
|
||||
task.
|
||||
* Deprecates `UnboundedSender::send` in favor of a method named `unbounded_send`
|
||||
to avoid a conflict with `Sink::send`.
|
||||
* Deprecate the `stream::iter` function in favor of an `stream::iter_ok` adaptor
|
||||
to avoid the need to deal with `Result` manually.
|
||||
* Add an `inspect` function to the `Future` and `Stream` traits along the lines
|
||||
of `Iterator::inspect`
|
||||
|
||||
# 0.1.14 - 2017-05-30
|
||||
|
||||
This is a relatively large release of the `futures` crate, although much of it
|
||||
is from reworking internals rather than new APIs. The banner feature of this
|
||||
release is that the `futures::{task, executor}` modules are now available in
|
||||
`no_std` contexts! A large refactoring of the task system was performed in
|
||||
PR #436 to accommodate custom memory allocation schemes and otherwise remove
|
||||
all dependencies on `std` for the task module. More details about this change
|
||||
can be found on the PR itself.
|
||||
|
||||
Other API additions in this release are:
|
||||
|
||||
* A `FuturesUnordered::push` method was added and the `FuturesUnordered` type
|
||||
itself was completely rewritten to efficiently track a large number of
|
||||
futures.
|
||||
* A `Task::will_notify_current` method was added with a slightly different
|
||||
implementation than `Task::is_current` but with stronger guarantees and
|
||||
documentation wording about its purpose.
|
||||
* Many combinators now have `get_ref`, `get_mut`, and `into_inner` methods for
|
||||
accessing internal futures and state.
|
||||
* A `Stream::concat2` method was added which should be considered the "fixed"
|
||||
version of `concat`, this one doesn't panic on empty streams.
|
||||
* An `Executor` trait has been added to represent abstracting over the concept
|
||||
of spawning a new task. Crates which only need the ability to spawn a future
|
||||
can now be generic over `Executor` rather than requiring a
|
||||
`tokio_core::reactor::Handle`.
|
||||
|
||||
As with all 0.1.x releases this PR is intended to be 100% backwards compatible.
|
||||
All code that previously compiled should continue to do so with these changes.
|
||||
As with other changes, though, there are also some updates to be aware of:
|
||||
|
||||
* The `task::park` function has been renamed to `task::current`.
|
||||
* The `Task::unpark` function has been renamed to `Task::notify`, and in general
|
||||
terminology around "unpark" has shifted to terminology around "notify"
|
||||
* The `Unpark` trait has been deprecated in favor of the `Notify` trait
|
||||
mentioned above.
|
||||
* The `UnparkEvent` structure has been deprecated. It currently should perform
|
||||
the same as it used to, but it's planned that in a future 0.1.x release the
|
||||
performance will regress for crates that have not transitioned away. The
|
||||
primary primitive to replace this is the addition of a `push` function on the
|
||||
`FuturesUnordered` type. If this does not help implement your use case though,
|
||||
please let us know!
|
||||
* The `Task::is_current` method is now deprecated, and you likely want to use
|
||||
`Task::will_notify_current` instead, but let us know if this doesn't suffice!
|
||||
|
||||
# 0.1.13 - 2017-04-05
|
||||
|
||||
* Add forwarding sink/stream impls for `stream::FromErr` and `sink::SinkFromErr`
|
||||
* Add `PartialEq` and `Eq` to `mpsc::SendError`
|
||||
* Reimplement `Shared` with `spawn` instead of `UnparkEvent`
|
||||
|
||||
# 0.1.12 - 2017-04-03
|
||||
|
||||
* Add `Stream::from_err` and `Sink::from_err`
|
||||
* Allow `SendError` to be `Clone` when possible
|
||||
|
||||
# 0.1.11 - 2017-03-13
|
||||
|
||||
The major highlight of this release is the addition of a new "default" method on
|
||||
the `Sink` trait, `Sink::close`. This method is used to indicate to a sink that
|
||||
no new values will ever need to get pushed into it. This can be used to
|
||||
implement graceful shutdown of protocols and otherwise simply indicates to a
|
||||
sink that it can start freeing up resources.
|
||||
|
||||
Currently this method is **not** a default method to preserve backwards
|
||||
compatibility, but it's intended to become a default method in the 0.2 series of
|
||||
the `futures` crate. It's highly recommended to audit implementations of `Sink`
|
||||
to implement the `close` method as is fit.
|
||||
|
||||
Other changes in this release are:
|
||||
|
||||
* A new select combinator, `Future::select2` was added for a heterogeneous
|
||||
select.
|
||||
* A `Shared::peek` method was added to check to see if it's done.
|
||||
* `Sink::map_err` was implemented
|
||||
* The `log` dependency was removed
|
||||
* Implementations of the `Debug` trait are now generally available.
|
||||
* The `stream::IterStream` type was renamed to `stream::Iter` (with a reexport
|
||||
for the old name).
|
||||
* Add a `Sink::wait` method which returns an adapter to use an arbitrary `Sink`
|
||||
synchronously.
|
||||
* A `Stream::concat` method was added to concatenate a sequence of lists.
|
||||
* The `oneshot::Sender::complete` method was renamed to `send` and now returns a
|
||||
`Result` indicating successful transmission of a message or not. Note that the
|
||||
`complete` method still exists, it's just deprecated.
|
||||
|
||||
# 0.1.10 - 2017-01-30
|
||||
|
||||
* Add a new `unsync` module which mirrors `sync` to the extent that it can but
|
||||
is intended to not perform cross-thread synchronization (only usable within
|
||||
one thread).
|
||||
* Tweak `Shared` to work when handles may not get poll'd again.
|
||||
|
||||
# 0.1.9 - 2017-01-18
|
||||
|
||||
* Fix `Send/Sync` of a few types
|
||||
* Add `future::tail_fn` for more easily writing loops
|
||||
* Export SharedItem/SharedError
|
||||
* Remove an unused type parameter in `from_err`
|
||||
|
||||
# 0.1.8 - 2017-01-11
|
||||
|
||||
* Fix some race conditions in the `Shared` implementation
|
||||
* Add `Stream::take_while`
|
||||
* Fix an unwrap in `stream::futures_unordered`
|
||||
* Generalize `Stream::for_each`
|
||||
* Add `Stream::chain`
|
||||
* Add `stream::repeat`
|
||||
* Relax `&mut self` to `&self` in `UnboundedSender::send`
|
||||
|
||||
# 0.1.7 - 2016-12-18
|
||||
|
||||
* Add a `Future::shared` method for creating a future that can be shared
|
||||
amongst threads by cloning the future itself. All derivative futures
|
||||
will resolve to the same value once the original future has been
|
||||
resolved.
|
||||
* Add a `FutureFrom` trait for future-based conversion
|
||||
* Fix a wakeup bug in `Receiver::close`
|
||||
* Add `future::poll_fn` for quickly adapting a `Poll`-based function to
|
||||
a future.
|
||||
* Add an `Either` enum with two branches to easily create one future
|
||||
type based on two different futures created on two branches of control
|
||||
flow.
|
||||
* Remove the `'static` bound on `Unpark`
|
||||
* Optimize `send_all` and `forward` to send as many items as possible
|
||||
before calling `poll_complete`.
|
||||
* Unify the return types of the `ok`, `err`, and `result` future to
|
||||
assist returning different varieties in different branches of a function.
|
||||
* Add `CpuFuture::forget` to allow the computation to continue running
|
||||
after a drop.
|
||||
* Add a `stream::futures_unordered` combinator to turn a list of futures
|
||||
into a stream representing their order of completion.
|
||||
|
||||
# 0.1.6 - 2016-11-22
|
||||
|
||||
* Fix `Clone` bound on the type parameter on `UnboundedSender`
|
||||
|
||||
# 0.1.5 - 2016-11-22
|
||||
|
||||
* Fix `#![no_std]` support
|
||||
|
||||
# 0.1.4 - 2016-11-22
|
||||
|
||||
This is quite a large release relative to the previous point releases! As
|
||||
with all 0.1 releases, this release should be fully compatible with the 0.1.3
|
||||
release. If any incompatibilities are discovered please file an issue!
|
||||
|
||||
The largest changes in 0.1.4 are the addition of a `Sink` trait coupled with a
|
||||
reorganization of this crate. Note that all old locations for types/traits
|
||||
still exist, they're just deprecated and tagged with `#[doc(hidden)]`.
|
||||
|
||||
The new `Sink` trait is used to represent types which can periodically over
|
||||
time accept items, but may take some time to fully process the item before
|
||||
another can be accepted. Essentially, a sink is the opposite of a stream. This
|
||||
trait will then be used in the tokio-core crate to implement simple framing by
|
||||
modeling I/O streams as both a stream and a sink of frames.
|
||||
|
||||
The organization of this crate is to now have three primary submodules,
|
||||
`future`, `stream`, and `sink`. The traits as well as all combinator types are
|
||||
defined in these submodules. The traits and types like `Async` and `Poll` are
|
||||
then reexported at the top of the crate for convenient usage. It should be a
|
||||
relatively rare occasion that the modules themselves are reached into.
|
||||
|
||||
Finally, the 0.1.4 release comes with a new module, `sync`, in the futures
|
||||
crate. This is intended to be the home of a suite of futures-aware
|
||||
synchronization primitives. Currently this is inhabited with a `oneshot` module
|
||||
(the old `oneshot` function), a `mpsc` module for a new multi-producer
|
||||
single-consumer channel, and a `BiLock` type which represents sharing ownership
|
||||
of one value between two consumers. This module may expand over time with more
|
||||
types like a mutex, rwlock, spsc channel, etc.
|
||||
|
||||
Notable deprecations in the 0.1.4 release that will be deleted in an eventual
|
||||
0.2 release:
|
||||
|
||||
* The `TaskRc` type is now deprecated in favor of `BiLock` or otherwise `Arc`
|
||||
sharing.
|
||||
* All future combinators should be accessed through the `future` module, not
|
||||
the top-level of the crate.
|
||||
* The `Oneshot` and `Complete` types are now replaced with the `sync::oneshot`
|
||||
module.
|
||||
* Some old names like `collect` are deprecated in favor of more appropriately
|
||||
named versions like `join_all`
|
||||
* The `finished` constructor is now `ok`.
|
||||
* The `failed` constructor is now `err`.
|
||||
* The `done` constructor is now `result`.
|
||||
|
||||
As always, please report bugs to https://github.com/alexcrichton/futures-rs and
|
||||
we always love feedback! If you've got situations we don't cover, combinators
|
||||
you'd like to see, or slow code, please let us know!
|
||||
|
||||
Full changelog:
|
||||
|
||||
* Improve scalability of `buffer_unordered` combinator
|
||||
* Fix a memory ordering bug in oneshot
|
||||
* Add a new trait, `Sink`
|
||||
* Reorganize the crate into three primary modules
|
||||
* Add a new `sync` module for synchronization primitives
|
||||
* Add a `BiLock` sync primitive for two-way sharing
|
||||
* Deprecate `TaskRc`
|
||||
* Rename `collect` to `join_all`
|
||||
* Use a small vec in `Events` for improved clone performance
|
||||
* Add `Stream::select` for selecting items from two streams like `merge` but
|
||||
requiring the same types.
|
||||
* Add `stream::unfold` constructor
|
||||
* Add a `sync::mpsc` module with a futures-aware multi-producer single-consumer
|
||||
queue. Both bounded (with backpressure) and unbounded (no backpressure)
|
||||
variants are provided.
|
||||
* Renamed `failed`, `finished`, and `done` combinators to `err`, `ok`, and
|
||||
`result`.
|
||||
* Add `Stream::forward` to send all items to a sink, like `Sink::send_all`
|
||||
* Add `Stream::split` for streams which are both sinks and streams to have
|
||||
separate ownership of the stream/sink halves
|
||||
* Improve `join_all` with concurrency
|
||||
|
||||
# 0.1.3 - 2016-10-24
|
||||
|
||||
* Rewrite `oneshot` for efficiency and removing allocations on send/recv
|
||||
* Errors are passed through in `Stream::take` and `Stream::skip`
|
||||
* Add a `select_ok` combinator to pick the first of a list that succeeds
|
||||
* Remove the unnecessary `SelectAllNext` typedef
|
||||
* Add `Stream::chunks` for receiving chunks of data
|
||||
* Rewrite `stream::channel` for efficiency, correctness, and removing
|
||||
allocations
|
||||
* Remove `Send + 'static` bounds on the `stream::Empty` type
|
||||
|
||||
# 0.1.2 - 2016-10-04
|
||||
|
||||
* Fixed a bug in drop of `FutureSender`
|
||||
* Expose the channel `SendError` type
|
||||
* Add `Future::into_stream` to convert to a single-element stream
|
||||
* Add `Future::flatten_to_stream` to convert a future of a stream to a stream
|
||||
* impl Debug for SendError
|
||||
* Add stream::once for a one element stream
|
||||
* Accept IntoIterator in stream::iter
|
||||
* Add `Stream::catch_unwind`
|
||||
|
||||
# 0.1.1 - 2016-09-09
|
||||
|
||||
Initial release!
|
|
@ -1,29 +1,36 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "futures"
|
||||
version = "0.1.13"
|
||||
version = "0.1.18"
|
||||
authors = ["Alex Crichton <alex@alexcrichton.com>"]
|
||||
license = "MIT/Apache-2.0"
|
||||
readme = "README.md"
|
||||
keywords = ["futures", "async", "future"]
|
||||
repository = "https://github.com/alexcrichton/futures-rs"
|
||||
description = "An implementation of futures and streams featuring zero allocations,\ncomposability, and iterator-like interfaces.\n"
|
||||
homepage = "https://github.com/alexcrichton/futures-rs"
|
||||
documentation = "https://docs.rs/futures"
|
||||
description = """
|
||||
An implementation of futures and streams featuring zero allocations,
|
||||
composability, and iterator-like interfaces.
|
||||
"""
|
||||
readme = "README.md"
|
||||
keywords = ["futures", "async", "future"]
|
||||
categories = ["asynchronous"]
|
||||
|
||||
[badges]
|
||||
travis-ci = { repository = "alexcrichton/futures-rs" }
|
||||
appveyor = { repository = "alexcrichton/futures-rs" }
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/alexcrichton/futures-rs"
|
||||
|
||||
[dependencies]
|
||||
|
||||
[features]
|
||||
default = ["use_std", "with-deprecated"]
|
||||
use_std = []
|
||||
with-deprecated = []
|
||||
default = ["use_std", "with-deprecated"]
|
||||
[badges.appveyor]
|
||||
repository = "alexcrichton/futures-rs"
|
||||
|
||||
[workspace]
|
||||
members = ["futures-cpupool"]
|
||||
[badges.travis-ci]
|
||||
repository = "alexcrichton/futures-rs"
|
||||
|
|
|
@ -1,99 +0,0 @@
|
|||
# FAQ
|
||||
|
||||
A collection of some commonly asked questions, with responses! If you find any
|
||||
of these unsatisfactory feel free to ping me (@alexcrichton) on github,
|
||||
acrichto on IRC, or just by email!
|
||||
|
||||
### Why both `Item` and `Error` associated types?
|
||||
|
||||
An alternative design of the `Future` trait would be to only have one associated
|
||||
type, `Item`, and then most futures would resolve to `Result<T, E>`. The
|
||||
intention of futures, the fundamental support for async I/O, typically means
|
||||
that errors will be encoded in almost all futures anyway though. By encoding an
|
||||
error type in the future as well we're able to provide convenient combinators
|
||||
like `and_then` which automatically propagate errors, as well as combinators
|
||||
like `join` which can act differently depending on whether a future resolves to
|
||||
an error or not.
|
||||
|
||||
### Do futures work with multiple event loops?
|
||||
|
||||
Yes! Futures are designed to source events from any location, including multiple
|
||||
event loops. All of the basic combinators will work on any number of event loops
|
||||
across any number of threads.
|
||||
|
||||
### What if I have CPU intensive work?
|
||||
|
||||
The documentation of the `Future::poll` function says that's it's supposed to
|
||||
"return quickly", what if I have work that doesn't return quickly! In this case
|
||||
it's intended that this work will run on a dedicated pool of threads intended
|
||||
for this sort of work, and a future to the returned value is used to represent
|
||||
its completion.
|
||||
|
||||
A proof-of-concept method of doing this is the `futures-cpupool` crate in this
|
||||
repository, where you can execute work on a thread pool and receive a future to
|
||||
the value generated. This future is then composable with `and_then`, for
|
||||
example, to mesh in with the rest of a future's computation.
|
||||
|
||||
### How do I call `poll`?
|
||||
|
||||
In general it's not recommended to call `poll` unless you're implementing
|
||||
another `poll` function. If you need to poll a future, however, you can use
|
||||
`task::spawn` followed by the `poll_future` method on `Spawn<T>`.
|
||||
|
||||
### How do I return a future?
|
||||
|
||||
Returning a future is like returning an iterator in Rust today. It's not the
|
||||
easiest thing to do and you frequently need to resort to `Box` with a trait
|
||||
object. Thankfully though [`impl Trait`] is just around the corner and will
|
||||
allow returning these types unboxed in the future.
|
||||
|
||||
[`impl Trait`]: https://github.com/rust-lang/rust/issues/34511
|
||||
|
||||
For now though the cost of boxing shouldn't actually be that high. A future
|
||||
computation can be constructed *without boxing* and only the final step actually
|
||||
places a `Box` around the entire future. In that sense you're only paying the
|
||||
allocation at the very end, not for any of the intermediate futures.
|
||||
|
||||
More information can be found [in the tutorial][return-future].
|
||||
|
||||
[return-future]: https://github.com/alexcrichton/futures-rs/blob/master/TUTORIAL.md#returning-futures
|
||||
|
||||
### Does it work on Windows?
|
||||
|
||||
Yes! This library builds on top of mio, which works on Windows.
|
||||
|
||||
### What version of Rust should I use?
|
||||
|
||||
Rust 1.10 or later.
|
||||
|
||||
### Is it on crates.io?
|
||||
|
||||
Not yet! A few names are reserved, but crates cannot have dependencies from a
|
||||
git repository. Right now we depend on the master branch of `mio`, and crates
|
||||
will be published once that's on crates.io as well!
|
||||
|
||||
### Does this implement tail call optimization?
|
||||
|
||||
One aspect of many existing futures libraries is whether or not a tail call
|
||||
optimization is implemented. The exact meaning of this varies from framework to
|
||||
framework, but it typically boils down to whether common patterns can be
|
||||
implemented in such a way that prevents blowing the stack if the system is
|
||||
overloaded for a moment or leaking memory for the entire lifetime of a
|
||||
future/server.
|
||||
|
||||
For the prior case, blowing the stack, this typically arises as loops are often
|
||||
implemented through recursion with futures. This recursion can end up proceeding
|
||||
too quickly if the "loop" makes lots of turns very quickly. At this time neither
|
||||
the `Future` nor `Stream` traits handle tail call optimizations in this case,
|
||||
but rather combinators are patterns are provided to avoid recursion. For example
|
||||
a `Stream` implements `fold`, `for_each`, etc. These combinators can often be
|
||||
used to implement an asynchronous loop to avoid recursion, and they all execute
|
||||
in constant stack space. Note that we're very interested in exploring more
|
||||
generalized loop combinators, so PRs are always welcome!
|
||||
|
||||
For the latter case, leaking memory, this can happen where a future accidentally
|
||||
"remembers" all of its previous states when it'll never use them again. This
|
||||
also can arise through recursion or otherwise manufacturing of futures of
|
||||
infinite length. Like above, however, these also tend to show up in situations
|
||||
that would otherwise be expressed with a loop, so the same solutions should
|
||||
apply there regardless.
|
|
@ -16,7 +16,7 @@ First, add this to your `Cargo.toml`:

```toml
[dependencies]
-futures = "0.1.9"
+futures = "0.1.17"
```

Next, add this to your crate:
@ -39,13 +39,22 @@ a `#[no_std]` environment, use:

```toml
[dependencies]
futures = { version = "0.1", default-features = false }
futures = { version = "0.1.17", default-features = false }
```

# License

`futures-rs` is primarily distributed under the terms of both the MIT license and
the Apache License (Version 2.0), with portions covered by various BSD-like
licenses.
This project is licensed under either of

See LICENSE-APACHE, and LICENSE-MIT for details.
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
  http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
  http://opensource.org/licenses/MIT)

at your option.

### Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in Futures by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

@ -1,4 +1,24 @@
environment:

  # At the time this was added AppVeyor was having troubles with checking
  # revocation of SSL certificates of sites like static.rust-lang.org and what
  # we think is crates.io. The libcurl HTTP client by default checks for
  # revocation on Windows and according to a mailing list [1] this can be
  # disabled.
  #
  # The `CARGO_HTTP_CHECK_REVOKE` env var here tells cargo to disable SSL
  # revocation checking on Windows in libcurl. Note, though, that rustup, which
  # we're using to download Rust here, also uses libcurl as the default backend.
  # Unlike Cargo, however, rustup doesn't have a mechanism to disable revocation
  # checking. To get rustup working we set `RUSTUP_USE_HYPER` which forces it to
  # use the Hyper instead of libcurl backend. Both Hyper and libcurl use
  # schannel on Windows but it appears that Hyper configures it slightly
  # differently such that revocation checking isn't turned on by default.
  #
  # [1]: https://curl.haxx.se/mail/lib-2016-03/0202.html
  RUSTUP_USE_HYPER: 1
  CARGO_HTTP_CHECK_REVOKE: false

  matrix:
  - TARGET: x86_64-pc-windows-msvc
install:

@ -0,0 +1,121 @@
#![feature(test)]

extern crate futures;
extern crate test;

use futures::{Async, Poll};
use futures::executor;
use futures::executor::{Notify, NotifyHandle};
use futures::sync::BiLock;
use futures::sync::BiLockAcquire;
use futures::sync::BiLockAcquired;
use futures::future::Future;
use futures::stream::Stream;


use test::Bencher;

fn notify_noop() -> NotifyHandle {
    struct Noop;

    impl Notify for Noop {
        fn notify(&self, _id: usize) {}
    }

    const NOOP : &'static Noop = &Noop;

    NotifyHandle::from(NOOP)
}


/// Pseudo-stream which simply calls `lock.poll()` on `poll`
struct LockStream {
    lock: BiLockAcquire<u32>,
}

impl LockStream {
    fn new(lock: BiLock<u32>) -> LockStream {
        LockStream {
            lock: lock.lock()
        }
    }

    /// Release a lock after it was acquired in `poll`,
    /// so `poll` could be called again.
    fn release_lock(&mut self, guard: BiLockAcquired<u32>) {
        self.lock = guard.unlock().lock()
    }
}

impl Stream for LockStream {
    type Item = BiLockAcquired<u32>;
    type Error = ();

    fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
        self.lock.poll().map(|a| match a {
            Async::Ready(a) => Async::Ready(Some(a)),
            Async::NotReady => Async::NotReady,
        })
    }
}


#[bench]
fn contended(b: &mut Bencher) {
    b.iter(|| {
        let (x, y) = BiLock::new(1);

        let mut x = executor::spawn(LockStream::new(x));
        let mut y = executor::spawn(LockStream::new(y));

        for _ in 0..1000 {
            let x_guard = match x.poll_stream_notify(&notify_noop(), 11) {
                Ok(Async::Ready(Some(guard))) => guard,
                _ => panic!(),
            };

            // Try poll second lock while first lock still holds the lock
            match y.poll_stream_notify(&notify_noop(), 11) {
                Ok(Async::NotReady) => (),
                _ => panic!(),
            };

            x.get_mut().release_lock(x_guard);

            let y_guard = match y.poll_stream_notify(&notify_noop(), 11) {
                Ok(Async::Ready(Some(guard))) => guard,
                _ => panic!(),
            };

            y.get_mut().release_lock(y_guard);
        }
        (x, y)
    });
}

#[bench]
fn lock_unlock(b: &mut Bencher) {
    b.iter(|| {
        let (x, y) = BiLock::new(1);

        let mut x = executor::spawn(LockStream::new(x));
        let mut y = executor::spawn(LockStream::new(y));

        for _ in 0..1000 {
            let x_guard = match x.poll_stream_notify(&notify_noop(), 11) {
                Ok(Async::Ready(Some(guard))) => guard,
                _ => panic!(),
            };

            x.get_mut().release_lock(x_guard);

            let y_guard = match y.poll_stream_notify(&notify_noop(), 11) {
                Ok(Async::Ready(Some(guard))) => guard,
                _ => panic!(),
            };

            y.get_mut().release_lock(y_guard);
        }
        (x, y)
    })
}

@ -0,0 +1,43 @@
#![feature(test)]

extern crate futures;
extern crate test;

use futures::*;
use futures::stream::FuturesUnordered;
use futures::sync::oneshot;

use test::Bencher;

use std::collections::VecDeque;
use std::thread;

#[bench]
fn oneshots(b: &mut Bencher) {
    const NUM: usize = 10_000;

    b.iter(|| {
        let mut txs = VecDeque::with_capacity(NUM);
        let mut rxs = FuturesUnordered::new();

        for _ in 0..NUM {
            let (tx, rx) = oneshot::channel();
            txs.push_back(tx);
            rxs.push(rx);
        }

        thread::spawn(move || {
            while let Some(tx) = txs.pop_front() {
                let _ = tx.send("hello");
            }
        });

        future::lazy(move || {
            loop {
                if let Ok(Async::Ready(None)) = rxs.poll() {
                    return Ok::<(), ()>(());
                }
            }
        }).wait().unwrap();
    });
}

@ -0,0 +1,72 @@
#![feature(test)]

extern crate futures;
extern crate test;

use futures::*;
use futures::executor::{Notify, NotifyHandle};
use futures::task::Task;

use test::Bencher;

fn notify_noop() -> NotifyHandle {
    struct Noop;

    impl Notify for Noop {
        fn notify(&self, _id: usize) {}
    }

    const NOOP : &'static Noop = &Noop;

    NotifyHandle::from(NOOP)
}

#[bench]
fn task_init(b: &mut Bencher) {
    const NUM: u32 = 100_000;

    struct MyFuture {
        num: u32,
        task: Option<Task>,
    };

    impl Future for MyFuture {
        type Item = ();
        type Error = ();

        fn poll(&mut self) -> Poll<(), ()> {
            if self.num == NUM {
                Ok(Async::Ready(()))
            } else {
                self.num += 1;

                if let Some(ref t) = self.task {
                    if t.will_notify_current() {
                        t.notify();
                        return Ok(Async::NotReady);
                    }
                }

                let t = task::current();
                t.notify();
                self.task = Some(t);

                Ok(Async::NotReady)
            }
        }
    }

    let notify = notify_noop();

    let mut fut = executor::spawn(MyFuture {
        num: 0,
        task: None,
    });

    b.iter(|| {
        fut.get_mut().num = 0;

        while let Ok(Async::NotReady) = fut.poll_future_notify(&notify, 0) {
        }
    });
}

@ -0,0 +1,168 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate futures;
|
||||
extern crate test;
|
||||
|
||||
use futures::{Async, Poll, AsyncSink};
|
||||
use futures::executor;
|
||||
use futures::executor::{Notify, NotifyHandle};
|
||||
|
||||
use futures::sink::Sink;
|
||||
use futures::stream::Stream;
|
||||
|
||||
use futures::sync::mpsc::unbounded;
|
||||
use futures::sync::mpsc::channel;
|
||||
use futures::sync::mpsc::Sender;
|
||||
use futures::sync::mpsc::UnboundedSender;
|
||||
|
||||
|
||||
use test::Bencher;
|
||||
|
||||
fn notify_noop() -> NotifyHandle {
|
||||
struct Noop;
|
||||
|
||||
impl Notify for Noop {
|
||||
fn notify(&self, _id: usize) {}
|
||||
}
|
||||
|
||||
const NOOP : &'static Noop = &Noop;
|
||||
|
||||
NotifyHandle::from(NOOP)
|
||||
}
|
||||
|
||||
/// Single producer, single consumer
|
||||
#[bench]
|
||||
fn unbounded_1_tx(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let (tx, rx) = unbounded();
|
||||
|
||||
let mut rx = executor::spawn(rx);
|
||||
|
||||
// 1000 iterations to avoid measuring overhead of initialization
|
||||
// Result should be divided by 1000
|
||||
for i in 0..1000 {
|
||||
|
||||
// Poll, not ready, park
|
||||
assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(&notify_noop(), 1));
|
||||
|
||||
UnboundedSender::unbounded_send(&tx, i).unwrap();
|
||||
|
||||
// Now poll ready
|
||||
assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(&notify_noop(), 1));
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// 100 producers, single consumer
|
||||
#[bench]
|
||||
fn unbounded_100_tx(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let (tx, rx) = unbounded();
|
||||
|
||||
let mut rx = executor::spawn(rx);
|
||||
|
||||
let tx: Vec<_> = (0..100).map(|_| tx.clone()).collect();
|
||||
|
||||
// 1000 send/recv operations total, result should be divided by 1000
|
||||
for _ in 0..10 {
|
||||
for i in 0..tx.len() {
|
||||
assert_eq!(Ok(Async::NotReady), rx.poll_stream_notify(&notify_noop(), 1));
|
||||
|
||||
UnboundedSender::unbounded_send(&tx[i], i).unwrap();
|
||||
|
||||
assert_eq!(Ok(Async::Ready(Some(i))), rx.poll_stream_notify(&notify_noop(), 1));
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn unbounded_uncontended(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let (tx, mut rx) = unbounded();
|
||||
|
||||
for i in 0..1000 {
|
||||
UnboundedSender::unbounded_send(&tx, i).expect("send");
|
||||
// No need to create a task, because poll is not going to park.
|
||||
assert_eq!(Ok(Async::Ready(Some(i))), rx.poll());
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
/// A `Stream` that continuously sends incrementing numbers to the queue
|
||||
struct TestSender {
|
||||
tx: Sender<u32>,
|
||||
last: u32, // Last number sent
|
||||
}
|
||||
|
||||
// Could be a Future, it doesn't matter
|
||||
impl Stream for TestSender {
|
||||
type Item = u32;
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
match self.tx.start_send(self.last + 1) {
|
||||
Err(_) => panic!(),
|
||||
Ok(AsyncSink::Ready) => {
|
||||
self.last += 1;
|
||||
assert_eq!(Ok(Async::Ready(())), self.tx.poll_complete());
|
||||
Ok(Async::Ready(Some(self.last)))
|
||||
}
|
||||
Ok(AsyncSink::NotReady(_)) => {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Single producers, single consumer
|
||||
#[bench]
|
||||
fn bounded_1_tx(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let (tx, rx) = channel(0);
|
||||
|
||||
let mut tx = executor::spawn(TestSender {
|
||||
tx: tx,
|
||||
last: 0,
|
||||
});
|
||||
|
||||
let mut rx = executor::spawn(rx);
|
||||
|
||||
for i in 0..1000 {
|
||||
assert_eq!(Ok(Async::Ready(Some(i + 1))), tx.poll_stream_notify(&notify_noop(), 1));
|
||||
assert_eq!(Ok(Async::NotReady), tx.poll_stream_notify(&notify_noop(), 1));
|
||||
assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(&notify_noop(), 1));
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// 100 producers, single consumer
|
||||
#[bench]
|
||||
fn bounded_100_tx(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
// Each sender can send one item after specified capacity
|
||||
let (tx, rx) = channel(0);
|
||||
|
||||
let mut tx: Vec<_> = (0..100).map(|_| {
|
||||
executor::spawn(TestSender {
|
||||
tx: tx.clone(),
|
||||
last: 0
|
||||
})
|
||||
}).collect();
|
||||
|
||||
let mut rx = executor::spawn(rx);
|
||||
|
||||
for i in 0..10 {
|
||||
for j in 0..tx.len() {
|
||||
// Send an item
|
||||
assert_eq!(Ok(Async::Ready(Some(i + 1))), tx[j].poll_stream_notify(&notify_noop(), 1));
|
||||
// Then block
|
||||
assert_eq!(Ok(Async::NotReady), tx[j].poll_stream_notify(&notify_noop(), 1));
|
||||
// Recv the item
|
||||
assert_eq!(Ok(Async::Ready(Some(i + 1))), rx.poll_stream_notify(&notify_noop(), 1));
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
|
@ -0,0 +1,114 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate futures;
|
||||
extern crate test;
|
||||
|
||||
use futures::{Future, Poll, Async};
|
||||
use futures::task::{self, Task};
|
||||
|
||||
use test::Bencher;
|
||||
|
||||
#[bench]
|
||||
fn thread_yield_single_thread_one_wait(b: &mut Bencher) {
|
||||
const NUM: usize = 10_000;
|
||||
|
||||
struct Yield {
|
||||
rem: usize,
|
||||
}
|
||||
|
||||
impl Future for Yield {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
if self.rem == 0 {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
self.rem -= 1;
|
||||
task::current().notify();
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b.iter(|| {
|
||||
let y = Yield { rem: NUM };
|
||||
y.wait().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn thread_yield_single_thread_many_wait(b: &mut Bencher) {
|
||||
const NUM: usize = 10_000;
|
||||
|
||||
struct Yield {
|
||||
rem: usize,
|
||||
}
|
||||
|
||||
impl Future for Yield {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
if self.rem == 0 {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
self.rem -= 1;
|
||||
task::current().notify();
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
b.iter(|| {
|
||||
for _ in 0..NUM {
|
||||
let y = Yield { rem: 1 };
|
||||
y.wait().unwrap();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn thread_yield_multi_thread(b: &mut Bencher) {
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
|
||||
const NUM: usize = 1_000;
|
||||
|
||||
let (tx, rx) = mpsc::sync_channel::<Task>(10_000);
|
||||
|
||||
struct Yield {
|
||||
rem: usize,
|
||||
tx: mpsc::SyncSender<Task>,
|
||||
}
|
||||
|
||||
impl Future for Yield {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
if self.rem == 0 {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
self.rem -= 1;
|
||||
self.tx.send(task::current()).unwrap();
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
thread::spawn(move || {
|
||||
while let Ok(task) = rx.recv() {
|
||||
task.notify();
|
||||
}
|
||||
});
|
||||
|
||||
b.iter(move || {
|
||||
let y = Yield {
|
||||
rem: NUM,
|
||||
tx: tx.clone(),
|
||||
};
|
||||
|
||||
y.wait().unwrap();
|
||||
});
|
||||
}
|
|
@ -5,6 +5,12 @@
|
|||
//!
|
||||
//! More information about executors can be [found online at tokio.rs][online].
|
||||
//!
|
||||
//! [online]: https://tokio.rs/docs/going-deeper/tasks/
|
||||
//! [online]: https://tokio.rs/docs/going-deeper-futures/tasks/
|
||||
|
||||
pub use task_impl::{Spawn, spawn, Unpark, Executor, Run};
|
||||
#[allow(deprecated)]
|
||||
#[cfg(feature = "use_std")]
|
||||
pub use task_impl::{Unpark, Executor, Run};
|
||||
|
||||
pub use task_impl::{Spawn, spawn, Notify, with_notify};
|
||||
|
||||
pub use task_impl::{UnsafeNotify, NotifyHandle};
|
||||
|
|
|
@ -29,7 +29,7 @@ impl<F> Future for CatchUnwind<F>
|
|||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let mut future = self.future.take().expect("cannot poll twice");
|
||||
let (res, future) = try!(catch_unwind(|| (future.poll(), future)));
|
||||
let (res, future) = catch_unwind(|| (future.poll(), future))?;
|
||||
match res {
|
||||
Ok(Async::NotReady) => {
|
||||
self.future = Some(future);
|
||||
|
|
|
@ -36,7 +36,7 @@ impl<A, B, C> Chain<A, B, C>
|
|||
Chain::First(_, c) => c,
|
||||
_ => panic!(),
|
||||
};
|
||||
match try!(f(a_result, data)) {
|
||||
match f(a_result, data)? {
|
||||
Ok(e) => Ok(Async::Ready(e)),
|
||||
Err(mut b) => {
|
||||
let ret = b.poll();
|
||||
|
|
|
@ -11,7 +11,7 @@ pub enum Either<A, B> {
|
|||
}
|
||||
|
||||
impl<T, A, B> Either<(T, A), (T, B)> {
|
||||
/// Splits out the homogenous type from an either of tuples.
|
||||
/// Splits out the homogeneous type from an either of tuples.
|
||||
///
|
||||
/// This method is typically useful when combined with the `Future::select2`
|
||||
/// combinator.
|
||||
|
@ -20,7 +20,7 @@ impl<T, A, B> Either<(T, A), (T, B)> {
|
|||
Either::A((a, b)) => (a, Either::A(b)),
|
||||
Either::B((a, b)) => (a, Either::B(b)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> Future for Either<A, B>
|
||||
|
|
|
@ -42,7 +42,7 @@ impl<A> Future for Flatten<A>
|
|||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
self.state.poll(|a, ()| {
|
||||
let future = try!(a).into_future();
|
||||
let future = a?.into_future();
|
||||
Ok(Err(future))
|
||||
})
|
||||
}
|
||||
|
|
|
@ -0,0 +1,40 @@
|
|||
use {Future, Poll, Async};
|
||||
|
||||
/// Do something with the item of a future, passing it on.
|
||||
///
|
||||
/// This is created by the `Future::inspect` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Inspect<A, F> where A: Future {
|
||||
future: A,
|
||||
f: Option<F>,
|
||||
}
|
||||
|
||||
pub fn new<A, F>(future: A, f: F) -> Inspect<A, F>
|
||||
where A: Future,
|
||||
F: FnOnce(&A::Item),
|
||||
{
|
||||
Inspect {
|
||||
future: future,
|
||||
f: Some(f),
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, F> Future for Inspect<A, F>
|
||||
where A: Future,
|
||||
F: FnOnce(&A::Item),
|
||||
{
|
||||
type Item = A::Item;
|
||||
type Error = A::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<A::Item, A::Error> {
|
||||
match self.future.poll() {
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Ok(Async::Ready(e)) => {
|
||||
(self.f.take().expect("cannot poll Inspect twice"))(&e);
|
||||
Ok(Async::Ready(e))
|
||||
},
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -150,7 +150,7 @@ enum MaybeDone<A: Future> {
|
|||
impl<A: Future> MaybeDone<A> {
|
||||
fn poll(&mut self) -> Result<bool, A::Error> {
|
||||
let res = match *self {
|
||||
MaybeDone::NotYet(ref mut a) => try!(a.poll()),
|
||||
MaybeDone::NotYet(ref mut a) => a.poll()?,
|
||||
MaybeDone::Done(_) => return Ok(true),
|
||||
MaybeDone::Gone => panic!("cannot poll Join twice"),
|
||||
};
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//! Definition of the JoinAll combinator, waiting for all of a list of futures
|
||||
//! Definition of the `JoinAll` combinator, waiting for all of a list of futures
|
||||
//! to finish.
|
||||
|
||||
use std::prelude::v1::*;
|
||||
|
@ -43,10 +43,11 @@ impl<I> fmt::Debug for JoinAll<I>
|
|||
/// given.
|
||||
///
|
||||
/// The returned future will drive execution for all of its underlying futures,
|
||||
/// collecting the results into a destination `Vec<T>`. If any future returns
|
||||
/// an error then all other futures will be canceled and an error will be
|
||||
/// returned immediately. If all futures complete successfully, however, then
|
||||
/// the returned future will succeed with a `Vec` of all the successful results.
|
||||
/// collecting the results into a destination `Vec<T>` in the same order as they
|
||||
/// were provided. If any future returns an error then all other futures will be
|
||||
/// canceled and an error will be returned immediately. If all futures complete
|
||||
/// successfully, however, then the returned future will succeed with a `Vec` of
|
||||
/// all the successful results.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
|
@ -63,9 +64,9 @@ impl<I> fmt::Debug for JoinAll<I>
|
|||
/// });
|
||||
///
|
||||
/// let f = join_all(vec![
|
||||
/// ok::<u32, u32>(1).boxed(),
|
||||
/// err::<u32, u32>(2).boxed(),
|
||||
/// ok::<u32, u32>(3).boxed(),
|
||||
/// Box::new(ok::<u32, u32>(1)),
|
||||
/// Box::new(err::<u32, u32>(2)),
|
||||
/// Box::new(ok::<u32, u32>(3)),
|
||||
/// ]);
|
||||
/// let f = f.then(|x| {
|
||||
/// assert_eq!(x, Err(2));
|
||||
|
@ -94,8 +95,8 @@ impl<I> Future for JoinAll<I>
|
|||
let mut all_done = true;
|
||||
|
||||
for idx in 0 .. self.elems.len() {
|
||||
let done_val = match &mut self.elems[idx] {
|
||||
&mut ElemState::Pending(ref mut t) => {
|
||||
let done_val = match self.elems[idx] {
|
||||
ElemState::Pending(ref mut t) => {
|
||||
match t.poll() {
|
||||
Ok(Async::Ready(v)) => Ok(v),
|
||||
Ok(Async::NotReady) => {
|
||||
|
@ -105,7 +106,7 @@ impl<I> Future for JoinAll<I>
|
|||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
&mut ElemState::Done(ref mut _v) => continue,
|
||||
ElemState::Done(ref mut _v) => continue,
|
||||
};
|
||||
|
||||
match done_val {
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
//! This module contains the `Future` trait and a number of adaptors for this
|
||||
//! trait. See the crate docs, and the docs for `Future`, for full detail.
|
||||
|
||||
use core::fmt;
|
||||
use core::result;
|
||||
|
||||
// Primitive futures
|
||||
|
@ -55,6 +56,7 @@ mod select;
|
|||
mod select2;
|
||||
mod then;
|
||||
mod either;
|
||||
mod inspect;
|
||||
|
||||
// impl details
|
||||
mod chain;
|
||||
|
@ -73,6 +75,7 @@ pub use self::select::{Select, SelectNext};
|
|||
pub use self::select2::Select2;
|
||||
pub use self::then::Then;
|
||||
pub use self::either::Either;
|
||||
pub use self::inspect::Inspect;
|
||||
|
||||
if_std! {
|
||||
mod catch_unwind;
|
||||
|
@ -96,6 +99,10 @@ if_std! {
|
|||
pub use self::join_all::JoinAll as Collect;
|
||||
|
||||
/// A type alias for `Box<Future + Send>`
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note = "removed without replacement, recommended to use a \
|
||||
local extension trait or function if needed, more \
|
||||
details in https://github.com/alexcrichton/futures-rs/issues/228")]
|
||||
pub type BoxFuture<T, E> = ::std::boxed::Box<Future<Item = T, Error = E> + Send>;
|
||||
|
||||
impl<F: ?Sized + Future> Future for ::std::boxed::Box<F> {
|
||||
|
@ -148,7 +155,7 @@ use {Poll, stream};
|
|||
/// More information about the details of `poll` and the nitty-gritty of tasks
|
||||
/// can be [found online at tokio.rs][poll-dox].
|
||||
///
|
||||
/// [poll-dox]: https://tokio.rs/docs/going-deeper/futures-model/
|
||||
/// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
|
||||
///
|
||||
/// # Combinators
|
||||
///
|
||||
|
@ -166,7 +173,7 @@ use {Poll, stream};
|
|||
///
|
||||
/// More information about combinators can be found [on tokio.rs].
|
||||
///
|
||||
/// [on tokio.rs]: https://tokio.rs/docs/going-deeper/futures-mechanics/
|
||||
/// [on tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-mechanics/
|
||||
pub trait Future {
|
||||
/// The type of value that this future will resolved with if it is
|
||||
/// successful.
|
||||
|
@ -180,7 +187,7 @@ pub trait Future {
|
|||
/// interest if it is not.
|
||||
///
|
||||
/// This function will check the internal state of the future and assess
|
||||
/// whether the value is ready to be produced. Implementors of this function
|
||||
/// whether the value is ready to be produced. Implementers of this function
|
||||
/// should ensure that a call to this **never blocks** as event loops may
|
||||
/// not work properly otherwise.
|
||||
///
|
||||
|
@ -194,7 +201,7 @@ pub trait Future {
|
|||
/// More information about the details of `poll` and the nitty-gritty of
|
||||
/// tasks can be [found online at tokio.rs][poll-dox].
|
||||
///
|
||||
/// [poll-dox]: https://tokio.rs/docs/going-deeper/futures-model/
|
||||
/// [poll-dox]: https://tokio.rs/docs/going-deeper-futures/futures-model/
|
||||
///
|
||||
/// # Runtime characteristics
|
||||
///
|
||||
|
@ -234,6 +241,14 @@ pub trait Future {
|
|||
/// notification (through the `unpark` method) once the value is ready to be
|
||||
/// produced or the future can make progress.
|
||||
///
|
||||
/// Note that if `NotReady` is returned it only means that *this* task will
|
||||
/// receive a notification. Historical calls to `poll` with different tasks
|
||||
/// will not receive notifications. In other words, implementers of the
|
||||
/// `Future` trait need not store a queue of tasks to notify, but only the
|
||||
/// last task that called this method. Alternatively callers of this method
|
||||
/// can only rely on the most recent task which called `poll` being notified
|
||||
/// when a future is ready.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Once a future has completed (returned `Ready` or `Err` from `poll`),
|
||||
|
@ -299,11 +314,17 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future::{BoxFuture, result};
|
||||
///
|
||||
/// let a: BoxFuture<i32, i32> = result(Ok(1)).boxed();
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note = "removed without replacement, recommended to use a \
|
||||
local extension trait or function if needed, more \
|
||||
details in https://github.com/alexcrichton/futures-rs/issues/228")]
|
||||
#[allow(deprecated)]
|
||||
fn boxed(self) -> BoxFuture<Self::Item, Self::Error>
|
||||
where Self: Sized + Send + 'static
|
||||
{
|
||||
|
@ -328,10 +349,23 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_1.map(|x| x + 3);
|
||||
/// let future = future::ok::<u32, u32>(1);
|
||||
/// let new_future = future.map(|x| x + 3);
|
||||
/// assert_eq!(new_future.wait(), Ok(4));
|
||||
/// ```
|
||||
///
|
||||
/// Calling `map` on an errored `Future` has no effect:
|
||||
///
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future = future::err::<u32, u32>(1);
|
||||
/// let new_future = future.map(|x| x + 3);
|
||||
/// assert_eq!(new_future.wait(), Err(1));
|
||||
/// ```
|
||||
fn map<F, U>(self, f: F) -> Map<Self, F>
|
||||
where F: FnOnce(Self::Item) -> U,
|
||||
|
@ -359,8 +393,19 @@ pub trait Future {
|
|||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_4 = future_of_err_1.map_err(|x| x + 3);
|
||||
/// let future = err::<u32, u32>(1);
|
||||
/// let new_future = future.map_err(|x| x + 3);
|
||||
/// assert_eq!(new_future.wait(), Err(4));
|
||||
/// ```
|
||||
///
|
||||
/// Calling `map_err` on a successful `Future` has no effect:
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// let future = ok::<u32, u32>(1);
|
||||
/// let new_future = future.map_err(|x| x + 3);
|
||||
/// assert_eq!(new_future.wait(), Ok(1));
|
||||
/// ```
|
||||
fn map_err<F, E>(self, f: F) -> MapErr<Self, F>
|
||||
where F: FnOnce(Self::Error) -> E,
|
||||
|
@ -386,10 +431,11 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_4 = future_of_err_1.from_err::<u32>();
|
||||
/// let future_with_err_u8 = future::err::<(), u8>(1);
|
||||
/// let future_with_err_u32 = future_with_err_u8.from_err::<u32>();
|
||||
/// ```
|
||||
fn from_err<E:From<Self::Error>>(self) -> FromErr<Self, E>
|
||||
where Self: Sized,
|
||||
|
@ -419,18 +465,19 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_1 = future::ok::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_1.then(|x| {
|
||||
/// x.map(|y| y + 3)
|
||||
/// });
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_1 = future::err::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_err_1.then(|x| {
|
||||
/// match x {
|
||||
/// Ok(_) => panic!("expected an error"),
|
||||
/// Err(y) => ok::<u32, u32>(y + 3),
|
||||
/// Err(y) => future::ok::<u32, u32>(y + 3),
|
||||
/// }
|
||||
/// });
|
||||
/// ```
|
||||
|
@ -462,14 +509,15 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future::{self, FutureResult};
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_1 = future::ok::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_1.and_then(|x| {
|
||||
/// Ok(x + 3)
|
||||
/// });
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_1 = future::err::<u32, u32>(1);
|
||||
/// future_of_err_1.and_then(|_| -> FutureResult<u32, u32> {
|
||||
/// panic!("should not be called in case of an error");
|
||||
/// });
|
||||
|
@ -502,14 +550,15 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future::{self, FutureResult};
|
||||
///
|
||||
/// let future_of_err_1 = err::<u32, u32>(1);
|
||||
/// let future_of_err_1 = future::err::<u32, u32>(1);
|
||||
/// let future_of_4 = future_of_err_1.or_else(|x| -> Result<u32, u32> {
|
||||
/// Ok(x + 3)
|
||||
/// });
|
||||
///
|
||||
/// let future_of_1 = ok::<u32, u32>(1);
|
||||
/// let future_of_1 = future::ok::<u32, u32>(1);
|
||||
/// future_of_1.or_else(|_| -> FutureResult<u32, u32> {
|
||||
/// panic!("should not be called in case of success");
|
||||
/// });
|
||||
|
@ -534,20 +583,42 @@ pub trait Future {
|
|||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```no_run
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
/// use std::thread;
|
||||
/// use std::time;
|
||||
///
|
||||
/// let future1 = future::lazy(|| {
|
||||
/// thread::sleep(time::Duration::from_secs(5));
|
||||
/// future::ok::<char, ()>('a')
|
||||
/// });
|
||||
///
|
||||
/// let future2 = future::lazy(|| {
|
||||
/// thread::sleep(time::Duration::from_secs(3));
|
||||
/// future::ok::<char, ()>('b')
|
||||
/// });
|
||||
///
|
||||
/// let (value, last_future) = future1.select(future2).wait().ok().unwrap();
|
||||
/// assert_eq!(value, 'a');
|
||||
/// assert_eq!(last_future.wait().unwrap(), 'b');
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
///
|
||||
/// // A poor-man's join implemented on top of select
|
||||
/// A poor-man's `join` implemented on top of `select`:
|
||||
///
|
||||
/// fn join<A>(a: A, b: A) -> BoxFuture<(u32, u32), u32>
|
||||
/// where A: Future<Item = u32, Error = u32> + Send + 'static,
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// fn join<A>(a: A, b: A) -> Box<Future<Item=(u32, u32), Error=u32>>
|
||||
/// where A: Future<Item = u32, Error = u32> + 'static,
|
||||
/// {
|
||||
/// a.select(b).then(|res| {
|
||||
/// Box::new(a.select(b).then(|res| -> Box<Future<Item=_, Error=_>> {
|
||||
/// match res {
|
||||
/// Ok((a, b)) => b.map(move |b| (a, b)).boxed(),
|
||||
/// Err((a, _)) => err(a).boxed(),
|
||||
/// Ok((a, b)) => Box::new(b.map(move |b| (a, b))),
|
||||
/// Err((a, _)) => Box::new(future::err(a)),
|
||||
/// }
|
||||
/// }).boxed()
|
||||
/// }))
|
||||
/// }
|
||||
/// ```
|
||||
fn select<B>(self, other: B) -> Select<Self, B::Future>
|
||||
|
@ -576,23 +647,24 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future::{self, Either};
|
||||
///
|
||||
/// // A poor-man's join implemented on top of select2
|
||||
///
|
||||
/// fn join<A, B, E>(a: A, b: B) -> BoxFuture<(A::Item, B::Item), E>
|
||||
/// where A: Future<Error = E> + Send + 'static,
|
||||
/// B: Future<Error = E> + Send + 'static,
|
||||
/// A::Item: Send, B::Item: Send, E: Send + 'static,
|
||||
/// fn join<A, B, E>(a: A, b: B) -> Box<Future<Item=(A::Item, B::Item), Error=E>>
|
||||
/// where A: Future<Error = E> + 'static,
|
||||
/// B: Future<Error = E> + 'static,
|
||||
/// E: 'static,
|
||||
/// {
|
||||
/// a.select2(b).then(|res| {
|
||||
/// Box::new(a.select2(b).then(|res| -> Box<Future<Item=_, Error=_>> {
|
||||
/// match res {
|
||||
/// Ok(Either::A((x, b))) => b.map(move |y| (x, y)).boxed(),
|
||||
/// Ok(Either::B((y, a))) => a.map(move |x| (x, y)).boxed(),
|
||||
/// Err(Either::A((e, _))) => err(e).boxed(),
|
||||
/// Err(Either::B((e, _))) => err(e).boxed(),
|
||||
/// Ok(Either::A((x, b))) => Box::new(b.map(move |y| (x, y))),
|
||||
/// Ok(Either::B((y, a))) => Box::new(a.map(move |x| (x, y))),
|
||||
/// Err(Either::A((e, _))) => Box::new(future::err(e)),
|
||||
/// Err(Either::B((e, _))) => Box::new(future::err(e)),
|
||||
/// }
|
||||
/// }).boxed()
|
||||
/// }))
|
||||
/// }
|
||||
/// ```
|
||||
fn select2<B>(self, other: B) -> Select2<Self, B::Future>
|
||||
|
@ -617,16 +689,28 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let a = ok::<u32, u32>(1);
|
||||
/// let b = ok::<u32, u32>(2);
|
||||
/// let a = future::ok::<u32, u32>(1);
|
||||
/// let b = future::ok::<u32, u32>(2);
|
||||
/// let pair = a.join(b);
|
||||
///
|
||||
/// pair.map(|(a, b)| {
|
||||
/// assert_eq!(a, 1);
|
||||
/// assert_eq!(b, 2);
|
||||
/// });
|
||||
/// assert_eq!(pair.wait(), Ok((1, 2)));
|
||||
/// ```
|
||||
///
|
||||
/// If one or both of the joined `Future`s is errored, the resulting
|
||||
/// `Future` will be errored:
|
||||
///
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let a = future::ok::<u32, u32>(1);
|
||||
/// let b = future::err::<u32, u32>(2);
|
||||
/// let pair = a.join(b);
|
||||
///
|
||||
/// assert_eq!(pair.wait(), Err(2));
|
||||
/// ```
|
||||
fn join<B>(self, other: B) -> Join<Self, B::Future>
|
||||
where B: IntoFuture<Error=Self::Error>,
|
||||
|
@ -677,15 +761,15 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::{Stream, Async};
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future = ok::<_, bool>(17);
|
||||
/// let future = future::ok::<_, bool>(17);
|
||||
/// let mut stream = future.into_stream();
|
||||
/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
///
|
||||
/// let future = err::<bool, _>(19);
|
||||
/// let future = future::err::<bool, _>(19);
|
||||
/// let mut stream = future.into_stream();
|
||||
/// assert_eq!(Err(19), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
|
@ -700,7 +784,7 @@ pub trait Future {
|
|||
/// future is itself another future.
|
||||
///
|
||||
/// This can be useful when combining futures together to flatten the
|
||||
/// computation out the the final result. This method can only be called
|
||||
/// computation out the final result. This method can only be called
|
||||
/// when the successful result of this future itself implements the
|
||||
/// `IntoFuture` trait and the error can be created from this future's error
|
||||
/// type.
|
||||
|
@ -713,10 +797,24 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future_of_a_future = ok::<_, u32>(ok::<u32, u32>(1));
|
||||
/// let future_of_1 = future_of_a_future.flatten();
|
||||
/// let nested_future = future::ok::<_, u32>(future::ok::<u32, u32>(1));
|
||||
/// let future = nested_future.flatten();
|
||||
/// assert_eq!(future.wait(), Ok(1));
|
||||
/// ```
|
||||
///
|
||||
/// Calling `flatten` on an errored `Future`, or if the inner `Future` is
|
||||
/// errored, will result in an errored `Future`:
|
||||
///
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let nested_future = future::ok::<_, u32>(future::err::<u32, u32>(1));
|
||||
/// let future = nested_future.flatten();
|
||||
/// assert_eq!(future.wait(), Err(1));
|
||||
/// ```
|
||||
fn flatten(self) -> Flatten<Self>
|
||||
where Self::Item: IntoFuture,
|
||||
|
@ -743,17 +841,18 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::{self, Stream};
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
/// use futures::stream;
|
||||
///
|
||||
/// let stream_items = vec![Ok(17), Err(true), Ok(19)];
|
||||
/// let future_of_a_stream = ok::<_, bool>(stream::iter(stream_items));
|
||||
/// let stream_items = vec![17, 18, 19];
|
||||
/// let future_of_a_stream = future::ok::<_, bool>(stream::iter_ok(stream_items));
|
||||
///
|
||||
/// let stream = future_of_a_stream.flatten_stream();
|
||||
///
|
||||
/// let mut iter = stream.wait();
|
||||
/// assert_eq!(Ok(17), iter.next().unwrap());
|
||||
/// assert_eq!(Err(true), iter.next().unwrap());
|
||||
/// assert_eq!(Ok(18), iter.next().unwrap());
|
||||
/// assert_eq!(Ok(19), iter.next().unwrap());
|
||||
/// assert_eq!(None, iter.next());
|
||||
/// ```
|
||||
|
@ -783,17 +882,17 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::Async;
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let mut future = ok::<i32, u32>(2);
|
||||
/// let mut future = future::ok::<i32, u32>(2);
|
||||
/// assert_eq!(future.poll(), Ok(Async::Ready(2)));
|
||||
///
|
||||
/// // Normally, a call such as this would panic:
|
||||
/// //future.poll();
|
||||
///
|
||||
/// // This, however, is guaranteed to not panic
|
||||
/// let mut future = ok::<i32, u32>(2).fuse();
|
||||
/// let mut future = future::ok::<i32, u32>(2).fuse();
|
||||
/// assert_eq!(future.poll(), Ok(Async::Ready(2)));
|
||||
/// assert_eq!(future.poll(), Ok(Async::NotReady));
|
||||
/// ```
|
||||
|
@ -804,6 +903,29 @@ pub trait Future {
|
|||
assert_future::<Self::Item, Self::Error, _>(f)
|
||||
}
|
||||
|
||||
/// Do something with the item of a future, passing it on.
|
||||
///
|
||||
/// When using futures, you'll often chain several of them together.
|
||||
/// While working on such code, you might want to check out what's happening at
|
||||
/// various parts in the pipeline. To do that, insert a call to inspect().
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future = future::ok::<u32, u32>(1);
|
||||
/// let new_future = future.inspect(|&x| println!("about to resolve: {}", x));
|
||||
/// assert_eq!(new_future.wait(), Ok(1));
|
||||
/// ```
|
||||
fn inspect<F>(self, f: F) -> Inspect<Self, F>
|
||||
where F: FnOnce(&Self::Item) -> (),
|
||||
Self: Sized,
|
||||
{
|
||||
assert_future::<Self::Item, Self::Error, _>(inspect::new(self, f))
|
||||
}
|
||||
|
||||
/// Catches unwinding panics while polling the future.
|
||||
///
|
||||
/// In general, panics within a future can propagate all the way out to the
|
||||
|
@ -823,14 +945,15 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future::{self, FutureResult};
|
||||
///
|
||||
/// let mut future = ok::<i32, u32>(2);
|
||||
/// let mut future = future::ok::<i32, u32>(2);
|
||||
/// assert!(future.catch_unwind().wait().is_ok());
|
||||
///
|
||||
/// let mut future = lazy(|| -> FutureResult<i32, u32> {
|
||||
/// let mut future = future::lazy(|| -> FutureResult<i32, u32> {
|
||||
/// panic!();
|
||||
/// ok::<i32, u32>(2)
|
||||
/// future::ok::<i32, u32>(2)
|
||||
/// });
|
||||
/// assert!(future.catch_unwind().wait().is_err());
|
||||
/// ```
|
||||
|
@ -844,7 +967,7 @@ pub trait Future {
|
|||
/// Create a cloneable handle to this future where all handles will resolve
|
||||
/// to the same result.
|
||||
///
|
||||
/// The shared() method provides a mean to convert any future into a
|
||||
/// The shared() method provides a method to convert any future into a
|
||||
/// cloneable future. It enables a future to be polled by multiple threads.
|
||||
///
|
||||
/// The returned `Shared` future resolves successfully with
|
||||
|
@ -859,9 +982,10 @@ pub trait Future {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future = ok::<_, bool>(6);
|
||||
/// let future = future::ok::<_, bool>(6);
|
||||
/// let shared1 = future.shared();
|
||||
/// let shared2 = shared1.clone();
|
||||
/// assert_eq!(6, *shared1.wait().unwrap());
|
||||
|
@ -870,9 +994,10 @@ pub trait Future {
|
|||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
/// use futures::future::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let future = ok::<_, bool>(6);
|
||||
/// let future = future::ok::<_, bool>(6);
|
||||
/// let shared1 = future.shared();
|
||||
/// let shared2 = shared1.clone();
|
||||
/// let join_handle = thread::spawn(move || {
|
||||
|
@ -957,3 +1082,89 @@ pub trait FutureFrom<T>: Sized {
|
|||
/// Consume the given value, beginning the conversion.
|
||||
fn future_from(T) -> Self::Future;
|
||||
}
|
||||
|
||||
/// A trait for types which can spawn fresh futures.
|
||||
///
|
||||
/// This trait is typically implemented for "executors", or those types which
|
||||
/// can execute futures to completion. Futures passed to `Spawn::spawn`
|
||||
/// typically get turned into a *task* and are then driven to completion.
|
||||
///
|
||||
/// On spawn, the executor takes ownership of the future and becomes responsible
/// for calling `Future::poll()` whenever a readiness notification is raised.
|
||||
pub trait Executor<F: Future<Item = (), Error = ()>> {
|
||||
/// Spawns a future to run on this `Executor`, typically in the
|
||||
/// "background".
|
||||
///
|
||||
/// This function will return immediately, and schedule the future `future`
|
||||
/// to run on `self`. The details of scheduling and execution are left to
|
||||
/// the implementations of `Executor`, but this is typically a primary point
|
||||
/// for injecting concurrency in a futures-based system. Futures spawned
|
||||
/// through this `execute` function tend to run concurrently while they're
|
||||
/// waiting on events.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Implementers of this trait are allowed to reject accepting this future
|
||||
/// as well. This can happen for various reasons, such as:
|
||||
///
|
||||
/// * The executor is shut down
|
||||
/// * The executor has run out of capacity to execute futures
|
||||
///
|
||||
/// The decision of how to work with this form of error is left to the caller.
|
||||
/// The error returned transfers ownership of the future back to the caller.
|
||||
fn execute(&self, future: F) -> Result<(), ExecuteError<F>>;
|
||||
}
|
||||
|
||||
/// Errors returned from the `Spawn::spawn` function.
|
||||
pub struct ExecuteError<F> {
|
||||
future: F,
|
||||
kind: ExecuteErrorKind,
|
||||
}
|
||||
|
||||
/// Kinds of errors that can be returned from the `Execute::spawn` function.
|
||||
///
|
||||
/// Executors which may not always be able to accept a future may return one of
|
||||
/// these errors, indicating why it was unable to spawn a future.
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub enum ExecuteErrorKind {
|
||||
/// This executor has shut down and will no longer accept new futures to
|
||||
/// spawn.
|
||||
Shutdown,
|
||||
|
||||
/// This executor has no more capacity to run more futures. Other futures
|
||||
/// need to finish before this executor can accept another.
|
||||
NoCapacity,
|
||||
|
||||
#[doc(hidden)]
|
||||
__Nonexhaustive,
|
||||
}
|
||||
|
||||
impl<F> ExecuteError<F> {
|
||||
/// Create a new `ExecuteError`
|
||||
pub fn new(kind: ExecuteErrorKind, future: F) -> ExecuteError<F> {
|
||||
ExecuteError {
|
||||
future: future,
|
||||
kind: kind,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the associated reason for the error
|
||||
pub fn kind(&self) -> ExecuteErrorKind {
|
||||
self.kind
|
||||
}
|
||||
|
||||
/// Consumes self and returns the original future that was spawned.
|
||||
pub fn into_future(self) -> F {
|
||||
self.future
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> fmt::Debug for ExecuteError<F> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self.kind {
|
||||
ExecuteErrorKind::Shutdown => "executor has shut down".fmt(f),
|
||||
ExecuteErrorKind::NoCapacity => "executor has no more capacity".fmt(f),
|
||||
ExecuteErrorKind::__Nonexhaustive => panic!(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@ use {Future, Poll, Async};
|
|||
/// A future representing a value that is immediately ready.
|
||||
///
|
||||
/// Created by the `result` function.
|
||||
#[derive(Debug)]
|
||||
#[derive(Debug, Clone)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
// TODO: rename this to `Result` on the next major version
|
||||
pub struct FutureResult<T, E> {
|
||||
|
@ -16,7 +16,7 @@ pub struct FutureResult<T, E> {
|
|||
|
||||
/// Creates a new "leaf future" which will resolve with the given result.
|
||||
///
|
||||
/// The returned future represents a computation which is finshed immediately.
|
||||
/// The returned future represents a computation which is finished immediately.
|
||||
/// This can be useful with the `finished` and `failed` base future types to
|
||||
/// convert an immediate value to a future to interoperate elsewhere.
|
||||
///
|
||||
|
@ -73,3 +73,9 @@ impl<T, E> Future for FutureResult<T, E> {
|
|||
self.inner.take().expect("cannot poll Result twice").map(Async::Ready)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> From<Result<T, E>> for FutureResult<T, E> {
|
||||
fn from(r: Result<T, E>) -> Self {
|
||||
result(r)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,10 +1,12 @@
|
|||
use {Future, Poll, Async};
|
||||
use future::Either;
|
||||
|
||||
/// Future for the `merge` combinator, waiting for one of two differently-typed
|
||||
/// Future for the `select2` combinator, waiting for one of two differently-typed
|
||||
/// futures to complete.
|
||||
///
|
||||
/// This is created by the `Future::merge` method.
|
||||
/// This is created by the [`Future::select2`] method.
|
||||
///
|
||||
/// [`Future::select2`]: trait.Future.html#method.select2
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
#[derive(Debug)]
|
||||
pub struct Select2<A, B> {
|
||||
|
@ -23,10 +25,10 @@ impl<A, B> Future for Select2<A, B> where A: Future, B: Future {
|
|||
let (mut a, mut b) = self.inner.take().expect("cannot poll Select2 twice");
|
||||
match a.poll() {
|
||||
Err(e) => Err(Either::A((e, b))),
|
||||
Ok(Async::Ready(x)) => Ok(Async::Ready((Either::A((x, b))))),
|
||||
Ok(Async::Ready(x)) => Ok(Async::Ready(Either::A((x, b)))),
|
||||
Ok(Async::NotReady) => match b.poll() {
|
||||
Err(e) => Err(Either::B((e, a))),
|
||||
Ok(Async::Ready(x)) => Ok(Async::Ready((Either::B((x, a))))),
|
||||
Ok(Async::Ready(x)) => Ok(Async::Ready(Either::B((x, a)))),
|
||||
Ok(Async::NotReady) => {
|
||||
self.inner = Some((a, b));
|
||||
Ok(Async::NotReady)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
//! Definition of the SelectAll, finding the first future in a list that
|
||||
//! Definition of the `SelectAll`, finding the first future in a list that
|
||||
//! finishes.
|
||||
|
||||
use std::mem;
|
||||
|
|
|
@ -7,7 +7,7 @@ use std::prelude::v1::*;
|
|||
use {Future, IntoFuture, Poll, Async};
|
||||
|
||||
/// Future for the `select_ok` combinator, waiting for one of any of a list of
|
||||
/// futures to succesfully complete. unlike `select_all`, this future ignores all
|
||||
/// futures to successfully complete. Unlike `select_all`, this future ignores all
|
||||
/// but the last error, if there are any.
|
||||
///
|
||||
/// This is created by the `select_ok` function.
|
||||
|
|
|
@ -14,10 +14,10 @@
|
|||
//! ```
|
||||
|
||||
use {Future, Poll, Async};
|
||||
use executor::{self, Spawn, Unpark};
|
||||
use task::{self, Task};
|
||||
use executor::{self, Notify, Spawn};
|
||||
|
||||
use std::{fmt, mem, ops};
|
||||
use std::{error, fmt, mem, ops};
|
||||
use std::cell::UnsafeCell;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
|
@ -25,7 +25,7 @@ use std::sync::atomic::Ordering::SeqCst;
|
|||
use std::collections::HashMap;
|
||||
|
||||
/// A future that is cloneable and can be polled in multiple threads.
|
||||
/// Use Future::shared() method to convert any future into a `Shared` future.
|
||||
/// Use `Future::shared()` method to convert any future into a `Shared` future.
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct Shared<F: Future> {
|
||||
inner: Arc<Inner<F>>,
|
||||
|
@ -49,10 +49,10 @@ struct Inner<F: Future> {
|
|||
next_clone_id: AtomicUsize,
|
||||
future: UnsafeCell<Option<Spawn<F>>>,
|
||||
result: UnsafeCell<Option<Result<SharedItem<F::Item>, SharedError<F::Error>>>>,
|
||||
unparker: Arc<Unparker>,
|
||||
notifier: Arc<Notifier>,
|
||||
}
|
||||
|
||||
struct Unparker {
|
||||
struct Notifier {
|
||||
state: AtomicUsize,
|
||||
waiters: Mutex<HashMap<usize, Task>>,
|
||||
}
|
||||
|
@ -67,7 +67,7 @@ pub fn new<F: Future>(future: F) -> Shared<F> {
|
|||
Shared {
|
||||
inner: Arc::new(Inner {
|
||||
next_clone_id: AtomicUsize::new(1),
|
||||
unparker: Arc::new(Unparker {
|
||||
notifier: Arc::new(Notifier {
|
||||
state: AtomicUsize::new(IDLE),
|
||||
waiters: Mutex::new(HashMap::new()),
|
||||
}),
|
||||
|
@ -91,7 +91,7 @@ impl<F> Shared<F> where F: Future {
|
|||
/// without blocking. Otherwise, returns None without triggering the work represented by
|
||||
/// this `Shared`.
|
||||
pub fn peek(&self) -> Option<Result<SharedItem<F::Item>, SharedError<F::Error>>> {
|
||||
match self.inner.unparker.state.load(SeqCst) {
|
||||
match self.inner.notifier.state.load(SeqCst) {
|
||||
COMPLETE => {
|
||||
Some(unsafe { self.clone_result() })
|
||||
}
|
||||
|
@ -101,8 +101,8 @@ impl<F> Shared<F> where F: Future {
|
|||
}
|
||||
|
||||
fn set_waiter(&mut self) {
|
||||
let mut waiters = self.inner.unparker.waiters.lock().unwrap();
|
||||
waiters.insert(self.waiter, task::park());
|
||||
let mut waiters = self.inner.notifier.waiters.lock().unwrap();
|
||||
waiters.insert(self.waiter, task::current());
|
||||
}
|
||||
|
||||
unsafe fn clone_result(&self) -> Result<SharedItem<F::Item>, SharedError<F::Error>> {
|
||||
|
@ -115,8 +115,8 @@ impl<F> Shared<F> where F: Future {
|
|||
|
||||
fn complete(&self) {
|
||||
unsafe { *self.inner.future.get() = None };
|
||||
self.inner.unparker.state.store(COMPLETE, SeqCst);
|
||||
self.inner.unparker.unpark();
|
||||
self.inner.notifier.state.store(COMPLETE, SeqCst);
|
||||
self.inner.notifier.notify(0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -129,7 +129,7 @@ impl<F> Future for Shared<F>
|
|||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
self.set_waiter();
|
||||
|
||||
match self.inner.unparker.state.compare_and_swap(IDLE, POLLING, SeqCst) {
|
||||
match self.inner.notifier.state.compare_and_swap(IDLE, POLLING, SeqCst) {
|
||||
IDLE => {
|
||||
// Lock acquired, fall through
|
||||
}
|
||||
|
@ -159,23 +159,24 @@ impl<F> Future for Shared<F>
|
|||
}
|
||||
}
|
||||
|
||||
let _reset = Reset(&self.inner.unparker.state);
|
||||
|
||||
// Get a handle to the unparker
|
||||
let unpark: Arc<Unpark> = self.inner.unparker.clone();
|
||||
let _reset = Reset(&self.inner.notifier.state);
|
||||
|
||||
// Poll the future
|
||||
match unsafe { (*self.inner.future.get()).as_mut().unwrap().poll_future(unpark) } {
|
||||
let res = unsafe {
|
||||
(*self.inner.future.get()).as_mut().unwrap()
|
||||
.poll_future_notify(&self.inner.notifier, 0)
|
||||
};
|
||||
match res {
|
||||
Ok(Async::NotReady) => {
|
||||
// Not ready, try to release the handle
|
||||
match self.inner.unparker.state.compare_and_swap(POLLING, IDLE, SeqCst) {
|
||||
match self.inner.notifier.state.compare_and_swap(POLLING, IDLE, SeqCst) {
|
||||
POLLING => {
|
||||
// Success
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
REPOLL => {
|
||||
// Gotta poll again!
|
||||
let prev = self.inner.unparker.state.swap(POLLING, SeqCst);
|
||||
let prev = self.inner.notifier.state.swap(POLLING, SeqCst);
|
||||
assert_eq!(prev, REPOLL);
|
||||
}
|
||||
_ => unreachable!(),
|
||||
|
@ -217,19 +218,19 @@ impl<F> Clone for Shared<F> where F: Future {
|
|||
|
||||
impl<F> Drop for Shared<F> where F: Future {
|
||||
fn drop(&mut self) {
|
||||
let mut waiters = self.inner.unparker.waiters.lock().unwrap();
|
||||
let mut waiters = self.inner.notifier.waiters.lock().unwrap();
|
||||
waiters.remove(&self.waiter);
|
||||
}
|
||||
}
|
||||
|
||||
impl Unpark for Unparker {
|
||||
fn unpark(&self) {
|
||||
impl Notify for Notifier {
|
||||
fn notify(&self, _id: usize) {
|
||||
self.state.compare_and_swap(POLLING, REPOLL, SeqCst);
|
||||
|
||||
let waiters = mem::replace(&mut *self.waiters.lock().unwrap(), HashMap::new());
|
||||
|
||||
for (_, waiter) in waiters {
|
||||
waiter.unpark();
|
||||
waiter.notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -248,9 +249,9 @@ impl<F> fmt::Debug for Inner<F>
|
|||
}
|
||||
}
|
||||
|
||||
/// A wrapped item of the original future that is clonable and implements Deref
|
||||
/// A wrapped item of the original future that is cloneable and implements Deref
|
||||
/// for ease of use.
|
||||
#[derive(Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SharedItem<T> {
|
||||
item: Arc<T>,
|
||||
}
|
||||
|
@ -263,9 +264,9 @@ impl<T> ops::Deref for SharedItem<T> {
|
|||
}
|
||||
}
|
||||
|
||||
/// A wrapped error of the original future that is clonable and implements Deref
|
||||
/// A wrapped error of the original future that is cloneable and implements Deref
|
||||
/// for ease of use.
|
||||
#[derive(Debug)]
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct SharedError<E> {
|
||||
error: Arc<E>,
|
||||
}
|
||||
|
@ -277,3 +278,23 @@ impl<E> ops::Deref for SharedError<E> {
|
|||
&self.error.as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> fmt::Display for SharedError<E>
|
||||
where E: fmt::Display,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
self.error.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E> error::Error for SharedError<E>
|
||||
where E: error::Error,
|
||||
{
|
||||
fn description(&self) -> &str {
|
||||
self.error.description()
|
||||
}
|
||||
|
||||
fn cause(&self) -> Option<&error::Error> {
|
||||
self.error.cause()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -37,7 +37,8 @@
|
|||
//!
|
||||
//! use std::io;
|
||||
//! use std::time::Duration;
|
||||
//! use futures::future::{Future, Map};
|
||||
//! use futures::prelude::*;
|
||||
//! use futures::future::Map;
|
||||
//!
|
||||
//! // A future is actually a trait implementation, so we can generically take a
|
||||
//! // future of any integer and return back a future that will resolve to that
|
||||
|
@ -196,16 +197,21 @@ pub use future::{
|
|||
SelectNext, Then
|
||||
};
|
||||
|
||||
#[cfg(feature = "use_std")]
|
||||
mod lock;
|
||||
mod task_impl;
|
||||
|
||||
mod resultstream;
|
||||
|
||||
pub mod task;
|
||||
pub mod executor;
|
||||
#[cfg(feature = "use_std")]
|
||||
pub mod sync;
|
||||
#[cfg(feature = "use_std")]
|
||||
pub mod unsync;
|
||||
|
||||
|
||||
if_std! {
|
||||
mod lock;
|
||||
mod task_impl;
|
||||
mod stack;
|
||||
|
||||
pub mod task;
|
||||
pub mod executor;
|
||||
pub mod sync;
|
||||
pub mod unsync;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "use sync::oneshot::channel instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
|
@ -229,6 +235,7 @@ if_std! {
|
|||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the future module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[allow(deprecated)]
|
||||
pub use future::{BoxFuture, collect, select_all, select_ok};
|
||||
|
||||
#[doc(hidden)]
|
||||
|
@@ -236,3 +243,23 @@ if_std! {

#[cfg(feature = "with-deprecated")]
pub use future::{SelectAll, SelectAllNext, Collect, SelectOk};
}

/// A "prelude" for crates using the `futures` crate.
///
/// This prelude is similar to the standard library's prelude in that you'll
/// almost always want to import its entire contents, but unlike the standard
/// library's prelude you'll have to do so manually. An example of using this is:
///
/// ```
/// use futures::prelude::*;
/// ```
///
/// We may add items to this over time as they become ubiquitous as well, but
/// otherwise this should help cut down on futures-related imports when you're
/// working with the `futures` crate!
pub mod prelude {
#[doc(no_inline)]
pub use {Future, Stream, Sink, Async, AsyncSink, Poll, StartSend};
#[doc(no_inline)]
pub use IntoFuture;
}
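// --- Illustrative aside, not part of the patch ---
// A minimal sketch of what the new prelude buys: the common traits (`Future`,
// `Stream`, `Sink`, `IntoFuture`, ...) arrive with one glob import.
extern crate futures;
use futures::prelude::*;
use futures::future;

fn main() {
    // `map` and `wait` come from the `Future` trait re-exported by the prelude.
    let answer = future::ok::<u32, ()>(6).map(|n| n * 7).wait().unwrap();
    assert_eq!(answer, 42);
}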
|
|
|
@ -30,7 +30,7 @@ pub enum Async<T> {
|
|||
}
|
||||
|
||||
impl<T> Async<T> {
|
||||
/// Change the success type of this `Async` value with the closure provided
|
||||
/// Change the success value of this `Async` with the closure provided
|
||||
pub fn map<F, U>(self, f: F) -> Async<U>
|
||||
where F: FnOnce(T) -> U
|
||||
{
|
||||
|
@ -75,6 +75,16 @@ pub enum AsyncSink<T> {
|
|||
}
|
||||
|
||||
impl<T> AsyncSink<T> {
|
||||
/// Change the NotReady value of this `AsyncSink` with the closure provided
|
||||
pub fn map<F, U>(self, f: F) -> AsyncSink<U>
|
||||
where F: FnOnce(T) -> U,
|
||||
{
|
||||
match self {
|
||||
AsyncSink::Ready => AsyncSink::Ready,
|
||||
AsyncSink::NotReady(t) => AsyncSink::NotReady(f(t)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether this is `AsyncSink::Ready`
|
||||
pub fn is_ready(&self) -> bool {
|
||||
match *self {
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
// This should really be in the stream module,
|
||||
// but `pub(crate)` isn't available until Rust 1.18,
|
||||
// and pre-1.18 there isn't a really good way to have a sub-module
|
||||
// available to the crate, but not without it.
|
||||
use core::marker::PhantomData;
|
||||
|
||||
use {Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
|
||||
/// A stream combinator used to convert a `Stream<Item=T,Error=E>`
|
||||
/// to a `Stream<Item=Result<T,E>>`.
|
||||
///
|
||||
/// A poll on this stream will never return an `Err`. As such the
|
||||
/// actual error type is parameterized, so it can match whatever error
|
||||
/// type is needed.
|
||||
///
|
||||
/// This structure is produced by the `Stream::results` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Results<S: Stream, E> {
|
||||
inner: S,
|
||||
phantom: PhantomData<E>
|
||||
}
|
||||
|
||||
pub fn new<S, E>(s: S) -> Results<S, E> where S: Stream {
|
||||
Results {
|
||||
inner: s,
|
||||
phantom: PhantomData
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream, E> Stream for Results<S, E> {
|
||||
type Item = Result<S::Item, S::Error>;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Result<S::Item, S::Error>>, E> {
|
||||
match self.inner.poll() {
|
||||
Ok(Async::Ready(Some(item))) => Ok(Async::Ready(Some(Ok(item)))),
|
||||
Err(e) => Ok(Async::Ready(Some(Err(e)))),
|
||||
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -36,13 +36,21 @@ impl<S: Sink> Buffer<S> {
|
|||
&mut self.sink
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying sink.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.sink
|
||||
}
|
||||
|
||||
fn try_empty_buffer(&mut self) -> Poll<(), S::SinkError> {
|
||||
while let Some(item) = self.buf.pop_front() {
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink.start_send(item)) {
|
||||
if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
|
||||
self.buf.push_front(item);
|
||||
|
||||
// ensure that we attempt to complete any pushes we've started
|
||||
try!(self.sink.poll_complete());
|
||||
self.sink.poll_complete()?;
|
||||
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
|
@ -67,8 +75,12 @@ impl<S: Sink> Sink for Buffer<S> {
|
|||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
try!(self.try_empty_buffer());
|
||||
if self.buf.len() > self.cap {
|
||||
if self.cap == 0 {
|
||||
return self.sink.start_send(item);
|
||||
}
|
||||
|
||||
self.try_empty_buffer()?;
|
||||
if self.buf.len() == self.cap {
|
||||
return Ok(AsyncSink::NotReady(item));
|
||||
}
|
||||
self.buf.push_back(item);
|
||||
|
@ -76,12 +88,20 @@ impl<S: Sink> Sink for Buffer<S> {
|
|||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
if self.cap == 0 {
|
||||
return self.sink.poll_complete();
|
||||
}
|
||||
|
||||
try_ready!(self.try_empty_buffer());
|
||||
debug_assert!(self.buf.is_empty());
|
||||
self.sink.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
if self.cap == 0 {
|
||||
return self.sink.close();
|
||||
}
|
||||
|
||||
if self.buf.len() > 0 {
|
||||
try_ready!(self.try_empty_buffer());
|
||||
}
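// --- Illustrative aside, not part of the patch ---
// A minimal sketch of `Sink::buffer` with the new zero-capacity fast path in
// mind: with `cap == 0` calls are forwarded straight to the inner sink, while a
// non-zero capacity queues items locally until the inner sink accepts them.
extern crate futures;
use futures::prelude::*;
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<u32>(1);
    let tx = tx.buffer(4);
    let tx = tx.send(1).wait().unwrap();
    let tx = tx.send(2).wait().unwrap();
    drop(tx);
    assert_eq!(rx.collect().wait().unwrap(), vec![1, 2]);
}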
|
||||
|
|
|
@ -0,0 +1,135 @@
|
|||
use core::fmt::{Debug, Formatter, Result as FmtResult};
|
||||
use core::mem::replace;
|
||||
|
||||
use {Async, AsyncSink, Poll, Sink, StartSend};
|
||||
|
||||
/// Sink that clones incoming items and forwards them to two sinks at the same time.
|
||||
///
|
||||
/// Backpressure from any downstream sink propagates up, which means that this sink
|
||||
/// can only process items as fast as its _slowest_ downstream sink.
|
||||
pub struct Fanout<A: Sink, B: Sink> {
|
||||
left: Downstream<A>,
|
||||
right: Downstream<B>
|
||||
}
|
||||
|
||||
impl<A: Sink, B: Sink> Fanout<A, B> {
|
||||
/// Consumes this combinator, returning the underlying sinks.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator,
|
||||
/// so care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> (A, B) {
|
||||
(self.left.sink, self.right.sink)
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: Sink + Debug, B: Sink + Debug> Debug for Fanout<A, B>
|
||||
where A::SinkItem: Debug,
|
||||
B::SinkItem: Debug
|
||||
{
|
||||
fn fmt(&self, f: &mut Formatter) -> FmtResult {
|
||||
f.debug_struct("Fanout")
|
||||
.field("left", &self.left)
|
||||
.field("right", &self.right)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new<A: Sink, B: Sink>(left: A, right: B) -> Fanout<A, B> {
|
||||
Fanout {
|
||||
left: Downstream::new(left),
|
||||
right: Downstream::new(right)
|
||||
}
|
||||
}
|
||||
|
||||
impl<A, B> Sink for Fanout<A, B>
|
||||
where A: Sink,
|
||||
A::SinkItem: Clone,
|
||||
B: Sink<SinkItem=A::SinkItem, SinkError=A::SinkError>
|
||||
{
|
||||
type SinkItem = A::SinkItem;
|
||||
type SinkError = A::SinkError;
|
||||
|
||||
fn start_send(
|
||||
&mut self,
|
||||
item: Self::SinkItem
|
||||
) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
// Attempt to complete processing any outstanding requests.
|
||||
self.left.keep_flushing()?;
|
||||
self.right.keep_flushing()?;
|
||||
// Only if both downstream sinks are ready, start sending the next item.
|
||||
if self.left.is_ready() && self.right.is_ready() {
|
||||
self.left.state = self.left.sink.start_send(item.clone())?;
|
||||
self.right.state = self.right.sink.start_send(item)?;
|
||||
Ok(AsyncSink::Ready)
|
||||
} else {
|
||||
Ok(AsyncSink::NotReady(item))
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
let left_async = self.left.poll_complete()?;
|
||||
let right_async = self.right.poll_complete()?;
|
||||
// Only if both downstream sinks are ready, signal readiness.
|
||||
if left_async.is_ready() && right_async.is_ready() {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
let left_async = self.left.close()?;
|
||||
let right_async = self.right.close()?;
|
||||
// Only if both downstream sinks are ready, signal readiness.
|
||||
if left_async.is_ready() && right_async.is_ready() {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Downstream<S: Sink> {
|
||||
sink: S,
|
||||
state: AsyncSink<S::SinkItem>
|
||||
}
|
||||
|
||||
impl<S: Sink> Downstream<S> {
|
||||
fn new(sink: S) -> Self {
|
||||
Downstream { sink: sink, state: AsyncSink::Ready }
|
||||
}
|
||||
|
||||
fn is_ready(&self) -> bool {
|
||||
self.state.is_ready()
|
||||
}
|
||||
|
||||
fn keep_flushing(&mut self) -> Result<(), S::SinkError> {
|
||||
if let AsyncSink::NotReady(item) = replace(&mut self.state, AsyncSink::Ready) {
|
||||
self.state = self.sink.start_send(item)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.keep_flushing()?;
|
||||
let async = self.sink.poll_complete()?;
|
||||
// Only if all values have been sent _and_ the underlying
|
||||
// sink is completely flushed, signal readiness.
|
||||
if self.state.is_ready() && async.is_ready() {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.keep_flushing()?;
|
||||
// If all items have been flushed, initiate close.
|
||||
if self.state.is_ready() {
|
||||
self.sink.close()
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
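// --- Illustrative aside, not part of the patch ---
// A minimal usage sketch of the new `Sink::fanout` combinator defined above:
// every item is cloned and pushed into both downstream sinks, and back-pressure
// from either side stalls the fanout as a whole.
extern crate futures;
use futures::prelude::*;
use futures::stream;
use futures::sync::mpsc;

fn main() {
    let (tx1, rx1) = mpsc::unbounded::<u32>();
    let (tx2, rx2) = mpsc::unbounded::<u32>();
    let fanout = tx1.fanout(tx2);
    // Drive a small stream into the fanout; both receivers observe every item.
    stream::iter_ok::<_, mpsc::SendError<u32>>(vec![1, 2, 3])
        .forward(fanout)
        .wait()
        .unwrap();
    assert_eq!(rx1.collect().wait().unwrap(), vec![1, 2, 3]);
    assert_eq!(rx2.collect().wait().unwrap(), vec![1, 2, 3]);
}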
|
|
@ -23,6 +23,11 @@ impl<S: Sink> Flush<S> {
|
|||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.sink.as_mut().expect("Attempted `Flush::get_mut` after the flush completed")
|
||||
}
|
||||
|
||||
/// Consume the `Flush` and return the inner sink.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.sink.expect("Attempted `Flush::into_inner` after the flush completed")
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Future for Flush<S> {
|
||||
|
@ -31,7 +36,7 @@ impl<S: Sink> Future for Flush<S> {
|
|||
|
||||
fn poll(&mut self) -> Poll<S, S::SinkError> {
|
||||
let mut sink = self.sink.take().expect("Attempted to poll Flush after it completed");
|
||||
if try!(sink.poll_complete()).is_ready() {
|
||||
if sink.poll_complete()?.is_ready() {
|
||||
Ok(Async::Ready(sink))
|
||||
} else {
|
||||
self.sink = Some(sink);
|
||||
|
|
|
@ -7,7 +7,7 @@ use {Sink, Poll, StartSend};
|
|||
/// This is created by the `Sink::from_err` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct SinkFromErr<S, E> where S: Sink {
|
||||
pub struct SinkFromErr<S, E> {
|
||||
sink: S,
|
||||
f: PhantomData<E>
|
||||
}
|
||||
|
@ -21,6 +21,26 @@ pub fn new<S, E>(sink: S) -> SinkFromErr<S, E>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, E> SinkFromErr<S, E> {
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.sink
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.sink
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying sink.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.sink
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, E> Sink for SinkFromErr<S, E>
|
||||
where S: Sink,
|
||||
E: From<S::SinkError>
|
||||
|
@ -41,7 +61,7 @@ impl<S, E> Sink for SinkFromErr<S, E>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> where S: Sink {
|
||||
impl<S: ::stream::Stream, E> ::stream::Stream for SinkFromErr<S, E> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
use sink::Sink;
|
||||
|
||||
use {Poll, StartSend};
|
||||
use {Poll, StartSend, Stream};
|
||||
|
||||
/// Sink for the `Sink::sink_map_err` combinator.
|
||||
#[derive(Debug)]
|
||||
|
@ -14,6 +14,26 @@ pub fn new<S, F>(s: S, f: F) -> SinkMapErr<S, F> {
|
|||
SinkMapErr { sink: s, f: Some(f) }
|
||||
}
|
||||
|
||||
impl<S, E> SinkMapErr<S, E> {
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.sink
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.sink
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying sink.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.sink
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F, E> Sink for SinkMapErr<S, F>
|
||||
where S: Sink,
|
||||
F: FnOnce(S::SinkError) -> E,
|
||||
|
@ -33,3 +53,12 @@ impl<S, F, E> Sink for SinkMapErr<S, F>
|
|||
self.sink.close().map_err(|e| self.f.take().expect("cannot use MapErr after an error")(e))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream, F> Stream for SinkMapErr<S, F> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.sink.poll()
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1,7 +1,7 @@

//! Asynchronous sinks
//!
//! This module contains the `Sink` trait, along with a number of adapter types
//! for it. An overview is available in the documentaiton for the trait itself.
//! for it. An overview is available in the documentation for the trait itself.
//!
//! You can find more information/tutorials about streams [online at
//! https://tokio.rs][online]
|
||||
|
@ -12,6 +12,7 @@ use {IntoFuture, Poll, StartSend};
|
|||
use stream::Stream;
|
||||
|
||||
mod with;
|
||||
mod with_flat_map;
|
||||
// mod with_map;
|
||||
// mod with_filter;
|
||||
// mod with_filter_map;
|
||||
|
@ -20,6 +21,7 @@ mod from_err;
|
|||
mod send;
|
||||
mod send_all;
|
||||
mod map_err;
|
||||
mod fanout;
|
||||
|
||||
if_std! {
|
||||
mod buffer;
|
||||
|
@ -49,7 +51,7 @@ if_std! {
|
|||
}
|
||||
}
|
||||
|
||||
/// A type alias for `Box<Stream + Send>`
|
||||
/// A type alias for `Box<Sink + Send>`
|
||||
pub type BoxSink<T, E> = ::std::boxed::Box<Sink<SinkItem = T, SinkError = E> +
|
||||
::core::marker::Send>;
|
||||
|
||||
|
@ -73,11 +75,13 @@ if_std! {
|
|||
}
|
||||
|
||||
pub use self::with::With;
|
||||
pub use self::with_flat_map::WithFlatMap;
|
||||
pub use self::flush::Flush;
|
||||
pub use self::send::Send;
|
||||
pub use self::send_all::SendAll;
|
||||
pub use self::map_err::SinkMapErr;
|
||||
pub use self::from_err::SinkFromErr;
|
||||
pub use self::fanout::Fanout;
|
||||
|
||||
/// A `Sink` is a value into which other values can be sent, asynchronously.
|
||||
///
|
||||
|
@ -237,7 +241,7 @@ pub trait Sink {
|
|||
///
|
||||
/// If the value returned is `NotReady` then the sink is not yet closed and
|
||||
/// work needs to be done to close it. The work has been scheduled and the
|
||||
/// current task will recieve a notification when it's next ready to call
|
||||
/// current task will receive a notification when it's next ready to call
|
||||
/// this method again.
|
||||
///
|
||||
/// Finally, this function may also return an error.
|
||||
|
@ -315,6 +319,44 @@ pub trait Sink {
|
|||
with::new(self, f)
|
||||
}
|
||||
|
||||
/// Composes a function *in front of* the sink.
|
||||
///
|
||||
/// This adapter produces a new sink that passes each value through the
|
||||
/// given function `f` before sending it to `self`.
|
||||
///
|
||||
/// To process each value, `f` produces a *stream*, of which each value
|
||||
/// is passed to the underlying sink. A new value will not be accepted until
|
||||
/// the stream has been drained
|
||||
///
|
||||
/// Note that this function consumes the given sink, returning a wrapped
|
||||
/// version, much like `Iterator::flat_map`.
|
||||
///
|
||||
/// # Examples
|
||||
/// ---
|
||||
/// Using this function with an iterator through use of the `stream::iter_ok()`
|
||||
/// function
|
||||
///
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::stream;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (tx, rx) = mpsc::channel::<i32>(5);
|
||||
///
|
||||
/// let tx = tx.with_flat_map(|x| {
|
||||
/// stream::iter_ok(vec![42; x].into_iter().map(|y| y))
|
||||
/// });
|
||||
/// tx.send(5).wait().unwrap();
|
||||
/// assert_eq!(rx.collect().wait(), Ok(vec![42, 42, 42, 42, 42]))
|
||||
/// ```
|
||||
fn with_flat_map<U, F, St>(self, f: F) -> WithFlatMap<Self, U, F, St>
|
||||
where F: FnMut(U) -> St,
|
||||
St: Stream<Item = Self::SinkItem, Error=Self::SinkError>,
|
||||
Self: Sized
|
||||
{
|
||||
with_flat_map::new(self, f)
|
||||
}
|
||||
|
||||
/*
|
||||
fn with_map<U, F>(self, f: F) -> WithMap<Self, U, F>
|
||||
where F: FnMut(U) -> Self::SinkItem,
|
||||
|
@ -367,6 +409,18 @@ pub trait Sink {
|
|||
buffer::new(self, amt)
|
||||
}
|
||||
|
||||
/// Fanout items to multiple sinks.
|
||||
///
|
||||
/// This adapter clones each incoming item and forwards it to both this as well as
|
||||
/// the other sink at the same time.
|
||||
fn fanout<S>(self, other: S) -> Fanout<Self, S>
|
||||
where Self: Sized,
|
||||
Self::SinkItem: Clone,
|
||||
S: Sink<SinkItem=Self::SinkItem, SinkError=Self::SinkError>
|
||||
{
|
||||
fanout::new(self, other)
|
||||
}
|
||||
|
||||
/// A future that completes when the sink has finished processing all
|
||||
/// pending requests.
|
||||
///
|
||||
|
@ -398,11 +452,13 @@ pub trait Sink {
|
|||
///
|
||||
/// This future will drive the stream to keep producing items until it is
|
||||
/// exhausted, sending each item to the sink. It will complete once both the
|
||||
/// stream is exhausted, and the sink has fully processed and flushed all of
|
||||
/// the items sent to it.
|
||||
/// stream is exhausted, the sink has received all items, the sink has been
|
||||
/// flushed, and the sink has been closed.
|
||||
///
|
||||
/// Doing `sink.send_all(stream)` is roughly equivalent to
|
||||
/// `stream.forward(sink)`.
|
||||
/// `stream.forward(sink)`. The returned future will exhaust all items from
|
||||
/// `stream` and send them to `self`, closing `self` when all items have been
|
||||
/// received.
|
||||
///
|
||||
/// On completion, the pair `(sink, source)` is returned.
|
||||
fn send_all<S>(self, stream: S) -> SendAll<Self, S>
|
||||
|
|
|
@ -43,9 +43,9 @@ impl<S: Sink> Future for Send<S> {
|
|||
|
||||
fn poll(&mut self) -> Poll<S, S::SinkError> {
|
||||
if let Some(item) = self.item.take() {
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
|
||||
if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
|
||||
self.item = Some(item);
|
||||
return Ok(Async::NotReady)
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -54,6 +54,6 @@ impl<S: Sink> Future for Send<S> {
|
|||
try_ready!(self.sink_mut().poll_complete());
|
||||
|
||||
// now everything's emptied, so return the sink for further use
|
||||
return Ok(Async::Ready(self.take_sink()))
|
||||
Ok(Async::Ready(self.take_sink()))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -43,12 +43,12 @@ impl<T, U> SendAll<T, U>
|
|||
.expect("Attempted to poll Forward after completion");
|
||||
let fuse = self.stream.take()
|
||||
.expect("Attempted to poll Forward after completion");
|
||||
return (sink, fuse.into_inner());
|
||||
(sink, fuse.into_inner())
|
||||
}
|
||||
|
||||
fn try_start_send(&mut self, item: U::Item) -> Poll<(), T::SinkError> {
|
||||
debug_assert!(self.buffered.is_none());
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
|
||||
if let AsyncSink::NotReady(item) = self.sink_mut().start_send(item)? {
|
||||
self.buffered = Some(item);
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
|
@ -72,7 +72,7 @@ impl<T, U> Future for SendAll<T, U>
|
|||
}
|
||||
|
||||
loop {
|
||||
match try!(self.stream_mut().poll()) {
|
||||
match self.stream_mut().poll()? {
|
||||
Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
|
||||
Async::Ready(None) => {
|
||||
try_ready!(self.sink_mut().close());
|
||||
|
|
|
@ -47,4 +47,13 @@ impl<S: Sink> Wait<S> {
|
|||
pub fn flush(&mut self) -> Result<(), S::SinkError> {
|
||||
self.sink.wait_flush()
|
||||
}
|
||||
|
||||
/// Close this sink, blocking the current thread until it's entirely closed.
|
||||
///
|
||||
/// This function will call the underlying sink's `close` method
|
||||
/// until it returns that it's closed. If the method returns
|
||||
/// `NotReady` the current thread will be blocked until it's otherwise closed.
|
||||
pub fn close(&mut self) -> Result<(), S::SinkError> {
|
||||
self.sink.wait_close()
|
||||
}
|
||||
}
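// --- Illustrative aside, not part of the patch ---
// A minimal sketch of the blocking `Wait` wrapper with the `close` method added
// above: send and flush from synchronous code, then shut the sink down.
extern crate futures;
use futures::prelude::*;
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<u32>(1);
    let mut blocking_tx = tx.wait();
    blocking_tx.send(7).unwrap();
    blocking_tx.flush().unwrap();
    blocking_tx.close().unwrap();
    drop(blocking_tx);
    assert_eq!(rx.collect().wait().unwrap(), vec![7]);
}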
|
||||
|
|
|
@ -81,12 +81,20 @@ impl<S, U, F, Fut> With<S, U, F, Fut>
|
|||
&mut self.sink
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying sink.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.sink
|
||||
}
|
||||
|
||||
fn poll(&mut self) -> Poll<(), Fut::Error> {
|
||||
loop {
|
||||
match mem::replace(&mut self.state, State::Empty) {
|
||||
State::Empty => break,
|
||||
State::Process(mut fut) => {
|
||||
match try!(fut.poll()) {
|
||||
match fut.poll()? {
|
||||
Async::Ready(item) => {
|
||||
self.state = State::Buffered(item);
|
||||
}
|
||||
|
@ -97,7 +105,7 @@ impl<S, U, F, Fut> With<S, U, F, Fut>
|
|||
}
|
||||
}
|
||||
State::Buffered(item) => {
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink.start_send(item)) {
|
||||
if let AsyncSink::NotReady(item) = self.sink.start_send(item)? {
|
||||
self.state = State::Buffered(item);
|
||||
break
|
||||
}
|
||||
|
@ -123,7 +131,7 @@ impl<S, U, F, Fut> Sink for With<S, U, F, Fut>
|
|||
type SinkError = Fut::Error;
|
||||
|
||||
fn start_send(&mut self, item: Self::SinkItem) -> StartSend<Self::SinkItem, Fut::Error> {
|
||||
if try!(self.poll()).is_not_ready() {
|
||||
if self.poll()?.is_not_ready() {
|
||||
return Ok(AsyncSink::NotReady(item))
|
||||
}
|
||||
self.state = State::Process((self.f)(item).into_future());
|
||||
|
@ -132,7 +140,7 @@ impl<S, U, F, Fut> Sink for With<S, U, F, Fut>
|
|||
|
||||
fn poll_complete(&mut self) -> Poll<(), Fut::Error> {
|
||||
// poll ourselves first, to push data downward
|
||||
let me_ready = try!(self.poll());
|
||||
let me_ready = self.poll()?;
|
||||
// always propagate `poll_complete` downward to attempt to make progress
|
||||
try_ready!(self.sink.poll_complete());
|
||||
Ok(me_ready)
|
||||
|
@ -140,6 +148,6 @@ impl<S, U, F, Fut> Sink for With<S, U, F, Fut>
|
|||
|
||||
fn close(&mut self) -> Poll<(), Fut::Error> {
|
||||
try_ready!(self.poll());
|
||||
Ok(try!(self.sink.close()))
|
||||
Ok(self.sink.close()?)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,126 @@
|
|||
use core::marker::PhantomData;
|
||||
|
||||
use {Poll, Async, StartSend, AsyncSink};
|
||||
use sink::Sink;
|
||||
use stream::Stream;
|
||||
|
||||
/// Sink for the `Sink::with_flat_map` combinator, chaining a computation that returns an iterator
|
||||
/// to run prior to pushing a value into the underlying sink
|
||||
#[derive(Debug)]
|
||||
#[must_use = "sinks do nothing unless polled"]
|
||||
pub struct WithFlatMap<S, U, F, St>
|
||||
where
|
||||
S: Sink,
|
||||
F: FnMut(U) -> St,
|
||||
St: Stream<Item = S::SinkItem, Error=S::SinkError>,
|
||||
{
|
||||
sink: S,
|
||||
f: F,
|
||||
stream: Option<St>,
|
||||
buffer: Option<S::SinkItem>,
|
||||
_phantom: PhantomData<fn(U)>,
|
||||
}
|
||||
|
||||
pub fn new<S, U, F, St>(sink: S, f: F) -> WithFlatMap<S, U, F, St>
|
||||
where
|
||||
S: Sink,
|
||||
F: FnMut(U) -> St,
|
||||
St: Stream<Item = S::SinkItem, Error=S::SinkError>,
|
||||
{
|
||||
WithFlatMap {
|
||||
sink: sink,
|
||||
f: f,
|
||||
stream: None,
|
||||
buffer: None,
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, U, F, St> WithFlatMap<S, U, F, St>
|
||||
where
|
||||
S: Sink,
|
||||
F: FnMut(U) -> St,
|
||||
St: Stream<Item = S::SinkItem, Error=S::SinkError>,
|
||||
{
|
||||
/// Get a shared reference to the inner sink.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.sink
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner sink.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.sink
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying sink.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.sink
|
||||
}
|
||||
|
||||
fn try_empty_stream(&mut self) -> Poll<(), S::SinkError> {
|
||||
if let Some(x) = self.buffer.take() {
|
||||
if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
|
||||
self.buffer = Some(x);
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
if let Some(mut stream) = self.stream.take() {
|
||||
while let Some(x) = try_ready!(stream.poll()) {
|
||||
if let AsyncSink::NotReady(x) = self.sink.start_send(x)? {
|
||||
self.stream = Some(stream);
|
||||
self.buffer = Some(x);
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, U, F, St> Stream for WithFlatMap<S, U, F, St>
|
||||
where
|
||||
S: Stream + Sink,
|
||||
F: FnMut(U) -> St,
|
||||
St: Stream<Item = S::SinkItem, Error=S::SinkError>,
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.sink.poll()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, U, F, St> Sink for WithFlatMap<S, U, F, St>
|
||||
where
|
||||
S: Sink,
|
||||
F: FnMut(U) -> St,
|
||||
St: Stream<Item = S::SinkItem, Error=S::SinkError>,
|
||||
{
|
||||
type SinkItem = U;
|
||||
type SinkError = S::SinkError;
|
||||
fn start_send(&mut self, i: Self::SinkItem) -> StartSend<Self::SinkItem, Self::SinkError> {
|
||||
if self.try_empty_stream()?.is_not_ready() {
|
||||
return Ok(AsyncSink::NotReady(i));
|
||||
}
|
||||
assert!(self.stream.is_none());
|
||||
self.stream = Some((self.f)(i));
|
||||
self.try_empty_stream()?;
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
|
||||
if self.try_empty_stream()?.is_not_ready() {
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
self.sink.poll_complete()
|
||||
}
|
||||
fn close(&mut self) -> Poll<(), Self::SinkError> {
|
||||
if self.try_empty_stream()?.is_not_ready() {
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
assert!(self.stream.is_none());
|
||||
self.sink.close()
|
||||
}
|
||||
}
|
|
@ -1,140 +0,0 @@
|
|||
//! A lock-free stack which supports concurrent pushes and a concurrent call to
|
||||
//! drain the entire stack all at once.
|
||||
|
||||
use std::prelude::v1::*;
|
||||
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::atomic::AtomicPtr;
|
||||
use std::sync::atomic::Ordering::SeqCst;
|
||||
|
||||
use task::EventSet;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Stack<T> {
|
||||
head: AtomicPtr<Node<T>>,
|
||||
}
|
||||
|
||||
struct Node<T> {
|
||||
data: T,
|
||||
next: *mut Node<T>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Drain<T> {
|
||||
head: *mut Node<T>,
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for Drain<T> {}
|
||||
unsafe impl<T: Sync> Sync for Drain<T> {}
|
||||
|
||||
impl<T> Stack<T> {
|
||||
pub fn new() -> Stack<T> {
|
||||
Stack {
|
||||
head: AtomicPtr::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push(&self, data: T) {
|
||||
let mut node = Box::new(Node { data: data, next: ptr::null_mut() });
|
||||
let mut head = self.head.load(SeqCst);
|
||||
loop {
|
||||
node.next = head;
|
||||
match self.head.compare_exchange(head, &mut *node, SeqCst, SeqCst) {
|
||||
Ok(_) => {
|
||||
mem::forget(node);
|
||||
return
|
||||
}
|
||||
Err(cur) => head = cur,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn drain(&self) -> Drain<T> {
|
||||
Drain {
|
||||
head: self.head.swap(ptr::null_mut(), SeqCst),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Stack<T> {
|
||||
fn drop(&mut self) {
|
||||
self.drain();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Iterator for Drain<T> {
|
||||
type Item = T;
|
||||
|
||||
fn next(&mut self) -> Option<T> {
|
||||
if self.head.is_null() {
|
||||
return None
|
||||
}
|
||||
unsafe {
|
||||
let node = Box::from_raw(self.head);
|
||||
self.head = node.next;
|
||||
return Some(node.data)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Drain<T> {
|
||||
fn drop(&mut self) {
|
||||
for item in self.by_ref() {
|
||||
drop(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::prelude::v1::*;
|
||||
use std::rc::Rc;
|
||||
use std::cell::Cell;
|
||||
|
||||
use super::Stack;
|
||||
|
||||
struct Set(Rc<Cell<usize>>, usize);
|
||||
|
||||
impl Drop for Set {
|
||||
fn drop(&mut self) {
|
||||
self.0.set(self.1);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple() {
|
||||
let s = Stack::new();
|
||||
s.push(1);
|
||||
s.push(2);
|
||||
s.push(4);
|
||||
assert_eq!(s.drain().collect::<Vec<_>>(), vec![4, 2, 1]);
|
||||
s.push(5);
|
||||
assert_eq!(s.drain().collect::<Vec<_>>(), vec![5]);
|
||||
assert_eq!(s.drain().collect::<Vec<_>>(), vec![]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drain_drops() {
|
||||
let data = Rc::new(Cell::new(0));
|
||||
let s = Stack::new();
|
||||
s.push(Set(data.clone(), 1));
|
||||
drop(s.drain());
|
||||
assert_eq!(data.get(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drop_drops() {
|
||||
let data = Rc::new(Cell::new(0));
|
||||
let s = Stack::new();
|
||||
s.push(Set(data.clone(), 1));
|
||||
drop(s);
|
||||
assert_eq!(data.get(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
impl EventSet for Stack<usize> {
|
||||
fn insert(&self, id: usize) {
|
||||
self.push(id);
|
||||
}
|
||||
}
|
|
@ -27,6 +27,33 @@ pub fn new<S, F, U>(s: S, f: F) -> AndThen<S, F, U>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, F, U> AndThen<S, F, U>
|
||||
where U: IntoFuture,
|
||||
{
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F, U: IntoFuture> ::sink::Sink for AndThen<S, F, U>
|
||||
where S: ::sink::Sink
|
||||
|
|
|
@ -1,13 +1,7 @@
|
|||
use std::prelude::v1::*;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::sync::Arc;
|
||||
|
||||
use task::{self, UnparkEvent};
|
||||
|
||||
use {Async, IntoFuture, Poll, Future};
|
||||
use stream::{Stream, Fuse};
|
||||
use stack::{Stack, Drain};
|
||||
use {Async, IntoFuture, Poll};
|
||||
use stream::{Stream, Fuse, FuturesUnordered};
|
||||
|
||||
/// An adaptor for a stream of futures to execute the futures concurrently, if
|
||||
/// possible, delivering results as they become available.
|
||||
|
@ -21,26 +15,8 @@ pub struct BufferUnordered<S>
|
|||
S::Item: IntoFuture,
|
||||
{
|
||||
stream: Fuse<S>,
|
||||
|
||||
// A slab of futures that are being executed. Each slot in this vector is
|
||||
// either an active future or a pointer to the next empty slot. This is used
|
||||
// to get O(1) deallocation in the slab and O(1) allocation.
|
||||
//
|
||||
// The `next_future` field is the next slot in the `futures` array that's a
|
||||
// `Slot::Next` variant. If it points to the end of the array then the array
|
||||
// is full.
|
||||
futures: Vec<Slot<<S::Item as IntoFuture>::Future>>,
|
||||
next_future: usize,
|
||||
|
||||
// A list of events that will get pushed onto concurrently by our many
|
||||
// futures. This is filled in and used with the `with_unpark_event`
|
||||
// function. The `pending` list here is the last time we drained events from
|
||||
// our stack.
|
||||
stack: Arc<Stack<usize>>,
|
||||
pending: Drain<usize>,
|
||||
|
||||
// Number of active futures running in the `futures` slab
|
||||
active: usize,
|
||||
queue: FuturesUnordered<<S::Item as IntoFuture>::Future>,
|
||||
max: usize,
|
||||
}
|
||||
|
||||
impl<S> fmt::Debug for BufferUnordered<S>
|
||||
|
@ -51,32 +27,20 @@ impl<S> fmt::Debug for BufferUnordered<S>
|
|||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("BufferUnordered")
|
||||
.field("stream", &self.stream)
|
||||
.field("futures", &self.futures)
|
||||
.field("next_future", &self.next_future)
|
||||
.field("stack", &self.stack)
|
||||
.field("pending", &self.pending)
|
||||
.field("active", &self.active)
|
||||
.field("queue", &self.queue)
|
||||
.field("max", &self.max)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Slot<T> {
|
||||
Next(usize),
|
||||
Data(T),
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, amt: usize) -> BufferUnordered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
BufferUnordered {
|
||||
stream: super::fuse::new(s),
|
||||
futures: (0..amt).map(|i| Slot::Next(i + 1)).collect(),
|
||||
next_future: 0,
|
||||
pending: Stack::new().drain(),
|
||||
stack: Arc::new(Stack::new()),
|
||||
active: 0,
|
||||
queue: FuturesUnordered::new(),
|
||||
max: amt,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -84,27 +48,27 @@ impl<S> BufferUnordered<S>
|
|||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
fn poll_pending(&mut self)
|
||||
-> Option<Poll<Option<<S::Item as IntoFuture>::Item>,
|
||||
S::Error>> {
|
||||
while let Some(idx) = self.pending.next() {
|
||||
let result = match self.futures[idx] {
|
||||
Slot::Data(ref mut f) => {
|
||||
let event = UnparkEvent::new(self.stack.clone(), idx);
|
||||
match task::with_unpark_event(event, || f.poll()) {
|
||||
Ok(Async::NotReady) => continue,
|
||||
Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
},
|
||||
Slot::Next(_) => continue,
|
||||
};
|
||||
self.active -= 1;
|
||||
self.futures[idx] = Slot::Next(self.next_future);
|
||||
self.next_future = idx;
|
||||
return Some(result)
|
||||
}
|
||||
None
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
self.stream.get_ref()
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.stream.get_mut()
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream.into_inner()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -118,43 +82,29 @@ impl<S> Stream for BufferUnordered<S>
|
|||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
// First up, try to spawn off as many futures as possible by filling up
|
||||
// our slab of futures.
|
||||
while self.next_future < self.futures.len() {
|
||||
let future = match try!(self.stream.poll()) {
|
||||
while self.queue.len() < self.max {
|
||||
let future = match self.stream.poll()? {
|
||||
Async::Ready(Some(s)) => s.into_future(),
|
||||
Async::Ready(None) |
|
||||
Async::NotReady => break,
|
||||
};
|
||||
self.active += 1;
|
||||
self.stack.push(self.next_future);
|
||||
match mem::replace(&mut self.futures[self.next_future],
|
||||
Slot::Data(future)) {
|
||||
Slot::Next(next) => self.next_future = next,
|
||||
Slot::Data(_) => panic!(),
|
||||
}
|
||||
|
||||
self.queue.push(future);
|
||||
}
|
||||
|
||||
// Next, see if our list of `pending` events from last time has any
|
||||
// items, and if so process them here.
|
||||
if let Some(ret) = self.poll_pending() {
|
||||
return ret
|
||||
// Try polling a new future
|
||||
if let Some(val) = try_ready!(self.queue.poll()) {
|
||||
return Ok(Async::Ready(Some(val)));
|
||||
}
|
||||
|
||||
// And finally, take a look at our stack of events, attempting to
|
||||
// process all of those.
|
||||
assert!(self.pending.next().is_none());
|
||||
self.pending = self.stack.drain();
|
||||
if let Some(ret) = self.poll_pending() {
|
||||
return ret
|
||||
}
|
||||
|
||||
// If we've gotten this far then there's no events for us to process and
|
||||
// nothing was ready, so figure out if we're not done yet or if we've
|
||||
// reached the end.
|
||||
Ok(if self.active > 0 || !self.stream.is_done() {
|
||||
Async::NotReady
|
||||
// If we've gotten this far, then there are no events for us to process
|
||||
// and nothing was ready, so figure out if we're not done yet or if
|
||||
// we've reached the end.
|
||||
if self.stream.is_done() {
|
||||
Ok(Async::Ready(None))
|
||||
} else {
|
||||
Async::Ready(None)
|
||||
})
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
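// --- Illustrative aside, not part of the patch ---
// A minimal sketch of `Stream::buffer_unordered` after the rewrite above: up to
// `amt` futures are driven at once through a `FuturesUnordered` queue, and
// results are yielded as the futures finish rather than in input order.
extern crate futures;
use futures::prelude::*;
use futures::{future, stream};

fn main() {
    let mut results = stream::iter_ok::<_, ()>(vec![1u32, 2, 3, 4])
        .map(|n| future::ok::<u32, ()>(n * 10))
        .buffer_unordered(2)
        .collect()
        .wait()
        .unwrap();
    // Completion order is not guaranteed, so compare after sorting.
    results.sort();
    assert_eq!(results, vec![10, 20, 30, 40]);
}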
|
||||
|
||||
|
|
|
@ -1,10 +1,7 @@
|
|||
use std::prelude::v1::*;
|
||||
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
|
||||
use {Async, IntoFuture, Poll, Future};
|
||||
use stream::{Stream, Fuse};
|
||||
use {Async, IntoFuture, Poll};
|
||||
use stream::{Stream, Fuse, FuturesOrdered};
|
||||
|
||||
/// An adaptor for a stream of futures to execute the futures concurrently, if
|
||||
/// possible.
|
||||
|
@ -18,8 +15,8 @@ pub struct Buffered<S>
|
|||
S::Item: IntoFuture,
|
||||
{
|
||||
stream: Fuse<S>,
|
||||
futures: Vec<State<<S::Item as IntoFuture>::Future>>,
|
||||
cur: usize,
|
||||
queue: FuturesOrdered<<S::Item as IntoFuture>::Future>,
|
||||
max: usize,
|
||||
}
|
||||
|
||||
impl<S> fmt::Debug for Buffered<S>
|
||||
|
@ -30,29 +27,50 @@ impl<S> fmt::Debug for Buffered<S>
|
|||
<<S as Stream>::Item as IntoFuture>::Error: fmt::Debug,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_struct("Stream")
|
||||
fmt.debug_struct("Buffered")
|
||||
.field("stream", &self.stream)
|
||||
.field("futures", &self.futures)
|
||||
.field("cur", &self.cur)
|
||||
.field("queue", &self.queue)
|
||||
.field("max", &self.max)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum State<S: Future> {
|
||||
Empty,
|
||||
Running(S),
|
||||
Finished(Result<S::Item, S::Error>),
|
||||
}
|
||||
|
||||
pub fn new<S>(s: S, amt: usize) -> Buffered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
Buffered {
|
||||
stream: super::fuse::new(s),
|
||||
futures: (0..amt).map(|_| State::Empty).collect(),
|
||||
cur: 0,
|
||||
queue: FuturesOrdered::new(),
|
||||
max: amt,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Buffered<S>
|
||||
where S: Stream,
|
||||
S::Item: IntoFuture<Error=<S as Stream>::Error>,
|
||||
{
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
self.stream.get_ref()
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.stream.get_mut()
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream.into_inner()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -85,58 +103,30 @@ impl<S> Stream for Buffered<S>
|
|||
type Error = <S as Stream>::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
// First, try to fill in all the futures
|
||||
for i in 0..self.futures.len() {
|
||||
let mut idx = self.cur + i;
|
||||
if idx >= self.futures.len() {
|
||||
idx -= self.futures.len();
|
||||
}
|
||||
|
||||
if let State::Empty = self.futures[idx] {
|
||||
match try!(self.stream.poll()) {
|
||||
Async::Ready(Some(future)) => {
|
||||
let future = future.into_future();
|
||||
self.futures[idx] = State::Running(future);
|
||||
}
|
||||
Async::Ready(None) => break,
|
||||
Async::NotReady => break,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Next, try and step all the futures forward
|
||||
for future in self.futures.iter_mut() {
|
||||
let result = match *future {
|
||||
State::Running(ref mut s) => {
|
||||
match s.poll() {
|
||||
Ok(Async::NotReady) => continue,
|
||||
Ok(Async::Ready(e)) => Ok(e),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
_ => continue,
|
||||
// First up, try to spawn off as many futures as possible by filling up
|
||||
// our slab of futures.
|
||||
while self.queue.len() < self.max {
|
||||
let future = match self.stream.poll()? {
|
||||
Async::Ready(Some(s)) => s.into_future(),
|
||||
Async::Ready(None) |
|
||||
Async::NotReady => break,
|
||||
};
|
||||
*future = State::Finished(result);
|
||||
|
||||
self.queue.push(future);
|
||||
}
|
||||
|
||||
// Check to see if our current future is done.
|
||||
if let State::Finished(_) = self.futures[self.cur] {
|
||||
let r = match mem::replace(&mut self.futures[self.cur], State::Empty) {
|
||||
State::Finished(r) => r,
|
||||
_ => panic!(),
|
||||
};
|
||||
self.cur += 1;
|
||||
if self.cur >= self.futures.len() {
|
||||
self.cur = 0;
|
||||
}
|
||||
return Ok(Async::Ready(Some(try!(r))))
|
||||
// Try polling a new future
|
||||
if let Some(val) = try_ready!(self.queue.poll()) {
|
||||
return Ok(Async::Ready(Some(val)));
|
||||
}
|
||||
|
||||
// If we've gotten this far, then there are no events for us to process
|
||||
// and nothing was ready, so figure out if we're not done yet or if
|
||||
// we've reached the end.
|
||||
if self.stream.is_done() {
|
||||
if let State::Empty = self.futures[self.cur] {
|
||||
return Ok(Async::Ready(None))
|
||||
}
|
||||
Ok(Async::Ready(None))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
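// --- Illustrative aside, not part of the patch ---
// The ordered counterpart: `Stream::buffered` now drives up to `amt` futures at
// once through a `FuturesOrdered` queue while still yielding results in the
// order the futures were produced by the stream.
extern crate futures;
use futures::prelude::*;
use futures::{future, stream};

fn main() {
    let out = stream::iter_ok::<_, ()>(vec![3u32, 1, 2])
        .map(|n| future::ok::<u32, ()>(n))
        .buffered(2)
        .collect()
        .wait()
        .unwrap();
    assert_eq!(out, vec![3, 1, 2]);
}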
|
||||
|
|
|
@ -57,6 +57,29 @@ impl<S> Chunks<S> where S: Stream {
|
|||
let cap = self.items.capacity();
|
||||
mem::replace(&mut self.items, Vec::with_capacity(cap))
|
||||
}
|
||||
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
self.stream.get_ref()
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.stream.get_mut()
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream.into_inner()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Stream for Chunks<S>
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
use core::mem;
|
||||
use core::fmt::{Debug, Formatter, Result as FmtResult};
|
||||
use core::default::Default;
|
||||
|
||||
use {Poll, Async};
|
||||
use future::Future;
|
||||
|
@ -8,25 +10,82 @@ use stream::Stream;
|
|||
/// yielded item.
|
||||
///
|
||||
/// This structure is produced by the `Stream::concat` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Concat2<S>
|
||||
where S: Stream,
|
||||
{
|
||||
inner: ConcatSafe<S>
|
||||
}
|
||||
|
||||
impl<S: Debug> Debug for Concat2<S> where S: Stream, S::Item: Debug {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
|
||||
fmt.debug_struct("Concat2")
|
||||
.field("inner", &self.inner)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new2<S>(s: S) -> Concat2<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
|
||||
{
|
||||
Concat2 {
|
||||
inner: new_safe(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Future for Concat2<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
|
||||
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
self.inner.poll().map(|a| {
|
||||
match a {
|
||||
Async::NotReady => Async::NotReady,
|
||||
Async::Ready(None) => Async::Ready(Default::default()),
|
||||
Async::Ready(Some(e)) => Async::Ready(e)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
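// --- Illustrative aside, not part of the patch ---
// A minimal sketch of `Stream::concat2`, which this `Concat2` future backs: it
// folds every yielded collection into the first one, and resolves to the
// `Default` (empty) value when the stream is empty instead of panicking.
extern crate futures;
use futures::prelude::*;
use futures::stream;

fn main() {
    let chunks = stream::iter_ok::<_, ()>(vec![vec![1, 2], vec![3], vec![]]);
    assert_eq!(chunks.concat2().wait().unwrap(), vec![1, 2, 3]);

    let empty = stream::iter_ok::<Vec<Vec<u32>>, ()>(vec![]);
    assert_eq!(empty.concat2().wait().unwrap(), Vec::<u32>::new());
}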
|
||||
|
||||
|
||||
/// A stream combinator to concatenate the results of a stream into the first
|
||||
/// yielded item.
|
||||
///
|
||||
/// This structure is produced by the `Stream::concat` method.
|
||||
#[deprecated(since="0.1.18", note="please use `Stream::Concat2` instead")]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Concat<S>
|
||||
where S: Stream,
|
||||
{
|
||||
stream: S,
|
||||
extend: Inner<S::Item>,
|
||||
inner: ConcatSafe<S>
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
impl<S: Debug> Debug for Concat<S> where S: Stream, S::Item: Debug {
|
||||
fn fmt(&self, fmt: &mut Formatter) -> FmtResult {
|
||||
fmt.debug_struct("Concat")
|
||||
.field("inner", &self.inner)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
pub fn new<S>(s: S) -> Concat<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
{
|
||||
Concat {
|
||||
stream: s,
|
||||
extend: Inner::First,
|
||||
inner: new_safe(s)
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
impl<S> Future for Concat<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
|
@ -35,6 +94,44 @@ impl<S> Future for Concat<S>
|
|||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
self.inner.poll().map(|a| {
|
||||
match a {
|
||||
Async::NotReady => Async::NotReady,
|
||||
Async::Ready(None) => panic!("attempted concatenation of empty stream"),
|
||||
Async::Ready(Some(e)) => Async::Ready(e)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ConcatSafe<S>
|
||||
where S: Stream,
|
||||
{
|
||||
stream: S,
|
||||
extend: Inner<S::Item>,
|
||||
}
|
||||
|
||||
fn new_safe<S>(s: S) -> ConcatSafe<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
{
|
||||
ConcatSafe {
|
||||
stream: s,
|
||||
extend: Inner::First,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Future for ConcatSafe<S>
|
||||
where S: Stream,
|
||||
S::Item: Extend<<<S as Stream>::Item as IntoIterator>::Item> + IntoIterator,
|
||||
|
||||
{
|
||||
type Item = Option<S::Item>;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
loop {
|
||||
match self.stream.poll() {
|
||||
|
@ -49,10 +146,16 @@ impl<S> Future for Concat<S>
|
|||
Inner::Done => unreachable!(),
|
||||
}
|
||||
},
|
||||
Ok(Async::Ready(None)) => return Ok(Async::Ready(expect(self.extend.take()))),
|
||||
Ok(Async::Ready(None)) => {
|
||||
match mem::replace(&mut self.extend, Inner::Done) {
|
||||
Inner::First => return Ok(Async::Ready(None)),
|
||||
Inner::Extending(e) => return Ok(Async::Ready(Some(e))),
|
||||
Inner::Done => panic!("cannot poll Concat again")
|
||||
}
|
||||
},
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Err(e) => {
|
||||
self.extend.take();
|
||||
self.extend = Inner::Done;
|
||||
return Err(e)
|
||||
}
|
||||
}
|
||||
|
@ -60,22 +163,10 @@ impl<S> Future for Concat<S>
|
|||
}
|
||||
}
|
||||
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Inner<E> {
|
||||
First,
|
||||
Extending(E),
|
||||
Done,
|
||||
}
|
||||
|
||||
impl<E> Inner<E> {
|
||||
fn take(&mut self) -> Option<E> {
|
||||
match mem::replace(self, Inner::Done) {
|
||||
Inner::Extending(e) => Some(e),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn expect<T>(opt: Option<T>) -> T {
|
||||
opt.expect("cannot poll Concat again")
|
||||
}
|
||||
}
|
|
@ -22,6 +22,31 @@ pub fn new<S, F>(s: S, f: F) -> Filter<S, F>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, F> Filter<S, F> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for Filter<S, F>
|
||||
where S: ::sink::Sink
|
||||
|
|
|
@ -22,6 +22,31 @@ pub fn new<S, F, B>(s: S, f: F) -> FilterMap<S, F>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, F> FilterMap<S, F> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for FilterMap<S, F>
|
||||
where S: ::sink::Sink
|
||||
|
|
|
@ -25,6 +25,31 @@ pub fn new<S>(s: S) -> Flatten<S>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Flatten<S> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Flatten<S>
|
||||
where S: ::sink::Sink + Stream
|
||||
|
|
|
@ -53,7 +53,7 @@ impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
|
|||
match mem::replace(&mut self.state, State::Empty) {
|
||||
State::Empty => panic!("cannot poll Fold twice"),
|
||||
State::Ready(state) => {
|
||||
match try!(self.stream.poll()) {
|
||||
match self.stream.poll()? {
|
||||
Async::Ready(Some(e)) => {
|
||||
let future = (self.f)(state, e);
|
||||
let future = future.into_future();
|
||||
|
@ -67,7 +67,7 @@ impl<S, F, Fut, T> Future for Fold<S, F, Fut, T>
|
|||
}
|
||||
}
|
||||
State::Processing(mut fut) => {
|
||||
match try!(fut.poll()) {
|
||||
match fut.poll()? {
|
||||
Async::Ready(state) => self.state = State::Ready(state),
|
||||
Async::NotReady => {
|
||||
self.state = State::Processing(fut);
|
||||
|
|
|
@ -36,7 +36,7 @@ impl<S, F, U> Future for ForEach<S, F, U>
|
|||
fn poll(&mut self) -> Poll<(), S::Error> {
|
||||
loop {
|
||||
if let Some(mut fut) = self.fut.take() {
|
||||
if try!(fut.poll()).is_not_ready() {
|
||||
if fut.poll()?.is_not_ready() {
|
||||
self.fut = Some(fut);
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
|
|
|
@ -30,14 +30,28 @@ impl<T, U> Forward<T, U>
|
|||
T: Stream,
|
||||
T::Error: From<U::SinkError>,
|
||||
{
|
||||
fn sink_mut(&mut self) -> &mut U {
|
||||
self.sink.as_mut().take()
|
||||
.expect("Attempted to poll Forward after completion")
|
||||
/// Get a shared reference to the inner sink.
|
||||
/// If this combinator has already been polled to completion, None will be returned.
|
||||
pub fn sink_ref(&self) -> Option<&U> {
|
||||
self.sink.as_ref()
|
||||
}
|
||||
|
||||
fn stream_mut(&mut self) -> &mut Fuse<T> {
|
||||
self.stream.as_mut().take()
|
||||
.expect("Attempted to poll Forward after completion")
|
||||
/// Get a mutable reference to the inner sink.
|
||||
/// If this combinator has already been polled to completion, None will be returned.
|
||||
pub fn sink_mut(&mut self) -> Option<&mut U> {
|
||||
self.sink.as_mut()
|
||||
}
|
||||
|
||||
/// Get a shared reference to the inner stream.
|
||||
/// If this combinator has already been polled to completion, None will be returned.
|
||||
pub fn stream_ref(&self) -> Option<&T> {
|
||||
self.stream.as_ref().map(|x| x.get_ref())
|
||||
}
|
||||
|
||||
/// Get a mutable reference to the inner stream.
|
||||
/// If this combinator has already been polled to completion, None will be returned.
|
||||
pub fn stream_mut(&mut self) -> Option<&mut T> {
|
||||
self.stream.as_mut().map(|x| x.get_mut())
|
||||
}
|
||||
|
||||
fn take_result(&mut self) -> (T, U) {
|
||||
|
@ -45,12 +59,15 @@ impl<T, U> Forward<T, U>
|
|||
.expect("Attempted to poll Forward after completion");
|
||||
let fuse = self.stream.take()
|
||||
.expect("Attempted to poll Forward after completion");
|
||||
return (fuse.into_inner(), sink)
|
||||
(fuse.into_inner(), sink)
|
||||
}
|
||||
|
||||
fn try_start_send(&mut self, item: T::Item) -> Poll<(), U::SinkError> {
|
||||
debug_assert!(self.buffered.is_none());
|
||||
if let AsyncSink::NotReady(item) = try!(self.sink_mut().start_send(item)) {
|
||||
if let AsyncSink::NotReady(item) = self.sink_mut()
|
||||
.take().expect("Attempted to poll Forward after completion")
|
||||
.start_send(item)?
|
||||
{
|
||||
self.buffered = Some(item);
|
||||
return Ok(Async::NotReady)
|
||||
}
@ -74,14 +91,17 @@ impl<T, U> Future for Forward<T, U>
}
|
||||
|
||||
loop {
|
||||
match try!(self.stream_mut().poll()) {
|
||||
match self.stream_mut()
|
||||
.take().expect("Attempted to poll Forward after completion")
|
||||
.poll()?
|
||||
{
|
||||
Async::Ready(Some(item)) => try_ready!(self.try_start_send(item)),
|
||||
Async::Ready(None) => {
|
||||
try_ready!(self.sink_mut().close());
|
||||
try_ready!(self.sink_mut().take().expect("Attempted to poll Forward after completion").close());
|
||||
return Ok(Async::Ready(self.take_result()))
|
||||
}
|
||||
Async::NotReady => {
|
||||
try_ready!(self.sink_mut().poll_complete());
|
||||
try_ready!(self.sink_mut().take().expect("Attempted to poll Forward after completion").poll_complete());
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@ use stream::Stream;
|
|||
/// This is created by the `Stream::from_err` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "futures do nothing unless polled"]
|
||||
pub struct FromErr<S, E> where S: Stream {
|
||||
pub struct FromErr<S, E> {
|
||||
stream: S,
|
||||
f: PhantomData<E>
|
||||
}
|
||||
|
@ -22,6 +22,32 @@ pub fn new<S, E>(stream: S) -> FromErr<S, E>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, E> FromErr<S, E> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
impl<S: Stream, E: From<S::Error>> Stream for FromErr<S, E> {
|
||||
type Item = S::Item;
|
||||
type Error = E;
|
||||
|
|
|
@ -64,7 +64,25 @@ impl<S> Fuse<S> {
|
|||
self.done
|
||||
}
|
||||
|
||||
/// Recover original stream
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
|
|
|
@ -0,0 +1,213 @@
|
|||
use std::cmp::{Eq, PartialEq, PartialOrd, Ord, Ordering};
|
||||
use std::collections::BinaryHeap;
|
||||
use std::fmt::{self, Debug};
|
||||
use std::iter::FromIterator;
|
||||
|
||||
use {Async, Future, IntoFuture, Poll, Stream};
|
||||
use stream::FuturesUnordered;
|
||||
|
||||
#[derive(Debug)]
|
||||
struct OrderWrapper<T> {
|
||||
item: T,
|
||||
index: usize,
|
||||
}
|
||||
|
||||
impl<T> PartialEq for OrderWrapper<T> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.index == other.index
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Eq for OrderWrapper<T> {}
|
||||
|
||||
impl<T> PartialOrd for OrderWrapper<T> {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Ord for OrderWrapper<T> {
|
||||
fn cmp(&self, other: &Self) -> Ordering {
|
||||
// BinaryHeap is a max heap, so compare backwards here.
|
||||
other.index.cmp(&self.index)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Future for OrderWrapper<T>
|
||||
where T: Future
|
||||
{
|
||||
type Item = OrderWrapper<T::Item>;
|
||||
type Error = T::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
|
||||
let result = try_ready!(self.item.poll());
|
||||
Ok(Async::Ready(OrderWrapper {
|
||||
item: result,
|
||||
index: self.index
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
/// An unbounded queue of futures.
|
||||
///
|
||||
/// This "combinator" is similar to `FuturesUnordered`, but it imposes an order
|
||||
/// on top of the set of futures. While futures in the set will race to
|
||||
/// completion in parallel, results will only be returned in the order their
|
||||
/// originating futures were added to the queue.
|
||||
///
|
||||
/// Futures are pushed into this queue and their realized values are yielded in
|
||||
/// order. This structure is optimized to manage a large number of futures.
|
||||
/// Futures managed by `FuturesOrdered` will only be polled when they generate
|
||||
/// notifications. This reduces the amount of work needed to coordinate
|
||||
/// large numbers of futures.
|
||||
///
|
||||
/// When a `FuturesOrdered` is first created, it does not contain any futures.
|
||||
/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` being
|
||||
/// returned. Futures are submitted to the queue using `push`; however, the
|
||||
/// future will **not** be polled at this point. `FuturesOrdered` will only
|
||||
/// poll managed futures when `FuturesOrdered::poll` is called. As such, it
|
||||
/// is important to call `poll` after pushing new futures.
|
||||
///
|
||||
/// If `FuturesOrdered::poll` returns `Ok(Async::Ready(None))` this means that
|
||||
/// the queue is currently not managing any futures. A future may be submitted
|
||||
/// to the queue at a later time. At that point, a call to
|
||||
/// `FuturesOrdered::poll` will either return the future's resolved value
|
||||
/// **or** `Ok(Async::NotReady)` if the future has not yet completed. When
|
||||
/// multiple futures are submitted to the queue, `FuturesOrdered::poll` will
|
||||
/// return `Ok(Async::NotReady)` until the first future completes, even if
|
||||
/// some of the later futures have already completed.
|
||||
///
|
||||
/// Note that you can create a ready-made `FuturesOrdered` via the
|
||||
/// `futures_ordered` function in the `stream` module, or you can start with an
|
||||
/// empty queue with the `FuturesOrdered::new` constructor.
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct FuturesOrdered<T>
|
||||
where T: Future
|
||||
{
|
||||
in_progress: FuturesUnordered<OrderWrapper<T>>,
|
||||
queued_results: BinaryHeap<OrderWrapper<T::Item>>,
|
||||
next_incoming_index: usize,
|
||||
next_outgoing_index: usize,
|
||||
}
|
||||
|
||||
/// Converts a list of futures into a `Stream` of results from the futures.
|
||||
///
|
||||
/// This function will take a list of futures (e.g. a vector, an iterator,
|
||||
/// etc), and return a stream. The stream will yield items as they become
|
||||
/// available on the futures internally, in the order that their originating
|
||||
/// futures were submitted to the queue. If the futures complete out of order,
|
||||
/// items will be stored internally within `FuturesOrdered` until all preceding
|
||||
/// items have been yielded.
|
||||
///
|
||||
/// Note that the returned queue can also be used to dynamically push more
|
||||
/// futures into the queue as they become available.
|
||||
pub fn futures_ordered<I>(futures: I) -> FuturesOrdered<<I::Item as IntoFuture>::Future>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture
|
||||
{
|
||||
let mut queue = FuturesOrdered::new();
|
||||
|
||||
for future in futures {
|
||||
queue.push(future.into_future());
|
||||
}
|
||||
|
||||
return queue
|
||||
}
|
||||
|
||||
impl<T> FuturesOrdered<T>
|
||||
where T: Future
|
||||
{
|
||||
/// Constructs a new, empty `FuturesOrdered`
|
||||
///
|
||||
/// The returned `FuturesOrdered` does not contain any futures and, in this
|
||||
/// state, `FuturesOrdered::poll` will return `Ok(Async::Ready(None))`.
|
||||
pub fn new() -> FuturesOrdered<T> {
|
||||
FuturesOrdered {
|
||||
in_progress: FuturesUnordered::new(),
|
||||
queued_results: BinaryHeap::new(),
|
||||
next_incoming_index: 0,
|
||||
next_outgoing_index: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of futures contained in the queue.
|
||||
///
|
||||
/// This represents the total number of in-flight futures, both
|
||||
/// those currently processing and those that have completed but
|
||||
/// which are waiting for earlier futures to complete.
|
||||
pub fn len(&self) -> usize {
|
||||
self.in_progress.len() + self.queued_results.len()
|
||||
}
|
||||
|
||||
/// Returns `true` if the queue contains no futures
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.in_progress.is_empty() && self.queued_results.is_empty()
|
||||
}
|
||||
|
||||
/// Push a future into the queue.
|
||||
///
|
||||
/// This function submits the given future to the internal set for managing.
|
||||
/// This function will not call `poll` on the submitted future. The caller
|
||||
/// must ensure that `FuturesOrdered::poll` is called in order to receive
|
||||
/// task notifications.
|
||||
pub fn push(&mut self, future: T) {
|
||||
let wrapped = OrderWrapper {
|
||||
item: future,
|
||||
index: self.next_incoming_index,
|
||||
};
|
||||
self.next_incoming_index += 1;
|
||||
self.in_progress.push(wrapped);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Stream for FuturesOrdered<T>
|
||||
where T: Future
|
||||
{
|
||||
type Item = T::Item;
|
||||
type Error = T::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
// Get any completed futures from the unordered set.
|
||||
loop {
|
||||
match self.in_progress.poll()? {
|
||||
Async::Ready(Some(result)) => self.queued_results.push(result),
|
||||
Async::Ready(None) | Async::NotReady => break,
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(next_result) = self.queued_results.peek() {
|
||||
// PeekMut::pop is not stable yet, so peek first and pop separately below.
|
||||
if next_result.index != self.next_outgoing_index {
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
} else if !self.in_progress.is_empty() {
|
||||
return Ok(Async::NotReady);
|
||||
} else {
|
||||
return Ok(Async::Ready(None));
|
||||
}
|
||||
|
||||
let next_result = self.queued_results.pop().unwrap();
|
||||
self.next_outgoing_index += 1;
|
||||
Ok(Async::Ready(Some(next_result.item)))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Debug> Debug for FuturesOrdered<T>
|
||||
where T: Future
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "FuturesOrdered {{ ... }}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future> FromIterator<F> for FuturesOrdered<F> {
|
||||
fn from_iter<T>(iter: T) -> Self
|
||||
where T: IntoIterator<Item = F>
|
||||
{
|
||||
let mut new = FuturesOrdered::new();
|
||||
for future in iter.into_iter() {
|
||||
new.push(future);
|
||||
}
|
||||
new
|
||||
}
|
||||
}
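// Editor's sketch (not part of the upstream diff): a minimal, hedged example of
// the `futures_ordered` API introduced above, assuming the futures 0.1 crate as
// a dependency. Results are yielded in submission order even when the underlying
// futures complete out of order.
extern crate futures;

use futures::future;
use futures::stream::futures_ordered;
use futures::{Future, Stream};

fn main() {
    // Three already-completed futures; real code would mix in pending ones.
    let queue = futures_ordered(vec![
        future::ok::<u32, ()>(1),
        future::ok::<u32, ()>(2),
        future::ok::<u32, ()>(3),
    ]);
    // `collect` gathers every item into a Vec; `wait` drives it on this thread.
    assert_eq!(queue.collect().wait(), Ok(vec![1, 2, 3]));
}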
|
|
@ -1,107 +1,672 @@
|
|||
use future::{Future, IntoFuture};
|
||||
use stream::Stream;
|
||||
use poll::Poll;
|
||||
use Async;
|
||||
use stack::{Stack, Drain};
|
||||
use std::sync::Arc;
|
||||
use task::{self, UnparkEvent};
|
||||
//! An unbounded set of futures.
|
||||
|
||||
use std::prelude::v1::*;
|
||||
use std::cell::UnsafeCell;
|
||||
use std::fmt::{self, Debug};
|
||||
use std::iter::FromIterator;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::atomic::Ordering::{Relaxed, SeqCst, Acquire, Release, AcqRel};
|
||||
use std::sync::atomic::{AtomicPtr, AtomicBool};
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::usize;
|
||||
|
||||
/// An adaptor for a stream of futures to execute the futures concurrently, if
|
||||
/// possible, delivering results as they become available.
|
||||
use {task, Stream, Future, Poll, Async};
|
||||
use executor::{Notify, UnsafeNotify, NotifyHandle};
|
||||
use task_impl::{self, AtomicTask};
|
||||
|
||||
/// An unbounded set of futures.
|
||||
///
|
||||
/// This adaptor will return their results in the order that they complete.
|
||||
/// This is created by the `futures` method.
|
||||
/// This "combinator" also serves a special function in this library, providing
|
||||
/// the ability to maintain a set of futures and manage driving them all
|
||||
/// to completion.
|
||||
///
|
||||
#[derive(Debug)]
|
||||
/// Futures are pushed into this set and their realized values are yielded as
|
||||
/// they are ready. This structure is optimized to manage a large number of
|
||||
/// futures. Futures managed by `FuturesUnordered` will only be polled when they
|
||||
/// generate notifications. This reduces the amount of work needed to
|
||||
/// coordinate large numbers of futures.
|
||||
///
|
||||
/// When a `FuturesUnordered` is first created, it does not contain any futures.
|
||||
/// Calling `poll` in this state will result in `Ok(Async::Ready(None))` being
|
||||
/// returned. Futures are submitted to the set using `push`; however, the
|
||||
/// future will **not** be polled at this point. `FuturesUnordered` will only
|
||||
/// poll managed futures when `FuturesUnordered::poll` is called. As such, it
|
||||
/// is important to call `poll` after pushing new futures.
|
||||
///
|
||||
/// If `FuturesUnordered::poll` returns `Ok(Async::Ready(None))` this means that
|
||||
/// the set is currently not managing any futures. A future may be submitted
|
||||
/// to the set at a later time. At that point, a call to
|
||||
/// `FuturesUnordered::poll` will either return the future's resolved value
|
||||
/// **or** `Ok(Async::NotReady)` if the future has not yet completed.
|
||||
///
|
||||
/// Note that you can create a ready-made `FuturesUnordered` via the
|
||||
/// `futures_unordered` function in the `stream` module, or you can start with an
|
||||
/// empty set with the `FuturesUnordered::new` constructor.
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct FuturesUnordered<F>
|
||||
where F: Future
|
||||
{
|
||||
futures: Vec<Option<F>>,
|
||||
stack: Arc<Stack<usize>>,
|
||||
pending: Option<Drain<usize>>,
|
||||
active: usize,
|
||||
pub struct FuturesUnordered<F> {
|
||||
inner: Arc<Inner<F>>,
|
||||
len: usize,
|
||||
head_all: *const Node<F>,
|
||||
}
|
||||
|
||||
/// Converts a list of futures into a `Stream` of results from the futures.
|
||||
///
|
||||
/// This function will take a list of futures (e.g. a vector, an iterator,
|
||||
/// etc), and return a stream. The stream will yield items as they become
|
||||
/// available on the futures internally, in the order that they become
|
||||
/// available. This function is similar to `buffer_unordered` in that it may
|
||||
/// return items in a different order than in the list specified.
|
||||
pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture
|
||||
unsafe impl<T: Send> Send for FuturesUnordered<T> {}
|
||||
unsafe impl<T: Sync> Sync for FuturesUnordered<T> {}
|
||||
|
||||
// FuturesUnordered is implemented using two linked lists. One which links all
|
||||
// futures managed by a `FuturesUnordered` and one that tracks futures that have
|
||||
// been scheduled for polling. The first linked list is not thread safe and is
|
||||
// only accessed by the thread that owns the `FuturesUnordered` value. The
|
||||
// second linked list is an implementation of the intrusive MPSC queue algorithm
|
||||
// described by 1024cores.net.
|
||||
//
|
||||
// When a future is submitted to the set a node is allocated and inserted in
|
||||
// both linked lists. The next call to `poll` will (eventually) see this node
|
||||
// and call `poll` on the future.
|
||||
//
|
||||
// Before a managed future is polled, the current task's `Notify` is replaced
|
||||
// with one that is aware of the specific future being run. This ensures that
|
||||
// task notifications generated by that specific future are visible to
|
||||
// `FuturesUnordered`. When a notification is received, the node is scheduled
|
||||
// for polling by being inserted into the concurrent linked list.
|
||||
//
|
||||
// Each node uses an `AtomicUsize` to track its state. The node state is the
|
||||
// reference count (the number of outstanding handles to the node) as well as a
|
||||
// flag tracking if the node is currently inserted in the atomic queue. When the
|
||||
// future is notified, it will only insert itself into the linked list if it
|
||||
// isn't currently inserted.
|
||||
|
||||
#[allow(missing_debug_implementations)]
|
||||
struct Inner<T> {
|
||||
// The task using `FuturesUnordered`.
|
||||
parent: AtomicTask,
|
||||
|
||||
// Head/tail of the readiness queue
|
||||
head_readiness: AtomicPtr<Node<T>>,
|
||||
tail_readiness: UnsafeCell<*const Node<T>>,
|
||||
stub: Arc<Node<T>>,
|
||||
}
|
||||
|
||||
struct Node<T> {
|
||||
// The future
|
||||
future: UnsafeCell<Option<T>>,
|
||||
|
||||
// Next pointer for linked list tracking all active nodes
|
||||
next_all: UnsafeCell<*const Node<T>>,
|
||||
|
||||
// Previous node in linked list tracking all active nodes
|
||||
prev_all: UnsafeCell<*const Node<T>>,
|
||||
|
||||
// Next pointer in readiness queue
|
||||
next_readiness: AtomicPtr<Node<T>>,
|
||||
|
||||
// Queue that we'll be enqueued to when notified
|
||||
queue: Weak<Inner<T>>,
|
||||
|
||||
// Whether or not this node is currently in the mpsc queue.
|
||||
queued: AtomicBool,
|
||||
}
|
||||
|
||||
enum Dequeue<T> {
|
||||
Data(*const Node<T>),
|
||||
Empty,
|
||||
Inconsistent,
|
||||
}
|
||||
|
||||
impl<T> FuturesUnordered<T>
|
||||
where T: Future,
|
||||
{
|
||||
let futures = futures.into_iter()
|
||||
.map(IntoFuture::into_future)
|
||||
.map(Some)
|
||||
.collect::<Vec<_>>();
|
||||
let stack = Arc::new(Stack::new());
|
||||
for i in 0..futures.len() {
|
||||
stack.push(i);
|
||||
}
|
||||
FuturesUnordered {
|
||||
active: futures.len(),
|
||||
futures: futures,
|
||||
pending: None,
|
||||
stack: stack,
|
||||
/// Constructs a new, empty `FuturesUnordered`
|
||||
///
|
||||
/// The returned `FuturesUnordered` does not contain any futures and, in this
|
||||
/// state, `FuturesUnordered::poll` will return `Ok(Async::Ready(None))`.
|
||||
pub fn new() -> FuturesUnordered<T> {
|
||||
let stub = Arc::new(Node {
|
||||
future: UnsafeCell::new(None),
|
||||
next_all: UnsafeCell::new(ptr::null()),
|
||||
prev_all: UnsafeCell::new(ptr::null()),
|
||||
next_readiness: AtomicPtr::new(ptr::null_mut()),
|
||||
queued: AtomicBool::new(true),
|
||||
queue: Weak::new(),
|
||||
});
|
||||
let stub_ptr = &*stub as *const Node<T>;
|
||||
let inner = Arc::new(Inner {
|
||||
parent: AtomicTask::new(),
|
||||
head_readiness: AtomicPtr::new(stub_ptr as *mut _),
|
||||
tail_readiness: UnsafeCell::new(stub_ptr),
|
||||
stub: stub,
|
||||
});
|
||||
|
||||
FuturesUnordered {
|
||||
len: 0,
|
||||
head_all: ptr::null_mut(),
|
||||
inner: inner,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> FuturesUnordered<F>
|
||||
where F: Future
|
||||
{
|
||||
fn poll_pending(&mut self, mut drain: Drain<usize>)
|
||||
-> Option<Poll<Option<F::Item>, F::Error>> {
|
||||
while let Some(id) = drain.next() {
|
||||
// If this future was already done just skip the notification
|
||||
if self.futures[id].is_none() {
|
||||
continue
|
||||
impl<T> FuturesUnordered<T> {
|
||||
/// Returns the number of futures contained in the set.
|
||||
///
|
||||
/// This represents the total number of in-flight futures.
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
/// Returns `true` if the set contains no futures
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
}
|
||||
|
||||
/// Push a future into the set.
|
||||
///
|
||||
/// This function submits the given future to the set for managing. This
|
||||
/// function will not call `poll` on the submitted future. The caller must
|
||||
/// ensure that `FuturesUnordered::poll` is called in order to receive task
|
||||
/// notifications.
|
||||
pub fn push(&mut self, future: T) {
|
||||
let node = Arc::new(Node {
|
||||
future: UnsafeCell::new(Some(future)),
|
||||
next_all: UnsafeCell::new(ptr::null_mut()),
|
||||
prev_all: UnsafeCell::new(ptr::null_mut()),
|
||||
next_readiness: AtomicPtr::new(ptr::null_mut()),
|
||||
queued: AtomicBool::new(true),
|
||||
queue: Arc::downgrade(&self.inner),
|
||||
});
|
||||
|
||||
// Right now our node has a strong reference count of 1. We transfer
|
||||
// ownership of this reference count to our internal linked list
|
||||
// and we'll reclaim ownership through the `unlink` function below.
|
||||
let ptr = self.link(node);
|
||||
|
||||
// We'll need to get the future "into the system" to start tracking it,
|
||||
// e.g. routing its unpark notifications to us so we can track which
|
||||
// futures are ready. To do that we unconditionally enqueue it for
|
||||
// polling here.
|
||||
self.inner.enqueue(ptr);
|
||||
}
|
||||
|
||||
/// Returns an iterator that allows modifying each future in the set.
|
||||
pub fn iter_mut(&mut self) -> IterMut<T> {
|
||||
IterMut {
|
||||
node: self.head_all,
|
||||
len: self.len,
|
||||
_marker: PhantomData
|
||||
}
|
||||
}
|
||||
|
||||
fn release_node(&mut self, node: Arc<Node<T>>) {
|
||||
// The future is done, try to reset the queued flag. This will prevent
|
||||
// `notify` from doing any work in the future
|
||||
let prev = node.queued.swap(true, SeqCst);
|
||||
|
||||
// Drop the future, even if it hasn't finished yet. This is safe
|
||||
// because we're dropping the future on the thread that owns
|
||||
// `FuturesUnordered`, which correctly tracks T's lifetimes and such.
|
||||
unsafe {
|
||||
drop((*node.future.get()).take());
|
||||
}
|
||||
|
||||
// If the queued flag was previously set then it means that this node
|
||||
// is still in our internal mpsc queue. We then transfer ownership
|
||||
// of our reference count to the mpsc queue, and it'll come along and
|
||||
// free it later, noticing that the future is `None`.
|
||||
//
|
||||
// If, however, the queued flag was *not* set then we're safe to
|
||||
// release our reference count on the internal node. The queued flag
|
||||
// was set above so all future `enqueue` operations will not actually
|
||||
// enqueue the node, so our node will never see the mpsc queue again.
|
||||
// The node itself will be deallocated once all reference counts have
|
||||
// been dropped by the various owning tasks elsewhere.
|
||||
if prev {
|
||||
mem::forget(node);
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a new node into the internal linked list.
|
||||
fn link(&mut self, node: Arc<Node<T>>) -> *const Node<T> {
|
||||
let ptr = arc2ptr(node);
|
||||
unsafe {
|
||||
*(*ptr).next_all.get() = self.head_all;
|
||||
if !self.head_all.is_null() {
|
||||
*(*self.head_all).prev_all.get() = ptr;
|
||||
}
|
||||
let event = UnparkEvent::new(self.stack.clone(), id);
|
||||
let ret = match task::with_unpark_event(event, || {
|
||||
self.futures[id]
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.poll()
|
||||
}) {
|
||||
Ok(Async::NotReady) => continue,
|
||||
Ok(Async::Ready(val)) => Ok(Async::Ready(Some(val))),
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
self.pending = Some(drain);
|
||||
self.active -= 1;
|
||||
self.futures[id] = None;
|
||||
return Some(ret)
|
||||
}
|
||||
None
|
||||
|
||||
self.head_all = ptr;
|
||||
self.len += 1;
|
||||
return ptr
|
||||
}
|
||||
|
||||
/// Remove the node from the linked list tracking all nodes currently
|
||||
/// managed by `FuturesUnordered`.
|
||||
unsafe fn unlink(&mut self, node: *const Node<T>) -> Arc<Node<T>> {
|
||||
let node = ptr2arc(node);
|
||||
let next = *node.next_all.get();
|
||||
let prev = *node.prev_all.get();
|
||||
*node.next_all.get() = ptr::null_mut();
|
||||
*node.prev_all.get() = ptr::null_mut();
|
||||
|
||||
if !next.is_null() {
|
||||
*(*next).prev_all.get() = prev;
|
||||
}
|
||||
|
||||
if !prev.is_null() {
|
||||
*(*prev).next_all.get() = next;
|
||||
} else {
|
||||
self.head_all = next;
|
||||
}
|
||||
self.len -= 1;
|
||||
return node
|
||||
}
|
||||
}
|
||||
|
||||
impl<F> Stream for FuturesUnordered<F>
|
||||
where F: Future
|
||||
impl<T> Stream for FuturesUnordered<T>
|
||||
where T: Future
|
||||
{
|
||||
type Item = F::Item;
|
||||
type Error = F::Error;
|
||||
type Item = T::Item;
|
||||
type Error = T::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if self.active == 0 {
|
||||
return Ok(Async::Ready(None))
|
||||
}
|
||||
if let Some(drain) = self.pending.take() {
|
||||
if let Some(ret) = self.poll_pending(drain) {
|
||||
fn poll(&mut self) -> Poll<Option<T::Item>, T::Error> {
|
||||
// Ensure `parent` is correctly set.
|
||||
self.inner.parent.register();
|
||||
|
||||
loop {
|
||||
let node = match unsafe { self.inner.dequeue() } {
|
||||
Dequeue::Empty => {
|
||||
if self.is_empty() {
|
||||
return Ok(Async::Ready(None));
|
||||
} else {
|
||||
return Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
Dequeue::Inconsistent => {
|
||||
// At this point, it may be worth yielding the thread &
|
||||
// spinning a few times... but for now, just yield using the
|
||||
// task system.
|
||||
task::current().notify();
|
||||
return Ok(Async::NotReady);
|
||||
}
|
||||
Dequeue::Data(node) => node,
|
||||
};
|
||||
|
||||
debug_assert!(node != self.inner.stub());
|
||||
|
||||
unsafe {
|
||||
let mut future = match (*(*node).future.get()).take() {
|
||||
Some(future) => future,
|
||||
|
||||
// If the future has already gone away then we're just
|
||||
// cleaning out this node. See the comment in
|
||||
// `release_node` for more information, but we're basically
|
||||
// just taking ownership of our reference count here.
|
||||
None => {
|
||||
let node = ptr2arc(node);
|
||||
assert!((*node.next_all.get()).is_null());
|
||||
assert!((*node.prev_all.get()).is_null());
|
||||
continue
|
||||
}
|
||||
};
|
||||
|
||||
// Unset queued flag... this must be done before
|
||||
// polling. This ensures that the future gets
|
||||
// rescheduled if it is notified **during** a call
|
||||
// to `poll`.
|
||||
let prev = (*node).queued.swap(false, SeqCst);
|
||||
assert!(prev);
|
||||
|
||||
// We're going to need to be very careful if the `poll`
|
||||
// function below panics. We need to (a) not leak memory and
|
||||
// (b) ensure that we still don't have any use-after-frees. To
|
||||
// manage this we do a few things:
|
||||
//
|
||||
// * This "bomb" here will call `release_node` if dropped
|
||||
// abnormally. That way we'll be sure the memory management
|
||||
// of the `node` is managed correctly.
|
||||
// * The future was extracted above (taken ownership). That way
|
||||
// if it panics we're guaranteed that the future is
|
||||
// dropped on this thread and doesn't accidentally get
|
||||
// dropped on a different thread (bad).
|
||||
// * We unlink the node from our internal queue to preemptively
|
||||
// assume it'll panic, in which case we'll want to discard it
|
||||
// regardless.
|
||||
struct Bomb<'a, T: 'a> {
|
||||
queue: &'a mut FuturesUnordered<T>,
|
||||
node: Option<Arc<Node<T>>>,
|
||||
}
|
||||
impl<'a, T> Drop for Bomb<'a, T> {
|
||||
fn drop(&mut self) {
|
||||
if let Some(node) = self.node.take() {
|
||||
self.queue.release_node(node);
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut bomb = Bomb {
|
||||
node: Some(self.unlink(node)),
|
||||
queue: self,
|
||||
};
|
||||
|
||||
// Poll the underlying future with the appropriate `notify`
|
||||
// implementation. This is where a large bit of the unsafety
|
||||
// starts to stem from internally. The `notify` instance itself
|
||||
// is basically just our `Arc<Node<T>>` and tracks the mpsc
|
||||
// queue of ready futures.
|
||||
//
|
||||
// Critically though `Node<T>` won't actually access `T`, the
|
||||
// future, while it's floating around inside of `Task`
|
||||
// instances. These structs will basically just use `T` to size
|
||||
// the internal allocation, appropriately accessing fields and
|
||||
// deallocating the node if need be.
|
||||
let res = {
|
||||
let notify = NodeToHandle(bomb.node.as_ref().unwrap());
|
||||
task_impl::with_notify(¬ify, 0, || {
|
||||
future.poll()
|
||||
})
|
||||
};
|
||||
|
||||
let ret = match res {
|
||||
Ok(Async::NotReady) => {
|
||||
let node = bomb.node.take().unwrap();
|
||||
*node.future.get() = Some(future);
|
||||
bomb.queue.link(node);
|
||||
continue
|
||||
}
|
||||
Ok(Async::Ready(e)) => Ok(Async::Ready(Some(e))),
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
return ret
|
||||
}
|
||||
}
|
||||
let drain = self.stack.drain();
|
||||
if let Some(ret) = self.poll_pending(drain) {
|
||||
return ret
|
||||
}
|
||||
assert!(self.active > 0);
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Debug> Debug for FuturesUnordered<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "FuturesUnordered {{ ... }}")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for FuturesUnordered<T> {
|
||||
fn drop(&mut self) {
|
||||
// When a `FuturesUnordered` is dropped we want to drop all futures associated
|
||||
// with it. At the same time though there may be tons of `Task` handles
|
||||
// flying around which contain `Node<T>` references inside them. We'll
|
||||
// let those naturally get deallocated when the `Task` itself goes out
|
||||
// of scope or gets notified.
|
||||
unsafe {
|
||||
while !self.head_all.is_null() {
|
||||
let head = self.head_all;
|
||||
let node = self.unlink(head);
|
||||
self.release_node(node);
|
||||
}
|
||||
}
|
||||
|
||||
// Note that at this point we could still have a bunch of nodes in the
|
||||
// mpsc queue. None of those nodes, however, have futures associated
|
||||
// with them so they're safe to destroy on any thread. At this point
|
||||
// the `FuturesUnordered` struct, the owner of the one strong reference
|
||||
// to `Inner<T>` will drop the strong reference. At that point
|
||||
// whichever thread releases the strong refcount last (be it this
|
||||
// thread or some other thread as part of an `upgrade`) will clear out
|
||||
// the mpsc queue and free all remaining nodes.
|
||||
//
|
||||
// While that freeing operation isn't guaranteed to happen here, it's
|
||||
// guaranteed to happen "promptly" as no more "blocking work" will
|
||||
// happen while there's a strong refcount held.
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future> FromIterator<F> for FuturesUnordered<F> {
|
||||
fn from_iter<T>(iter: T) -> Self
|
||||
where T: IntoIterator<Item = F>
|
||||
{
|
||||
let mut new = FuturesUnordered::new();
|
||||
for future in iter.into_iter() {
|
||||
new.push(future);
|
||||
}
|
||||
new
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
/// Mutable iterator over all futures in the unordered set.
|
||||
pub struct IterMut<'a, F: 'a> {
|
||||
node: *const Node<F>,
|
||||
len: usize,
|
||||
_marker: PhantomData<&'a mut FuturesUnordered<F>>
|
||||
}
|
||||
|
||||
impl<'a, F> Iterator for IterMut<'a, F> {
|
||||
type Item = &'a mut F;
|
||||
|
||||
fn next(&mut self) -> Option<&'a mut F> {
|
||||
if self.node.is_null() {
|
||||
return None;
|
||||
}
|
||||
unsafe {
|
||||
let future = (*(*self.node).future.get()).as_mut().unwrap();
|
||||
let next = *(*self.node).next_all.get();
|
||||
self.node = next;
|
||||
self.len -= 1;
|
||||
return Some(future);
|
||||
}
|
||||
}
|
||||
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
(self.len, Some(self.len))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, F> ExactSizeIterator for IterMut<'a, F> {}
|
||||
|
||||
impl<T> Inner<T> {
|
||||
/// The enqueue function from the 1024cores intrusive MPSC queue algorithm.
|
||||
fn enqueue(&self, node: *const Node<T>) {
|
||||
unsafe {
|
||||
debug_assert!((*node).queued.load(Relaxed));
|
||||
|
||||
// This action does not require any coordination
|
||||
(*node).next_readiness.store(ptr::null_mut(), Relaxed);
|
||||
|
||||
// Note that these atomic orderings come from 1024cores
|
||||
let node = node as *mut _;
|
||||
let prev = self.head_readiness.swap(node, AcqRel);
|
||||
(*prev).next_readiness.store(node, Release);
|
||||
}
|
||||
}
|
||||
|
||||
/// The dequeue function from the 1024cores intrusive MPSC queue algorithm
|
||||
///
|
||||
/// Note that this is unsafe as it requires mutual exclusion (only one thread
|
||||
/// can call this) to be guaranteed elsewhere.
|
||||
unsafe fn dequeue(&self) -> Dequeue<T> {
|
||||
let mut tail = *self.tail_readiness.get();
|
||||
let mut next = (*tail).next_readiness.load(Acquire);
|
||||
|
||||
if tail == self.stub() {
|
||||
if next.is_null() {
|
||||
return Dequeue::Empty;
|
||||
}
|
||||
|
||||
*self.tail_readiness.get() = next;
|
||||
tail = next;
|
||||
next = (*next).next_readiness.load(Acquire);
|
||||
}
|
||||
|
||||
if !next.is_null() {
|
||||
*self.tail_readiness.get() = next;
|
||||
debug_assert!(tail != self.stub());
|
||||
return Dequeue::Data(tail);
|
||||
}
|
||||
|
||||
if self.head_readiness.load(Acquire) as *const _ != tail {
|
||||
return Dequeue::Inconsistent;
|
||||
}
|
||||
|
||||
self.enqueue(self.stub());
|
||||
|
||||
next = (*tail).next_readiness.load(Acquire);
|
||||
|
||||
if !next.is_null() {
|
||||
*self.tail_readiness.get() = next;
|
||||
return Dequeue::Data(tail);
|
||||
}
|
||||
|
||||
Dequeue::Inconsistent
|
||||
}
|
||||
|
||||
fn stub(&self) -> *const Node<T> {
|
||||
&*self.stub
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Inner<T> {
|
||||
fn drop(&mut self) {
|
||||
// Once we're in the destructor for `Inner<T>` we need to clear out the
|
||||
// mpsc queue of nodes if there's anything left in there.
|
||||
//
|
||||
// Note that each node has a strong reference count associated with it
|
||||
// which is owned by the mpsc queue. All nodes should have had their
|
||||
// futures dropped already by the `FuturesUnordered` destructor above,
|
||||
// so we're just pulling out nodes and dropping their refcounts.
|
||||
unsafe {
|
||||
loop {
|
||||
match self.dequeue() {
|
||||
Dequeue::Empty => break,
|
||||
Dequeue::Inconsistent => abort("inconsistent in drop"),
|
||||
Dequeue::Data(ptr) => drop(ptr2arc(ptr)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(missing_debug_implementations)]
|
||||
struct NodeToHandle<'a, T: 'a>(&'a Arc<Node<T>>);
|
||||
|
||||
impl<'a, T> Clone for NodeToHandle<'a, T> {
|
||||
fn clone(&self) -> Self {
|
||||
NodeToHandle(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> From<NodeToHandle<'a, T>> for NotifyHandle {
|
||||
fn from(handle: NodeToHandle<'a, T>) -> NotifyHandle {
|
||||
unsafe {
|
||||
let ptr = handle.0.clone();
|
||||
let ptr = mem::transmute::<Arc<Node<T>>, *mut ArcNode<T>>(ptr);
|
||||
NotifyHandle::new(hide_lt(ptr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct ArcNode<T>(PhantomData<T>);
|
||||
|
||||
// We should never touch `T` on any thread other than the one owning
|
||||
// `FuturesUnordered`, so this should be a safe operation.
|
||||
unsafe impl<T> Send for ArcNode<T> {}
|
||||
unsafe impl<T> Sync for ArcNode<T> {}
|
||||
|
||||
impl<T> Notify for ArcNode<T> {
|
||||
fn notify(&self, _id: usize) {
|
||||
unsafe {
|
||||
let me: *const ArcNode<T> = self;
|
||||
let me: *const *const ArcNode<T> = &me;
|
||||
let me = me as *const Arc<Node<T>>;
|
||||
Node::notify(&*me)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T> UnsafeNotify for ArcNode<T> {
|
||||
unsafe fn clone_raw(&self) -> NotifyHandle {
|
||||
let me: *const ArcNode<T> = self;
|
||||
let me: *const *const ArcNode<T> = &me;
|
||||
let me = &*(me as *const Arc<Node<T>>);
|
||||
NodeToHandle(me).into()
|
||||
}
|
||||
|
||||
unsafe fn drop_raw(&self) {
|
||||
let mut me: *const ArcNode<T> = self;
|
||||
let me = &mut me as *mut *const ArcNode<T> as *mut Arc<Node<T>>;
|
||||
ptr::drop_in_place(me);
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn hide_lt<T>(p: *mut ArcNode<T>) -> *mut UnsafeNotify {
|
||||
mem::transmute(p as *mut UnsafeNotify)
|
||||
}
|
||||
|
||||
impl<T> Node<T> {
|
||||
fn notify(me: &Arc<Node<T>>) {
|
||||
let inner = match me.queue.upgrade() {
|
||||
Some(inner) => inner,
|
||||
None => return,
|
||||
};
|
||||
|
||||
// It's our job to notify the node that it's ready to get polled,
|
||||
// meaning that we need to enqueue it into the readiness queue. To
|
||||
// do this we flag that we're ready to be queued, and if successful
|
||||
// we then do the literal queueing operation, ensuring that we're
|
||||
// only queued once.
|
||||
//
|
||||
// Once the node is inserted we make sure to notify the parent task,
|
||||
// as it'll want to come along and pick up our node now.
|
||||
//
|
||||
// Note that we don't change the reference count of the node here,
|
||||
// we're just enqueueing the raw pointer. The `FuturesUnordered`
|
||||
// implementation guarantees that if we set the `queued` flag true that
|
||||
// there's a reference count held by the main `FuturesUnordered` queue
|
||||
// still.
|
||||
let prev = me.queued.swap(true, SeqCst);
|
||||
if !prev {
|
||||
inner.enqueue(&**me);
|
||||
inner.parent.notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Node<T> {
|
||||
fn drop(&mut self) {
|
||||
// Currently a `Node<T>` is sent across all threads for any lifetime,
|
||||
// regardless of `T`. This means that for memory safety we can't
|
||||
// actually touch `T` at any time except when we have a reference to the
|
||||
// `FuturesUnordered` itself.
|
||||
//
|
||||
// Consequently it *should* be the case that we always drop futures from
|
||||
// the `FuturesUnordered` instance, but this is a bomb in place to catch
|
||||
// any bugs in that logic.
|
||||
unsafe {
|
||||
if (*self.future.get()).is_some() {
|
||||
abort("future still here when dropping");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn arc2ptr<T>(ptr: Arc<T>) -> *const T {
|
||||
let addr = &*ptr as *const T;
|
||||
mem::forget(ptr);
|
||||
return addr
|
||||
}
|
||||
|
||||
unsafe fn ptr2arc<T>(ptr: *const T) -> Arc<T> {
|
||||
let anchor = mem::transmute::<usize, Arc<T>>(0x10);
|
||||
let addr = &*anchor as *const T;
|
||||
mem::forget(anchor);
|
||||
let offset = addr as isize - 0x10;
|
||||
mem::transmute::<isize, Arc<T>>(ptr as isize - offset)
|
||||
}
|
||||
|
||||
fn abort(s: &str) -> ! {
|
||||
struct DoublePanic;
|
||||
|
||||
impl Drop for DoublePanic {
|
||||
fn drop(&mut self) {
|
||||
panic!("panicking twice to abort the program");
|
||||
}
|
||||
}
|
||||
|
||||
let _bomb = DoublePanic;
|
||||
panic!("{}", s);
|
||||
}
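// Editor's sketch (not part of the upstream diff): a hedged example of the
// rewritten `FuturesUnordered` above, assuming the futures 0.1 crate. Items are
// yielded in completion order, so the example sorts before asserting.
extern crate futures;

use futures::future;
use futures::stream::FuturesUnordered;
use futures::{Future, Stream};

fn main() {
    let mut set = FuturesUnordered::new();
    set.push(future::ok::<u32, ()>(2));
    set.push(future::ok::<u32, ()>(1));
    assert_eq!(set.len(), 2);

    // Drive the whole set to completion on this thread.
    let mut results = set.collect().wait().unwrap();
    results.sort();
    assert_eq!(results, vec![1, 2]);
}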
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
use {Stream, Poll, Async};
|
||||
|
||||
/// Do something with the items of a stream, passing it on.
|
||||
///
|
||||
/// This is created by the `Stream::inspect` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Inspect<S, F> where S: Stream {
|
||||
stream: S,
|
||||
inspect: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F>(stream: S, f: F) -> Inspect<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(&S::Item) -> (),
|
||||
{
|
||||
Inspect {
|
||||
stream: stream,
|
||||
inspect: f,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream, F> Inspect<S, F> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for Inspect<S, F>
|
||||
where S: ::sink::Sink + Stream
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F> Stream for Inspect<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(&S::Item),
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
match try_ready!(self.stream.poll()) {
|
||||
Some(e) => {
|
||||
(self.inspect)(&e);
|
||||
Ok(Async::Ready(Some(e)))
|
||||
}
|
||||
None => Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
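// Editor's sketch (not part of the upstream diff): a hedged example of the new
// `Inspect` combinator, assuming `Stream::inspect` and `stream::iter_ok` from
// this same futures 0.1 update.
extern crate futures;

use futures::stream;
use futures::{Future, Stream};

fn main() {
    let seen = stream::iter_ok::<_, ()>(vec![1, 2, 3])
        // The closure observes each item; the item itself passes through unchanged.
        .inspect(|x| println!("saw {}", x))
        .collect()
        .wait();
    assert_eq!(seen, Ok(vec![1, 2, 3]));
}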
|
|
@ -0,0 +1,81 @@
|
|||
use {Stream, Poll};
|
||||
|
||||
/// Do something with the error of a stream, passing it on.
|
||||
///
|
||||
/// This is created by the `Stream::inspect_err` method.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct InspectErr<S, F> where S: Stream {
|
||||
stream: S,
|
||||
inspect: F,
|
||||
}
|
||||
|
||||
pub fn new<S, F>(stream: S, f: F) -> InspectErr<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(&S::Error) -> (),
|
||||
{
|
||||
InspectErr {
|
||||
stream: stream,
|
||||
inspect: f,
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream, F> InspectErr<S, F> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for InspectErr<S, F>
|
||||
where S: ::sink::Sink + Stream
|
||||
{
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
||||
fn start_send(&mut self, item: S::SinkItem) -> ::StartSend<S::SinkItem, S::SinkError> {
|
||||
self.stream.start_send(item)
|
||||
}
|
||||
|
||||
fn poll_complete(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.poll_complete()
|
||||
}
|
||||
|
||||
fn close(&mut self) -> Poll<(), S::SinkError> {
|
||||
self.stream.close()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S, F> Stream for InspectErr<S, F>
|
||||
where S: Stream,
|
||||
F: FnMut(&S::Error),
|
||||
{
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<S::Item>, S::Error> {
|
||||
self.stream.poll().map_err(|e| {
|
||||
(self.inspect)(&e);
|
||||
e
|
||||
})
|
||||
}
|
||||
}
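// Editor's sketch (not part of the upstream diff): a hedged counterpart for the
// new `InspectErr` combinator, assuming `Stream::inspect_err` and
// `stream::iter_result` from this update.
extern crate futures;

use futures::stream;
use futures::{Future, Stream};

fn main() {
    let result = stream::iter_result(vec![Ok(1), Err("boom")])
        // The closure observes the error before it is forwarded unchanged.
        .inspect_err(|e| println!("error observed: {}", e))
        .collect()
        .wait();
    assert_eq!(result, Err("boom"));
}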
|
|
@ -1,14 +1,15 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
#![deprecated(note = "implementation moved to `iter_ok` and `iter_result`")]
|
||||
#![allow(deprecated)]
|
||||
|
||||
use Poll;
|
||||
use stream::{iter_result, IterResult, Stream};
|
||||
|
||||
/// A stream which is just a shim over an underlying instance of `Iterator`.
|
||||
///
|
||||
/// This stream will never block and is always ready.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Iter<I> {
|
||||
iter: I,
|
||||
}
|
||||
pub struct Iter<I>(IterResult<I>);
|
||||
|
||||
/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
|
||||
/// to yield the next value.
|
||||
|
@ -25,12 +26,11 @@ pub struct Iter<I> {
|
|||
/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn iter<J, T, E>(i: J) -> Iter<J::IntoIter>
|
||||
where J: IntoIterator<Item=Result<T, E>>,
|
||||
{
|
||||
Iter {
|
||||
iter: i.into_iter(),
|
||||
}
|
||||
Iter(iter_result(i))
|
||||
}
|
||||
|
||||
impl<I, T, E> Stream for Iter<I>
|
||||
|
@ -39,11 +39,8 @@ impl<I, T, E> Stream for Iter<I>
|
|||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
#[inline]
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
match self.iter.next() {
|
||||
Some(Ok(e)) => Ok(Async::Ready(Some(e))),
|
||||
Some(Err(e)) => Err(e),
|
||||
None => Ok(Async::Ready(None)),
|
||||
}
|
||||
self.0.poll()
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,48 @@
|
|||
use core::marker;
|
||||
|
||||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream which is just a shim over an underlying instance of `Iterator`.
|
||||
///
|
||||
/// This stream will never block and is always ready.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct IterOk<I, E> {
|
||||
iter: I,
|
||||
_marker: marker::PhantomData<fn() -> E>,
|
||||
}
|
||||
|
||||
/// Converts an `Iterator` into a `Stream` which is always ready
|
||||
/// to yield the next value.
|
||||
///
|
||||
/// Iterators in Rust don't express the ability to block, so this adapter
|
||||
/// simply always calls `iter.next()` and returns that.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let mut stream = stream::iter_ok::<_, ()>(vec![17, 19]);
|
||||
/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
pub fn iter_ok<I, E>(i: I) -> IterOk<I::IntoIter, E>
|
||||
where I: IntoIterator,
|
||||
{
|
||||
IterOk {
|
||||
iter: i.into_iter(),
|
||||
_marker: marker::PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, E> Stream for IterOk<I, E>
|
||||
where I: Iterator,
|
||||
{
|
||||
type Item = I::Item;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<I::Item>, E> {
|
||||
Ok(Async::Ready(self.iter.next()))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
use {Async, Poll};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream which is just a shim over an underlying instance of `Iterator`.
|
||||
///
|
||||
/// This stream will never block and is always ready.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct IterResult<I> {
|
||||
iter: I,
|
||||
}
|
||||
|
||||
/// Converts an `Iterator` over `Result`s into a `Stream` which is always ready
|
||||
/// to yield the next value.
|
||||
///
|
||||
/// Iterators in Rust don't express the ability to block, so this adapter simply
|
||||
/// always calls `iter.next()` and returns that.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let mut stream = stream::iter_result(vec![Ok(17), Err(false), Ok(19)]);
|
||||
/// assert_eq!(Ok(Async::Ready(Some(17))), stream.poll());
|
||||
/// assert_eq!(Err(false), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(Some(19))), stream.poll());
|
||||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
pub fn iter_result<J, T, E>(i: J) -> IterResult<J::IntoIter>
|
||||
where
|
||||
J: IntoIterator<Item = Result<T, E>>,
|
||||
{
|
||||
IterResult {
|
||||
iter: i.into_iter(),
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, T, E> Stream for IterResult<I>
|
||||
where
|
||||
I: Iterator<Item = Result<T, E>>,
|
||||
{
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
match self.iter.next() {
|
||||
Some(Ok(e)) => Ok(Async::Ready(Some(e))),
|
||||
Some(Err(e)) => Err(e),
|
||||
None => Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -22,6 +22,31 @@ pub fn new<S, F, U>(s: S, f: F) -> Map<S, F>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, F> Map<S, F> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for Map<S, F>
|
||||
where S: ::sink::Sink
|
||||
|
|
|
@ -22,6 +22,31 @@ pub fn new<S, F, U>(s: S, f: F) -> MapErr<S, F>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, F> MapErr<S, F> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, F> ::sink::Sink for MapErr<S, F>
|
||||
where S: ::sink::Sink
|
||||
|
|
|
@ -1,3 +1,6 @@
|
|||
#![deprecated(note = "functionality provided by `select` now")]
|
||||
#![allow(deprecated)]
|
||||
|
||||
use {Poll, Async};
|
||||
use stream::{Stream, Fuse};
|
||||
|
||||
|
@ -47,7 +50,7 @@ impl<S1, S2> Stream for Merge<S1, S2>
|
|||
return Err(e)
|
||||
}
|
||||
|
||||
match try!(self.stream1.poll()) {
|
||||
match self.stream1.poll()? {
|
||||
Async::NotReady => {
|
||||
match try_ready!(self.stream2.poll()) {
|
||||
Some(item2) => Ok(Async::Ready(Some(MergedItem::Second(item2)))),
|
||||
|
|
|
@ -18,9 +18,15 @@
|
|||
use {IntoFuture, Poll};
|
||||
|
||||
mod iter;
|
||||
#[allow(deprecated)]
|
||||
pub use self::iter::{iter, Iter};
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[allow(deprecated)]
|
||||
pub use self::Iter as IterStream;
|
||||
mod iter_ok;
|
||||
pub use self::iter_ok::{iter_ok, IterOk};
|
||||
mod iter_result;
|
||||
pub use self::iter_result::{iter_result, IterResult};
|
||||
|
||||
mod repeat;
|
||||
pub use self::repeat::{repeat, Repeat};
|
||||
|
@ -37,12 +43,15 @@ mod for_each;
|
|||
mod from_err;
|
||||
mod fuse;
|
||||
mod future;
|
||||
mod inspect;
|
||||
mod inspect_err;
|
||||
mod map;
|
||||
mod map_err;
|
||||
mod merge;
|
||||
mod once;
|
||||
mod or_else;
|
||||
mod peek;
|
||||
mod poll_fn;
|
||||
mod select;
|
||||
mod skip;
|
||||
mod skip_while;
|
||||
|
@ -54,7 +63,9 @@ mod zip;
|
|||
mod forward;
|
||||
pub use self::and_then::AndThen;
|
||||
pub use self::chain::Chain;
|
||||
#[allow(deprecated)]
|
||||
pub use self::concat::Concat;
|
||||
pub use self::concat::Concat2;
|
||||
pub use self::empty::{Empty, empty};
|
||||
pub use self::filter::Filter;
|
||||
pub use self::filter_map::FilterMap;
|
||||
|
@ -64,12 +75,16 @@ pub use self::for_each::ForEach;
|
|||
pub use self::from_err::FromErr;
|
||||
pub use self::fuse::Fuse;
|
||||
pub use self::future::StreamFuture;
|
||||
pub use self::inspect::Inspect;
|
||||
pub use self::inspect_err::InspectErr;
|
||||
pub use self::map::Map;
|
||||
pub use self::map_err::MapErr;
|
||||
#[allow(deprecated)]
|
||||
pub use self::merge::{Merge, MergedItem};
|
||||
pub use self::once::{Once, once};
|
||||
pub use self::or_else::OrElse;
|
||||
pub use self::peek::Peekable;
|
||||
pub use self::poll_fn::{poll_fn, PollFn};
|
||||
pub use self::select::Select;
|
||||
pub use self::skip::Skip;
|
||||
pub use self::skip_while::SkipWhile;
|
||||
|
@ -92,7 +107,8 @@ if_std! {
|
|||
mod wait;
|
||||
mod channel;
|
||||
mod split;
|
||||
mod futures_unordered;
|
||||
pub mod futures_unordered;
|
||||
mod futures_ordered;
|
||||
pub use self::buffered::Buffered;
|
||||
pub use self::buffer_unordered::BufferUnordered;
|
||||
pub use self::catch_unwind::CatchUnwind;
|
||||
|
@ -100,7 +116,8 @@ if_std! {
|
|||
pub use self::collect::Collect;
|
||||
pub use self::wait::Wait;
|
||||
pub use self::split::{SplitStream, SplitSink};
|
||||
pub use self::futures_unordered::{futures_unordered, FuturesUnordered};
|
||||
pub use self::futures_unordered::FuturesUnordered;
|
||||
pub use self::futures_ordered::{futures_ordered, FuturesOrdered};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
|
@ -108,6 +125,10 @@ if_std! {
|
|||
pub use self::channel::{channel, Sender, Receiver, FutureSender, SendError};
|
||||
|
||||
/// A type alias for `Box<Stream + Send>`
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note = "removed without replacement, recommended to use a \
|
||||
local extension trait or function if needed, more \
|
||||
details in https://github.com/alexcrichton/futures-rs/issues/228")]
|
||||
pub type BoxStream<T, E> = ::std::boxed::Box<Stream<Item = T, Error = E> + Send>;
|
||||
|
||||
impl<S: ?Sized + Stream> Stream for ::std::boxed::Box<S> {
|
||||
|
@ -247,6 +268,11 @@ pub trait Stream {
|
|||
/// let a: BoxStream<i32, ()> = rx.boxed();
|
||||
/// ```
|
||||
#[cfg(feature = "use_std")]
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note = "removed without replacement, recommended to use a \
|
||||
local extension trait or function if needed, more \
|
||||
details in https://github.com/alexcrichton/futures-rs/issues/228")]
|
||||
#[allow(deprecated)]
|
||||
fn boxed(self) -> BoxStream<Self::Item, Self::Error>
|
||||
where Self: Sized + Send + 'static,
|
||||
{
|
||||
|
@ -281,7 +307,7 @@ pub trait Stream {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
|
@ -307,7 +333,7 @@ pub trait Stream {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
|
@ -337,11 +363,11 @@ pub trait Stream {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
/// let evens = rx.filter(|x| x % 0 == 2);
|
||||
/// let evens = rx.filter(|x| x % 2 == 0);
|
||||
/// ```
|
||||
fn filter<F>(self, f: F) -> Filter<Self, F>
|
||||
where F: FnMut(&Self::Item) -> bool,
|
||||
|
@ -367,7 +393,7 @@ pub trait Stream {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
|
@ -406,7 +432,7 @@ pub trait Stream {
|
|||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::Stream;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
|
||||
|
@ -446,10 +472,13 @@ pub trait Stream {
|
|||
/// Note that this function consumes the receiving stream and returns a
|
||||
/// wrapped version of it.
|
||||
///
|
||||
/// To process the entire stream and return a single future representing
|
||||
/// success or error, use `for_each` instead.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::*;
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (_tx, rx) = mpsc::channel::<i32>(1);
@ -515,7 +544,7 @@ pub trait Stream {
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::{Stream, Future, Sink};
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (mut tx, rx) = mpsc::channel(1);
@ -540,16 +569,52 @@ pub trait Stream {
/// destination, returning a future representing the end result.
|
||||
///
|
||||
/// This combinator will extend the first item with the contents
|
||||
/// of all the successful results of the stream. If an error
|
||||
/// occurs, all the results will be dropped and the error will be
|
||||
/// returned.
|
||||
/// of all the successful results of the stream. If the stream is
|
||||
/// empty, the default value will be returned. If an error occurs,
|
||||
/// all the results will be dropped and the error will be returned.
|
||||
///
|
||||
/// The name `concat2` is an intermediate measure until the release of
|
||||
/// futures 0.2, at which point it will be renamed back to `concat`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::{Future, Sink, Stream};
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (mut tx, rx) = mpsc::channel(1);
|
||||
///
|
||||
/// thread::spawn(move || {
|
||||
/// for i in (0..3).rev() {
|
||||
/// let n = i * 3;
|
||||
/// tx = tx.send(vec![n + 1, n + 2, n + 3]).wait().unwrap();
|
||||
/// }
|
||||
/// });
|
||||
/// let result = rx.concat2();
|
||||
/// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
|
||||
/// ```
|
||||
fn concat2(self) -> Concat2<Self>
|
||||
where Self: Sized,
|
||||
Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator + Default,
|
||||
{
|
||||
concat::new2(self)
|
||||
}
|
||||
|
||||
/// Concatenate all results of a stream into a single extendable
|
||||
/// destination, returning a future representing the end result.
|
||||
///
|
||||
/// This combinator will extend the first item with the contents
|
||||
/// of all the successful results of the stream. If an error occurs,
|
||||
/// all the results will be dropped and the error will be returned.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (mut tx, rx) = mpsc::channel(1);
@ -563,6 +628,13 @@ pub trait Stream {
/// let result = rx.concat();
|
||||
/// assert_eq!(result.wait(), Ok(vec![7, 8, 9, 4, 5, 6, 1, 2, 3]));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// It's important to note that this function will panic if the stream
|
||||
/// is empty, which is the reason for its deprecation.
|
||||
#[deprecated(since="0.1.14", note="please use `Stream::concat2` instead")]
|
||||
#[allow(deprecated)]
|
||||
fn concat(self) -> Concat<Self>
|
||||
where Self: Sized,
|
||||
Self::Item: Extend<<<Self as Stream>::Item as IntoIterator>::Item> + IntoIterator,
@ -585,11 +657,12 @@ pub trait Stream {
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::{self, Stream};
|
||||
/// use futures::future::{ok, Future};
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::stream;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let number_stream = stream::iter::<_, _, ()>((0..6).map(Ok));
|
||||
/// let sum = number_stream.fold(0, |a, b| ok(a + b));
|
||||
/// let number_stream = stream::iter_ok::<_, ()>(0..6);
|
||||
/// let sum = number_stream.fold(0, |acc, x| future::ok(acc + x));
|
||||
/// assert_eq!(sum.wait(), Ok(15));
|
||||
/// ```
|
||||
fn fold<F, T, Fut>(self, init: T, f: F) -> Fold<Self, F, Fut, T>
@ -611,7 +684,7 @@ pub trait Stream {
/// ```
|
||||
/// use std::thread;
|
||||
///
|
||||
/// use futures::{Future, Stream, Poll, Sink};
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::sync::mpsc;
|
||||
///
|
||||
/// let (tx1, rx1) = mpsc::channel::<i32>(1);
@ -682,6 +755,9 @@ pub trait Stream {
/// errors are otherwise threaded through. Any error on the stream or in the
|
||||
/// closure will cause iteration to be halted immediately and the future
|
||||
/// will resolve to that error.
|
||||
///
|
||||
/// To process each item in the stream and produce another stream instead
|
||||
/// of a single future, use `and_then` instead.
|
||||
fn for_each<F, U>(self, f: F) -> ForEach<Self, F, U>
|
||||
where F: FnMut(Self::Item) -> U,
|
||||
U: IntoFuture<Item=(), Error = Self::Error>,
@ -759,6 +835,31 @@ pub trait Stream {
fuse::new(self)
|
||||
}
|
||||
|
||||
/// Borrows a stream, rather than consuming it.
|
||||
///
|
||||
/// This is useful to allow applying stream adaptors while still retaining
|
||||
/// ownership of the original stream.
|
||||
///
|
||||
/// ```
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::stream;
|
||||
/// use futures::future;
|
||||
///
|
||||
/// let mut stream = stream::iter_ok::<_, ()>(1..5);
|
||||
///
|
||||
/// let sum = stream.by_ref().take(2).fold(0, |a, b| future::ok(a + b)).wait();
|
||||
/// assert_eq!(sum, Ok(3));
|
||||
///
|
||||
/// // You can use the stream again
|
||||
/// let sum = stream.take(2).fold(0, |a, b| future::ok(a + b)).wait();
|
||||
/// assert_eq!(sum, Ok(7));
|
||||
/// ```
|
||||
fn by_ref(&mut self) -> &mut Self
|
||||
where Self: Sized
|
||||
{
|
||||
self
|
||||
}
|
||||
|
||||
/// Catches unwinding panics while polling the stream.
|
||||
///
|
||||
/// Caught panic (if any) will be the last element of the resulting stream.
@ -780,11 +881,10 @@ pub trait Stream {
/// # Examples
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::stream;
|
||||
/// use futures::stream::Stream;
|
||||
///
|
||||
/// let stream = stream::iter::<_, Option<i32>, bool>(vec![
|
||||
/// Some(10), None, Some(11)].into_iter().map(Ok));
|
||||
/// let stream = stream::iter_ok::<_, bool>(vec![Some(10), None, Some(11)]);
|
||||
/// // panic on second element
|
||||
/// let stream_panicking = stream.map(|o| o.unwrap());
|
||||
/// let mut iter = stream_panicking.catch_unwind().wait();
@ -847,6 +947,8 @@ pub trait Stream {
/// The merged stream produces items from one or both of the underlying
|
||||
/// streams as they become available. Errors, however, are not merged: you
|
||||
/// get at most one error at a time.
|
||||
#[deprecated(note = "functionality provided by `select` now")]
|
||||
#[allow(deprecated)]
|
||||
fn merge<S>(self, other: S) -> Merge<Self, S>
|
||||
where S: Stream<Error = Self::Error>,
|
||||
Self: Sized,
@ -872,11 +974,11 @@ pub trait Stream {
/// first stream reaches the end, emits the elements from the second stream.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::prelude::*;
|
||||
/// use futures::stream;
|
||||
/// use futures::stream::Stream;
|
||||
///
|
||||
/// let stream1 = stream::iter(vec![Ok(10), Err(false)]);
|
||||
/// let stream2 = stream::iter(vec![Err(true), Ok(20)]);
|
||||
/// let stream1 = stream::iter_result(vec![Ok(10), Err(false)]);
|
||||
/// let stream2 = stream::iter_result(vec![Err(true), Ok(20)]);
|
||||
/// let mut chain = stream1.chain(stream2).wait();
|
||||
///
|
||||
/// assert_eq!(Some(Ok(10)), chain.next());
@ -951,11 +1053,13 @@ pub trait Stream {
///
|
||||
/// This future will drive the stream to keep producing items until it is
|
||||
/// exhausted, sending each item to the sink. It will complete once both the
|
||||
/// stream is exhausted, and the sink has fully processed and flushed all of
|
||||
/// the items sent to it.
|
||||
/// stream is exhausted, and the sink has fully processed each received item,
|
||||
/// flushed successfully, and closed successfully.
|
||||
///
|
||||
/// Doing `stream.forward(sink)` is roughly equivalent to
|
||||
/// `sink.send_all(stream)`.
|
||||
/// `sink.send_all(stream)`. The returned future will exhaust all items from
|
||||
/// `self`, sending them all to `sink`. Furthermore the `sink` will be
|
||||
/// closed and flushed.
|
||||
///
|
||||
/// On completion, the pair `(stream, sink)` is returned.
|
||||
fn forward<S>(self, sink: S) -> Forward<Self, S>
@ -981,6 +1085,30 @@ pub trait Stream {
{
|
||||
split::split(self)
|
||||
}
|
||||
|
||||
/// Do something with each item of this stream, afterwards passing it on.
|
||||
///
|
||||
/// This is similar to the `Iterator::inspect` method in the standard
|
||||
/// library where it allows easily inspecting each value as it passes
|
||||
/// through the stream, for example to debug what's going on.
|
||||
fn inspect<F>(self, f: F) -> Inspect<Self, F>
|
||||
where F: FnMut(&Self::Item),
|
||||
Self: Sized,
|
||||
{
|
||||
inspect::new(self, f)
|
||||
}
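// Illustrative sketch (not part of the upstream diff): using `inspect` to
// observe items as they flow through, assuming futures 0.1.18's prelude and
// `stream::iter_ok`.

extern crate futures;

use futures::prelude::*;
use futures::stream;

fn main() {
    // Each item is handed to the closure by reference and then yielded on.
    let observed = stream::iter_ok::<_, ()>(1..4)
        .inspect(|x| println!("saw {}", x))
        .collect()
        .wait();
    assert_eq!(observed, Ok(vec![1, 2, 3]));
}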
|
||||
|
||||
/// Do something with the error of this stream, afterwards passing it on.
|
||||
///
|
||||
/// This is similar to the `Stream::inspect` method where it allows
|
||||
/// easily inspecting the error as it passes through the stream, for
|
||||
/// example to debug what's going on.
|
||||
fn inspect_err<F>(self, f: F) -> InspectErr<Self, F>
|
||||
where F: FnMut(&Self::Error),
|
||||
Self: Sized,
|
||||
{
|
||||
inspect_err::new(self, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
@ -991,3 +1119,27 @@ impl<'a, S: ?Sized + Stream> Stream for &'a mut S {
(**self).poll()
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a list of futures into a `Stream` of results from the futures.
|
||||
///
|
||||
/// This function will take an list of futures (e.g. a vector, an iterator,
|
||||
/// etc), and return a stream. The stream will yield items as they become
|
||||
/// available on the futures internally, in the order that they become
|
||||
/// available. This function is similar to `buffer_unordered` in that it may
|
||||
/// return items in a different order than in the list specified.
|
||||
///
|
||||
/// Note that the returned set can also be used to dynamically push more
|
||||
/// futures into the set as they become available.
|
||||
#[cfg(feature = "use_std")]
|
||||
pub fn futures_unordered<I>(futures: I) -> FuturesUnordered<<I::Item as IntoFuture>::Future>
|
||||
where I: IntoIterator,
|
||||
I::Item: IntoFuture
|
||||
{
|
||||
let mut set = FuturesUnordered::new();
|
||||
|
||||
for future in futures {
|
||||
set.push(future.into_future());
|
||||
}
|
||||
|
||||
return set
|
||||
}
@ -1,7 +1,4 @@
use core;
|
||||
|
||||
use Poll;
|
||||
use stream;
|
||||
use {Poll, Async};
|
||||
use stream::Stream;
|
||||
|
||||
/// A stream which emits single element and then EOF.
|
||||
|
@ -9,7 +6,7 @@ use stream::Stream;
|
|||
/// This stream will never block and is always ready.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Once<T, E>(stream::Iter<core::iter::Once<Result<T, E>>>);
|
||||
pub struct Once<T, E>(Option<Result<T, E>>);
|
||||
|
||||
/// Creates a stream of single element
|
||||
///
|
||||
|
@ -21,7 +18,7 @@ pub struct Once<T, E>(stream::Iter<core::iter::Once<Result<T, E>>>);
|
|||
/// assert_eq!(Ok(Async::Ready(None)), stream.poll());
|
||||
/// ```
|
||||
pub fn once<T, E>(item: Result<T, E>) -> Once<T, E> {
|
||||
Once(stream::iter(core::iter::once(item)))
|
||||
Once(Some(item))
|
||||
}
|
||||
|
||||
impl<T, E> Stream for Once<T, E> {
|
||||
|
@ -29,6 +26,10 @@ impl<T, E> Stream for Once<T, E> {
|
|||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
self.0.poll()
|
||||
match self.0.take() {
|
||||
Some(Ok(e)) => Ok(Async::Ready(Some(e))),
|
||||
Some(Err(e)) => Err(e),
|
||||
None => Ok(Async::Ready(None)),
|
||||
}
|
||||
}
|
||||
}
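// Illustrative sketch (not part of the upstream diff): the rewritten `Once`
// yields its item exactly once and then reports end-of-stream; an `Err` input
// surfaces as a single stream error.

extern crate futures;

use futures::prelude::*;
use futures::stream;

fn main() {
    // A successful item is yielded once, then the stream ends.
    let mut results = stream::once::<_, ()>(Ok(17)).wait();
    assert_eq!(results.next(), Some(Ok(17)));
    assert_eq!(results.next(), None);

    // An error is reported once as a stream error, then the stream ends.
    let mut failures = stream::once::<i32, _>(Err("boom")).wait();
    assert_eq!(failures.next(), Some(Err("boom")));
    assert_eq!(failures.next(), None);
}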
@ -0,0 +1,49 @@
//! Definition of the `PollFn` combinator
|
||||
|
||||
use {Stream, Poll};
|
||||
|
||||
/// A stream which adapts a function returning `Poll`.
|
||||
///
|
||||
/// Created by the `poll_fn` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct PollFn<F> {
|
||||
inner: F,
|
||||
}
|
||||
|
||||
/// Creates a new stream wrapping around a function returning `Poll`.
|
||||
///
|
||||
/// Polling the returned stream delegates to the wrapped function.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use futures::stream::poll_fn;
|
||||
/// use futures::{Async, Poll};
|
||||
///
|
||||
/// let mut counter = 1usize;
|
||||
///
|
||||
/// let read_stream = poll_fn(move || -> Poll<Option<String>, std::io::Error> {
|
||||
/// if counter == 0 { return Ok(Async::Ready(None)); }
|
||||
/// counter -= 1;
|
||||
/// Ok(Async::Ready(Some("Hello, World!".to_owned())))
|
||||
/// });
|
||||
/// ```
|
||||
pub fn poll_fn<T, E, F>(f: F) -> PollFn<F>
|
||||
where
|
||||
F: FnMut() -> Poll<Option<T>, E>,
|
||||
{
|
||||
PollFn { inner: f }
|
||||
}
|
||||
|
||||
impl<T, E, F> Stream for PollFn<F>
|
||||
where
|
||||
F: FnMut() -> Poll<Option<T>, E>,
|
||||
{
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<T>, E> {
|
||||
(self.inner)()
|
||||
}
|
||||
}
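// Illustrative sketch (not part of the upstream diff): driving the new
// `poll_fn` stream to completion, assuming it is re-exported as
// `futures::stream::poll_fn` (the path used in the doc example above).

extern crate futures;

use futures::stream::poll_fn;
use futures::{Async, Future, Poll, Stream};

fn main() {
    let mut remaining = 3usize;
    let countdown = poll_fn(move || -> Poll<Option<usize>, ()> {
        // Count down and end the stream once the counter reaches zero.
        if remaining == 0 {
            return Ok(Async::Ready(None));
        }
        remaining -= 1;
        Ok(Async::Ready(Some(remaining)))
    });
    assert_eq!(countdown.collect().wait(), Ok(vec![2, 1, 0]));
}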
@ -7,6 +7,8 @@ use {Async, Poll};
|
||||
|
||||
/// Stream that produces the same element repeatedly.
|
||||
///
|
||||
/// This structure is created by the `stream::repeat` function.
|
||||
#[derive(Debug)]
|
||||
#[must_use = "streams do nothing unless polled"]
|
||||
pub struct Repeat<T, E>
|
||||
|
@ -18,7 +20,9 @@ pub struct Repeat<T, E>
|
|||
|
||||
/// Create a stream which produces the same item repeatedly.
|
||||
///
|
||||
/// Stream never produces an error or EOF.
|
||||
/// Stream never produces an error or EOF. Note that you likely want to avoid
|
||||
/// usage of `collect` or such on the returned stream as it will exhaust
|
||||
/// available memory as it tries to just fill up all RAM.
|
||||
///
|
||||
/// ```rust
|
||||
/// use futures::*;
|
||||
@ -42,24 +42,23 @@ impl<S1, S2> Stream for Select<S1, S2>
};
|
||||
self.flag = !self.flag;
|
||||
|
||||
let a_done = match try!(a.poll()) {
|
||||
let a_done = match a.poll()? {
|
||||
Async::Ready(Some(item)) => return Ok(Some(item).into()),
|
||||
Async::Ready(None) => true,
|
||||
Async::NotReady => false,
|
||||
};
|
||||
|
||||
match try!(b.poll()) {
|
||||
match b.poll()? {
|
||||
Async::Ready(Some(item)) => {
|
||||
// If the other stream isn't finished yet, give them a chance to
|
||||
// go first next time as we pulled something off `b`.
|
||||
if !a_done {
|
||||
self.flag = !self.flag;
|
||||
}
|
||||
return Ok(Some(item).into())
|
||||
Ok(Some(item).into())
|
||||
}
|
||||
Async::Ready(None) if a_done => Ok(None.into()),
|
||||
Async::Ready(None) => Ok(Async::NotReady),
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Async::Ready(None) | Async::NotReady => Ok(Async::NotReady),
|
||||
}
|
||||
}
|
||||
}
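// Illustrative sketch (not part of the upstream diff): `select` with two
// always-ready streams; items come from both sides and the combined stream
// only ends once both inputs are exhausted.

extern crate futures;

use futures::prelude::*;
use futures::stream;

fn main() {
    let odds = stream::iter_ok::<_, ()>(vec![1, 3, 5]);
    let evens = stream::iter_ok::<_, ()>(vec![2, 4, 6]);

    // Sorting keeps the assertion independent of the exact interleaving.
    let mut merged = odds.select(evens).collect().wait().unwrap();
    merged.sort();
    assert_eq!(merged, vec![1, 2, 3, 4, 5, 6]);
}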
@ -20,6 +20,31 @@ pub fn new<S>(s: S, amt: u64) -> Skip<S>
}
|
||||
}
|
||||
|
||||
impl<S> Skip<S> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
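// Illustrative sketch (not part of the upstream diff): the accessors added
// here (`get_ref`, `get_mut`, `into_inner`) follow the same pattern on several
// combinators; `into_inner` returns the wrapped stream and drops the
// combinator's own state.

extern crate futures;

use futures::prelude::*;
use futures::stream;

fn main() {
    let skipped = stream::iter_ok::<_, ()>(1..6).skip(2);

    // The "skip two items" bookkeeping is discarded, so the inner stream
    // still yields all five items.
    let inner = skipped.into_inner();
    assert_eq!(inner.collect().wait(), Ok(vec![1, 2, 3, 4, 5]));
}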
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Skip<S>
|
||||
where S: ::sink::Sink
|
||||
|
|
|
@ -27,6 +27,31 @@ pub fn new<S, P, R>(s: S, p: P) -> SkipWhile<S, P, R>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, P, R> SkipWhile<S, P, R> where S: Stream, R: IntoFuture {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, P, R> ::sink::Sink for SkipWhile<S, P, R>
|
||||
where S: ::sink::Sink + Stream, R: IntoFuture
@ -1,3 +1,7 @@
use std::any::Any;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
|
||||
use {StartSend, Sink, Stream, Poll, Async, AsyncSink};
|
||||
use sync::BiLock;
|
||||
|
||||
|
@ -5,6 +9,15 @@ use sync::BiLock;
|
|||
#[derive(Debug)]
|
||||
pub struct SplitStream<S>(BiLock<S>);
|
||||
|
||||
impl<S> SplitStream<S> {
|
||||
/// Attempts to put the two "halves" of a split `Stream + Sink` back
|
||||
/// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
|
||||
/// a matching pair originating from the same call to `Stream::split`.
|
||||
pub fn reunite(self, other: SplitSink<S>) -> Result<S, ReuniteError<S>> {
|
||||
other.reunite(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Stream for SplitStream<S> {
|
||||
type Item = S::Item;
|
||||
type Error = S::Error;
|
||||
|
@ -21,6 +34,17 @@ impl<S: Stream> Stream for SplitStream<S> {
|
|||
#[derive(Debug)]
|
||||
pub struct SplitSink<S>(BiLock<S>);
|
||||
|
||||
impl<S> SplitSink<S> {
|
||||
/// Attempts to put the two "halves" of a split `Stream + Sink` back
|
||||
/// together. Succeeds only if the `SplitStream<S>` and `SplitSink<S>` are
|
||||
/// a matching pair originating from the same call to `Stream::split`.
|
||||
pub fn reunite(self, other: SplitStream<S>) -> Result<S, ReuniteError<S>> {
|
||||
self.0.reunite(other.0).map_err(|err| {
|
||||
ReuniteError(SplitSink(err.0), SplitStream(err.1))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Sink> Sink for SplitSink<S> {
|
||||
type SinkItem = S::SinkItem;
|
||||
type SinkError = S::SinkError;
|
||||
|
@ -55,3 +79,27 @@ pub fn split<S: Stream + Sink>(s: S) -> (SplitSink<S>, SplitStream<S>) {
|
|||
let write = SplitSink(b);
|
||||
(write, read)
|
||||
}
|
||||
|
||||
/// Error indicating a `SplitSink<S>` and `SplitStream<S>` were not two halves
|
||||
/// of a `Stream + Split`, and thus could not be `reunite`d.
|
||||
pub struct ReuniteError<T>(pub SplitSink<T>, pub SplitStream<T>);
|
||||
|
||||
impl<T> fmt::Debug for ReuniteError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_tuple("ReuniteError")
|
||||
.field(&"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Display for ReuniteError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "tried to reunite a SplitStream and SplitSink that don't form a pair")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Any> Error for ReuniteError<T> {
|
||||
fn description(&self) -> &str {
|
||||
"tried to reunite a SplitStream and SplitSink that don't form a pair"
|
||||
}
|
||||
}
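// Illustrative sketch (not part of the upstream diff): `reunite` puts the two
// halves of a `split` back together. The `Pipe` type below is a made-up toy
// that is both a `Stream` and a `Sink` over an in-memory queue.

extern crate futures;

use std::collections::VecDeque;

use futures::{Async, AsyncSink, Poll, Sink, StartSend, Stream};

struct Pipe(VecDeque<u32>);

impl Stream for Pipe {
    type Item = u32;
    type Error = ();

    fn poll(&mut self) -> Poll<Option<u32>, ()> {
        // Yield queued items; an empty queue means end-of-stream here.
        Ok(Async::Ready(self.0.pop_front()))
    }
}

impl Sink for Pipe {
    type SinkItem = u32;
    type SinkError = ();

    fn start_send(&mut self, item: u32) -> StartSend<u32, ()> {
        self.0.push_back(item);
        Ok(AsyncSink::Ready)
    }

    fn poll_complete(&mut self) -> Poll<(), ()> {
        Ok(Async::Ready(()))
    }
}

fn main() {
    let (sink, stream) = Pipe(VecDeque::new()).split();

    // Halves originating from the same `split` call can always be rejoined.
    let pipe = sink.reunite(stream).expect("halves from the same split");
    assert_eq!(pipe.0.len(), 0);
}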
@ -20,6 +20,31 @@ pub fn new<S>(s: S, amt: u64) -> Take<S>
}
|
||||
}
|
||||
|
||||
impl<S> Take<S> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S> ::sink::Sink for Take<S>
|
||||
where S: ::sink::Sink + Stream
|
||||
|
|
|
@ -27,6 +27,31 @@ pub fn new<S, P, R>(s: S, p: P) -> TakeWhile<S, P, R>
|
|||
}
|
||||
}
|
||||
|
||||
impl<S, P, R> TakeWhile<S, P, R> where S: Stream, R: IntoFuture {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
&self.stream
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
&mut self.stream
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream
|
||||
}
|
||||
}
|
||||
|
||||
// Forwarding impl of Sink from the underlying stream
|
||||
impl<S, P, R> ::sink::Sink for TakeWhile<S, P, R>
|
||||
where S: ::sink::Sink + Stream, R: IntoFuture
|
||||
|
|
|
@ -6,7 +6,7 @@ use stream::Stream;
|
|||
/// Creates a `Stream` from a seed and a closure returning a `Future`.
|
||||
///
|
||||
/// This function is the dual for the `Stream::fold()` adapter: while
|
||||
/// `Stream:fold()` reduces a `Stream` to one single value, `unfold()` creates a
|
||||
/// `Stream::fold()` reduces a `Stream` to one single value, `unfold()` creates a
|
||||
/// `Stream` from a seed value.
|
||||
///
|
||||
/// `unfold()` will call the provided closure with the provided seed, then wait
|
||||
|
@ -85,7 +85,7 @@ impl <T, F, Fut, It> Stream for Unfold<T, F, Fut>
|
|||
}
|
||||
}
|
||||
State::Processing(mut fut) => {
|
||||
match try!(fut.poll()) {
|
||||
match fut.poll()? {
|
||||
Async::Ready((item, next_state)) => {
|
||||
self.state = State::Ready(next_state);
|
||||
return Ok(Async::Ready(Some(item)));
|
||||
|
|
|
@ -13,6 +13,31 @@ pub struct Wait<S> {
|
|||
stream: executor::Spawn<S>,
|
||||
}
|
||||
|
||||
impl<S> Wait<S> {
|
||||
/// Acquires a reference to the underlying stream that this combinator is
|
||||
/// pulling from.
|
||||
pub fn get_ref(&self) -> &S {
|
||||
self.stream.get_ref()
|
||||
}
|
||||
|
||||
/// Acquires a mutable reference to the underlying stream that this
|
||||
/// combinator is pulling from.
|
||||
///
|
||||
/// Note that care must be taken to avoid tampering with the state of the
|
||||
/// stream which may otherwise confuse this combinator.
|
||||
pub fn get_mut(&mut self) -> &mut S {
|
||||
self.stream.get_mut()
|
||||
}
|
||||
|
||||
/// Consumes this combinator, returning the underlying stream.
|
||||
///
|
||||
/// Note that this may discard intermediate state of this combinator, so
|
||||
/// care should be taken to avoid losing resources when this is called.
|
||||
pub fn into_inner(self) -> S {
|
||||
self.stream.into_inner()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new<S: Stream>(s: S) -> Wait<S> {
|
||||
Wait {
|
||||
stream: executor::spawn(s),
|
||||
|
|
|
@ -34,17 +34,15 @@ impl<S1, S2> Stream for Zip<S1, S2>
|
|||
|
||||
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
|
||||
if self.queued1.is_none() {
|
||||
match try!(self.stream1.poll()) {
|
||||
Async::NotReady => {}
|
||||
match self.stream1.poll()? {
|
||||
Async::Ready(Some(item1)) => self.queued1 = Some(item1),
|
||||
Async::Ready(None) => {}
|
||||
Async::Ready(None) | Async::NotReady => {}
|
||||
}
|
||||
}
|
||||
if self.queued2.is_none() {
|
||||
match try!(self.stream2.poll()) {
|
||||
Async::NotReady => {}
|
||||
match self.stream2.poll()? {
|
||||
Async::Ready(Some(item2)) => self.queued2 = Some(item2),
|
||||
Async::Ready(None) => {}
|
||||
Async::Ready(None) | Async::NotReady => {}
|
||||
}
|
||||
}
@ -1,5 +1,8 @@
use std::any::Any;
|
||||
use std::boxed::Box;
|
||||
use std::cell::UnsafeCell;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::mem;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::sync::Arc;
|
||||
|
@ -35,7 +38,7 @@ pub struct BiLock<T> {
|
|||
#[derive(Debug)]
|
||||
struct Inner<T> {
|
||||
state: AtomicUsize,
|
||||
inner: UnsafeCell<T>,
|
||||
inner: Option<UnsafeCell<T>>,
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for Inner<T> {}
|
||||
|
@ -50,7 +53,7 @@ impl<T> BiLock<T> {
|
|||
pub fn new(t: T) -> (BiLock<T>, BiLock<T>) {
|
||||
let inner = Arc::new(Inner {
|
||||
state: AtomicUsize::new(0),
|
||||
inner: UnsafeCell::new(t),
|
||||
inner: Some(UnsafeCell::new(t)),
|
||||
});
|
||||
|
||||
(BiLock { inner: inner.clone() }, BiLock { inner: inner })
|
||||
|
@ -90,7 +93,7 @@ impl<T> BiLock<T> {
|
|||
}
|
||||
}
|
||||
|
||||
let me = Box::new(task::park());
|
||||
let me = Box::new(task::current());
|
||||
let me = Box::into_raw(me) as usize;
|
||||
|
||||
match self.inner.state.compare_exchange(1, me, SeqCst, SeqCst) {
|
||||
|
@ -127,7 +130,22 @@ impl<T> BiLock<T> {
|
|||
/// Note that the returned future will never resolve to an error.
|
||||
pub fn lock(self) -> BiLockAcquire<T> {
|
||||
BiLockAcquire {
|
||||
inner: self,
|
||||
inner: Some(self),
|
||||
}
|
||||
}
|
||||
|
||||
/// Attempts to put the two "halves" of a `BiLock<T>` back together and
|
||||
/// recover the original value. Succeeds only if the two `BiLock<T>`s
|
||||
/// originated from the same call to `BiLock::new`.
|
||||
pub fn reunite(self, other: Self) -> Result<T, ReuniteError<T>> {
|
||||
if &*self.inner as *const _ == &*other.inner as *const _ {
|
||||
drop(other);
|
||||
let inner = Arc::try_unwrap(self.inner)
|
||||
.ok()
|
||||
.expect("futures: try_unwrap failed in BiLock<T>::reunite");
|
||||
Ok(unsafe { inner.into_inner() })
|
||||
} else {
|
||||
Err(ReuniteError(self, other))
|
||||
}
|
||||
}
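// Illustrative sketch (not part of the upstream diff): `BiLock::reunite`
// recovers the wrapped value when handed the two halves of the same lock.

extern crate futures;

use futures::sync::BiLock;

fn main() {
    let (left, right) = BiLock::new(String::from("shared"));

    // Halves created by the same `BiLock::new` call rejoin successfully.
    let value = left.reunite(right).expect("halves of the same lock");
    assert_eq!(value, "shared");

    // Halves of two different locks refuse to rejoin.
    let (a, _a2) = BiLock::new(1);
    let (_b, b2) = BiLock::new(2);
    assert!(a.reunite(b2).is_err());
}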
|
||||
|
||||
|
@ -143,18 +161,48 @@ impl<T> BiLock<T> {
|
|||
// Another task has parked themselves on this lock, let's wake them
|
||||
// up as its now their turn.
|
||||
n => unsafe {
|
||||
Box::from_raw(n as *mut Task).unpark();
|
||||
Box::from_raw(n as *mut Task).notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Inner<T> {
|
||||
unsafe fn into_inner(mut self) -> T {
|
||||
mem::replace(&mut self.inner, None).unwrap().into_inner()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Inner<T> {
|
||||
fn drop(&mut self) {
|
||||
assert_eq!(self.state.load(SeqCst), 0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Error indicating two `BiLock<T>`s were not two halves of a whole, and
|
||||
/// thus could not be `reunite`d.
|
||||
pub struct ReuniteError<T>(pub BiLock<T>, pub BiLock<T>);
|
||||
|
||||
impl<T> fmt::Debug for ReuniteError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_tuple("ReuniteError")
|
||||
.field(&"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Display for ReuniteError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "tried to reunite two BiLocks that don't form a pair")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Any> Error for ReuniteError<T> {
|
||||
fn description(&self) -> &str {
|
||||
"tried to reunite two BiLocks that don't form a pair"
|
||||
}
|
||||
}
|
||||
|
||||
/// Returned RAII guard from the `poll_lock` method.
|
||||
///
|
||||
/// This structure acts as a sentinel to the data in the `BiLock<T>` itself,
|
||||
|
@ -168,13 +216,13 @@ pub struct BiLockGuard<'a, T: 'a> {
|
|||
impl<'a, T> Deref for BiLockGuard<'a, T> {
|
||||
type Target = T;
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { &*self.inner.inner.inner.get() }
|
||||
unsafe { &*self.inner.inner.inner.as_ref().unwrap().get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> DerefMut for BiLockGuard<'a, T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { &mut *self.inner.inner.inner.get() }
|
||||
unsafe { &mut *self.inner.inner.inner.as_ref().unwrap().get() }
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -188,7 +236,7 @@ impl<'a, T> Drop for BiLockGuard<'a, T> {
|
|||
/// acquired.
|
||||
#[derive(Debug)]
|
||||
pub struct BiLockAcquire<T> {
|
||||
inner: BiLock<T>,
|
||||
inner: Option<BiLock<T>>,
|
||||
}
|
||||
|
||||
impl<T> Future for BiLockAcquire<T> {
|
||||
|
@ -196,15 +244,13 @@ impl<T> Future for BiLockAcquire<T> {
|
|||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<BiLockAcquired<T>, ()> {
|
||||
match self.inner.poll_lock() {
|
||||
match self.inner.as_ref().expect("cannot poll after Ready").poll_lock() {
|
||||
Async::Ready(r) => {
|
||||
mem::forget(r);
|
||||
Ok(BiLockAcquired {
|
||||
inner: BiLock { inner: self.inner.inner.clone() },
|
||||
}.into())
|
||||
}
|
||||
Async::NotReady => Ok(Async::NotReady),
|
||||
Async::NotReady => return Ok(Async::NotReady),
|
||||
}
|
||||
Ok(Async::Ready(BiLockAcquired { inner: self.inner.take() }))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -216,33 +262,37 @@ impl<T> Future for BiLockAcquire<T> {
|
|||
/// `unlock` method.
|
||||
#[derive(Debug)]
|
||||
pub struct BiLockAcquired<T> {
|
||||
inner: BiLock<T>,
|
||||
inner: Option<BiLock<T>>,
|
||||
}
|
||||
|
||||
impl<T> BiLockAcquired<T> {
|
||||
/// Recovers the original `BiLock<T>`, unlocking this lock.
|
||||
pub fn unlock(self) -> BiLock<T> {
|
||||
// note that unlocked is implemented in `Drop`, so we don't do anything
|
||||
// here other than creating a new handle to return.
|
||||
BiLock { inner: self.inner.inner.clone() }
|
||||
pub fn unlock(mut self) -> BiLock<T> {
|
||||
let bi_lock = self.inner.take().unwrap();
|
||||
|
||||
bi_lock.unlock();
|
||||
|
||||
bi_lock
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for BiLockAcquired<T> {
|
||||
type Target = T;
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { &*self.inner.inner.inner.get() }
|
||||
unsafe { &*self.inner.as_ref().unwrap().inner.inner.as_ref().unwrap().get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for BiLockAcquired<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { &mut *self.inner.inner.inner.get() }
|
||||
unsafe { &mut *self.inner.as_mut().unwrap().inner.inner.as_ref().unwrap().get() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for BiLockAcquired<T> {
|
||||
fn drop(&mut self) {
|
||||
self.inner.unlock();
|
||||
if let Some(ref bi_lock) = self.inner {
|
||||
bi_lock.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
//! More information and examples of how to use these synchronization primitives
|
||||
//! can be found [online at tokio.rs].
|
||||
//!
|
||||
//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper/synchronization/
|
||||
//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/synchronization/
|
||||
|
||||
pub mod oneshot;
|
||||
pub mod mpsc;
|
||||
|
|
|
@ -39,7 +39,7 @@
|
|||
// Since most of this work is lock-free, once the work starts, it is impossible
|
||||
// to safely revert.
|
||||
//
|
||||
// If the sender is unable to process a send operation, then the the curren
|
||||
// If the sender is unable to process a send operation, then the current
|
||||
// task is parked and the handle is sent on the parked task queue.
|
||||
//
|
||||
// Note that the implementation guarantees that the channel capacity will never
|
||||
|
@ -77,8 +77,12 @@ use std::thread;
|
|||
use std::usize;
|
||||
|
||||
use sync::mpsc::queue::{Queue, PopResult};
|
||||
use sync::oneshot;
|
||||
use task::{self, Task};
|
||||
use {Async, AsyncSink, Poll, StartSend, Sink, Stream};
|
||||
use future::Executor;
|
||||
use sink::SendAll;
|
||||
use resultstream::{self, Results};
|
||||
use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};
|
||||
|
||||
mod queue;
|
||||
|
||||
|
@ -93,7 +97,7 @@ pub struct Sender<T> {
|
|||
// Handle to the task that is blocked on this sender. This handle is sent
|
||||
// to the receiver half in order to be notified when the sender becomes
|
||||
// unblocked.
|
||||
sender_task: SenderTask,
|
||||
sender_task: Arc<Mutex<SenderTask>>,
|
||||
|
||||
// True if the sender might be blocked. This is an optimization to avoid
|
||||
// having to lock the mutex most of the time.
|
||||
|
@ -106,14 +110,8 @@ pub struct Sender<T> {
|
|||
#[derive(Debug)]
|
||||
pub struct UnboundedSender<T>(Sender<T>);
|
||||
|
||||
fn _assert_kinds() {
|
||||
fn _assert_send<T: Send>() {}
|
||||
fn _assert_sync<T: Sync>() {}
|
||||
fn _assert_clone<T: Clone>() {}
|
||||
_assert_send::<UnboundedSender<u32>>();
|
||||
_assert_sync::<UnboundedSender<u32>>();
|
||||
_assert_clone::<UnboundedSender<u32>>();
|
||||
}
|
||||
trait AssertKinds: Send + Sync + Clone {}
|
||||
impl AssertKinds for UnboundedSender<u32> {}
|
||||
|
||||
|
||||
/// The receiving end of a channel which implements the `Stream` trait.
|
||||
|
@ -139,6 +137,18 @@ pub struct UnboundedReceiver<T>(Receiver<T>);
|
|||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct SendError<T>(T);
|
||||
|
||||
/// Error type returned from `try_send`
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub struct TrySendError<T> {
|
||||
kind: TrySendErrorKind<T>,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
enum TrySendErrorKind<T> {
|
||||
Full(T),
|
||||
Disconnected(T),
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for SendError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_tuple("SendError")
|
||||
|
@ -167,6 +177,65 @@ impl<T> SendError<T> {
|
|||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for TrySendError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt.debug_tuple("TrySendError")
|
||||
.field(&"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Display for TrySendError<T> {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
if self.is_full() {
|
||||
write!(fmt, "send failed because channel is full")
|
||||
} else {
|
||||
write!(fmt, "send failed because receiver is gone")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Any> Error for TrySendError<T> {
|
||||
fn description(&self) -> &str {
|
||||
if self.is_full() {
|
||||
"send failed because channel is full"
|
||||
} else {
|
||||
"send failed because receiver is gone"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> TrySendError<T> {
|
||||
/// Returns true if this error is a result of the channel being full
|
||||
pub fn is_full(&self) -> bool {
|
||||
use self::TrySendErrorKind::*;
|
||||
|
||||
match self.kind {
|
||||
Full(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if this error is a result of the receiver being dropped
|
||||
pub fn is_disconnected(&self) -> bool {
|
||||
use self::TrySendErrorKind::*;
|
||||
|
||||
match self.kind {
|
||||
Disconnected(_) => true,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the message that was attempted to be sent but failed.
|
||||
pub fn into_inner(self) -> T {
|
||||
use self::TrySendErrorKind::*;
|
||||
|
||||
match self.kind {
|
||||
Full(v) | Disconnected(v) => v,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Inner<T> {
|
||||
// Max buffer size of the channel. If `None` then the channel is unbounded.
|
||||
|
@ -180,7 +249,7 @@ struct Inner<T> {
|
|||
message_queue: Queue<Option<T>>,
|
||||
|
||||
// Atomic, FIFO queue used to send parked task handles to the receiver.
|
||||
parked_queue: Queue<SenderTask>,
|
||||
parked_queue: Queue<Arc<Mutex<SenderTask>>>,
|
||||
|
||||
// Number of senders in existence
|
||||
num_senders: AtomicUsize,
|
||||
|
@ -213,13 +282,13 @@ enum TryPark {
|
|||
}
|
||||
|
||||
// The `is_open` flag is stored in the left-most bit of `Inner::state`
|
||||
const OPEN_MASK: usize = 1 << 31;
|
||||
const OPEN_MASK: usize = usize::MAX - (usize::MAX >> 1);
|
||||
|
||||
// When a new channel is created, it is created in the open state with no
|
||||
// pending messages.
|
||||
const INIT_STATE: usize = OPEN_MASK;
|
||||
|
||||
// The maximum number of messages that a channel can track is `usize::MAX > 1`
|
||||
// The maximum number of messages that a channel can track is `usize::MAX >> 1`
|
||||
const MAX_CAPACITY: usize = !(OPEN_MASK);
|
||||
|
||||
// The maximum requested buffer size must be less than the maximum capacity of
|
||||
|
@ -227,7 +296,28 @@ const MAX_CAPACITY: usize = !(OPEN_MASK);
|
|||
const MAX_BUFFER: usize = MAX_CAPACITY >> 1;
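// Illustrative check (not part of the upstream diff): the new `OPEN_MASK`
// expression is a portable way to select the most significant bit of `usize`
// (1 << 31 on 32-bit targets, 1 << 63 on 64-bit targets).

fn main() {
    // `usize::MAX >> 1` clears only the top bit, so subtracting it from
    // `usize::MAX` leaves exactly that top bit set.
    let open_mask: usize = std::usize::MAX - (std::usize::MAX >> 1);
    assert_eq!(open_mask, 1usize << (std::mem::size_of::<usize>() * 8 - 1));

    // Everything below the flag bit remains available for the message count.
    let max_capacity: usize = !open_mask;
    assert_eq!(max_capacity, std::usize::MAX >> 1);
}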
|
||||
|
||||
// Sent to the consumer to wake up blocked producers
|
||||
type SenderTask = Arc<Mutex<Option<Task>>>;
|
||||
#[derive(Debug)]
|
||||
struct SenderTask {
|
||||
task: Option<Task>,
|
||||
is_parked: bool,
|
||||
}
|
||||
|
||||
impl SenderTask {
|
||||
fn new() -> Self {
|
||||
SenderTask {
|
||||
task: None,
|
||||
is_parked: false,
|
||||
}
|
||||
}
|
||||
|
||||
fn notify(&mut self) {
|
||||
self.is_parked = false;
|
||||
|
||||
if let Some(task) = self.task.take() {
|
||||
task.notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an in-memory channel implementation of the `Stream` trait with
|
||||
/// bounded capacity.
|
||||
|
@ -281,7 +371,7 @@ fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
|
|||
|
||||
let tx = Sender {
|
||||
inner: inner.clone(),
|
||||
sender_task: Arc::new(Mutex::new(None)),
|
||||
sender_task: Arc::new(Mutex::new(SenderTask::new())),
|
||||
maybe_parked: false,
|
||||
};
|
||||
|
||||
|
@ -299,8 +389,35 @@ fn channel2<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
|
|||
*/
|
||||
|
||||
impl<T> Sender<T> {
|
||||
/// Attempts to send a message on this `Sender<T>` without blocking.
|
||||
///
|
||||
/// This function, unlike `start_send`, is safe to call whether it's being
|
||||
/// called on a task or not. Note that this function, however, will *not*
|
||||
/// attempt to block the current task if the message cannot be sent.
|
||||
///
|
||||
/// It is not recommended to call this function from inside of a future,
|
||||
/// only from an external thread where you've otherwise arranged to be
|
||||
/// notified when the channel is no longer full.
|
||||
pub fn try_send(&mut self, msg: T) -> Result<(), TrySendError<T>> {
|
||||
// If the sender is currently blocked, reject the message
|
||||
if !self.poll_unparked(false).is_ready() {
|
||||
return Err(TrySendError {
|
||||
kind: TrySendErrorKind::Full(msg),
|
||||
});
|
||||
}
|
||||
|
||||
// The channel has capacity to accept the message, so send it
|
||||
self.do_send(Some(msg), false)
|
||||
.map_err(|SendError(v)| {
|
||||
TrySendError {
|
||||
kind: TrySendErrorKind::Disconnected(v),
|
||||
}
|
||||
})
|
||||
}
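// Illustrative sketch (not part of the upstream diff): `try_send` needs no
// task context and reports whether a failure came from a full buffer or from
// a dropped receiver.

extern crate futures;

use futures::sync::mpsc;

fn main() {
    let (mut tx, rx) = mpsc::channel::<u32>(1);

    // No task context is needed; the first message fits in the buffer.
    tx.try_send(1).expect("buffer has room for the first message");

    // Once the receiver is gone, the send fails and hands the message back.
    drop(rx);
    let err = tx.try_send(2).unwrap_err();
    assert!(err.is_disconnected());
    assert_eq!(err.into_inner(), 2);
}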
|
||||
|
||||
// Do the send without failing
|
||||
fn do_send(&mut self, msg: Option<T>, can_park: bool) -> Result<(), SendError<T>> {
|
||||
// None means close
|
||||
fn do_send(&mut self, msg: Option<T>, do_park: bool) -> Result<(), SendError<T>> {
|
||||
// First, increment the number of messages contained by the channel.
|
||||
// This operation will also atomically determine if the sender task
|
||||
// should be parked.
|
||||
|
@ -331,11 +448,11 @@ impl<T> Sender<T> {
|
|||
// be parked. This will send the task handle on the parked task queue.
|
||||
//
|
||||
// However, when `do_send` is called while dropping the `Sender`,
|
||||
// `task::park()` can't be called safely. In this case, in order to
|
||||
// `task::current()` can't be called safely. In this case, in order to
|
||||
// maintain internal consistency, a blank message is pushed onto the
|
||||
// parked task queue.
|
||||
if park_self {
|
||||
self.park(can_park);
|
||||
self.park(do_park);
|
||||
}
|
||||
|
||||
self.queue_push_and_signal(msg);
|
||||
|
@ -428,27 +545,31 @@ impl<T> Sender<T> {
|
|||
}
|
||||
|
||||
// Setting this flag enables the receiving end to detect that
|
||||
// an unpark event happened in order to avoid unecessarily
|
||||
// an unpark event happened in order to avoid unnecessarily
|
||||
// parking.
|
||||
recv_task.unparked = true;
|
||||
recv_task.task.take()
|
||||
};
|
||||
|
||||
if let Some(task) = task {
|
||||
task.unpark();
|
||||
task.notify();
|
||||
}
|
||||
}
|
||||
|
||||
fn park(&mut self, can_park: bool) {
|
||||
// TODO: clean up internal state if the task::park will fail
|
||||
// TODO: clean up internal state if the task::current will fail
|
||||
|
||||
let task = if can_park {
|
||||
Some(task::park())
|
||||
Some(task::current())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
*self.sender_task.lock().unwrap() = task;
|
||||
{
|
||||
let mut sender = self.sender_task.lock().unwrap();
|
||||
sender.task = task;
|
||||
sender.is_parked = true;
|
||||
}
|
||||
|
||||
// Send handle over queue
|
||||
let t = self.sender_task.clone();
|
||||
|
@ -460,14 +581,33 @@ impl<T> Sender<T> {
|
|||
self.maybe_parked = state.is_open;
|
||||
}
|
||||
|
||||
fn poll_unparked(&mut self) -> Async<()> {
|
||||
/// Polls the channel to determine if there is guaranteed to be capacity to send at least one
|
||||
/// item without waiting.
|
||||
///
|
||||
/// Returns `Ok(Async::Ready(_))` if there is sufficient capacity, or returns
|
||||
/// `Ok(Async::NotReady)` if the channel is not guaranteed to have capacity. Returns
|
||||
/// `Err(SendError(_))` if the receiver has been dropped.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method will panic if called from outside the context of a task or future.
|
||||
pub fn poll_ready(&mut self) -> Poll<(), SendError<()>> {
|
||||
let state = decode_state(self.inner.state.load(SeqCst));
|
||||
if !state.is_open {
|
||||
return Err(SendError(()));
|
||||
}
|
||||
|
||||
Ok(self.poll_unparked(true))
|
||||
}
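// Illustrative sketch (not part of the upstream diff): `poll_ready` must be
// called from a task, so this drives it through `future::poll_fn` and
// `wait()`.

extern crate futures;

use futures::future;
use futures::sync::mpsc;
use futures::Future;

fn main() {
    let (mut tx, rx) = mpsc::channel::<u32>(1);

    // Inside a task the channel reports available capacity.
    let ready = future::poll_fn(|| tx.poll_ready()).wait();
    assert!(ready.is_ok());

    // After the receiver is dropped, `poll_ready` reports an error instead.
    drop(rx);
    let gone = future::poll_fn(|| tx.poll_ready()).wait();
    assert!(gone.is_err());
}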
|
||||
|
||||
fn poll_unparked(&mut self, do_park: bool) -> Async<()> {
|
||||
// First check the `maybe_parked` variable. This avoids acquiring the
|
||||
// lock in most cases
|
||||
if self.maybe_parked {
|
||||
// Get a lock on the task handle
|
||||
let mut task = self.sender_task.lock().unwrap();
|
||||
|
||||
if task.is_none() {
|
||||
if !task.is_parked {
|
||||
self.maybe_parked = false;
|
||||
return Async::Ready(())
|
||||
}
|
||||
|
@ -478,7 +618,11 @@ impl<T> Sender<T> {
|
|||
//
|
||||
// Update the task in case the `Sender` has been moved to another
|
||||
// task
|
||||
*task = Some(task::park());
|
||||
task.task = if do_park {
|
||||
Some(task::current())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
Async::NotReady
|
||||
} else {
|
||||
|
@ -494,12 +638,12 @@ impl<T> Sink for Sender<T> {
|
|||
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
|
||||
// If the sender is currently blocked, reject the message before doing
|
||||
// any work.
|
||||
if !self.poll_unparked().is_ready() {
|
||||
if !self.poll_unparked(true).is_ready() {
|
||||
return Ok(AsyncSink::NotReady(msg));
|
||||
}
|
||||
|
||||
// The channel has capacity to accept the message, so send it.
|
||||
try!(self.do_send(Some(msg), true));
|
||||
self.do_send(Some(msg), true)?;
|
||||
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
@ -519,7 +663,18 @@ impl<T> UnboundedSender<T> {
|
|||
/// This is an unbounded sender, so this function differs from `Sink::send`
|
||||
/// by ensuring the return type reflects that the channel is always ready to
|
||||
/// receive messages.
|
||||
#[deprecated(note = "renamed to `unbounded_send`")]
|
||||
#[doc(hidden)]
|
||||
pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
|
||||
self.unbounded_send(msg)
|
||||
}
|
||||
|
||||
/// Sends the provided message along this channel.
|
||||
///
|
||||
/// This is an unbounded sender, so this function differs from `Sink::send`
|
||||
/// by ensuring the return type reflects that the channel is always ready to
|
||||
/// receive messages.
|
||||
pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
|
||||
self.0.do_send_nb(msg)
|
||||
}
|
||||
}
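// Illustrative sketch (not part of the upstream diff): `unbounded_send` (the
// new name for `send` on `UnboundedSender`) never blocks and needs no task
// context.

extern crate futures;

use futures::prelude::*;
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::unbounded::<u32>();

    // Sends succeed immediately as long as the receiver is alive.
    tx.unbounded_send(1).expect("receiver is still alive");
    tx.unbounded_send(2).expect("receiver is still alive");
    drop(tx);

    // Dropping the sender ends the stream after the buffered items.
    assert_eq!(rx.collect().wait(), Ok(vec![1, 2]));
}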
|
||||
|
@ -546,7 +701,7 @@ impl<'a, T> Sink for &'a UnboundedSender<T> {
|
|||
type SinkError = SendError<T>;
|
||||
|
||||
fn start_send(&mut self, msg: T) -> StartSend<T, SendError<T>> {
|
||||
try!(self.0.do_send_nb(msg));
|
||||
self.0.do_send_nb(msg)?;
|
||||
Ok(AsyncSink::Ready)
|
||||
}
|
||||
|
||||
|
@ -589,7 +744,7 @@ impl<T> Clone for Sender<T> {
|
|||
if actual == curr {
|
||||
return Sender {
|
||||
inner: self.inner.clone(),
|
||||
sender_task: Arc::new(Mutex::new(None)),
|
||||
sender_task: Arc::new(Mutex::new(SenderTask::new())),
|
||||
maybe_parked: false,
|
||||
};
|
||||
}
|
||||
|
@ -645,10 +800,7 @@ impl<T> Receiver<T> {
|
|||
loop {
|
||||
match unsafe { self.inner.parked_queue.pop() } {
|
||||
PopResult::Data(task) => {
|
||||
let task = task.lock().unwrap().take();
|
||||
if let Some(task) = task {
|
||||
task.unpark();
|
||||
}
|
||||
task.lock().unwrap().notify();
|
||||
}
|
||||
PopResult::Empty => break,
|
||||
PopResult::Inconsistent => thread::yield_now(),
|
||||
|
@ -675,7 +827,7 @@ impl<T> Receiver<T> {
|
|||
//
|
||||
// 1) Spin
|
||||
// 2) thread::yield_now()
|
||||
// 3) task::park().unwrap() & return NotReady
|
||||
// 3) task::current().unwrap() & return NotReady
|
||||
//
|
||||
// For now, thread::yield_now() is used, but it would
|
||||
// probably be better to spin a few times then yield.
|
||||
|
@ -690,14 +842,7 @@ impl<T> Receiver<T> {
|
|||
loop {
|
||||
match unsafe { self.inner.parked_queue.pop() } {
|
||||
PopResult::Data(task) => {
|
||||
// Do this step first so that the lock is dropped when
|
||||
// `unpark` is called
|
||||
let task = task.lock().unwrap().take();
|
||||
|
||||
if let Some(task) = task {
|
||||
task.unpark();
|
||||
}
|
||||
|
||||
task.lock().unwrap().notify();
|
||||
return;
|
||||
}
|
||||
PopResult::Empty => {
|
||||
|
@ -731,7 +876,7 @@ impl<T> Receiver<T> {
|
|||
return TryPark::NotEmpty;
|
||||
}
|
||||
|
||||
recv_task.task = Some(task::park());
|
||||
recv_task.task = Some(task::current());
|
||||
TryPark::Parked
|
||||
}
|
||||
|
||||
|
@ -828,6 +973,138 @@ impl<T> Stream for UnboundedReceiver<T> {
|
|||
}
|
||||
}
|
||||
|
||||
/// Handle returned from the `spawn` function.
|
||||
///
|
||||
/// This handle is a stream that proxies a stream on a separate `Executor`.
|
||||
/// Created through the `mpsc::spawn` function, this handle will produce
|
||||
/// the same values as the proxied stream, as they are produced in the executor,
|
||||
/// and uses a limited buffer to exert back-pressure on the remote stream.
|
||||
///
|
||||
/// If this handle is dropped, then the stream will no longer be polled and is
|
||||
/// scheduled to be dropped.
|
||||
pub struct SpawnHandle<Item, Error> {
|
||||
rx: Receiver<Result<Item, Error>>,
|
||||
_cancel_tx: oneshot::Sender<()>,
|
||||
}
|
||||
|
||||
/// Type of future which `Executor` instances must be able to execute for `spawn`.
|
||||
pub struct Execute<S: Stream> {
|
||||
inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
|
||||
cancel_rx: oneshot::Receiver<()>,
|
||||
}
|
||||
|
||||
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
|
||||
/// returning a handle representing the remote stream.
|
||||
///
|
||||
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
|
||||
///
|
||||
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
|
||||
/// When `stream` has additional items available, then the `SpawnHandle`
|
||||
/// will have those same items available.
|
||||
///
|
||||
/// At most `buffer + 1` elements will be buffered at a time. If the buffer
|
||||
/// is full, then `stream` will stop progressing until more space is available.
|
||||
/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if `executor` is unable to spawn a `Future` containing
|
||||
/// the entirety of the `stream`.
|
||||
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
|
||||
where S: Stream,
|
||||
E: Executor<Execute<S>>
|
||||
{
|
||||
let (cancel_tx, cancel_rx) = oneshot::channel();
|
||||
let (tx, rx) = channel(buffer);
|
||||
executor.execute(Execute {
|
||||
inner: tx.send_all(resultstream::new(stream)),
|
||||
cancel_rx: cancel_rx,
|
||||
}).expect("failed to spawn stream");
|
||||
SpawnHandle {
|
||||
rx: rx,
|
||||
_cancel_tx: cancel_tx,
|
||||
}
|
||||
}
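// Illustrative sketch (not part of the upstream diff): proxying a stream
// through `mpsc::spawn`, assuming `futures_cpupool::CpuPool` (updated in this
// same commit) as the `Executor` implementation.

extern crate futures;
extern crate futures_cpupool;

use futures::prelude::*;
use futures::stream;
use futures::sync::mpsc;
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new(1);
    let numbers = stream::iter_ok::<_, ()>(0..4);

    // The stream itself is driven on the pool; the returned handle is a
    // plain `Stream` that proxies its items with a one-element buffer.
    let handle = mpsc::spawn(numbers, &pool, 1);
    assert_eq!(handle.collect().wait(), Ok(vec![0, 1, 2, 3]));
}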
|
||||
|
||||
/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
|
||||
/// returning a handle representing the remote stream, with unbounded buffering.
|
||||
///
|
||||
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
|
||||
///
|
||||
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
|
||||
/// When `stream` has additional items available, then the `SpawnHandle`
|
||||
/// will have those same items available.
|
||||
///
|
||||
/// An unbounded buffer is used, which means that values will be buffered as
|
||||
/// fast as `stream` can produce them, without any backpressure. Therefore, if
|
||||
/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
|
||||
/// potentially hog CPU resources.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if `executor` is unable to spawn a `Future` containing
|
||||
/// the entirety of the `stream`.
|
||||
pub fn spawn_unbounded<S, E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
|
||||
where S: Stream,
|
||||
E: Executor<Execute<S>>
|
||||
{
|
||||
let (cancel_tx, cancel_rx) = oneshot::channel();
|
||||
let (tx, rx) = channel2(None);
|
||||
executor.execute(Execute {
|
||||
inner: tx.send_all(resultstream::new(stream)),
|
||||
cancel_rx: cancel_rx,
|
||||
}).expect("failed to spawn stream");
|
||||
SpawnHandle {
|
||||
rx: rx,
|
||||
_cancel_tx: cancel_tx,
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, E> Stream for SpawnHandle<I, E> {
|
||||
type Item = I;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<Option<I>, E> {
|
||||
match self.rx.poll() {
|
||||
Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
|
||||
Ok(Async::Ready(Some(Err(e)))) => Err(e),
|
||||
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(_) => unreachable!("mpsc::Receiver should never return Err"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<I, E> fmt::Debug for SpawnHandle<I, E> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("SpawnHandle")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Future for Execute<S> {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
match self.cancel_rx.poll() {
|
||||
Ok(Async::NotReady) => (),
|
||||
_ => return Ok(Async::Ready(())),
|
||||
}
|
||||
match self.inner.poll() {
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
_ => Ok(Async::Ready(()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> fmt::Debug for Execute<S> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Execute")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
* ===== impl Inner =====
|
||||
@ -114,7 +114,7 @@ impl<T> Queue<T> {
/// return `Option<T>`. It is possible for this queue to be in an
|
||||
/// inconsistent state where many pushes have succeeded and completely
|
||||
/// finished, but pops cannot return `Some(t)`. This inconsistent state
|
||||
/// happens when a pusher is pre-empted at an inopportune moment.
|
||||
/// happens when a pusher is preempted at an inopportune moment.
|
||||
///
|
||||
/// This inconsistent state means that this queue does indeed have data, but
|
||||
/// it does not currently have access to it at this time.
|
||||
@ -7,6 +7,7 @@ use std::error::Error;
use std::fmt;
|
||||
|
||||
use {Future, Poll, Async};
|
||||
use future::{lazy, Lazy, Executor, IntoFuture};
|
||||
use lock::Lock;
|
||||
use task::{self, Task};
|
||||
|
||||
|
@ -34,7 +35,7 @@ pub struct Sender<T> {
|
|||
#[derive(Debug)]
|
||||
struct Inner<T> {
|
||||
/// Indicates whether this oneshot is complete yet. This is filled in both
|
||||
/// by `Sender::drop` and by `Receiver::drop`, and both sides iterpret it
|
||||
/// by `Sender::drop` and by `Receiver::drop`, and both sides interpret it
|
||||
/// appropriately.
|
||||
///
|
||||
/// For `Receiver`, if this is `true`, then it's guaranteed that `data` is
|
||||
|
@ -83,23 +84,18 @@ struct Inner<T> {
|
|||
/// use futures::sync::oneshot;
|
||||
/// use futures::*;
|
||||
///
|
||||
/// let (c, p) = oneshot::channel::<i32>();
|
||||
/// let (p, c) = oneshot::channel::<i32>();
|
||||
///
|
||||
/// thread::spawn(|| {
|
||||
/// p.map(|i| {
|
||||
/// c.map(|i| {
|
||||
/// println!("got: {}", i);
|
||||
/// }).wait();
|
||||
/// });
|
||||
///
|
||||
/// c.send(3).unwrap();
|
||||
/// p.send(3).unwrap();
|
||||
/// ```
|
||||
pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
|
||||
let inner = Arc::new(Inner {
|
||||
complete: AtomicBool::new(false),
|
||||
data: Lock::new(None),
|
||||
rx_task: Lock::new(None),
|
||||
tx_task: Lock::new(None),
|
||||
});
|
||||
let inner = Arc::new(Inner::new());
|
||||
let receiver = Receiver {
|
||||
inner: inner.clone(),
|
||||
};
|
||||
|
@ -109,64 +105,55 @@ pub fn channel<T>() -> (Sender<T>, Receiver<T>) {
|
|||
(sender, receiver)
|
||||
}
|
||||
|
||||
impl<T> Sender<T> {
|
||||
#[deprecated(note = "renamed to `send`", since = "0.1.11")]
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub fn complete(self, t: T) {
|
||||
drop(self.send(t));
|
||||
impl<T> Inner<T> {
|
||||
fn new() -> Inner<T> {
|
||||
Inner {
|
||||
complete: AtomicBool::new(false),
|
||||
data: Lock::new(None),
|
||||
rx_task: Lock::new(None),
|
||||
tx_task: Lock::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Completes this oneshot with a successful result.
|
||||
///
|
||||
/// This function will consume `self` and indicate to the other end, the
|
||||
/// `Receiver`, that the value provided is the result of the computation this
|
||||
/// represents.
|
||||
///
|
||||
/// If the value is successfully enqueued for the remote end to receive,
|
||||
/// then `Ok(())` is returned. If the receiving end was deallocated before
|
||||
/// this function was called, however, then `Err` is returned with the value
|
||||
/// provided.
|
||||
pub fn send(self, t: T) -> Result<(), T> {
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
fn send(&self, t: T) -> Result<(), T> {
|
||||
if self.complete.load(SeqCst) {
|
||||
return Err(t)
|
||||
}
|
||||
|
||||
// Note that this lock acquisition should always succeed as it can only
|
||||
// interfere with `poll` in `Receiver` which is only called when the
|
||||
// `complete` flag is true, which we're setting here.
|
||||
let mut slot = self.inner.data.try_lock().unwrap();
|
||||
assert!(slot.is_none());
|
||||
*slot = Some(t);
|
||||
drop(slot);
|
||||
Ok(())
|
||||
// Note that this lock acquisition may fail if the receiver
|
||||
// is closed and sets the `complete` flag to true, whereupon
|
||||
// the receiver may call `poll()`.
|
||||
if let Some(mut slot) = self.data.try_lock() {
|
||||
assert!(slot.is_none());
|
||||
*slot = Some(t);
|
||||
drop(slot);
|
||||
|
||||
// If the receiver called `close()` between the check at the
|
||||
// start of the function, and the lock being released, then
|
||||
// the receiver may not be around to receive it, so try to
|
||||
// pull it back out.
|
||||
if self.complete.load(SeqCst) {
|
||||
// If lock acquisition fails, then receiver is actually
|
||||
// receiving it, so we're good.
|
||||
if let Some(mut slot) = self.data.try_lock() {
|
||||
if let Some(t) = slot.take() {
|
||||
return Err(t);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
// Must have been closed
|
||||
Err(t)
|
||||
}
|
||||
}
|
||||
|
||||
/// Polls this `Sender` half to detect whether the `Receiver` this has
|
||||
/// paired with has gone away.
|
||||
///
|
||||
/// This function can be used to learn about when the `Receiver` (consumer)
|
||||
/// half has gone away and nothing will be able to receive a message sent
|
||||
/// from `complete`.
|
||||
///
|
||||
/// Like `Future::poll`, this function will panic if it's not called from
|
||||
/// within the context of a task. In other words, this should only ever be
|
||||
/// called from inside another future.
|
||||
///
|
||||
/// If `Ready` is returned then it means that the `Receiver` has disappeared
|
||||
/// and the result this `Sender` would otherwise produce should no longer
|
||||
/// be produced.
|
||||
///
|
||||
/// If `NotReady` is returned then the `Receiver` is still alive and may be
|
||||
/// able to receive a message if sent. The current task, however, is
|
||||
/// scheduled to receive a notification if the corresponding `Receiver` goes
|
||||
/// away.
|
||||
pub fn poll_cancel(&mut self) -> Poll<(), ()> {
|
||||
fn poll_cancel(&self) -> Poll<(), ()> {
|
||||
// Fast path up first, just read the flag and see if our other half is
|
||||
// gone. This flag is set both in our destructor and the oneshot
|
||||
// destructor, but our destructor hasn't run yet so if it's set then the
|
||||
// oneshot is gone.
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
if self.complete.load(SeqCst) {
|
||||
return Ok(Async::Ready(()))
|
||||
}
|
||||
|
||||
|
@ -183,21 +170,23 @@ impl<T> Sender<T> {
|
|||
// may have been dropped. The first thing it does is set the flag, and
|
||||
// if it fails to acquire the lock it assumes that we'll see the flag
|
||||
// later on. So... we then try to see the flag later on!
|
||||
let handle = task::park();
|
||||
match self.inner.tx_task.try_lock() {
|
||||
let handle = task::current();
|
||||
match self.tx_task.try_lock() {
|
||||
Some(mut p) => *p = Some(handle),
|
||||
None => return Ok(Async::Ready(())),
|
||||
}
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
if self.complete.load(SeqCst) {
|
||||
Ok(Async::Ready(()))
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Sender<T> {
|
||||
fn drop(&mut self) {
|
||||
fn is_canceled(&self) -> bool {
|
||||
self.complete.load(SeqCst)
|
||||
}
|
||||
|
||||
fn drop_tx(&self) {
|
||||
// Flag that we're a completed `Sender` and try to wake up a receiver.
|
||||
// Whether or not we actually stored any data will get picked up and
|
||||
// translated to either an item or cancellation.
|
||||
|
@ -218,17 +207,176 @@ impl<T> Drop for Sender<T> {
|
|||
// then it would not necessarily synchronize with `inner.complete`
|
||||
// and deadlock might be possible, as was observed in
|
||||
// https://github.com/alexcrichton/futures-rs/pull/219.
|
||||
self.inner.complete.store(true, SeqCst);
|
||||
if let Some(mut slot) = self.inner.rx_task.try_lock() {
|
||||
self.complete.store(true, SeqCst);
|
||||
if let Some(mut slot) = self.rx_task.try_lock() {
|
||||
if let Some(task) = slot.take() {
|
||||
drop(slot);
|
||||
task.unpark();
|
||||
task.notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn close_rx(&self) {
|
||||
// Flag our completion and then attempt to wake up the sender if it's
|
||||
// blocked. See comments in `drop` below for more info
|
||||
self.complete.store(true, SeqCst);
|
||||
if let Some(mut handle) = self.tx_task.try_lock() {
|
||||
if let Some(task) = handle.take() {
|
||||
drop(handle);
|
||||
task.notify()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn recv(&self) -> Poll<T, Canceled> {
|
||||
let mut done = false;
|
||||
|
||||
// Check to see if some data has arrived. If it hasn't then we need to
|
||||
// block our task.
|
||||
//
|
||||
// Note that the acquisition of the `rx_task` lock might fail below, but
|
||||
// the only situation where this can happen is during `Sender::drop`
|
||||
// when we are indeed completed already. If that's happening then we
|
||||
// know we're completed so keep going.
|
||||
if self.complete.load(SeqCst) {
|
||||
done = true;
|
||||
} else {
|
||||
let task = task::current();
|
||||
match self.rx_task.try_lock() {
|
||||
Some(mut slot) => *slot = Some(task),
|
||||
None => done = true,
|
||||
}
|
||||
}
|
||||
|
||||
// If we're `done` via one of the paths above, then look at the data and
|
||||
// figure out what the answer is. If, however, we stored `rx_task`
|
||||
// successfully above we need to check again if we're completed in case
|
||||
// a message was sent while `rx_task` was locked and couldn't notify us
|
||||
// otherwise.
|
||||
//
|
||||
// If we're not done, and we're not complete, though, then we've
|
||||
// successfully blocked our task and we return `NotReady`.
|
||||
if done || self.complete.load(SeqCst) {
|
||||
// If taking the lock fails, the sender will realise that we're
|
||||
// `done` when it checks the `complete` flag on the way out, and will
|
||||
// treat the send as a failure.
|
||||
if let Some(mut slot) = self.data.try_lock() {
|
||||
if let Some(data) = slot.take() {
|
||||
return Ok(data.into());
|
||||
}
|
||||
}
|
||||
Err(Canceled)
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
}
|
||||
|
||||
fn drop_rx(&self) {
|
||||
// Indicate to the `Sender` that we're done, so any future calls to
|
||||
// `poll_cancel` are weeded out.
|
||||
self.complete.store(true, SeqCst);
|
||||
|
||||
// If we've blocked a task then there's no need for it to stick around,
|
||||
// so we need to drop it. If this lock acquisition fails, though, then
|
||||
// it's just because our `Sender` is trying to take the task, so we
|
||||
// let them take care of that.
|
||||
if let Some(mut slot) = self.rx_task.try_lock() {
|
||||
let task = slot.take();
|
||||
drop(slot);
|
||||
drop(task);
|
||||
}
|
||||
|
||||
// Finally, if our `Sender` wants to get notified of us going away, it
|
||||
// would have stored something in `tx_task`. Here we try to peel that
|
||||
// out and unpark it.
|
||||
//
|
||||
// Note that the `try_lock` here may fail, but only if the `Sender` is
|
||||
// in the process of filling in the task. If that happens then we
|
||||
// already flagged `complete` and they'll pick that up above.
|
||||
if let Some(mut handle) = self.tx_task.try_lock() {
|
||||
if let Some(task) = handle.take() {
|
||||
drop(handle);
|
||||
task.notify()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Error returned from a `Receiver<T>` whenever the correponding `Sender<T>`
|
||||
impl<T> Sender<T> {
|
||||
#[deprecated(note = "renamed to `send`", since = "0.1.11")]
|
||||
#[doc(hidden)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub fn complete(self, t: T) {
|
||||
drop(self.send(t));
|
||||
}
|
||||
|
||||
/// Completes this oneshot with a successful result.
|
||||
///
|
||||
/// This function will consume `self` and indicate to the other end, the
|
||||
/// `Receiver`, that the value provided is the result of the computation this
|
||||
/// represents.
|
||||
///
|
||||
/// If the value is successfully enqueued for the remote end to receive,
|
||||
/// then `Ok(())` is returned. If the receiving end was deallocated before
|
||||
/// this function was called, however, then `Err` is returned with the value
|
||||
/// provided.
|
||||
pub fn send(self, t: T) -> Result<(), T> {
|
||||
self.inner.send(t)
|
||||
}
|
||||
|
||||
/// Polls this `Sender` half to detect whether the `Receiver` this has
|
||||
/// paired with has gone away.
|
||||
///
|
||||
/// This function can be used to learn about when the `Receiver` (consumer)
|
||||
/// half has gone away and nothing will be able to receive a message sent
|
||||
/// from `send`.
|
||||
///
|
||||
/// If `Ready` is returned then it means that the `Receiver` has disappeared
|
||||
/// and the result this `Sender` would otherwise produce should no longer
|
||||
/// be produced.
|
||||
///
|
||||
/// If `NotReady` is returned then the `Receiver` is still alive and may be
|
||||
/// able to receive a message if sent. The current task, however, is
|
||||
/// scheduled to receive a notification if the corresponding `Receiver` goes
|
||||
/// away.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Like `Future::poll`, this function will panic if it's not called from
|
||||
/// within the context of a task. In other words, this should only ever be
|
||||
/// called from inside another future.
|
||||
///
|
||||
/// If you're calling this function from a context that does not have a
|
||||
/// task, then you can use the `is_canceled` API instead.
|
||||
pub fn poll_cancel(&mut self) -> Poll<(), ()> {
|
||||
self.inner.poll_cancel()
|
||||
}
|
||||
|
||||
/// Tests to see whether this `Sender`'s corresponding `Receiver`
|
||||
/// has gone away.
|
||||
///
|
||||
/// This function can be used to learn about when the `Receiver` (consumer)
|
||||
/// half has gone away and nothing will be able to receive a message sent
|
||||
/// from `send`.
|
||||
///
|
||||
/// Note that this function is intended to *not* be used in the context of a
|
||||
/// future. If you're implementing a future you probably want to call the
|
||||
/// `poll_cancel` function which will block the current task if the
|
||||
/// cancellation hasn't happened yet. This can be useful when working on a
|
||||
/// non-futures related thread, though, which would otherwise panic if
|
||||
/// `poll_cancel` were called.
|
||||
pub fn is_canceled(&self) -> bool {
|
||||
self.inner.is_canceled()
|
||||
}
|
||||
}
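A small sketch of the new `is_canceled` query from outside a task context (hypothetical usage, not taken from the diff):

extern crate futures;

use futures::sync::oneshot;

fn main() {
    let (tx, rx) = oneshot::channel::<u32>();

    // Dropping the receiver flags the channel as complete, so the sender can
    // observe the cancellation without being inside a task.
    drop(rx);

    assert!(tx.is_canceled());
    assert!(tx.send(5).is_err());
}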
|
||||
|
||||
impl<T> Drop for Sender<T> {
|
||||
fn drop(&mut self) {
|
||||
self.inner.drop_tx()
|
||||
}
|
||||
}
|
||||
|
||||
/// Error returned from a `Receiver<T>` whenever the corresponding `Sender<T>`
|
||||
/// is dropped.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub struct Canceled;
|
||||
|
@ -253,15 +401,7 @@ impl<T> Receiver<T> {
|
|||
/// can be used to determine whether a message was actually sent or not. If
|
||||
/// `Canceled` is returned from `poll` then no message was sent.
|
||||
pub fn close(&mut self) {
|
||||
// Flag our completion and then attempt to wake up the sender if it's
|
||||
// blocked. See comments in `drop` below for more info
|
||||
self.inner.complete.store(true, SeqCst);
|
||||
if let Some(mut handle) = self.inner.tx_task.try_lock() {
|
||||
if let Some(task) = handle.take() {
|
||||
drop(handle);
|
||||
task.unpark()
|
||||
}
|
||||
}
|
||||
self.inner.close_rx()
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -270,72 +410,167 @@ impl<T> Future for Receiver<T> {
|
|||
type Error = Canceled;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, Canceled> {
|
||||
let mut done = false;
|
||||
|
||||
// Check to see if some data has arrived. If it hasn't then we need to
|
||||
// block our task.
|
||||
//
|
||||
// Note that the acquisition of the `rx_task` lock might fail below, but
|
||||
// the only situation where this can happen is during `Sender::drop`
|
||||
// when we are indeed completed already. If that's happening then we
|
||||
// know we're completed so keep going.
|
||||
if self.inner.complete.load(SeqCst) {
|
||||
done = true;
|
||||
} else {
|
||||
let task = task::park();
|
||||
match self.inner.rx_task.try_lock() {
|
||||
Some(mut slot) => *slot = Some(task),
|
||||
None => done = true,
|
||||
}
|
||||
}
|
||||
|
||||
// If we're `done` via one of the paths above, then look at the data and
|
||||
// figure out what the answer is. If, however, we stored `rx_task`
|
||||
// successfully above we need to check again if we're completed in case
|
||||
// a message was sent while `rx_task` was locked and couldn't notify us
|
||||
// otherwise.
|
||||
//
|
||||
// If we're not done, and we're not complete, though, then we've
|
||||
// successfully blocked our task and we return `NotReady`.
|
||||
if done || self.inner.complete.load(SeqCst) {
|
||||
match self.inner.data.try_lock().unwrap().take() {
|
||||
Some(data) => Ok(data.into()),
|
||||
None => Err(Canceled),
|
||||
}
|
||||
} else {
|
||||
Ok(Async::NotReady)
|
||||
}
|
||||
self.inner.recv()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for Receiver<T> {
|
||||
fn drop(&mut self) {
|
||||
// Indicate to the `Sender` that we're done, so any future calls to
|
||||
// `poll_cancel` are weeded out.
|
||||
self.inner.complete.store(true, SeqCst);
|
||||
self.inner.drop_rx()
|
||||
}
|
||||
}
|
||||
|
||||
// If we've blocked a task then there's no need for it to stick around,
|
||||
// so we need to drop it. If this lock acquisition fails, though, then
|
||||
// it's just because our `Sender` is trying to take the task, so we
|
||||
// let them take care of that.
|
||||
if let Some(mut slot) = self.inner.rx_task.try_lock() {
|
||||
let task = slot.take();
|
||||
drop(slot);
|
||||
drop(task);
|
||||
}
|
||||
/// Handle returned from the `spawn` function.
|
||||
///
|
||||
/// This handle is a future representing the completion of a different future on
|
||||
/// a separate executor. Created through the `oneshot::spawn` function this
|
||||
/// handle will resolve when the future provided to `spawn` resolves on the
|
||||
/// `Executor` instance provided to that function.
|
||||
///
|
||||
/// If this handle is dropped then the future will automatically no longer be
|
||||
/// polled and is scheduled to be dropped. This can be canceled with the
|
||||
/// `forget` function, however.
|
||||
pub struct SpawnHandle<T, E> {
|
||||
rx: Arc<ExecuteInner<Result<T, E>>>,
|
||||
}
|
||||
|
||||
// Finally, if our `Sender` wants to get notified of us going away, it
|
||||
// would have stored something in `tx_task`. Here we try to peel that
|
||||
// out and unpark it.
|
||||
//
|
||||
// Note that the `try_lock` here may fail, but only if the `Sender` is
|
||||
// in the process of filling in the task. If that happens then we
|
||||
// already flagged `complete` and they'll pick that up above.
|
||||
if let Some(mut handle) = self.inner.tx_task.try_lock() {
|
||||
if let Some(task) = handle.take() {
|
||||
drop(handle);
|
||||
task.unpark()
|
||||
}
|
||||
struct ExecuteInner<T> {
|
||||
inner: Inner<T>,
|
||||
keep_running: AtomicBool,
|
||||
}
|
||||
|
||||
/// Type of future which `Execute` instances below must be able to spawn.
|
||||
pub struct Execute<F: Future> {
|
||||
future: F,
|
||||
tx: Arc<ExecuteInner<Result<F::Item, F::Error>>>,
|
||||
}
|
||||
|
||||
/// Spawns a `future` onto the instance of `Executor` provided, `executor`,
|
||||
/// returning a handle representing the completion of the future.
|
||||
///
|
||||
/// The `SpawnHandle` returned is a future that is a proxy for `future` itself.
|
||||
/// When `future` completes on `executor` then the `SpawnHandle` will itself be
|
||||
/// resolved. Internally `SpawnHandle` contains a `oneshot` channel and is
|
||||
/// thus safe to send across threads.
|
||||
///
|
||||
/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is
|
||||
/// not desired then the `SpawnHandle::forget` function can be used to continue
|
||||
/// running the future to completion.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if the instance of `Spawn` provided is unable to
|
||||
/// spawn the `future` provided.
|
||||
///
|
||||
/// If the provided instance of `Spawn` does not actually run `future` to
|
||||
/// completion, then the returned handle may panic when polled. Typically this
|
||||
/// is not a problem, though, as most instances of `Spawn` will run futures to
|
||||
/// completion.
|
||||
///
|
||||
/// Note that the returned future will likely panic if the `future` provided
|
||||
/// panics. If a future running on an executor panics that typically means that
|
||||
/// the executor drops the future, which falls into the above case of not
|
||||
/// running the future to completion essentially.
|
||||
pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error>
|
||||
where F: Future,
|
||||
E: Executor<Execute<F>>,
|
||||
{
|
||||
let data = Arc::new(ExecuteInner {
|
||||
inner: Inner::new(),
|
||||
keep_running: AtomicBool::new(false),
|
||||
});
|
||||
executor.execute(Execute {
|
||||
future: future,
|
||||
tx: data.clone(),
|
||||
}).expect("failed to spawn future");
|
||||
SpawnHandle { rx: data }
|
||||
}
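The spawn machinery above can be exercised with any compatible executor; a hedged sketch assuming `futures_cpupool::CpuPool` (also updated by this commit) is usable as the `Executor` here:

extern crate futures;
extern crate futures_cpupool;

use futures::Future;
use futures::future::lazy;
use futures::sync::oneshot;
use futures_cpupool::CpuPool;

fn main() {
    let pool = CpuPool::new(2);

    // Run the closure's future on the pool; the returned handle resolves with
    // its result once the spawned `Execute` completes over there.
    let handle = oneshot::spawn(lazy(|| Ok::<_, ()>(21 * 2)), &pool);
    assert_eq!(handle.wait(), Ok(42));
}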
|
||||
|
||||
/// Spawns a function `f` onto the `Spawn` instance provided `s`.
|
||||
///
|
||||
/// For more information see the `spawn` function in this module. This function
|
||||
/// is just a thin wrapper around `spawn` which will execute the closure on the
|
||||
/// executor provided and then complete the future that the closure returns.
|
||||
pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error>
|
||||
where F: FnOnce() -> R,
|
||||
R: IntoFuture,
|
||||
E: Executor<Execute<Lazy<F, R>>>,
|
||||
{
|
||||
spawn(lazy(f), executor)
|
||||
}
|
||||
|
||||
impl<T, E> SpawnHandle<T, E> {
|
||||
/// Drop this future without canceling the underlying future.
|
||||
///
|
||||
/// When `SpawnHandle` is dropped, the spawned future will be canceled as
|
||||
/// well if the future hasn't already resolved. This function can be used
|
||||
/// when you want to drop this future but keep executing the underlying future.
|
||||
pub fn forget(self) {
|
||||
self.rx.keep_running.store(true, SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Future for SpawnHandle<T, E> {
|
||||
type Item = T;
|
||||
type Error = E;
|
||||
|
||||
fn poll(&mut self) -> Poll<T, E> {
|
||||
match self.rx.inner.recv() {
|
||||
Ok(Async::Ready(Ok(t))) => Ok(t.into()),
|
||||
Ok(Async::Ready(Err(e))) => Err(e),
|
||||
Ok(Async::NotReady) => Ok(Async::NotReady),
|
||||
Err(_) => panic!("future was canceled before completion"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("SpawnHandle")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, E> Drop for SpawnHandle<T, E> {
|
||||
fn drop(&mut self) {
|
||||
self.rx.inner.drop_rx();
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future> Future for Execute<F> {
|
||||
type Item = ();
|
||||
type Error = ();
|
||||
|
||||
fn poll(&mut self) -> Poll<(), ()> {
|
||||
// If we're canceled then we may want to bail out early.
|
||||
//
|
||||
// If the `forget` function was called, though, then we keep going.
|
||||
if self.tx.inner.poll_cancel().unwrap().is_ready() {
|
||||
if !self.tx.keep_running.load(SeqCst) {
|
||||
return Ok(().into())
|
||||
}
|
||||
}
|
||||
|
||||
let result = match self.future.poll() {
|
||||
Ok(Async::NotReady) => return Ok(Async::NotReady),
|
||||
Ok(Async::Ready(t)) => Ok(t),
|
||||
Err(e) => Err(e),
|
||||
};
|
||||
drop(self.tx.inner.send(result));
|
||||
Ok(().into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Execute")
|
||||
.field("future", &self.future)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future> Drop for Execute<F> {
|
||||
fn drop(&mut self) {
|
||||
self.tx.inner.drop_tx();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,23 +18,29 @@
|
|||
//!
|
||||
//! More information about the task model can be found [online at tokio.rs].
|
||||
//!
|
||||
//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper/futures-model/
|
||||
//! [online at tokio.rs]: https://tokio.rs/docs/going-deeper-futures/futures-model/
|
||||
//!
|
||||
//! ## Functions
|
||||
//!
|
||||
//! There is an important bare function in this module: `park`. The `park`
|
||||
//! function is similar to the standard library's `thread::park` method where it
|
||||
//! returns a handle to wake up a task at a later date (via an `unpark` method).
|
||||
//! There is an important bare function in this module: `current`. The
|
||||
//! `current` function returns a handle to the currently running task, panicking
|
||||
//! if one isn't present. This handle is then used to later notify the task that
|
||||
//! it's ready to make progress through the `Task::notify` method.
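As an illustration of the `current`/`notify` pattern the new module docs describe, here is a small hand-written future that is woken from another thread; the `Signal` type is invented for this sketch and is not part of the crate:

extern crate futures;

use std::sync::{Arc, Mutex};
use std::thread;

use futures::{task, Async, Future, Poll};

// A hand-written future that waits for another thread to flip a flag.
struct Signal {
    state: Arc<Mutex<(bool, Option<task::Task>)>>,
}

impl Future for Signal {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        let mut state = self.state.lock().unwrap();
        if state.0 {
            Ok(Async::Ready(()))
        } else {
            // Grab a handle to the running task so the other thread can wake
            // us up once the flag has been set.
            state.1 = Some(task::current());
            Ok(Async::NotReady)
        }
    }
}

fn main() {
    let state = Arc::new(Mutex::new((false, None)));
    let signal = Signal { state: state.clone() };

    thread::spawn(move || {
        let mut state = state.lock().unwrap();
        state.0 = true;
        if let Some(task) = state.1.take() {
            task.notify();
        }
    });

    signal.wait().unwrap();
}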
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the executor module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use task_impl::{Spawn, spawn, Unpark, Executor, Run};
|
||||
#[cfg(all(feature = "with-deprecated", feature = "use_std"))]
|
||||
#[allow(deprecated)]
|
||||
pub use task_impl::{Spawn, spawn, Unpark, Executor, Run, park};
|
||||
|
||||
pub use task_impl::{Task, LocalKey, park, with_unpark_event, UnparkEvent, EventSet};
|
||||
pub use task_impl::{Task, AtomicTask, current, init};
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[cfg(feature = "use_std")]
|
||||
pub use task_impl::{LocalKey, with_unpark_event, UnparkEvent, EventSet};
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "0.1.4", note = "import through the executor module instead")]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
#[cfg(all(feature = "with-deprecated", feature = "use_std"))]
|
||||
#[allow(deprecated)]
|
||||
pub use task_impl::TaskRc;
|
||||
|
|
|
@ -0,0 +1,191 @@
|
|||
#![allow(dead_code)]
|
||||
|
||||
use super::Task;
|
||||
|
||||
use core::fmt;
|
||||
use core::cell::UnsafeCell;
|
||||
use core::sync::atomic::AtomicUsize;
|
||||
use core::sync::atomic::Ordering::{Acquire, Release};
|
||||
|
||||
/// A synchronization primitive for task notification.
|
||||
///
|
||||
/// `AtomicTask` will coordinate concurrent notifications with the consumer
|
||||
/// potentially "updating" the underlying task to notify. This is useful in
|
||||
/// scenarios where a computation completes in another thread and wants to
|
||||
/// notify the consumer, but the consumer is in the process of being migrated to
|
||||
/// a new logical task.
|
||||
///
|
||||
/// Consumers should call `register` before checking the result of a computation
|
||||
/// and producers should call `notify` after producing the computation (this
|
||||
/// differs from the usual `thread::park` pattern). It is also permitted for
|
||||
/// `notify` to be called **before** `register`. This results in a no-op.
|
||||
///
|
||||
/// A single `AtomicTask` may be reused for any number of calls to `register` or
|
||||
/// `notify`.
|
||||
///
|
||||
/// `AtomicTask` does not provide any memory ordering guarantees, as such the
|
||||
/// user should use caution and use other synchronization primitives to guard
|
||||
/// the result of the underlying computation.
|
||||
pub struct AtomicTask {
|
||||
state: AtomicUsize,
|
||||
task: UnsafeCell<Option<Task>>,
|
||||
}
|
||||
|
||||
/// Initial state, the `AtomicTask` is currently not being used.
|
||||
///
|
||||
/// The value `2` is picked specifically because it is between the write lock &
|
||||
/// read lock values. Since the read lock is represented by an incrementing
|
||||
/// counter, this enables an atomic fetch_sub operation to be used for releasing
|
||||
/// a lock.
|
||||
const WAITING: usize = 2;
|
||||
|
||||
/// The `register` function has determined that the task is no longer current.
|
||||
/// This implies that `AtomicTask::register` is being called from a different
|
||||
/// task than is represented by the currently stored task. The write lock is
|
||||
/// obtained to update the task cell.
|
||||
const LOCKED_WRITE: usize = 0;
|
||||
|
||||
/// At least one call to `notify` happened concurrently to `register` updating
|
||||
/// the task cell. This state is detected when `register` exits the mutation
|
||||
/// code and signals to `register` that it is responsible for notifying its own
|
||||
/// task.
|
||||
const LOCKED_WRITE_NOTIFIED: usize = 1;
|
||||
|
||||
|
||||
/// The `notify` function has locked access to the task cell for notification.
|
||||
///
|
||||
/// The constant is left here mostly for documentation reasons.
|
||||
#[allow(dead_code)]
|
||||
const LOCKED_READ: usize = 3;
|
||||
|
||||
impl AtomicTask {
|
||||
/// Create an `AtomicTask` initialized with the given `Task`
|
||||
pub fn new() -> AtomicTask {
|
||||
// Make sure that task is Sync
|
||||
trait AssertSync: Sync {}
|
||||
impl AssertSync for Task {}
|
||||
|
||||
AtomicTask {
|
||||
state: AtomicUsize::new(WAITING),
|
||||
task: UnsafeCell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Registers the current task to be notified on calls to `notify`.
|
||||
///
|
||||
/// The new task will take the place of any previous tasks that were registered
|
||||
/// by previous calls to `register`. Any calls to `notify` that happen after
|
||||
/// a call to `register` (as defined by the memory ordering rules), will
|
||||
/// notify the `register` caller's task.
|
||||
///
|
||||
/// It is safe to call `register` with multiple other threads concurrently
|
||||
/// calling `notify`. This will result in the `register` caller's current
|
||||
/// task being notified once.
|
||||
///
|
||||
/// This function is safe to call concurrently, but this is generally a bad
|
||||
/// idea. Concurrent calls to `register` will attempt to register different
|
||||
/// tasks to be notified. One of the callers will win and have its task set,
|
||||
/// but there is no guarantee as to which caller will succeed.
|
||||
pub fn register(&self) {
|
||||
// Get a new task handle
|
||||
let task = super::current();
|
||||
|
||||
match self.state.compare_and_swap(WAITING, LOCKED_WRITE, Acquire) {
|
||||
WAITING => {
|
||||
unsafe {
|
||||
// Lock acquired, update the task cell
|
||||
*self.task.get() = Some(task);
|
||||
|
||||
// Release the lock. If the state transitioned to
|
||||
// `LOCKED_WRITE_NOTIFIED`, this means that a notify has been
|
||||
// signaled, so notify the task.
|
||||
if LOCKED_WRITE_NOTIFIED == self.state.swap(WAITING, Release) {
|
||||
(*self.task.get()).as_ref().unwrap().notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
LOCKED_WRITE | LOCKED_WRITE_NOTIFIED => {
|
||||
// A thread is concurrently calling `register`. This shouldn't
|
||||
// happen as it doesn't really make much sense, but it isn't
|
||||
// unsafe per se. Since two threads are concurrently trying to
|
||||
// update the task, it's undefined which one "wins" (no ordering
|
||||
// guarantees), so we can just do nothing.
|
||||
}
|
||||
state => {
|
||||
debug_assert!(state != LOCKED_WRITE, "unexpected state LOCKED_WRITE");
|
||||
debug_assert!(state != LOCKED_WRITE_NOTIFIED, "unexpected state LOCKED_WRITE_NOTIFIED");
|
||||
|
||||
// Currently in a read locked state, this implies that `notify`
|
||||
// is currently being called on the old task handle. So, we call
|
||||
// notify on the new task handle
|
||||
task.notify();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Notifies the task that last called `register`.
|
||||
///
|
||||
/// If `register` has not been called yet, then this does nothing.
|
||||
pub fn notify(&self) {
|
||||
let mut curr = WAITING;
|
||||
|
||||
loop {
|
||||
if curr == LOCKED_WRITE {
|
||||
// Transition the state to LOCKED_WRITE_NOTIFIED
|
||||
let actual = self.state.compare_and_swap(LOCKED_WRITE, LOCKED_WRITE_NOTIFIED, Release);
|
||||
|
||||
if curr == actual {
|
||||
// Success, return
|
||||
return;
|
||||
}
|
||||
|
||||
// update current state variable and try again
|
||||
curr = actual;
|
||||
|
||||
} else if curr == LOCKED_WRITE_NOTIFIED {
|
||||
// Currently in `LOCKED_WRITE_NOTIFIED` state, nothing else to do.
|
||||
return;
|
||||
|
||||
} else {
|
||||
// Currently in a LOCKED_READ state, so attempt to increment the
|
||||
// lock count.
|
||||
let actual = self.state.compare_and_swap(curr, curr + 1, Acquire);
|
||||
|
||||
// Lock acquired
|
||||
if actual == curr {
|
||||
// Notify the task
|
||||
unsafe {
|
||||
if let Some(ref task) = *self.task.get() {
|
||||
task.notify();
|
||||
}
|
||||
}
|
||||
|
||||
// Release the lock
|
||||
self.state.fetch_sub(1, Release);
|
||||
|
||||
// Done
|
||||
return;
|
||||
}
|
||||
|
||||
// update current state variable and try again
|
||||
curr = actual;
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AtomicTask {
|
||||
fn default() -> Self {
|
||||
AtomicTask::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for AtomicTask {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(fmt, "AtomicTask")
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Send for AtomicTask {}
|
||||
unsafe impl Sync for AtomicTask {}
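A sketch of the register-then-check protocol described above, driving `AtomicTask` from inside a future's `poll`; the `Flag`/`WaitForFlag` types are invented for illustration only:

extern crate futures;

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

use futures::{Async, Future, Poll};
use futures::task::AtomicTask;

struct Flag {
    set: AtomicBool,
    task: AtomicTask,
}

struct WaitForFlag(Arc<Flag>);

impl Future for WaitForFlag {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        // Register first, then check the result, as the documentation above
        // prescribes; this avoids missing a notification that races with us.
        self.0.task.register();
        if self.0.set.load(Ordering::SeqCst) {
            Ok(Async::Ready(()))
        } else {
            Ok(Async::NotReady)
        }
    }
}

fn main() {
    let flag = Arc::new(Flag {
        set: AtomicBool::new(false),
        task: AtomicTask::new(),
    });

    let flag2 = flag.clone();
    thread::spawn(move || {
        // Producer side: publish the result, then notify.
        flag2.set.store(true, Ordering::SeqCst);
        flag2.task.notify();
    });

    WaitForFlag(flag).wait().unwrap();
}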
|
|
@ -0,0 +1,173 @@
|
|||
#![cfg_attr(feature = "use_std", allow(dead_code))]
|
||||
|
||||
use core::marker;
|
||||
use core::mem;
|
||||
use core::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
|
||||
use core::sync::atomic::Ordering::{SeqCst, Relaxed};
|
||||
|
||||
use super::{BorrowedTask, NotifyHandle};
|
||||
|
||||
pub struct LocalKey;
|
||||
pub struct LocalMap;
|
||||
pub fn local_map() -> LocalMap { LocalMap }
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct BorrowedEvents<'a>(marker::PhantomData<&'a ()>);
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
pub struct BorrowedUnpark<'a> {
|
||||
f: &'a Fn() -> NotifyHandle,
|
||||
id: usize,
|
||||
}
|
||||
|
||||
pub struct TaskUnpark {
|
||||
handle: NotifyHandle,
|
||||
id: usize,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct UnparkEvents;
|
||||
|
||||
impl<'a> BorrowedEvents<'a> {
|
||||
pub fn new() -> BorrowedEvents<'a> {
|
||||
BorrowedEvents(marker::PhantomData)
|
||||
}
|
||||
|
||||
pub fn to_owned(&self) -> UnparkEvents {
|
||||
UnparkEvents
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BorrowedUnpark<'a> {
|
||||
#[inline]
|
||||
pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> {
|
||||
BorrowedUnpark { f: f, id: id }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_owned(&self) -> TaskUnpark {
|
||||
let handle = (self.f)();
|
||||
let id = handle.clone_id(self.id);
|
||||
TaskUnpark { handle: handle, id: id }
|
||||
}
|
||||
}
|
||||
|
||||
impl UnparkEvents {
|
||||
pub fn notify(&self) {}
|
||||
|
||||
pub fn will_notify(&self, _other: &BorrowedEvents) -> bool {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl TaskUnpark {
|
||||
pub fn notify(&self) {
|
||||
self.handle.notify(self.id);
|
||||
}
|
||||
|
||||
pub fn will_notify(&self, other: &BorrowedUnpark) -> bool {
|
||||
self.id == other.id && self.handle.inner == (other.f)().inner
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for TaskUnpark {
|
||||
fn clone(&self) -> TaskUnpark {
|
||||
let handle = self.handle.clone();
|
||||
let id = handle.clone_id(self.id);
|
||||
TaskUnpark { handle: handle, id: id }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for TaskUnpark {
|
||||
fn drop(&mut self) {
|
||||
self.handle.drop_id(self.id);
|
||||
}
|
||||
}
|
||||
|
||||
static GET: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
static SET: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
/// Initialize the `futures` task system.
|
||||
///
|
||||
/// This function is an unsafe low-level implementation detail typically only
|
||||
/// used by crates using `futures` in `no_std` context. Users of this crate
|
||||
/// who also use the standard library never need to invoke this function.
|
||||
///
|
||||
/// The task system in the `futures` crate relies on some notion of "local
|
||||
/// storage" for the running thread and/or context. The `task::current` function
|
||||
/// can get invoked in any context, for example, and needs to be able to return
|
||||
/// a `Task`. Typically with the standard library this is supported with
|
||||
/// thread-local-storage, but this is not available in `no_std` contexts!
|
||||
///
|
||||
/// This function is provided to allow `no_std` contexts to continue to be able
|
||||
/// to use the standard task system in this crate. The functions provided here
|
||||
/// will be used as-if they were thread-local-storage getters/setters. The `get`
|
||||
/// function provided is used to retrieve the current thread-local value of the
|
||||
/// task system's pointer, returning null if not initialized. The `set` function
|
||||
/// updates the value of the pointer.
|
||||
///
|
||||
/// # Return value
|
||||
///
|
||||
/// This function will return whether initialization succeeded or not. This
|
||||
/// function can be called concurrently and only the first invocation will
|
||||
/// succeed. If `false` is returned then the `get` and `set` pointers provided
|
||||
/// were *not* registered for use with the task system, but if `true` was
|
||||
/// returned then they will be called when the task system is used.
|
||||
///
|
||||
/// Note that while safe to call concurrently it's recommended to still perform
|
||||
/// external synchronization when calling this function. This task system is
|
||||
/// not guaranteed to be ready to go until a call to this function returns
|
||||
/// `true`. In other words, if you call this function and see `false`, the
|
||||
/// task system may not be ready to go as another thread may still be calling
|
||||
/// `init`.
|
||||
///
|
||||
/// # Unsafety
|
||||
///
|
||||
/// This function is unsafe due to the requirements on the behavior of the
|
||||
/// `get` and `set` functions. The pointers returned from these functions must
|
||||
/// reflect the semantics specified above and must also be thread-local,
|
||||
/// depending on the definition of a "thread" in the calling context.
|
||||
pub unsafe fn init(get: fn() -> *mut u8, set: fn(*mut u8)) -> bool {
|
||||
if GET.compare_exchange(0, get as usize, SeqCst, SeqCst).is_ok() {
|
||||
SET.store(set as usize, SeqCst);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn get_ptr() -> Option<*mut u8> {
|
||||
match GET.load(Relaxed) {
|
||||
0 => None,
|
||||
n => Some(unsafe { mem::transmute::<usize, fn() -> *mut u8>(n)() }),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "use_std")]
|
||||
#[inline]
|
||||
pub fn is_get_ptr(f: usize) -> bool {
|
||||
GET.load(Relaxed) == f
|
||||
}
|
||||
|
||||
pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
|
||||
where F: FnOnce() -> R
|
||||
{
|
||||
let set = match SET.load(Relaxed) {
|
||||
0 => panic!("not initialized"),
|
||||
n => unsafe { mem::transmute::<usize, fn(*mut u8)>(n) },
|
||||
};
|
||||
|
||||
struct Reset(fn(*mut u8), *mut u8);
|
||||
|
||||
impl Drop for Reset {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
(self.0)(self.1);
|
||||
}
|
||||
}
|
||||
|
||||
let _reset = Reset(set, get_ptr().unwrap());
|
||||
set(task as *const _ as *mut u8);
|
||||
f()
|
||||
}
|
File diff suppressed because it is too large
|
@ -5,6 +5,8 @@ use std::cell::RefCell;
|
|||
use std::hash::{BuildHasherDefault, Hasher};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use task_impl::with;
|
||||
|
||||
/// A macro to create a `static` of type `LocalKey`
|
||||
///
|
||||
/// This macro is intentionally similar to the `thread_local!`, and creates a
|
||||
|
@ -113,7 +115,7 @@ impl<T: Send + 'static> LocalKey<T> {
|
|||
where F: FnOnce(&T) -> R
|
||||
{
|
||||
let key = (self.__key)();
|
||||
super::with(|task| {
|
||||
with(|task| {
|
||||
let raw_pointer = {
|
||||
let mut data = task.map.borrow_mut();
|
||||
let entry = data.entry(key).or_insert_with(|| {
|
|
@ -0,0 +1,730 @@
|
|||
use std::prelude::v1::*;
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
use std::ptr;
|
||||
use std::sync::{Arc, Mutex, Condvar, Once, ONCE_INIT};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use {Future, Stream, Sink, Poll, Async, StartSend, AsyncSink};
|
||||
use super::core;
|
||||
use super::{BorrowedTask, NotifyHandle, Spawn, spawn, Notify, UnsafeNotify};
|
||||
|
||||
mod unpark_mutex;
|
||||
pub use self::unpark_mutex::UnparkMutex;
|
||||
|
||||
mod data;
|
||||
pub use self::data::*;
|
||||
|
||||
mod task_rc;
|
||||
#[allow(deprecated)]
|
||||
#[cfg(feature = "with-deprecated")]
|
||||
pub use self::task_rc::TaskRc;
|
||||
|
||||
pub use task_impl::core::init;
|
||||
|
||||
thread_local!(static CURRENT_TASK: Cell<*mut u8> = Cell::new(ptr::null_mut()));
|
||||
|
||||
static INIT: Once = ONCE_INIT;
|
||||
|
||||
pub fn get_ptr() -> Option<*mut u8> {
|
||||
// Since this condition will always return true when TLS task storage is
|
||||
// used (the default), the branch predictor will be able to optimize the
|
||||
// branching and a dynamic dispatch will be avoided, which makes the
|
||||
// compiler happier.
|
||||
if core::is_get_ptr(0x1) {
|
||||
Some(CURRENT_TASK.with(|c| c.get()))
|
||||
} else {
|
||||
core::get_ptr()
|
||||
}
|
||||
}
|
||||
|
||||
fn tls_slot() -> *const Cell<*mut u8> {
|
||||
CURRENT_TASK.with(|c| c as *const _)
|
||||
}
|
||||
|
||||
pub fn set<'a, F, R>(task: &BorrowedTask<'a>, f: F) -> R
|
||||
where F: FnOnce() -> R
|
||||
{
|
||||
// Lazily initialize the get / set ptrs
|
||||
//
|
||||
// Note that we won't actually use these functions ever, we'll instead be
|
||||
// testing the pointer's value elsewhere and calling our own functions.
|
||||
INIT.call_once(|| unsafe {
|
||||
let get = mem::transmute::<usize, _>(0x1);
|
||||
let set = mem::transmute::<usize, _>(0x2);
|
||||
init(get, set);
|
||||
});
|
||||
|
||||
// Same as above.
|
||||
if core::is_get_ptr(0x1) {
|
||||
struct Reset(*const Cell<*mut u8>, *mut u8);
|
||||
|
||||
impl Drop for Reset {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
(*self.0).set(self.1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe {
|
||||
let slot = tls_slot();
|
||||
let _reset = Reset(slot, (*slot).get());
|
||||
(*slot).set(task as *const _ as *mut u8);
|
||||
f()
|
||||
}
|
||||
} else {
|
||||
core::set(task, f)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
#[allow(deprecated)]
|
||||
pub enum BorrowedUnpark<'a> {
|
||||
Old(&'a Arc<Unpark>),
|
||||
New(core::BorrowedUnpark<'a>),
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
#[allow(deprecated)]
|
||||
pub enum BorrowedEvents<'a> {
|
||||
None,
|
||||
One(&'a UnparkEvent, &'a BorrowedEvents<'a>),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum TaskUnpark {
|
||||
#[allow(deprecated)]
|
||||
Old(Arc<Unpark>),
|
||||
New(core::TaskUnpark),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
#[allow(deprecated)]
|
||||
pub enum UnparkEvents {
|
||||
None,
|
||||
One(UnparkEvent),
|
||||
Many(Box<[UnparkEvent]>),
|
||||
}
|
||||
|
||||
impl<'a> BorrowedUnpark<'a> {
|
||||
#[inline]
|
||||
pub fn new(f: &'a Fn() -> NotifyHandle, id: usize) -> BorrowedUnpark<'a> {
|
||||
BorrowedUnpark::New(core::BorrowedUnpark::new(f, id))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_owned(&self) -> TaskUnpark {
|
||||
match *self {
|
||||
BorrowedUnpark::Old(old) => TaskUnpark::Old(old.clone()),
|
||||
BorrowedUnpark::New(new) => TaskUnpark::New(new.to_owned()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BorrowedEvents<'a> {
|
||||
#[inline]
|
||||
pub fn new() -> BorrowedEvents<'a> {
|
||||
BorrowedEvents::None
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_owned(&self) -> UnparkEvents {
|
||||
let mut one_event = None;
|
||||
let mut list = Vec::new();
|
||||
let mut cur = self;
|
||||
while let BorrowedEvents::One(event, next) = *cur {
|
||||
let event = event.clone();
|
||||
match one_event.take() {
|
||||
None if list.len() == 0 => one_event = Some(event),
|
||||
None => list.push(event),
|
||||
Some(event2) => {
|
||||
list.push(event2);
|
||||
list.push(event);
|
||||
}
|
||||
}
|
||||
cur = next;
|
||||
}
|
||||
|
||||
match one_event {
|
||||
None if list.len() == 0 => UnparkEvents::None,
|
||||
None => UnparkEvents::Many(list.into_boxed_slice()),
|
||||
Some(e) => UnparkEvents::One(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UnparkEvents {
|
||||
pub fn notify(&self) {
|
||||
match *self {
|
||||
UnparkEvents::None => {}
|
||||
UnparkEvents::One(ref e) => e.unpark(),
|
||||
UnparkEvents::Many(ref list) => {
|
||||
for event in list.iter() {
|
||||
event.unpark();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn will_notify(&self, events: &BorrowedEvents) -> bool {
|
||||
// Pessimistically assume that any unpark events mean that we're not
|
||||
// equivalent to the current task.
|
||||
match *self {
|
||||
UnparkEvents::None => {}
|
||||
_ => return false,
|
||||
}
|
||||
|
||||
match *events {
|
||||
BorrowedEvents::None => return true,
|
||||
_ => {},
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
impl TaskUnpark {
|
||||
pub fn notify(&self) {
|
||||
match *self {
|
||||
TaskUnpark::Old(ref old) => old.unpark(),
|
||||
TaskUnpark::New(ref new) => new.notify(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn will_notify(&self, unpark: &BorrowedUnpark) -> bool {
|
||||
match (unpark, self) {
|
||||
(&BorrowedUnpark::Old(old1), &TaskUnpark::Old(ref old2)) => {
|
||||
&**old1 as *const Unpark == &**old2 as *const Unpark
|
||||
}
|
||||
(&BorrowedUnpark::New(ref new1), &TaskUnpark::New(ref new2)) => {
|
||||
new2.will_notify(new1)
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future> Spawn<F> {
|
||||
/// Polls the internal future, scheduling notifications to be sent to the
|
||||
/// `unpark` argument.
|
||||
///
|
||||
/// This method will poll the internal future, testing if it's completed
|
||||
/// yet. The `unpark` argument is used as a sink for notifications sent to
|
||||
/// this future. That is, while the future is being polled, any call to
|
||||
/// `task::park()` will return a handle that contains the `unpark`
|
||||
/// specified.
|
||||
///
|
||||
/// If this function returns `NotReady`, then the `unpark` should have been
|
||||
/// scheduled to receive a notification when poll can be called again.
|
||||
/// Otherwise if `Ready` or `Err` is returned, the `Spawn` task can be
|
||||
/// safely destroyed.
|
||||
#[deprecated(note = "recommended to use `poll_future_notify` instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn poll_future(&mut self, unpark: Arc<Unpark>) -> Poll<F::Item, F::Error> {
|
||||
self.enter(BorrowedUnpark::Old(&unpark), |f| f.poll())
|
||||
}
|
||||
|
||||
/// Waits for the internal future to complete, blocking this thread's
|
||||
/// execution until it does.
|
||||
///
|
||||
/// This function will call `poll_future` in a loop, waiting for the future
|
||||
/// to complete. When a future cannot make progress it will use
|
||||
/// `thread::park` to block the current thread.
|
||||
pub fn wait_future(&mut self) -> Result<F::Item, F::Error> {
|
||||
ThreadNotify::with_current(|notify| {
|
||||
|
||||
loop {
|
||||
match self.poll_future_notify(notify, 0)? {
|
||||
Async::NotReady => notify.park(),
|
||||
Async::Ready(e) => return Ok(e),
|
||||
}
|
||||
}
|
||||
})
|
||||
}
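A quick illustration of `wait_future` driving a future to completion on the calling thread; it assumes the `futures::executor::spawn` re-export and is not part of the patch:

extern crate futures;

use futures::executor;
use futures::future;

fn main() {
    // Wrap the future in a `Spawn` and drive it on this thread; `wait_future`
    // parks via `ThreadNotify` whenever the future returns `NotReady`.
    let mut task = executor::spawn(future::ok::<u32, ()>(42));
    assert_eq!(task.wait_future(), Ok(42));
}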
|
||||
|
||||
/// A specialized function to request running a future to completion on the
|
||||
/// specified executor.
|
||||
///
|
||||
/// This function only works for futures whose item and error types are `()`
|
||||
/// and also implement the `Send` and `'static` bounds. This will submit
|
||||
/// units of work (instances of `Run`) to the `exec` argument provided
|
||||
/// necessary to drive the future to completion.
|
||||
///
|
||||
/// When the future would block, it's arranged that when the future is again
|
||||
/// ready it will submit another unit of work to the `exec` provided. This
|
||||
/// will happen in a loop until the future has completed.
|
||||
///
|
||||
/// This method is not appropriate for all futures, and other kinds of
|
||||
/// executors typically provide a similar function with perhaps relaxed
|
||||
/// bounds as well.
|
||||
///
|
||||
/// Note that this method is likely to be deprecated in favor of the
|
||||
/// `futures::Executor` trait and `execute` method, but if this'd cause
|
||||
/// difficulty for you please let us know!
|
||||
pub fn execute(self, exec: Arc<Executor>)
|
||||
where F: Future<Item=(), Error=()> + Send + 'static,
|
||||
{
|
||||
exec.clone().execute(Run {
|
||||
// Ideally this method would be defined directly on
|
||||
// `Spawn<BoxFuture<(), ()>>` so we wouldn't have to box here and
|
||||
// it'd be more explicit, but unfortunately that currently has a
|
||||
// link error on nightly: rust-lang/rust#36155
|
||||
spawn: spawn(Box::new(self.into_inner())),
|
||||
inner: Arc::new(RunInner {
|
||||
exec: exec,
|
||||
mutex: UnparkMutex::new()
|
||||
}),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<S: Stream> Spawn<S> {
|
||||
/// Like `poll_future`, except polls the underlying stream.
|
||||
#[deprecated(note = "recommended to use `poll_stream_notify` instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn poll_stream(&mut self, unpark: Arc<Unpark>)
|
||||
-> Poll<Option<S::Item>, S::Error> {
|
||||
self.enter(BorrowedUnpark::Old(&unpark), |s| s.poll())
|
||||
}
|
||||
|
||||
/// Like `wait_future`, except only waits for the next element to arrive on
|
||||
/// the underlying stream.
|
||||
pub fn wait_stream(&mut self) -> Option<Result<S::Item, S::Error>> {
|
||||
ThreadNotify::with_current(|notify| {
|
||||
|
||||
loop {
|
||||
match self.poll_stream_notify(notify, 0) {
|
||||
Ok(Async::NotReady) => notify.park(),
|
||||
Ok(Async::Ready(Some(e))) => return Some(Ok(e)),
|
||||
Ok(Async::Ready(None)) => return None,
|
||||
Err(e) => return Some(Err(e)),
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
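Similarly, `wait_stream` can pull items off a stream synchronously; a sketch assuming `stream::iter_ok` is available in this release:

extern crate futures;

use futures::executor;
use futures::stream;

fn main() {
    let mut task = executor::spawn(stream::iter_ok::<_, ()>(vec![1, 2, 3]));

    // Block between elements until the stream is exhausted.
    while let Some(item) = task.wait_stream() {
        println!("item: {}", item.unwrap());
    }
}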
|
||||
|
||||
impl<S: Sink> Spawn<S> {
|
||||
/// Invokes the underlying `start_send` method with this task in place.
|
||||
///
|
||||
/// If the underlying operation returns `NotReady` then the `unpark` value
|
||||
/// passed in will receive a notification when the operation is ready to be
|
||||
/// attempted again.
|
||||
#[deprecated(note = "recommended to use `start_send_notify` instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn start_send(&mut self, value: S::SinkItem, unpark: &Arc<Unpark>)
|
||||
-> StartSend<S::SinkItem, S::SinkError> {
|
||||
self.enter(BorrowedUnpark::Old(unpark), |s| s.start_send(value))
|
||||
}
|
||||
|
||||
/// Invokes the underlying `poll_complete` method with this task in place.
|
||||
///
|
||||
/// If the underlying operation returns `NotReady` then the `unpark` value
|
||||
/// passed in will receive a notification when the operation is ready to be
|
||||
/// attempted again.
|
||||
#[deprecated(note = "recommended to use `poll_flush_notify` instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn poll_flush(&mut self, unpark: &Arc<Unpark>)
|
||||
-> Poll<(), S::SinkError> {
|
||||
self.enter(BorrowedUnpark::Old(unpark), |s| s.poll_complete())
|
||||
}
|
||||
|
||||
/// Blocks the current thread until it's able to send `value` on this sink.
|
||||
///
|
||||
/// This function will send the `value` on the sink that this task wraps. If
|
||||
/// the sink is not ready to send the value yet then the current thread will
|
||||
/// be blocked until it's able to send the value.
|
||||
pub fn wait_send(&mut self, mut value: S::SinkItem)
|
||||
-> Result<(), S::SinkError> {
|
||||
ThreadNotify::with_current(|notify| {
|
||||
|
||||
loop {
|
||||
value = match self.start_send_notify(value, notify, 0)? {
|
||||
AsyncSink::NotReady(v) => v,
|
||||
AsyncSink::Ready => return Ok(()),
|
||||
};
|
||||
notify.park();
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Blocks the current thread until it's able to flush this sink.
|
||||
///
|
||||
/// This function will call the underlying sink's `poll_complete` method
|
||||
/// until it returns that it's ready, proxying out errors upwards to the
|
||||
/// caller if one occurs.
|
||||
///
|
||||
/// The thread will be blocked until `poll_complete` returns that it's
|
||||
/// ready.
|
||||
pub fn wait_flush(&mut self) -> Result<(), S::SinkError> {
|
||||
ThreadNotify::with_current(|notify| {
|
||||
|
||||
loop {
|
||||
if self.poll_flush_notify(notify, 0)?.is_ready() {
|
||||
return Ok(())
|
||||
}
|
||||
notify.park();
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Blocks the current thread until it's able to close this sink.
|
||||
///
|
||||
/// This function will close the sink that this task wraps. If the sink
|
||||
/// is not ready to be closed yet, then the current thread will be blocked
|
||||
/// until it's closed.
|
||||
pub fn wait_close(&mut self) -> Result<(), S::SinkError> {
|
||||
ThreadNotify::with_current(|notify| {
|
||||
|
||||
loop {
|
||||
if self.close_notify(notify, 0)?.is_ready() {
|
||||
return Ok(())
|
||||
}
|
||||
notify.park();
|
||||
}
|
||||
})
|
||||
}
|
||||
}
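The sink-side helpers compose with the channel types in `futures::sync`; a hedged sketch using an mpsc channel (the buffer size and the drop-to-terminate behavior are assumptions of this sketch, not taken from the patch):

extern crate futures;

use futures::executor;
use futures::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<u32>(1);

    // Drive the sending half synchronously: enqueue one item and flush.
    let mut sink_task = executor::spawn(tx);
    sink_task.wait_send(7).unwrap();
    sink_task.wait_flush().unwrap();

    // Dropping the spawned sink drops the sender, which terminates the stream.
    drop(sink_task);

    let mut stream_task = executor::spawn(rx);
    assert_eq!(stream_task.wait_stream(), Some(Ok(7)));
    assert!(stream_task.wait_stream().is_none());
}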
|
||||
|
||||
/// A trait which represents a sink of notifications that a future is ready to
|
||||
/// make progress.
|
||||
///
|
||||
/// This trait is provided as an argument to the `Spawn::poll_future` and
|
||||
/// `Spawn::poll_stream` functions. It's transitively used as part of the
|
||||
/// `Task::unpark` method to internally deliver notifications of readiness of a
|
||||
/// future to move forward.
|
||||
#[deprecated(note = "recommended to use `Notify` instead")]
|
||||
pub trait Unpark: Send + Sync {
|
||||
/// Indicates that an associated future and/or task are ready to make
|
||||
/// progress.
|
||||
///
|
||||
/// Typically this means that the receiver of the notification should
|
||||
/// arrange for the future to get poll'd in a prompt fashion.
|
||||
fn unpark(&self);
|
||||
}
|
||||
|
||||
/// A trait representing requests to poll futures.
|
||||
///
|
||||
/// This trait is an argument to the `Spawn::execute` which is used to run a
|
||||
/// future to completion. An executor will receive requests to run a future and
|
||||
/// an executor is responsible for ensuring that happens in a timely fashion.
|
||||
///
|
||||
/// Note that this trait is likely to be deprecated and/or renamed to avoid
|
||||
/// clashing with the `future::Executor` trait. If you've got a use case for
|
||||
/// this or would like to comment on the name please let us know!
|
||||
pub trait Executor: Send + Sync + 'static {
|
||||
/// Requests that `Run` is executed soon on the given executor.
|
||||
fn execute(&self, r: Run);
|
||||
}
|
||||
|
||||
/// Units of work submitted to an `Executor`, currently only created
|
||||
/// internally.
|
||||
pub struct Run {
|
||||
spawn: Spawn<Box<Future<Item = (), Error = ()> + Send>>,
|
||||
inner: Arc<RunInner>,
|
||||
}
|
||||
|
||||
struct RunInner {
|
||||
mutex: UnparkMutex<Run>,
|
||||
exec: Arc<Executor>,
|
||||
}
|
||||
|
||||
impl Run {
|
||||
/// Actually run the task (invoking `poll` on its future) on the current
|
||||
/// thread.
|
||||
pub fn run(self) {
|
||||
let Run { mut spawn, inner } = self;
|
||||
|
||||
// SAFETY: the ownership of this `Run` object is evidence that
|
||||
// we are in the `POLLING`/`REPOLL` state for the mutex.
|
||||
unsafe {
|
||||
inner.mutex.start_poll();
|
||||
|
||||
loop {
|
||||
match spawn.poll_future_notify(&inner, 0) {
|
||||
Ok(Async::NotReady) => {}
|
||||
Ok(Async::Ready(())) |
|
||||
Err(()) => return inner.mutex.complete(),
|
||||
}
|
||||
let run = Run { spawn: spawn, inner: inner.clone() };
|
||||
match inner.mutex.wait(run) {
|
||||
Ok(()) => return, // we've waited
|
||||
Err(r) => spawn = r.spawn, // someone's notified us
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Run {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Run")
|
||||
.field("contents", &"...")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl Notify for RunInner {
|
||||
fn notify(&self, _id: usize) {
|
||||
match self.mutex.notify() {
|
||||
Ok(run) => self.exec.execute(run),
|
||||
Err(()) => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ===== ThreadNotify =====
|
||||
|
||||
struct ThreadNotify {
|
||||
state: AtomicUsize,
|
||||
mutex: Mutex<()>,
|
||||
condvar: Condvar,
|
||||
}
|
||||
|
||||
const IDLE: usize = 0;
|
||||
const NOTIFY: usize = 1;
|
||||
const SLEEP: usize = 2;
|
||||
|
||||
thread_local! {
|
||||
static CURRENT_THREAD_NOTIFY: Arc<ThreadNotify> = Arc::new(ThreadNotify {
|
||||
state: AtomicUsize::new(IDLE),
|
||||
mutex: Mutex::new(()),
|
||||
condvar: Condvar::new(),
|
||||
});
|
||||
}
|
||||
|
||||
impl ThreadNotify {
|
||||
fn with_current<F, R>(f: F) -> R
|
||||
where F: FnOnce(&Arc<ThreadNotify>) -> R,
|
||||
{
|
||||
CURRENT_THREAD_NOTIFY.with(|notify| f(notify))
|
||||
}
|
||||
|
||||
fn park(&self) {
|
||||
// If currently notified, then we skip sleeping. This is checked outside
|
||||
// of the lock to avoid acquiring a mutex if not necessary.
|
||||
match self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) {
|
||||
NOTIFY => return,
|
||||
IDLE => {},
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// The state is currently idle, so obtain the lock and then try to
|
||||
// transition to a sleeping state.
|
||||
let mut m = self.mutex.lock().unwrap();
|
||||
|
||||
// Transition to sleeping
|
||||
match self.state.compare_and_swap(IDLE, SLEEP, Ordering::SeqCst) {
|
||||
NOTIFY => {
|
||||
// Notified before we could sleep, consume the notification and
|
||||
// exit
|
||||
self.state.store(IDLE, Ordering::SeqCst);
|
||||
return;
|
||||
}
|
||||
IDLE => {},
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// Loop until we've been notified
|
||||
loop {
|
||||
m = self.condvar.wait(m).unwrap();
|
||||
|
||||
// Transition back to idle, loop otherwise
|
||||
if NOTIFY == self.state.compare_and_swap(NOTIFY, IDLE, Ordering::SeqCst) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Notify for ThreadNotify {
|
||||
fn notify(&self, _unpark_id: usize) {
|
||||
// First, try transitioning from IDLE -> NOTIFY, this does not require a
|
||||
// lock.
|
||||
match self.state.compare_and_swap(IDLE, NOTIFY, Ordering::SeqCst) {
|
||||
IDLE | NOTIFY => return,
|
||||
SLEEP => {}
|
||||
_ => unreachable!(),
|
||||
}
|
||||
|
||||
// The other half is sleeping, this requires a lock
|
||||
let _m = self.mutex.lock().unwrap();
|
||||
|
||||
// Transition from SLEEP -> NOTIFY
|
||||
match self.state.compare_and_swap(SLEEP, NOTIFY, Ordering::SeqCst) {
|
||||
SLEEP => {}
|
||||
_ => return,
|
||||
}
|
||||
|
||||
// Wakeup the sleeper
|
||||
self.condvar.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
// ===== UnparkEvent =====
|
||||
|
||||
/// For the duration of the given callback, add an "unpark event" to be
|
||||
/// triggered when the task handle is used to unpark the task.
|
||||
///
|
||||
/// Unpark events are used to pass information about what event caused a task to
|
||||
/// be unparked. In some cases, tasks are waiting on a large number of possible
|
||||
/// events, and need precise information about the wakeup to avoid extraneous
|
||||
/// polling.
|
||||
///
|
||||
/// Every `Task` handle comes with a set of unpark events which will fire when
|
||||
/// `unpark` is called. When fired, these events insert an identifier into a
|
||||
/// concurrent set, which the task can read from to determine what events
|
||||
/// occurred.
|
||||
///
|
||||
/// This function immediately invokes the closure, `f`, but arranges things so
|
||||
/// that `task::park` will produce a `Task` handle that includes the given
|
||||
/// unpark event.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function will panic if a task is not currently being executed. That
|
||||
/// is, this method can be dangerous to call outside of an implementation of
|
||||
/// `poll`.
|
||||
#[deprecated(note = "recommended to use `FuturesUnordered` instead")]
|
||||
#[allow(deprecated)]
|
||||
pub fn with_unpark_event<F, R>(event: UnparkEvent, f: F) -> R
|
||||
where F: FnOnce() -> R
|
||||
{
|
||||
super::with(|task| {
|
||||
let new_task = BorrowedTask {
|
||||
id: task.id,
|
||||
unpark: task.unpark,
|
||||
events: BorrowedEvents::One(&event, &task.events),
|
||||
map: task.map,
|
||||
};
|
||||
|
||||
super::set(&new_task, f)
|
||||
})
|
||||
}

/// A set insertion to trigger upon `unpark`.
///
/// Unpark events are used to communicate information about *why* an unpark
/// occurred, in particular populating sets with event identifiers so that the
/// unparked task can avoid extraneous polling. See `with_unpark_event` for
/// more.
#[derive(Clone)]
#[deprecated(note = "recommended to use `FuturesUnordered` instead")]
#[allow(deprecated)]
pub struct UnparkEvent {
    set: Arc<EventSet>,
    item: usize,
}

#[allow(deprecated)]
impl UnparkEvent {
    /// Construct an unpark event that will insert `id` into `set` when
    /// triggered.
    #[deprecated(note = "recommended to use `FuturesUnordered` instead")]
    pub fn new(set: Arc<EventSet>, id: usize) -> UnparkEvent {
        UnparkEvent {
            set: set,
            item: id,
        }
    }

    fn unpark(&self) {
        self.set.insert(self.item);
    }
}

#[allow(deprecated)]
impl fmt::Debug for UnparkEvent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("UnparkEvent")
            .field("set", &"...")
            .field("item", &self.item)
            .finish()
    }
}

/// A concurrent set which allows for the insertion of `usize` values.
///
/// `EventSet`s are used to communicate precise information about the event(s)
/// that triggered a task notification. See `task::with_unpark_event` for details.
#[deprecated(since="0.1.18", note = "recommended to use `FuturesUnordered` instead")]
pub trait EventSet: Send + Sync + 'static {
    /// Insert the given ID into the set
    fn insert(&self, id: usize);
}

// Safe implementation of `UnsafeNotify` for `Arc` in the standard library.
//
// Note that this is a very unsafe implementation! The crucial piece is that
// these two values are considered equivalent:
//
// * Arc<T>
// * *const ArcWrapped<T>
//
// We don't actually know the layout of `ArcWrapped<T>` as it's an
// implementation detail in the standard library. We can work, though, by
// casting it through and back an `Arc<T>`.
//
// This also means that you won't actually find `UnsafeNotify for Arc<T>`
// because it's the wrong level of indirection. These methods are sort of
// receiving Arc<T>, but not an owned version. It's... complicated. We may be
// one of the first users of unsafe trait objects!

struct ArcWrapped<T>(PhantomData<T>);

impl<T: Notify + 'static> Notify for ArcWrapped<T> {
    fn notify(&self, id: usize) {
        unsafe {
            let me: *const ArcWrapped<T> = self;
            T::notify(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
                      id)
        }
    }

    fn clone_id(&self, id: usize) -> usize {
        unsafe {
            let me: *const ArcWrapped<T> = self;
            T::clone_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
                        id)
        }
    }

    fn drop_id(&self, id: usize) {
        unsafe {
            let me: *const ArcWrapped<T> = self;
            T::drop_id(&*(&me as *const *const ArcWrapped<T> as *const Arc<T>),
                       id)
        }
    }
}

unsafe impl<T: Notify + 'static> UnsafeNotify for ArcWrapped<T> {
    unsafe fn clone_raw(&self) -> NotifyHandle {
        let me: *const ArcWrapped<T> = self;
        let arc = (*(&me as *const *const ArcWrapped<T> as *const Arc<T>)).clone();
        NotifyHandle::from(arc)
    }

    unsafe fn drop_raw(&self) {
        let mut me: *const ArcWrapped<T> = self;
        let me = &mut me as *mut *const ArcWrapped<T> as *mut Arc<T>;
        ptr::drop_in_place(me);
    }
}

impl<T> From<Arc<T>> for NotifyHandle
    where T: Notify + 'static,
{
    fn from(rc: Arc<T>) -> NotifyHandle {
        unsafe {
            let ptr = mem::transmute::<Arc<T>, *mut ArcWrapped<T>>(rc);
            NotifyHandle::new(ptr)
        }
    }
}
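This `From<Arc<T>>` conversion is what lets callers hand an `Arc` of their own `Notify` implementation to the `*_notify` entry points. A minimal editor's sketch (not part of this commit), using only the public `futures::executor` API:

// Editor's illustrative sketch; not part of this commit.
extern crate futures;

use std::sync::Arc;
use futures::future;
use futures::executor::{self, Notify, NotifyHandle};

struct Wake;

impl Notify for Wake {
    fn notify(&self, _id: usize) {
        // Wake the event loop / condvar here.
    }
}

fn main() {
    // Relies on the `From<Arc<T>>` impl shown above.
    let handle: NotifyHandle = NotifyHandle::from(Arc::new(Wake));
    let mut task = executor::spawn(future::ok::<u32, ()>(42));
    let _ = task.poll_future_notify(&handle, 0);
}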

@ -7,12 +7,13 @@
use std::prelude::v1::*;
use std::sync::Arc;
use std::cell::UnsafeCell;
use task_impl;

// One critical piece of this module's contents are the `TaskRc<A>` handles.
// The purpose of this is to conceptually be able to store data in a task,
// allowing it to be accessed within multiple futures at once. For example if
// you have some concurrent futures working, they may all want mutable access to
// some data. We already know that when the futures are being poll'ed that we're
// some data. We already know that when the futures are being poll'd that we're
// entirely synchronized (aka `&mut Task`), so you shouldn't require an
// `Arc<Mutex<T>>` to share as the synchronization isn't necessary!
//

@ -63,7 +64,7 @@ use std::cell::UnsafeCell;
/// change over time, if the task migrates, so `A` must be `Send`.
#[derive(Debug)]
pub struct TaskRc<A> {
    task_id: usize,
    task: task_impl::Task,
    ptr: Arc<UnsafeCell<A>>,
}

@ -89,12 +90,10 @@ impl<A> TaskRc<A> {
    ///
    /// This function will panic if a task is not currently running.
    pub fn new(a: A) -> TaskRc<A> {
        super::with(|task| {
            TaskRc {
                task_id: task.id,
                ptr: Arc::new(UnsafeCell::new(a)),
            }
        })
        TaskRc {
            task: task_impl::park(),
            ptr: Arc::new(UnsafeCell::new(a)),
        }
    }

    /// Operate with a reference to the underlying data.

@ -112,19 +111,18 @@ impl<A> TaskRc<A> {
    pub fn with<F, R>(&self, f: F) -> R
        where F: FnOnce(&A) -> R
    {
        // for safety here, see docs at the top of this module
        super::with(|task| {
            assert!(self.task_id == task.id,
                    "TaskRc being accessed on task it does not belong to");
            f(unsafe { &*self.ptr.get() })
        })
        if !self.task.is_current() {
            panic!("TaskRc being accessed on task it does not belong to");
        }

        f(unsafe { &*self.ptr.get() })
    }
}

impl<A> Clone for TaskRc<A> {
    fn clone(&self) -> TaskRc<A> {
        TaskRc {
            task_id: self.task_id,
            task: self.task.clone(),
            ptr: self.ptr.clone(),
        }
    }
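For context, `TaskRc` is used roughly as in the editor's sketch below (not part of this commit). It is only valid while a task is actually being polled, since `TaskRc::new` panics outside of a task; the type is deprecated upstream.

// Editor's illustrative sketch; not part of this commit.
use futures::task::TaskRc;

// Call this from inside `Future::poll` (i.e. while a task is running).
fn share_counter() -> (TaskRc<u32>, TaskRc<u32>) {
    let rc = TaskRc::new(7);
    let other = rc.clone();
    // Access is task-checked: using a handle on the wrong task panics.
    rc.with(|value| assert_eq!(*value, 7));
    (rc, other)
}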

@ -16,7 +16,7 @@ pub struct UnparkMutex<D> {
}

// `UnparkMutex<D>` functions in many ways like a `Mutex<D>`, except that on
// acquisition failure, the current lockholder performs the desired work --
// acquisition failure, the current lock holder performs the desired work --
// re-polling.
//
// As such, these impls mirror those for `Mutex<D>`. In particular, a reference

@ -13,7 +13,11 @@ use std::mem;
use std::rc::{Rc, Weak};

use task::{self, Task};
use {Async, AsyncSink, Poll, StartSend, Sink, Stream};
use future::Executor;
use sink::SendAll;
use resultstream::{self, Results};
use unsync::oneshot;
use {Async, AsyncSink, Future, Poll, StartSend, Sink, Stream};

/// Creates a bounded in-memory channel with buffered storage.
///

@ -31,7 +35,6 @@ fn channel_<T>(buffer: Option<usize>) -> (Sender<T>, Receiver<T>) {
        capacity: buffer,
        blocked_senders: VecDeque::new(),
        blocked_recv: None,
        sender_count: 1,
    }));
    let sender = Sender { shared: Rc::downgrade(&shared) };
    let receiver = Receiver { state: State::Open(shared) };

@ -44,8 +47,6 @@ struct Shared<T> {
    capacity: Option<usize>,
    blocked_senders: VecDeque<Task>,
    blocked_recv: Option<Task>,
    // TODO: Redundant to Rc::weak_count; use that if/when stabilized
    sender_count: usize,
}

/// The transmission end of a channel.

@ -60,20 +61,19 @@ impl<T> Sender<T> {
    fn do_send(&self, msg: T) -> StartSend<T, SendError<T>> {
        let shared = match self.shared.upgrade() {
            Some(shared) => shared,
            None => return Err(SendError(msg)),
            None => return Err(SendError(msg)), // receiver was dropped
        };
        let mut shared = shared.borrow_mut();

        match shared.capacity {
            Some(capacity) if shared.buffer.len() == capacity => {
                shared.blocked_senders.push_back(task::park());
                shared.blocked_senders.push_back(task::current());
                Ok(AsyncSink::NotReady(msg))
            }
            _ => {
                shared.buffer.push_back(msg);
                if let Some(task) = shared.blocked_recv.take() {
                    drop(shared);
                    task.unpark();
                    task.notify();
                }
                Ok(AsyncSink::Ready)
            }

@ -83,11 +83,7 @@ impl<T> Sender<T> {

impl<T> Clone for Sender<T> {
    fn clone(&self) -> Self {
        let result = Sender { shared: self.shared.clone() };
        if let Some(shared) = self.shared.upgrade() {
            shared.borrow_mut().sender_count += 1;
        }
        result
        Sender { shared: self.shared.clone() }
    }
}

@ -114,13 +110,10 @@ impl<T> Drop for Sender<T> {
            Some(shared) => shared,
            None => return,
        };
        let mut shared = shared.borrow_mut();
        shared.sender_count -= 1;
        if shared.sender_count == 0 {
            if let Some(task) = shared.blocked_recv.take() {
        if Rc::weak_count(&shared) == 0 {
            if let Some(task) = shared.borrow_mut().blocked_recv.take() {
                // Wake up receiver as its stream has ended
                drop(shared);
                task.unpark();
                task.notify();
            }
        }
    }

@ -159,7 +152,7 @@ impl<T> Receiver<T> {
        };
        self.state = State::Closed(items);
        for task in blockers {
            task.unpark();
            task.notify();
        }
    }
}

@ -186,11 +179,11 @@ impl<T> Stream for Receiver<T> {
        if let Some(msg) = shared.buffer.pop_front() {
            if let Some(task) = shared.blocked_senders.pop_front() {
                drop(shared);
                task.unpark();
                task.notify();
            }
            Ok(Async::Ready(Some(msg)))
        } else {
            shared.blocked_recv = Some(task::park());
            shared.blocked_recv = Some(task::current());
            Ok(Async::NotReady)
        }
    }

@ -252,7 +245,18 @@ impl<T> UnboundedSender<T> {
    /// This is an unbounded sender, so this function differs from `Sink::send`
    /// by ensuring the return type reflects that the channel is always ready to
    /// receive messages.
    #[deprecated(note = "renamed to `unbounded_send`")]
    #[doc(hidden)]
    pub fn send(&self, msg: T) -> Result<(), SendError<T>> {
        self.unbounded_send(msg)
    }

    /// Sends the provided message along this channel.
    ///
    /// This is an unbounded sender, so this function differs from `Sink::send`
    /// by ensuring the return type reflects that the channel is always ready to
    /// receive messages.
    pub fn unbounded_send(&self, msg: T) -> Result<(), SendError<T>> {
        let shared = match self.0.shared.upgrade() {
            Some(shared) => shared,
            None => return Err(SendError(msg)),

@ -261,7 +265,7 @@ impl<T> UnboundedSender<T> {
        shared.buffer.push_back(msg);
        if let Some(task) = shared.blocked_recv.take() {
            drop(shared);
            task.unpark();
            task.notify();
        }
        Ok(())
    }
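The hunks above follow the renames introduced in newer futures 0.1 releases: `task::park()` becomes `task::current()` and `Task::unpark()` becomes `Task::notify()`. An editor's sketch of the new naming in a hand-written stream (illustrative only, not part of this commit):

// Editor's illustrative sketch; not part of this commit.
extern crate futures;

use futures::{Async, Poll, Stream};
use futures::task::{self, Task};

struct WakeLater {
    fired: bool,
    waiting: Option<Task>,
}

impl Stream for WakeLater {
    type Item = u32;
    type Error = ();

    fn poll(&mut self) -> Poll<Option<u32>, ()> {
        if self.fired {
            Ok(Async::Ready(Some(1)))
        } else {
            // New naming: `task::current()` replaces `task::park()`.
            self.waiting = Some(task::current());
            Ok(Async::NotReady)
        }
    }
}

impl WakeLater {
    fn fire(&mut self) {
        self.fired = true;
        if let Some(task) = self.waiting.take() {
            // New naming: `Task::notify()` replaces `Task::unpark()`.
            task.notify();
        }
    }
}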

@ -330,3 +334,137 @@ impl<T> SendError<T> {
        self.0
    }
}

/// Handle returned from the `spawn` function.
///
/// This handle is a stream that proxies a stream on a separate `Executor`.
/// Created through the `mpsc::spawn` function, this handle will produce
/// the same values as the proxied stream, as they are produced in the executor,
/// and uses a limited buffer to exert back-pressure on the remote stream.
///
/// If this handle is dropped, then the stream will no longer be polled and is
/// scheduled to be dropped.
pub struct SpawnHandle<Item, Error> {
    inner: Receiver<Result<Item, Error>>,
    _cancel_tx: oneshot::Sender<()>,
}

/// Type of future which `Executor` instances must be able to execute for `spawn`.
pub struct Execute<S: Stream> {
    inner: SendAll<Sender<Result<S::Item, S::Error>>, Results<S, SendError<Result<S::Item, S::Error>>>>,
    cancel_rx: oneshot::Receiver<()>,
}

/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// At most `buffer + 1` elements will be buffered at a time. If the buffer
/// is full, then `stream` will stop progressing until more space is available.
/// This allows the `SpawnHandle` to exert backpressure on the `stream`.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn<S, E>(stream: S, executor: &E, buffer: usize) -> SpawnHandle<S::Item, S::Error>
    where S: Stream,
          E: Executor<Execute<S>>
{
    let (cancel_tx, cancel_rx) = oneshot::channel();
    let (tx, rx) = channel(buffer);
    executor.execute(Execute {
        inner: tx.send_all(resultstream::new(stream)),
        cancel_rx: cancel_rx,
    }).expect("failed to spawn stream");
    SpawnHandle {
        inner: rx,
        _cancel_tx: cancel_tx,
    }
}

/// Spawns a `stream` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the remote stream, with unbounded buffering.
///
/// The `stream` will be canceled if the `SpawnHandle` is dropped.
///
/// The `SpawnHandle` returned is a stream that is a proxy for `stream` itself.
/// When `stream` has additional items available, then the `SpawnHandle`
/// will have those same items available.
///
/// An unbounded buffer is used, which means that values will be buffered as
/// fast as `stream` can produce them, without any backpressure. Therefore, if
/// `stream` is an infinite stream, it can use an unbounded amount of memory, and
/// potentially hog CPU resources. In particular, if `stream` is infinite
/// and doesn't ever yield (by returning `Async::NotReady` from `poll`), it
/// will result in an infinite loop.
///
/// # Panics
///
/// This function will panic if `executor` is unable to spawn a `Future` containing
/// the entirety of the `stream`.
pub fn spawn_unbounded<S, E>(stream: S, executor: &E) -> SpawnHandle<S::Item, S::Error>
    where S: Stream,
          E: Executor<Execute<S>>
{
    let (cancel_tx, cancel_rx) = oneshot::channel();
    let (tx, rx) = channel_(None);
    executor.execute(Execute {
        inner: tx.send_all(resultstream::new(stream)),
        cancel_rx: cancel_rx,
    }).expect("failed to spawn stream");
    SpawnHandle {
        inner: rx,
        _cancel_tx: cancel_tx,
    }
}
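An editor's sketch of how the new `unsync::mpsc::spawn` might be used (not part of this commit). `E` stands for any single-threaded executor capable of running the non-`Send` `Execute` future; none is named here.

// Editor's illustrative sketch; not part of this commit.
extern crate futures;

use std::vec::IntoIter;
use futures::future::Executor;
use futures::stream::{self, IterOk};
use futures::unsync::mpsc::{self, Execute, SpawnHandle};

fn spawn_counter<E>(executor: &E) -> SpawnHandle<u32, ()>
    where E: Executor<Execute<IterOk<IntoIter<u32>, ()>>>
{
    let source = stream::iter_ok(vec![1, 2, 3]);
    // At most `buffer + 1` items are held at once; the proxied stream is
    // paused (backpressure) until the handle drains them.
    mpsc::spawn(source, executor, 2)
}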

impl<I, E> Stream for SpawnHandle<I, E> {
    type Item = I;
    type Error = E;

    fn poll(&mut self) -> Poll<Option<I>, E> {
        match self.inner.poll() {
            Ok(Async::Ready(Some(Ok(t)))) => Ok(Async::Ready(Some(t.into()))),
            Ok(Async::Ready(Some(Err(e)))) => Err(e),
            Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
            Ok(Async::NotReady) => Ok(Async::NotReady),
            Err(_) => unreachable!("mpsc::Receiver should never return Err"),
        }
    }
}

impl<I, E> fmt::Debug for SpawnHandle<I, E> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("SpawnHandle")
            .finish()
    }
}

impl<S: Stream> Future for Execute<S> {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        match self.cancel_rx.poll() {
            Ok(Async::NotReady) => (),
            _ => return Ok(Async::Ready(())),
        }
        match self.inner.poll() {
            Ok(Async::NotReady) => Ok(Async::NotReady),
            _ => Ok(Async::Ready(()))
        }
    }
}

impl<S: Stream> fmt::Debug for Execute<S> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Execute")
            .finish()
    }
}

@ -3,10 +3,12 @@
//! This channel is similar to that in `sync::oneshot` but cannot be sent across
//! threads.

use std::cell::RefCell;
use std::cell::{Cell, RefCell};
use std::fmt;
use std::rc::{Rc, Weak};

use {Future, Poll, Async};
use future::{Executor, IntoFuture, Lazy, lazy};
use task::{self, Task};

/// Creates a new futures-aware, one-shot channel.

@ -57,9 +59,7 @@ enum State<T> {
    Closed(Option<T>),
}

/// Represents that the `Sender` dropped before sending a message.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Canceled;
pub use sync::oneshot::Canceled;

#[derive(Debug)]
struct Inner<T> {

@ -96,7 +96,7 @@ impl<T> Sender<T> {
    /// from `complete`.
    ///
    /// Like `Future::poll`, this function will panic if it's not called from
    /// within the context of a task. In otherwords, this should only ever be
    /// within the context of a task. In other words, this should only ever be
    /// called from inside another future.
    ///
    /// If `Ready` is returned then it means that the `Receiver` has disappeared

@ -110,12 +110,29 @@ impl<T> Sender<T> {
    pub fn poll_cancel(&mut self) -> Poll<(), ()> {
        match self.inner.upgrade() {
            Some(inner) => {
                inner.borrow_mut().tx_task = Some(task::park());
                inner.borrow_mut().tx_task = Some(task::current());
                Ok(Async::NotReady)
            }
            None => Ok(().into()),
        }
    }

    /// Tests to see whether this `Sender`'s corresponding `Receiver`
    /// has gone away.
    ///
    /// This function can be used to learn about when the `Receiver` (consumer)
    /// half has gone away and nothing will be able to receive a message sent
    /// from `send`.
    ///
    /// Note that this function is intended to *not* be used in the context of a
    /// future. If you're implementing a future you probably want to call the
    /// `poll_cancel` function which will block the current task if the
    /// cancellation hasn't happened yet. This can be useful when working on a
    /// non-futures related thread, though, which would otherwise panic if
    /// `poll_cancel` were called.
    pub fn is_canceled(&self) -> bool {
        !self.inner.upgrade().is_some()
    }
}
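The newly added `is_canceled` can be exercised without a task context, unlike `poll_cancel`; a small editor's sketch (not part of this commit):

// Editor's illustrative sketch; not part of this commit.
extern crate futures;

use futures::unsync::oneshot;

fn main() {
    let (tx, rx) = oneshot::channel::<u32>();
    assert!(!tx.is_canceled());
    drop(rx);
    // Fine to call outside of a task, unlike `poll_cancel`.
    assert!(tx.is_canceled());
}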

impl<T> Drop for Sender<T> {

@ -130,7 +147,7 @@ impl<T> Drop for Sender<T> {
            borrow.rx_task.take()
        };
        if let Some(task) = rx_task {
            task.unpark();
            task.notify();
        }
    }
}

@ -153,7 +170,7 @@ impl<T> Receiver<T> {
        };
        self.state = State::Closed(item);
        if let Some(task) = task {
            task.unpark();
            task.notify();
        }
    }
}

@ -184,7 +201,7 @@ impl<T> Future for Receiver<T> {
        if Rc::get_mut(inner).is_some() {
            Err(Canceled)
        } else {
            inner.borrow_mut().rx_task = Some(task::park());
            inner.borrow_mut().rx_task = Some(task::current());
            Ok(Async::NotReady)
        }
    }

@ -195,3 +212,140 @@ impl<T> Drop for Receiver<T> {
        self.close();
    }
}

/// Handle returned from the `spawn` function.
///
/// This handle is a future representing the completion of a different future on
/// a separate executor. Created through the `oneshot::spawn` function this
/// handle will resolve when the future provided to `spawn` resolves on the
/// `Executor` instance provided to that function.
///
/// If this handle is dropped then the future will automatically no longer be
/// polled and is scheduled to be dropped. This can be canceled with the
/// `forget` function, however.
pub struct SpawnHandle<T, E> {
    rx: Receiver<Result<T, E>>,
    keep_running: Rc<Cell<bool>>,
}

/// Type of future which `Spawn` instances below must be able to spawn.
pub struct Execute<F: Future> {
    future: F,
    tx: Option<Sender<Result<F::Item, F::Error>>>,
    keep_running: Rc<Cell<bool>>,
}

/// Spawns a `future` onto the instance of `Executor` provided, `executor`,
/// returning a handle representing the completion of the future.
///
/// The `SpawnHandle` returned is a future that is a proxy for `future` itself.
/// When `future` completes on `executor` then the `SpawnHandle` will itself be
/// resolved. Internally `SpawnHandle` contains a `oneshot` channel and is
/// thus not safe to send across threads.
///
/// The `future` will be canceled if the `SpawnHandle` is dropped. If this is
/// not desired then the `SpawnHandle::forget` function can be used to continue
/// running the future to completion.
///
/// # Panics
///
/// This function will panic if the instance of `Spawn` provided is unable to
/// spawn the `future` provided.
///
/// If the provided instance of `Spawn` does not actually run `future` to
/// completion, then the returned handle may panic when polled. Typically this
/// is not a problem, though, as most instances of `Spawn` will run futures to
/// completion.
pub fn spawn<F, E>(future: F, executor: &E) -> SpawnHandle<F::Item, F::Error>
    where F: Future,
          E: Executor<Execute<F>>,
{
    let flag = Rc::new(Cell::new(false));
    let (tx, rx) = channel();
    executor.execute(Execute {
        future: future,
        tx: Some(tx),
        keep_running: flag.clone(),
    }).expect("failed to spawn future");
    SpawnHandle {
        rx: rx,
        keep_running: flag,
    }
}
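An editor's sketch of driving a future through the new `unsync::oneshot::spawn` (not part of this commit); as with the mpsc version, `E` is any executor that can run the non-`Send` `Execute` future.

// Editor's illustrative sketch; not part of this commit.
extern crate futures;

use futures::future::{self, Executor, FutureResult};
use futures::unsync::oneshot::{self, Execute, SpawnHandle};

fn answer_on<E>(executor: &E) -> SpawnHandle<u32, ()>
    where E: Executor<Execute<FutureResult<u32, ()>>>
{
    // The future runs on `executor`; dropping the handle cancels it unless
    // `SpawnHandle::forget` is called first.
    oneshot::spawn(future::ok::<u32, ()>(42), executor)
}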

/// Spawns a function `f` onto the `Spawn` instance provided `s`.
///
/// For more information see the `spawn` function in this module. This function
/// is just a thin wrapper around `spawn` which will execute the closure on the
/// executor provided and then complete the future that the closure returns.
pub fn spawn_fn<F, R, E>(f: F, executor: &E) -> SpawnHandle<R::Item, R::Error>
    where F: FnOnce() -> R,
          R: IntoFuture,
          E: Executor<Execute<Lazy<F, R>>>,
{
    spawn(lazy(f), executor)
}

impl<T, E> SpawnHandle<T, E> {
    /// Drop this future without canceling the underlying future.
    ///
    /// When `SpawnHandle` is dropped, the spawned future will be canceled as
    /// well if the future hasn't already resolved. This function can be used
    /// when you want to drop this future but keep executing the underlying future.
    pub fn forget(self) {
        self.keep_running.set(true);
    }
}

impl<T, E> Future for SpawnHandle<T, E> {
    type Item = T;
    type Error = E;

    fn poll(&mut self) -> Poll<T, E> {
        match self.rx.poll() {
            Ok(Async::Ready(Ok(t))) => Ok(t.into()),
            Ok(Async::Ready(Err(e))) => Err(e),
            Ok(Async::NotReady) => Ok(Async::NotReady),
            Err(_) => panic!("future was canceled before completion"),
        }
    }
}

impl<T: fmt::Debug, E: fmt::Debug> fmt::Debug for SpawnHandle<T, E> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("SpawnHandle")
            .finish()
    }
}

impl<F: Future> Future for Execute<F> {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        // If we're canceled then we may want to bail out early.
        //
        // If the `forget` function was called, though, then we keep going.
        if self.tx.as_mut().unwrap().poll_cancel().unwrap().is_ready() {
            if !self.keep_running.get() {
                return Ok(().into())
            }
        }

        let result = match self.future.poll() {
            Ok(Async::NotReady) => return Ok(Async::NotReady),
            Ok(Async::Ready(t)) => Ok(t),
            Err(e) => Err(e),
        };
        drop(self.tx.take().unwrap().send(result));
        Ok(().into())
    }
}

impl<F: Future + fmt::Debug> fmt::Debug for Execute<F> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Execute")
            .field("future", &self.future)
            .finish()
    }
}

@ -27,10 +27,10 @@ fn result_smoke() {

    is_future_v::<i32, u32, _>(f_ok(1).map(|a| a + 1));
    is_future_v::<i32, u32, _>(f_ok(1).map_err(|a| a + 1));
    is_future_v::<i32, u32, _>(f_ok(1).and_then(|a| Ok(a)));
    is_future_v::<i32, u32, _>(f_ok(1).or_else(|a| Err(a)));
    is_future_v::<i32, u32, _>(f_ok(1).and_then(Ok));
    is_future_v::<i32, u32, _>(f_ok(1).or_else(Err));
    is_future_v::<(i32, i32), u32, _>(f_ok(1).join(Err(3)));
    is_future_v::<i32, u32, _>(f_ok(1).map(move |a| f_ok(a)).flatten());
    is_future_v::<i32, u32, _>(f_ok(1).map(f_ok).flatten());

    assert_done(|| f_ok(1), r_ok(1));
    assert_done(|| f_err(1), r_err(1));

@ -127,7 +127,7 @@ fn smoke_oneshot() {

    let (c, p) = oneshot::channel::<i32>();
    drop(c);
    let res = executor::spawn(p).poll_future(unpark_panic());
    let res = executor::spawn(p).poll_future_notify(&notify_panic(), 0);
    assert!(res.is_err());
    let (c, p) = oneshot::channel::<i32>();
    drop(c);

@ -150,7 +150,7 @@ fn select_cancels() {
    assert!(brx.try_recv().is_err());
    assert!(drx.try_recv().is_err());
    a.send(1).unwrap();
    let res = executor::spawn(f).poll_future(unpark_panic());
    let res = executor::spawn(f).poll_future_notify(&notify_panic(), 0);
    assert!(res.ok().unwrap().is_ready());
    assert_eq!(brx.recv().unwrap(), 1);
    drop(c);

@ -162,10 +162,10 @@ fn select_cancels() {
    let d = d.map(move |d| { dtx.send(d).unwrap(); d });

    let mut f = executor::spawn(b.select(d).then(unselect));
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
    a.send(1).unwrap();
    assert!(f.poll_future(unpark_panic()).ok().unwrap().is_ready());
    assert!(f.poll_future_notify(&notify_panic(), 0).ok().unwrap().is_ready());
    drop((c, f));
    assert!(drx.recv().is_err());
}

@ -179,7 +179,7 @@ fn join_cancels() {

    let f = b.join(d);
    drop(a);
    let res = executor::spawn(f).poll_future(unpark_panic());
    let res = executor::spawn(f).poll_future_notify(&notify_panic(), 0);
    assert!(res.is_err());
    drop(c);
    assert!(drx.recv().is_err());

@ -208,37 +208,37 @@ fn join_incomplete() {
    let (a, b) = oneshot::channel::<i32>();
    let (tx, rx) = channel();
    let mut f = executor::spawn(ok(1).join(b).map(move |r| tx.send(r).unwrap()));
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
    assert!(rx.try_recv().is_err());
    a.send(2).unwrap();
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_ready());
    assert_eq!(rx.recv().unwrap(), (1, 2));

    let (a, b) = oneshot::channel::<i32>();
    let (tx, rx) = channel();
    let mut f = executor::spawn(b.join(Ok(2)).map(move |r| tx.send(r).unwrap()));
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
    assert!(rx.try_recv().is_err());
    a.send(1).unwrap();
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_ready());
    assert_eq!(rx.recv().unwrap(), (1, 2));

    let (a, b) = oneshot::channel::<i32>();
    let (tx, rx) = channel();
    let mut f = executor::spawn(ok(1).join(b).map_err(move |_r| tx.send(2).unwrap()));
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
    assert!(rx.try_recv().is_err());
    drop(a);
    assert!(f.poll_future(unpark_noop()).is_err());
    assert!(f.poll_future_notify(&notify_noop(), 0).is_err());
    assert_eq!(rx.recv().unwrap(), 2);

    let (a, b) = oneshot::channel::<i32>();
    let (tx, rx) = channel();
    let mut f = executor::spawn(b.join(Ok(2)).map_err(move |_r| tx.send(1).unwrap()));
    assert!(f.poll_future(unpark_noop()).ok().unwrap().is_not_ready());
    assert!(f.poll_future_notify(&notify_noop(), 0).ok().unwrap().is_not_ready());
    assert!(rx.try_recv().is_err());
    drop(a);
    assert!(f.poll_future(unpark_noop()).is_err());
    assert!(f.poll_future_notify(&notify_noop(), 0).is_err());
    assert_eq!(rx.recv().unwrap(), 1);
}

@ -323,7 +323,7 @@ fn select2() {
    let b = b.map(move |v| { btx.send(v).unwrap(); v });
    let d = d.map(move |v| { dtx.send(v).unwrap(); v });
    let f = b.select(d);
    drop(executor::spawn(f).poll_future(support::unpark_noop()));
    drop(executor::spawn(f).poll_future_notify(&support::notify_noop(), 0));
    assert!(drx.recv().is_err());
    assert!(brx.recv().is_err());
}

@ -359,3 +359,17 @@ fn option() {
    assert_eq!(Ok(Some(())), Some(ok::<(), ()>(())).wait());
    assert_eq!(Ok(None), <Option<FutureResult<(), ()>> as Future>::wait(None));
}

#[test]
fn spawn_does_unsize() {
    #[derive(Clone, Copy)]
    struct EmptyNotify;
    impl executor::Notify for EmptyNotify {
        fn notify(&self, _: usize) { panic!("Cannot notify"); }
    }
    static EMPTY: &'static EmptyNotify = &EmptyNotify;

    let spawn: executor::Spawn<FutureResult<(), ()>> = executor::spawn(future::ok(()));
    let mut spawn_box: Box<executor::Spawn<Future<Item = (), Error = ()>>> = Box::new(spawn);
    spawn_box.poll_future_notify(&EMPTY, 0).unwrap();
}
Some files were not shown in this diff because too many files changed.