Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1484462 - Revendor Rust dependencies. r=ato
--HG--
rename : third_party/rust/crossbeam-deque/.cargo-checksum.json => third_party/rust/crossbeam-deque-0.2.0/.cargo-checksum.json
rename : third_party/rust/crossbeam-deque/.travis.yml => third_party/rust/crossbeam-deque-0.2.0/.travis.yml
rename : third_party/rust/crossbeam-deque/CHANGELOG.md => third_party/rust/crossbeam-deque-0.2.0/CHANGELOG.md
rename : third_party/rust/crossbeam-deque/Cargo.toml => third_party/rust/crossbeam-deque-0.2.0/Cargo.toml
rename : third_party/rust/bitflags-0.7.0/LICENSE-APACHE => third_party/rust/crossbeam-deque-0.2.0/LICENSE-APACHE
rename : third_party/rust/crossbeam-deque/README.md => third_party/rust/crossbeam-deque-0.2.0/README.md
rename : third_party/rust/crossbeam-deque/src/lib.rs => third_party/rust/crossbeam-deque-0.2.0/src/lib.rs
rename : third_party/rust/crossbeam-epoch/.cargo-checksum.json => third_party/rust/crossbeam-epoch-0.3.1/.cargo-checksum.json
rename : third_party/rust/crossbeam-epoch/.travis.yml => third_party/rust/crossbeam-epoch-0.3.1/.travis.yml
rename : third_party/rust/crossbeam-epoch/CHANGELOG.md => third_party/rust/crossbeam-epoch-0.3.1/CHANGELOG.md
rename : third_party/rust/crossbeam-epoch/Cargo.toml => third_party/rust/crossbeam-epoch-0.3.1/Cargo.toml
rename : third_party/rust/tokio-io/LICENSE-APACHE => third_party/rust/crossbeam-epoch-0.3.1/LICENSE-APACHE
rename : third_party/rust/crossbeam-epoch/README.md => third_party/rust/crossbeam-epoch-0.3.1/README.md
rename : third_party/rust/crossbeam-epoch/examples/sanitize.rs => third_party/rust/crossbeam-epoch-0.3.1/examples/sanitize.rs
rename : third_party/rust/crossbeam-epoch/src/atomic.rs => third_party/rust/crossbeam-epoch-0.3.1/src/atomic.rs
rename : third_party/rust/crossbeam-epoch/src/collector.rs => third_party/rust/crossbeam-epoch-0.3.1/src/collector.rs
rename : third_party/rust/crossbeam-epoch/src/default.rs => third_party/rust/crossbeam-epoch-0.3.1/src/default.rs
rename : third_party/rust/crossbeam-epoch/src/deferred.rs => third_party/rust/crossbeam-epoch-0.3.1/src/deferred.rs
rename : third_party/rust/crossbeam-epoch/src/epoch.rs => third_party/rust/crossbeam-epoch-0.3.1/src/epoch.rs
rename : third_party/rust/crossbeam-epoch/src/garbage.rs => third_party/rust/crossbeam-epoch-0.3.1/src/garbage.rs
rename : third_party/rust/crossbeam-epoch/src/guard.rs => third_party/rust/crossbeam-epoch-0.3.1/src/guard.rs
rename : third_party/rust/crossbeam-epoch/src/internal.rs => third_party/rust/crossbeam-epoch-0.3.1/src/internal.rs
rename : third_party/rust/crossbeam-epoch/src/lib.rs => third_party/rust/crossbeam-epoch-0.3.1/src/lib.rs
rename : third_party/rust/crossbeam-epoch/src/sync/list.rs => third_party/rust/crossbeam-epoch-0.3.1/src/sync/list.rs
rename : third_party/rust/crossbeam-epoch/src/sync/queue.rs => third_party/rust/crossbeam-epoch-0.3.1/src/sync/queue.rs
rename : third_party/rust/crossbeam-utils/.cargo-checksum.json => third_party/rust/crossbeam-utils-0.2.2/.cargo-checksum.json
rename : third_party/rust/crossbeam-utils/CHANGELOG.md => third_party/rust/crossbeam-utils-0.2.2/CHANGELOG.md
rename : third_party/rust/crossbeam-utils/Cargo.toml => third_party/rust/crossbeam-utils-0.2.2/Cargo.toml
rename : third_party/rust/bitflags-0.7.0/LICENSE-APACHE => third_party/rust/crossbeam-utils-0.2.2/LICENSE-APACHE
rename : third_party/rust/crossbeam-utils/src/atomic_option.rs => third_party/rust/crossbeam-utils-0.2.2/src/atomic_option.rs
rename : third_party/rust/crossbeam-utils/src/lib.rs => third_party/rust/crossbeam-utils-0.2.2/src/lib.rs
rename : third_party/rust/crossbeam-utils/src/scoped.rs => third_party/rust/crossbeam-utils-0.2.2/src/scoped.rs
rename : third_party/rust/bitflags-0.7.0/LICENSE-APACHE => third_party/rust/indexmap/LICENSE-APACHE
rename : third_party/rust/lazycell/.cargo-checksum.json => third_party/rust/lazycell-0.4.0/.cargo-checksum.json
rename : third_party/rust/lazycell/CHANGELOG.md => third_party/rust/lazycell-0.4.0/CHANGELOG.md
rename : third_party/rust/lazycell/Cargo.toml => third_party/rust/lazycell-0.4.0/Cargo.toml
rename : third_party/rust/bitflags-0.7.0/LICENSE-APACHE => third_party/rust/lazycell-0.4.0/LICENSE-APACHE
rename : third_party/rust/lazycell/LICENSE-MIT => third_party/rust/lazycell-0.4.0/LICENSE-MIT
rename : third_party/rust/lazycell/README.md => third_party/rust/lazycell-0.4.0/README.md
rename : third_party/rust/lazycell/src/lib.rs => third_party/rust/lazycell-0.4.0/src/lib.rs
rename : third_party/rust/bitflags-0.7.0/LICENSE-APACHE => third_party/rust/rand-0.3.22/LICENSE-APACHE
rename : third_party/rust/bitflags-0.7.0/LICENSE-MIT => third_party/rust/rand-0.3.22/LICENSE-MIT
rename : third_party/rust/rand/appveyor.yml => third_party/rust/rand-0.3.22/appveyor.yml
rename : third_party/rust/slab/.cargo-checksum.json => third_party/rust/slab-0.3.0/.cargo-checksum.json
rename : third_party/rust/slab/Cargo.toml => third_party/rust/slab-0.3.0/Cargo.toml
rename : third_party/rust/slab/README.md => third_party/rust/slab-0.3.0/README.md
rename : third_party/rust/slab/src/lib.rs => third_party/rust/slab-0.3.0/src/lib.rs
rename : third_party/rust/tokio-io/src/read_to_end.rs => third_party/rust/tokio-io/src/io/read_to_end.rs
rename : third_party/rust/tokio-io/src/read_until.rs => third_party/rust/tokio-io/src/io/read_until.rs
Parent: 963e21bf7f
Commit: aa65723136
third_party/rust/bitflags-0.7.0/.cargo-checksum.json
@@ -1 +0,0 @@
{"files":{".travis.yml":"2b615144d3f4b2e63ba6ec435cc18df7d76354aa07c2a02d6c707028cc448784","Cargo.toml":"db8c2e9ea912c5f3d2d89cf4cf936c448300e356b0fb533db8875923cb135256","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"8cfbc986af45867d9e620188af2392320fe6e0d9536753ba415c94ab522f5fb5","src/lib.rs":"618ce383bb219725363fba174fc66beb4874d9682e5da953f9e3e9cb3f786d5f","tests/external.rs":"546e549ec831876a5dc272bd0537adc9e9886c6da54656c825e7bffc079e2c74","tests/external_no_std.rs":"48929f5109aabc156442d5ae2ab07b4bce5d648488bf49dba725f6ab23bcb48a"},"package":"aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"}

third_party/rust/bitflags-0.7.0/.travis.yml
@@ -1,24 +0,0 @@
language: rust
rust:
  - stable
  - beta
  - nightly
sudo: false
script:
  - cargo build --verbose
  - cargo test --verbose
  - cargo doc
after_success: |
  [ $TRAVIS_BRANCH = master ] &&
  [ $TRAVIS_PULL_REQUEST = false ] &&
  [ $TRAVIS_RUST_VERSION = nightly ] &&
  echo '<meta http-equiv=refresh content=0;url=bitflags/index.html>' > target/doc/index.html &&
  pip install ghp-import --user $USER &&
  $HOME/.local/bin/ghp-import -n target/doc &&
  git push -qf https://${TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
env:
  global:
    secure: d+l63TtlF6cfFVDGauYRexgx4lBww4ORqqK4Vt75nWbiCbjZYsKXbcTUdhAr193nIVGiNW50A8SekM01F3EngHwHwr6u5kFleOggm+HA0kkBVeX+k2A4WCVVfYI+gth+zk99WaF8h46MA0evhx6FYDoqeyl9oqmVifI4kaqhMwc=
notifications:
  email:
    on_success: never

third_party/rust/bitflags-0.7.0/Cargo.toml
@@ -1,13 +0,0 @@
[package]

name = "bitflags"
version = "0.7.0"
authors = ["The Rust Project Developers"]
license = "MIT/Apache-2.0"
readme = "README.md"
repository = "https://github.com/rust-lang/bitflags"
homepage = "https://github.com/rust-lang/bitflags"
documentation = "https://doc.rust-lang.org/bitflags"
description = """
A macro to generate structures which behave like bitflags.
"""

third_party/rust/bitflags-0.7.0/README.md
@@ -1,24 +0,0 @@
bitflags
========

A Rust macro to generate structures which behave like a set of bitflags

[![Build Status](https://travis-ci.org/rust-lang-nursery/bitflags.svg?branch=master)](https://travis-ci.org/rust-lang-nursery/bitflags)

[Documentation](https://doc.rust-lang.org/bitflags)

## Usage

Add this to your `Cargo.toml`:

```toml
[dependencies]
bitflags = "0.6"
```

and this to your crate root:

```rust
#[macro_use]
extern crate bitflags;
```
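
To tie the two snippets together, here is a minimal, self-contained program using the 0.7-era `bitflags!` syntax this vendored copy provides; the `Perms` type and its constants are illustrative names, not part of the crate:

```rust
// A minimal sketch of bitflags 0.7 usage; `Perms`, READ and WRITE are
// illustrative names chosen for this example.
#[macro_use]
extern crate bitflags;

bitflags! {
    flags Perms: u32 {
        const READ  = 0b01,
        const WRITE = 0b10,
    }
}

fn main() {
    let rw = READ | WRITE;       // union via the generated BitOr impl
    assert!(rw.contains(READ));  // set containment
    assert_eq!(rw.bits(), 0b11); // raw underlying value
}
```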

third_party/rust/bitflags-0.7.0/src/lib.rs
@@ -1,808 +0,0 @@
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! A typesafe bitmask flag generator.

#![no_std]

#[cfg(test)]
#[macro_use]
extern crate std;

// Re-export libstd/libcore using an alias so that the macros can work in no_std
// crates while remaining compatible with normal crates.
#[allow(private_in_public)]
#[doc(hidden)]
pub use core as __core;

/// The `bitflags!` macro generates a `struct` that holds a set of C-style
/// bitmask flags. It is useful for creating typesafe wrappers for C APIs.
///
/// The flags should only be defined for integer types, otherwise unexpected
/// type errors may occur at compile time.
///
/// # Example
///
/// ```{.rust}
/// #[macro_use]
/// extern crate bitflags;
///
/// bitflags! {
///     flags Flags: u32 {
///         const FLAG_A = 0b00000001,
///         const FLAG_B = 0b00000010,
///         const FLAG_C = 0b00000100,
///         const FLAG_ABC = FLAG_A.bits
///                        | FLAG_B.bits
///                        | FLAG_C.bits,
///     }
/// }
///
/// fn main() {
///     let e1 = FLAG_A | FLAG_C;
///     let e2 = FLAG_B | FLAG_C;
///     assert_eq!((e1 | e2), FLAG_ABC); // union
///     assert_eq!((e1 & e2), FLAG_C); // intersection
///     assert_eq!((e1 - e2), FLAG_A); // set difference
///     assert_eq!(!e2, FLAG_A); // set complement
/// }
/// ```
///
/// The generated `struct`s can also be extended with type and trait
/// implementations:
///
/// ```{.rust}
/// #[macro_use]
/// extern crate bitflags;
///
/// use std::fmt;
///
/// bitflags! {
///     flags Flags: u32 {
///         const FLAG_A = 0b00000001,
///         const FLAG_B = 0b00000010,
///     }
/// }
///
/// impl Flags {
///     pub fn clear(&mut self) {
///         self.bits = 0; // The `bits` field can be accessed from within the
///                        // same module where the `bitflags!` macro was invoked.
///     }
/// }
///
/// impl fmt::Display for Flags {
///     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
///         write!(f, "hi!")
///     }
/// }
///
/// fn main() {
///     let mut flags = FLAG_A | FLAG_B;
///     flags.clear();
///     assert!(flags.is_empty());
///     assert_eq!(format!("{}", flags), "hi!");
///     assert_eq!(format!("{:?}", FLAG_A | FLAG_B), "FLAG_A | FLAG_B");
///     assert_eq!(format!("{:?}", FLAG_B), "FLAG_B");
/// }
/// ```
///
/// # Visibility
///
/// The generated struct and its associated flag constants are not exported
/// out of the current module by default. A definition can be exported out of
/// the current module by adding `pub` before `flags`:
///
/// ```{.rust},ignore
/// #[macro_use]
/// extern crate bitflags;
///
/// mod example {
///     bitflags! {
///         pub flags Flags1: u32 {
///             const FLAG_A = 0b00000001,
///         }
///     }
///     bitflags! {
///         flags Flags2: u32 {
///             const FLAG_B = 0b00000010,
///         }
///     }
/// }
///
/// fn main() {
///     let flag1 = example::FLAG_A;
///     let flag2 = example::FLAG_B; // error: const `FLAG_B` is private
/// }
/// ```
///
/// # Attributes
///
/// Attributes can be attached to the generated `struct` by placing them
/// before the `flags` keyword.
///
/// # Trait implementations
///
/// The `Copy`, `Clone`, `PartialEq`, `Eq`, `PartialOrd`, `Ord` and `Hash`
/// traits automatically derived for the `struct` using the `derive` attribute.
/// Additional traits can be derived by providing an explicit `derive`
/// attribute on `flags`.
///
/// The `Extend` and `FromIterator` traits are implemented for the `struct`,
/// too: `Extend` adds the union of the instances of the `struct` iterated over,
/// while `FromIterator` calculates the union.
///
/// The `Debug` trait is also implemented by displaying the bits value of the
/// internal struct.
///
/// ## Operators
///
/// The following operator traits are implemented for the generated `struct`:
///
/// - `BitOr` and `BitOrAssign`: union
/// - `BitAnd` and `BitAndAssign`: intersection
/// - `BitXor` and `BitXorAssign`: toggle
/// - `Sub` and `SubAssign`: set difference
/// - `Not`: set complement
///
/// As long as the assignment operators are unstable rust feature they are only
/// available with the crate feature `assignment_ops` enabled.
///
/// # Methods
///
/// The following methods are defined for the generated `struct`:
///
/// - `empty`: an empty set of flags
/// - `all`: the set of all flags
/// - `bits`: the raw value of the flags currently stored
/// - `from_bits`: convert from underlying bit representation, unless that
///   representation contains bits that do not correspond to a flag
/// - `from_bits_truncate`: convert from underlying bit representation, dropping
///   any bits that do not correspond to flags
/// - `is_empty`: `true` if no flags are currently stored
/// - `is_all`: `true` if all flags are currently set
/// - `intersects`: `true` if there are flags common to both `self` and `other`
/// - `contains`: `true` all of the flags in `other` are contained within `self`
/// - `insert`: inserts the specified flags in-place
/// - `remove`: removes the specified flags in-place
/// - `toggle`: the specified flags will be inserted if not present, and removed
///   if they are.
#[macro_export]
macro_rules! bitflags {
    ($(#[$attr:meta])* pub flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+
    }) => {
        #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
        $(#[$attr])*
        pub struct $BitFlags {
            bits: $T,
        }

        $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags { bits: $value };)+

        bitflags! {
            @_impl flags $BitFlags: $T {
                $($(#[$Flag_attr])* const $Flag = $value),+
            }
        }
    };
    ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+
    }) => {
        #[derive(Copy, PartialEq, Eq, Clone, PartialOrd, Ord, Hash)]
        $(#[$attr])*
        struct $BitFlags {
            bits: $T,
        }

        $($(#[$Flag_attr])* const $Flag: $BitFlags = $BitFlags { bits: $value };)+

        bitflags! {
            @_impl flags $BitFlags: $T {
                $($(#[$Flag_attr])* const $Flag = $value),+
            }
        }
    };
    (@_impl flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+
    }) => {
        impl $crate::__core::fmt::Debug for $BitFlags {
            fn fmt(&self, f: &mut $crate::__core::fmt::Formatter) -> $crate::__core::fmt::Result {
                // This convoluted approach is to handle #[cfg]-based flag
                // omission correctly. Some of the $Flag variants may not be
                // defined in this module so we create an inner module which
                // defines *all* flags to the value of 0. We then create a
                // second inner module that defines all of the flags with #[cfg]
                // to their real values. Afterwards the glob will import
                // variants from the second inner module, shadowing all
                // defined variants, leaving only the undefined ones with the
                // bit value of 0.
                #[allow(dead_code)]
                #[allow(unused_assignments)]
                mod dummy {
                    // We can't use the real $BitFlags struct because it may be
                    // private, which prevents us from using it to define
                    // public constants.
                    pub struct $BitFlags {
                        bits: u64,
                    }
                    mod real_flags {
                        use super::$BitFlags;
                        $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags {
                            bits: super::super::$Flag.bits as u64
                        };)+
                    }
                    // Now we define the "undefined" versions of the flags.
                    // This way, all the names exist, even if some are #[cfg]ed
                    // out.
                    $(const $Flag: $BitFlags = $BitFlags { bits: 0 };)+

                    #[inline]
                    pub fn fmt(self_: u64,
                               f: &mut $crate::__core::fmt::Formatter)
                               -> $crate::__core::fmt::Result {
                        // Now we import the real values for the flags.
                        // Only ones that are #[cfg]ed out will be 0.
                        use self::real_flags::*;

                        let mut first = true;
                        $(
                            // $Flag.bits == 0 means that $Flag doesn't exist
                            if $Flag.bits != 0 && self_ & $Flag.bits as u64 == $Flag.bits as u64 {
                                if !first {
                                    try!(f.write_str(" | "));
                                }
                                first = false;
                                try!(f.write_str(stringify!($Flag)));
                            }
                        )+
                        Ok(())
                    }
                }
                dummy::fmt(self.bits as u64, f)
            }
        }

        #[allow(dead_code)]
        impl $BitFlags {
            /// Returns an empty set of flags.
            #[inline]
            pub fn empty() -> $BitFlags {
                $BitFlags { bits: 0 }
            }

            /// Returns the set containing all flags.
            #[inline]
            pub fn all() -> $BitFlags {
                // See above `dummy` module for why this approach is taken.
                #[allow(dead_code)]
                mod dummy {
                    pub struct $BitFlags {
                        bits: u64,
                    }
                    mod real_flags {
                        use super::$BitFlags;
                        $($(#[$Flag_attr])* pub const $Flag: $BitFlags = $BitFlags {
                            bits: super::super::$Flag.bits as u64
                        };)+
                    }
                    $(const $Flag: $BitFlags = $BitFlags { bits: 0 };)+

                    #[inline]
                    pub fn all() -> u64 {
                        use self::real_flags::*;
                        $($Flag.bits)|+
                    }
                }
                $BitFlags { bits: dummy::all() as $T }
            }

            /// Returns the raw value of the flags currently stored.
            #[inline]
            pub fn bits(&self) -> $T {
                self.bits
            }

            /// Convert from underlying bit representation, unless that
            /// representation contains bits that do not correspond to a flag.
            #[inline]
            pub fn from_bits(bits: $T) -> $crate::__core::option::Option<$BitFlags> {
                if (bits & !$BitFlags::all().bits()) == 0 {
                    $crate::__core::option::Option::Some($BitFlags { bits: bits })
                } else {
                    $crate::__core::option::Option::None
                }
            }

            /// Convert from underlying bit representation, dropping any bits
            /// that do not correspond to flags.
            #[inline]
            pub fn from_bits_truncate(bits: $T) -> $BitFlags {
                $BitFlags { bits: bits } & $BitFlags::all()
            }

            /// Returns `true` if no flags are currently stored.
            #[inline]
            pub fn is_empty(&self) -> bool {
                *self == $BitFlags::empty()
            }

            /// Returns `true` if all flags are currently set.
            #[inline]
            pub fn is_all(&self) -> bool {
                *self == $BitFlags::all()
            }

            /// Returns `true` if there are flags common to both `self` and `other`.
            #[inline]
            pub fn intersects(&self, other: $BitFlags) -> bool {
                !(*self & other).is_empty()
            }

            /// Returns `true` all of the flags in `other` are contained within `self`.
            #[inline]
            pub fn contains(&self, other: $BitFlags) -> bool {
                (*self & other) == other
            }

            /// Inserts the specified flags in-place.
            #[inline]
            pub fn insert(&mut self, other: $BitFlags) {
                self.bits |= other.bits;
            }

            /// Removes the specified flags in-place.
            #[inline]
            pub fn remove(&mut self, other: $BitFlags) {
                self.bits &= !other.bits;
            }

            /// Toggles the specified flags in-place.
            #[inline]
            pub fn toggle(&mut self, other: $BitFlags) {
                self.bits ^= other.bits;
            }
        }

        impl $crate::__core::ops::BitOr for $BitFlags {
            type Output = $BitFlags;

            /// Returns the union of the two sets of flags.
            #[inline]
            fn bitor(self, other: $BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits | other.bits }
            }
        }

        impl $crate::__core::ops::BitOrAssign for $BitFlags {

            /// Adds the set of flags.
            #[inline]
            fn bitor_assign(&mut self, other: $BitFlags) {
                self.bits |= other.bits;
            }
        }

        impl $crate::__core::ops::BitXor for $BitFlags {
            type Output = $BitFlags;

            /// Returns the left flags, but with all the right flags toggled.
            #[inline]
            fn bitxor(self, other: $BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits ^ other.bits }
            }
        }

        impl $crate::__core::ops::BitXorAssign for $BitFlags {

            /// Toggles the set of flags.
            #[inline]
            fn bitxor_assign(&mut self, other: $BitFlags) {
                self.bits ^= other.bits;
            }
        }

        impl $crate::__core::ops::BitAnd for $BitFlags {
            type Output = $BitFlags;

            /// Returns the intersection between the two sets of flags.
            #[inline]
            fn bitand(self, other: $BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits & other.bits }
            }
        }

        impl $crate::__core::ops::BitAndAssign for $BitFlags {

            /// Disables all flags disabled in the set.
            #[inline]
            fn bitand_assign(&mut self, other: $BitFlags) {
                self.bits &= other.bits;
            }
        }

        impl $crate::__core::ops::Sub for $BitFlags {
            type Output = $BitFlags;

            /// Returns the set difference of the two sets of flags.
            #[inline]
            fn sub(self, other: $BitFlags) -> $BitFlags {
                $BitFlags { bits: self.bits & !other.bits }
            }
        }

        impl $crate::__core::ops::SubAssign for $BitFlags {

            /// Disables all flags enabled in the set.
            #[inline]
            fn sub_assign(&mut self, other: $BitFlags) {
                self.bits &= !other.bits;
            }
        }

        impl $crate::__core::ops::Not for $BitFlags {
            type Output = $BitFlags;

            /// Returns the complement of this set of flags.
            #[inline]
            fn not(self) -> $BitFlags {
                $BitFlags { bits: !self.bits } & $BitFlags::all()
            }
        }

        impl $crate::__core::iter::Extend<$BitFlags> for $BitFlags {
            fn extend<T: $crate::__core::iter::IntoIterator<Item=$BitFlags>>(&mut self, iterator: T) {
                for item in iterator {
                    self.insert(item)
                }
            }
        }

        impl $crate::__core::iter::FromIterator<$BitFlags> for $BitFlags {
            fn from_iter<T: $crate::__core::iter::IntoIterator<Item=$BitFlags>>(iterator: T) -> $BitFlags {
                let mut result = Self::empty();
                result.extend(iterator);
                result
            }
        }
    };
    ($(#[$attr:meta])* pub flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+,
    }) => {
        bitflags! {
            $(#[$attr])*
            pub flags $BitFlags: $T {
                $($(#[$Flag_attr])* const $Flag = $value),+
            }
        }
    };
    ($(#[$attr:meta])* flags $BitFlags:ident: $T:ty {
        $($(#[$Flag_attr:meta])* const $Flag:ident = $value:expr),+,
    }) => {
        bitflags! {
            $(#[$attr])*
            flags $BitFlags: $T {
                $($(#[$Flag_attr])* const $Flag = $value),+
            }
        }
    };
}

#[cfg(test)]
#[allow(non_upper_case_globals, dead_code)]
mod tests {
    use std::hash::{SipHasher, Hash, Hasher};

    bitflags! {
        #[doc = "> The first principle is that you must not fool yourself — and"]
        #[doc = "> you are the easiest person to fool."]
        #[doc = "> "]
        #[doc = "> - Richard Feynman"]
        flags Flags: u32 {
            const FlagA = 0b00000001,
            #[doc = "<pcwalton> macros are way better at generating code than trans is"]
            const FlagB = 0b00000010,
            const FlagC = 0b00000100,
            #[doc = "* cmr bed"]
            #[doc = "* strcat table"]
            #[doc = "<strcat> wait what?"]
            const FlagABC = FlagA.bits
                          | FlagB.bits
                          | FlagC.bits,
        }
    }

    bitflags! {
        flags _CfgFlags: u32 {
            #[cfg(windows)]
            const _CfgA = 0b01,
            #[cfg(unix)]
            const _CfgB = 0b01,
            #[cfg(windows)]
            const _CfgC = _CfgA.bits | 0b10,
        }
    }

    bitflags! {
        flags AnotherSetOfFlags: i8 {
            const AnotherFlag = -1_i8,
        }
    }

    #[test]
    fn test_bits(){
        assert_eq!(Flags::empty().bits(), 0b00000000);
        assert_eq!(FlagA.bits(), 0b00000001);
        assert_eq!(FlagABC.bits(), 0b00000111);

        assert_eq!(AnotherSetOfFlags::empty().bits(), 0b00);
        assert_eq!(AnotherFlag.bits(), !0_i8);
    }

    #[test]
    fn test_from_bits() {
        assert_eq!(Flags::from_bits(0), Some(Flags::empty()));
        assert_eq!(Flags::from_bits(0b1), Some(FlagA));
        assert_eq!(Flags::from_bits(0b10), Some(FlagB));
        assert_eq!(Flags::from_bits(0b11), Some(FlagA | FlagB));
        assert_eq!(Flags::from_bits(0b1000), None);

        assert_eq!(AnotherSetOfFlags::from_bits(!0_i8), Some(AnotherFlag));
    }

    #[test]
    fn test_from_bits_truncate() {
        assert_eq!(Flags::from_bits_truncate(0), Flags::empty());
        assert_eq!(Flags::from_bits_truncate(0b1), FlagA);
        assert_eq!(Flags::from_bits_truncate(0b10), FlagB);
        assert_eq!(Flags::from_bits_truncate(0b11), (FlagA | FlagB));
        assert_eq!(Flags::from_bits_truncate(0b1000), Flags::empty());
        assert_eq!(Flags::from_bits_truncate(0b1001), FlagA);

        assert_eq!(AnotherSetOfFlags::from_bits_truncate(0_i8), AnotherSetOfFlags::empty());
    }

    #[test]
    fn test_is_empty(){
        assert!(Flags::empty().is_empty());
        assert!(!FlagA.is_empty());
        assert!(!FlagABC.is_empty());

        assert!(!AnotherFlag.is_empty());
    }

    #[test]
    fn test_is_all() {
        assert!(Flags::all().is_all());
        assert!(!FlagA.is_all());
        assert!(FlagABC.is_all());

        assert!(AnotherFlag.is_all());
    }

    #[test]
    fn test_two_empties_do_not_intersect() {
        let e1 = Flags::empty();
        let e2 = Flags::empty();
        assert!(!e1.intersects(e2));

        assert!(AnotherFlag.intersects(AnotherFlag));
    }

    #[test]
    fn test_empty_does_not_intersect_with_full() {
        let e1 = Flags::empty();
        let e2 = FlagABC;
        assert!(!e1.intersects(e2));
    }

    #[test]
    fn test_disjoint_intersects() {
        let e1 = FlagA;
        let e2 = FlagB;
        assert!(!e1.intersects(e2));
    }

    #[test]
    fn test_overlapping_intersects() {
        let e1 = FlagA;
        let e2 = FlagA | FlagB;
        assert!(e1.intersects(e2));
    }

    #[test]
    fn test_contains() {
        let e1 = FlagA;
        let e2 = FlagA | FlagB;
        assert!(!e1.contains(e2));
        assert!(e2.contains(e1));
        assert!(FlagABC.contains(e2));

        assert!(AnotherFlag.contains(AnotherFlag));
    }

    #[test]
    fn test_insert(){
        let mut e1 = FlagA;
        let e2 = FlagA | FlagB;
        e1.insert(e2);
        assert_eq!(e1, e2);

        let mut e3 = AnotherSetOfFlags::empty();
        e3.insert(AnotherFlag);
        assert_eq!(e3, AnotherFlag);
    }

    #[test]
    fn test_remove(){
        let mut e1 = FlagA | FlagB;
        let e2 = FlagA | FlagC;
        e1.remove(e2);
        assert_eq!(e1, FlagB);

        let mut e3 = AnotherFlag;
        e3.remove(AnotherFlag);
        assert_eq!(e3, AnotherSetOfFlags::empty());
    }

    #[test]
    fn test_operators() {
        let e1 = FlagA | FlagC;
        let e2 = FlagB | FlagC;
        assert_eq!((e1 | e2), FlagABC); // union
        assert_eq!((e1 & e2), FlagC); // intersection
        assert_eq!((e1 - e2), FlagA); // set difference
        assert_eq!(!e2, FlagA); // set complement
        assert_eq!(e1 ^ e2, FlagA | FlagB); // toggle
        let mut e3 = e1;
        e3.toggle(e2);
        assert_eq!(e3, FlagA | FlagB);

        let mut m4 = AnotherSetOfFlags::empty();
        m4.toggle(AnotherSetOfFlags::empty());
        assert_eq!(m4, AnotherSetOfFlags::empty());
    }

    #[test]
    fn test_assignment_operators() {
        let mut m1 = Flags::empty();
        let e1 = FlagA | FlagC;
        // union
        m1 |= FlagA;
        assert_eq!(m1, FlagA);
        // intersection
        m1 &= e1;
        assert_eq!(m1, FlagA);
        // set difference
        m1 -= m1;
        assert_eq!(m1, Flags::empty());
        // toggle
        m1 ^= e1;
        assert_eq!(m1, e1);
    }

    #[test]
    fn test_extend() {
        let mut flags;

        flags = Flags::empty();
        flags.extend([].iter().cloned());
        assert_eq!(flags, Flags::empty());

        flags = Flags::empty();
        flags.extend([FlagA, FlagB].iter().cloned());
        assert_eq!(flags, FlagA | FlagB);

        flags = FlagA;
        flags.extend([FlagA, FlagB].iter().cloned());
        assert_eq!(flags, FlagA | FlagB);

        flags = FlagB;
        flags.extend([FlagA, FlagABC].iter().cloned());
        assert_eq!(flags, FlagABC);
    }

    #[test]
    fn test_from_iterator() {
        assert_eq!([].iter().cloned().collect::<Flags>(), Flags::empty());
        assert_eq!([FlagA, FlagB].iter().cloned().collect::<Flags>(), FlagA | FlagB);
        assert_eq!([FlagA, FlagABC].iter().cloned().collect::<Flags>(), FlagABC);
    }

    #[test]
    fn test_lt() {
        let mut a = Flags::empty();
        let mut b = Flags::empty();

        assert!(!(a < b) && !(b < a));
        b = FlagB;
        assert!(a < b);
        a = FlagC;
        assert!(!(a < b) && b < a);
        b = FlagC | FlagB;
        assert!(a < b);
    }

    #[test]
    fn test_ord() {
        let mut a = Flags::empty();
        let mut b = Flags::empty();

        assert!(a <= b && a >= b);
        a = FlagA;
        assert!(a > b && a >= b);
        assert!(b < a && b <= a);
        b = FlagB;
        assert!(b > a && b >= a);
        assert!(a < b && a <= b);
    }

    fn hash<T: Hash>(t: &T) -> u64 {
        let mut s = SipHasher::new_with_keys(0, 0);
        t.hash(&mut s);
        s.finish()
    }

    #[test]
    fn test_hash() {
        let mut x = Flags::empty();
        let mut y = Flags::empty();
        assert_eq!(hash(&x), hash(&y));
        x = Flags::all();
        y = FlagABC;
        assert_eq!(hash(&x), hash(&y));
    }

    #[test]
    fn test_debug() {
        assert_eq!(format!("{:?}", FlagA | FlagB), "FlagA | FlagB");
        assert_eq!(format!("{:?}", FlagABC), "FlagA | FlagB | FlagC | FlagABC");
    }

    mod submodule {
        bitflags! {
            pub flags PublicFlags: i8 {
                const FlagX = 0,
            }
        }
        bitflags! {
            flags PrivateFlags: i8 {
                const FlagY = 0,
            }
        }

        #[test]
        fn test_private() {
            let _ = FlagY;
        }
    }

    #[test]
    fn test_public() {
        let _ = submodule::FlagX;
    }

    mod t1 {
        mod foo {
            pub type Bar = i32;
        }

        bitflags! {
            /// baz
            flags Flags: foo::Bar {
                const A = 0b00000001,
                #[cfg(foo)]
                const B = 0b00000010,
                #[cfg(foo)]
                const C = 0b00000010,
            }
        }
    }
}
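
The conversion methods listed in the doc comment above differ only in how they treat bits that correspond to no flag, and the generated `Debug` impl prints flag names rather than raw bits. A small self-contained sketch of those semantics, consistent with the tests above (the `Mode` type and its flags are illustrative, not from the crate):

```rust
// Sketch of the from_bits / from_bits_truncate contrast and the generated
// Debug output; `Mode`, R and W are illustrative names for this example.
#[macro_use]
extern crate bitflags;

bitflags! {
    flags Mode: u32 {
        const R = 0b001,
        const W = 0b010,
    }
}

fn main() {
    // 0b101 carries the unknown bit 0b100.
    assert_eq!(Mode::from_bits(0b101), None);       // unknown bit: rejected
    assert_eq!(Mode::from_bits_truncate(0b101), R); // unknown bit: dropped
    assert_eq!(format!("{:?}", R | W), "R | W");    // Debug lists set flags
}
```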

third_party/rust/bitflags-0.7.0/tests/external.rs
@@ -1,21 +0,0 @@
#![allow(dead_code)]

#[macro_use]
extern crate bitflags;

bitflags! {
    /// baz
    flags Flags: u32 {
        const A = 0b00000001,
        #[doc = "bar"]
        const B = 0b00000010,
        const C = 0b00000100,
        #[doc = "foo"]
        const ABC = A.bits | B.bits | C.bits,
    }
}

#[test]
fn smoke() {
    assert_eq!(ABC, A | B | C);
}

third_party/rust/bitflags-0.7.0/tests/external_no_std.rs
@@ -1,22 +0,0 @@
#![allow(dead_code)]
#![no_std]

#[macro_use]
extern crate bitflags;

bitflags! {
    /// baz
    flags Flags: u32 {
        const A = 0b00000001,
        #[doc = "bar"]
        const B = 0b00000010,
        const C = 0b00000100,
        #[doc = "foo"]
        const ABC = A.bits | B.bits | C.bits,
    }
}

#[test]
fn smoke() {
    assert_eq!(ABC, A | B | C);
}

third_party/rust/bytes/.cargo-checksum.json
@@ -1 +1 @@
{"files":{"CHANGELOG.md":"c6b490cbd81117cd0500e8dc26ca74cdf04eb49639ac0287eef559d7791cde1f","Cargo.toml":"3f1fe6c7e1e0bb164730bb584a58e128587dd742cfd1ab6bcda4c482be694bf5","LICENSE-APACHE":"01b5abb4a95cc87b220efbd67a1e99c74bef3d744806dd44b4d57e81db814962","LICENSE-MIT":"d4784f55731ba75b77ad73a52808914b26b2f93b69dd4c03249528a75afbd946","README.md":"7f5f585db959c73bcb1e8afd52b1c4110e57c2264a387f713b388f98181faebf","benches/bytes.rs":"bc1ef63dae52f111c78009399b16308e9e3c454b3ab5c46f89626e246fce3bd4","ci/before_deploy.ps1":"a8ee0204dd1397a245a47626fecd98eff5da76e12b15139c06271b3cc309a3e1","ci/before_deploy.sh":"ea008e2c544482cba5b659c17887ccd5354779c629096f28e667d40391299cc5","ci/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/script.sh":"4e6f6b7df02d316ce5166a3526dc6bca6b6d051dbc5bd6d5b28a7c79fc646834","src/buf/buf.rs":"a8a26bb22fd5becd2062e756fc272eb6e09606e9e40120c4999634cb068b1837","src/buf/buf_mut.rs":"35e7fee4727f1628bc899216a74f9652235be255a035687d56bf8df71ebd29a4","src/buf/chain.rs":"3a4f88879d27240e84e58bbeddf3f7c0958d0d81f4707245199b53e922029a26","src/buf/from_buf.rs":"949683c6a08099b280bd324d0c8646b1d6ff80af4d3e9397edb76cc2f1b18c88","src/buf/into_buf.rs":"d982cb82f3f2ddba863366c36f9f6041b2076e7bb8906e882e47ef65742974db","src/buf/iter.rs":"325428e4f913beb602f6451b59847d4c8658ec23939a15f7b145733969c17f03","src/buf/mod.rs":"4f385ce47d6d19a064a1dbec3339e95e116aa9b501eb9d8a47030c2794e1ee9e","src/buf/reader.rs":"62098e87bd1aa8b7f57ed4a4d1b5417462f01ad2cfebfbac46b6ce7f00ea0192","src/buf/take.rs":"0bdd0720afc546c999e5a3125f20b6f31a5692b37f7218c25f414773e2702f3d","src/buf/writer.rs":"4a28c1d362e837682a4b3197732a6dbb4072dc660f0dbba18616679adf8a60f2","src/bytes.rs":"7b1ba792e6062ac9453b46bf1f8af7ea7784ccb142d38b40491b1a3c6d2f2e5a","src/debug.rs":"f01f07b199994400a62aa872344a19737198c8bce0fdc5a4b5b34d9cd37dee75","src/lib.rs":"cf5e336f8e04a35204e092eb9a6bf0fd8dc1cf8c639b5bb45f1298e7178deef4","src/serde.rs":"e8d0fe3630e173272756fb24a8c3ccb112f4cb551b8b88b64f669a71f39ef83b","tests/test_buf.rs":"5a29764cdc3f7c1eda563562dea1b624b923c088330eb4b894c28eb4e0faaf87","tests/test_buf_mut.rs":"5aefacb92183c747c9e91a469d675d6490618742ee5982d74af220faa9343ef1","tests/test_bytes.rs":"5fbd44ae30dc07883b5c5a5e6d8c91037525dc0cf6cfdcfb78033c3867089665","tests/test_chain.rs":"7bda7550927cf7799c708fedaaf4cd2924ed3fd800f30ef126d6c9efe48c3986","tests/test_debug.rs":"232f8a604668a61dc580eb064cf0fbc21f664182928438710c7cfde14bd637f4","tests/test_from_buf.rs":"9bf743c77e69c643d0a7673426547dacaedbcc65028a26cf5864eb6714e4897a","tests/test_iter.rs":"bc8a5da0b3cc7e5a5dc37e91dd2a3ca3fc78ba74b087883473043be45cd9b265","tests/test_serde.rs":"98e0ab121153a7ead47538257ac7fc7d5db081fc35050552b5e5dc9500b414f9","tests/test_take.rs":"bb81822eec5d3774bd2626f0f29b543d3651f4f5a95c51dfe8f93dec8b4f8e94"},"package":"d828f97b58cc5de3e40c421d0cf2132d6b2da4ee0e11b8632fa838f0f9333ad6"}
{"files":{"CHANGELOG.md":"55941e30721c4b104cc8f84473da5acd0cd57903d66e8fd029b8c5160d99ed53","Cargo.toml":"f71e10b42ed8637ed615222f6d9e2af5df707f7f3d9d4fd203358c2af87b7ff0","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"3ca600d7b4175eee634621a870904fe5ec761e6fd623f745423d378dec1bfd51","benches/bytes.rs":"a60889c35cf76faf2b403f94d3ab2831a569f2e1f6e4cc4d5e88f3c26bddb8b0","ci/before_deploy.ps1":"a8ee0204dd1397a245a47626fecd98eff5da76e12b15139c06271b3cc309a3e1","ci/before_deploy.sh":"ea008e2c544482cba5b659c17887ccd5354779c629096f28e667d40391299cc5","ci/install.sh":"8b165fc99df296261fcc9cdcbc8b8a177c11c505cdc9255cc19efb66cb0055db","ci/script.sh":"4e6f6b7df02d316ce5166a3526dc6bca6b6d051dbc5bd6d5b28a7c79fc646834","ci/tsan":"905d22267f7493550d123b1482fc1a7f4b24e8cbc4ae4f0e0c2d42383e79ad83","src/buf/buf.rs":"1b5ff3ab694380fe59588b8d195111ba663c5f8901b272b531851deb26e4629a","src/buf/buf_mut.rs":"d2f54e9c64b86c8ddd325d40b3c8e1b2132d361937bac3b5fccb7a81154b89b8","src/buf/chain.rs":"3a4f88879d27240e84e58bbeddf3f7c0958d0d81f4707245199b53e922029a26","src/buf/from_buf.rs":"949683c6a08099b280bd324d0c8646b1d6ff80af4d3e9397edb76cc2f1b18c88","src/buf/into_buf.rs":"b6e35d34533fae229f5209b95a39a1c35485f48a873a1d357d99218c486b0b95","src/buf/iter.rs":"325428e4f913beb602f6451b59847d4c8658ec23939a15f7b145733969c17f03","src/buf/mod.rs":"4f385ce47d6d19a064a1dbec3339e95e116aa9b501eb9d8a47030c2794e1ee9e","src/buf/reader.rs":"62098e87bd1aa8b7f57ed4a4d1b5417462f01ad2cfebfbac46b6ce7f00ea0192","src/buf/take.rs":"0bdd0720afc546c999e5a3125f20b6f31a5692b37f7218c25f414773e2702f3d","src/buf/writer.rs":"4a28c1d362e837682a4b3197732a6dbb4072dc660f0dbba18616679adf8a60f2","src/bytes.rs":"546f2ef082656be2639314994d4228833f331747578a9ebf69075d2bcec0ae2d","src/debug.rs":"a8bd8062e7e500fdc5a79cb6c848fb860be8359d95e1c91034777fe33c78d54e","src/lib.rs":"fb61bba13236978f2c3b93cc39eb4a99c02f1ecd539c917a8380e5d344e67706","src/serde.rs":"e8d0fe3630e173272756fb24a8c3ccb112f4cb551b8b88b64f669a71f39ef83b","tests/test_buf.rs":"6409f32f734969bebeffa7592fed531953d252c5a639e422b6e4b14ec024b1d5","tests/test_buf_mut.rs":"a6a653d5053340b0254900c33e36df6db1421f821c3e985be0044b1b447ecedc","tests/test_bytes.rs":"92ae28671dee4ab91c7e0366e094b009c547defd8fd1c977520e5ad574eea70d","tests/test_chain.rs":"3fe1f28f3bce4377f8ed506718f95f3ed3ebaf251a1cb43b2705331e3dd6b43a","tests/test_debug.rs":"4cfd44c30d0b8f7c5eb8e8916ad7436e9f538732fe9f4b696dc22b84c31ac64a","tests/test_from_buf.rs":"9bf743c77e69c643d0a7673426547dacaedbcc65028a26cf5864eb6714e4897a","tests/test_iter.rs":"bc8a5da0b3cc7e5a5dc37e91dd2a3ca3fc78ba74b087883473043be45cd9b265","tests/test_serde.rs":"98e0ab121153a7ead47538257ac7fc7d5db081fc35050552b5e5dc9500b414f9","tests/test_take.rs":"bb81822eec5d3774bd2626f0f29b543d3651f4f5a95c51dfe8f93dec8b4f8e94"},"package":"e178b8e0e239e844b083d5a0d4a156b2654e67f9f80144d48398fcd736a24fb8"}

third_party/rust/bytes/CHANGELOG.md
@@ -1,3 +1,27 @@
# 0.4.9 (July 12, 2018)

* Add 128 bit number support behind a feature flag (#209).
* Implement `IntoBuf` for `&mut [u8]`

# 0.4.8 (May 25, 2018)

* Fix panic in `BytesMut` `FromIterator` implementation.
* Bytes: Recycle space when reserving space in vec mode (#197).
* Bytes: Add resize fn (#203).

# 0.4.7 (April 27, 2018)

* Make `Buf` and `BufMut` usable as trait objects (#186).
* impl BorrowMut for BytesMut (#185).
* Improve accessor performance (#195).

# 0.4.6 (Janary 8, 2018)

* Implement FromIterator for Bytes/BytesMut (#148).
* Add `advance` fn to Bytes/BytesMut (#166).
* Add `unsplit` fn to `BytesMut` (#162, #173).
* Improvements to Bytes split fns (#92).

# 0.4.5 (August 12, 2017)

* Fix range bug in `Take::bytes`
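
Several of these 0.4.9 changes surface later in this commit: the `src/buf/buf.rs` hunks below deprecate the byte-order-generic getters in favor of explicit `_be`/`_le` variants. A short migration sketch based on the doc examples in those hunks:

```rust
// Migration sketch for the bytes 0.4.9 Buf getters: the generic
// get_u16::<BigEndian>() form is now #[deprecated]; the explicit
// get_u16_be() form replaces it.
extern crate bytes;

use bytes::Buf;
use std::io::Cursor;

fn main() {
    let mut buf = Cursor::new(b"\x08\x09 hello");
    // 0.4.5 style (deprecated): buf.get_u16::<bytes::BigEndian>()
    assert_eq!(0x0809, buf.get_u16_be()); // 0.4.9 style
}
```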

third_party/rust/bytes/Cargo.toml
@@ -12,7 +12,7 @@

[package]
name = "bytes"
version = "0.4.5"
version = "0.4.9"
authors = ["Carl Lerche <me@carllerche.com>"]
exclude = [".gitignore", ".travis.yml", "deploy.sh", "bench/**/*", "test/**/*"]
description = "Types and traits for working with bytes"
@@ -21,16 +21,21 @@ documentation = "https://carllerche.github.io/bytes/bytes"
readme = "README.md"
keywords = ["buffers", "zero-copy", "io"]
categories = ["network-programming", "data-structures"]
license = "MIT/Apache-2.0"
license = "MIT"
repository = "https://github.com/carllerche/bytes"
[package.metadata.docs.rs]
features = ["i128"]
[dependencies.byteorder]
version = "1.0.0"
version = "1.1.0"

[dependencies.iovec]
version = "0.1"

[dependencies.serde]
version = "1.0"
optional = true

[dependencies.iovec]
version = "0.1"
[dev-dependencies.serde_test]
version = "1.0"

[features]
i128 = ["byteorder/i128"]

third_party/rust/bytes/LICENSE
@@ -1,4 +1,4 @@
Copyright (c) 2016 Alex Crichton
Copyright (c) 2018 Carl Lerche

Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

third_party/rust/bytes/README.md
@@ -33,10 +33,13 @@ Serde support is optional and disabled by default. To enable use the feature `se
bytes = { version = "0.4", features = ["serde"] }
```

# License
## License

`bytes` is primarily distributed under the terms of both the MIT license and the
Apache License (Version 2.0), with portions covered by various BSD-like
licenses.
This project is licensed under the [MIT license](LICENSE).

### Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in `bytes` by you, shall be licensed as MIT, without any additional
terms or conditions.

See LICENSE-APACHE, and LICENSE-MIT for details.

third_party/rust/bytes/benches/bytes.rs
@@ -29,6 +29,18 @@ fn alloc_big(b: &mut Bencher) {
    })
}

#[bench]
fn split_off_and_drop(b: &mut Bencher) {
    b.iter(|| {
        for _ in 0..1024 {
            let v = vec![10; 200];
            let mut b = Bytes::from(v);
            test::black_box(b.split_off(100));
            test::black_box(b);
        }
    })
}

#[bench]
fn deref_unique(b: &mut Bencher) {
    let mut buf = BytesMut::with_capacity(4096);

@@ -101,6 +113,39 @@ fn deref_two(b: &mut Bencher) {
    })
}

#[bench]
fn clone_inline(b: &mut Bencher) {
    let bytes = Bytes::from_static(b"hello world");

    b.iter(|| {
        for _ in 0..1024 {
            test::black_box(&bytes.clone());
        }
    })
}

#[bench]
fn clone_static(b: &mut Bencher) {
    let bytes = Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());

    b.iter(|| {
        for _ in 0..1024 {
            test::black_box(&bytes.clone());
        }
    })
}

#[bench]
fn clone_arc(b: &mut Bencher) {
    let bytes = Bytes::from("hello world 1234567890 and have a good byte 0987654321".as_bytes());

    b.iter(|| {
        for _ in 0..1024 {
            test::black_box(&bytes.clone());
        }
    })
}

#[bench]
fn alloc_write_split_to_mid(b: &mut Bencher) {
    b.iter(|| {

third_party/rust/bytes/ci/tsan
@@ -0,0 +1,21 @@
# TSAN suppressions file for `bytes`

# TSAN does not understand fences and `Arc::drop` is implemented using a fence.
# This causes many false positives.
race:Arc*drop
race:arc*Weak*drop

# `std` mpsc is not used in any Bytes code base. This race is triggered by some
# rust runtime logic.
race:std*mpsc_queue

# Not sure why this is warning, but it is in the test harness and not the library.
race:TestEvent*clone
race:test::run_tests_console::*closure

# Probably more fences in std.
race:__call_tls_dtors

# `is_inline_or_static` is explicitly called concurrently without synchronization.
# The safety explanation can be found in a comment.
race:Inner::is_inline_or_static

@ -1,9 +1,41 @@
|
|||
use super::{IntoBuf, Take, Reader, Iter, FromBuf, Chain};
|
||||
use byteorder::ByteOrder;
|
||||
use byteorder::{BigEndian, ByteOrder, LittleEndian};
|
||||
use iovec::IoVec;
|
||||
|
||||
use std::{cmp, io, ptr};
|
||||
|
||||
macro_rules! buf_get_impl {
|
||||
($this:ident, $size:expr, $conv:path) => ({
|
||||
// try to convert directly from the bytes
|
||||
let ret = {
|
||||
// this Option<ret> trick is to avoid keeping a borrow on self
|
||||
// when advance() is called (mut borrow) and to call bytes() only once
|
||||
if let Some(src) = $this.bytes().get(..($size)) {
|
||||
Some($conv(src))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
};
|
||||
if let Some(ret) = ret {
|
||||
// if the direct convertion was possible, advance and return
|
||||
$this.advance($size);
|
||||
return ret;
|
||||
} else {
|
||||
// if not we copy the bytes in a temp buffer then convert
|
||||
let mut buf = [0; ($size)];
|
||||
$this.copy_to_slice(&mut buf); // (do the advance)
|
||||
return $conv(&buf);
|
||||
}
|
||||
});
|
||||
($this:ident, $buf_size:expr, $conv:path, $len_to_read:expr) => ({
|
||||
// The same trick as above does not improve the best case speed.
|
||||
// It seems to be linked to the way the method is optimised by the compiler
|
||||
let mut buf = [0; ($buf_size)];
|
||||
$this.copy_to_slice(&mut buf[..($len_to_read)]);
|
||||
return $conv(&buf[..($len_to_read)], $len_to_read);
|
||||
});
|
||||
}
|
||||
|
||||
/// Read bytes from a buffer.
|
||||
///
|
||||
/// A buffer stores bytes in memory such that read operations are infallible.
|
||||
|
@ -243,9 +275,10 @@ pub trait Buf {
|
|||
///
|
||||
/// This function panics if there is no more remaining data in `self`.
|
||||
fn get_u8(&mut self) -> u8 {
|
||||
let mut buf = [0; 1];
|
||||
self.copy_to_slice(&mut buf);
|
||||
buf[0]
|
||||
assert!(self.remaining() >= 1);
|
||||
let ret = self.bytes()[0];
|
||||
self.advance(1);
|
||||
ret
|
||||
}
|
||||
|
||||
/// Gets a signed 8 bit integer from `self`.
|
||||
|
@ -266,243 +299,608 @@ pub trait Buf {
|
|||
///
|
||||
/// This function panics if there is no more remaining data in `self`.
|
||||
fn get_i8(&mut self) -> i8 {
|
||||
let mut buf = [0; 1];
|
||||
self.copy_to_slice(&mut buf);
|
||||
buf[0] as i8
|
||||
assert!(self.remaining() >= 1);
|
||||
let ret = self.bytes()[0] as i8;
|
||||
self.advance(1);
|
||||
ret
|
||||
}
|
||||
|
||||
/// Gets an unsigned 16 bit integer from `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x08\x09 hello");
|
||||
/// assert_eq!(0x0809, buf.get_u16::<BigEndian>());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u16<T: ByteOrder>(&mut self) -> u16 {
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_u16_be or get_u16_le")]
|
||||
fn get_u16<T: ByteOrder>(&mut self) -> u16 where Self: Sized {
|
||||
let mut buf = [0; 2];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_u16(&buf)
|
||||
}
|
||||
|
||||
/// Gets a signed 16 bit integer from `self` in the specified byte order.
|
||||
/// Gets an unsigned 16 bit integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x08\x09 hello");
|
||||
/// assert_eq!(0x0809, buf.get_i16::<BigEndian>());
|
||||
/// assert_eq!(0x0809, buf.get_u16_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i16<T: ByteOrder>(&mut self) -> i16 {
|
||||
fn get_u16_be(&mut self) -> u16 {
|
||||
buf_get_impl!(self, 2, BigEndian::read_u16);
|
||||
}
|
||||
|
||||
/// Gets an unsigned 16 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x09\x08 hello");
|
||||
/// assert_eq!(0x0809, buf.get_u16_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u16_le(&mut self) -> u16 {
|
||||
buf_get_impl!(self, 2, LittleEndian::read_u16);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_i16_be or get_i16_le")]
|
||||
fn get_i16<T: ByteOrder>(&mut self) -> i16 where Self: Sized {
|
||||
let mut buf = [0; 2];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_i16(&buf)
|
||||
}
|
||||
|
||||
/// Gets an unsigned 32 bit integer from `self` in the specified byte order.
|
||||
/// Gets a signed 16 bit integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello");
|
||||
/// assert_eq!(0x0809A0A1, buf.get_u32::<BigEndian>());
|
||||
/// let mut buf = Cursor::new(b"\x08\x09 hello");
|
||||
/// assert_eq!(0x0809, buf.get_i16_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u32<T: ByteOrder>(&mut self) -> u32 {
|
||||
fn get_i16_be(&mut self) -> i16 {
|
||||
buf_get_impl!(self, 2, BigEndian::read_i16);
|
||||
}
|
||||
|
||||
/// Gets a signed 16 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x09\x08 hello");
|
||||
/// assert_eq!(0x0809, buf.get_i16_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i16_le(&mut self) -> i16 {
|
||||
buf_get_impl!(self, 2, LittleEndian::read_i16);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_u32_be or get_u32_le")]
|
||||
fn get_u32<T: ByteOrder>(&mut self) -> u32 where Self: Sized {
|
||||
let mut buf = [0; 4];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_u32(&buf)
|
||||
}
|
||||
|
||||
/// Gets a signed 32 bit integer from `self` in the specified byte order.
|
||||
/// Gets an unsigned 32 bit integer from `self` in the big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello");
|
||||
/// assert_eq!(0x0809A0A1, buf.get_i32::<BigEndian>());
|
||||
/// assert_eq!(0x0809A0A1, buf.get_u32_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i32<T: ByteOrder>(&mut self) -> i32 {
|
||||
fn get_u32_be(&mut self) -> u32 {
|
||||
buf_get_impl!(self, 4, BigEndian::read_u32);
|
||||
}
|
||||
|
||||
/// Gets an unsigned 32 bit integer from `self` in the little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello");
|
||||
/// assert_eq!(0x0809A0A1, buf.get_u32_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u32_le(&mut self) -> u32 {
|
||||
buf_get_impl!(self, 4, LittleEndian::read_u32);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_i32_be or get_i32_le")]
|
||||
fn get_i32<T: ByteOrder>(&mut self) -> i32 where Self: Sized {
|
||||
let mut buf = [0; 4];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_i32(&buf)
|
||||
}
|
||||
|
||||
/// Gets an unsigned 64 bit integer from `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello");
|
||||
/// assert_eq!(0x0102030405060708, buf.get_u64::<BigEndian>());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u64<T: ByteOrder>(&mut self) -> u64 {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_u64(&buf)
|
||||
}
|
||||
|
||||
/// Gets a signed 64 bit integer from `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello");
|
||||
/// assert_eq!(0x0102030405060708, buf.get_i64::<BigEndian>());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i64<T: ByteOrder>(&mut self) -> i64 {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_i64(&buf)
|
||||
}
|
||||
|
||||
/// Gets an unsigned n-byte integer from `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03 hello");
|
||||
/// assert_eq!(0x010203, buf.get_uint::<BigEndian>(3));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_uint<T: ByteOrder>(&mut self, nbytes: usize) -> u64 {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf[..nbytes]);
|
||||
T::read_uint(&buf[..nbytes], nbytes)
|
||||
}
|
||||
|
||||
/// Gets a signed n-byte integer from `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{Buf, BigEndian};
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03 hello");
|
||||
/// assert_eq!(0x010203, buf.get_int::<BigEndian>(3));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_int<T: ByteOrder>(&mut self, nbytes: usize) -> i64 {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf[..nbytes]);
|
||||
T::read_int(&buf[..nbytes], nbytes)
|
||||
}
|
||||
|
||||
-    /// Gets an IEEE754 single-precision (4 bytes) floating point number from
-    /// `self` in the specified byte order.
+    /// Gets a signed 32 bit integer from `self` in big-endian byte order.
    ///
    /// The current position is advanced by 4.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{Buf, BigEndian};
+    /// use bytes::Buf;
    /// use std::io::Cursor;
    ///
-    /// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello");
-    /// assert_eq!(1.2f32, buf.get_f32::<BigEndian>());
+    /// let mut buf = Cursor::new(b"\x08\x09\xA0\xA1 hello");
+    /// assert_eq!(0x0809A0A1, buf.get_i32_be());
    /// ```
    ///
    /// # Panics
    ///
    /// This function panics if there is not enough remaining data in `self`.
-    fn get_f32<T: ByteOrder>(&mut self) -> f32 {
-        let mut buf = [0; 4];
-        self.copy_to_slice(&mut buf);
-        T::read_f32(&buf)
+    fn get_i32_be(&mut self) -> i32 {
+        buf_get_impl!(self, 4, BigEndian::read_i32);
    }
|
||||
|
||||
-    /// Gets an IEEE754 double-precision (8 bytes) floating point number from
-    /// `self` in the specified byte order.
+    /// Gets a signed 32 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\xA1\xA0\x09\x08 hello");
|
||||
/// assert_eq!(0x0809A0A1, buf.get_i32_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i32_le(&mut self) -> i32 {
|
||||
buf_get_impl!(self, 4, LittleEndian::read_i32);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_u64_be or get_u64_le")]
|
||||
fn get_u64<T: ByteOrder>(&mut self) -> u64 where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_u64(&buf)
|
||||
}
|
||||
|
||||
/// Gets an unsigned 64 bit integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello");
|
||||
/// assert_eq!(0x0102030405060708, buf.get_u64_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u64_be(&mut self) -> u64 {
|
||||
buf_get_impl!(self, 8, BigEndian::read_u64);
|
||||
}
|
||||
|
||||
/// Gets an unsigned 64 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello");
|
||||
/// assert_eq!(0x0102030405060708, buf.get_u64_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_u64_le(&mut self) -> u64 {
|
||||
buf_get_impl!(self, 8, LittleEndian::read_u64);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_i64_be or get_i64_le")]
|
||||
fn get_i64<T: ByteOrder>(&mut self) -> i64 where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_i64(&buf)
|
||||
}
|
||||
|
||||
/// Gets a signed 64 bit integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08 hello");
|
||||
/// assert_eq!(0x0102030405060708, buf.get_i64_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i64_be(&mut self) -> i64 {
|
||||
buf_get_impl!(self, 8, BigEndian::read_i64);
|
||||
}
|
||||
|
||||
/// Gets a signed 64 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x08\x07\x06\x05\x04\x03\x02\x01 hello");
|
||||
/// assert_eq!(0x0102030405060708, buf.get_i64_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_i64_le(&mut self) -> i64 {
|
||||
buf_get_impl!(self, 8, LittleEndian::read_i64);
|
||||
}
|
||||
|
||||
/// Gets an unsigned 128 bit integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello");
|
||||
/// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn get_u128_be(&mut self) -> u128 {
|
||||
buf_get_impl!(self, 16, BigEndian::read_u128);
|
||||
}
|
||||
|
||||
/// Gets an unsigned 128 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello");
|
||||
/// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn get_u128_le(&mut self) -> u128 {
|
||||
buf_get_impl!(self, 16, LittleEndian::read_u128);
|
||||
}
|
||||
|
||||
/// Gets a signed 128 bit integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello");
|
||||
/// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn get_i128_be(&mut self) -> i128 {
|
||||
buf_get_impl!(self, 16, BigEndian::read_i128);
|
||||
}
|
||||
|
||||
/// Gets a signed 128 bit integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello");
|
||||
/// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn get_i128_le(&mut self) -> i128 {
|
||||
buf_get_impl!(self, 16, LittleEndian::read_i128);
|
||||
}
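
Editor's note: a short usage sketch of the new 128-bit getters; it assumes the crate is built with its `i128` cargo feature (the name comes from the `#[cfg(feature = "i128")]` gates above) and a compiler with `u128` support:

extern crate bytes;

use bytes::Buf;
use std::io::Cursor;

fn main() {
    // Sixteen zero bytes decode to zero in either byte order.
    let mut buf = Cursor::new(vec![0u8; 16]);
    assert_eq!(0u128, buf.get_u128_be()); // advances the position by 16
}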
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_uint_be or get_uint_le")]
|
||||
fn get_uint<T: ByteOrder>(&mut self, nbytes: usize) -> u64 where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf[..nbytes]);
|
||||
T::read_uint(&buf[..nbytes], nbytes)
|
||||
}
|
||||
|
||||
    /// Gets an unsigned n-byte integer from `self` in big-endian byte order.
    ///
    /// The current position is advanced by `nbytes`.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{Buf, BigEndian};
+    /// use bytes::Buf;
    /// use std::io::Cursor;
    ///
-    /// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello");
-    /// assert_eq!(1.2f64, buf.get_f64::<BigEndian>());
+    /// let mut buf = Cursor::new(b"\x01\x02\x03 hello");
+    /// assert_eq!(0x010203, buf.get_uint_be(3));
    /// ```
    ///
    /// # Panics
    ///
    /// This function panics if there is not enough remaining data in `self`.
-    fn get_f64<T: ByteOrder>(&mut self) -> f64 {
+    fn get_uint_be(&mut self, nbytes: usize) -> u64 {
+        buf_get_impl!(self, 8, BigEndian::read_uint, nbytes);
    }
|
||||
|
||||
/// Gets an unsigned n-byte integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x03\x02\x01 hello");
|
||||
/// assert_eq!(0x010203, buf.get_uint_le(3));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_uint_le(&mut self, nbytes: usize) -> u64 {
|
||||
buf_get_impl!(self, 8, LittleEndian::read_uint, nbytes);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_int_be or get_int_le")]
|
||||
fn get_int<T: ByteOrder>(&mut self, nbytes: usize) -> i64 where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf[..nbytes]);
|
||||
T::read_int(&buf[..nbytes], nbytes)
|
||||
}
|
||||
|
||||
/// Gets a signed n-byte integer from `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x01\x02\x03 hello");
|
||||
/// assert_eq!(0x010203, buf.get_int_be(3));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_int_be(&mut self, nbytes: usize) -> i64 {
|
||||
buf_get_impl!(self, 8, BigEndian::read_int, nbytes);
|
||||
}
|
||||
|
||||
/// Gets a signed n-byte integer from `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x03\x02\x01 hello");
|
||||
/// assert_eq!(0x010203, buf.get_int_le(3));
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_int_le(&mut self, nbytes: usize) -> i64 {
|
||||
buf_get_impl!(self, 8, LittleEndian::read_int, nbytes);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_f32_be or get_f32_le")]
|
||||
fn get_f32<T: ByteOrder>(&mut self) -> f32 where Self: Sized {
|
||||
let mut buf = [0; 4];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_f32(&buf)
|
||||
}
|
||||
|
||||
/// Gets an IEEE754 single-precision (4 bytes) floating point number from
|
||||
/// `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x3F\x99\x99\x9A hello");
|
||||
/// assert_eq!(1.2f32, buf.get_f32_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_f32_be(&mut self) -> f32 {
|
||||
buf_get_impl!(self, 4, BigEndian::read_f32);
|
||||
}
|
||||
|
||||
/// Gets an IEEE754 single-precision (4 bytes) floating point number from
|
||||
/// `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x9A\x99\x99\x3F hello");
|
||||
/// assert_eq!(1.2f32, buf.get_f32_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_f32_le(&mut self) -> f32 {
|
||||
buf_get_impl!(self, 4, LittleEndian::read_f32);
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use get_f64_be or get_f64_le")]
|
||||
fn get_f64<T: ByteOrder>(&mut self) -> f64 where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
self.copy_to_slice(&mut buf);
|
||||
T::read_f64(&buf)
|
||||
}
|
||||
|
||||
/// Gets an IEEE754 double-precision (8 bytes) floating point number from
|
||||
/// `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello");
|
||||
/// assert_eq!(1.2f64, buf.get_f64_be());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_f64_be(&mut self) -> f64 {
|
||||
buf_get_impl!(self, 8, BigEndian::read_f64);
|
||||
}
|
||||
|
||||
/// Gets an IEEE754 double-precision (8 bytes) floating point number from
|
||||
/// `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::Buf;
|
||||
/// use std::io::Cursor;
|
||||
///
|
||||
/// let mut buf = Cursor::new(b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello");
|
||||
/// assert_eq!(1.2f64, buf.get_f64_le());
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining data in `self`.
|
||||
fn get_f64_le(&mut self) -> f64 {
|
||||
buf_get_impl!(self, 8, LittleEndian::read_f64);
|
||||
}
|
||||
|
||||
/// Transforms a `Buf` into a concrete buffer.
|
||||
///
|
||||
/// `collect()` can operate on any value that implements `Buf`, and turn it
|
||||
|
@@ -749,3 +1147,7 @@ impl Buf for Option<[u8; 1]> {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The existence of this function makes the compiler check whether the Buf
// trait is "object-safe" or not.
|
||||
fn _assert_trait_object(_b: &Buf) {}
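
Editor's note: the `where Self: Sized` bounds added to the deprecated generic methods above are what keep `Buf` usable as a trait object; a minimal standalone sketch of the rule (the `Demo` trait is illustrative, not from the crate):

trait Demo {
    fn concrete(&self) -> u8; // fine in a vtable

    // A generic method would make the trait non-object-safe, so it is
    // excluded from trait objects with a `Self: Sized` bound, exactly
    // like the deprecated `get_*<T: ByteOrder>` methods above.
    fn generic<T: Default>(&self) -> T where Self: Sized {
        T::default()
    }
}

// Compiles only because `generic` carries the `Self: Sized` bound;
// this mirrors what `_assert_trait_object` checks for `Buf`.
fn _assert_demo_object(_d: &Demo) {}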
|
||||
|
|
|
@@ -1,5 +1,5 @@
|
|||
use super::{IntoBuf, Writer};
|
||||
-use byteorder::ByteOrder;
+use byteorder::{LittleEndian, ByteOrder, BigEndian};
|
||||
use iovec::IoVec;
|
||||
|
||||
use std::{cmp, io, ptr, usize};
|
||||
|
@@ -338,41 +338,25 @@ pub trait BufMut {
|
|||
self.put_slice(&src)
|
||||
}
|
||||
|
||||
-    /// Writes an unsigned 16 bit integer to `self` in the specified byte order.
-    ///
-    /// The current position is advanced by 2.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use bytes::{BufMut, BigEndian};
-    ///
-    /// let mut buf = vec![];
-    /// buf.put_u16::<BigEndian>(0x0809);
-    /// assert_eq!(buf, b"\x08\x09");
-    /// ```
-    ///
-    /// # Panics
-    ///
-    /// This function panics if there is not enough remaining capacity in
-    /// `self`.
-    fn put_u16<T: ByteOrder>(&mut self, n: u16) {
+    #[doc(hidden)]
+    #[deprecated(note="use put_u16_be or put_u16_le")]
+    fn put_u16<T: ByteOrder>(&mut self, n: u16) where Self: Sized {
        let mut buf = [0; 2];
        T::write_u16(&mut buf, n);
        self.put_slice(&buf)
    }
|
||||
|
||||
-    /// Writes a signed 16 bit integer to `self` in the specified byte order.
+    /// Writes an unsigned 16 bit integer to `self` in big-endian byte order.
    ///
    /// The current position is advanced by 2.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{BufMut, BigEndian};
+    /// use bytes::BufMut;
    ///
    /// let mut buf = vec![];
-    /// buf.put_i16::<BigEndian>(0x0809);
+    /// buf.put_u16_be(0x0809);
    /// assert_eq!(buf, b"\x08\x09");
    /// ```
|
||||
///
|
||||
|
@@ -380,47 +364,111 @@ pub trait BufMut {
|
|||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
-    fn put_i16<T: ByteOrder>(&mut self, n: i16) {
+    fn put_u16_be(&mut self, n: u16) {
        let mut buf = [0; 2];
+        BigEndian::write_u16(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an unsigned 16 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_u16_le(0x0809);
|
||||
/// assert_eq!(buf, b"\x09\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_u16_le(&mut self, n: u16) {
|
||||
let mut buf = [0; 2];
|
||||
LittleEndian::write_u16(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_i16_be or put_i16_le")]
|
||||
fn put_i16<T: ByteOrder>(&mut self, n: i16) where Self: Sized {
|
||||
let mut buf = [0; 2];
|
||||
T::write_i16(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
-    /// Writes an unsigned 32 bit integer to `self` in the specified byte order.
+    /// Writes a signed 16 bit integer to `self` in big-endian byte order.
    ///
-    /// The current position is advanced by 4.
+    /// The current position is advanced by 2.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{BufMut, BigEndian};
+    /// use bytes::BufMut;
    ///
    /// let mut buf = vec![];
-    /// buf.put_u32::<BigEndian>(0x0809A0A1);
-    /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+    /// buf.put_i16_be(0x0809);
+    /// assert_eq!(buf, b"\x08\x09");
    /// ```
    ///
    /// # Panics
    ///
    /// This function panics if there is not enough remaining capacity in
    /// `self`.
-    fn put_u32<T: ByteOrder>(&mut self, n: u32) {
+    fn put_i16_be(&mut self, n: i16) {
+        let mut buf = [0; 2];
+        BigEndian::write_i16(&mut buf, n);
+        self.put_slice(&buf)
    }
|
||||
|
||||
/// Writes a signed 16 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 2.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i16_le(0x0809);
|
||||
/// assert_eq!(buf, b"\x09\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_i16_le(&mut self, n: i16) {
|
||||
let mut buf = [0; 2];
|
||||
LittleEndian::write_i16(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_u32_be or put_u32_le")]
|
||||
fn put_u32<T: ByteOrder>(&mut self, n: u32) where Self: Sized {
|
||||
let mut buf = [0; 4];
|
||||
T::write_u32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
-    /// Writes a signed 32 bit integer to `self` in the specified byte order.
+    /// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
    ///
    /// The current position is advanced by 4.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{BufMut, BigEndian};
+    /// use bytes::BufMut;
    ///
    /// let mut buf = vec![];
-    /// buf.put_i32::<BigEndian>(0x0809A0A1);
+    /// buf.put_u32_be(0x0809A0A1);
    /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
    /// ```
    ///
|
||||
|
@@ -428,120 +476,440 @@ pub trait BufMut {
|
|||
    ///
    /// This function panics if there is not enough remaining capacity in
    /// `self`.
-    fn put_i32<T: ByteOrder>(&mut self, n: i32) {
+    fn put_u32_be(&mut self, n: u32) {
        let mut buf = [0; 4];
-        T::write_i32(&mut buf, n);
+        BigEndian::write_u32(&mut buf, n);
        self.put_slice(&buf)
    }
|
||||
|
||||
/// Writes an unsigned 64 bit integer to `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{BufMut, BigEndian};
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_u64::<BigEndian>(0x0102030405060708);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_u64<T: ByteOrder>(&mut self, n: u64) {
|
||||
let mut buf = [0; 8];
|
||||
T::write_u64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 64 bit integer to `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{BufMut, BigEndian};
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i64::<BigEndian>(0x0102030405060708);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_i64<T: ByteOrder>(&mut self, n: i64) {
|
||||
let mut buf = [0; 8];
|
||||
T::write_i64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an unsigned n-byte integer to `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{BufMut, BigEndian};
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_uint::<BigEndian>(0x010203, 3);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_uint<T: ByteOrder>(&mut self, n: u64, nbytes: usize) {
|
||||
let mut buf = [0; 8];
|
||||
T::write_uint(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
/// Writes a signed n-byte integer to `self` in the specified byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::{BufMut, BigEndian};
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_int::<BigEndian>(0x010203, 3);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_int<T: ByteOrder>(&mut self, n: i64, nbytes: usize) {
|
||||
let mut buf = [0; 8];
|
||||
T::write_int(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
-    /// Writes an IEEE754 single-precision (4 bytes) floating point number to
-    /// `self` in the specified byte order.
+    /// Writes an unsigned 32 bit integer to `self` in little-endian byte order.
    ///
    /// The current position is advanced by 4.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{BufMut, BigEndian};
+    /// use bytes::BufMut;
    ///
    /// let mut buf = vec![];
-    /// buf.put_f32::<BigEndian>(1.2f32);
+    /// buf.put_u32_le(0x0809A0A1);
+    /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_u32_le(&mut self, n: u32) {
|
||||
let mut buf = [0; 4];
|
||||
LittleEndian::write_u32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_i32_be or put_i32_le")]
|
||||
fn put_i32<T: ByteOrder>(&mut self, n: i32) where Self: Sized {
|
||||
let mut buf = [0; 4];
|
||||
T::write_i32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 32 bit integer to `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i32_be(0x0809A0A1);
|
||||
/// assert_eq!(buf, b"\x08\x09\xA0\xA1");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_i32_be(&mut self, n: i32) {
|
||||
let mut buf = [0; 4];
|
||||
BigEndian::write_i32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 32 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i32_le(0x0809A0A1);
|
||||
/// assert_eq!(buf, b"\xA1\xA0\x09\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_i32_le(&mut self, n: i32) {
|
||||
let mut buf = [0; 4];
|
||||
LittleEndian::write_i32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_u64_be or put_u64_le")]
|
||||
fn put_u64<T: ByteOrder>(&mut self, n: u64) where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
T::write_u64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an unsigned 64 bit integer to `self` in the big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_u64_be(0x0102030405060708);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_u64_be(&mut self, n: u64) {
|
||||
let mut buf = [0; 8];
|
||||
BigEndian::write_u64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an unsigned 64 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_u64_le(0x0102030405060708);
|
||||
/// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_u64_le(&mut self, n: u64) {
|
||||
let mut buf = [0; 8];
|
||||
LittleEndian::write_u64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_i64_be or put_i64_le")]
|
||||
fn put_i64<T: ByteOrder>(&mut self, n: i64) where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
T::write_i64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 64 bit integer to `self` in the big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i64_be(0x0102030405060708);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_i64_be(&mut self, n: i64) {
|
||||
let mut buf = [0; 8];
|
||||
BigEndian::write_i64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 64 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i64_le(0x0102030405060708);
|
||||
/// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_i64_le(&mut self, n: i64) {
|
||||
let mut buf = [0; 8];
|
||||
LittleEndian::write_i64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_u128_be(0x01020304050607080910111213141516);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn put_u128_be(&mut self, n: u128) {
|
||||
let mut buf = [0; 16];
|
||||
BigEndian::write_u128(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an unsigned 128 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_u128_le(0x01020304050607080910111213141516);
|
||||
/// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn put_u128_le(&mut self, n: u128) {
|
||||
let mut buf = [0; 16];
|
||||
LittleEndian::write_u128(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 128 bit integer to `self` in the big-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i128_be(0x01020304050607080910111213141516);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn put_i128_be(&mut self, n: i128) {
|
||||
let mut buf = [0; 16];
|
||||
BigEndian::write_i128(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes a signed 128 bit integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// **NOTE:** This method requires the `i128` feature.
|
||||
/// The current position is advanced by 16.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_i128_le(0x01020304050607080910111213141516);
|
||||
/// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
#[cfg(feature = "i128")]
|
||||
fn put_i128_le(&mut self, n: i128) {
|
||||
let mut buf = [0; 16];
|
||||
LittleEndian::write_i128(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_uint_be or put_uint_le")]
|
||||
fn put_uint<T: ByteOrder>(&mut self, n: u64, nbytes: usize) where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
T::write_uint(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
/// Writes an unsigned n-byte integer to `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_uint_be(0x010203, 3);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_uint_be(&mut self, n: u64, nbytes: usize) {
|
||||
let mut buf = [0; 8];
|
||||
BigEndian::write_uint(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
/// Writes an unsigned n-byte integer to `self` in the little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_uint_le(0x010203, 3);
|
||||
/// assert_eq!(buf, b"\x03\x02\x01");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_uint_le(&mut self, n: u64, nbytes: usize) {
|
||||
let mut buf = [0; 8];
|
||||
LittleEndian::write_uint(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_int_be or put_int_le")]
|
||||
fn put_int<T: ByteOrder>(&mut self, n: i64, nbytes: usize) where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
T::write_int(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
/// Writes a signed n-byte integer to `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_int_be(0x010203, 3);
|
||||
/// assert_eq!(buf, b"\x01\x02\x03");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_int_be(&mut self, n: i64, nbytes: usize) {
|
||||
let mut buf = [0; 8];
|
||||
BigEndian::write_int(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
/// Writes a signed n-byte integer to `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by `nbytes`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_int_le(0x010203, 3);
|
||||
/// assert_eq!(buf, b"\x03\x02\x01");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_int_le(&mut self, n: i64, nbytes: usize) {
|
||||
let mut buf = [0; 8];
|
||||
LittleEndian::write_int(&mut buf, n, nbytes);
|
||||
self.put_slice(&buf[0..nbytes])
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_f32_be or put_f32_le")]
|
||||
fn put_f32<T: ByteOrder>(&mut self, n: f32) where Self: Sized {
|
||||
let mut buf = [0; 4];
|
||||
T::write_f32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an IEEE754 single-precision (4 bytes) floating point number to
|
||||
/// `self` in big-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_f32_be(1.2f32);
|
||||
/// assert_eq!(buf, b"\x3F\x99\x99\x9A");
|
||||
/// ```
|
||||
///
|
||||
|
@@ -549,24 +917,57 @@ pub trait BufMut {
|
|||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
-    fn put_f32<T: ByteOrder>(&mut self, n: f32) {
+    fn put_f32_be(&mut self, n: f32) {
        let mut buf = [0; 4];
-        T::write_f32(&mut buf, n);
+        BigEndian::write_f32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an IEEE754 single-precision (4 bytes) floating point number to
|
||||
/// `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 4.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_f32_le(1.2f32);
|
||||
/// assert_eq!(buf, b"\x9A\x99\x99\x3F");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_f32_le(&mut self, n: f32) {
|
||||
let mut buf = [0; 4];
|
||||
LittleEndian::write_f32(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
#[deprecated(note="use put_f64_be or put_f64_le")]
|
||||
fn put_f64<T: ByteOrder>(&mut self, n: f64) where Self: Sized {
|
||||
let mut buf = [0; 8];
|
||||
T::write_f64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
    /// Writes an IEEE754 double-precision (8 bytes) floating point number to
-    /// `self` in the specified byte order.
+    /// `self` in big-endian byte order.
    ///
    /// The current position is advanced by 8.
    ///
    /// # Examples
    ///
    /// ```
-    /// use bytes::{BufMut, BigEndian};
+    /// use bytes::BufMut;
    ///
    /// let mut buf = vec![];
-    /// buf.put_f64::<BigEndian>(1.2f64);
+    /// buf.put_f64_be(1.2f64);
    /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
|
||||
/// ```
|
||||
///
|
||||
|
@@ -574,9 +975,34 @@ pub trait BufMut {
|
|||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
-    fn put_f64<T: ByteOrder>(&mut self, n: f64) {
+    fn put_f64_be(&mut self, n: f64) {
        let mut buf = [0; 8];
-        T::write_f64(&mut buf, n);
+        BigEndian::write_f64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
|
||||
|
||||
/// Writes an IEEE754 double-precision (8 bytes) floating point number to
|
||||
/// `self` in little-endian byte order.
|
||||
///
|
||||
/// The current position is advanced by 8.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use bytes::BufMut;
|
||||
///
|
||||
/// let mut buf = vec![];
|
||||
/// buf.put_f64_le(1.2f64);
|
||||
/// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This function panics if there is not enough remaining capacity in
|
||||
/// `self`.
|
||||
fn put_f64_le(&mut self, n: f64) {
|
||||
let mut buf = [0; 8];
|
||||
LittleEndian::write_f64(&mut buf, n);
|
||||
self.put_slice(&buf)
|
||||
}
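
Editor's note: a small round-trip sketch tying the new suffixed `BufMut` writers to their `Buf` counterparts (a standalone example, not from the crate's test suite):

extern crate bytes;

use bytes::{Buf, BufMut};
use std::io::Cursor;

fn main() {
    // Write fixed-width values with explicit endianness...
    let mut out = vec![];
    out.put_u32_be(0x0102_0304);
    out.put_f64_le(1.2);

    // ...and read them back with the matching getters.
    let mut cur = Cursor::new(out);
    assert_eq!(0x0102_0304, cur.get_u32_be());
    assert_eq!(1.2, cur.get_f64_le());
}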
|
||||
|
||||
|
@@ -734,3 +1160,7 @@ impl BufMut for Vec<u8> {
|
|||
&mut slice::from_raw_parts_mut(ptr, cap)[len..]
|
||||
}
|
||||
}
|
||||
|
||||
// The existence of this function makes the compiler check whether the BufMut
// trait is "object-safe" or not.
|
||||
fn _assert_trait_object(_b: &BufMut) {}
|
||||
|
|
|
@@ -63,6 +63,14 @@ impl<'a> IntoBuf for &'a [u8] {
|
|||
}
|
||||
}
|
||||
|
||||
impl<'a> IntoBuf for &'a mut [u8] {
|
||||
type Buf = io::Cursor<&'a mut [u8]>;
|
||||
|
||||
fn into_buf(self) -> Self::Buf {
|
||||
io::Cursor::new(self)
|
||||
}
|
||||
}
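
Editor's note: a quick sketch of what the new `&'a mut [u8]` impl enables (it mirrors the `mut_into_buf` test added later in this patch):

extern crate bytes;

use bytes::{BufMut, IntoBuf};

fn main() {
    let mut v = vec![0u8; 4];
    // A mutable slice now converts into a writable cursor.
    (&mut v[..]).into_buf().put_u32_le(42);
    assert_eq!(&v[..], &b"\x2a\x00\x00\x00"[..]);
}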
|
||||
|
||||
impl<'a> IntoBuf for &'a str {
|
||||
type Buf = io::Cursor<&'a [u8]>;
|
||||
|
||||
|
|
Diff not shown because of its large size.
|
@@ -27,8 +27,8 @@ impl<'a> fmt::Debug for BsDebug<'a> {
|
|||
try!(write!(fmt, "\\{}", c as char));
|
||||
} else if c == b'\0' {
|
||||
try!(write!(fmt, "\\0"));
|
||||
-        // ASCII printable except space
-        } else if c > 0x20 && c < 0x7f {
+        // ASCII printable
+        } else if c >= 0x20 && c < 0x7f {
|
||||
try!(write!(fmt, "{}", c as char));
|
||||
} else {
|
||||
try!(write!(fmt, "\\x{:02x}", c));
|
||||
|
|
|
@@ -18,8 +18,8 @@
|
|||
//! using a reference count to track when the memory is no longer needed and can
|
||||
//! be freed.
|
||||
//!
|
||||
-//! A `Bytes` handle can be created directly from an existing byte store (such as &[u8]
-//! or Vec<u8>), but usually a `BytesMut` is used first and written to. For
+//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]`
+//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For
|
||||
//! example:
|
||||
//!
|
||||
//! ```rust
|
||||
|
@@ -69,7 +69,7 @@
|
|||
//! and `BufMut` are infallible.
|
||||
|
||||
#![deny(warnings, missing_docs, missing_debug_implementations)]
|
||||
-#![doc(html_root_url = "https://docs.rs/bytes/0.4")]
+#![doc(html_root_url = "https://docs.rs/bytes/0.4.9")]
|
||||
|
||||
extern crate byteorder;
|
||||
extern crate iovec;
|
||||
|
@@ -92,6 +92,7 @@ mod bytes;
|
|||
mod debug;
|
||||
pub use bytes::{Bytes, BytesMut};
|
||||
|
||||
#[deprecated]
|
||||
pub use byteorder::{ByteOrder, BigEndian, LittleEndian};
|
||||
|
||||
// Optional Serde support
|
||||
|
|
|
@@ -33,21 +33,26 @@ fn test_get_u8() {
|
|||
#[test]
|
||||
fn test_get_u16() {
|
||||
let buf = b"\x21\x54zomg";
|
||||
-    assert_eq!(0x2154, Cursor::new(buf).get_u16::<byteorder::BigEndian>());
-    assert_eq!(0x5421, Cursor::new(buf).get_u16::<byteorder::LittleEndian>());
+    assert_eq!(0x2154, Cursor::new(buf).get_u16_be());
+    assert_eq!(0x5421, Cursor::new(buf).get_u16_le());
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_get_u16_buffer_underflow() {
|
||||
let mut buf = Cursor::new(b"\x21");
|
||||
-    buf.get_u16::<byteorder::BigEndian>();
+    buf.get_u16_be();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_bufs_vec() {
|
||||
let buf = Cursor::new(b"hello world");
|
||||
-    let mut dst: [&IoVec; 2] = Default::default();
+    let b1: &[u8] = &mut [0];
+    let b2: &[u8] = &mut [0];
+
+    let mut dst: [&IoVec; 2] =
+        [b1.into(), b2.into()];
|
||||
|
||||
assert_eq!(1, buf.bytes_vec(&mut dst[..]));
|
||||
}
|
||||
|
|
|
@@ -41,11 +41,11 @@ fn test_put_u8() {
|
|||
#[test]
|
||||
fn test_put_u16() {
|
||||
let mut buf = Vec::with_capacity(8);
|
||||
-    buf.put_u16::<byteorder::BigEndian>(8532);
+    buf.put_u16_be(8532);
|
||||
assert_eq!(b"\x21\x54", &buf[..]);
|
||||
|
||||
buf.clear();
|
||||
-    buf.put_u16::<byteorder::LittleEndian>(8532);
+    buf.put_u16_le(8532);
|
||||
assert_eq!(b"\x54\x21", &buf[..]);
|
||||
}
|
||||
|
||||
|
|
|
@@ -1,6 +1,6 @@
|
|||
extern crate bytes;
|
||||
|
||||
-use bytes::{Bytes, BytesMut, BufMut};
+use bytes::{Bytes, BytesMut, BufMut, IntoBuf};
|
||||
|
||||
const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
|
||||
const SHORT: &'static [u8] = b"hello world";
|
||||
|
@@ -303,6 +303,13 @@ fn fns_defined_for_bytes_mut() {
|
|||
assert_eq!(&v[..], bytes);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mut_into_buf() {
|
||||
let mut v = vec![0, 0, 0, 0];
|
||||
let s = &mut v[..];
|
||||
s.into_buf().put_u32_le(42);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reserve_convert() {
|
||||
// Inline -> Vec
|
||||
|
@@ -350,16 +357,16 @@ fn reserve_growth() {
|
|||
|
||||
#[test]
|
||||
fn reserve_allocates_at_least_original_capacity() {
|
||||
-    let mut bytes = BytesMut::with_capacity(128);
+    let mut bytes = BytesMut::with_capacity(1024);

-    for i in 0..120 {
+    for i in 0..1020 {
|
||||
bytes.put(i as u8);
|
||||
}
|
||||
|
||||
let _other = bytes.take();
|
||||
|
||||
bytes.reserve(16);
|
||||
-    assert_eq!(bytes.capacity(), 128);
+    assert_eq!(bytes.capacity(), 1024);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@@ -378,6 +385,21 @@ fn reserve_max_original_capacity_value() {
|
|||
assert_eq!(bytes.capacity(), 64 * 1024);
|
||||
}
|
||||
|
||||
// Without either looking at the internals of the BytesMut or doing weird stuff
|
||||
// with the memory allocator, there's no good way to automatically verify from
|
||||
// within the program that this actually recycles memory. Instead, just exercise
|
||||
// the code path to ensure that the results are correct.
|
||||
#[test]
|
||||
fn reserve_vec_recycling() {
|
||||
let mut bytes = BytesMut::from(Vec::with_capacity(16));
|
||||
assert_eq!(bytes.capacity(), 16);
|
||||
bytes.put("0123456789012345");
|
||||
bytes.advance(10);
|
||||
assert_eq!(bytes.capacity(), 6);
|
||||
bytes.reserve(8);
|
||||
assert_eq!(bytes.capacity(), 16);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reserve_in_arc_unique_does_not_overallocate() {
|
||||
let mut bytes = BytesMut::with_capacity(1000);
|
||||
|
@@ -466,6 +488,44 @@ fn from_static() {
|
|||
assert_eq!(b, b"b"[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn advance_inline() {
|
||||
let mut a = Bytes::from(&b"hello world"[..]);
|
||||
a.advance(6);
|
||||
assert_eq!(a, &b"world"[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn advance_static() {
|
||||
let mut a = Bytes::from_static(b"hello world");
|
||||
a.advance(6);
|
||||
assert_eq!(a, &b"world"[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn advance_vec() {
|
||||
let mut a = BytesMut::from(b"hello world boooo yah world zomg wat wat".to_vec());
|
||||
a.advance(16);
|
||||
assert_eq!(a, b"o yah world zomg wat wat"[..]);
|
||||
|
||||
a.advance(4);
|
||||
assert_eq!(a, b"h world zomg wat wat"[..]);
|
||||
|
||||
// Reserve some space.
|
||||
a.reserve(1024);
|
||||
assert_eq!(a, b"h world zomg wat wat"[..]);
|
||||
|
||||
a.advance(6);
|
||||
assert_eq!(a, b"d zomg wat wat"[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn advance_past_len() {
|
||||
let mut a = BytesMut::from(b"hello world".to_vec());
|
||||
a.advance(20);
|
||||
}
|
||||
|
||||
#[test]
|
||||
// Only run these tests on little endian systems. CI uses qemu for testing
|
||||
// little endian... and qemu doesn't really support threading all that well.
|
||||
|
@@ -514,3 +574,146 @@ fn partial_eq_bytesmut() {
|
|||
assert!(bytes2 != bytesmut);
|
||||
assert!(bytesmut != bytes2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_basic() {
|
||||
let mut buf = BytesMut::with_capacity(64);
|
||||
buf.extend_from_slice(b"aaabbbcccddd");
|
||||
|
||||
let splitted = buf.split_off(6);
|
||||
assert_eq!(b"aaabbb", &buf[..]);
|
||||
assert_eq!(b"cccddd", &splitted[..]);
|
||||
|
||||
buf.unsplit(splitted);
|
||||
assert_eq!(b"aaabbbcccddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_empty_other() {
|
||||
let mut buf = BytesMut::with_capacity(64);
|
||||
buf.extend_from_slice(b"aaabbbcccddd");
|
||||
|
||||
// empty other
|
||||
let other = BytesMut::new();
|
||||
|
||||
buf.unsplit(other);
|
||||
assert_eq!(b"aaabbbcccddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_empty_self() {
|
||||
// empty self
|
||||
let mut buf = BytesMut::new();
|
||||
|
||||
let mut other = BytesMut::with_capacity(64);
|
||||
other.extend_from_slice(b"aaabbbcccddd");
|
||||
|
||||
buf.unsplit(other);
|
||||
assert_eq!(b"aaabbbcccddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_inline_arc() {
|
||||
let mut buf = BytesMut::with_capacity(8); //inline
|
||||
buf.extend_from_slice(b"aaaabbbb");
|
||||
|
||||
let mut buf2 = BytesMut::with_capacity(64);
|
||||
buf2.extend_from_slice(b"ccccddddeeee");
|
||||
|
||||
buf2.split_off(8); //arc
|
||||
|
||||
buf.unsplit(buf2);
|
||||
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_arc_inline() {
|
||||
let mut buf = BytesMut::with_capacity(64);
|
||||
buf.extend_from_slice(b"aaaabbbbeeee");
|
||||
|
||||
buf.split_off(8); //arc
|
||||
|
||||
let mut buf2 = BytesMut::with_capacity(8); //inline
|
||||
buf2.extend_from_slice(b"ccccdddd");
|
||||
|
||||
buf.unsplit(buf2);
|
||||
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
|
||||
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_both_inline() {
|
||||
let mut buf = BytesMut::with_capacity(16); //inline
|
||||
buf.extend_from_slice(b"aaaabbbbccccdddd");
|
||||
|
||||
let splitted = buf.split_off(8); // both inline
|
||||
assert_eq!(b"aaaabbbb", &buf[..]);
|
||||
assert_eq!(b"ccccdddd", &splitted[..]);
|
||||
|
||||
buf.unsplit(splitted);
|
||||
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn unsplit_arc_different() {
|
||||
let mut buf = BytesMut::with_capacity(64);
|
||||
buf.extend_from_slice(b"aaaabbbbeeee");
|
||||
|
||||
buf.split_off(8); //arc
|
||||
|
||||
let mut buf2 = BytesMut::with_capacity(64);
|
||||
buf2.extend_from_slice(b"ccccddddeeee");
|
||||
|
||||
buf2.split_off(8); //arc
|
||||
|
||||
buf.unsplit(buf2);
|
||||
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_arc_non_contiguous() {
|
||||
let mut buf = BytesMut::with_capacity(64);
|
||||
buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
|
||||
|
||||
let mut buf2 = buf.split_off(8); //arc
|
||||
|
||||
let buf3 = buf2.split_off(4); //arc
|
||||
|
||||
buf.unsplit(buf3);
|
||||
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unsplit_two_split_offs() {
|
||||
let mut buf = BytesMut::with_capacity(64);
|
||||
buf.extend_from_slice(b"aaaabbbbccccdddd");
|
||||
|
||||
let mut buf2 = buf.split_off(8); //arc
|
||||
let buf3 = buf2.split_off(4); //arc
|
||||
|
||||
buf2.unsplit(buf3);
|
||||
buf.unsplit(buf2);
|
||||
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn from_iter_no_size_hint() {
|
||||
use std::iter;
|
||||
|
||||
let mut expect = vec![];
|
||||
|
||||
let actual: Bytes = iter::repeat(b'x')
|
||||
.scan(100, |cnt, item| {
|
||||
if *cnt >= 1 {
|
||||
*cnt -= 1;
|
||||
expect.push(item);
|
||||
Some(item)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
assert_eq!(&actual[..], &expect[..]);
|
||||
}
|
||||
|
|
|
@@ -55,48 +55,68 @@ fn vectored_read() {
|
|||
let mut buf = a.chain(b);
|
||||
|
||||
{
|
||||
-        let mut iovecs: [&IoVec; 4] = Default::default();
+        let b1: &[u8] = &mut [0];
+        let b2: &[u8] = &mut [0];
+        let b3: &[u8] = &mut [0];
+        let b4: &[u8] = &mut [0];
+        let mut iovecs: [&IoVec; 4] =
+            [b1.into(), b2.into(), b3.into(), b4.into()];
|
||||
|
||||
assert_eq!(2, buf.bytes_vec(&mut iovecs));
|
||||
assert_eq!(iovecs[0][..], b"hello"[..]);
|
||||
assert_eq!(iovecs[1][..], b"world"[..]);
|
||||
-        assert!(iovecs[2].is_empty());
-        assert!(iovecs[3].is_empty());
+        assert_eq!(iovecs[2][..], b"\0"[..]);
+        assert_eq!(iovecs[3][..], b"\0"[..]);
|
||||
}
|
||||
|
||||
buf.advance(2);
|
||||
|
||||
{
|
||||
-        let mut iovecs: [&IoVec; 4] = Default::default();
+        let b1: &[u8] = &mut [0];
+        let b2: &[u8] = &mut [0];
+        let b3: &[u8] = &mut [0];
+        let b4: &[u8] = &mut [0];
+        let mut iovecs: [&IoVec; 4] =
+            [b1.into(), b2.into(), b3.into(), b4.into()];
|
||||
|
||||
assert_eq!(2, buf.bytes_vec(&mut iovecs));
|
||||
assert_eq!(iovecs[0][..], b"llo"[..]);
|
||||
assert_eq!(iovecs[1][..], b"world"[..]);
|
||||
-        assert!(iovecs[2].is_empty());
-        assert!(iovecs[3].is_empty());
+        assert_eq!(iovecs[2][..], b"\0"[..]);
+        assert_eq!(iovecs[3][..], b"\0"[..]);
|
||||
}
|
||||
|
||||
buf.advance(3);
|
||||
|
||||
{
|
||||
-        let mut iovecs: [&IoVec; 4] = Default::default();
+        let b1: &[u8] = &mut [0];
+        let b2: &[u8] = &mut [0];
+        let b3: &[u8] = &mut [0];
+        let b4: &[u8] = &mut [0];
+        let mut iovecs: [&IoVec; 4] =
+            [b1.into(), b2.into(), b3.into(), b4.into()];
|
||||
|
||||
assert_eq!(1, buf.bytes_vec(&mut iovecs));
|
||||
assert_eq!(iovecs[0][..], b"world"[..]);
|
||||
-        assert!(iovecs[1].is_empty());
-        assert!(iovecs[2].is_empty());
-        assert!(iovecs[3].is_empty());
+        assert_eq!(iovecs[1][..], b"\0"[..]);
+        assert_eq!(iovecs[2][..], b"\0"[..]);
+        assert_eq!(iovecs[3][..], b"\0"[..]);
|
||||
}
|
||||
|
||||
buf.advance(3);
|
||||
|
||||
{
|
||||
-        let mut iovecs: [&IoVec; 4] = Default::default();
+        let b1: &[u8] = &mut [0];
+        let b2: &[u8] = &mut [0];
+        let b3: &[u8] = &mut [0];
+        let b4: &[u8] = &mut [0];
+        let mut iovecs: [&IoVec; 4] =
+            [b1.into(), b2.into(), b3.into(), b4.into()];
|
||||
|
||||
assert_eq!(1, buf.bytes_vec(&mut iovecs));
|
||||
assert_eq!(iovecs[0][..], b"ld"[..]);
|
||||
-        assert!(iovecs[1].is_empty());
-        assert!(iovecs[2].is_empty());
-        assert!(iovecs[3].is_empty());
+        assert_eq!(iovecs[1][..], b"\0"[..]);
+        assert_eq!(iovecs[2][..], b"\0"[..]);
+        assert_eq!(iovecs[3][..], b"\0"[..]);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -11,7 +11,7 @@ fn fmt() {
|
|||
\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\
|
||||
\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\
|
||||
\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\
|
||||
\\x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
|
||||
\x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
|
||||
@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\
|
||||
`abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
|
||||
\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\
|
||||
|
|
|
@@ -1 +1 @@
|
|||
{"files":{".travis.yml":"d2a9bb7c029e8ed0acfb8dc8e786014cfa4f053b6f4c525303d69fd7e28704e9","Cargo.toml":"276e89e8f02c785f020dc5c6035de314e4d1279f9a83d6654f9a689dab5c6234","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"d4860822f8c84f3a91d8c55f600bcf86453518a778f753d2c389debe5c4ad5fa","src/builder.rs":"528640f717f5769e522a9ac066a994c21973ff3a5e9359d087f410233887c83c","src/delta.rs":"510fc3dbf0a70d635d0488c5a5a32a2ba8e1490ce05bee39d944ea8c02189bbc","src/draft.rs":"bd11960db08f4e4368937845fc18b842e474391738e4457a3441df2789c9d320","src/jar.rs":"98237c4a37143e08bcb6e84c5ed69b799a8a08f89a1b83f02c425cc92b089252","src/lib.rs":"ffe4f6eaa10002c06fd52c52af1d28006a4aa7320ea302d417b244704c938e02","src/parse.rs":"ee46cee7fa445e6545f29eac3eac81e76ec29e9c53e000195af427c7315ee11c","src/secure/key.rs":"734f35ef4b0d6b63174befdcb970f0304ac63f0895871b7c2f267fefdd43b648","src/secure/macros.rs":"83d770e5c4eb7fbd3c3d86973b69042e9e2bb9fafb72a4456598e2ae78638d5f","src/secure/mod.rs":"5d7fecb62295827d474ed1ce6b7628fe93d4a09eb14babfde036d64e8e4a04f8","src/secure/private.rs":"ee114d603a7b97e6f78c09a3612be0afa2ff7aca5d68d728336797c8a36e8000","src/secure/signed.rs":"8440c9ce5a0be4e162fb502cd1fbe24572ce00709f5554c45f8bece39637590d"},"package":"746858cae4eae40fff37e1998320068df317bc247dc91a67c6cfa053afdc2abb"}
|
||||
{"files":{".travis.yml":"d2a9bb7c029e8ed0acfb8dc8e786014cfa4f053b6f4c525303d69fd7e28704e9","Cargo.toml":"6a8f9c03d5260359e497a70910ab444f32b51551e9c0aaffabcfbbb2dd7c906d","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"9205f5b7d179b5ca140ec7322c5894540aef149a38bd55874bef9de5a48e0938","src/builder.rs":"4200963d44d1a59f1268965b77407ba977eb5a777875cb76ea927ddc829be3d8","src/delta.rs":"510fc3dbf0a70d635d0488c5a5a32a2ba8e1490ce05bee39d944ea8c02189bbc","src/draft.rs":"950b43b3f6e1c4c13b1e90220c71defe02713170807b41e5ffde9a1327688f48","src/jar.rs":"0e8a6e2f0426834101bd9608baf9f695839053523e9e9ac58aea03a73506b8fb","src/lib.rs":"963ff56045a4ee22e280ee24a42efc9b1d6a96de30d3856b39287ec2b51b00db","src/parse.rs":"549844993601f20f5de3f5d5f8bea0fce3fe4f09d72e343aff9e433948a4ec5c","src/secure/key.rs":"734f35ef4b0d6b63174befdcb970f0304ac63f0895871b7c2f267fefdd43b648","src/secure/macros.rs":"83d770e5c4eb7fbd3c3d86973b69042e9e2bb9fafb72a4456598e2ae78638d5f","src/secure/mod.rs":"5d7fecb62295827d474ed1ce6b7628fe93d4a09eb14babfde036d64e8e4a04f8","src/secure/private.rs":"bea61d91772285e0db7c234bda32d9e95ce386dba5cab640859531d72f13628c","src/secure/signed.rs":"26c46c2d561ea14d1d8d79f85342a98b4bd749df776677dde91dd9b928e91fbe"},"package":"1465f8134efa296b4c19db34d909637cb2bf0f7aaf21299e23e18fa29ac557cf"}
|
|
@ -12,7 +12,7 @@
|
|||
|
||||
[package]
|
||||
name = "cookie"
|
||||
version = "0.10.1"
|
||||
version = "0.11.0"
|
||||
authors = ["Alex Crichton <alex@alexcrichton.com>", "Sergio Benitez <sb@sergio.bz>"]
|
||||
description = "Crate for parsing HTTP cookie headers and managing a cookie jar. Supports signed\nand private (encrypted + signed) jars.\n"
|
||||
documentation = "https://docs.rs/cookie"
|
||||
|
@ -20,21 +20,21 @@ license = "MIT/Apache-2.0"
|
|||
repository = "https://github.com/alexcrichton/cookie-rs"
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
[dependencies.time]
|
||||
version = "0.1"
|
||||
[dependencies.base64]
|
||||
version = "0.9.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.ring]
|
||||
version = "0.12.0"
|
||||
version = "0.13.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.base64]
|
||||
version = "0.6.0"
|
||||
optional = true
|
||||
[dependencies.time]
|
||||
version = "0.1"
|
||||
|
||||
[dependencies.url]
|
||||
version = "1.0"
|
||||
optional = true
|
||||
|
||||
[features]
|
||||
secure = ["ring", "base64"]
|
||||
percent-encode = ["url"]
|
||||
secure = ["ring", "base64"]
|
||||
|
|
|
@ -18,9 +18,17 @@ See the [documentation](http://docs.rs/cookie) for detailed usage information.
|
|||
|
||||
# License
|
||||
|
||||
`cookie-rs` is primarily distributed under the terms of both the MIT license and
|
||||
the Apache License (Version 2.0), with portions covered by various BSD-like
|
||||
licenses.
|
||||
This project is licensed under either of
|
||||
|
||||
See [LICENSE-APACHE](LICENSE-APACHE), and [LICENSE-MIT](LICENSE-MIT) for
|
||||
details.
|
||||
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
http://www.apache.org/licenses/LICENSE-2.0)
|
||||
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
http://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in `cookie-rs` by you, as defined in the Apache-2.0 license, shall
|
||||
be dual licensed as above, without any additional terms or conditions.
|
||||
|
|
|
@ -154,7 +154,7 @@ impl CookieBuilder {
|
|||
/// .secure(true)
|
||||
/// .finish();
|
||||
///
|
||||
/// assert_eq!(c.secure(), true);
|
||||
/// assert_eq!(c.secure(), Some(true));
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn secure(mut self, value: bool) -> CookieBuilder {
|
||||
|
@ -173,7 +173,7 @@ impl CookieBuilder {
|
|||
/// .http_only(true)
|
||||
/// .finish();
|
||||
///
|
||||
/// assert_eq!(c.http_only(), true);
|
||||
/// assert_eq!(c.http_only(), Some(true));
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn http_only(mut self, value: bool) -> CookieBuilder {
|
||||
|
|
|
@ -10,6 +10,8 @@ use std::fmt;
|
|||
/// attribute is "Strict", then the cookie is never sent in cross-site requests.
|
||||
/// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site
|
||||
/// requests with "safe" HTTP methods, i.e, `GET`, `HEAD`, `OPTIONS`, `TRACE`.
|
||||
/// If the `SameSite` attribute is not present (made explicit via the
|
||||
/// `SameSite::None` variant), then the cookie will be sent as normal.
|
||||
///
|
||||
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
|
||||
/// are subject to change.
|
||||
|
@ -18,7 +20,9 @@ pub enum SameSite {
|
|||
/// The "Strict" `SameSite` attribute.
|
||||
Strict,
|
||||
/// The "Lax" `SameSite` attribute.
|
||||
Lax
|
||||
Lax,
|
||||
/// No `SameSite` attribute.
|
||||
None
|
||||
}
|
||||
|
||||
impl SameSite {
|
||||
|
@ -32,12 +36,13 @@ impl SameSite {
|
|||
/// let strict = SameSite::Strict;
|
||||
/// assert!(strict.is_strict());
|
||||
/// assert!(!strict.is_lax());
|
||||
/// assert!(!strict.is_none());
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn is_strict(&self) -> bool {
|
||||
match *self {
|
||||
SameSite::Strict => true,
|
||||
SameSite::Lax => false
|
||||
SameSite::Lax | SameSite::None => false,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -51,12 +56,33 @@ impl SameSite {
|
|||
/// let lax = SameSite::Lax;
|
||||
/// assert!(lax.is_lax());
|
||||
/// assert!(!lax.is_strict());
|
||||
/// assert!(!lax.is_none());
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn is_lax(&self) -> bool {
|
||||
match *self {
|
||||
SameSite::Strict => false,
|
||||
SameSite::Lax => true
|
||||
SameSite::Lax => true,
|
||||
SameSite::Strict | SameSite::None => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if `self` is `SameSite::None` and `false` otherwise.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use cookie::SameSite;
|
||||
///
|
||||
/// let none = SameSite::None;
|
||||
/// assert!(none.is_none());
|
||||
/// assert!(!none.is_lax());
|
||||
/// assert!(!none.is_strict());
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn is_none(&self) -> bool {
|
||||
match *self {
|
||||
SameSite::None => true,
|
||||
SameSite::Lax | SameSite::Strict => false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -65,7 +91,8 @@ impl fmt::Display for SameSite {
|
|||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match *self {
|
||||
SameSite::Strict => write!(f, "Strict"),
|
||||
SameSite::Lax => write!(f, "Lax")
|
||||
SameSite::Lax => write!(f, "Lax"),
|
||||
SameSite::None => Ok(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
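Taken together, the `SameSite` changes above make the attribute tri-state, with the new `None` variant rendering as nothing in `Display`. A minimal sketch of the resulting behavior, assuming `cookie = "0.11"` as a dependency (it mirrors the serialization test added further down in this diff):

```rust
extern crate cookie;

use cookie::{Cookie, SameSite};

fn main() {
    let lax = Cookie::build("k", "v").same_site(SameSite::Lax).finish();
    assert_eq!(lax.to_string(), "k=v; SameSite=Lax");

    // `SameSite::None` means "no attribute": nothing is emitted.
    let none = Cookie::build("k", "v").same_site(SameSite::None).finish();
    assert_eq!(none.to_string(), "k=v");
}
```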
|
@ -121,10 +121,13 @@ impl CookieJar {
|
|||
.and_then(|c| if !c.removed { Some(&c.cookie) } else { None })
|
||||
}
|
||||
|
||||
/// Adds an "original" `cookie` to this jar. Adding an original cookie does
|
||||
/// not affect the [delta](#method.delta) computation. This method is
|
||||
/// intended to be used to seed the cookie jar with cookies received from a
|
||||
/// client's HTTP message.
|
||||
/// Adds an "original" `cookie` to this jar. If an original cookie with the
|
||||
/// same name already exists, it is replaced with `cookie`. Cookies added
|
||||
/// with `add` take precedence and are not replaced by this method.
|
||||
///
|
||||
/// Adding an original cookie does not affect the [delta](#method.delta)
|
||||
/// computation. This method is intended to be used to seed the cookie jar
|
||||
/// with cookies received from a client's HTTP message.
|
||||
///
|
||||
/// For accurate `delta` computations, this method should not be called
|
||||
/// after calling `remove`.
|
||||
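The expanded docs distinguish seeding a jar (`add_original`, which leaves the delta untouched) from mutating it (`add`, which produces a delta entry). A short sketch of that flow, assuming `cookie = "0.11"` (the cookie names are illustrative):

```rust
extern crate cookie;

use cookie::{Cookie, CookieJar};

fn main() {
    let mut jar = CookieJar::new();
    jar.add_original(Cookie::new("session", "abc")); // seeded from the request
    jar.add(Cookie::new("theme", "dark"));           // set by the server

    assert_eq!(jar.iter().count(), 2);  // both cookies are visible
    assert_eq!(jar.delta().count(), 1); // only `theme` needs a Set-Cookie
}
```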
|
@ -147,7 +150,8 @@ impl CookieJar {
|
|||
self.original_cookies.replace(DeltaCookie::added(cookie));
|
||||
}
|
||||
|
||||
/// Adds `cookie` to this jar.
|
||||
/// Adds `cookie` to this jar. If a cookie with the same name already
|
||||
/// exists, it is replaced with `cookie`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
|
@ -228,6 +232,47 @@ impl CookieJar {
|
|||
}
|
||||
}
|
||||
|
||||
/// Removes `cookie` from this jar completely. This method differs from
|
||||
/// `remove` in that no delta cookie is created under any condition. Neither
|
||||
/// the `delta` nor `iter` methods will return a cookie that is removed
|
||||
/// using this method.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// Removing an _original_ cookie; no _removal_ cookie is generated:
|
||||
///
|
||||
/// ```rust
|
||||
/// # extern crate cookie;
|
||||
/// extern crate time;
|
||||
///
|
||||
/// use cookie::{CookieJar, Cookie};
|
||||
/// use time::Duration;
|
||||
///
|
||||
/// # fn main() {
|
||||
/// let mut jar = CookieJar::new();
|
||||
///
|
||||
/// // Add an original cookie and a new cookie.
|
||||
/// jar.add_original(Cookie::new("name", "value"));
|
||||
/// jar.add(Cookie::new("key", "value"));
|
||||
/// assert_eq!(jar.delta().count(), 1);
|
||||
/// assert_eq!(jar.iter().count(), 2);
|
||||
///
|
||||
/// // Now force remove the original cookie.
|
||||
/// jar.force_remove(Cookie::new("name", "value"));
|
||||
/// assert_eq!(jar.delta().count(), 1);
|
||||
/// assert_eq!(jar.iter().count(), 1);
|
||||
///
|
||||
/// // Now force remove the new cookie.
|
||||
/// jar.force_remove(Cookie::new("key", "value"));
|
||||
/// assert_eq!(jar.delta().count(), 0);
|
||||
/// assert_eq!(jar.iter().count(), 0);
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn force_remove<'a>(&mut self, cookie: Cookie<'a>) {
|
||||
self.original_cookies.remove(cookie.name());
|
||||
self.delta_cookies.remove(cookie.name());
|
||||
}
|
||||
|
||||
/// Removes all cookies from this cookie jar.
|
||||
#[deprecated(since = "0.7.0", note = "calling this method may not remove \
|
||||
all cookies since the path and domain are not specified; use \
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
//! Add the following to the `[dependencies]` section of your `Cargo.toml`:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! cookie = "0.10"
|
||||
//! cookie = "0.11"
|
||||
//! ```
|
||||
//!
|
||||
//! Then add the following line to your crate root:
|
||||
|
@ -58,7 +58,7 @@
|
|||
//! features = ["secure", "percent-encode"]
|
||||
//! ```
|
||||
|
||||
#![doc(html_root_url = "https://docs.rs/cookie/0.10")]
|
||||
#![doc(html_root_url = "https://docs.rs/cookie/0.11")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
#[cfg(feature = "percent-encode")] extern crate url;
|
||||
|
@ -74,10 +74,12 @@ mod draft;
|
|||
#[cfg(feature = "secure")] pub use secure::*;
|
||||
|
||||
use std::borrow::Cow;
|
||||
use std::ascii::AsciiExt;
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
|
||||
#[allow(unused_imports, deprecated)]
|
||||
use std::ascii::AsciiExt;
|
||||
|
||||
#[cfg(feature = "percent-encode")]
|
||||
use url::percent_encoding::{USERINFO_ENCODE_SET, percent_encode};
|
||||
use time::{Tm, Duration};
|
||||
|
@ -164,7 +166,7 @@ pub struct Cookie<'c> {
|
|||
name: CookieStr,
|
||||
/// The cookie's value.
|
||||
value: CookieStr,
|
||||
/// The cookie's experiation, if any.
|
||||
/// The cookie's expiration, if any.
|
||||
expires: Option<Tm>,
|
||||
/// The cookie's maximum age, if any.
|
||||
max_age: Option<Duration>,
|
||||
|
@ -172,10 +174,10 @@ pub struct Cookie<'c> {
|
|||
domain: Option<CookieStr>,
|
||||
/// The cookie's path, if any.
|
||||
path: Option<CookieStr>,
|
||||
/// Whether this cookie was marked secure.
|
||||
secure: bool,
|
||||
/// Whether this cookie was marked httponly.
|
||||
http_only: bool,
|
||||
/// Whether this cookie was marked Secure.
|
||||
secure: Option<bool>,
|
||||
/// Whether this cookie was marked HttpOnly.
|
||||
http_only: Option<bool>,
|
||||
/// The draft `SameSite` attribute.
|
||||
same_site: Option<SameSite>,
|
||||
}
|
||||
|
@ -203,8 +205,8 @@ impl Cookie<'static> {
|
|||
max_age: None,
|
||||
domain: None,
|
||||
path: None,
|
||||
secure: false,
|
||||
http_only: false,
|
||||
secure: None,
|
||||
http_only: None,
|
||||
same_site: None,
|
||||
}
|
||||
}
|
||||
|
@ -256,7 +258,7 @@ impl<'c> Cookie<'c> {
|
|||
///
|
||||
/// let c = Cookie::parse("foo=bar%20baz; HttpOnly").unwrap();
|
||||
/// assert_eq!(c.name_value(), ("foo", "bar%20baz"));
|
||||
/// assert_eq!(c.http_only(), true);
|
||||
/// assert_eq!(c.http_only(), Some(true));
|
||||
/// ```
|
||||
pub fn parse<S>(s: S) -> Result<Cookie<'c>, ParseError>
|
||||
where S: Into<Cow<'c, str>>
|
||||
|
@ -278,7 +280,7 @@ impl<'c> Cookie<'c> {
|
|||
///
|
||||
/// let c = Cookie::parse_encoded("foo=bar%20baz; HttpOnly").unwrap();
|
||||
/// assert_eq!(c.name_value(), ("foo", "bar baz"));
|
||||
/// assert_eq!(c.http_only(), true);
|
||||
/// assert_eq!(c.http_only(), Some(true));
|
||||
/// ```
|
||||
#[cfg(feature = "percent-encode")]
|
||||
pub fn parse_encoded<S>(s: S) -> Result<Cookie<'c>, ParseError>
|
||||
|
@ -379,7 +381,10 @@ impl<'c> Cookie<'c> {
|
|||
(self.name(), self.value())
|
||||
}
|
||||
|
||||
/// Returns whether this cookie was marked `HttpOnly` or not.
|
||||
/// Returns whether this cookie was marked `HttpOnly` or not. Returns
|
||||
/// `Some(true)` when the cookie was explicitly set (manually or parsed) as
|
||||
/// `HttpOnly`, `Some(false)` when `http_only` was manually set to `false`,
|
||||
/// and `None` otherwise.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
|
@ -387,14 +392,31 @@ impl<'c> Cookie<'c> {
|
|||
/// use cookie::Cookie;
|
||||
///
|
||||
/// let c = Cookie::parse("name=value; httponly").unwrap();
|
||||
/// assert_eq!(c.http_only(), true);
|
||||
/// assert_eq!(c.http_only(), Some(true));
|
||||
///
|
||||
/// let mut c = Cookie::new("name", "value");
|
||||
/// assert_eq!(c.http_only(), None);
|
||||
///
|
||||
/// let mut c = Cookie::new("name", "value");
|
||||
/// assert_eq!(c.http_only(), None);
|
||||
///
|
||||
/// // An explicitly set "false" value.
|
||||
/// c.set_http_only(false);
|
||||
/// assert_eq!(c.http_only(), Some(false));
|
||||
///
|
||||
/// // An explicitly set "true" value.
|
||||
/// c.set_http_only(true);
|
||||
/// assert_eq!(c.http_only(), Some(true));
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn http_only(&self) -> bool {
|
||||
pub fn http_only(&self) -> Option<bool> {
|
||||
self.http_only
|
||||
}
|
||||
|
||||
/// Returns whether this cookie was marked `Secure` or not.
|
||||
/// Returns whether this cookie was marked `Secure` or not. Returns
|
||||
/// `Some(true)` when the cookie was explicitly set (manually or parsed) as
|
||||
/// `Secure`, `Some(false)` when `secure` was manually set to `false`, and
|
||||
/// `None` otherwise.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
|
@ -402,10 +424,24 @@ impl<'c> Cookie<'c> {
|
|||
/// use cookie::Cookie;
|
||||
///
|
||||
/// let c = Cookie::parse("name=value; Secure").unwrap();
|
||||
/// assert_eq!(c.secure(), true);
|
||||
/// assert_eq!(c.secure(), Some(true));
|
||||
///
|
||||
/// let mut c = Cookie::parse("name=value").unwrap();
|
||||
/// assert_eq!(c.secure(), None);
|
||||
///
|
||||
/// let mut c = Cookie::new("name", "value");
|
||||
/// assert_eq!(c.secure(), None);
|
||||
///
|
||||
/// // An explicitly set "false" value.
|
||||
/// c.set_secure(false);
|
||||
/// assert_eq!(c.secure(), Some(false));
|
||||
///
|
||||
/// // An explicitly set "true" value.
|
||||
/// c.set_secure(true);
|
||||
/// assert_eq!(c.secure(), Some(true));
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn secure(&self) -> bool {
|
||||
pub fn secure(&self) -> Option<bool> {
|
||||
self.secure
|
||||
}
|
||||
|
||||
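With `secure` and `http_only` now stored as `Option<bool>`, callers can tell "attribute never set" (`None`) apart from an explicitly disabled attribute (`Some(false)`). A minimal sketch of the new getters and setters, assuming `cookie = "0.11"`:

```rust
extern crate cookie;

use cookie::Cookie;

fn main() {
    let mut c = Cookie::new("name", "value");
    assert_eq!(c.secure(), None); // never set

    c.set_secure(false);
    assert_eq!(c.secure(), Some(false)); // explicitly disabled

    // Parsed attributes become explicit `Some(true)` flags.
    let parsed = Cookie::parse("name=value; Secure; HttpOnly").unwrap();
    assert_eq!(parsed.secure(), Some(true));
    assert_eq!(parsed.http_only(), Some(true));
}
```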
|
@ -549,14 +585,14 @@ impl<'c> Cookie<'c> {
|
|||
/// use cookie::Cookie;
|
||||
///
|
||||
/// let mut c = Cookie::new("name", "value");
|
||||
/// assert_eq!(c.http_only(), false);
|
||||
/// assert_eq!(c.http_only(), None);
|
||||
///
|
||||
/// c.set_http_only(true);
|
||||
/// assert_eq!(c.http_only(), true);
|
||||
/// assert_eq!(c.http_only(), Some(true));
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn set_http_only(&mut self, value: bool) {
|
||||
self.http_only = value;
|
||||
self.http_only = Some(value);
|
||||
}
|
||||
|
||||
/// Sets the value of `secure` in `self` to `value`.
|
||||
|
@ -567,14 +603,14 @@ impl<'c> Cookie<'c> {
|
|||
/// use cookie::Cookie;
|
||||
///
|
||||
/// let mut c = Cookie::new("name", "value");
|
||||
/// assert_eq!(c.secure(), false);
|
||||
/// assert_eq!(c.secure(), None);
|
||||
///
|
||||
/// c.set_secure(true);
|
||||
/// assert_eq!(c.secure(), true);
|
||||
/// assert_eq!(c.secure(), Some(true));
|
||||
/// ```
|
||||
#[inline]
|
||||
pub fn set_secure(&mut self, value: bool) {
|
||||
self.secure = value;
|
||||
self.secure = Some(value);
|
||||
}
|
||||
|
||||
/// Sets the value of `same_site` in `self` to `value`.
|
||||
|
@ -708,16 +744,18 @@ impl<'c> Cookie<'c> {
|
|||
}
|
||||
|
||||
fn fmt_parameters(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
if self.http_only() {
|
||||
if let Some(true) = self.http_only() {
|
||||
write!(f, "; HttpOnly")?;
|
||||
}
|
||||
|
||||
if self.secure() {
|
||||
if let Some(true) = self.secure() {
|
||||
write!(f, "; Secure")?;
|
||||
}
|
||||
|
||||
if let Some(same_site) = self.same_site() {
|
||||
write!(f, "; SameSite={}", same_site)?;
|
||||
if !same_site.is_none() {
|
||||
write!(f, "; SameSite={}", same_site)?;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(path) = self.path() {
|
||||
|
@ -1002,6 +1040,10 @@ mod tests {
|
|||
let cookie = Cookie::build("foo", "bar")
|
||||
.same_site(SameSite::Lax).finish();
|
||||
assert_eq!(&cookie.to_string(), "foo=bar; SameSite=Lax");
|
||||
|
||||
let cookie = Cookie::build("foo", "bar")
|
||||
.same_site(SameSite::None).finish();
|
||||
assert_eq!(&cookie.to_string(), "foo=bar");
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
use std::borrow::Cow;
|
||||
use std::cmp;
|
||||
use std::error::Error;
|
||||
use std::ascii::AsciiExt;
|
||||
use std::str::Utf8Error;
|
||||
use std::fmt;
|
||||
use std::convert::From;
|
||||
|
||||
#[allow(unused_imports, deprecated)]
|
||||
use std::ascii::AsciiExt;
|
||||
|
||||
#[cfg(feature = "percent-encode")]
|
||||
use url::percent_encoding::percent_decode;
|
||||
use time::{self, Duration};
|
||||
|
@ -133,8 +135,8 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
|
|||
max_age: None,
|
||||
domain: None,
|
||||
path: None,
|
||||
secure: false,
|
||||
http_only: false,
|
||||
secure: None,
|
||||
http_only: None,
|
||||
same_site: None
|
||||
};
|
||||
|
||||
|
@ -145,8 +147,8 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
|
|||
};
|
||||
|
||||
match (&*key.to_ascii_lowercase(), value) {
|
||||
("secure", _) => cookie.secure = true,
|
||||
("httponly", _) => cookie.http_only = true,
|
||||
("secure", _) => cookie.secure = Some(true),
|
||||
("httponly", _) => cookie.http_only = Some(true),
|
||||
("max-age", Some(v)) => {
|
||||
// See RFC 6265 Section 5.2.2, negative values indicate that the
|
||||
// earliest possible expiration time should be used, so set the
|
||||
|
|
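The parser change above maps bare `Secure`/`HttpOnly` attributes to `Some(true)`. A small sketch combining that with `Max-Age` handling, assuming `cookie = "0.11"` and that `max_age()` still returns `Option<time::Duration>` as it did in 0.10:

```rust
extern crate cookie;

use cookie::Cookie;

fn main() {
    let c = Cookie::parse("id=1; Secure; HttpOnly; Max-Age=3600").unwrap();
    assert_eq!(c.secure(), Some(true));
    assert_eq!(c.http_only(), Some(true));
    assert_eq!(c.max_age().map(|d| d.num_seconds()), Some(3600));
}
```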
|
@ -104,6 +104,44 @@ impl<'a> PrivateJar<'a> {
|
|||
/// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
|
||||
/// ```
|
||||
pub fn add(&mut self, mut cookie: Cookie<'static>) {
|
||||
self.encrypt_cookie(&mut cookie);
|
||||
|
||||
// Add the sealed cookie to the parent.
|
||||
self.parent.add(cookie);
|
||||
}
|
||||
|
||||
/// Adds an "original" `cookie` to parent jar. The cookie's value is
|
||||
/// encrypted with authenticated encryption assuring confidentiality,
|
||||
/// integrity, and authenticity. Adding an original cookie does not affect
|
||||
/// the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
|
||||
/// computation. This method is intended to be used to seed the cookie jar
|
||||
/// with cookies received from a client's HTTP message.
|
||||
///
|
||||
/// For accurate `delta` computations, this method should not be called
|
||||
/// after calling `remove`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// jar.private(&key).add_original(Cookie::new("name", "value"));
|
||||
///
|
||||
/// assert_eq!(jar.iter().count(), 1);
|
||||
/// assert_eq!(jar.delta().count(), 0);
|
||||
/// ```
|
||||
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
|
||||
self.encrypt_cookie(&mut cookie);
|
||||
|
||||
// Add the sealed cookie to the parent.
|
||||
self.parent.add_original(cookie);
|
||||
}
|
||||
|
||||
/// Encrypts the cookie's value with
|
||||
/// authenticated encryption assuring confidentiality, integrity, and authenticity.
|
||||
fn encrypt_cookie(&self, cookie: &mut Cookie) {
|
||||
let mut data;
|
||||
let output_len = {
|
||||
// Create the `SealingKey` structure.
|
||||
|
@ -129,9 +167,6 @@ impl<'a> PrivateJar<'a> {
|
|||
// Base64 encode the nonce and encrypted value.
|
||||
let sealed_value = base64::encode(&data[..(NONCE_LEN + output_len)]);
|
||||
cookie.set_value(sealed_value);
|
||||
|
||||
// Add the sealed cookie to the parent.
|
||||
self.parent.add(cookie);
|
||||
}
|
||||
|
||||
/// Removes `cookie` from the parent jar.
|
||||
|
|
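A usage sketch of the `PrivateJar::add_original` API documented above, assuming `cookie = "0.11"` built with the `secure` feature; it condenses the doc example in the hunk:

```rust
extern crate cookie;

use cookie::{Cookie, CookieJar, Key};

fn main() {
    let key = Key::generate();
    let mut jar = CookieJar::new();

    // Seed from a client request; no delta entry is produced.
    jar.private(&key).add_original(Cookie::new("name", "value"));
    assert_eq!(jar.delta().count(), 0);

    // Reading through the private view decrypts and verifies the value.
    assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
}
```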
|
@ -96,12 +96,42 @@ impl<'a> SignedJar<'a> {
|
|||
/// assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value");
|
||||
/// ```
|
||||
pub fn add(&mut self, mut cookie: Cookie<'static>) {
|
||||
self.sign_cookie(&mut cookie);
|
||||
self.parent.add(cookie);
|
||||
}
|
||||
|
||||
/// Adds an "original" `cookie` to this jar. The cookie's value is signed
|
||||
/// assuring integrity and authenticity. Adding an original cookie does not
|
||||
/// affect the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
|
||||
/// computation. This method is intended to be used to seed the cookie jar
|
||||
/// with cookies received from a client's HTTP message.
|
||||
///
|
||||
/// For accurate `delta` computations, this method should not be called
|
||||
/// after calling `remove`.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```rust
|
||||
/// use cookie::{CookieJar, Cookie, Key};
|
||||
///
|
||||
/// let key = Key::generate();
|
||||
/// let mut jar = CookieJar::new();
|
||||
/// jar.signed(&key).add_original(Cookie::new("name", "value"));
|
||||
///
|
||||
/// assert_eq!(jar.iter().count(), 1);
|
||||
/// assert_eq!(jar.delta().count(), 0);
|
||||
/// ```
|
||||
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
|
||||
self.sign_cookie(&mut cookie);
|
||||
self.parent.add_original(cookie);
|
||||
}
|
||||
|
||||
/// Signs the cookie's value assuring integrity and authenticity.
|
||||
fn sign_cookie(&self, cookie: &mut Cookie) {
|
||||
let digest = sign(&self.key, cookie.value().as_bytes());
|
||||
let mut new_value = base64::encode(digest.as_ref());
|
||||
new_value.push_str(cookie.value());
|
||||
cookie.set_value(new_value);
|
||||
|
||||
self.parent.add(cookie);
|
||||
}
|
||||
|
||||
/// Removes `cookie` from the parent jar.
|
||||
|
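The signed jar follows the same pattern, with `sign_cookie` prefixing a base64 HMAC digest to the plaintext value. A hedged sketch, again assuming the `secure` feature (the raw-value inequality is inferred from that digest prefix):

```rust
extern crate cookie;

use cookie::{Cookie, CookieJar, Key};

fn main() {
    let key = Key::generate();
    let mut jar = CookieJar::new();
    jar.signed(&key).add(Cookie::new("name", "value"));

    // The raw jar stores the digest-prefixed value...
    assert_ne!(jar.get("name").unwrap().value(), "value");
    // ...while the signed view verifies the digest and strips it off.
    assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value");
}
```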
|
|
@ -0,0 +1 @@
|
|||
{"files":{".travis.yml":"7a28ab46755ee3ed2ad3078ecec5f26cf1b95fa122d947edfc1a15bff4849ae8","CHANGELOG.md":"c134cbbcfdf39e86a51337715daca6498d000e019f2d0d5050d04e14e7ef5219","Cargo.toml":"a247839eb4e5a43632eee8727e969a23b4474a6d1b390ea4a19e3e714d8ba060","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"acc366bfcc7262f4719306196e40d59b4e832179adc9cfe2cd27cc710a6787ac","src/lib.rs":"6f50bc16841c93b80d588bbeae9d56b55a2f3a32fe5232fd6e748362b680b4ef"},"package":"f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"}
|
|
@ -0,0 +1,13 @@
|
|||
language: rust
|
||||
|
||||
rust:
|
||||
- stable
|
||||
- beta
|
||||
- nightly
|
||||
- 1.13.0
|
||||
|
||||
script:
|
||||
- cargo build
|
||||
- cargo build --release
|
||||
- cargo test
|
||||
- cargo test --release
|
|
@ -0,0 +1,18 @@
|
|||
# Changelog
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
|
||||
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.1.1] - 2017-11-29
|
||||
### Changed
|
||||
- Update `crossbeam-epoch` to `0.2.0`.
|
||||
|
||||
## 0.1.0 - 2017-11-26
|
||||
### Added
|
||||
- First implementation of the Chase-Lev deque.
|
||||
|
||||
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.1...HEAD
|
||||
[0.1.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.1.1
|
|
@ -0,0 +1,33 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.2.0"
|
||||
authors = ["The Crossbeam Project Developers"]
|
||||
description = "Concurrent work-stealing deque"
|
||||
homepage = "https://github.com/crossbeam-rs/crossbeam-deque"
|
||||
documentation = "https://docs.rs/crossbeam-deque"
|
||||
readme = "README.md"
|
||||
keywords = ["chase-lev", "lock-free", "scheduler", "scheduling"]
|
||||
categories = ["algorithms", "concurrency", "data-structures"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/crossbeam-rs/crossbeam-deque"
|
||||
[dependencies.crossbeam-epoch]
|
||||
version = "0.3.0"
|
||||
|
||||
[dependencies.crossbeam-utils]
|
||||
version = "0.2.1"
|
||||
[dev-dependencies.rand]
|
||||
version = "0.4"
|
||||
[badges.travis-ci]
|
||||
repository = "crossbeam-rs/crossbeam-deque"
|
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2010 The Rust Project Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,27 @@
|
|||
# Concurrent work-stealing deque
|
||||
|
||||
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-deque.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-deque)
|
||||
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-deque)
|
||||
[![Cargo](https://img.shields.io/crates/v/crossbeam-deque.svg)](https://crates.io/crates/crossbeam-deque)
|
||||
[![Documentation](https://docs.rs/crossbeam-deque/badge.svg)](https://docs.rs/crossbeam-deque)
|
||||
|
||||
## Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
crossbeam-deque = "0.1"
|
||||
```
|
||||
|
||||
Next, add this to your crate:
|
||||
|
||||
```rust
|
||||
extern crate crossbeam_deque;
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Licensed under the terms of MIT license and the Apache License (Version 2.0).
|
||||
|
||||
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
|
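A minimal work-stealing sketch matching this README, assuming `crossbeam-deque = "0.1"`. Under contention `Stealer::steal` can also return `Steal::Retry`, but this single-threaded example is deterministic:

```rust
extern crate crossbeam_deque;

use crossbeam_deque::{Deque, Steal};

fn main() {
    let d = Deque::new();
    let s = d.stealer();

    d.push(1);
    d.push(2);

    assert_eq!(s.steal(), Steal::Data(1)); // stealers take from the top
    assert_eq!(d.pop(), Some(2));          // the owner pops from the bottom
}
```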
(File diff not shown because of its large size.)
|
@ -1 +1 @@
|
|||
{"files":{".travis.yml":"7a28ab46755ee3ed2ad3078ecec5f26cf1b95fa122d947edfc1a15bff4849ae8","CHANGELOG.md":"c134cbbcfdf39e86a51337715daca6498d000e019f2d0d5050d04e14e7ef5219","Cargo.toml":"a247839eb4e5a43632eee8727e969a23b4474a6d1b390ea4a19e3e714d8ba060","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"acc366bfcc7262f4719306196e40d59b4e832179adc9cfe2cd27cc710a6787ac","src/lib.rs":"6f50bc16841c93b80d588bbeae9d56b55a2f3a32fe5232fd6e748362b680b4ef"},"package":"f739f8c5363aca78cfb059edf753d8f0d36908c348f3d8d1503f03d8b75d9cf3"}
|
||||
{"files":{".travis.yml":"98bac4b37c60606a62a0e81a4a882a11f308637d3d946ca395422d9f9274dea1","CHANGELOG.md":"44023168ca8df497a6bf6145965d3eca080744dd0c1bb3f638d907451b9a47df","Cargo.toml":"777ef5e8132243b5096ce9e3f16cfd400d9216b0cf3f02ae3e1ecc0774f78de6","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"27ce503b57a65de4b2a3da3bbacc0ade00230495cc5cc63d2fbbb565d999ac64","src/lib.rs":"d4fac3875f95541899fa7cb79bc0d83c706c81d548a60d6c5f1b99ef4ba2b51c"},"package":"fe8153ef04a7594ded05b427ffad46ddeaf22e63fd48d42b3e1e3bb4db07cae7"}
|
|
@ -4,7 +4,7 @@ rust:
|
|||
- stable
|
||||
- beta
|
||||
- nightly
|
||||
- 1.13.0
|
||||
- 1.20.0
|
||||
|
||||
script:
|
||||
- cargo build
|
||||
|
|
|
@ -6,7 +6,32 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
|
|||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.3.1] - 2018-05-04
|
||||
|
||||
### Added
|
||||
- `Deque::capacity`
|
||||
- `Deque::min_capacity`
|
||||
- `Deque::shrink_to_fit`
|
||||
|
||||
### Changed
|
||||
- Update `crossbeam-epoch` to `0.3.0`.
|
||||
- Support Rust 1.20.
|
||||
- Shrink the buffer in `Deque::push` if necessary.
|
||||
|
||||
## [0.3.0] - 2018-02-10
|
||||
|
||||
### Changed
|
||||
- Update `crossbeam-epoch` to `0.4.0`.
|
||||
- Drop support for Rust 1.13.
|
||||
|
||||
## [0.2.0] - 2018-02-10
|
||||
|
||||
### Changed
|
||||
- Update `crossbeam-epoch` to `0.3.0`.
|
||||
- Support Rust 1.13.
|
||||
|
||||
## [0.1.1] - 2017-11-29
|
||||
|
||||
### Changed
|
||||
- Update `crossbeam-epoch` to `0.2.0`.
|
||||
|
||||
|
@ -14,5 +39,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
|
|||
### Added
|
||||
- First implementation of the Chase-Lev deque.
|
||||
|
||||
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.1...HEAD
|
||||
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.3.1...HEAD
|
||||
[0.3.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.3.0...v0.3.1
|
||||
[0.3.0]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.2.0...v0.3.0
|
||||
[0.2.0]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.2.0
|
||||
[0.1.1]: https://github.com/crossbeam-rs/crossbeam-deque/compare/v0.1.0...v0.1.1
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
|
||||
[package]
|
||||
name = "crossbeam-deque"
|
||||
version = "0.2.0"
|
||||
version = "0.3.1"
|
||||
authors = ["The Crossbeam Project Developers"]
|
||||
description = "Concurrent work-stealing deque"
|
||||
homepage = "https://github.com/crossbeam-rs/crossbeam-deque"
|
||||
|
@ -23,10 +23,10 @@ categories = ["algorithms", "concurrency", "data-structures"]
|
|||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/crossbeam-rs/crossbeam-deque"
|
||||
[dependencies.crossbeam-epoch]
|
||||
version = "0.3.0"
|
||||
version = "0.4.0"
|
||||
|
||||
[dependencies.crossbeam-utils]
|
||||
version = "0.2.1"
|
||||
version = "0.3"
|
||||
[dev-dependencies.rand]
|
||||
version = "0.4"
|
||||
[badges.travis-ci]
|
||||
|
|
|
@ -11,7 +11,7 @@ Add this to your `Cargo.toml`:
|
|||
|
||||
```toml
|
||||
[dependencies]
|
||||
crossbeam-deque = "0.1"
|
||||
crossbeam-deque = "0.3"
|
||||
```
|
||||
|
||||
Next, add this to your crate:
|
||||
|
@ -20,6 +20,8 @@ Next, add this to your crate:
|
|||
extern crate crossbeam_deque;
|
||||
```
|
||||
|
||||
The minimum required Rust version is 1.20.
|
||||
|
||||
## License
|
||||
|
||||
Licensed under the terms of MIT license and the Apache License (Version 2.0).
|
||||
|
|
|
@ -85,6 +85,7 @@
|
|||
extern crate crossbeam_epoch as epoch;
|
||||
extern crate crossbeam_utils as utils;
|
||||
|
||||
use std::cmp;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem;
|
||||
|
@ -136,10 +137,7 @@ impl<T> Buffer<T> {
|
|||
let ptr = v.as_mut_ptr();
|
||||
mem::forget(v);
|
||||
|
||||
Buffer {
|
||||
ptr: ptr,
|
||||
cap: cap,
|
||||
}
|
||||
Buffer { ptr, cap }
|
||||
}
|
||||
|
||||
/// Returns a pointer to the element at the specified `index`.
|
||||
|
@ -342,6 +340,8 @@ impl<T> Deque<T> {
|
|||
///
|
||||
/// // The minimum capacity will be rounded up to 1024.
|
||||
/// let d = Deque::<i32>::with_min_capacity(1000);
|
||||
/// assert_eq!(d.min_capacity(), 1024);
|
||||
/// assert_eq!(d.capacity(), 1024);
|
||||
/// ```
|
||||
pub fn with_min_capacity(min_cap: usize) -> Deque<T> {
|
||||
Deque {
|
||||
|
@ -385,6 +385,96 @@ impl<T> Deque<T> {
|
|||
b.wrapping_sub(t) as usize
|
||||
}
|
||||
|
||||
/// Returns the minimum capacity of the deque.
|
||||
///
|
||||
/// The minimum capacity can be specified in [`Deque::with_min_capacity`].
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use crossbeam_deque::Deque;
|
||||
///
|
||||
/// // Gets rounded to the next power of two.
|
||||
/// let d = Deque::<i32>::with_min_capacity(50);
|
||||
/// assert_eq!(d.min_capacity(), 64);
|
||||
/// assert_eq!(d.capacity(), 64);
|
||||
/// ```
|
||||
///
|
||||
/// [`Deque::with_min_capacity`]: struct.Deque.html#method.with_min_capacity
|
||||
pub fn min_capacity(&self) -> usize {
|
||||
self.inner.min_cap
|
||||
}
|
||||
|
||||
/// Returns the number of elements the deque can hold without reallocating.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use crossbeam_deque::Deque;
|
||||
///
|
||||
/// let d = Deque::with_min_capacity(50);
|
||||
/// assert_eq!(d.capacity(), 64);
|
||||
///
|
||||
/// for i in 0..200 {
|
||||
/// d.push(i);
|
||||
/// }
|
||||
/// assert_eq!(d.capacity(), 256);
|
||||
/// ```
|
||||
pub fn capacity(&self) -> usize {
|
||||
unsafe {
|
||||
let buf = self.inner.buffer.load(Relaxed, epoch::unprotected());
|
||||
buf.deref().cap
|
||||
}
|
||||
}
|
||||
|
||||
/// Shrinks the capacity of the deque as much as possible.
|
||||
///
|
||||
/// The capacity will drop down as close as possible to the length but there may still be some
|
||||
/// free space left.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use crossbeam_deque::Deque;
|
||||
///
|
||||
/// // Insert a lot of elements. This makes the buffer grow.
|
||||
/// let d = Deque::new();
|
||||
/// for i in 0..200 {
|
||||
/// d.push(i);
|
||||
/// }
|
||||
///
|
||||
/// // Remove all elements.
|
||||
/// let s = d.stealer();
|
||||
/// for i in 0..200 {
|
||||
/// s.steal();
|
||||
/// }
|
||||
///
|
||||
/// // Stealers cannot shrink the buffer, so the capacity is still very large.
|
||||
/// assert!(d.capacity() >= 200);
|
||||
///
|
||||
/// // Shrink the buffer. The capacity drops down, but some free space may still be left.
|
||||
/// d.shrink_to_fit();
|
||||
/// assert!(d.capacity() < 50);
|
||||
/// ```
|
||||
pub fn shrink_to_fit(&self) {
|
||||
let b = self.inner.bottom.load(Relaxed);
|
||||
let t = self.inner.top.load(Acquire);
|
||||
let cap = self.capacity();
|
||||
let len = b.wrapping_sub(t);
|
||||
|
||||
// Shrink the capacity as much as possible without overshooting `min_cap` or `len`.
|
||||
let mut new_cap = cap;
|
||||
while self.inner.min_cap <= new_cap / 2 && len <= new_cap as isize / 2 {
|
||||
new_cap /= 2;
|
||||
}
|
||||
|
||||
if new_cap != cap {
|
||||
unsafe {
|
||||
self.inner.resize(new_cap);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Pushes an element into the bottom of the deque.
|
||||
///
|
||||
/// If the internal buffer is full, a new one twice the capacity of the current one will be
|
||||
|
@ -411,12 +501,17 @@ impl<T> Deque<T> {
|
|||
// Calculate the length of the deque.
|
||||
let len = b.wrapping_sub(t);
|
||||
|
||||
// Is the deque full?
|
||||
let cap = buffer.deref().cap;
|
||||
// Is the deque full?
|
||||
if len >= cap as isize {
|
||||
// Yes. Grow the underlying buffer.
|
||||
self.inner.resize(2 * cap);
|
||||
buffer = self.inner.buffer.load(Relaxed, epoch::unprotected());
|
||||
// Is the new length less than one fourth the capacity?
|
||||
} else if cap > self.inner.min_cap && len + 1 < cap as isize / 4 {
|
||||
// Yes. Shrink the underlying buffer.
|
||||
self.inner.resize(cap / 2);
|
||||
buffer = self.inner.buffer.load(Relaxed, epoch::unprotected());
|
||||
}
|
||||
|
||||
// Write `value` into the right slot and increment `b`.
|
||||
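A dependency-free sketch of the resize policy this new `push` logic implements: the buffer doubles when full and halves when the deque falls below a quarter of capacity, never dropping under `min_cap`. The numbers are illustrative:

```rust
// Mirrors the branch structure in `Deque::push` above.
fn next_capacity(cap: usize, len: usize, min_cap: usize) -> usize {
    if len >= cap {
        2 * cap // full: grow
    } else if cap > min_cap && len + 1 < cap / 4 {
        cap / 2 // mostly empty: shrink
    } else {
        cap // leave as-is
    }
}

fn main() {
    assert_eq!(next_capacity(64, 64, 16), 128); // grow when full
    assert_eq!(next_capacity(64, 10, 16), 32);  // shrink when < 1/4 full
    assert_eq!(next_capacity(32, 10, 16), 32);  // but respect `min_cap`
}
```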
|
@ -531,16 +626,14 @@ impl<T> Deque<T> {
|
|||
/// assert_eq!(d.steal(), Steal::Data(1));
|
||||
///
|
||||
/// // Attempt to steal an element, but keep retrying if we get `Retry`.
|
||||
/// loop {
|
||||
/// let stolen = loop {
|
||||
/// match d.steal() {
|
||||
/// Steal::Empty => panic!("should steal something"),
|
||||
/// Steal::Data(data) => {
|
||||
/// assert_eq!(data, 2);
|
||||
/// break;
|
||||
/// }
|
||||
/// Steal::Empty => break None,
|
||||
/// Steal::Data(data) => break Some(data),
|
||||
/// Steal::Retry => {}
|
||||
/// }
|
||||
/// }
|
||||
/// };
|
||||
/// assert_eq!(stolen, Some(2));
|
||||
/// ```
|
||||
///
|
||||
/// [`Steal::Retry`]: enum.Steal.html#variant.Retry
|
||||
|
@ -669,7 +762,7 @@ impl<T> Stealer<T> {
|
|||
let t = self.inner.top.load(Relaxed);
|
||||
atomic::fence(SeqCst);
|
||||
let b = self.inner.bottom.load(Relaxed);
|
||||
std::cmp::max(b.wrapping_sub(t), 0) as usize
|
||||
cmp::max(b.wrapping_sub(t), 0) as usize
|
||||
}
|
||||
|
||||
/// Steals an element from the top of the deque.
|
||||
|
@ -691,16 +784,14 @@ impl<T> Stealer<T> {
|
|||
/// d.push(2);
|
||||
///
|
||||
/// // Attempt to steal an element, but keep retrying if we get `Retry`.
|
||||
/// loop {
|
||||
/// match d.steal() {
|
||||
/// Steal::Empty => panic!("should steal something"),
|
||||
/// Steal::Data(data) => {
|
||||
/// assert_eq!(data, 1);
|
||||
/// break;
|
||||
/// }
|
||||
/// let stolen = loop {
|
||||
/// match s.steal() {
|
||||
/// Steal::Empty => break None,
|
||||
/// Steal::Data(data) => break Some(data),
|
||||
/// Steal::Retry => {}
|
||||
/// }
|
||||
/// }
|
||||
/// };
|
||||
/// assert_eq!(stolen, Some(1));
|
||||
/// ```
|
||||
///
|
||||
/// [`Steal::Retry`]: enum.Steal.html#variant.Retry
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
{"files":{".travis.yml":"d84605e26d95fabc8172af7a621d3e48117b5180d389c6a166d15acb09c9ed9f","CHANGELOG.md":"5e62172f395348eb92a3fd2532ba5d65a7f13286449a3698b41f3aac7a9a4e57","Cargo.toml":"6bcfcac3b6b20026d1020890fcd8cd5f6ceff33741b92fea001993696e2aed17","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"8728114db9ab19bca8e07b36f1cccd1e6a57db6ea03be08679aef2a982736532","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"41b2d03e2cfd46912a3722295843b841e74e10eae6eb23586d3bc3b6d0a41e32","src/atomic.rs":"469ae38d3e8b37eec79c1c21a29a63cd357e49f34f4b6cdde6817f8e1267bd8d","src/collector.rs":"ebebbf1229a0d5339b938825d0dca9dc8642f9fa5bbceafb4e371477186ed4b4","src/default.rs":"804c217df80e0b6df3c6e90c5d6f5153c153567ac28cc75cc62042ba75d24bf2","src/deferred.rs":"1bd6c66c58f92714088b6f9f811368a123143a5f03cf4afc4b19ab24f3181387","src/epoch.rs":"25b85734a4ec5bedb0384a1fe976ec97056a88910a046a270a3e38558f7dbd4b","src/garbage.rs":"b77a8f87701dca8b63d858bb234137335455b6fc1f223e73c7609542d13daa43","src/guard.rs":"08975d989ba558aba90d64865594b155b2135e628414f77bb8afb9de427a2e0d","src/internal.rs":"a5a6a52999ce99294d544ac7cb82cb820e78f0c41315fc8d7494d21ca6da1135","src/lib.rs":"f3093bc3411f2bd94d662c3cf8719411b62793449b3db1699865f4c08c207af1","src/sync/list.rs":"57c3674c40e30eaf92689ab0e09973d7d161e52a5bdb5b5481b62fd0d10fb4eb","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"868b5bd651e54216fa1827d668ab564c120779113ae7a2a056fee4371db1066c"},"package":"927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150"}
|
|
@ -0,0 +1,64 @@
|
|||
language: rust
|
||||
|
||||
rust:
|
||||
- stable
|
||||
- beta
|
||||
- nightly
|
||||
- 1.13.0
|
||||
|
||||
addons:
|
||||
apt:
|
||||
sources:
|
||||
- ubuntu-toolchain-r-test
|
||||
- llvm-toolchain-precise
|
||||
- llvm-toolchain-precise-3.8
|
||||
packages:
|
||||
- llvm-3.8
|
||||
- llvm-3.8-dev
|
||||
- clang-3.8
|
||||
- clang-3.8-dev
|
||||
|
||||
script:
|
||||
- cargo build
|
||||
- cargo build --release
|
||||
- cargo build --no-default-features
|
||||
- cargo build --release --no-default-features
|
||||
- cargo test
|
||||
- cargo test --release
|
||||
|
||||
- |
|
||||
if [ $TRAVIS_RUST_VERSION == nightly ]; then
|
||||
cargo build --features nightly --no-default-features
|
||||
cargo build --features nightly --release --no-default-features
|
||||
fi
|
||||
|
||||
- |
|
||||
if [ $TRAVIS_RUST_VERSION == nightly ]; then
|
||||
cargo test --features nightly
|
||||
fi
|
||||
|
||||
- |
|
||||
if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
|
||||
cargo test --features nightly --release
|
||||
fi
|
||||
|
||||
- |
|
||||
if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
|
||||
ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \
|
||||
RUSTFLAGS="-Z sanitizer=address" \
|
||||
cargo run \
|
||||
--target x86_64-unknown-linux-gnu \
|
||||
--features sanitize,nightly \
|
||||
--example sanitize
|
||||
fi
|
||||
|
||||
- |
|
||||
if [[ $TRAVIS_RUST_VERSION == nightly ]]; then
|
||||
ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0" \
|
||||
RUSTFLAGS="-Z sanitizer=address" \
|
||||
cargo run \
|
||||
--release \
|
||||
--target x86_64-unknown-linux-gnu \
|
||||
--features sanitize,nightly \
|
||||
--example sanitize
|
||||
fi
|
|
@ -0,0 +1,26 @@
|
|||
# Changelog
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
|
||||
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [0.2.0] - 2017-11-29
|
||||
### Added
|
||||
- Add method `Owned::into_box`.
|
||||
|
||||
### Changed
|
||||
- Fix a use-after-free bug in `Local::finalize`.
|
||||
- Fix an ordering bug in `Global::push_bag`.
|
||||
- Fix a bug in calculating distance between epochs.
|
||||
|
||||
### Removed
|
||||
- Remove `impl<T> Into<Box<T>> for Owned<T>`.
|
||||
|
||||
## 0.1.0 - 2017-11-26
|
||||
### Added
|
||||
- First version of the new epoch-based GC.
|
||||
|
||||
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...HEAD
|
||||
[0.2.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.1.0...v0.2.0
|
|
@ -0,0 +1,57 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "crossbeam-epoch"
|
||||
version = "0.3.1"
|
||||
authors = ["The Crossbeam Project Developers"]
|
||||
description = "Epoch-based garbage collection"
|
||||
homepage = "https://github.com/crossbeam-rs/crossbeam-epoch"
|
||||
documentation = "https://docs.rs/crossbeam-epoch"
|
||||
readme = "README.md"
|
||||
keywords = ["lock-free", "rcu", "atomic", "garbage"]
|
||||
categories = ["concurrency", "memory-management"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/crossbeam-rs/crossbeam-epoch"
|
||||
[dependencies.arrayvec]
|
||||
version = "0.4"
|
||||
default-features = false
|
||||
|
||||
[dependencies.cfg-if]
|
||||
version = "0.1"
|
||||
|
||||
[dependencies.crossbeam-utils]
|
||||
version = "0.2"
|
||||
default-features = false
|
||||
|
||||
[dependencies.lazy_static]
|
||||
version = "1.0.0"
|
||||
optional = true
|
||||
|
||||
[dependencies.memoffset]
|
||||
version = "0.2"
|
||||
|
||||
[dependencies.nodrop]
|
||||
version = "0.1.12"
|
||||
default-features = false
|
||||
|
||||
[dependencies.scopeguard]
|
||||
version = "0.3"
|
||||
default-features = false
|
||||
[dev-dependencies.rand]
|
||||
version = "0.3"
|
||||
|
||||
[features]
|
||||
default = ["use_std"]
|
||||
nightly = ["arrayvec/use_union"]
|
||||
sanitize = []
|
||||
use_std = ["lazy_static", "crossbeam-utils/use_std"]
|
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2010 The Rust Project Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@ -0,0 +1,33 @@
|
|||
# Epoch-based garbage collection
|
||||
|
||||
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-epoch.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-epoch)
|
||||
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-epoch)
|
||||
[![Cargo](https://img.shields.io/crates/v/crossbeam-epoch.svg)](https://crates.io/crates/crossbeam-epoch)
|
||||
[![Documentation](https://docs.rs/crossbeam-epoch/badge.svg)](https://docs.rs/crossbeam-epoch)
|
||||
|
||||
This crate provides epoch-based garbage collection for use in concurrent data structures.
|
||||
|
||||
If a thread removes a node from a concurrent data structure, other threads
|
||||
may still have pointers to that node, so it cannot be immediately destructed.
|
||||
Epoch GC allows deferring destruction until it becomes safe to do so.
|
||||
|
||||
## Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
crossbeam-epoch = "0.2"
|
||||
```
|
||||
|
||||
Next, add this to your crate:
|
||||
|
||||
```rust
|
||||
extern crate crossbeam_epoch as epoch;
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Licensed under the terms of MIT license and the Apache License (Version 2.0).
|
||||
|
||||
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
|
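A minimal sketch of the pin/defer/flush pattern this README describes, assuming `crossbeam-epoch = "0.3"` (the vendored version); the benchmarks below use the same idiom:

```rust
extern crate crossbeam_epoch as epoch;

use epoch::Owned;

fn main() {
    let guard = &epoch::pin();                 // pin the current thread
    let p = Owned::new(42).into_shared(guard); // publish a heap-allocated node
    unsafe {
        // Destruction runs only once no pinned thread can still observe `p`.
        guard.defer(move || p.into_owned());
    }
    guard.flush(); // migrate deferred garbage toward the global queue
}
```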
|
@ -0,0 +1,73 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate crossbeam_epoch as epoch;
|
||||
extern crate crossbeam_utils as utils;
|
||||
extern crate test;
|
||||
|
||||
use epoch::Owned;
|
||||
use test::Bencher;
|
||||
use utils::scoped::scope;
|
||||
|
||||
#[bench]
|
||||
fn single_alloc_defer_free(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let guard = &epoch::pin();
|
||||
let p = Owned::new(1).into_shared(guard);
|
||||
unsafe {
|
||||
guard.defer(move || p.into_owned());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn single_defer(b: &mut Bencher) {
|
||||
b.iter(|| {
|
||||
let guard = &epoch::pin();
|
||||
unsafe {
|
||||
guard.defer(move || ());
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn multi_alloc_defer_free(b: &mut Bencher) {
|
||||
const THREADS: usize = 16;
|
||||
const STEPS: usize = 10_000;
|
||||
|
||||
b.iter(|| {
|
||||
scope(|s| {
|
||||
for _ in 0..THREADS {
|
||||
s.spawn(|| {
|
||||
for _ in 0..STEPS {
|
||||
let guard = &epoch::pin();
|
||||
let p = Owned::new(1).into_shared(guard);
|
||||
unsafe {
|
||||
guard.defer(move || p.into_owned());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn multi_defer(b: &mut Bencher) {
|
||||
const THREADS: usize = 16;
|
||||
const STEPS: usize = 10_000;
|
||||
|
||||
b.iter(|| {
|
||||
scope(|s| {
|
||||
for _ in 0..THREADS {
|
||||
s.spawn(|| {
|
||||
for _ in 0..STEPS {
|
||||
let guard = &epoch::pin();
|
||||
unsafe {
|
||||
guard.defer(move || ());
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate crossbeam_epoch as epoch;
|
||||
extern crate crossbeam_utils as utils;
|
||||
extern crate test;
|
||||
|
||||
use std::sync::Barrier;
|
||||
|
||||
use test::Bencher;
|
||||
use utils::scoped::scope;
|
||||
|
||||
#[bench]
|
||||
fn single_flush(b: &mut Bencher) {
|
||||
const THREADS: usize = 16;
|
||||
|
||||
let start = Barrier::new(THREADS + 1);
|
||||
let end = Barrier::new(THREADS + 1);
|
||||
|
||||
scope(|s| {
|
||||
for _ in 0..THREADS {
|
||||
s.spawn(|| {
|
||||
epoch::pin();
|
||||
start.wait();
|
||||
end.wait();
|
||||
});
|
||||
}
|
||||
|
||||
start.wait();
|
||||
b.iter(|| epoch::pin().flush());
|
||||
end.wait();
|
||||
});
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn multi_flush(b: &mut Bencher) {
|
||||
const THREADS: usize = 16;
|
||||
const STEPS: usize = 10_000;
|
||||
|
||||
b.iter(|| {
|
||||
scope(|s| {
|
||||
for _ in 0..THREADS {
|
||||
s.spawn(|| {
|
||||
for _ in 0..STEPS {
|
||||
let guard = &epoch::pin();
|
||||
guard.flush();
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
#![feature(test)]
|
||||
|
||||
extern crate crossbeam_epoch as epoch;
|
||||
extern crate crossbeam_utils as utils;
|
||||
extern crate test;
|
||||
|
||||
use test::Bencher;
|
||||
use utils::scoped::scope;
|
||||
|
||||
#[bench]
|
||||
fn single_pin(b: &mut Bencher) {
|
||||
b.iter(|| epoch::pin());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn single_default_handle_pin(b: &mut Bencher) {
|
||||
b.iter(|| epoch::default_handle().pin());
|
||||
}
|
||||
|
||||
#[bench]
|
||||
fn multi_pin(b: &mut Bencher) {
|
||||
const THREADS: usize = 16;
|
||||
const STEPS: usize = 100_000;
|
||||
|
||||
b.iter(|| {
|
||||
scope(|s| {
|
||||
for _ in 0..THREADS {
|
||||
s.spawn(|| {
|
||||
for _ in 0..STEPS {
|
||||
epoch::pin();
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
|
@ -0,0 +1,70 @@
|
|||
extern crate crossbeam_epoch as epoch;
|
||||
extern crate rand;
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::AtomicUsize;
|
||||
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed};
|
||||
use std::time::{Duration, Instant};
|
||||
use std::thread;
|
||||
|
||||
use epoch::{Atomic, Collector, Handle, Owned, Shared};
|
||||
use rand::Rng;
|
||||
|
||||
fn worker(a: Arc<Atomic<AtomicUsize>>, handle: Handle) -> usize {
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut sum = 0;
|
||||
|
||||
if rng.gen() {
|
||||
thread::sleep(Duration::from_millis(1));
|
||||
}
|
||||
let timeout = Duration::from_millis(rng.gen_range(0, 10));
|
||||
let now = Instant::now();
|
||||
|
||||
while now.elapsed() < timeout {
|
||||
for _ in 0..100 {
|
||||
let guard = &handle.pin();
|
||||
guard.flush();
|
||||
|
||||
let val = if rng.gen() {
|
||||
let p = a.swap(Owned::new(AtomicUsize::new(sum)), AcqRel, guard);
|
||||
unsafe {
|
||||
guard.defer(move || p.into_owned());
|
||||
guard.flush();
|
||||
p.deref().load(Relaxed)
|
||||
}
|
||||
} else {
|
||||
let p = a.load(Acquire, guard);
|
||||
unsafe {
|
||||
p.deref().fetch_add(sum, Relaxed)
|
||||
}
|
||||
};
|
||||
|
||||
sum = sum.wrapping_add(val);
|
||||
}
|
||||
}
|
||||
|
||||
sum
|
||||
}
|
||||
|
||||
fn main() {
|
||||
for _ in 0..100 {
|
||||
let collector = Collector::new();
|
||||
let a = Arc::new(Atomic::new(AtomicUsize::new(777)));
|
||||
|
||||
let threads = (0..16)
|
||||
.map(|_| {
|
||||
let a = a.clone();
|
||||
let h = collector.handle();
|
||||
thread::spawn(move || worker(a, h))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for t in threads {
|
||||
t.join().unwrap();
|
||||
}
|
||||
|
||||
unsafe {
|
||||
a.swap(Shared::null(), AcqRel, epoch::unprotected()).into_owned();
|
||||
}
|
||||
}
|
||||
}
|
(File diff not shown because of its large size.)
|
@ -0,0 +1,426 @@
|
|||
/// Epoch-based garbage collector.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use crossbeam_epoch::Collector;
|
||||
///
|
||||
/// let collector = Collector::new();
|
||||
///
|
||||
/// let handle = collector.handle();
|
||||
/// drop(collector); // `handle` still works after dropping `collector`
|
||||
///
|
||||
/// handle.pin().flush();
|
||||
/// ```
|
||||
|
||||
use alloc::arc::Arc;
|
||||
|
||||
use internal::{Global, Local};
|
||||
use guard::Guard;
|
||||
|
||||
/// An epoch-based garbage collector.
|
||||
pub struct Collector {
|
||||
global: Arc<Global>,
|
||||
}
|
||||
|
||||
unsafe impl Send for Collector {}
|
||||
unsafe impl Sync for Collector {}
|
||||
|
||||
impl Collector {
|
||||
/// Creates a new collector.
|
||||
pub fn new() -> Self {
|
||||
Collector { global: Arc::new(Global::new()) }
|
||||
}
|
||||
|
||||
/// Creates a new handle for the collector.
|
||||
pub fn handle(&self) -> Handle {
|
||||
Handle { local: Local::register(&self.global) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Collector {
|
||||
/// Creates another reference to the same garbage collector.
|
||||
fn clone(&self) -> Self {
|
||||
Collector { global: self.global.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
/// A handle to a garbage collector.
|
||||
pub struct Handle {
|
||||
local: *const Local,
|
||||
}
|
||||
|
||||
impl Handle {
|
||||
/// Pins the handle.
|
||||
#[inline]
|
||||
pub fn pin(&self) -> Guard {
|
||||
unsafe { (*self.local).pin() }
|
||||
}
|
||||
|
||||
/// Returns `true` if the handle is pinned.
|
||||
#[inline]
|
||||
pub fn is_pinned(&self) -> bool {
|
||||
unsafe { (*self.local).is_pinned() }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Send for Handle {}
|
||||
|
||||
impl Drop for Handle {
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
unsafe {
|
||||
Local::release_handle(&*self.local);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Handle {
|
||||
#[inline]
|
||||
fn clone(&self) -> Self {
|
||||
unsafe {
|
||||
Local::acquire_handle(&*self.local);
|
||||
}
|
||||
Handle { local: self.local }
|
||||
}
|
||||
}
|
||||
|
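A short sketch of the `Handle` semantics defined above, assuming `crossbeam-epoch = "0.3"`: handles are `Send` and `Clone`, and they keep the collector's global state alive even after the `Collector` itself is dropped:

```rust
extern crate crossbeam_epoch as epoch;

use std::thread;

use epoch::Collector;

fn main() {
    let collector = Collector::new();
    let handle = collector.handle();
    drop(collector); // handles keep the shared global state alive

    let h2 = handle.clone();
    let t = thread::spawn(move || {
        h2.pin().flush(); // pin from another thread via its own handle
    });

    handle.pin().flush();
    t.join().unwrap();
}
```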
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::mem;
|
||||
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crossbeam_utils::scoped;
|
||||
|
||||
use {Collector, Owned};
|
||||
|
||||
const NUM_THREADS: usize = 8;
|
||||
|
||||
#[test]
|
||||
fn pin_reentrant() {
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
drop(collector);
|
||||
|
||||
assert!(!handle.is_pinned());
|
||||
{
|
||||
let _guard = &handle.pin();
|
||||
assert!(handle.is_pinned());
|
||||
{
|
||||
let _guard = &handle.pin();
|
||||
assert!(handle.is_pinned());
|
||||
}
|
||||
assert!(handle.is_pinned());
|
||||
}
|
||||
assert!(!handle.is_pinned());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flush_local_bag() {
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
drop(collector);
|
||||
|
||||
for _ in 0..100 {
|
||||
let guard = &handle.pin();
|
||||
unsafe {
|
||||
let a = Owned::new(7).into_shared(guard);
|
||||
guard.defer(move || a.into_owned());
|
||||
|
||||
assert!(!(*guard.get_local()).is_bag_empty());
|
||||
|
||||
while !(*guard.get_local()).is_bag_empty() {
|
||||
guard.flush();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn garbage_buffering() {
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
drop(collector);
|
||||
|
||||
let guard = &handle.pin();
|
||||
unsafe {
|
||||
for _ in 0..10 {
|
||||
let a = Owned::new(7).into_shared(guard);
|
||||
guard.defer(move || a.into_owned());
|
||||
}
|
||||
assert!(!(*guard.get_local()).is_bag_empty());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pin_holds_advance() {
|
||||
let collector = Collector::new();
|
||||
|
||||
let threads = (0..NUM_THREADS)
|
||||
.map(|_| {
|
||||
scoped::scope(|scope| {
|
||||
scope.spawn(|| {
|
||||
let handle = collector.handle();
|
||||
for _ in 0..500_000 {
|
||||
let guard = &handle.pin();
|
||||
|
||||
let before = collector.global.load_epoch(Ordering::Relaxed);
|
||||
collector.global.collect(guard);
|
||||
let after = collector.global.load_epoch(Ordering::Relaxed);
|
||||
|
||||
assert!(after.wrapping_sub(before) <= 2);
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
drop(collector);
|
||||
|
||||
for t in threads {
|
||||
t.join();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn incremental() {
|
||||
const COUNT: usize = 100_000;
|
||||
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
|
||||
unsafe {
|
||||
let guard = &handle.pin();
|
||||
for _ in 0..COUNT {
|
||||
let a = Owned::new(7i32).into_shared(guard);
|
||||
guard.defer(move || {
|
||||
drop(a.into_owned());
|
||||
DESTROYS.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
guard.flush();
|
||||
}
|
||||
|
||||
let mut last = 0;
|
||||
|
||||
while last < COUNT {
|
||||
let curr = DESTROYS.load(Ordering::Relaxed);
|
||||
assert!(curr - last <= 1024);
|
||||
last = curr;
|
||||
|
||||
let guard = &handle.pin();
|
||||
collector.global.collect(guard);
|
||||
}
|
||||
assert!(DESTROYS.load(Ordering::Relaxed) == 100_000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn buffering() {
|
||||
const COUNT: usize = 10;
|
||||
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
|
||||
unsafe {
|
||||
let guard = &handle.pin();
|
||||
for _ in 0..COUNT {
|
||||
let a = Owned::new(7i32).into_shared(guard);
|
||||
guard.defer(move || {
|
||||
drop(a.into_owned());
|
||||
DESTROYS.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
for _ in 0..100_000 {
|
||||
collector.global.collect(&handle.pin());
|
||||
}
|
||||
assert!(DESTROYS.load(Ordering::Relaxed) < COUNT);
|
||||
|
||||
handle.pin().flush();
|
||||
|
||||
while DESTROYS.load(Ordering::Relaxed) < COUNT {
|
||||
let guard = &handle.pin();
|
||||
collector.global.collect(guard);
|
||||
}
|
||||
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count_drops() {
|
||||
const COUNT: usize = 100_000;
|
||||
static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
struct Elem(i32);
|
||||
|
||||
impl Drop for Elem {
|
||||
fn drop(&mut self) {
|
||||
DROPS.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
|
||||
unsafe {
|
||||
let guard = &handle.pin();
|
||||
|
||||
for _ in 0..COUNT {
|
||||
let a = Owned::new(Elem(7i32)).into_shared(guard);
|
||||
guard.defer(move || a.into_owned());
|
||||
}
|
||||
guard.flush();
|
||||
}
|
||||
|
||||
while DROPS.load(Ordering::Relaxed) < COUNT {
|
||||
let guard = &handle.pin();
|
||||
collector.global.collect(guard);
|
||||
}
|
||||
assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn count_destroy() {
|
||||
const COUNT: usize = 100_000;
|
||||
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
|
||||
unsafe {
|
||||
let guard = &handle.pin();
|
||||
|
||||
for _ in 0..COUNT {
|
||||
let a = Owned::new(7i32).into_shared(guard);
|
||||
guard.defer(move || {
|
||||
drop(a.into_owned());
|
||||
DESTROYS.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
}
|
||||
guard.flush();
|
||||
}
|
||||
|
||||
while DESTROYS.load(Ordering::Relaxed) < COUNT {
|
||||
let guard = &handle.pin();
|
||||
collector.global.collect(guard);
|
||||
}
|
||||
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drop_array() {
|
||||
const COUNT: usize = 700;
|
||||
static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
struct Elem(i32);
|
||||
|
||||
impl Drop for Elem {
|
||||
fn drop(&mut self) {
|
||||
DROPS.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
|
||||
let mut guard = handle.pin();
|
||||
|
||||
let mut v = Vec::with_capacity(COUNT);
|
||||
for i in 0..COUNT {
|
||||
v.push(Elem(i as i32));
|
||||
}
|
||||
|
||||
{
|
||||
let a = Owned::new(v).into_shared(&guard);
|
||||
unsafe { guard.defer(move || a.into_owned()); }
|
||||
guard.flush();
|
||||
}
|
||||
|
||||
while DROPS.load(Ordering::Relaxed) < COUNT {
|
||||
guard.repin();
|
||||
collector.global.collect(&guard);
|
||||
}
|
||||
assert_eq!(DROPS.load(Ordering::Relaxed), COUNT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn destroy_array() {
|
||||
const COUNT: usize = 100_000;
|
||||
static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
|
||||
unsafe {
|
||||
let guard = &handle.pin();
|
||||
|
||||
let mut v = Vec::with_capacity(COUNT);
|
||||
for i in 0..COUNT {
|
||||
v.push(i as i32);
|
||||
}
|
||||
|
||||
let ptr = v.as_mut_ptr() as usize;
|
||||
let len = v.len();
|
||||
guard.defer(move || {
|
||||
drop(Vec::from_raw_parts(ptr as *const u8 as *mut u8, len, len));
|
||||
DESTROYS.fetch_add(len, Ordering::Relaxed);
|
||||
});
|
||||
guard.flush();
|
||||
|
||||
mem::forget(v);
|
||||
}
|
||||
|
||||
while DESTROYS.load(Ordering::Relaxed) < COUNT {
|
||||
let guard = &handle.pin();
|
||||
collector.global.collect(guard);
|
||||
}
|
||||
assert_eq!(DESTROYS.load(Ordering::Relaxed), COUNT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stress() {
|
||||
const THREADS: usize = 8;
|
||||
const COUNT: usize = 100_000;
|
||||
static DROPS: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
|
||||
struct Elem(i32);
|
||||
|
||||
impl Drop for Elem {
|
||||
fn drop(&mut self) {
|
||||
DROPS.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
let collector = Collector::new();
|
||||
|
||||
let threads = (0..THREADS)
|
||||
.map(|_| {
|
||||
scoped::scope(|scope| {
|
||||
scope.spawn(|| {
|
||||
let handle = collector.handle();
|
||||
for _ in 0..COUNT {
|
||||
let guard = &handle.pin();
|
||||
unsafe {
|
||||
let a = Owned::new(Elem(7i32)).into_shared(guard);
|
||||
guard.defer(move || a.into_owned());
|
||||
}
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
for t in threads {
|
||||
t.join();
|
||||
}
|
||||
|
||||
let handle = collector.handle();
|
||||
while DROPS.load(Ordering::Relaxed) < COUNT * THREADS {
|
||||
let guard = &handle.pin();
|
||||
collector.global.collect(guard);
|
||||
}
|
||||
assert_eq!(DROPS.load(Ordering::Relaxed), COUNT * THREADS);
|
||||
}
|
||||
}
|
|
@ -0,0 +1,40 @@
//! The default garbage collector.
//!
//! For each thread, a participant is lazily initialized on its first use, when the current thread
//! is registered in the default collector. If initialized, the thread's participant will get
//! destructed on thread exit, which in turn unregisters the thread.

use collector::{Collector, Handle};
use guard::Guard;

lazy_static! {
    /// The global data for the default garbage collector.
    static ref COLLECTOR: Collector = Collector::new();
}

thread_local! {
    /// The per-thread participant for the default garbage collector.
    static HANDLE: Handle = COLLECTOR.handle();
}

/// Pins the current thread.
#[inline]
pub fn pin() -> Guard {
    // FIXME(jeehoonkang): thread-local storage may be destructed at the time `pin()` is called.
    // For that case, we should use `HANDLE.try_with()` instead.
    HANDLE.with(|handle| handle.pin())
}

/// Returns `true` if the current thread is pinned.
#[inline]
pub fn is_pinned() -> bool {
    // FIXME(jeehoonkang): thread-local storage may be destructed at the time `is_pinned()` is
    // called. For that case, we should use `HANDLE.try_with()` instead.
    HANDLE.with(|handle| handle.is_pinned())
}

/// Returns the default handle associated with the current thread.
#[inline]
pub fn default_handle() -> Handle {
    HANDLE.with(|handle| handle.clone())
}
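
// Editor's note: an illustrative test (not part of the upstream crate) for the default
// collector above. Each test thread gets its own lazily initialized `HANDLE`, so the thread
// starts out unpinned.
#[cfg(test)]
mod default_sketch {
    #[test]
    fn pin_unpin_default() {
        assert!(!super::is_pinned());
        {
            let _guard = super::pin(); // pins via the thread-local `HANDLE`
            assert!(super::is_pinned());
        }
        assert!(!super::is_pinned());

        // `default_handle` clones the thread-local handle; pinning the clone pins the
        // same per-thread participant.
        let handle = super::default_handle();
        let _guard = handle.pin();
        assert!(super::is_pinned());
    }
}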
@ -0,0 +1,147 @@
use core::mem;
use core::ptr;
use alloc::boxed::Box;

/// Number of words a piece of `Data` can hold.
///
/// Three words should be enough for the majority of cases. For example, you can fit inside it the
/// function pointer together with a fat pointer representing an object that needs to be destroyed.
const DATA_WORDS: usize = 3;

/// Some space to keep a `FnOnce()` object on the stack.
type Data = [usize; DATA_WORDS];

/// A `FnOnce()` that is stored inline if small, or otherwise boxed on the heap.
///
/// This is a handy way of keeping an unsized `FnOnce()` within a sized structure.
pub struct Deferred {
    call: unsafe fn(*mut u8),
    data: Data,
}

impl Deferred {
    /// Constructs a new `Deferred` from a `FnOnce()`.
    pub fn new<F: FnOnce()>(f: F) -> Self {
        let size = mem::size_of::<F>();
        let align = mem::align_of::<F>();

        unsafe {
            if size <= mem::size_of::<Data>() && align <= mem::align_of::<Data>() {
                let mut data: Data = mem::uninitialized();
                ptr::write(&mut data as *mut Data as *mut F, f);

                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
                    let f: F = ptr::read(raw as *mut F);
                    f();
                }

                Deferred {
                    call: call::<F>,
                    data: data,
                }
            } else {
                let b: Box<F> = Box::new(f);
                let mut data: Data = mem::uninitialized();
                ptr::write(&mut data as *mut Data as *mut Box<F>, b);

                unsafe fn call<F: FnOnce()>(raw: *mut u8) {
                    let b: Box<F> = ptr::read(raw as *mut Box<F>);
                    (*b)();
                }

                Deferred {
                    call: call::<F>,
                    data: data,
                }
            }
        }
    }

    /// Calls the function or panics if it was already called.
    #[inline]
    pub fn call(&mut self) {
        unsafe fn fail(_: *mut u8) {
            panic!("cannot call `FnOnce` more than once");
        }

        let call = mem::replace(&mut self.call, fail);
        unsafe {
            call(&mut self.data as *mut Data as *mut u8);
        }
    }
}
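
// Editor's note: an illustrative test (not part of the upstream crate) showing which closures
// take the inline path in `Deferred::new` above. A closure's size is determined by its
// captures; anything no larger than `Data` (and no more strictly aligned) is stored inline,
// while larger closures go through a `Box`.
#[cfg(test)]
mod size_sketch {
    use core::mem;
    use super::Data;

    #[test]
    fn capture_sizes() {
        let small = [0usize; 1];
        let big = [0usize; 10];
        let f_small = move || drop(small);
        let f_big = move || drop(big);

        // `f_small` fits in the inline `Data` buffer; `f_big` does not.
        assert!(mem::size_of_val(&f_small) <= mem::size_of::<Data>());
        assert!(mem::size_of_val(&f_big) > mem::size_of::<Data>());
    }
}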
#[cfg(test)]
mod tests {
    use std::cell::Cell;
    use super::Deferred;

    #[test]
    fn on_stack() {
        let fired = &Cell::new(false);
        let a = [0usize; 1];

        let mut d = Deferred::new(move || {
            drop(a);
            fired.set(true);
        });

        assert!(!fired.get());
        d.call();
        assert!(fired.get());
    }

    #[test]
    fn on_heap() {
        let fired = &Cell::new(false);
        let a = [0usize; 10];

        let mut d = Deferred::new(move || {
            drop(a);
            fired.set(true);
        });

        assert!(!fired.get());
        d.call();
        assert!(fired.get());
    }

    #[test]
    #[should_panic(expected = "cannot call `FnOnce` more than once")]
    fn twice_on_stack() {
        let a = [0usize; 1];
        let mut d = Deferred::new(move || drop(a));
        d.call();
        d.call();
    }

    #[test]
    #[should_panic(expected = "cannot call `FnOnce` more than once")]
    fn twice_on_heap() {
        let a = [0usize; 10];
        let mut d = Deferred::new(move || drop(a));
        d.call();
        d.call();
    }

    #[test]
    fn string() {
        let a = "hello".to_string();
        let mut d = Deferred::new(move || assert_eq!(a, "hello"));
        d.call();
    }

    #[test]
    fn boxed_slice_i32() {
        let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
        let mut d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7]));
        d.call();
    }

    #[test]
    fn long_slice_usize() {
        let a: [usize; 5] = [2, 3, 5, 7, 11];
        let mut d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11]));
        d.call();
    }
}
@ -0,0 +1,106 @@
//! The global epoch.
//!
//! The last bit in this number is unused and is always zero. Every so often the global epoch is
//! incremented, i.e. we say it "advances". A pinned participant may advance the global epoch only
//! if all currently pinned participants have been pinned in the current epoch.
//!
//! If an object became garbage in some epoch, then we can be sure that after two advancements no
//! participant will hold a reference to it. That is the crux of safe memory reclamation.

use core::sync::atomic::{AtomicUsize, Ordering};

/// An epoch that can be marked as pinned or unpinned.
///
/// Internally, the epoch is represented as an integer that wraps around at some unspecified point
/// and a flag that represents whether it is pinned or unpinned.
#[derive(Copy, Clone, Default, Debug, Eq, PartialEq)]
pub struct Epoch {
    /// The least significant bit is set if pinned. The rest of the bits hold the epoch.
    data: usize,
}

impl Epoch {
    /// Returns the starting epoch in the unpinned state.
    #[inline]
    pub fn starting() -> Self {
        Self::default()
    }

    /// Returns the number of epochs `self` is ahead of `rhs`.
    ///
    /// Internally, epochs are represented as numbers in the range `(isize::MIN / 2) .. (isize::MAX
    /// / 2)`, so the returned distance will be in the same interval.
    pub fn wrapping_sub(self, rhs: Self) -> isize {
        // The result is the same as `(self.data & !1).wrapping_sub(rhs.data & !1) as isize >> 1`,
        // because any difference coming from the unmasked LSB of `self.data` is confined to the
        // lowest bit, which the shift operation discards.
        self.data.wrapping_sub(rhs.data & !1) as isize >> 1
    }

    /// Returns `true` if the epoch is marked as pinned.
    #[inline]
    pub fn is_pinned(self) -> bool {
        (self.data & 1) == 1
    }

    /// Returns the same epoch, but marked as pinned.
    #[inline]
    pub fn pinned(self) -> Epoch {
        Epoch { data: self.data | 1 }
    }

    /// Returns the same epoch, but marked as unpinned.
    #[inline]
    pub fn unpinned(self) -> Epoch {
        Epoch { data: self.data & !1 }
    }

    /// Returns the successor epoch.
    ///
    /// The returned epoch will be marked as pinned only if the previous one was as well.
    #[inline]
    pub fn successor(self) -> Epoch {
        Epoch { data: self.data.wrapping_add(2) }
    }
}
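
// Editor's note: an illustrative test (not part of the upstream crate) exercising the epoch
// encoding described above: the LSB is the pin flag, so `successor` adds 2 to the underlying
// integer while `wrapping_sub` reports the distance in whole epochs.
#[cfg(test)]
mod encoding_sketch {
    use super::Epoch;

    #[test]
    fn pin_flag_and_distance() {
        let e = Epoch::starting();
        assert!(!e.is_pinned());
        assert!(e.pinned().is_pinned());
        assert_eq!(e.pinned().unpinned(), e);

        // One advancement is a distance of exactly one epoch, and the pin flag survives it.
        let next = e.successor();
        assert_eq!(next.wrapping_sub(e), 1);
        assert!(e.pinned().successor().is_pinned());
    }
}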

/// An atomic value that holds an `Epoch`.
#[derive(Default, Debug)]
pub struct AtomicEpoch {
    /// Since `Epoch` is just a wrapper around `usize`, an `AtomicEpoch` is similarly represented
    /// using an `AtomicUsize`.
    data: AtomicUsize,
}

impl AtomicEpoch {
    /// Creates a new atomic epoch.
    #[inline]
    pub fn new(epoch: Epoch) -> Self {
        let data = AtomicUsize::new(epoch.data);
        AtomicEpoch { data: data }
    }

    /// Loads a value from the atomic epoch.
    #[inline]
    pub fn load(&self, ord: Ordering) -> Epoch {
        Epoch { data: self.data.load(ord) }
    }

    /// Stores a value into the atomic epoch.
    #[inline]
    pub fn store(&self, epoch: Epoch, ord: Ordering) {
        self.data.store(epoch.data, ord);
    }

    /// Stores a value into the atomic epoch if the current value is the same as `current`.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// is updated.
    ///
    /// The `Ordering` argument describes the memory ordering of this operation.
    #[inline]
    pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch {
        let data = self.data.compare_and_swap(current.data, new.data, ord);
        Epoch { data: data }
    }
}
@ -0,0 +1,417 @@
use core::ptr;
use core::mem;

use garbage::Garbage;
use internal::Local;

/// A guard that keeps the current thread pinned.
///
/// # Pinning
///
/// The current thread is pinned by calling [`pin`], which returns a new guard:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// // It is often convenient to prefix a call to `pin` with a `&` in order to create a reference.
/// // This is not really necessary, but makes passing references to the guard a bit easier.
/// let guard = &epoch::pin();
/// ```
///
/// When a guard gets dropped, the current thread is automatically unpinned.
///
/// # Pointers on the stack
///
/// Having a guard allows us to create pointers on the stack to heap-allocated objects.
/// For example:
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
/// use std::sync::atomic::Ordering::SeqCst;
///
/// // Create a heap-allocated number.
/// let a = Atomic::new(777);
///
/// // Pin the current thread.
/// let guard = &epoch::pin();
///
/// // Load the heap-allocated object and create pointer `p` on the stack.
/// let p = a.load(SeqCst, guard);
///
/// // Dereference the pointer and print the value:
/// if let Some(num) = unsafe { p.as_ref() } {
///     println!("The number is {}.", num);
/// }
/// ```
///
/// # Multiple guards
///
/// Pinning is reentrant and it is perfectly legal to create multiple guards. In that case, the
/// thread will actually be pinned only when the first guard is created and unpinned when the last
/// one is dropped:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = epoch::pin();
/// assert!(epoch::is_pinned());
/// drop(guard1);
/// assert!(epoch::is_pinned());
/// drop(guard2);
/// assert!(!epoch::is_pinned());
/// ```
///
/// The same can be achieved by cloning guards:
///
/// ```
/// use crossbeam_epoch as epoch;
///
/// let guard1 = epoch::pin();
/// let guard2 = guard1.clone();
/// ```
///
/// [`pin`]: fn.pin.html
pub struct Guard {
    local: *const Local,
}

impl Guard {
    /// Creates a new guard from a pointer to `Local`.
    ///
    /// # Safety
    ///
    /// The `local` should be a valid pointer created by `Local::register()`.
    #[doc(hidden)]
    pub unsafe fn new(local: *const Local) -> Guard {
        Guard { local: local }
    }

    /// Accesses the internal pointer to `Local`.
    #[doc(hidden)]
    pub unsafe fn get_local(&self) -> *const Local {
        self.local
    }

    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
    /// This method first stores `f` into the thread-local (or handle-local) cache. If this cache
    /// becomes full, some functions are moved into the global cache. At the same time, some
    /// functions from both local and global caches may get executed in order to incrementally
    /// clean up the caches as they fill up.
    ///
    /// There is no guarantee when exactly `f` will be executed. The only guarantee is that it
    /// won't be executed until all currently pinned threads get unpinned. In theory, `f` might
    /// never run, but the epoch-based garbage collection will make an effort to execute it
    /// reasonably soon.
    ///
    /// If this method is called from an [`unprotected`] guard, the function will simply be
    /// executed immediately.
    ///
    /// # Safety
    ///
    /// The given function must not hold any references onto the stack. It is highly recommended
    /// that the passed function is **always** marked with `move` in order to prevent accidental
    /// borrows.
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// let message = "Hello!";
    /// unsafe {
    ///     // ALWAYS use `move` when sending a closure into `defer`.
    ///     guard.defer(move || {
    ///         println!("{}", message);
    ///     });
    /// }
    /// ```
    ///
    /// Apart from that, keep in mind that another thread may execute `f`, so anything accessed
    /// by the closure must be `Send`.
    ///
    /// # Examples
    ///
    /// When a heap-allocated object in a data structure becomes unreachable, it has to be
    /// deallocated. However, the current thread and other threads may be still holding references
    /// on the stack to that same object. Therefore it cannot be deallocated before those
    /// references get dropped. This method can defer deallocation until all those threads get
    /// unpinned and consequently drop all their references on the stack.
    ///
    /// ```rust
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new("foo");
    ///
    /// // Now suppose that `a` is shared among multiple threads and concurrently
    /// // accessed and modified...
    ///
    /// // Pin the current thread.
    /// let guard = &epoch::pin();
    ///
    /// // Steal the object currently stored in `a` and swap it with another one.
    /// let p = a.swap(Owned::new("bar").into_shared(guard), SeqCst, guard);
    ///
    /// if !p.is_null() {
    ///     // The object `p` is pointing to is now unreachable.
    ///     // Defer its deallocation until all currently pinned threads get unpinned.
    ///     unsafe {
    ///         // ALWAYS use `move` when sending a closure into `defer`.
    ///         guard.defer(move || {
    ///             println!("{} is now being deallocated.", p.deref());
    ///             // Now we have unique access to the object pointed to by `p` and can turn it
    ///             // into an `Owned`. Dropping the `Owned` will deallocate the object.
    ///             drop(p.into_owned());
    ///         });
    ///     }
    /// }
    /// ```
    ///
    /// [`unprotected`]: fn.unprotected.html
    pub unsafe fn defer<F, R>(&self, f: F)
    where
        F: FnOnce() -> R,
    {
        let garbage = Garbage::new(|| drop(f()));

        if let Some(local) = self.local.as_ref() {
            local.defer(garbage, self);
        }
    }

    /// Clears up the thread-local cache of deferred functions by executing them or moving into
    /// the global cache.
    ///
    /// Call this method after deferring execution of a function if you want to get it executed
    /// as soon as possible. Flushing will make sure it is residing in the global cache, so that
    /// any thread has a chance of taking the function and executing it.
    ///
    /// If this method is called from an [`unprotected`] guard, it is a no-op (nothing happens).
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch as epoch;
    ///
    /// let guard = &epoch::pin();
    /// unsafe {
    ///     guard.defer(move || {
    ///         println!("This better be printed as soon as possible!");
    ///     });
    /// }
    /// guard.flush();
    /// ```
    ///
    /// [`unprotected`]: fn.unprotected.html
    pub fn flush(&self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.flush(self);
        }
    }

    /// Unpins and then immediately re-pins the thread.
    ///
    /// This method is useful when you don't want to delay the advancement of the global epoch by
    /// holding an old epoch. For safety, you should not maintain any guard-based reference across
    /// the call (the latter is enforced by `&mut self`). The thread will only be repinned if this
    /// is the only active guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the call is just a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// ```
    ///
    /// [`unprotected`]: fn.unprotected.html
    pub fn repin(&mut self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.repin();
        }
    }

    /// Temporarily unpins the thread, executes the given function and then re-pins the thread.
    ///
    /// This method is useful when you need to perform a long-running operation (e.g. sleeping)
    /// and don't need to maintain any guard-based reference across the call (the latter is
    /// enforced by `&mut self`). The thread will only be unpinned if this is the only active
    /// guard for the current thread.
    ///
    /// If this method is called from an [`unprotected`] guard, then the passed function is called
    /// directly without unpinning the thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use crossbeam_epoch::{self as epoch, Atomic};
    /// use std::sync::atomic::Ordering::SeqCst;
    /// use std::thread;
    /// use std::time::Duration;
    ///
    /// let a = Atomic::new(777);
    /// let mut guard = epoch::pin();
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// guard.repin_after(|| thread::sleep(Duration::from_millis(50)));
    /// {
    ///     let p = a.load(SeqCst, &guard);
    ///     assert_eq!(unsafe { p.as_ref() }, Some(&777));
    /// }
    /// ```
    ///
    /// [`unprotected`]: fn.unprotected.html
    pub fn repin_after<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce() -> R,
    {
        if let Some(local) = unsafe { self.local.as_ref() } {
            // We need to acquire a handle here to ensure the Local doesn't
            // disappear from under us.
            local.acquire_handle();
            local.unpin();
        }

        // Ensure the Guard is re-pinned even if the function panics
        defer! {
            if let Some(local) = unsafe { self.local.as_ref() } {
                mem::forget(local.pin());
                local.release_handle();
            }
        }

        f()
    }
}

impl Drop for Guard {
    #[inline]
    fn drop(&mut self) {
        if let Some(local) = unsafe { self.local.as_ref() } {
            local.unpin();
        }
    }
}

impl Clone for Guard {
    #[inline]
    fn clone(&self) -> Guard {
        match unsafe { self.local.as_ref() } {
            None => Guard { local: ptr::null() },
            Some(local) => local.pin(),
        }
    }
}

/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
///
/// This guard should be used in special occasions only. Note that it doesn't actually keep any
/// thread pinned - it's just a fake guard that allows loading from [`Atomic`]s unsafely.
///
/// Note that calling [`defer`] with a dummy guard will not defer the function - it will just
/// execute the function immediately.
///
/// If necessary, it's possible to create more dummy guards by cloning: `unprotected().clone()`.
///
/// # Safety
///
/// Loading and dereferencing data from an [`Atomic`] using this guard is safe only if the
/// [`Atomic`] is not being concurrently modified by other threads.
///
/// # Examples
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::sync::atomic::Ordering::Relaxed;
///
/// let a = Atomic::new(7);
///
/// unsafe {
///     // Load `a` without pinning the current thread.
///     a.load(Relaxed, epoch::unprotected());
///
///     // It's possible to create more dummy guards by calling `clone()`.
///     let dummy = &epoch::unprotected().clone();
///
///     dummy.defer(move || {
///         println!("This gets executed immediately.");
///     });
///
///     // Dropping `dummy` doesn't affect the current thread - it's just a no-op.
/// }
/// ```
///
/// The most common use of this function is when constructing or destructing a data structure.
///
/// For example, we can use a dummy guard in the destructor of a Treiber stack because at that
/// point no other thread could concurrently modify the [`Atomic`]s we are accessing.
///
/// If we were to actually pin the current thread during destruction, that would just unnecessarily
/// delay garbage collection and incur some performance cost, so in cases like these `unprotected`
/// is very helpful.
///
/// ```
/// use crossbeam_epoch::{self as epoch, Atomic};
/// use std::ptr;
/// use std::sync::atomic::Ordering::Relaxed;
///
/// struct Stack {
///     head: epoch::Atomic<Node>,
/// }
///
/// struct Node {
///     data: u32,
///     next: epoch::Atomic<Node>,
/// }
///
/// impl Drop for Stack {
///     fn drop(&mut self) {
///         unsafe {
///             // Unprotected load.
///             let mut node = self.head.load(Relaxed, epoch::unprotected());
///
///             while let Some(n) = node.as_ref() {
///                 // Unprotected load.
///                 let next = n.next.load(Relaxed, epoch::unprotected());
///
///                 // Take ownership of the node, then drop it.
///                 drop(node.into_owned());
///
///                 node = next;
///             }
///         }
///     }
/// }
/// ```
///
/// [`Atomic`]: struct.Atomic.html
/// [`defer`]: struct.Guard.html#method.defer
#[inline]
pub unsafe fn unprotected() -> &'static Guard {
    // HACK(stjepang): An unprotected guard is just a `Guard` with its field `local` set to null.
    // Since this function returns a `'static` reference to a `Guard`, we must return a reference
    // to a global guard. However, it's not possible to create a `static` `Guard` because it does
    // not implement `Sync`. To get around the problem, we create a static `usize` initialized to
    // zero and then transmute it into a `Guard`. This is safe because `usize` and `Guard`
    // (consisting of a single pointer) have the same representation in memory.
    static UNPROTECTED: usize = 0;
    &*(&UNPROTECTED as *const _ as *const Guard)
}
@ -0,0 +1,409 @@
//! The global data and participant for garbage collection.
//!
//! # Registration
//!
//! In order to track all participants in one place, we need some form of participant
//! registration. When a participant is created, it is registered to a global lock-free
//! singly-linked list of registries; and when a participant is leaving, it is unregistered from
//! the list.
//!
//! # Pinning
//!
//! Every participant contains an integer that tells whether the participant is pinned and if so,
//! what was the global epoch at the time it was pinned. Participants also hold a pin counter that
//! aids in periodic global epoch advancement.
//!
//! When a participant is pinned, a `Guard` is returned as a witness that the participant is
//! pinned. Guards are necessary for performing atomic operations, and for freeing/dropping
//! locations.

use core::cell::{Cell, UnsafeCell};
use core::mem;
use core::num::Wrapping;
use core::ptr;
use core::sync::atomic;
use core::sync::atomic::Ordering;
use alloc::boxed::Box;
use alloc::arc::Arc;

use crossbeam_utils::cache_padded::CachePadded;
use nodrop::NoDrop;

use atomic::Owned;
use epoch::{AtomicEpoch, Epoch};
use guard::{unprotected, Guard};
use garbage::{Bag, Garbage};
use sync::list::{List, Entry, IterError, IsElement};
use sync::queue::Queue;

/// Maximum number of bags to destroy in one call to `collect`.
const COLLECT_STEPS: usize = 8;

/// Number of pinnings after which a participant will execute some deferred functions from the
/// global queue.
const PINNINGS_BETWEEN_COLLECT: usize = 128;

/// The global data for a garbage collector.
pub struct Global {
    /// The intrusive linked list of `Local`s.
    locals: List<Local>,

    /// The global queue of bags of deferred functions.
    queue: Queue<(Epoch, Bag)>,

    /// The global epoch.
    epoch: CachePadded<AtomicEpoch>,
}

impl Global {
    /// Creates new global data for garbage collection.
    #[inline]
    pub fn new() -> Global {
        Global {
            locals: List::new(),
            queue: Queue::new(),
            epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
        }
    }

    /// Returns the current global epoch.
    pub fn load_epoch(&self, ordering: Ordering) -> Epoch {
        self.epoch.load(ordering)
    }

    /// Pushes the bag into the global queue and replaces the bag with a new empty bag.
    pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
        let bag = mem::replace(bag, Bag::new());

        atomic::fence(Ordering::SeqCst);

        let epoch = self.epoch.load(Ordering::Relaxed);
        self.queue.push((epoch, bag), guard);
    }

    /// Collects several bags from the global queue and executes deferred functions in them.
    ///
    /// Note: This may itself produce garbage and in turn allocate new bags.
    ///
    /// `pin()` rarely calls `collect()`, so we want the compiler to place that call on a cold
    /// path. In other words, we want the compiler to optimize branching for the case when
    /// `collect()` is not called.
    #[cold]
    pub fn collect(&self, guard: &Guard) {
        let global_epoch = self.try_advance(guard);

        let condition = |item: &(Epoch, Bag)| {
            // A pinned participant can witness at most one epoch advancement. Therefore, any bag
            // that is within one epoch of the current one cannot be destroyed yet.
            global_epoch.wrapping_sub(item.0) >= 2
        };

        let steps = if cfg!(feature = "sanitize") {
            usize::max_value()
        } else {
            COLLECT_STEPS
        };

        for _ in 0..steps {
            match self.queue.try_pop_if(&condition, guard) {
                None => break,
                Some(bag) => drop(bag),
            }
        }
    }

    /// Attempts to advance the global epoch.
    ///
    /// The global epoch can advance only if all currently pinned participants have been pinned in
    /// the current epoch.
    ///
    /// Returns the current global epoch.
    ///
    /// `try_advance()` is annotated `#[cold]` because it is rarely called.
    #[cold]
    pub fn try_advance(&self, guard: &Guard) -> Epoch {
        let global_epoch = self.epoch.load(Ordering::Relaxed);
        atomic::fence(Ordering::SeqCst);

        // TODO(stjepang): `Local`s are stored in a linked list because linked lists are fairly
        // easy to implement in a lock-free manner. However, traversal can be slow due to cache
        // misses and data dependencies. We should experiment with other data structures as well.
        for local in self.locals.iter(guard) {
            match local {
                Err(IterError::Stalled) => {
                    // A concurrent thread stalled this iteration. That thread might also try to
                    // advance the epoch, in which case we leave the job to it. Otherwise, the
                    // epoch will not be advanced.
                    return global_epoch;
                }
                Ok(local) => {
                    let local_epoch = local.epoch.load(Ordering::Relaxed);

                    // If the participant was pinned in a different epoch, we cannot advance the
                    // global epoch just yet.
                    if local_epoch.is_pinned() && local_epoch.unpinned() != global_epoch {
                        return global_epoch;
                    }
                }
            }
        }
        atomic::fence(Ordering::Acquire);

        // All pinned participants were pinned in the current global epoch.
        // Now let's advance the global epoch...
        //
        // Note that if another thread already advanced it before us, this store will simply
        // overwrite the global epoch with the same value. This is true because `try_advance` was
        // called from a thread that was pinned in `global_epoch`, and the global epoch cannot be
        // advanced two steps ahead of it.
        let new_epoch = global_epoch.successor();
        self.epoch.store(new_epoch, Ordering::Release);
        new_epoch
    }
}
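
// Editor's note: an illustrative test (not part of the upstream crate) for the reclamation
// condition used in `collect` above: a bag stamped with some epoch may only be destroyed once
// the global epoch is at least two advancements ahead of that stamp.
#[cfg(test)]
mod condition_sketch {
    use epoch::Epoch;

    #[test]
    fn two_advancements_required() {
        let stamped = Epoch::starting();
        let one_ahead = stamped.successor();
        let two_ahead = one_ahead.successor();

        assert!(one_ahead.wrapping_sub(stamped) < 2); // a pinned thread may still be reading
        assert!(two_ahead.wrapping_sub(stamped) >= 2); // safe to destroy
    }
}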

/// Participant for garbage collection.
pub struct Local {
    /// A node in the intrusive linked list of `Local`s.
    entry: Entry,

    /// The local epoch.
    epoch: AtomicEpoch,

    /// A reference to the global data.
    ///
    /// When all guards and handles get dropped, this reference is destroyed.
    global: UnsafeCell<NoDrop<Arc<Global>>>,

    /// The local bag of deferred functions.
    bag: UnsafeCell<Bag>,

    /// The number of guards keeping this participant pinned.
    guard_count: Cell<usize>,

    /// The number of active handles.
    handle_count: Cell<usize>,

    /// Total number of pinnings performed.
    ///
    /// This is just an auxiliary counter that sometimes kicks off collection.
    pin_count: Cell<Wrapping<usize>>,
}

unsafe impl Sync for Local {}

impl Local {
    /// Registers a new `Local` in the provided `Global`.
    pub fn register(global: &Arc<Global>) -> *const Local {
        unsafe {
            // Since we dereference no pointers in this block, it is safe to use `unprotected`.

            let local = Owned::new(Local {
                entry: Entry::default(),
                epoch: AtomicEpoch::new(Epoch::starting()),
                global: UnsafeCell::new(NoDrop::new(global.clone())),
                bag: UnsafeCell::new(Bag::new()),
                guard_count: Cell::new(0),
                handle_count: Cell::new(1),
                pin_count: Cell::new(Wrapping(0)),
            }).into_shared(unprotected());
            global.locals.insert(local, unprotected());
            local.as_raw()
        }
    }

    /// Returns `true` if the local garbage bag is empty.
    #[inline]
    pub fn is_bag_empty(&self) -> bool {
        unsafe { (*self.bag.get()).is_empty() }
    }

    /// Returns a reference to the `Global` in which this `Local` resides.
    #[inline]
    pub fn global(&self) -> &Global {
        unsafe { &*self.global.get() }
    }

    /// Returns `true` if the current participant is pinned.
    #[inline]
    pub fn is_pinned(&self) -> bool {
        self.guard_count.get() > 0
    }

    /// Adds `garbage` to the local bag, flushing the bag into the global queue whenever it fills
    /// up.
    pub fn defer(&self, mut garbage: Garbage, guard: &Guard) {
        let bag = unsafe { &mut *self.bag.get() };

        while let Err(g) = bag.try_push(garbage) {
            self.global().push_bag(bag, guard);
            garbage = g;
        }
    }

    /// Pushes the local bag into the global queue (if non-empty) and collects some garbage.
    pub fn flush(&self, guard: &Guard) {
        let bag = unsafe { &mut *self.bag.get() };

        if !bag.is_empty() {
            self.global().push_bag(bag, guard);
        }

        self.global().collect(guard);
    }

    /// Pins the `Local`.
    #[inline]
    pub fn pin(&self) -> Guard {
        let guard = unsafe { Guard::new(self) };

        let guard_count = self.guard_count.get();
        self.guard_count.set(guard_count.checked_add(1).unwrap());

        if guard_count == 0 {
            let global_epoch = self.global().epoch.load(Ordering::Relaxed);
            let new_epoch = global_epoch.pinned();

            // Now we must store `new_epoch` into `self.epoch` and execute a `SeqCst` fence.
            // The fence makes sure that any future loads from `Atomic`s will not happen before
            // this store.
            if cfg!(any(target_arch = "x86", target_arch = "x86_64")) {
                // HACK(stjepang): On x86 architectures there are two different ways of executing
                // a `SeqCst` fence.
                //
                // 1. `atomic::fence(SeqCst)`, which compiles into a `mfence` instruction.
                // 2. `_.compare_and_swap(_, _, SeqCst)`, which compiles into a `lock cmpxchg`
                //    instruction.
                //
                // Both instructions have the effect of a full barrier, but benchmarks have shown
                // that the second one makes pinning faster in this particular case.
                let current = Epoch::starting();
                let previous = self.epoch.compare_and_swap(current, new_epoch, Ordering::SeqCst);
                debug_assert_eq!(current, previous, "participant was expected to be unpinned");
            } else {
                self.epoch.store(new_epoch, Ordering::Relaxed);
                atomic::fence(Ordering::SeqCst);
            }

            // Increment the pin counter.
            let count = self.pin_count.get();
            self.pin_count.set(count + Wrapping(1));

            // After every `PINNINGS_BETWEEN_COLLECT` pinnings, try advancing the epoch and
            // collecting some garbage.
            if count.0 % PINNINGS_BETWEEN_COLLECT == 0 {
                self.global().collect(&guard);
            }
        }

        guard
    }

    /// Unpins the `Local`.
    #[inline]
    pub fn unpin(&self) {
        let guard_count = self.guard_count.get();
        self.guard_count.set(guard_count - 1);

        if guard_count == 1 {
            self.epoch.store(Epoch::starting(), Ordering::Release);

            if self.handle_count.get() == 0 {
                self.finalize();
            }
        }
    }

    /// Unpins and then pins the `Local`.
    #[inline]
    pub fn repin(&self) {
        let guard_count = self.guard_count.get();

        // Update the local epoch only if there's only one guard.
        if guard_count == 1 {
            let epoch = self.epoch.load(Ordering::Relaxed);
            let global_epoch = self.global().epoch.load(Ordering::Relaxed);

            // Update the local epoch only if it differs from the global epoch.
            if epoch != global_epoch {
                // We store the new epoch with `Release` because we need to ensure any memory
                // accesses from the previous epoch do not leak into the new one.
                self.epoch.store(global_epoch, Ordering::Release);

                // However, we don't need a following `SeqCst` fence, because it is safe for
                // memory accesses from the new epoch to be executed before updating the local
                // epoch. At worst, other threads will see the new epoch late and delay GC
                // slightly.
            }
        }
    }

    /// Increments the handle count.
    #[inline]
    pub fn acquire_handle(&self) {
        let handle_count = self.handle_count.get();
        debug_assert!(handle_count >= 1);
        self.handle_count.set(handle_count + 1);
    }

    /// Decrements the handle count.
    #[inline]
    pub fn release_handle(&self) {
        let guard_count = self.guard_count.get();
        let handle_count = self.handle_count.get();
        debug_assert!(handle_count >= 1);
        self.handle_count.set(handle_count - 1);

        if guard_count == 0 && handle_count == 1 {
            self.finalize();
        }
    }

    /// Removes the `Local` from the global linked list.
    #[cold]
    fn finalize(&self) {
        debug_assert_eq!(self.guard_count.get(), 0);
        debug_assert_eq!(self.handle_count.get(), 0);

        // Temporarily increment the handle count. This is required so that the following call to
        // `pin` doesn't call `finalize` again.
        self.handle_count.set(1);
        unsafe {
            // Pin and move the local bag into the global queue. It's important that `push_bag`
            // doesn't defer destruction on any new garbage.
            let guard = &self.pin();
            self.global().push_bag(&mut *self.bag.get(), guard);
        }
        // Revert the handle count back to zero.
        self.handle_count.set(0);

        unsafe {
            // Take the reference to the `Global` out of this `Local`. Since we're not protected
            // by a guard at this time, it's crucial that the reference is read before marking the
            // `Local` as deleted.
            let global: Arc<Global> = ptr::read(&**self.global.get());

            // Mark this node in the linked list as deleted.
            self.entry.delete(unprotected());

            // Finally, drop the reference to the global. Note that this might be the last
            // reference to the `Global`. If so, the global data will be destroyed and all
            // deferred functions in its queue will be executed.
            drop(global);
        }
    }
}

impl IsElement<Local> for Local {
    fn entry_of(local: &Local) -> &Entry {
        let entry_ptr = (local as *const Local as usize + offset_of!(Local, entry)) as *const Entry;
        unsafe { &*entry_ptr }
    }

    unsafe fn element_of(entry: &Entry) -> &Local {
        // The `offset_of!` macro uses `unsafe`, but it's unnecessary in this context.
        #[allow(unused_unsafe)]
        let local_ptr = (entry as *const Entry as usize - offset_of!(Local, entry)) as *const Local;
        &*local_ptr
    }

    unsafe fn finalize(entry: &Entry) {
        let local = Self::element_of(entry);
        drop(Box::from_raw(local as *const Local as *mut Local));
    }
}
@ -0,0 +1,110 @@
//! Epoch-based memory reclamation.
//!
//! An interesting problem concurrent collections deal with comes from the remove operation.
//! Suppose that a thread removes an element from a lock-free map, while another thread is reading
//! that same element at the same time. The first thread must wait until the second thread stops
//! reading the element. Only then is it safe to destruct it.
//!
//! Programming languages that come with garbage collectors solve this problem trivially. The
//! garbage collector will destruct the removed element when no thread can hold a reference to it
//! anymore.
//!
//! This crate implements a basic memory reclamation mechanism, which is based on epochs. When an
//! element gets removed from a concurrent collection, it is inserted into a pile of garbage and
//! marked with the current epoch. Every time a thread accesses a collection, it checks the current
//! epoch, attempts to increment it, and destructs some garbage that became so old that no thread
//! can be referencing it anymore.
//!
//! That is the general mechanism behind epoch-based memory reclamation, but the details are a bit
//! more complicated. Anyhow, memory reclamation is designed to be fully automatic and something
//! users of concurrent collections don't have to worry much about.
//!
//! # Pointers
//!
//! Concurrent collections are built using atomic pointers. This module provides [`Atomic`], which
//! is just a shared atomic pointer to a heap-allocated object. Loading an [`Atomic`] yields a
//! [`Shared`], which is an epoch-protected pointer through which the loaded object can be safely
//! read.
//!
//! # Pinning
//!
//! Before an [`Atomic`] can be loaded, a participant must be [`pin`]ned. By pinning a participant
//! we declare that any object that gets removed from now on must not be destructed just
//! yet. Garbage collection of newly removed objects is suspended until the participant gets
//! unpinned.
//!
//! # Garbage
//!
//! Objects that get removed from concurrent collections must be stashed away until all currently
//! pinned participants get unpinned. Such objects can be stored into a [`Garbage`], where they are
//! kept until the right time for their destruction comes.
//!
//! There is a global shared instance of the garbage queue. You can [`defer`] the execution of an
//! arbitrary function until the global epoch is advanced far enough. Most notably, concurrent data
//! structures may defer the deallocation of an object.
//!
//! # APIs
//!
//! For the majority of use cases, just use the default garbage collector by invoking [`pin`]. If
//! you want to create your own garbage collector, use the [`Collector`] API.
//!
//! [`Atomic`]: struct.Atomic.html
//! [`Collector`]: struct.Collector.html
//! [`Shared`]: struct.Shared.html
//! [`pin`]: fn.pin.html
//! [`defer`]: fn.defer.html
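//!
//! # Example
//!
//! A sketch of typical usage (an editorial illustration, assuming the default `use_std`
//! collector; it mirrors the `defer` example on `Guard`):
//!
//! ```
//! use crossbeam_epoch::{self as epoch, Atomic, Owned};
//! use std::sync::atomic::Ordering::SeqCst;
//!
//! let a = Atomic::new(1234);
//!
//! // Pin the current thread and replace the stored value.
//! let guard = &epoch::pin();
//! let p = a.swap(Owned::new(5678).into_shared(guard), SeqCst, guard);
//!
//! unsafe {
//!     // Defer destruction of the old value until no pinned thread can still see it.
//!     guard.defer(move || drop(p.into_owned()));
//! }
//! ```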

#![cfg_attr(feature = "nightly", feature(const_fn))]
#![cfg_attr(feature = "nightly", feature(alloc))]
#![cfg_attr(not(test), no_std)]

#[cfg(all(not(test), feature = "use_std"))]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate core;

// Use liballoc on nightly to avoid a dependency on libstd
#[cfg(feature = "nightly")]
extern crate alloc;
#[cfg(not(feature = "nightly"))]
mod alloc {
    // Tweak the module layout to match the one in liballoc
    extern crate std;
    pub use self::std::boxed;
    pub use self::std::sync as arc;
}

#[cfg(feature = "manually_drop")]
mod nodrop {
    pub use std::mem::ManuallyDrop as NoDrop;
}
#[cfg(not(feature = "manually_drop"))]
extern crate nodrop;

extern crate arrayvec;
extern crate crossbeam_utils;
#[cfg(feature = "use_std")]
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate memoffset;
#[macro_use]
extern crate scopeguard;

mod atomic;
mod collector;
#[cfg(feature = "use_std")]
mod default;
mod deferred;
mod epoch;
mod garbage;
mod guard;
mod internal;
mod sync;

pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared};
pub use self::guard::{unprotected, Guard};
#[cfg(feature = "use_std")]
pub use self::default::{default_handle, is_pinned, pin};
pub use self::collector::{Collector, Handle};
@ -0,0 +1,473 @@
//! Lock-free intrusive linked list.
//!
//! Ideas from Michael. High Performance Dynamic Lock-Free Hash Tables and List-Based Sets. SPAA
//! 2002. http://dl.acm.org/citation.cfm?id=564870.564881

use core::marker::PhantomData;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};

use {Atomic, Shared, Guard, unprotected};

/// An entry in a linked list.
///
/// An `Entry` is accessed from multiple threads, so for performance it is beneficial to place it
/// in a different cache line than the thread-local data.
#[derive(Debug)]
pub struct Entry {
    /// The next entry in the linked list.
    /// If the tag is 1, this entry is marked as deleted.
    next: Atomic<Entry>,
}

/// Implementing this trait asserts that the type `T` can be used as an element in the intrusive
/// linked list defined in this module. `T` has to contain (or otherwise be linked to) an instance
/// of `Entry`.
///
/// # Example
///
/// ```ignore
/// struct A {
///     entry: Entry,
///     data: usize,
/// }
///
/// impl IsElement<A> for A {
///     fn entry_of(a: &A) -> &Entry {
///         let entry_ptr = ((a as usize) + offset_of!(A, entry)) as *const Entry;
///         unsafe { &*entry_ptr }
///     }
///
///     unsafe fn element_of(entry: &Entry) -> &A {
///         let elem_ptr = ((entry as usize) - offset_of!(A, entry)) as *const A;
///         &*elem_ptr
///     }
///
///     unsafe fn finalize(entry: &Entry) {
///         let elem = Self::element_of(entry);
///         drop(Box::from_raw(elem as *const A as *mut A));
///     }
/// }
/// ```
///
/// This trait is implemented on a type separate from `T` (although it can be just `T`), because
/// one type might be placeable into multiple lists, in which case it would require multiple
/// implementations of `IsElement`. In such cases, each struct implementing `IsElement<T>`
/// represents a distinct `Entry` in `T`.
///
/// For example, we can insert the following struct into two lists using `entry1` for one
/// and `entry2` for the other:
///
/// ```ignore
/// struct B {
///     entry1: Entry,
///     entry2: Entry,
///     data: usize,
/// }
/// ```
///
pub trait IsElement<T> {
    /// Returns a reference to this element's `Entry`.
    fn entry_of(&T) -> &Entry;

    /// Given a reference to an element's entry, returns that element.
    ///
    /// ```ignore
    /// let elem = ListElement::new();
    /// assert_eq!(elem.entry_of(),
    ///            unsafe { ListElement::element_of(elem.entry_of()) } );
    /// ```
    ///
    /// # Safety
    /// The caller has to guarantee that the `Entry` it
    /// is called with was retrieved from an instance of the element type (`T`).
    unsafe fn element_of(&Entry) -> &T;

    /// Deallocates the whole element given its `Entry`. This is called when the list
    /// is ready to actually free the element.
    ///
    /// # Safety
    /// The caller has to guarantee that the `Entry` it
    /// is called with was retrieved from an instance of the element type (`T`).
    unsafe fn finalize(&Entry);
}
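
// Editor's note: a compiling version (not part of the upstream crate) of the `ignore` example
// in the `IsElement` docs above, checking that `entry_of` and `element_of` round-trip.
#[cfg(test)]
mod is_element_sketch {
    use super::{Entry, IsElement};

    struct A {
        entry: Entry,
        data: usize,
    }

    impl IsElement<A> for A {
        fn entry_of(a: &A) -> &Entry {
            let entry_ptr = (a as *const A as usize + offset_of!(A, entry)) as *const Entry;
            unsafe { &*entry_ptr }
        }

        unsafe fn element_of(entry: &Entry) -> &A {
            let elem_ptr = (entry as *const Entry as usize - offset_of!(A, entry)) as *const A;
            &*elem_ptr
        }

        unsafe fn finalize(entry: &Entry) {
            // Not exercised below: the test element lives on the stack, so freeing it as a
            // `Box` (as the docs example does) would be unsound here.
            let _ = entry;
            unreachable!();
        }
    }

    #[test]
    fn entry_roundtrip() {
        let a = A { entry: Entry::default(), data: 7 };
        let entry = A::entry_of(&a);
        let back = unsafe { A::element_of(entry) };
        assert_eq!(back.data, 7);
        assert_eq!(back as *const A, &a as *const A);
    }
}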
|
||||
|
||||
/// A lock-free, intrusive linked list of type `T`.
#[derive(Debug)]
pub struct List<T, C: IsElement<T> = T> {
    /// The head of the linked list.
    head: Atomic<Entry>,

    /// The phantom data for using `T` and `C`.
    _marker: PhantomData<(T, C)>,
}

/// An iterator used for retrieving values from the list.
pub struct Iter<'g, T: 'g, C: IsElement<T>> {
    /// The guard that protects the iteration.
    guard: &'g Guard,

    /// Pointer from the predecessor to the current entry.
    pred: &'g Atomic<Entry>,

    /// The current entry.
    curr: Shared<'g, Entry>,

    /// The list head, needed for restarting iteration.
    head: &'g Atomic<Entry>,

    /// Logically, we store a borrow of an instance of `T` and
    /// use the type information from `C`.
    _marker: PhantomData<(&'g T, C)>,
}

/// An error that occurs during iteration over the list.
#[derive(PartialEq, Debug)]
pub enum IterError {
    /// A concurrent thread modified the state of the list at the same place that this iterator
    /// was inspecting. Subsequent iteration will restart from the beginning of the list.
    Stalled,
}

impl Default for Entry {
    /// Returns the empty entry.
    fn default() -> Entry {
        Entry { next: Atomic::null() }
    }
}

impl Entry {
    /// Marks this entry as deleted, deferring the actual deallocation to a later iteration.
    ///
    /// # Safety
    ///
    /// The entry should be a member of a linked list, and it should not have been deleted.
    /// It should be safe to call `C::finalize` on the entry after the `guard` is dropped, where `C`
    /// is the associated helper for the linked list.
    pub unsafe fn delete(&self, guard: &Guard) {
        self.next.fetch_or(1, Release, guard);
    }
}
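// Added note, not vendored: `delete` is purely *logical* removal. `fetch_or(1, ...)` sets
// the low tag bit of `next`; the entry stays physically linked until a later `Iter::next`
// unlinks it and defers `C::finalize` past the current grace period.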
impl<T, C: IsElement<T>> List<T, C> {
    /// Returns a new, empty linked list.
    pub fn new() -> List<T, C> {
        List {
            head: Atomic::null(),
            _marker: PhantomData,
        }
    }

    /// Inserts `entry` into the head of the list.
    ///
    /// # Safety
    ///
    /// You should guarantee that:
    ///
    /// - `container` is not null
    /// - `container` is immovable, e.g. inside a `Box`
    /// - the same `Entry` is not inserted more than once
    /// - the inserted object will be removed before the list is dropped
    pub unsafe fn insert<'g>(&'g self, container: Shared<'g, T>, guard: &'g Guard) {
        // Insert right after head, i.e. at the beginning of the list.
        let to = &self.head;
        // Get the intrusively stored Entry of the new element to insert.
        let entry: &Entry = C::entry_of(container.deref());
        // Make a Shared ptr to that Entry.
        let entry_ptr = Shared::from(entry as *const _);
        // Read the current successor of where we want to insert.
        let mut next = to.load(Relaxed, guard);

        loop {
            // Set the Entry of the to-be-inserted element to point to the previous successor of
            // `to`.
            entry.next.store(next, Relaxed);
            match to.compare_and_set_weak(next, entry_ptr, Release, guard) {
                Ok(_) => break,
                // We lost the race or weak CAS failed spuriously. Update the successor and try
                // again.
                Err(err) => next = err.current,
            }
        }
    }

    /// Returns an iterator over all objects.
    ///
    /// # Caveat
    ///
    /// Every object that is inserted at the moment this function is called and persists at least
    /// until the end of iteration will be returned. Since this iterator traverses a lock-free
    /// linked list that may be concurrently modified, some additional caveats apply:
    ///
    /// 1. If a new object is inserted during iteration, it may or may not be returned.
    /// 2. If an object is deleted during iteration, it may or may not be returned.
    /// 3. The iteration may be aborted when it loses a race with a concurrent thread. In this
    ///    case, the winning thread will continue to iterate over the same list. (A usage sketch
    ///    follows the `Iterator` implementation below.)
    pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
        Iter {
            guard: guard,
            pred: &self.head,
            curr: self.head.load(Acquire, guard),
            head: &self.head,
            _marker: PhantomData,
        }
    }
}

impl<T, C: IsElement<T>> Drop for List<T, C> {
    fn drop(&mut self) {
        unsafe {
            let guard = &unprotected();
            let mut curr = self.head.load(Relaxed, guard);
            while let Some(c) = curr.as_ref() {
                let succ = c.next.load(Relaxed, guard);
                // Verify that all elements have been removed from the list.
                assert_eq!(succ.tag(), 1);

                C::finalize(curr.deref());
                curr = succ;
            }
        }
    }
}
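// Added note, not vendored: the `assert_eq!(succ.tag(), 1)` in `drop` checks the safety
// contract of `insert`: every element must have been logically deleted (its `next` tag
// set by `Entry::delete`) before the list itself is dropped; only then may `C::finalize`
// reclaim the nodes without racing other threads.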
impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
    type Item = Result<&'g T, IterError>;

    fn next(&mut self) -> Option<Self::Item> {
        while let Some(c) = unsafe { self.curr.as_ref() } {
            let succ = c.next.load(Acquire, self.guard);

            if succ.tag() == 1 {
                // This entry was removed. Try unlinking it from the list.
                let succ = succ.with_tag(0);

                // The tag of `curr` should always be zero, because removing a node after a
                // logically deleted node would leave the list in an invalid state.
                debug_assert!(self.curr.tag() == 0);

                match self.pred.compare_and_set(
                    self.curr,
                    succ,
                    Acquire,
                    self.guard,
                ) {
                    Ok(_) => {
                        // We succeeded in unlinking this element from the list, so we have to
                        // schedule deallocation. Deferred drop is okay, because `list.delete()`
                        // can only be called if `T: 'static`.
                        unsafe {
                            let p = self.curr;
                            self.guard.defer(move || C::finalize(p.deref()));
                        }

                        // Move over the removed entry by only advancing `curr`, not `pred`.
                        self.curr = succ;
                        continue;
                    }
                    Err(_) => {
                        // A concurrent thread modified the predecessor node. Since it might've
                        // been deleted, we need to restart from `head`.
                        self.pred = self.head;
                        self.curr = self.head.load(Acquire, self.guard);

                        return Some(Err(IterError::Stalled));
                    }
                }
            }

            // Move one step forward.
            self.pred = &c.next;
            self.curr = succ;

            return Some(Ok(unsafe { C::element_of(c) }));
        }

        // We reached the end of the list.
        None
    }
}

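// A hedged usage sketch, not part of the vendored sources: since `next` can yield
// `Err(IterError::Stalled)` after transparently restarting from the head, callers that
// need one consistent pass typically re-run the whole traversal. `visit` is a
// hypothetical callback introduced only for this illustration.
//
// ```ignore
// fn for_each<T, C: IsElement<T>>(list: &List<T, C>, guard: &Guard, visit: fn(&T)) {
//     'restart: loop {
//         for item in list.iter(guard) {
//             match item {
//                 Ok(t) => visit(t),
//                 // Lost a race with a concurrent thread; start the pass over.
//                 Err(IterError::Stalled) => continue 'restart,
//             }
//         }
//         return;
//     }
// }
// ```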
#[cfg(test)]
mod tests {
    use {Collector, Owned, Guard};
    use crossbeam_utils::scoped;
    use std::sync::Barrier;
    use super::*;

    impl IsElement<Entry> for Entry {
        fn entry_of(entry: &Entry) -> &Entry {
            entry
        }

        unsafe fn element_of(entry: &Entry) -> &Entry {
            entry
        }

        unsafe fn finalize(entry: &Entry) {
            drop(Box::from_raw(entry as *const Entry as *mut Entry));
        }
    }

    /// Checks whether the list retains inserted elements
    /// and returns them in the correct order.
    #[test]
    fn insert() {
        let collector = Collector::new();
        let handle = collector.handle();
        let guard = handle.pin();

        let l: List<Entry> = List::new();

        let e1 = Owned::new(Entry::default()).into_shared(&guard);
        let e2 = Owned::new(Entry::default()).into_shared(&guard);
        let e3 = Owned::new(Entry::default()).into_shared(&guard);

        unsafe {
            l.insert(e1, &guard);
            l.insert(e2, &guard);
            l.insert(e3, &guard);
        }

        let mut iter = l.iter(&guard);
        let maybe_e3 = iter.next();
        assert!(maybe_e3.is_some());
        assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
        let maybe_e2 = iter.next();
        assert!(maybe_e2.is_some());
        assert!(maybe_e2.unwrap().unwrap() as *const Entry == e2.as_raw());
        let maybe_e1 = iter.next();
        assert!(maybe_e1.is_some());
        assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
        assert!(iter.next().is_none());

        unsafe {
            e1.as_ref().unwrap().delete(&guard);
            e2.as_ref().unwrap().delete(&guard);
            e3.as_ref().unwrap().delete(&guard);
        }
    }

    /// Checks whether elements can be removed from the list and whether
    /// the correct elements are removed.
    #[test]
    fn delete() {
        let collector = Collector::new();
        let handle = collector.handle();
        let guard = handle.pin();

        let l: List<Entry> = List::new();

        let e1 = Owned::new(Entry::default()).into_shared(&guard);
        let e2 = Owned::new(Entry::default()).into_shared(&guard);
        let e3 = Owned::new(Entry::default()).into_shared(&guard);
        unsafe {
            l.insert(e1, &guard);
            l.insert(e2, &guard);
            l.insert(e3, &guard);
            e2.as_ref().unwrap().delete(&guard);
        }

        let mut iter = l.iter(&guard);
        let maybe_e3 = iter.next();
        assert!(maybe_e3.is_some());
        assert!(maybe_e3.unwrap().unwrap() as *const Entry == e3.as_raw());
        let maybe_e1 = iter.next();
        assert!(maybe_e1.is_some());
        assert!(maybe_e1.unwrap().unwrap() as *const Entry == e1.as_raw());
        assert!(iter.next().is_none());

        unsafe {
            e1.as_ref().unwrap().delete(&guard);
            e3.as_ref().unwrap().delete(&guard);
        }

        let mut iter = l.iter(&guard);
        assert!(iter.next().is_none());
    }

    const THREADS: usize = 8;
    const ITERS: usize = 512;

    /// Contends the list on insert and delete operations to make sure they can run concurrently.
    #[test]
    fn insert_delete_multi() {
        let collector = Collector::new();

        let l: List<Entry> = List::new();
        let b = Barrier::new(THREADS);

        scoped::scope(|s| for _ in 0..THREADS {
            s.spawn(|| {
                b.wait();

                let handle = collector.handle();
                let guard: Guard = handle.pin();
                let mut v = Vec::with_capacity(ITERS);

                for _ in 0..ITERS {
                    let e = Owned::new(Entry::default()).into_shared(&guard);
                    v.push(e);
                    unsafe {
                        l.insert(e, &guard);
                    }
                }

                for e in v {
                    unsafe {
                        e.as_ref().unwrap().delete(&guard);
                    }
                }
            });
        });

        let handle = collector.handle();
        let guard = handle.pin();

        let mut iter = l.iter(&guard);
        assert!(iter.next().is_none());
    }

    /// Contends the list on iteration to make sure that it can be iterated over concurrently.
    #[test]
    fn iter_multi() {
        let collector = Collector::new();

        let l: List<Entry> = List::new();
        let b = Barrier::new(THREADS);

        scoped::scope(|s| for _ in 0..THREADS {
            s.spawn(|| {
                b.wait();

                let handle = collector.handle();
                let guard: Guard = handle.pin();
                let mut v = Vec::with_capacity(ITERS);

                for _ in 0..ITERS {
                    let e = Owned::new(Entry::default()).into_shared(&guard);
                    v.push(e);
                    unsafe {
                        l.insert(e, &guard);
                    }
                }

                let mut iter = l.iter(&guard);
                for _ in 0..ITERS {
                    assert!(iter.next().is_some());
                }

                for e in v {
                    unsafe {
                        e.as_ref().unwrap().delete(&guard);
                    }
                }
            });
        });

        let handle = collector.handle();
        let guard = handle.pin();

        let mut iter = l.iter(&guard);
        assert!(iter.next().is_none());
    }
}
@@ -0,0 +1,4 @@
//! Synchronization primitives.

pub mod list;
pub mod queue;
@@ -0,0 +1,435 @@
//! Michael-Scott lock-free queue.
//!
//! Usable with any number of producers and consumers.
//!
//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
//! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106

use core::fmt;
use core::mem;
use core::ptr;
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};

use crossbeam_utils::cache_padded::CachePadded;
use nodrop::NoDrop;

use {unprotected, Atomic, Guard, Owned, Shared};

// The representation here is a singly-linked list, with a sentinel node at the front. In general
// the `tail` pointer may lag behind the actual tail. Non-sentinel nodes are either all `Data` or
// all `Blocked` (requests for data from blocked threads).
#[derive(Debug)]
pub struct Queue<T> {
    head: CachePadded<Atomic<Node<T>>>,
    tail: CachePadded<Atomic<Node<T>>>,
}

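// Added illustration, not vendored: an empty queue holds a single sentinel node that both
// `head` and `tail` point at. After two pushes the layout is:
//
//   head --> [sentinel] --> [data 1] --> [data 2] <-- tail (may lag one node behind)
//
// `try_pop` swings `head` to the sentinel's successor and moves that node's data out.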
struct Node<T> {
    /// The slot in which a value of type `T` can be stored.
    ///
    /// The type of `data` is `NoDrop<T>` because a `Node<T>` doesn't always contain a `T`. For
    /// example, the sentinel node in a queue never contains a value: its slot is always empty.
    /// Other nodes start their life with a push operation and contain a value until it gets
    /// popped out. After that such empty nodes get added to the collector for destruction.
    data: NoDrop<T>,

    next: Atomic<Node<T>>,
}

impl<T> fmt::Debug for Node<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
        write!(f, "node {{ ... }}")
    }
}

// Any particular `T` should never be accessed concurrently, so no need for `Sync`.
unsafe impl<T: Send> Sync for Queue<T> {}
unsafe impl<T: Send> Send for Queue<T> {}

impl<T> Queue<T> {
    /// Creates a new, empty queue.
    pub fn new() -> Queue<T> {
        let q = Queue {
            head: CachePadded::new(Atomic::null()),
            tail: CachePadded::new(Atomic::null()),
        };
        let sentinel = Owned::new(Node {
            data: unsafe { mem::uninitialized() },
            next: Atomic::null(),
        });
        unsafe {
            let guard = &unprotected();
            let sentinel = sentinel.into_shared(guard);
            q.head.store(sentinel, Relaxed);
            q.tail.store(sentinel, Relaxed);
            q
        }
    }

    /// Attempts to atomically place `n` into the `next` pointer of `onto`, and returns `true` on
    /// success. The queue's `tail` pointer may be updated.
    #[inline(always)]
    fn push_internal(&self, onto: Shared<Node<T>>, new: Shared<Node<T>>, guard: &Guard) -> bool {
        // Is `onto` the actual tail?
        let o = unsafe { onto.deref() };
        let next = o.next.load(Acquire, guard);
        if unsafe { next.as_ref().is_some() } {
            // If not, try to "help" by moving the tail pointer forward.
            let _ = self.tail.compare_and_set(onto, next, Release, guard);
            false
        } else {
            // Looks like the actual tail; attempt to link in `n`.
            let result = o.next
                .compare_and_set(Shared::null(), new, Release, guard)
                .is_ok();
            if result {
                // Try to move the tail pointer forward.
                let _ = self.tail.compare_and_set(onto, new, Release, guard);
            }
            result
        }
    }

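    // Added note, not vendored: the `false` branch above is the Michael-Scott "helping"
    // step: if `onto` already has a successor, some other push has linked its node but
    // not yet advanced `tail`, so we advance it on that thread's behalf and let the
    // caller reload the tail and retry.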
    /// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`.
    pub fn push(&self, t: T, guard: &Guard) {
        let new = Owned::new(Node {
            data: NoDrop::new(t),
            next: Atomic::null(),
        });
        let new = Owned::into_shared(new, guard);

        loop {
            // We push onto the tail, so we'll start optimistically by looking there first.
            let tail = self.tail.load(Acquire, guard);

            // Attempt to push onto the `tail` snapshot; fails if `tail.next` has changed.
            if self.push_internal(tail, new, guard) {
                break;
            }
        }
    }

    /// Attempts to pop a data node. Returns `Ok(None)` if the queue is empty, and `Err(())` if
    /// we lost a race to pop.
    #[inline(always)]
    fn pop_internal(&self, guard: &Guard) -> Result<Option<T>, ()> {
        let head = self.head.load(Acquire, guard);
        let h = unsafe { head.deref() };
        let next = h.next.load(Acquire, guard);
        match unsafe { next.as_ref() } {
            Some(n) => unsafe {
                self.head
                    .compare_and_set(head, next, Release, guard)
                    .map(|_| {
                        guard.defer(move || drop(head.into_owned()));
                        Some(NoDrop::into_inner(ptr::read(&n.data)))
                    })
                    .map_err(|_| ())
            },
            None => Ok(None),
        }
    }

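    // Added note, not vendored: on a successful CAS the *old* head (the current sentinel)
    // is deferred for destruction and the popped node becomes the new sentinel. Its
    // payload is moved out with `ptr::read`, which is why `data` is a `NoDrop<T>`: the
    // slot must not be dropped again when the node itself is eventually freed.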
    /// Attempts to pop a data node, if the data satisfies the given condition. Returns
    /// `Ok(None)` if the queue is empty or the data does not satisfy the condition, and
    /// `Err(())` if we lost a race to pop.
    #[inline(always)]
    fn pop_if_internal<F>(&self, condition: F, guard: &Guard) -> Result<Option<T>, ()>
    where
        T: Sync,
        F: Fn(&T) -> bool,
    {
        let head = self.head.load(Acquire, guard);
        let h = unsafe { head.deref() };
        let next = h.next.load(Acquire, guard);
        match unsafe { next.as_ref() } {
            Some(n) if condition(&n.data) => unsafe {
                self.head
                    .compare_and_set(head, next, Release, guard)
                    .map(|_| {
                        guard.defer(move || drop(head.into_owned()));
                        Some(NoDrop::into_inner(ptr::read(&n.data)))
                    })
                    .map_err(|_| ())
            },
            None | Some(_) => Ok(None),
        }
    }

    /// Attempts to dequeue from the front.
    ///
    /// Returns `None` if the queue is observed to be empty.
    pub fn try_pop(&self, guard: &Guard) -> Option<T> {
        loop {
            if let Ok(head) = self.pop_internal(guard) {
                return head;
            }
        }
    }

    /// Attempts to dequeue from the front, if the item satisfies the given condition.
    ///
    /// Returns `None` if the queue is observed to be empty, or if the head does not satisfy the
    /// given condition.
    pub fn try_pop_if<F>(&self, condition: F, guard: &Guard) -> Option<T>
    where
        T: Sync,
        F: Fn(&T) -> bool,
    {
        loop {
            if let Ok(head) = self.pop_if_internal(&condition, guard) {
                return head;
            }
        }
    }
}

impl<T> Drop for Queue<T> {
    fn drop(&mut self) {
        unsafe {
            let guard = &unprotected();

            while let Some(_) = self.try_pop(guard) {}

            // Destroy the remaining sentinel node.
            let sentinel = self.head.load(Relaxed, guard);
            drop(sentinel.into_owned());
        }
    }
}

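// A hedged usage sketch, not part of the vendored sources: every operation takes an epoch
// guard, which keeps nodes that a concurrent pop has already unlinked from being freed
// while we might still be reading them.
//
// ```ignore
// let q: Queue<i64> = Queue::new();
// let guard = &::pin();
// q.push(7, guard);
// assert_eq!(q.try_pop(guard), Some(7));
// assert_eq!(q.try_pop(guard), None);
// ```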
#[cfg(test)]
mod test {
    use {pin};

    use core::sync::atomic::Ordering;
    use crossbeam_utils::scoped;

    struct Queue<T> {
        queue: super::Queue<T>,
    }

    impl<T> Queue<T> {
        pub fn new() -> Queue<T> {
            Queue { queue: super::Queue::new() }
        }

        pub fn push(&self, t: T) {
            let guard = &pin();
            self.queue.push(t, guard);
        }

        pub fn is_empty(&self) -> bool {
            let guard = &pin();
            let head = self.queue.head.load(Ordering::Acquire, guard);
            let h = unsafe { head.deref() };
            h.next.load(Ordering::Acquire, guard).is_null()
        }

        pub fn try_pop(&self) -> Option<T> {
            let guard = &pin();
            self.queue.try_pop(guard)
        }

        pub fn pop(&self) -> T {
            loop {
                match self.try_pop() {
                    None => continue,
                    Some(t) => return t,
                }
            }
        }
    }

    const CONC_COUNT: i64 = 1000000;

    #[test]
    fn push_try_pop_1() {
        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());
        q.push(37);
        assert!(!q.is_empty());
        assert_eq!(q.try_pop(), Some(37));
        assert!(q.is_empty());
    }

    #[test]
    fn push_try_pop_2() {
        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());
        q.push(37);
        q.push(48);
        assert_eq!(q.try_pop(), Some(37));
        assert!(!q.is_empty());
        assert_eq!(q.try_pop(), Some(48));
        assert!(q.is_empty());
    }

    #[test]
    fn push_try_pop_many_seq() {
        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());
        for i in 0..200 {
            q.push(i)
        }
        assert!(!q.is_empty());
        for i in 0..200 {
            assert_eq!(q.try_pop(), Some(i));
        }
        assert!(q.is_empty());
    }

    #[test]
    fn push_pop_1() {
        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());
        q.push(37);
        assert!(!q.is_empty());
        assert_eq!(q.pop(), 37);
        assert!(q.is_empty());
    }

    #[test]
    fn push_pop_2() {
        let q: Queue<i64> = Queue::new();
        q.push(37);
        q.push(48);
        assert_eq!(q.pop(), 37);
        assert_eq!(q.pop(), 48);
    }

    #[test]
    fn push_pop_many_seq() {
        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());
        for i in 0..200 {
            q.push(i)
        }
        assert!(!q.is_empty());
        for i in 0..200 {
            assert_eq!(q.pop(), i);
        }
        assert!(q.is_empty());
    }

    #[test]
    fn push_try_pop_many_spsc() {
        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());

        scoped::scope(|scope| {
            scope.spawn(|| {
                let mut next = 0;

                while next < CONC_COUNT {
                    if let Some(elem) = q.try_pop() {
                        assert_eq!(elem, next);
                        next += 1;
                    }
                }
            });

            for i in 0..CONC_COUNT {
                q.push(i)
            }
        });
    }

    #[test]
    fn push_try_pop_many_spmc() {
        fn recv(_t: i32, q: &Queue<i64>) {
            let mut cur = -1;
            for _i in 0..CONC_COUNT {
                if let Some(elem) = q.try_pop() {
                    assert!(elem > cur);
                    cur = elem;

                    if cur == CONC_COUNT - 1 {
                        break;
                    }
                }
            }
        }

        let q: Queue<i64> = Queue::new();
        assert!(q.is_empty());
        let qr = &q;
        scoped::scope(|scope| {
            for i in 0..3 {
                scope.spawn(move || recv(i, qr));
            }

            scope.spawn(|| for i in 0..CONC_COUNT {
                q.push(i);
            })
        });
    }

    #[test]
    fn push_try_pop_many_mpmc() {
        enum LR {
            Left(i64),
            Right(i64),
        }

        let q: Queue<LR> = Queue::new();
        assert!(q.is_empty());

        scoped::scope(|scope| for _t in 0..2 {
            scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT {
                q.push(LR::Left(i))
            });
            scope.spawn(|| for i in CONC_COUNT - 1..CONC_COUNT {
                q.push(LR::Right(i))
            });
            scope.spawn(|| {
                let mut vl = vec![];
                let mut vr = vec![];
                for _i in 0..CONC_COUNT {
                    match q.try_pop() {
                        Some(LR::Left(x)) => vl.push(x),
                        Some(LR::Right(x)) => vr.push(x),
                        _ => {}
                    }
                }

                let mut vl2 = vl.clone();
                let mut vr2 = vr.clone();
                vl2.sort();
                vr2.sort();

                assert_eq!(vl, vl2);
                assert_eq!(vr, vr2);
            });
        });
    }

    #[test]
    fn push_pop_many_spsc() {
        let q: Queue<i64> = Queue::new();

        scoped::scope(|scope| {
            scope.spawn(|| {
                let mut next = 0;
                while next < CONC_COUNT {
                    assert_eq!(q.pop(), next);
                    next += 1;
                }
            });

            for i in 0..CONC_COUNT {
                q.push(i)
            }
        });
        assert!(q.is_empty());
    }

    #[test]
    fn is_empty_dont_pop() {
        let q: Queue<i64> = Queue::new();
        q.push(20);
        q.push(20);
        assert!(!q.is_empty());
        assert!(!q.is_empty());
        assert!(q.try_pop().is_some());
    }
}
@@ -1 +1 @@
{"files":{".travis.yml":"d84605e26d95fabc8172af7a621d3e48117b5180d389c6a166d15acb09c9ed9f","CHANGELOG.md":"5e62172f395348eb92a3fd2532ba5d65a7f13286449a3698b41f3aac7a9a4e57","Cargo.toml":"6bcfcac3b6b20026d1020890fcd8cd5f6ceff33741b92fea001993696e2aed17","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"8728114db9ab19bca8e07b36f1cccd1e6a57db6ea03be08679aef2a982736532","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"41b2d03e2cfd46912a3722295843b841e74e10eae6eb23586d3bc3b6d0a41e32","src/atomic.rs":"469ae38d3e8b37eec79c1c21a29a63cd357e49f34f4b6cdde6817f8e1267bd8d","src/collector.rs":"ebebbf1229a0d5339b938825d0dca9dc8642f9fa5bbceafb4e371477186ed4b4","src/default.rs":"804c217df80e0b6df3c6e90c5d6f5153c153567ac28cc75cc62042ba75d24bf2","src/deferred.rs":"1bd6c66c58f92714088b6f9f811368a123143a5f03cf4afc4b19ab24f3181387","src/epoch.rs":"25b85734a4ec5bedb0384a1fe976ec97056a88910a046a270a3e38558f7dbd4b","src/garbage.rs":"b77a8f87701dca8b63d858bb234137335455b6fc1f223e73c7609542d13daa43","src/guard.rs":"08975d989ba558aba90d64865594b155b2135e628414f77bb8afb9de427a2e0d","src/internal.rs":"a5a6a52999ce99294d544ac7cb82cb820e78f0c41315fc8d7494d21ca6da1135","src/lib.rs":"f3093bc3411f2bd94d662c3cf8719411b62793449b3db1699865f4c08c207af1","src/sync/list.rs":"57c3674c40e30eaf92689ab0e09973d7d161e52a5bdb5b5481b62fd0d10fb4eb","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"868b5bd651e54216fa1827d668ab564c120779113ae7a2a056fee4371db1066c"},"package":"927121f5407de9956180ff5e936fe3cf4324279280001cd56b669d28ee7e9150"}
{"files":{".travis.yml":"b096077a6f20d96a6f6d824b98b94f73221ef7330290839ff35ad8c586dbc2e4","CHANGELOG.md":"3f0652c2ad1fc46b10d22cc3a5ad5fd8b737746dd3f3bc20d1e2a90432391892","Cargo.toml":"dc814f5487179536504adc4c77cacd827cd09b20dc81f49d3257553843599fb9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"2721d525d6ea1309b5ce780d7748303ee24eecff074243f086bbf37768141efb","benches/defer.rs":"b2b64a8bb684721f12432aa63ae1e2227511879567ed212c0430961805b9f543","benches/flush.rs":"3b8c6be173ea546ad7e93adff324244a1c289608403bb13cc3bd89082fe90e35","benches/pin.rs":"4165baf238bbe2267e1598695d41ea8d3a312aa613d567e4dd7f5581a0f1323c","examples/sanitize.rs":"25ce494d162c4b730608e865894bda7fee6fdded5544f00b8882e482e39c12df","src/atomic.rs":"e9383337a4754c022a8d3c06372910299cb8318b620f26fe50347b244c4caee4","src/collector.rs":"0a068c19f67b094c52cd9e0e2cf4e6b7630cd6af810769cfebe4274631065e55","src/default.rs":"67c0e52f2ce85bc205e61a4f807848c0aab93dfcc034e8c460f7669694d4d43f","src/deferred.rs":"3e49824277fdc25a68498263a7ada67aca3977edef9545985f911ba42d7a2e61","src/epoch.rs":"47fb45f1cc07700473b25324dcdb00a086c5c145c69bed3eee6547552298fecf","src/guard.rs":"22c9d2a6c9a35e19f8d6da2cc69dc612226a1807e789291668f1ed85410dc351","src/internal.rs":"c2ee6dff11bb9a44afcff441fce04640da1bb070c778cedc9edf86c94b71aaf8","src/lib.rs":"325a7964f690d851006563341423ce69f9277db7e8bf21bb9139cdf22927f471","src/sync/list.rs":"abb9eae31f09d7c3692aed3c7ad7a3ad6d692992af891037db8eba50d1245f0c","src/sync/mod.rs":"2da979ca3a2293f7626a2e6a9ab2fad758d92e3d2bed6cc712ef59eeeea87eab","src/sync/queue.rs":"0254d182f820c8c880c9a80747501eb2cb9d53aa8cb958c04beceb39abf86aa9"},"package":"2af0e75710d6181e234c8ecc79f14a97907850a541b13b0be1dd10992f2e4620"}
@@ -4,7 +4,6 @@ rust:
- stable
- beta
- nightly
- 1.13.0

addons:
  apt:
@@ -6,6 +6,45 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.

## [Unreleased]

+## [0.4.3] - 2018-06-12
+### Changed
+- Downgrade `crossbeam-utils` to 0.3 because it was a breaking change.
+
+## [0.4.2] - 2018-06-12
+### Added
+- Expose the `Pointer` trait.
+- Warn missing docs and missing debug impls.
+
+### Changed
+- Update `crossbeam-utils` to 0.4.
+
+## [0.4.1] - 2018-03-20
+### Added
+- Add `Debug` impls for `Collector`, `Handle`, and `Guard`.
+- Add `load_consume` to `Atomic`.
+
+### Changed
+- Rename `Collector::handle` to `Collector::register`.
+
+### Fixed
+- Remove the `Send` implementation for `Handle` (this was a bug). Only
+  `Collector`s can be shared among multiple threads, while `Handle`s and
+  `Guard`s must stay within the thread in which they were created.
+
+## [0.4.0] - 2018-02-10
+### Changed
+- Update dependencies.
+
+### Removed
+- Remove support for Rust 1.13.
+
+## [0.3.0] - 2018-02-10
+### Added
+- Add support for Rust 1.13.
+
+### Changed
+- Improve documentation for CAS.
+
## [0.2.0] - 2017-11-29
### Added
- Add method `Owned::into_box`.
@@ -22,5 +61,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.
### Added
- First version of the new epoch-based GC.

-[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...HEAD
+[Unreleased]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.3...HEAD
+[0.4.3]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.2...v0.4.3
+[0.4.2]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.1...v0.4.2
+[0.4.1]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.4.0...v0.4.1
+[0.4.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.3.0...v0.4.0
+[0.3.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.2.0...v0.3.0
[0.2.0]: https://github.com/crossbeam-rs/crossbeam-epoch/compare/v0.1.0...v0.2.0
@@ -12,7 +12,7 @@

[package]
name = "crossbeam-epoch"
-version = "0.3.1"
+version = "0.4.3"
authors = ["The Crossbeam Project Developers"]
description = "Epoch-based garbage collection"
homepage = "https://github.com/crossbeam-rs/crossbeam-epoch"
@@ -30,25 +30,21 @@ default-features = false
version = "0.1"

[dependencies.crossbeam-utils]
-version = "0.2"
+version = "0.3"
default-features = false

[dependencies.lazy_static]
-version = "1.0.0"
+version = "1"
optional = true

[dependencies.memoffset]
version = "0.2"

[dependencies.nodrop]
version = "0.1.12"
default-features = false

[dependencies.scopeguard]
version = "0.3"
default-features = false
[dev-dependencies.rand]
-version = "0.3"
+version = "0.4"

[features]
default = ["use_std"]
|
@ -17,7 +17,7 @@ Add this to your `Cargo.toml`:
|
|||
|
||||
```toml
|
||||
[dependencies]
|
||||
crossbeam-epoch = "0.2"
|
||||
crossbeam-epoch = "0.4"
|
||||
```
|
||||
|
||||
Next, add this to your crate:
|
||||
|
|
|
@@ -54,8 +54,8 @@ fn main() {
    let threads = (0..16)
        .map(|_| {
            let a = a.clone();
-            let h = collector.handle();
-            thread::spawn(move || worker(a, h))
+            let c = collector.clone();
+            thread::spawn(move || worker(a, c.register()))
        })
        .collect::<Vec<_>>();
@@ -10,15 +10,17 @@ use core::sync::atomic::Ordering;
use alloc::boxed::Box;

use guard::Guard;
+use crossbeam_utils::consume::AtomicConsume;

/// Given ordering for the success case in a compare-exchange operation, returns the strongest
/// appropriate ordering for the failure case.
#[inline]
fn strongest_failure_ordering(ord: Ordering) -> Ordering {
+    use self::Ordering::*;
    match ord {
-        Ordering::Relaxed | Ordering::Release => Ordering::Relaxed,
-        Ordering::Acquire | Ordering::AcqRel => Ordering::Acquire,
-        _ => Ordering::SeqCst,
+        Relaxed | Release => Relaxed,
+        Acquire | AcqRel => Acquire,
+        _ => SeqCst,
    }
}
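// Added note, not vendored: for example, a compare-and-set with success ordering `AcqRel`
// uses `Acquire` on failure, and `Release` maps to `Relaxed`, since `compare_exchange`
// forbids `Release`/`AcqRel` as failure orderings; this helper picks the strongest
// ordering the failure case may legally have.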
@@ -133,8 +135,8 @@ unsafe impl<T: Send + Sync> Sync for Atomic<T> {}

impl<T> Atomic<T> {
    /// Returns a new atomic pointer pointing to the tagged pointer `data`.
-    fn from_data(data: usize) -> Atomic<T> {
-        Atomic {
+    fn from_usize(data: usize) -> Self {
+        Self {
            data: AtomicUsize::new(data),
            _marker: PhantomData,
        }
@@ -151,7 +153,7 @@ impl<T> Atomic<T> {
    /// ```
    #[cfg(not(feature = "nightly"))]
    pub fn null() -> Atomic<T> {
-        Atomic {
+        Self {
            data: ATOMIC_USIZE_INIT,
            _marker: PhantomData,
        }
@@ -205,7 +207,32 @@ impl<T> Atomic<T> {
    /// let p = a.load(SeqCst, guard);
    /// ```
    pub fn load<'g>(&self, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
-        unsafe { Shared::from_data(self.data.load(ord)) }
+        unsafe { Shared::from_usize(self.data.load(ord)) }
    }
+
+    /// Loads a `Shared` from the atomic pointer using a "consume" memory ordering.
+    ///
+    /// This is similar to the "acquire" ordering, except that an ordering is
+    /// only guaranteed with operations that "depend on" the result of the load.
+    /// However, consume loads are usually much faster than acquire loads on
+    /// architectures with a weak memory model, since they don't require memory
+    /// fence instructions.
+    ///
+    /// The exact definition of "depend on" is a bit vague, but it works as you
+    /// would expect in practice, since a lot of software, especially the Linux
+    /// kernel, relies on this behavior.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use crossbeam_epoch::{self as epoch, Atomic};
+    ///
+    /// let a = Atomic::new(1234);
+    /// let guard = &epoch::pin();
+    /// let p = a.load_consume(guard);
+    /// ```
+    pub fn load_consume<'g>(&self, _: &'g Guard) -> Shared<'g, T> {
+        unsafe { Shared::from_usize(self.data.load_consume()) }
+    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer.
@@ -226,7 +253,7 @@ impl<T> Atomic<T> {
    /// a.store(Owned::new(1234), SeqCst);
    /// ```
    pub fn store<'g, P: Pointer<T>>(&self, new: P, ord: Ordering) {
-        self.data.store(new.into_data(), ord);
+        self.data.store(new.into_usize(), ord);
    }

    /// Stores a `Shared` or `Owned` pointer into the atomic pointer, returning the previous
@@ -248,7 +275,7 @@ impl<T> Atomic<T> {
    /// let p = a.swap(Shared::null(), SeqCst, guard);
    /// ```
    pub fn swap<'g, P: Pointer<T>>(&self, new: P, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
-        unsafe { Shared::from_data(self.data.swap(new.into_data(), ord)) }
+        unsafe { Shared::from_usize(self.data.swap(new.into_usize(), ord)) }
    }

    /// Stores the pointer `new` (either `Shared` or `Owned`) into the atomic pointer if the current
@@ -288,14 +315,14 @@ impl<T> Atomic<T> {
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
-        let new = new.into_data();
+        let new = new.into_usize();
        self.data
-            .compare_exchange(current.into_data(), new, ord.success(), ord.failure())
-            .map(|_| unsafe { Shared::from_data(new) })
+            .compare_exchange(current.into_usize(), new, ord.success(), ord.failure())
+            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
-                    current: Shared::from_data(current),
-                    new: P::from_data(new),
+                    current: Shared::from_usize(current),
+                    new: P::from_usize(new),
                }
            })
    }
@@ -358,14 +385,14 @@ impl<T> Atomic<T> {
        O: CompareAndSetOrdering,
        P: Pointer<T>,
    {
-        let new = new.into_data();
+        let new = new.into_usize();
        self.data
-            .compare_exchange_weak(current.into_data(), new, ord.success(), ord.failure())
-            .map(|_| unsafe { Shared::from_data(new) })
+            .compare_exchange_weak(current.into_usize(), new, ord.success(), ord.failure())
+            .map(|_| unsafe { Shared::from_usize(new) })
            .map_err(|current| unsafe {
                CompareAndSetError {
-                    current: Shared::from_data(current),
-                    new: P::from_data(new),
+                    current: Shared::from_usize(current),
+                    new: P::from_usize(new),
                }
            })
    }
@@ -392,7 +419,7 @@ impl<T> Atomic<T> {
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_and<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
-        unsafe { Shared::from_data(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
+        unsafe { Shared::from_usize(self.data.fetch_and(val | !low_bits::<T>(), ord)) }
    }

    /// Bitwise "or" with the current tag.
@@ -417,7 +444,7 @@ impl<T> Atomic<T> {
    /// assert_eq!(a.load(SeqCst, guard).tag(), 3);
    /// ```
    pub fn fetch_or<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
-        unsafe { Shared::from_data(self.data.fetch_or(val & low_bits::<T>(), ord)) }
+        unsafe { Shared::from_usize(self.data.fetch_or(val & low_bits::<T>(), ord)) }
    }

    /// Bitwise "xor" with the current tag.
@@ -442,7 +469,7 @@ impl<T> Atomic<T> {
    /// assert_eq!(a.load(SeqCst, guard).tag(), 2);
    /// ```
    pub fn fetch_xor<'g>(&self, val: usize, ord: Ordering, _: &'g Guard) -> Shared<'g, T> {
-        unsafe { Shared::from_data(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
+        unsafe { Shared::from_usize(self.data.fetch_xor(val & low_bits::<T>(), ord)) }
    }
}

@@ -473,7 +500,7 @@ impl<T> Clone for Atomic<T> {
    /// atomics or fences.
    fn clone(&self) -> Self {
        let data = self.data.load(Ordering::Relaxed);
-        Atomic::from_data(data)
+        Atomic::from_usize(data)
    }
}

@@ -496,7 +523,7 @@ impl<T> From<Owned<T>> for Atomic<T> {
    fn from(owned: Owned<T>) -> Self {
        let data = owned.data;
        mem::forget(owned);
-        Self::from_data(data)
+        Self::from_usize(data)
    }
}

@@ -523,7 +550,7 @@ impl<'g, T> From<Shared<'g, T>> for Atomic<T> {
    /// let a = Atomic::<i32>::from(Shared::<i32>::null());
    /// ```
    fn from(ptr: Shared<'g, T>) -> Self {
-        Self::from_data(ptr.data)
+        Self::from_usize(ptr.data)
    }
}

@@ -539,17 +566,17 @@ impl<T> From<*const T> for Atomic<T> {
    /// let a = Atomic::<i32>::from(ptr::null::<i32>());
    /// ```
    fn from(raw: *const T) -> Self {
-        Self::from_data(raw as usize)
+        Self::from_usize(raw as usize)
    }
}

/// A trait for either `Owned` or `Shared` pointers.
pub trait Pointer<T> {
    /// Returns the machine representation of the pointer.
-    fn into_data(self) -> usize;
+    fn into_usize(self) -> usize;

    /// Returns a new pointer pointing to the tagged pointer `data`.
-    unsafe fn from_data(data: usize) -> Self;
+    unsafe fn from_usize(data: usize) -> Self;
}

/// An owned heap-allocated object.
@@ -565,7 +592,7 @@ pub struct Owned<T> {

impl<T> Pointer<T> for Owned<T> {
    #[inline]
-    fn into_data(self) -> usize {
+    fn into_usize(self) -> usize {
        let data = self.data;
        mem::forget(self);
        data
@@ -577,7 +604,7 @@ impl<T> Pointer<T> for Owned<T> {
    ///
    /// Panics if the data is zero in debug mode.
    #[inline]
-    unsafe fn from_data(data: usize) -> Self {
+    unsafe fn from_usize(data: usize) -> Self {
        debug_assert!(data != 0, "converting zero into `Owned`");
        Owned {
            data: data,
@@ -619,7 +646,7 @@ impl<T> Owned<T> {
    /// ```
    pub unsafe fn from_raw(raw: *mut T) -> Owned<T> {
        ensure_aligned(raw);
-        Self::from_data(raw as usize)
+        Self::from_usize(raw as usize)
    }

    /// Converts the owned pointer into a [`Shared`].
@@ -636,7 +663,7 @@ impl<T> Owned<T> {
    ///
    /// [`Shared`]: struct.Shared.html
    pub fn into_shared<'g>(self, _: &'g Guard) -> Shared<'g, T> {
-        unsafe { Shared::from_data(self.into_data()) }
+        unsafe { Shared::from_usize(self.into_usize()) }
    }

    /// Converts the owned pointer into a `Box`.
@@ -680,12 +707,12 @@ impl<T> Owned<T> {
    ///
    /// let o = Owned::new(0u64);
    /// assert_eq!(o.tag(), 0);
-    /// let o = o.with_tag(5);
-    /// assert_eq!(o.tag(), 5);
+    /// let o = o.with_tag(2);
+    /// assert_eq!(o.tag(), 2);
    /// ```
    pub fn with_tag(self, tag: usize) -> Owned<T> {
-        let data = self.into_data();
-        unsafe { Self::from_data(data_with_tag::<T>(data, tag)) }
+        let data = self.into_usize();
+        unsafe { Self::from_usize(data_with_tag::<T>(data, tag)) }
    }
}

@@ -804,12 +831,12 @@ impl<'g, T> Copy for Shared<'g, T> {}

impl<'g, T> Pointer<T> for Shared<'g, T> {
    #[inline]
-    fn into_data(self) -> usize {
+    fn into_usize(self) -> usize {
        self.data
    }

    #[inline]
-    unsafe fn from_data(data: usize) -> Self {
+    unsafe fn from_usize(data: usize) -> Self {
        Shared {
            data: data,
            _marker: PhantomData,
@@ -973,7 +1000,7 @@ impl<'g, T> Shared<'g, T> {
            self.as_raw() != ptr::null(),
            "converting a null `Shared` into `Owned`"
        );
-        Owned::from_data(self.data)
+        Owned::from_usize(self.data)
    }

    /// Returns the tag stored within the pointer.
@@ -984,10 +1011,10 @@ impl<'g, T> Shared<'g, T> {
    /// use crossbeam_epoch::{self as epoch, Atomic, Owned};
    /// use std::sync::atomic::Ordering::SeqCst;
    ///
-    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(5));
+    /// let a = Atomic::<u64>::from(Owned::new(0u64).with_tag(2));
    /// let guard = &epoch::pin();
    /// let p = a.load(SeqCst, guard);
-    /// assert_eq!(p.tag(), 5);
+    /// assert_eq!(p.tag(), 2);
    /// ```
    pub fn tag(&self) -> usize {
        let (_, tag) = decompose_data::<T>(self.data);
@@ -1006,14 +1033,14 @@ impl<'g, T> Shared<'g, T> {
    /// let a = Atomic::new(0u64);
    /// let guard = &epoch::pin();
    /// let p1 = a.load(SeqCst, guard);
-    /// let p2 = p1.with_tag(5);
+    /// let p2 = p1.with_tag(2);
    ///
    /// assert_eq!(p1.tag(), 0);
-    /// assert_eq!(p2.tag(), 5);
+    /// assert_eq!(p2.tag(), 2);
    /// assert_eq!(p1.as_raw(), p2.as_raw());
    /// ```
    pub fn with_tag(&self, tag: usize) -> Shared<'g, T> {
-        unsafe { Self::from_data(data_with_tag::<T>(self.data, tag)) }
+        unsafe { Self::from_usize(data_with_tag::<T>(self.data, tag)) }
    }
}

@@ -1034,7 +1061,7 @@ impl<'g, T> From<*const T> for Shared<'g, T> {
    /// ```
    fn from(raw: *const T) -> Self {
        ensure_aligned(raw);
-        unsafe { Self::from_data(raw as usize) }
+        unsafe { Self::from_usize(raw as usize) }
    }
}

@@ -7,20 +7,21 @@
///
/// let collector = Collector::new();
///
-/// let handle = collector.handle();
+/// let handle = collector.register();
/// drop(collector); // `handle` still works after dropping `collector`
///
/// handle.pin().flush();
/// ```

use alloc::arc::Arc;
+use core::fmt;

use internal::{Global, Local};
use guard::Guard;

/// An epoch-based garbage collector.
pub struct Collector {
-    global: Arc<Global>,
+    pub(crate) global: Arc<Global>,
}

unsafe impl Send for Collector {}
@@ -32,9 +33,9 @@ impl Collector {
        Collector { global: Arc::new(Global::new()) }
    }

-    /// Creates a new handle for the collector.
-    pub fn handle(&self) -> Handle {
-        Handle { local: Local::register(&self.global) }
+    /// Registers a new handle for the collector.
+    pub fn register(&self) -> Handle {
+        Local::register(self)
    }
}

@@ -45,9 +46,23 @@ impl Clone for Collector {
    }
}

+impl fmt::Debug for Collector {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Collector").finish()
+    }
+}
+
+impl PartialEq for Collector {
+    /// Checks if both handles point to the same collector.
+    fn eq(&self, rhs: &Collector) -> bool {
+        Arc::ptr_eq(&self.global, &rhs.global)
+    }
+}
+impl Eq for Collector {}
+
/// A handle to a garbage collector.
pub struct Handle {
-    local: *const Local,
+    pub(crate) local: *const Local,
}

impl Handle {
@@ -62,9 +77,13 @@ impl Handle {
    pub fn is_pinned(&self) -> bool {
        unsafe { (*self.local).is_pinned() }
    }
-}

-unsafe impl Send for Handle {}
+    /// Returns the `Collector` associated with this handle.
+    #[inline]
+    pub fn collector(&self) -> &Collector {
+        unsafe { (*self.local).collector() }
+    }
+}

impl Drop for Handle {
    #[inline]
@@ -85,6 +104,12 @@ impl Clone for Handle {
    }
}

+impl fmt::Debug for Handle {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        f.debug_struct("Handle").finish()
+    }
+}
+
#[cfg(test)]
mod tests {
    use std::mem;
@@ -100,7 +125,7 @@ mod tests {
    #[test]
    fn pin_reentrant() {
        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();
        drop(collector);

        assert!(!handle.is_pinned());
@@ -119,7 +144,7 @@ mod tests {
    #[test]
    fn flush_local_bag() {
        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();
        drop(collector);

        for _ in 0..100 {
@@ -128,9 +153,9 @@ mod tests {
            let a = Owned::new(7).into_shared(guard);
            guard.defer(move || a.into_owned());

-            assert!(!(*guard.get_local()).is_bag_empty());
+            assert!(!(*(*guard.local).bag.get()).is_empty());

-            while !(*guard.get_local()).is_bag_empty() {
+            while !(*(*guard.local).bag.get()).is_empty() {
                guard.flush();
            }
        }
@@ -140,7 +165,7 @@ mod tests {
    #[test]
    fn garbage_buffering() {
        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();
        drop(collector);

        let guard = &handle.pin();
@@ -149,7 +174,7 @@ mod tests {
            let a = Owned::new(7).into_shared(guard);
            guard.defer(move || a.into_owned());
        }
-        assert!(!(*guard.get_local()).is_bag_empty());
+        assert!(!(*(*guard.local).bag.get()).is_empty());
    }
}
@@ -157,29 +182,22 @@ mod tests {
    fn pin_holds_advance() {
        let collector = Collector::new();

-        let threads = (0..NUM_THREADS)
-            .map(|_| {
-                scoped::scope(|scope| {
-                    scope.spawn(|| {
-                        let handle = collector.handle();
-                        for _ in 0..500_000 {
-                            let guard = &handle.pin();
+        scoped::scope(|scope| {
+            for _ in 0..NUM_THREADS {
+                scope.spawn(|| {
+                    let handle = collector.register();
+                    for _ in 0..500_000 {
+                        let guard = &handle.pin();

-                            let before = collector.global.load_epoch(Ordering::Relaxed);
-                            collector.global.collect(guard);
-                            let after = collector.global.load_epoch(Ordering::Relaxed);
+                        let before = collector.global.epoch.load(Ordering::Relaxed);
+                        collector.global.collect(guard);
+                        let after = collector.global.epoch.load(Ordering::Relaxed);

-                            assert!(after.wrapping_sub(before) <= 2);
-                        }
-                    })
-                })
-            })
-            .collect::<Vec<_>>();
-        drop(collector);
-
-        for t in threads {
-            t.join();
-        }
+                        assert!(after.wrapping_sub(before) <= 2);
+                    }
+                });
+            }
+        })
    }

    #[test]
@@ -188,7 +206,7 @@ mod tests {
        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;

        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();

        unsafe {
            let guard = &handle.pin();
@@ -221,7 +239,7 @@ mod tests {
        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;

        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();

        unsafe {
            let guard = &handle.pin();
@@ -262,7 +280,7 @@ mod tests {
        }

        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();

        unsafe {
            let guard = &handle.pin();
@@ -287,7 +305,7 @@ mod tests {
        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;

        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();

        unsafe {
            let guard = &handle.pin();
@@ -323,7 +341,7 @@ mod tests {
        }

        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();

        let mut guard = handle.pin();

@@ -351,7 +369,7 @@ mod tests {
        static DESTROYS: AtomicUsize = ATOMIC_USIZE_INIT;

        let collector = Collector::new();
-        let handle = collector.handle();
+        let handle = collector.register();

        unsafe {
            let guard = &handle.pin();
@@ -395,28 +413,22 @@ mod tests {

        let collector = Collector::new();

-        let threads = (0..THREADS)
-            .map(|_| {
-                scoped::scope(|scope| {
-                    scope.spawn(|| {
-                        let handle = collector.handle();
-                        for _ in 0..COUNT {
-                            let guard = &handle.pin();
-                            unsafe {
-                                let a = Owned::new(Elem(7i32)).into_shared(guard);
-                                guard.defer(move || a.into_owned());
-                            }
+        scoped::scope(|scope| {
+            for _ in 0..THREADS {
+                scope.spawn(|| {
+                    let handle = collector.register();
+                    for _ in 0..COUNT {
+                        let guard = &handle.pin();
+                        unsafe {
+                            let a = Owned::new(Elem(7i32)).into_shared(guard);
+                            guard.defer(move || a.into_owned());
+                        }
-                    })
-                })
-            })
-            .collect::<Vec<_>>();
+                    }
+                });
+            }
+        });

-        for t in threads {
-            t.join();
-        }

-        let handle = collector.handle();
+        let handle = collector.register();
        while DROPS.load(Ordering::Relaxed) < COUNT * THREADS {
            let guard = &handle.pin();
            collector.global.collect(guard);
@@ -14,7 +14,7 @@ lazy_static! {

thread_local! {
    /// The per-thread participant for the default garbage collector.
-    static HANDLE: Handle = COLLECTOR.handle();
+    static HANDLE: Handle = COLLECTOR.register();
}

/// Pins the current thread.
@@ -38,3 +38,9 @@ pub fn is_pinned() -> bool {
pub fn default_handle() -> Handle {
    HANDLE.with(|handle| handle.clone())
}
+
+/// Returns the default collector associated with the current thread.
+#[inline]
+pub fn default_collector() -> &'static Collector {
+    &COLLECTOR
+}
@@ -1,3 +1,5 @@
+use core::fmt;
use core::marker::PhantomData;
use core::mem;
+use core::ptr;
use alloc::boxed::Box;
@@ -17,6 +19,13 @@ type Data = [usize; DATA_WORDS];
pub struct Deferred {
    call: unsafe fn(*mut u8),
    data: Data,
+    _marker: PhantomData<*mut ()>, // !Send + !Sync
}

+impl fmt::Debug for Deferred {
+    fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+        write!(f, "Deferred {{ ... }}")
+    }
+}
+
impl Deferred {
@@ -37,7 +46,8 @@ impl Deferred {

            Deferred {
                call: call::<F>,
-                data: data,
+                data,
+                _marker: PhantomData,
            }
        } else {
            let b: Box<F> = Box::new(f);
@@ -51,23 +61,18 @@ impl Deferred {

            Deferred {
                call: call::<F>,
-                data: data,
+                data,
+                _marker: PhantomData,
            }
        }
    }

-    /// Calls the function or panics if it was already called.
+    /// Calls the function.
    #[inline]
-    pub fn call(&mut self) {
-        unsafe fn fail(_: *mut u8) {
-            panic!("cannot call `FnOnce` more than once");
-        }
-
-        let call = mem::replace(&mut self.call, fail);
-        unsafe {
-            call(&mut self.data as *mut Data as *mut u8);
-        }
+    pub fn call(mut self) {
+        let call = self.call;
+        unsafe { call(&mut self.data as *mut Data as *mut u8) };
    }
}

@@ -81,7 +86,7 @@ mod tests {
        let fired = &Cell::new(false);
        let a = [0usize; 1];

-        let mut d = Deferred::new(move || {
+        let d = Deferred::new(move || {
            drop(a);
            fired.set(true);
        });
@@ -96,7 +101,7 @@ mod tests {
        let fired = &Cell::new(false);
        let a = [0usize; 10];

-        let mut d = Deferred::new(move || {
+        let d = Deferred::new(move || {
            drop(a);
            fired.set(true);
        });
@@ -106,42 +111,24 @@ mod tests {
        assert!(fired.get());
    }

-    #[test]
-    #[should_panic(expected = "cannot call `FnOnce` more than once")]
-    fn twice_on_stack() {
-        let a = [0usize; 1];
-        let mut d = Deferred::new(move || drop(a));
-        d.call();
-        d.call();
-    }
-
-    #[test]
-    #[should_panic(expected = "cannot call `FnOnce` more than once")]
-    fn twice_on_heap() {
-        let a = [0usize; 10];
-        let mut d = Deferred::new(move || drop(a));
-        d.call();
-        d.call();
-    }
-
    #[test]
    fn string() {
        let a = "hello".to_string();
-        let mut d = Deferred::new(move || assert_eq!(a, "hello"));
+        let d = Deferred::new(move || assert_eq!(a, "hello"));
        d.call();
    }

    #[test]
    fn boxed_slice_i32() {
        let a: Box<[i32]> = vec![2, 3, 5, 7].into_boxed_slice();
-        let mut d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7]));
+        let d = Deferred::new(move || assert_eq!(*a, [2, 3, 5, 7]));
        d.call();
    }

    #[test]
    fn long_slice_usize() {
        let a: [usize; 5] = [2, 3, 5, 7, 11];
-        let mut d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11]));
+        let d = Deferred::new(move || assert_eq!(a, [2, 3, 5, 7, 11]));
        d.call();
    }
}
@@ -77,7 +77,7 @@ impl AtomicEpoch {
    #[inline]
    pub fn new(epoch: Epoch) -> Self {
        let data = AtomicUsize::new(epoch.data);
-        AtomicEpoch { data: data }
+        AtomicEpoch { data }
    }

    /// Loads a value from the atomic epoch.
@@ -101,6 +101,6 @@ impl AtomicEpoch {
    #[inline]
    pub fn compare_and_swap(&self, current: Epoch, new: Epoch, ord: Ordering) -> Epoch {
        let data = self.data.compare_and_swap(current.data, new.data, ord);
-        Epoch { data: data }
+        Epoch { data }
    }
}
@@ -1,8 +1,10 @@
+use core::fmt;
use core::ptr;
use core::mem;

-use garbage::Garbage;
+use deferred::Deferred;
use internal::Local;
+use collector::Collector;

/// A guard that keeps the current thread pinned.
///
@@ -73,26 +75,10 @@ use internal::Local;
///
/// [`pin`]: fn.pin.html
pub struct Guard {
-    local: *const Local,
+    pub(crate) local: *const Local,
}

impl Guard {
-    /// Creates a new guard from a pointer to `Local`.
-    ///
-    /// # Safety
-    ///
-    /// The `local` should be a valid pointer created by `Local::register()`.
-    #[doc(hidden)]
-    pub unsafe fn new(local: *const Local) -> Guard {
-        Guard { local: local }
-    }
-
-    /// Accesses the internal pointer to `Local`.
-    #[doc(hidden)]
-    pub unsafe fn get_local(&self) -> *const Local {
-        self.local
-    }
-
    /// Stores a function so that it can be executed at some point after all currently pinned
    /// threads get unpinned.
    ///
@ -127,16 +113,29 @@ impl Guard {
|
|||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Apart from that, keep in mind that another thread may execute `f`, so anything accessed
|
||||
/// by the closure must be `Send`.
|
||||
/// Apart from that, keep in mind that another thread may execute `f`, so anything accessed by
|
||||
/// the closure must be `Send`.
|
||||
///
|
||||
/// We intentionally didn't require `F: Send`, because Rust's type systems usually cannot prove
|
||||
/// `F: Send` for typical use cases. For example, consider the following code snippet, which
|
||||
/// exemplifies the typical use case of deferring the deallocation of a shared reference:
|
||||
///
|
||||
/// ```ignore
|
||||
/// let shared = Owned::new(7i32).into_shared(guard);
|
||||
/// guard.defer(move || shared.into_owned()); // `Shared` is not `Send`!
|
||||
/// ```
|
||||
///
|
||||
/// While `Shared` is not `Send`, it's safe for another thread to call the deferred function,
|
||||
/// because it's called only after the grace period and `shared` is no longer shared with other
|
||||
/// threads. But we don't expect the type system to prove this.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// When a heap-allocated object in a data structure becomes unreachable, it has to be
|
||||
/// deallocated. However, the current thread and other threads may be still holding references
|
||||
/// on the stack to that same object. Therefore it cannot be deallocated before those
|
||||
/// references get dropped. This method can defer deallocation until all those threads get
|
||||
/// unpinned and consequently drop all their references on the stack.
|
||||
/// on the stack to that same object. Therefore it cannot be deallocated before those references
|
||||
/// get dropped. This method can defer deallocation until all those threads get unpinned and
|
||||
/// consequently drop all their references on the stack.
|
||||
///
|
||||
/// ```rust
|
||||
/// use crossbeam_epoch::{self as epoch, Atomic, Owned};
|
||||
|
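For orientation, a minimal sketch of the retire-and-defer pattern this method enables, assuming the crossbeam-epoch API of this vintage (in some releases `defer` is an `unsafe fn`, hence the `unsafe` block):

```rust
extern crate crossbeam_epoch as epoch;

use epoch::{Atomic, Owned};
use std::sync::atomic::Ordering::SeqCst;

fn main() {
    let a = Atomic::new(7i32);
    let guard = &epoch::pin();

    // Swap in a new value; the old node is retired rather than freed now.
    let old = a.swap(Owned::new(8i32), SeqCst, guard);

    unsafe {
        // The closure runs only after every thread pinned at swap time has
        // unpinned, so no stale reference to the old node can remain.
        guard.defer(move || drop(old.into_owned()));
    }
}
```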
@@ -173,10 +172,8 @@ impl Guard {
|
|||
where
|
||||
F: FnOnce() -> R,
|
||||
{
|
||||
let garbage = Garbage::new(|| drop(f()));
|
||||
|
||||
if let Some(local) = self.local.as_ref() {
|
||||
local.defer(garbage, self);
|
||||
local.defer(Deferred::new(move || drop(f())), self);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -300,6 +297,28 @@ impl Guard {
|
|||
|
||||
f()
|
||||
}
|
||||
|
||||
/// Returns the `Collector` associated with this guard.
|
||||
///
|
||||
/// This method is useful when you need to ensure that all guards used with
|
||||
/// a data structure come from the same collector.
|
||||
///
|
||||
/// If this method is called from an [`unprotected`] guard, then `None` is returned.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use crossbeam_epoch as epoch;
|
||||
///
|
||||
/// let mut guard1 = epoch::pin();
|
||||
/// let mut guard2 = epoch::pin();
|
||||
/// assert!(guard1.collector() == guard2.collector());
|
||||
/// ```
|
||||
///
|
||||
/// [`unprotected`]: fn.unprotected.html
|
||||
pub fn collector(&self) -> Option<&Collector> {
|
||||
unsafe { self.local.as_ref().map(|local| local.collector()) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Guard {
|
||||
|
@@ -321,6 +340,12 @@ impl Clone for Guard {
|
|||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Guard {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("Guard").finish()
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to a dummy guard that allows unprotected access to [`Atomic`]s.
|
||||
///
|
||||
/// This guard should be used on special occasions only. Note that it doesn't actually keep any
|
||||
|
@@ -370,19 +395,19 @@ impl Clone for Guard {
|
|||
///
|
||||
/// ```
|
||||
/// use crossbeam_epoch::{self as epoch, Atomic};
|
||||
/// use std::ptr;
|
||||
/// use std::mem::ManuallyDrop;
|
||||
/// use std::sync::atomic::Ordering::Relaxed;
|
||||
///
|
||||
/// struct Stack {
|
||||
/// head: epoch::Atomic<Node>,
|
||||
/// struct Stack<T> {
|
||||
/// head: Atomic<Node<T>>,
|
||||
/// }
|
||||
///
|
||||
/// struct Node {
|
||||
/// data: u32,
|
||||
/// next: epoch::Atomic<Node>,
|
||||
/// struct Node<T> {
|
||||
/// data: ManuallyDrop<T>,
|
||||
/// next: Atomic<Node<T>>,
|
||||
/// }
|
||||
///
|
||||
/// impl Drop for Stack {
|
||||
/// impl<T> Drop for Stack<T> {
|
||||
/// fn drop(&mut self) {
|
||||
/// unsafe {
|
||||
/// // Unprotected load.
|
||||
|
@@ -392,8 +417,10 @@ impl Clone for Guard {
|
|||
/// // Unprotected load.
|
||||
/// let next = n.next.load(Relaxed, epoch::unprotected());
|
||||
///
|
||||
/// // Take ownership of the node, then drop it.
|
||||
/// drop(node.into_owned());
|
||||
/// // Take ownership of the node, then drop its data and deallocate it.
|
||||
/// let mut o = node.into_owned();
|
||||
/// ManuallyDrop::drop(&mut o.data);
|
||||
/// drop(o);
|
||||
///
|
||||
/// node = next;
|
||||
/// }
|
||||
|
|
|
@@ -15,32 +15,117 @@
|
|||
//!
|
||||
//! When a participant is pinned, a `Guard` is returned as a witness that the participant is pinned.
|
||||
//! Guards are necessary for performing atomic operations, and for freeing/dropping locations.
|
||||
//!
|
||||
//! # Thread-local bag
|
||||
//!
|
||||
//! Objects that get unlinked from concurrent data structures must be stashed away until the global
|
||||
//! epoch sufficiently advances so that they become safe for destruction. Pointers to such objects
|
||||
//! are pushed into a thread-local bag, and when it becomes full, the bag is marked with the current
|
||||
//! global epoch and pushed into the global queue of bags. We store objects in thread-local storage
|
||||
//! to amortize the synchronization cost of pushing garbage to the global queue.
|
||||
//!
|
||||
//! # Global queue
|
||||
//!
|
||||
//! Whenever a bag is pushed into a queue, the objects in some bags in the queue are collected and
|
||||
//! destroyed along the way. This design reduces contention on data structures. The global queue
|
||||
//! cannot be explicitly accessed: the only way to interact with it is by calling `defer()`,
|
||||
//! which adds an object to the thread-local bag, or `collect()`, which manually triggers
|
||||
//! garbage collection.
|
||||
//!
|
||||
//! Ideally, each instance of a concurrent data structure would have its own queue that gets fully
|
||||
//! destroyed as soon as the data structure gets dropped.
|
||||
|
||||
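The flow described above — defer into a thread-local bag, seal full bags with the current epoch into a global queue, and destroy bags that are at least two epochs old — can be summarized with a self-contained model (illustrative only; plain `std` types stand in for the crate's lock-free internals, and `MAX_OBJECTS` mirrors the constant defined below):

```rust
use std::collections::VecDeque;
use std::mem;
use std::sync::Mutex;

const MAX_OBJECTS: usize = 64;

type Deferred = Box<dyn FnOnce()>;

struct Local { bag: Vec<Deferred> }
struct Global { queue: Mutex<VecDeque<(u64, Vec<Deferred>)>> }

impl Local {
    /// Stash a deferred function; seal and push the bag once it fills up.
    fn defer(&mut self, global: &Global, epoch: u64, f: Deferred) {
        self.bag.push(f);
        if self.bag.len() >= MAX_OBJECTS {
            let sealed = mem::replace(&mut self.bag, Vec::new());
            global.queue.lock().unwrap().push_back((epoch, sealed));
        }
    }
}

impl Global {
    /// Run and drop every bag that is at least two epochs old.
    fn collect(&self, global_epoch: u64) {
        let mut queue = self.queue.lock().unwrap();
        while queue
            .front()
            .map_or(false, |&(e, _)| global_epoch.wrapping_sub(e) >= 2)
        {
            for f in queue.pop_front().unwrap().1 {
                f(); // execute the deferred destructor
            }
        }
    }
}

fn main() {
    let global = Global { queue: Mutex::new(VecDeque::new()) };
    let mut local = Local { bag: Vec::new() };
    for _ in 0..MAX_OBJECTS {
        local.defer(&global, 0, Box::new(|| ()));
    }
    global.collect(2); // two advancements later: the sealed bag is destroyed
}
```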
use core::cell::{Cell, UnsafeCell};
|
||||
use core::mem;
|
||||
use core::mem::{self, ManuallyDrop};
|
||||
use core::num::Wrapping;
|
||||
use core::ptr;
|
||||
use core::sync::atomic;
|
||||
use core::sync::atomic::Ordering;
|
||||
use alloc::boxed::Box;
|
||||
use alloc::arc::Arc;
|
||||
|
||||
use crossbeam_utils::cache_padded::CachePadded;
|
||||
use nodrop::NoDrop;
|
||||
use arrayvec::ArrayVec;
|
||||
|
||||
use atomic::Owned;
|
||||
use collector::{Handle, Collector};
|
||||
use epoch::{AtomicEpoch, Epoch};
|
||||
use guard::{unprotected, Guard};
|
||||
use garbage::{Bag, Garbage};
|
||||
use deferred::Deferred;
|
||||
use sync::list::{List, Entry, IterError, IsElement};
|
||||
use sync::queue::Queue;
|
||||
|
||||
/// Number of bags to destroy.
|
||||
const COLLECT_STEPS: usize = 8;
|
||||
/// Maximum number of objects a bag can contain.
|
||||
#[cfg(not(feature = "sanitize"))]
|
||||
const MAX_OBJECTS: usize = 64;
|
||||
#[cfg(feature = "sanitize")]
|
||||
const MAX_OBJECTS: usize = 4;
|
||||
|
||||
/// Number of pinnings after which a participant will execute some deferred functions from the
|
||||
/// global queue.
|
||||
const PINNINGS_BETWEEN_COLLECT: usize = 128;
|
||||
/// A bag of deferred functions.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct Bag {
|
||||
/// Stashed objects.
|
||||
deferreds: ArrayVec<[Deferred; MAX_OBJECTS]>,
|
||||
}
|
||||
|
||||
/// `Bag::try_push()` requires that it is safe for another thread to execute the given functions.
|
||||
unsafe impl Send for Bag {}
|
||||
|
||||
impl Bag {
|
||||
/// Returns a new, empty bag.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Returns `true` if the bag is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.deferreds.is_empty()
|
||||
}
|
||||
|
||||
/// Attempts to insert a deferred function into the bag.
|
||||
///
|
||||
/// Returns `Ok(())` if successful, and `Err(deferred)` for the given `deferred` if the bag is
|
||||
/// full.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// It should be safe for another thread to execute the given function.
|
||||
pub unsafe fn try_push(&mut self, deferred: Deferred) -> Result<(), Deferred> {
|
||||
self.deferreds.try_push(deferred).map_err(|e| e.element())
|
||||
}
|
||||
|
||||
/// Seals the bag with the given epoch.
|
||||
fn seal(self, epoch: Epoch) -> SealedBag {
|
||||
SealedBag { epoch, bag: self }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Bag {
|
||||
fn drop(&mut self) {
|
||||
// Call all deferred functions.
|
||||
for deferred in self.deferreds.drain(..) {
|
||||
deferred.call();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A pair of an epoch and a bag.
|
||||
#[derive(Default, Debug)]
|
||||
struct SealedBag {
|
||||
epoch: Epoch,
|
||||
bag: Bag,
|
||||
}
|
||||
|
||||
/// It is safe to share `SealedBag` because `is_expired` only inspects the epoch.
|
||||
unsafe impl Sync for SealedBag {}
|
||||
|
||||
impl SealedBag {
|
||||
/// Checks if it is safe to drop the bag w.r.t. the given global epoch.
|
||||
fn is_expired(&self, global_epoch: Epoch) -> bool {
|
||||
// A pinned participant can witness at most one epoch advancement. Therefore, any bag that
|
||||
// is within one epoch of the current one cannot be destroyed yet.
|
||||
global_epoch.wrapping_sub(self.epoch) >= 2
|
||||
}
|
||||
}
|
||||
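A quick illustration (not part of the crate) of why `is_expired` uses wrapping subtraction with a threshold of 2: a pinned participant can lag the global epoch by at most one step, and the counter may wrap around. Plain integers stand in for the crate's `Epoch` type here:

```rust
fn is_expired(global_epoch: u64, sealed_epoch: u64) -> bool {
    global_epoch.wrapping_sub(sealed_epoch) >= 2
}

fn main() {
    assert!(!is_expired(5, 5)); // same epoch: may still be referenced
    assert!(!is_expired(5, 4)); // one step behind: may still be referenced
    assert!(is_expired(6, 4)); // two steps behind: safe to destroy
    assert!(is_expired(1, u64::MAX)); // the rule survives wraparound
}
```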
|
||||
/// The global data for a garbage collector.
|
||||
pub struct Global {
|
||||
|
@@ -48,28 +133,26 @@ pub struct Global {
|
|||
locals: List<Local>,
|
||||
|
||||
/// The global queue of bags of deferred functions.
|
||||
queue: Queue<(Epoch, Bag)>,
|
||||
queue: Queue<SealedBag>,
|
||||
|
||||
/// The global epoch.
|
||||
epoch: CachePadded<AtomicEpoch>,
|
||||
pub(crate) epoch: CachePadded<AtomicEpoch>,
|
||||
}
|
||||
|
||||
impl Global {
|
||||
/// Number of bags to destroy.
|
||||
const COLLECT_STEPS: usize = 8;
|
||||
|
||||
/// Creates a new global data for garbage collection.
|
||||
#[inline]
|
||||
pub fn new() -> Global {
|
||||
Global {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
locals: List::new(),
|
||||
queue: Queue::new(),
|
||||
epoch: CachePadded::new(AtomicEpoch::new(Epoch::starting())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the current global epoch.
|
||||
pub fn load_epoch(&self, ordering: Ordering) -> Epoch {
|
||||
self.epoch.load(ordering)
|
||||
}
|
||||
|
||||
/// Pushes the bag into the global queue and replaces the bag with a new empty bag.
|
||||
pub fn push_bag(&self, bag: &mut Bag, guard: &Guard) {
|
||||
let bag = mem::replace(bag, Bag::new());
|
||||
|
@@ -77,7 +160,7 @@ impl Global {
|
|||
atomic::fence(Ordering::SeqCst);
|
||||
|
||||
let epoch = self.epoch.load(Ordering::Relaxed);
|
||||
self.queue.push((epoch, bag), guard);
|
||||
self.queue.push(bag.seal(epoch), guard);
|
||||
}
|
||||
|
||||
/// Collects several bags from the global queue and executes deferred functions in them.
|
||||
|
@@ -91,22 +174,20 @@ impl Global {
|
|||
pub fn collect(&self, guard: &Guard) {
|
||||
let global_epoch = self.try_advance(guard);
|
||||
|
||||
let condition = |item: &(Epoch, Bag)| {
|
||||
// A pinned participant can witness at most one epoch advancement. Therefore, any bag
|
||||
// that is within one epoch of the current one cannot be destroyed yet.
|
||||
global_epoch.wrapping_sub(item.0) >= 2
|
||||
};
|
||||
|
||||
let steps = if cfg!(feature = "sanitize") {
|
||||
usize::max_value()
|
||||
} else {
|
||||
COLLECT_STEPS
|
||||
Self::COLLECT_STEPS
|
||||
};
|
||||
|
||||
for _ in 0..steps {
|
||||
match self.queue.try_pop_if(&condition, guard) {
|
||||
match self.queue.try_pop_if(
|
||||
&|sealed_bag: &SealedBag| sealed_bag.is_expired(global_epoch),
|
||||
guard,
|
||||
)
|
||||
{
|
||||
None => break,
|
||||
Some(bag) => drop(bag),
|
||||
Some(sealed_bag) => drop(sealed_bag),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -172,10 +253,10 @@ pub struct Local {
|
|||
/// A reference to the global data.
|
||||
///
|
||||
/// When all guards and handles get dropped, this reference is destroyed.
|
||||
global: UnsafeCell<NoDrop<Arc<Global>>>,
|
||||
collector: UnsafeCell<ManuallyDrop<Collector>>,
|
||||
|
||||
/// The local bag of deferred functions.
|
||||
bag: UnsafeCell<Bag>,
|
||||
pub(crate) bag: UnsafeCell<Bag>,
|
||||
|
||||
/// The number of guards keeping this participant pinned.
|
||||
guard_count: Cell<usize>,
|
||||
|
@@ -189,38 +270,40 @@ pub struct Local {
|
|||
pin_count: Cell<Wrapping<usize>>,
|
||||
}
|
||||
|
||||
unsafe impl Sync for Local {}
|
||||
|
||||
impl Local {
|
||||
/// Number of pinnings after which a participant will execute some deferred functions from the
|
||||
/// global queue.
|
||||
const PINNINGS_BETWEEN_COLLECT: usize = 128;
|
||||
|
||||
/// Registers a new `Local` in the provided `Global`.
|
||||
pub fn register(global: &Arc<Global>) -> *const Local {
|
||||
pub fn register(collector: &Collector) -> Handle {
|
||||
unsafe {
|
||||
// Since we dereference no pointers in this block, it is safe to use `unprotected`.
|
||||
|
||||
let local = Owned::new(Local {
|
||||
entry: Entry::default(),
|
||||
epoch: AtomicEpoch::new(Epoch::starting()),
|
||||
global: UnsafeCell::new(NoDrop::new(global.clone())),
|
||||
collector: UnsafeCell::new(ManuallyDrop::new(collector.clone())),
|
||||
bag: UnsafeCell::new(Bag::new()),
|
||||
guard_count: Cell::new(0),
|
||||
handle_count: Cell::new(1),
|
||||
pin_count: Cell::new(Wrapping(0)),
|
||||
}).into_shared(&unprotected());
|
||||
global.locals.insert(local, &unprotected());
|
||||
local.as_raw()
|
||||
collector.global.locals.insert(local, &unprotected());
|
||||
Handle { local: local.as_raw() }
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether the local garbage bag is empty.
|
||||
#[inline]
|
||||
pub fn is_bag_empty(&self) -> bool {
|
||||
unsafe { (*self.bag.get()).is_empty() }
|
||||
}
|
||||
|
||||
/// Returns a reference to the `Global` in which this `Local` resides.
|
||||
#[inline]
|
||||
pub fn global(&self) -> &Global {
|
||||
unsafe { &*self.global.get() }
|
||||
&self.collector().global
|
||||
}
|
||||
|
||||
/// Returns a reference to the `Collector` in which this `Local` resides.
|
||||
#[inline]
|
||||
pub fn collector(&self) -> &Collector {
|
||||
unsafe { &**self.collector.get() }
|
||||
}
|
||||
|
||||
/// Returns `true` if the current participant is pinned.
|
||||
|
@@ -229,12 +312,17 @@ impl Local {
|
|||
self.guard_count.get() > 0
|
||||
}
|
||||
|
||||
pub fn defer(&self, mut garbage: Garbage, guard: &Guard) {
|
||||
let bag = unsafe { &mut *self.bag.get() };
|
||||
/// Adds `deferred` to the thread-local bag.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// It should be safe for another thread to execute the given function.
|
||||
pub unsafe fn defer(&self, mut deferred: Deferred, guard: &Guard) {
|
||||
let bag = &mut *self.bag.get();
|
||||
|
||||
while let Err(g) = bag.try_push(garbage) {
|
||||
while let Err(d) = bag.try_push(deferred) {
|
||||
self.global().push_bag(bag, guard);
|
||||
garbage = g;
|
||||
deferred = d;
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -251,7 +339,7 @@ impl Local {
|
|||
/// Pins the `Local`.
|
||||
#[inline]
|
||||
pub fn pin(&self) -> Guard {
|
||||
let guard = unsafe { Guard::new(self) };
|
||||
let guard = Guard { local: self };
|
||||
|
||||
let guard_count = self.guard_count.get();
|
||||
self.guard_count.set(guard_count.checked_add(1).unwrap());
|
||||
|
@@ -287,7 +375,7 @@ impl Local {
|
|||
|
||||
// After every `PINNINGS_BETWEEN_COLLECT` pinnings, try advancing the epoch and collecting
|
||||
// some garbage.
|
||||
if count.0 % PINNINGS_BETWEEN_COLLECT == 0 {
|
||||
if count.0 % Self::PINNINGS_BETWEEN_COLLECT == 0 {
|
||||
self.global().collect(&guard);
|
||||
}
|
||||
}
|
||||
|
@@ -327,7 +415,7 @@ impl Local {
|
|||
self.epoch.store(global_epoch, Ordering::Release);
|
||||
|
||||
// However, we don't need a following `SeqCst` fence, because it is safe for memory
|
||||
// accesses from the new epoch to be executed before updating the local epoch. At
|
||||
// accesses from the new epoch to be executed before updating the local epoch. At
|
||||
// worst, other threads will see the new epoch late and delay GC slightly.
|
||||
}
|
||||
}
|
||||
|
@@ -376,15 +464,15 @@ impl Local {
|
|||
// Take the reference to the `Global` out of this `Local`. Since we're not protected
|
||||
// by a guard at this time, it's crucial that the reference is read before marking the
|
||||
// `Local` as deleted.
|
||||
let global: Arc<Global> = ptr::read(&**self.global.get());
|
||||
let collector: Collector = ptr::read(&*(*self.collector.get()));
|
||||
|
||||
// Mark this node in the linked list as deleted.
|
||||
self.entry.delete(&unprotected());
|
||||
|
||||
// Finally, drop the reference to the global. Note that this might be the last
|
||||
// reference to the `Global`. If so, the global data will be destroyed and all deferred
|
||||
// functions in its queue will be executed.
|
||||
drop(global);
|
||||
// Finally, drop the reference to the global. Note that this might be the last reference
|
||||
// to the `Global`. If so, the global data will be destroyed and all deferred functions
|
||||
// in its queue will be executed.
|
||||
drop(collector);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -407,3 +495,49 @@ impl IsElement<Local> for Local {
|
|||
drop(Box::from_raw(local as *const Local as *mut Local));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn check_defer() {
|
||||
static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
fn set() {
|
||||
FLAG.store(42, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
let d = Deferred::new(set);
|
||||
assert_eq!(FLAG.load(Ordering::Relaxed), 0);
|
||||
d.call();
|
||||
assert_eq!(FLAG.load(Ordering::Relaxed), 42);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn check_bag() {
|
||||
static FLAG: AtomicUsize = ATOMIC_USIZE_INIT;
|
||||
fn incr() {
|
||||
FLAG.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
let mut bag = Bag::new();
|
||||
assert!(bag.is_empty());
|
||||
|
||||
for _ in 0..MAX_OBJECTS {
|
||||
assert!(unsafe { bag.try_push(Deferred::new(incr)).is_ok() });
|
||||
assert!(!bag.is_empty());
|
||||
assert_eq!(FLAG.load(Ordering::Relaxed), 0);
|
||||
}
|
||||
|
||||
let result = unsafe { bag.try_push(Deferred::new(incr)) };
|
||||
assert!(result.is_err());
|
||||
assert!(!bag.is_empty());
|
||||
assert_eq!(FLAG.load(Ordering::Relaxed), 0);
|
||||
|
||||
drop(bag);
|
||||
assert_eq!(FLAG.load(Ordering::Relaxed), MAX_OBJECTS);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -36,8 +36,8 @@
|
|||
//! # Garbage
|
||||
//!
|
||||
//! Objects that get removed from concurrent collections must be stashed away until all currently
|
||||
//! pinned participants get unpinned. Such objects can be stored into a [`Garbage`], where they are
|
||||
//! kept until the right time for their destruction comes.
|
||||
//! pinned participants get unpinned. Such objects can be stored into a thread-local or global
|
||||
//! storage, where they are kept until the right time for their destruction comes.
|
||||
//!
|
||||
//! There is a global shared instance of garbage queue. You can [`defer`] the execution of an
|
||||
//! arbitrary function until the global epoch is advanced enough. Most notably, concurrent data
|
||||
|
@@ -58,11 +58,13 @@
|
|||
#![cfg_attr(feature = "nightly", feature(alloc))]
|
||||
#![cfg_attr(not(test), no_std)]
|
||||
|
||||
#![warn(missing_docs, missing_debug_implementations)]
|
||||
|
||||
#[cfg(test)]
|
||||
extern crate core;
|
||||
#[cfg(all(not(test), feature = "use_std"))]
|
||||
#[macro_use]
|
||||
extern crate std;
|
||||
#[cfg(test)]
|
||||
extern crate core;
|
||||
|
||||
// Use liballoc on nightly to avoid a dependency on libstd
|
||||
#[cfg(feature = "nightly")]
|
||||
|
@@ -75,13 +77,6 @@ mod alloc {
|
|||
pub use self::std::sync as arc;
|
||||
}
|
||||
|
||||
#[cfg(feature = "manually_drop")]
|
||||
mod nodrop {
|
||||
pub use std::mem::ManuallyDrop as NoDrop;
|
||||
}
|
||||
#[cfg(not(feature = "manually_drop"))]
|
||||
extern crate nodrop;
|
||||
|
||||
extern crate arrayvec;
|
||||
extern crate crossbeam_utils;
|
||||
#[cfg(feature = "use_std")]
|
||||
|
@@ -98,13 +93,12 @@ mod collector;
|
|||
mod default;
|
||||
mod deferred;
|
||||
mod epoch;
|
||||
mod garbage;
|
||||
mod guard;
|
||||
mod internal;
|
||||
mod sync;
|
||||
|
||||
pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared};
|
||||
pub use self::atomic::{Atomic, CompareAndSetError, CompareAndSetOrdering, Owned, Shared, Pointer};
|
||||
pub use self::guard::{unprotected, Guard};
|
||||
#[cfg(feature = "use_std")]
|
||||
pub use self::default::{default_handle, is_pinned, pin};
|
||||
pub use self::default::{default_collector, default_handle, is_pinned, pin};
|
||||
pub use self::collector::{Collector, Handle};
|
||||
|
|
|
@@ -130,8 +130,8 @@ pub enum IterError {
|
|||
|
||||
impl Default for Entry {
|
||||
/// Returns the empty entry.
|
||||
fn default() -> Entry {
|
||||
Entry { next: Atomic::null() }
|
||||
fn default() -> Self {
|
||||
Self { next: Atomic::null() }
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -150,8 +150,8 @@ impl Entry {
|
|||
|
||||
impl<T, C: IsElement<T>> List<T, C> {
|
||||
/// Returns a new, empty linked list.
|
||||
pub fn new() -> List<T, C> {
|
||||
List {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
head: Atomic::null(),
|
||||
_marker: PhantomData,
|
||||
}
|
||||
|
@@ -204,7 +204,7 @@ impl<T, C: IsElement<T>> List<T, C> {
|
|||
/// thread will continue to iterate over the same list.
|
||||
pub fn iter<'g>(&'g self, guard: &'g Guard) -> Iter<'g, T, C> {
|
||||
Iter {
|
||||
guard: guard,
|
||||
guard,
|
||||
pred: &self.head,
|
||||
curr: self.head.load(Acquire, guard),
|
||||
head: &self.head,
|
||||
|
@@ -289,7 +289,7 @@ impl<'g, T: 'g, C: IsElement<T>> Iterator for Iter<'g, T, C> {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use {Collector, Owned, Guard};
|
||||
use {Collector, Owned};
|
||||
use crossbeam_utils::scoped;
|
||||
use std::sync::Barrier;
|
||||
use super::*;
|
||||
|
@@ -313,7 +313,7 @@ mod tests {
|
|||
#[test]
|
||||
fn insert() {
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
let handle = collector.register();
|
||||
let guard = handle.pin();
|
||||
|
||||
let l: List<Entry> = List::new();
|
||||
|
@@ -352,7 +352,7 @@ mod tests {
|
|||
#[test]
|
||||
fn delete() {
|
||||
let collector = Collector::new();
|
||||
let handle = collector.handle();
|
||||
let handle = collector.register();
|
||||
let guard = handle.pin();
|
||||
|
||||
let l: List<Entry> = List::new();
|
||||
|
@@ -400,7 +400,7 @@ mod tests {
|
|||
s.spawn(|| {
|
||||
b.wait();
|
||||
|
||||
let handle = collector.handle();
|
||||
let handle = collector.register();
|
||||
let guard: Guard = handle.pin();
|
||||
let mut v = Vec::with_capacity(ITERS);
|
||||
|
||||
|
@@ -420,7 +420,7 @@ mod tests {
|
|||
});
|
||||
});
|
||||
|
||||
let handle = collector.handle();
|
||||
let handle = collector.register();
|
||||
let guard = handle.pin();
|
||||
|
||||
let mut iter = l.iter(&guard);
|
||||
|
@@ -439,7 +439,7 @@ mod tests {
|
|||
s.spawn(|| {
|
||||
b.wait();
|
||||
|
||||
let handle = collector.handle();
|
||||
let handle = collector.register();
|
||||
let guard: Guard = handle.pin();
|
||||
let mut v = Vec::with_capacity(ITERS);
|
||||
|
||||
|
@@ -464,7 +464,7 @@ mod tests {
|
|||
});
|
||||
});
|
||||
|
||||
let handle = collector.handle();
|
||||
let handle = collector.register();
|
||||
let guard = handle.pin();
|
||||
|
||||
let mut iter = l.iter(&guard);
|
||||
|
|
|
@@ -5,13 +5,11 @@
|
|||
//! Michael and Scott. Simple, Fast, and Practical Non-Blocking and Blocking Concurrent Queue
|
||||
//! Algorithms. PODC 1996. http://dl.acm.org/citation.cfm?id=248106
|
||||
|
||||
use core::fmt;
|
||||
use core::mem;
|
||||
use core::mem::{self, ManuallyDrop};
|
||||
use core::ptr;
|
||||
use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
|
||||
|
||||
use crossbeam_utils::cache_padded::CachePadded;
|
||||
use nodrop::NoDrop;
|
||||
|
||||
use {unprotected, Atomic, Guard, Owned, Shared};
|
||||
|
||||
|
@@ -24,28 +22,24 @@ pub struct Queue<T> {
|
|||
tail: CachePadded<Atomic<Node<T>>>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Node<T> {
|
||||
/// The slot in which a value of type `T` can be stored.
|
||||
///
|
||||
/// The type of `data` is `NoDrop<T>` because a `Node<T>` doesn't always contain a `T`. For
|
||||
/// example, the sentinel node in a queue never contains a value: its slot is always empty.
|
||||
/// The type of `data` is `ManuallyDrop<T>` because a `Node<T>` doesn't always contain a `T`.
|
||||
/// For example, the sentinel node in a queue never contains a value: its slot is always empty.
|
||||
/// Other nodes start their life with a push operation and contain a value until it gets popped
|
||||
/// out. After that such empty nodes get added to the collector for destruction.
|
||||
data: NoDrop<T>,
|
||||
data: ManuallyDrop<T>,
|
||||
|
||||
next: Atomic<Node<T>>,
|
||||
}
|
||||
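The switch from `NoDrop<T>` to the standard `ManuallyDrop<T>` keeps the same discipline: the value is moved out of the slot with `ptr::read`, and the slot's suppressed destructor guarantees it is dropped exactly once. A standalone sketch of the pattern:

```rust
use std::mem::ManuallyDrop;
use std::ptr;

fn main() {
    let slot = ManuallyDrop::new(String::from("payload"));
    // Mirrors `ManuallyDrop::into_inner(ptr::read(&n.data))` in `try_pop`:
    // move the value out without running its destructor in place.
    let value = ManuallyDrop::into_inner(unsafe { ptr::read(&slot) });
    assert_eq!(value, "payload");
    // `slot` goes out of scope without dropping the String a second time.
}
```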
|
||||
impl<T> fmt::Debug for Node<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
|
||||
write!(f, "node {{ ... }}")
|
||||
}
|
||||
}
|
||||
|
||||
// Any particular `T` should never be accessed concurrently, so no need for `Sync`.
|
||||
unsafe impl<T: Send> Sync for Queue<T> {}
|
||||
unsafe impl<T: Send> Send for Queue<T> {}
|
||||
|
||||
|
||||
impl<T> Queue<T> {
|
||||
/// Create a new, empty queue.
|
||||
pub fn new() -> Queue<T> {
|
||||
|
@@ -93,7 +87,7 @@ impl<T> Queue<T> {
|
|||
/// Adds `t` to the back of the queue, possibly waking up threads blocked on `pop`.
|
||||
pub fn push(&self, t: T, guard: &Guard) {
|
||||
let new = Owned::new(Node {
|
||||
data: NoDrop::new(t),
|
||||
data: ManuallyDrop::new(t),
|
||||
next: Atomic::null(),
|
||||
});
|
||||
let new = Owned::into_shared(new, guard);
|
||||
|
@@ -121,7 +115,7 @@ impl<T> Queue<T> {
|
|||
.compare_and_set(head, next, Release, guard)
|
||||
.map(|_| {
|
||||
guard.defer(move || drop(head.into_owned()));
|
||||
Some(NoDrop::into_inner(ptr::read(&n.data)))
|
||||
Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
|
||||
})
|
||||
.map_err(|_| ())
|
||||
},
|
||||
|
@@ -146,7 +140,7 @@ impl<T> Queue<T> {
|
|||
.compare_and_set(head, next, Release, guard)
|
||||
.map(|_| {
|
||||
guard.defer(move || drop(head.into_owned()));
|
||||
Some(NoDrop::into_inner(ptr::read(&n.data)))
|
||||
Some(ManuallyDrop::into_inner(ptr::read(&n.data)))
|
||||
})
|
||||
.map_err(|_| ())
|
||||
},
|
||||
|
@@ -199,10 +193,9 @@ impl<T> Drop for Queue<T> {
|
|||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use {pin};
|
||||
|
||||
use core::sync::atomic::Ordering;
|
||||
use super::*;
|
||||
use crossbeam_utils::scoped;
|
||||
use pin;
|
||||
|
||||
struct Queue<T> {
|
||||
queue: super::Queue<T>,
|
||||
|
@@ -220,9 +213,9 @@ mod test {
|
|||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
let guard = &pin();
|
||||
let head = self.queue.head.load(Ordering::Acquire, guard);
|
||||
let head = self.queue.head.load(Acquire, guard);
|
||||
let h = unsafe { head.deref() };
|
||||
h.next.load(Ordering::Acquire, guard).is_null()
|
||||
h.next.load(Acquire, guard).is_null()
|
||||
}
|
||||
|
||||
pub fn try_pop(&self) -> Option<T> {
|
||||
|
|
|
@@ -0,0 +1 @@
|
|||
{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"945485d3f79a1912bfa6944ed7b07a9c60915fae992f7abcbb1de44ec147953e","Cargo.toml":"2c8f106920b27ebe60616933c4bf04cf2a6515d65f87fafa216febc4d6e1164b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/atomic_option.rs":"0ed05d26d8980c761c4972a0f37f5b507462ed6dff5d688ef92444560e7b9c69","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/lib.rs":"ea79e01d2c2f55d27d365e8cd45e377b313f53f27c705d4e4f6a4f19d7e11a98","src/scoped.rs":"5af1b54ca167c634e4c206aeab53e6ca78682633ad0009af220b17de385b3080"},"package":"2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"}
|
|
@@ -0,0 +1,20 @@
|
|||
language: rust
|
||||
|
||||
rust:
|
||||
- stable
|
||||
- beta
|
||||
- nightly
|
||||
- 1.12.1
|
||||
|
||||
script:
|
||||
- cargo build
|
||||
- cargo build --release
|
||||
- cargo build --no-default-features
|
||||
- cargo build --release --no-default-features
|
||||
- cargo test
|
||||
- cargo test --release
|
||||
- |
|
||||
if [ $TRAVIS_RUST_VERSION == nightly ]; then
|
||||
cargo test --features nightly
|
||||
cargo test --features nightly --release
|
||||
fi
|
|
@@ -0,0 +1,41 @@
|
|||
# Changelog
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
|
||||
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [Unreleased]
|
||||
### Added
|
||||
- Support for Rust 1.12.1.
|
||||
|
||||
### Fixed
|
||||
- Call `T::clone` when cloning a `CachePadded<T>`.
|
||||
|
||||
## [0.2.1] - 2017-11-26
|
||||
### Added
|
||||
- Add `use_std` feature.
|
||||
|
||||
## [0.2.0] - 2017-11-17
|
||||
### Added
|
||||
- Add `nightly` feature.
|
||||
- Use `repr(align(64))` on `CachePadded` with the `nightly` feature.
|
||||
- Implement `Drop` for `CachePadded<T>`.
|
||||
- Implement `Clone` for `CachePadded<T>`.
|
||||
- Implement `From<T>` for `CachePadded<T>`.
|
||||
- Implement better `Debug` for `CachePadded<T>`.
|
||||
- Write more tests.
|
||||
- Add this changelog.
|
||||
|
||||
### Changed
|
||||
- Change cache line length to 64 bytes.
|
||||
|
||||
### Removed
|
||||
- Remove `ZerosValid`.
|
||||
|
||||
## 0.1.0 - 2017-08-27
|
||||
### Added
|
||||
- Old implementation of `CachePadded` from `crossbeam` version 0.3.0
|
||||
|
||||
[Unreleased]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.1...HEAD
|
||||
[0.2.1]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.2.0...v0.2.1
|
||||
[0.2.0]: https://github.com/crossbeam-rs/crossbeam-utils/compare/v0.1.0...v0.2.0
|
|
@@ -11,27 +11,21 @@
|
|||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "mime"
|
||||
version = "0.2.6"
|
||||
authors = ["Sean McArthur <sean.monstar@gmail.com>"]
|
||||
description = "Strongly Typed Mimes"
|
||||
documentation = "http://hyperium.github.io/mime.rs"
|
||||
keywords = ["mime", "media-extensions", "media-types"]
|
||||
license = "MIT"
|
||||
repository = "https://github.com/hyperium/mime.rs"
|
||||
[dependencies.heapsize]
|
||||
version = ">=0.2.0, <0.4"
|
||||
optional = true
|
||||
|
||||
[dependencies.log]
|
||||
version = "0.3"
|
||||
|
||||
[dependencies.serde]
|
||||
version = ">=0.7, <0.9"
|
||||
optional = true
|
||||
[dev-dependencies.serde_json]
|
||||
version = ">=0.7, <0.9"
|
||||
name = "crossbeam-utils"
|
||||
version = "0.2.2"
|
||||
authors = ["The Crossbeam Project Developers"]
|
||||
description = "Utilities for concurrent programming"
|
||||
homepage = "https://github.com/crossbeam-rs/crossbeam-utils"
|
||||
documentation = "https://docs.rs/crossbeam-utils"
|
||||
readme = "README.md"
|
||||
keywords = ["scoped", "thread", "atomic", "cache"]
|
||||
categories = ["algorithms", "concurrency", "data-structures"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/crossbeam-rs/crossbeam-utils"
|
||||
[dependencies.cfg-if]
|
||||
version = "0.1"
|
||||
|
||||
[features]
|
||||
heap_size = ["heapsize"]
|
||||
default = ["use_std"]
|
||||
nightly = []
|
||||
use_std = []
|
|
@@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@@ -0,0 +1,25 @@
|
|||
Copyright (c) 2010 The Rust Project Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@@ -0,0 +1,29 @@
|
|||
# Utilities for concurrent programming
|
||||
|
||||
[![Build Status](https://travis-ci.org/crossbeam-rs/crossbeam-utils.svg?branch=master)](https://travis-ci.org/crossbeam-rs/crossbeam-utils)
|
||||
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/crossbeam-rs/crossbeam-utils)
|
||||
[![Cargo](https://img.shields.io/crates/v/crossbeam-utils.svg)](https://crates.io/crates/crossbeam-utils)
|
||||
[![Documentation](https://docs.rs/crossbeam-utils/badge.svg)](https://docs.rs/crossbeam-utils)
|
||||
|
||||
This crate provides utilities for concurrent programming.
|
||||
|
||||
## Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
crossbeam-utils = "0.2"
|
||||
```
|
||||
|
||||
Next, add this to your crate:
|
||||
|
||||
```rust
|
||||
extern crate crossbeam_utils;
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
Licensed under the terms of MIT license and the Apache License (Version 2.0).
|
||||
|
||||
See [LICENSE-MIT](LICENSE-MIT) and [LICENSE-APACHE](LICENSE-APACHE) for details.
|
|
@@ -0,0 +1,290 @@
|
|||
use core::fmt;
|
||||
use core::mem;
|
||||
use core::ops::{Deref, DerefMut};
|
||||
use core::ptr;
|
||||
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "nightly")] {
|
||||
// This trick allows us to support rustc 1.12.1, which does not support the
|
||||
// #[repr(align(n))] syntax. Using the attribute makes the parser fall over.
|
||||
// It is, however, okay to use it within a macro, since it would be parsed
|
||||
// in a later stage, but that never occurs due to the cfg_if.
|
||||
// TODO(Vtec234): remove this crap when we drop support for 1.12.
|
||||
macro_rules! nightly_inner {
|
||||
() => (
|
||||
#[derive(Clone)]
|
||||
#[repr(align(64))]
|
||||
pub(crate) struct Inner<T> {
|
||||
value: T,
|
||||
}
|
||||
)
|
||||
}
|
||||
nightly_inner!();
|
||||
|
||||
impl<T> Inner<T> {
|
||||
pub(crate) fn new(t: T) -> Inner<T> {
|
||||
Self {
|
||||
value: t
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for Inner<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.value
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for Inner<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
&mut self.value
|
||||
}
|
||||
}
|
||||
} else {
|
||||
use core::marker::PhantomData;
|
||||
|
||||
struct Inner<T> {
|
||||
bytes: [u8; 64],
|
||||
|
||||
/// `[T; 0]` ensures alignment is at least that of `T`.
|
||||
/// `PhantomData<T>` signals that `CachePadded<T>` contains a `T`.
|
||||
_marker: ([T; 0], PhantomData<T>),
|
||||
}
|
||||
|
||||
impl<T> Inner<T> {
|
||||
fn new(t: T) -> Inner<T> {
|
||||
assert!(mem::size_of::<T>() <= mem::size_of::<Self>());
|
||||
assert!(mem::align_of::<T>() <= mem::align_of::<Self>());
|
||||
|
||||
unsafe {
|
||||
let mut inner: Self = mem::uninitialized();
|
||||
let p: *mut T = &mut *inner;
|
||||
ptr::write(p, t);
|
||||
inner
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Deref for Inner<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
unsafe { &*(self.bytes.as_ptr() as *const T) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for Inner<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
unsafe { &mut *(self.bytes.as_ptr() as *mut T) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Drop for CachePadded<T> {
|
||||
fn drop(&mut self) {
|
||||
let p: *mut T = self.deref_mut();
|
||||
unsafe {
|
||||
ptr::drop_in_place(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Clone for Inner<T> {
|
||||
fn clone(&self) -> Inner<T> {
|
||||
let val = self.deref().clone();
|
||||
Self::new(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Pads `T` to the length of a cache line.
|
||||
///
|
||||
/// Sometimes concurrent programming requires a piece of data to be padded out to the size of a
|
||||
/// cacheline to avoid "false sharing": cache lines being invalidated due to unrelated concurrent
|
||||
/// activity. Use this type when you want to *avoid* cache locality.
|
||||
///
|
||||
/// At the moment, cache lines are assumed to be 64 bytes on all architectures.
|
||||
///
|
||||
/// # Size and alignment
|
||||
///
|
||||
/// By default, the size of `CachePadded<T>` is 64 bytes. If `T` is larger than that, then
|
||||
/// `CachePadded::<T>::new` will panic. Alignment of `CachePadded<T>` is the same as that of `T`.
|
||||
///
|
||||
/// However, if the `nightly` feature is enabled, arbitrarily large types `T` can be stored inside
|
||||
/// a `CachePadded<T>`. The size will then be a multiple of 64 at least the size of `T`, and the
|
||||
/// alignment will be the maximum of 64 and the alignment of `T`.
|
||||
pub struct CachePadded<T> {
|
||||
inner: Inner<T>,
|
||||
}
|
||||
|
||||
unsafe impl<T: Send> Send for CachePadded<T> {}
|
||||
unsafe impl<T: Sync> Sync for CachePadded<T> {}
|
||||
|
||||
impl<T> CachePadded<T> {
|
||||
/// Pads a value to the length of a cache line.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// If `nightly` is not enabled and `T` is larger than 64 bytes, this function will panic.
|
||||
pub fn new(t: T) -> CachePadded<T> {
|
||||
CachePadded::<T> { inner: Inner::new(t) }
|
||||
}
|
||||
}
|
||||
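A short usage sketch (not part of this file): padding two atomic counters so that threads updating different counters do not contend on a single cache line. The `fetch_add` calls resolve through `Deref`, since `CachePadded<T>` dereferences to `T`:

```rust
extern crate crossbeam_utils;

use crossbeam_utils::cache_padded::CachePadded;
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let counters = [
        CachePadded::new(AtomicUsize::new(0)),
        CachePadded::new(AtomicUsize::new(0)),
    ];
    // Each counter occupies its own 64-byte line, so concurrent updates to
    // different counters avoid false sharing.
    counters[0].fetch_add(1, Ordering::Relaxed);
    counters[1].fetch_add(1, Ordering::Relaxed);
    assert_eq!(counters[0].load(Ordering::Relaxed), 1);
    assert_eq!(counters[1].load(Ordering::Relaxed), 1);
}
```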
|
||||
impl<T> Deref for CachePadded<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
self.inner.deref()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for CachePadded<T> {
|
||||
fn deref_mut(&mut self) -> &mut T {
|
||||
self.inner.deref_mut()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Default> Default for CachePadded<T> {
|
||||
fn default() -> Self {
|
||||
Self::new(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> Clone for CachePadded<T> {
|
||||
fn clone(&self) -> Self {
|
||||
CachePadded { inner: self.inner.clone() }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: fmt::Debug> fmt::Debug for CachePadded<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let inner: &T = &*self;
|
||||
write!(f, "CachePadded {{ {:?} }}", inner)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> From<T> for CachePadded<T> {
|
||||
fn from(t: T) -> Self {
|
||||
CachePadded::new(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use std::cell::Cell;
|
||||
|
||||
#[test]
|
||||
fn store_u64() {
|
||||
let x: CachePadded<u64> = CachePadded::new(17);
|
||||
assert_eq!(*x, 17);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn store_pair() {
|
||||
let x: CachePadded<(u64, u64)> = CachePadded::new((17, 37));
|
||||
assert_eq!(x.0, 17);
|
||||
assert_eq!(x.1, 37);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn distance() {
|
||||
let arr = [CachePadded::new(17u8), CachePadded::new(37u8)];
|
||||
let a = &*arr[0] as *const u8;
|
||||
let b = &*arr[1] as *const u8;
|
||||
assert!(unsafe { a.offset(64) } <= b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn different_sizes() {
|
||||
CachePadded::new(17u8);
|
||||
CachePadded::new(17u16);
|
||||
CachePadded::new(17u32);
|
||||
CachePadded::new([17u64; 0]);
|
||||
CachePadded::new([17u64; 1]);
|
||||
CachePadded::new([17u64; 2]);
|
||||
CachePadded::new([17u64; 3]);
|
||||
CachePadded::new([17u64; 4]);
|
||||
CachePadded::new([17u64; 5]);
|
||||
CachePadded::new([17u64; 6]);
|
||||
CachePadded::new([17u64; 7]);
|
||||
CachePadded::new([17u64; 8]);
|
||||
}
|
||||
|
||||
cfg_if! {
|
||||
if #[cfg(feature = "nightly")] {
|
||||
#[test]
|
||||
fn large() {
|
||||
let a = [17u64; 9];
|
||||
let b = CachePadded::new(a);
|
||||
assert!(mem::size_of_val(&a) <= mem::size_of_val(&b));
|
||||
}
|
||||
} else {
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn large() {
|
||||
CachePadded::new([17u64; 9]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn debug() {
|
||||
assert_eq!(
|
||||
format!("{:?}", CachePadded::new(17u64)),
|
||||
"CachePadded { 17 }"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn drops() {
|
||||
let count = Cell::new(0);
|
||||
|
||||
struct Foo<'a>(&'a Cell<usize>);
|
||||
|
||||
impl<'a> Drop for Foo<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.0.set(self.0.get() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
let a = CachePadded::new(Foo(&count));
|
||||
let b = CachePadded::new(Foo(&count));
|
||||
|
||||
assert_eq!(count.get(), 0);
|
||||
drop(a);
|
||||
assert_eq!(count.get(), 1);
|
||||
drop(b);
|
||||
assert_eq!(count.get(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn clone() {
|
||||
let a = CachePadded::new(17);
|
||||
let b = a.clone();
|
||||
assert_eq!(*a, *b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn runs_custom_clone() {
|
||||
let count = Cell::new(0);
|
||||
|
||||
struct Foo<'a>(&'a Cell<usize>);
|
||||
|
||||
impl<'a> Clone for Foo<'a> {
|
||||
fn clone(&self) -> Foo<'a> {
|
||||
self.0.set(self.0.get() + 1);
|
||||
Foo::<'a>(self.0)
|
||||
}
|
||||
}
|
||||
|
||||
let a = CachePadded::new(Foo(&count));
|
||||
a.clone();
|
||||
|
||||
assert_eq!(count.get(), 1);
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,14 @@
|
|||
#![cfg_attr(feature = "nightly", feature(attr_literals, repr_align))]
|
||||
#![cfg_attr(not(feature = "use_std"), no_std)]
|
||||
|
||||
#[cfg(feature = "use_std")]
|
||||
extern crate core;
|
||||
|
||||
#[macro_use]
|
||||
extern crate cfg_if;
|
||||
|
||||
pub mod cache_padded;
|
||||
#[cfg(feature = "use_std")]
|
||||
pub mod atomic_option;
|
||||
#[cfg(feature = "use_std")]
|
||||
pub mod scoped;
|
|
@@ -0,0 +1,364 @@
|
|||
/// Scoped thread.
///
/// # Examples
///
/// A basic scoped thread:
///
/// ```
/// crossbeam_utils::scoped::scope(|scope| {
///     scope.spawn(|| {
///         println!("Hello from a scoped thread!");
///     });
/// });
/// ```
///
/// When writing concurrent Rust programs, you'll sometimes see a pattern like this, using
/// [`std::thread::spawn`][spawn]:
///
/// ```ignore
/// let array = [1, 2, 3];
/// let mut guards = vec![];
///
/// for i in &array {
///     let guard = std::thread::spawn(move || {
///         println!("element: {}", i);
///     });
///
///     guards.push(guard);
/// }
///
/// for guard in guards {
///     guard.join().unwrap();
/// }
/// ```
///
/// The basic pattern is:
///
/// 1. Iterate over some collection.
/// 2. Spin up a thread to operate on each part of the collection.
/// 3. Join all the threads.
///
/// However, this code actually gives an error:
///
/// ```text
/// error: `array` does not live long enough
/// for i in &array {
///          ^~~~~
/// in expansion of for loop expansion
/// note: expansion site
/// note: reference must be valid for the static lifetime...
/// note: ...but borrowed value is only valid for the block suffix following statement 0 at ...
///     let array = [1, 2, 3];
///     let mut guards = vec![];
///
///     for i in &array {
///         let guard = std::thread::spawn(move || {
///             println!("element: {}", i);
/// ...
/// error: aborting due to previous error
/// ```
///
/// Because [`std::thread::spawn`][spawn] doesn't know about this scope, it requires a
/// `'static` lifetime. One way of giving it a proper lifetime is to use an [`Arc`][arc]:
///
/// [arc]: http://doc.rust-lang.org/stable/std/sync/struct.Arc.html
/// [spawn]: http://doc.rust-lang.org/std/thread/fn.spawn.html
///
/// ```
/// use std::sync::Arc;
///
/// let array = Arc::new([1, 2, 3]);
/// let mut guards = vec![];
///
/// for i in 0..array.len() {
///     let a = array.clone();
///
///     let guard = std::thread::spawn(move || {
///         println!("element: {}", a[i]);
///     });
///
///     guards.push(guard);
/// }
///
/// for guard in guards {
///     guard.join().unwrap();
/// }
/// ```
///
/// But this introduces unnecessary allocation, as `Arc<T>` puts its data on the heap, and we
/// also end up dealing with reference counts. We know that we're joining the threads before
/// our function returns, so just taking a reference _should_ be safe. Rust can't know that,
/// though.
///
/// Enter scoped threads. Here's our original example, using `spawn` from crossbeam rather
/// than from `std::thread`:
///
/// ```
/// let array = [1, 2, 3];
///
/// crossbeam_utils::scoped::scope(|scope| {
///     for i in &array {
///         scope.spawn(move || {
///             println!("element: {}", i);
///         });
///     }
/// });
/// ```
///
/// Much more straightforward.
// FIXME(jeehoonkang): maybe we should create a new crate for scoped threads.

use std::cell::RefCell;
use std::fmt;
use std::mem;
use std::rc::Rc;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::thread;
use std::io;

use atomic_option::AtomicOption;

#[doc(hidden)]
trait FnBox {
    fn call_box(self: Box<Self>);
}

impl<F: FnOnce()> FnBox for F {
    fn call_box(self: Box<Self>) {
        (*self)()
    }
}

/// Like `std::thread::spawn`, but without the closure bounds.
pub unsafe fn spawn_unsafe<'a, F>(f: F) -> thread::JoinHandle<()>
where
    F: FnOnce() + Send + 'a,
{
    let builder = thread::Builder::new();
    builder_spawn_unsafe(builder, f).unwrap()
}

/// Like `std::thread::Builder::spawn`, but without the closure bounds.
pub unsafe fn builder_spawn_unsafe<'a, F>(
    builder: thread::Builder,
    f: F,
) -> io::Result<thread::JoinHandle<()>>
where
    F: FnOnce() + Send + 'a,
{
    use std::mem;

    let closure: Box<FnBox + 'a> = Box::new(f);
    let closure: Box<FnBox + Send> = mem::transmute(closure);
    builder.spawn(move || closure.call_box())
}
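
// A minimal sketch of the contract the two functions above impose (the
// variable names here are illustrative, not part of the vendored file):
// the `mem::transmute` erases the `'a` bound, so the caller must guarantee
// that any borrowed data outlives the thread, e.g. by joining before the
// data is dropped.
//
//     let data = String::from("borrowed, not 'static");
//     let handle = unsafe { spawn_unsafe(|| println!("{}", data)) };
//     handle.join().unwrap(); // join before `data` goes out of scope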


pub struct Scope<'a> {
    dtors: RefCell<Option<DtorChain<'a>>>,
}

struct DtorChain<'a> {
    dtor: Box<FnBox + 'a>,
    next: Option<Box<DtorChain<'a>>>,
}

enum JoinState {
    Running(thread::JoinHandle<()>),
    Joined,
}

impl JoinState {
    fn join(&mut self) {
        let mut state = JoinState::Joined;
        mem::swap(self, &mut state);
        if let JoinState::Running(handle) = state {
            let res = handle.join();

            if !thread::panicking() {
                res.unwrap();
            }
        }
    }
}
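
// Note on the `thread::panicking()` check in `JoinState::join` above: the
// child's panic is propagated into the parent through `res.unwrap()`, but
// only when the parent is not already unwinding; panicking a second time
// during unwinding would abort the process.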

/// A handle to a scoped thread
pub struct ScopedJoinHandle<T> {
    inner: Rc<RefCell<JoinState>>,
    packet: Arc<AtomicOption<T>>,
    thread: thread::Thread,
}

/// Create a new `scope`, for deferred destructors.
///
/// Scopes, in particular, support [*scoped thread spawning*](struct.Scope.html#method.spawn).
///
/// # Examples
///
/// Creating and using a scope:
///
/// ```
/// crossbeam_utils::scoped::scope(|scope| {
///     scope.defer(|| println!("Exiting scope"));
///     scope.spawn(|| println!("Running child thread in scope"))
/// });
/// // Prints messages in the reverse order written
/// ```
pub fn scope<'a, F, R>(f: F) -> R
where
    F: FnOnce(&Scope<'a>) -> R,
{
    let mut scope = Scope { dtors: RefCell::new(None) };
    let ret = f(&scope);
    scope.drop_all();
    ret
}

impl<'a> fmt::Debug for Scope<'a> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Scope {{ ... }}")
    }
}

impl<T> fmt::Debug for ScopedJoinHandle<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "ScopedJoinHandle {{ ... }}")
    }
}

impl<'a> Scope<'a> {
    // This method is carefully written in a transactional style, so
    // that it can be called directly and, if any dtor panics, can be
    // resumed in the unwinding this causes. By initially running the
    // method outside of any destructor, we avoid any leakage problems
    // due to @rust-lang/rust#14875.
    fn drop_all(&mut self) {
        loop {
            // use a separate scope to ensure that the RefCell borrow
            // is relinquished before running `dtor`
            let dtor = {
                let mut dtors = self.dtors.borrow_mut();
                if let Some(mut node) = dtors.take() {
                    *dtors = node.next.take().map(|b| *b);
                    node.dtor
                } else {
                    return;
                }
            };
            dtor.call_box()
        }
    }

    /// Schedule code to be executed when exiting the scope.
    ///
    /// This is akin to having a destructor on the stack, except that it is
    /// *guaranteed* to be run.
    pub fn defer<F>(&self, f: F)
    where
        F: FnOnce() + 'a,
    {
        let mut dtors = self.dtors.borrow_mut();
        *dtors = Some(DtorChain {
            dtor: Box::new(f),
            next: dtors.take().map(Box::new),
        });
    }
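
    // A minimal sketch of `defer` ordering (building on the doc example for
    // `scope` above): each callback is pushed onto the front of the
    // `DtorChain` and `drop_all` pops from the front, so callbacks run LIFO
    // when the scope exits.
    //
    //     crossbeam_utils::scoped::scope(|scope| {
    //         scope.defer(|| println!("runs second"));
    //         scope.defer(|| println!("runs first"));
    //     });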

    /// Create a scoped thread.
    ///
    /// `spawn` is similar to the [`spawn`][spawn] function in Rust's standard library. The
    /// difference is that this thread is scoped, meaning that it's guaranteed to terminate
    /// before the current stack frame goes away, allowing you to reference the parent stack frame
    /// directly. This is ensured by having the parent thread join on the child thread before the
    /// scope exits.
    ///
    /// [spawn]: http://doc.rust-lang.org/std/thread/fn.spawn.html
    pub fn spawn<F, T>(&self, f: F) -> ScopedJoinHandle<T>
    where
        F: FnOnce() -> T + Send + 'a,
        T: Send + 'a,
    {
        self.builder().spawn(f).unwrap()
    }
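
    // A hedged usage sketch for `spawn` (the variable names are
    // illustrative): because the child is joined before the scope exits,
    // the closure may borrow from the parent stack frame, and `join` hands
    // back the closure's result.
    //
    //     let greeting = String::from("hello");
    //     let len = crossbeam_utils::scoped::scope(|scope| {
    //         let handle = scope.spawn(|| greeting.len()); // borrows `greeting`
    //         handle.join()
    //     });
    //     assert_eq!(len, 5);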

    /// Generates the base configuration for spawning a scoped thread, from which configuration
    /// methods can be chained.
    pub fn builder<'s>(&'s self) -> ScopedThreadBuilder<'s, 'a> {
        ScopedThreadBuilder {
            scope: self,
            builder: thread::Builder::new(),
        }
    }
}

/// Scoped thread configuration. Provides detailed control over the properties and behavior of new
/// scoped threads.
pub struct ScopedThreadBuilder<'s, 'a: 's> {
    scope: &'s Scope<'a>,
    builder: thread::Builder,
}

impl<'s, 'a: 's> ScopedThreadBuilder<'s, 'a> {
    /// Names the thread-to-be. Currently the name is used for identification only in panic
    /// messages.
    pub fn name(mut self, name: String) -> ScopedThreadBuilder<'s, 'a> {
        self.builder = self.builder.name(name);
        self
    }

    /// Sets the size of the stack for the new thread.
    pub fn stack_size(mut self, size: usize) -> ScopedThreadBuilder<'s, 'a> {
        self.builder = self.builder.stack_size(size);
        self
    }

    /// Spawns a new thread, and returns a join handle for it.
    pub fn spawn<F, T>(self, f: F) -> io::Result<ScopedJoinHandle<T>>
    where
        F: FnOnce() -> T + Send + 'a,
        T: Send + 'a,
    {
        let their_packet = Arc::new(AtomicOption::new());
        let my_packet = their_packet.clone();

        let join_handle = try!(unsafe {
            builder_spawn_unsafe(self.builder, move || {
                their_packet.swap(f(), Ordering::Relaxed);
            })
        });

        let thread = join_handle.thread().clone();
        let deferred_handle = Rc::new(RefCell::new(JoinState::Running(join_handle)));
        let my_handle = deferred_handle.clone();

        self.scope.defer(move || {
            let mut state = deferred_handle.borrow_mut();
            state.join();
        });

        Ok(ScopedJoinHandle {
            inner: my_handle,
            packet: my_packet,
            thread: thread,
        })
    }
}
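
// A minimal sketch of the builder path (the thread name and stack size are
// illustrative values): unlike `Scope::spawn`, `ScopedThreadBuilder::spawn`
// surfaces the `io::Result` instead of unwrapping it.
//
//     crossbeam_utils::scoped::scope(|scope| {
//         let handle = scope
//             .builder()
//             .name(String::from("worker"))
//             .stack_size(512 * 1024)
//             .spawn(|| 2 + 2)
//             .unwrap();
//         assert_eq!(handle.join(), 4);
//     });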

impl<T> ScopedJoinHandle<T> {
    /// Join the scoped thread, returning the result it produced.
    pub fn join(self) -> T {
        self.inner.borrow_mut().join();
        self.packet.take(Ordering::Relaxed).unwrap()
    }

    /// Get the underlying thread handle.
    pub fn thread(&self) -> &thread::Thread {
        &self.thread
    }
}
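
// A short sketch tying the pieces together: the result crosses threads via
// the `AtomicOption` packet (swapped in by the child, taken by `join`), and
// `thread()` exposes the underlying `std::thread::Thread`, e.g. for unpark.
//
//     crossbeam_utils::scoped::scope(|scope| {
//         let handle = scope.spawn(|| 40 + 2);
//         handle.thread().unpark(); // wakes the child if it were parked
//         assert_eq!(handle.join(), 42);
//     });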

impl<'a> Drop for Scope<'a> {
    fn drop(&mut self) {
        self.drop_all()
    }
}
@@ -1 +1 @@
{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"945485d3f79a1912bfa6944ed7b07a9c60915fae992f7abcbb1de44ec147953e","Cargo.toml":"2c8f106920b27ebe60616933c4bf04cf2a6515d65f87fafa216febc4d6e1164b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/atomic_option.rs":"0ed05d26d8980c761c4972a0f37f5b507462ed6dff5d688ef92444560e7b9c69","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/lib.rs":"ea79e01d2c2f55d27d365e8cd45e377b313f53f27c705d4e4f6a4f19d7e11a98","src/scoped.rs":"5af1b54ca167c634e4c206aeab53e6ca78682633ad0009af220b17de385b3080"},"package":"2760899e32a1d58d5abb31129f8fae5de75220bc2176e77ff7c627ae45c918d9"}
{"files":{".travis.yml":"da898db16b841a2f633a896d69df908fb263d63d04f6248e448ba49a6122f5e9","CHANGELOG.md":"6b764c44d2f0ddb3a10101f738673685992bbd894152c0fc354d571f5115f85a","Cargo.toml":"48f3a37f7267b76120aa309e4e2d4e13df6e2994b5b2b402177640957dbcb18b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"ef6edf8bcb3491d8453ca36008f9e3fa0895bb6c17db47b38867784ed7717983","src/cache_padded.rs":"47a99e571bf5c213395585ff001c7abd10388609f349a2e776d481e2ed0b32cb","src/consume.rs":"422c6006dca162a80d39f1abcf1fe26dae6d69772111b3e8824c7f9b335c3ec2","src/lib.rs":"81273b19bd30f6f20084ff01af1acedadcf9ac88db89137d59cb7ee24c226588","src/scoped.rs":"1b7eaaf1fd6033875e4e368e4318a93430bedeb6f68a11c10221ace0243cd83b"},"package":"d636a8b3bcc1b409d7ffd3facef8f21dcb4009626adbd0c5e6c4305c07253c7b"}