Bug 1497446 - mach rust vendor r=Yoric

Summary: The previous two changesets bump up a few dependencies. This is the companion mach rust vendor.

Test Plan: It builds.

Reviewers: ted

Tags: #secure-revision

Bug #: 1497446

--HG--
extra : amend_source : 6eeef28181e1e72891e1f3ad1d67b70cdf926e21
extra : histedit_source : e6af1b38e2272656c543f6c4f9778e80e6c75fd9
This commit is contained in:
David Teller 2018-10-10 16:50:30 +02:00
Родитель 0670ad6a33
Коммит e42e7a416c
221 изменённых файлов: 14094 добавлений и 66311 удалений

19
Cargo.lock сгенерированный
Просмотреть файл

@ -273,6 +273,15 @@ dependencies = [
"constant_time_eq 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "block-buffer"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"arrayref 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
"byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "boxfnonce"
version = "0.0.3"
@ -706,6 +715,14 @@ name = "diff"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "digest"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"generic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "dirs"
version = "1.0.4"
@ -3114,6 +3131,7 @@ dependencies = [
"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
"checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"
"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400"
"checksum block-buffer 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab"
"checksum boxfnonce 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8380105befe91099e6f69206164072c05bc92427ff6aa8a5171388317346dd75"
"checksum build_const 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e90dc84f5e62d2ebe7676b83c22d33b6db8bd27340fb6ffbff0a364efa0cb9c9"
"checksum byte-tools 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40"
@ -3162,6 +3180,7 @@ dependencies = [
"checksum darling_macro 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eb69a38fdeaeaf3db712e1df170de67ee9dfc24fb88ca3e9d21e703ec25a4d8e"
"checksum devd-rs 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "e7c9ac481c38baf400d3b732e4a06850dfaa491d1b6379a249d9d40d14c2434c"
"checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a"
"checksum digest 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90"
"checksum dirs 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "88972de891f6118092b643d85a0b28e0678e0f948d7f879aa32f2d5aafe97d2a"
"checksum docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d8acd393692c503b168471874953a2531df0e9ab77d0b6bbc582395743300a4a"
"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"

Просмотреть файл

@ -1 +0,0 @@
{"files":{".travis.yml":"71e232ec96a9f11083a6ac2e3de7d3482032a4a9ed02c0e1be30b46da47cacef","CHANGELOG.md":"6c8e98f58fc7c4c3b7039027ff521a070b511f2882eb9985e32f118aff4ee4c0","Cargo.toml":"c87fbd92db7e1f7ace5b082a4168161e21e5ac76320ad44e01b7e3ea88aeee6e","LICENSE":"643adba34cf48432ba1bac872fdd5686d129c64e06246399bacf20142820620b","README.md":"3768d87584c808a133df7547996900d7574801f2021b6e6bc8c94cd0040b3cf8","appveyor.yml":"ab80c6004eeccda11d3e10284c7cd1bc8ecc87765204dfbf9c1dc4eb3843b86a","src/lib.rs":"16610a89cc5b9f0682a08507b4aea6b1e50ed6e78bc9a63acb6317e23a84477b"},"package":"d0fd4c0631f06448cc45a6bbb3b710ebb7ff8ccb96a0800c994afe23a70d5df2"}

43
third_party/rust/atty-0.1.2/.travis.yml поставляемый
Просмотреть файл

@ -1,43 +0,0 @@
sudo: false
language: rust
matrix:
fast_finish: true
include:
- rust: nightly
- rust: beta
- rust: stable
os:
- linux
- osx
script:
- cargo build
- cargo test
cache:
apt: true
directories:
- target/debug/deps
- target/debug/build
addons:
apt:
packages:
- libcurl4-openssl-dev
- libelf-dev
- libdw-dev
- binutils-dev # required for `kcov --verify`
- libbfd-dev # required for `kcov --verify`
after_success: |
[ $TRAVIS_RUST_VERSION = stable ] &&
wget https://github.com/SimonKagstrom/kcov/archive/master.tar.gz &&
tar xzf master.tar.gz && mkdir kcov-master/build && cd kcov-master/build && cmake .. && make && make install DESTDIR=../tmp && cd ../.. &&
ls target/debug &&
./kcov-master/tmp/usr/local/bin/kcov --verify --coveralls-id=$TRAVIS_JOB_ID --exclude-pattern=/.cargo target/kcov target/debug/atty-* &&
[ $TRAVIS_BRANCH = master ] &&
[ $TRAVIS_PULL_REQUEST = false ] &&
cargo doc --no-deps &&
echo "<meta http-equiv=refresh content=0;url=`echo $TRAVIS_REPO_SLUG | cut -d '/' -f 2`/index.html>" > target/doc/index.html &&
sudo pip install --user ghp-import &&
/home/travis/.local/bin/ghp-import -n target/doc &&
git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
env:
global:
secure: acjXoBFG4yFklz/iW4q9PLaMmTgug0c8hOov4uiaXYjDkVGhnEePBozGc8ctKuFv2BVlwBSzvE1neE9dHcCS6il0x+G79sVTekfVN5dERja3UpwrC0/QodJuDmErIUpb6zylupPnUGq5pzZabRPNKyAnsFS5wYhLMSLxGPu4pfYdW0Eu8CEPIgPYsI6o2pfKgNpXbeizdHRLMeZCN4cbEPohO1odc+Z6WJvgKn2xEkpAcfhAuaroqGGxRtmDiJZ/JaBijAKY/O9Q3Xq1GSGOPT5lmwJSp3Fxw5dgmeX6LmN0ZODASdnEoYfoqUDUFzkCON3Sk4a7hugxlkZ7cx1tfqXxMg+0BgYIUdGQNloDJnuusWvXPBFdB2jxMsfcbrCjNsrJ8kjN6uBsW9yy0kqN7a8eOJckwh5fYRWfNta0R+BrveNXWmGp4u4aBq/85jEiHi30XKTzaEUbF0Y3cIONweWeWwBOcAvPBhO63Y07TRRe+SSk1NYm7QHGW9RsHhz89OSbaIXqn+r/o+6DZcw5XaO73DtZ62Kx48NErej9kVqcIJ6HnyvCJ/fJoT7h1ixSRI/WmS30l2S/q33Q2G4C/IZ4ZZRD/1thSltAxeA6OAUnr8ITZyW47CqOmyL1IUptrdAb9OLEedYV/QrOhcg2RJLXyP66xnItOwMp014bEp4=

12
third_party/rust/atty-0.1.2/CHANGELOG.md поставляемый
Просмотреть файл

@ -1,12 +0,0 @@
# 0.1.2
* windows support (with automated testing)
* automated code coverage
# 0.1.1
* bumped libc dep from `0.1` to `0.2`
# 0.1.0
* initial release

15
third_party/rust/atty-0.1.2/Cargo.toml поставляемый
Просмотреть файл

@ -1,15 +0,0 @@
[package]
name = "atty"
version = "0.1.2"
authors = ["softprops <d.tangren@gmail.com>"]
description = "A simple interface for querying atty"
documentation = "http://softprops.github.io/atty"
homepage = "https://github.com/softprops/atty"
repository = "https://github.com/softprops/atty"
keywords = ["terminal", "tty"]
license = "MIT"
[dependencies]
libc = "0.2"
winapi = "0.2"
kernel32-sys = "0.2"

20
third_party/rust/atty-0.1.2/LICENSE поставляемый
Просмотреть файл

@ -1,20 +0,0 @@
Copyright (c) 2015 Doug Tangren
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

20
third_party/rust/atty-0.1.2/README.md поставляемый
Просмотреть файл

@ -1,20 +0,0 @@
# atty
[![Build Status](https://travis-ci.org/softprops/atty.svg?branch=master)](https://travis-ci.org/softprops/atty) [![Build status](https://ci.appveyor.com/api/projects/status/geggrsnsjsuse8cv?svg=true)](https://ci.appveyor.com/project/softprops/atty) [![Coverage Status](https://coveralls.io/repos/softprops/atty/badge.svg?branch=master&service=github)](https://coveralls.io/github/softprops/atty?branch=master)
> are you or are you not a tty?
## docs
Find them [here](http://softprops.github.io/atty)
## install
Add the following to your `Cargo.toml`
```toml
[dependencies]
atty = "0.1"
```
Doug Tangren (softprops) 2015

19
third_party/rust/atty-0.1.2/appveyor.yml поставляемый
Просмотреть файл

@ -1,19 +0,0 @@
environment:
matrix:
- TARGET: nightly-x86_64-pc-windows-msvc
VCVARS: "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin\\amd64\\vcvars64.bat"
- TARGET: nightly-i686-pc-windows-msvc
VCVARS: "C:\\Program Files (x86)\\Microsoft Visual Studio 14.0\\VC\\bin\\vcvars32.bat"
- TARGET: nightly-x86_64-pc-windows-gnu
- TARGET: nightly-i686-pc-windows-gnu
- TARGET: 1.2.0-x86_64-pc-windows-gnu
install:
- ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-${env:TARGET}.exe" -FileName "rust-install.exe"
- ps: .\rust-install.exe /VERYSILENT /NORESTART /DIR="C:\rust" | Out-Null
- ps: $env:PATH="$env:PATH;C:\rust\bin"
- call "%VCVARS%" || ver>nul
- rustc -vV
- cargo -vV
build: false
test_script:
- cargo test --verbose

62
third_party/rust/atty-0.1.2/src/lib.rs поставляемый
Просмотреть файл

@ -1,62 +0,0 @@
//! atty is a simple utility that answers one question
//! > is this a tty?
//!
//! usage is just as simple
//!
//! ```
//! if atty::is() {
//! println!("i'm a tty")
//! }
//! ```
//!
//! ```
//! if atty::isnt() {
//! println!("i'm not a tty")
//! }
//! ```
extern crate libc;
/// returns true if this is a tty
#[cfg(unix)]
pub fn is() -> bool {
let r = unsafe { libc::isatty(libc::STDOUT_FILENO) };
r != 0
}
/// returns true if this is a tty
#[cfg(windows)]
pub fn is() -> bool {
extern crate kernel32;
extern crate winapi;
use std::ptr;
let handle: winapi::HANDLE = unsafe {
kernel32::CreateFileA(b"CONOUT$\0".as_ptr() as *const i8,
winapi::GENERIC_READ | winapi::GENERIC_WRITE,
winapi::FILE_SHARE_WRITE,
ptr::null_mut(),
winapi::OPEN_EXISTING,
0,
ptr::null_mut())
};
if handle == winapi::INVALID_HANDLE_VALUE {
return false;
}
let mut out = 0;
unsafe { kernel32::GetConsoleMode(handle, &mut out) != 0 }
}
/// returns true if this is _not_ a tty
pub fn isnt() -> bool {
!is()
}
#[cfg(test)]
mod tests {
use super::is;
#[test]
fn is_test() {
assert!(is())
}
}

Просмотреть файл

@ -1 +1 @@
{"files":{".cargo_vcs_info.json":"5a666f68ab005317d058d78c58936ebf66086a242f6a4b8415230649bbce768d","Cargo.toml":"04c87832069d5462b4b87c935fa448213e00a804fcf827334a02beda1fd7f971","README.md":"17e5ed3a3bd9b898e73c3056711daabe1238fe9682d24d255f8263fae4eb783d","examples/generate_spidermonkey.rs":"a831abf8d7a1ab73c5d70a9e8517b8af1df492589a2f180698145ac5d46d7102","src/export.rs":"e889c2f45f00c1787e2270a50fc6d9628446d620c3c0d2ac6ba3f031c561197d","src/import.rs":"7a8525aa55ff0c6c266edfb69a351345ab0c36176deeb0fb91901d4a4e6bd9d6","src/lib.rs":"d4ea18ec850054a817c6b91ed52412a2f2f39639628e5918dee688d829d3ed4b","src/spec.rs":"8f442a5d218360681ad3a5b4c4740b7ae227e087eb745df38cca07a88d8484c4","src/util.rs":"1d934eec75d9dee44289f9a9a9e67c96dd6205367430b9bcf9fc66e730bf6eb0"},"package":"cc0956bac41c458cf38340699dbb54c2220c91cdbfa33be19670fe69e0a6ac9b"}
{"files":{".cargo_vcs_info.json":"13d7d8f1c677eb54c2b0005b8e048b79461b91176796088fc70f5d40ffbefd0a","Cargo.toml":"eee9b8c9f05e442ed41ee986b07b443cb89465346dce4aae4f73f13fa7243492","README.md":"17e5ed3a3bd9b898e73c3056711daabe1238fe9682d24d255f8263fae4eb783d","examples/generate_spidermonkey.rs":"a831abf8d7a1ab73c5d70a9e8517b8af1df492589a2f180698145ac5d46d7102","src/export.rs":"56910e257a000cac963b9ac377558767d05076b677e83a7e75e570ecbd4b35f6","src/import.rs":"366bada1b19c608ffe7dc4761f1db1a1dae616f4ed99685e4260a00d5c0125d0","src/lib.rs":"d4ea18ec850054a817c6b91ed52412a2f2f39639628e5918dee688d829d3ed4b","src/spec.rs":"7cfb4705d9cfa72ba0a34c5d5beab7e23ac54d8e9fa125317364535d5aa7496a","src/util.rs":"1d934eec75d9dee44289f9a9a9e67c96dd6205367430b9bcf9fc66e730bf6eb0"},"package":"430239e4551e42b80fa5d92322ac80ea38c9dda56e5d5582e057e2288352b71a"}

Просмотреть файл

@ -1,5 +1,5 @@
{
"git": {
"sha1": "4c24254cdcfba7a929573f34e5ac12686a86bb60"
"sha1": "da502c023e7c92bff0003109935a8767d9176637"
}
}

4
third_party/rust/binjs_meta/Cargo.toml поставляемый
Просмотреть файл

@ -12,7 +12,7 @@
[package]
name = "binjs_meta"
version = "0.3.10"
version = "0.4.3"
authors = ["David Teller <D.O.Teller@gmail.com>"]
description = "Part of binjs-ref. Tools for manipulating grammars. You probably do not want to use this crate directly unless you're writing an encoder, decoder or parser generator for binjs."
homepage = "https://binast.github.io/ecmascript-binary-ast/"
@ -31,7 +31,7 @@ version = "^0.7"
version = "^0.4"
[dependencies.webidl]
version = "^0.6"
version = "^0.8"
[dev-dependencies.clap]
version = "^2"

30
third_party/rust/binjs_meta/src/export.rs поставляемый
Просмотреть файл

@ -115,11 +115,10 @@ impl TypeDeanonymizer {
// See also tagged_tuple in write.rs.
if field.is_lazy() {
declaration.with_field(skip_name_map.get(field.name()).unwrap(),
Type::offset().required(),
Laziness::Eager);
Type::offset().required());
}
declaration.with_field(field.name(), field.type_().clone(),
field.laziness());
declaration.with_field_laziness(field.name(), field.type_().clone(),
field.laziness());
}
}
// Copy and deanonymize typedefs
@ -191,6 +190,8 @@ impl TypeDeanonymizer {
TypeSpec::Boolean |
TypeSpec::Number |
TypeSpec::UnsignedLong |
TypeSpec::PropertyKey |
TypeSpec::IdentifierName |
TypeSpec::String |
TypeSpec::Offset |
TypeSpec::Void => {
@ -202,7 +203,14 @@ impl TypeDeanonymizer {
debug!(target: "export_utils", "import_typespec: Attempting to redefine typedef {name}", name = my_name.to_str());
}
}
(None, self.builder.node_name("@@"))
// This is a workaround for typedefs in the webidl that are not truly typedefs.
// See https://github.com/Yoric/ecmascript-binary-ast/pull/1
let name = match *type_spec {
TypeSpec::PropertyKey => self.builder.node_name("PropertyKey"),
TypeSpec::IdentifierName => self.builder.node_name("IdentifierName"),
_ => self.builder.node_name(&format!("@@{:?}", type_spec)),
};
(None, name)
}
TypeSpec::NamedType(ref link) => {
let resolved = spec.get_type_by_name(link)
@ -238,11 +246,13 @@ impl TypeDeanonymizer {
Some(IsNullable { is_nullable: true, .. }) |
Some(IsNullable { content: Primitive::Interface(_), .. }) => Type::named(&content).required(),
Some(IsNullable { content: Primitive::String, .. }) => Type::string().required(),
Some(IsNullable { content: Primitive::IdentifierName, .. }) => Type::identifier_name().required(),
Some(IsNullable { content: Primitive::PropertyKey, .. }) => Type::property_key().required(),
Some(IsNullable { content: Primitive::Number, .. }) => Type::number().required(),
Some(IsNullable { content: Primitive::UnsignedLong, .. }) => Type::unsigned_long().required(),
Some(IsNullable { content: Primitive::Boolean, .. }) => Type::bool().required(),
Some(IsNullable { content: Primitive::Offset, .. }) => Type::offset().required(),
Some(IsNullable { content: Primitive::Void, .. }) => Type::void().required()
Some(IsNullable { content: Primitive::Void, .. }) => Type::void().required(),
};
debug!(target: "export_utils", "import_typespec aliasing {:?} => {:?}",
my_name, deanonymized);
@ -375,6 +385,10 @@ impl TypeName {
"_String".to_string(),
TypeSpec::Void =>
"_Void".to_string(),
TypeSpec::IdentifierName =>
"IdentifierName".to_string(),
TypeSpec::PropertyKey =>
"PropertyKey".to_string(),
TypeSpec::TypeSum(ref sum) => {
format!("{}", sum.types()
.iter()
@ -408,6 +422,10 @@ impl ToWebidl {
"bool".to_string(),
TypeSpec::String =>
"string".to_string(),
TypeSpec::PropertyKey =>
"[PropertyKey] string".to_string(),
TypeSpec::IdentifierName =>
"[IdentifierName] string".to_string(),
TypeSpec::Number =>
"number".to_string(),
TypeSpec::UnsignedLong =>

104
third_party/rust/binjs_meta/src/import.rs поставляемый
Просмотреть файл

@ -1,9 +1,11 @@
use spec::{ self, SpecBuilder, TypeSum, Laziness };
use spec::{ self, Laziness, SpecBuilder, TypeSum };
use webidl::ast::*;
pub struct Importer {
builder: SpecBuilder,
/// The interfaces we have traversed so far.
path: Vec<String>,
}
impl Importer {
/// Import an AST into a SpecBuilder.
@ -62,6 +64,7 @@ impl Importer {
/// ```
pub fn import(ast: &AST) -> SpecBuilder {
let mut importer = Importer {
path: Vec::with_capacity(256),
builder: SpecBuilder::new()
};
importer.import_ast(ast);
@ -90,9 +93,22 @@ impl Importer {
}
fn import_typedef(&mut self, typedef: &Typedef) {
let name = self.builder.node_name(&typedef.name);
let type_ = self.convert_type(&*typedef.type_);
// The following are, unfortunately, not true typedefs.
// Ignore their definition.
let type_ = match typedef.name.as_ref() {
"Identifier" => spec::TypeSpec::IdentifierName
.required(),
"IdentifierName" => spec::TypeSpec::IdentifierName
.required(),
"PropertyKey" => spec::TypeSpec::PropertyKey
.required(),
_ => self.convert_type(&*typedef.type_)
};
debug!(target: "meta::import", "Importing typedef {type_:?} {name:?}",
type_ = type_,
name = name);
let mut node = self.builder.add_typedef(&name)
.expect("Name already present");
.unwrap_or_else(|| panic!("Error: Name {} is defined more than once in the spec.", name));
assert!(!type_.is_optional());
node.with_spec(type_.spec);
}
@ -102,31 +118,47 @@ impl Importer {
} else {
panic!("Expected a non-partial interface, got {:?}", interface);
};
if interface.name == "Node" {
// We're not interested in the root interface.
return;
// Handle special, hardcoded, interfaces.
match interface.name.as_ref() {
"Node" => {
// We're not interested in the root interface.
return;
}
"IdentifierName" => {
unimplemented!()
}
_ => {
}
}
if let Some(ref parent) = interface.inherits {
assert_eq!(parent, "Node");
}
self.path.push(interface.name.clone());
// Now handle regular stuff.
let mut fields = Vec::new();
for member in &interface.members {
if let InterfaceMember::Attribute(Attribute::Regular(ref attribute)) = *member {
use webidl::ast::ExtendedAttribute::NoArguments;
use webidl::ast::Other::Identifier;
let name = self.builder.field_name(&attribute.name);
let type_ = self.convert_type(&*attribute.type_);
let mut laziness = Laziness::Eager;
for extended_attribute in &attribute.extended_attributes {
use webidl::ast::ExtendedAttribute::NoArguments;
use webidl::ast::Other::Identifier;
if let &NoArguments(Identifier(ref id)) = extended_attribute.as_ref() {
if &*id == "Lazy" {
laziness = Laziness::Lazy;
let is_lazy = attribute.extended_attributes.iter()
.find(|attribute| {
if let &NoArguments(Identifier(ref id)) = attribute.as_ref() {
if &*id == "Lazy" {
return true;
}
}
}
}
fields.push((name, type_, laziness));
false
})
.is_some();
fields.push((name, type_, if is_lazy { Laziness::Lazy } else { Laziness:: Eager }));
} else {
panic!("Expected an attribute, got {:?}", member);
}
@ -134,17 +166,42 @@ impl Importer {
let name = self.builder.node_name(&interface.name);
let mut node = self.builder.add_interface(&name)
.expect("Name already present");
for (field_name, field_type, field_laziness) in fields.drain(..) {
node.with_field(&field_name, field_type, field_laziness);
for (field_name, field_type, laziness) in fields.drain(..) {
node.with_field_laziness(&field_name, field_type, laziness);
}
for extended_attribute in &interface.extended_attributes {
use webidl::ast::ExtendedAttribute::NoArguments;
use webidl::ast::Other::Identifier;
if let &NoArguments(Identifier(ref id)) = extended_attribute.as_ref() {
if &*id == "Skippable" {
panic!("Encountered deprecated attribute [Skippable]");
}
if &*id == "Scope" {
node.with_scope(true);
}
}
}
self.path.pop();
}
fn convert_type(&mut self, t: &Type) -> spec::Type {
let spec = match t.kind {
TypeKind::Boolean => spec::TypeSpec::Boolean,
TypeKind::Identifier(ref id) => {
let name = self.builder.node_name(id);
spec::TypeSpec::NamedType(name.clone())
// Sadly, some identifiers are not truly `typedef`s.
match name.to_str() {
"IdentifierName" if self.is_at_interface("StaticMemberAssignmentTarget") => spec::TypeSpec::PropertyKey,
"IdentifierName" if self.is_at_interface("StaticMemberExpression") => spec::TypeSpec::PropertyKey,
"IdentifierName" if self.is_at_interface("ImportSpecifier") => spec::TypeSpec::PropertyKey,
"IdentifierName" if self.is_at_interface("ExportSpecifier") => spec::TypeSpec::PropertyKey,
"IdentifierName" if self.is_at_interface("ExportLocalSpecifier") => spec::TypeSpec::PropertyKey,
"IdentifierName" => spec::TypeSpec::IdentifierName,
"Identifier" => spec::TypeSpec::IdentifierName,
_ => spec::TypeSpec::NamedType(name.clone())
}
}
TypeKind::DOMString if self.is_at_interface("LiteralPropertyName") => spec::TypeSpec::PropertyKey,
TypeKind::DOMString => spec::TypeSpec::String,
TypeKind::Union(ref types) => {
let mut dest = Vec::with_capacity(types.len());
@ -174,4 +231,11 @@ impl Importer {
spec.required()
}
}
fn is_at_interface(&self, name: &str) -> bool {
if self.path.len() == 0 {
return false;
}
self.path[0].as_str() == name
}
}

166
third_party/rust/binjs_meta/src/spec.rs поставляемый
Просмотреть файл

@ -2,6 +2,8 @@
pub use util::ToStr;
use itertools::Itertools;
use std;
use std::cell::*;
use std::collections::{ HashMap, HashSet };
@ -9,6 +11,14 @@ use std::fmt::{ Debug, Display };
use std::hash::*;
use std::rc::*;
/// Whether an attribute is eager or lazy.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Laziness {
/// An eager attribute is designed to be parsed immediately.
Eager,
/// A lazy attribute is designed for deferred parsing.
Lazy
}
/// The name of an interface or enum.
#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
@ -17,6 +27,12 @@ impl NodeName {
pub fn to_string(&self) -> &String {
self.0.as_ref()
}
pub fn to_str(&self) -> &str {
self.0.as_ref()
}
pub fn to_rc_string(&self) -> &Rc<String> {
&self.0
}
}
impl Debug for NodeName {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
@ -42,6 +58,9 @@ impl FieldName {
pub fn to_string(&self) -> &String {
self.0.as_ref()
}
pub fn to_rc_string(&self) -> &Rc<String> {
&self.0
}
}
impl Debug for FieldName {
fn fmt(&self, formatter: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> {
@ -100,20 +119,19 @@ impl TypeSum {
}
}
/// Lazy for a field with [lazy] attribute. Eager for others.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum Laziness {
Eager,
Lazy,
}
/// Representation of a field in an interface.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Field {
/// The name of the field.
name: FieldName,
/// The type of the field.
type_: Type,
/// Documentation for the field. Ignored for the time being.
documentation: Option<String>,
laziness: Laziness,
laziness: Laziness
}
impl Hash for Field {
fn hash<H>(&self, state: &mut H) where H: Hasher {
@ -141,12 +159,20 @@ impl Field {
pub fn laziness(&self) -> Laziness {
self.laziness.clone()
}
pub fn with_laziness(mut self, laziness: Laziness) -> Self {
self.laziness = laziness;
self
}
pub fn doc(&self) -> Option<&str> {
match self.documentation {
None => None,
Some(ref s) => Some(&*s)
}
}
pub fn with_doc(mut self, doc: Option<String>) -> Self {
self.documentation = doc;
self
}
}
/// The contents of a type, typically that of a field.
@ -191,6 +217,16 @@ pub enum TypeSpec {
///
/// For the moment, this spec is used only internally.
Void,
/// A string used to represent something bound in a scope (i.e. a variable, but not a property).
/// At this level, we make no distinction between `Identifier` and `IdentifierName`.
///
/// Actually maps to a subset of `IdentifierName` in webidl.
IdentifierName,
/// A key for a property. For the time being, we make no distinction between variants such
/// as `LiteralPropertyName` and `IdentifierName`-as-property-keys.
PropertyKey,
}
#[derive(Clone, Debug)]
@ -295,6 +331,8 @@ impl TypeSpec {
TypeSpec::UnsignedLong => Some(IsNullable::non_nullable(Primitive::UnsignedLong)),
TypeSpec::String => Some(IsNullable::non_nullable(Primitive::String)),
TypeSpec::Offset => Some(IsNullable::non_nullable(Primitive::Offset)),
TypeSpec::IdentifierName => Some(IsNullable::non_nullable(Primitive::IdentifierName)),
TypeSpec::PropertyKey => Some(IsNullable::non_nullable(Primitive::PropertyKey)),
TypeSpec::NamedType(ref name) => {
match spec.get_type_by_name(name).unwrap() {
NamedType::Interface(ref interface) =>
@ -332,6 +370,8 @@ pub enum Primitive {
UnsignedLong,
Offset,
Interface(Rc<Interface>),
IdentifierName,
PropertyKey,
}
#[derive(Clone, Debug, PartialEq)]
@ -393,6 +433,12 @@ impl Type {
pub fn void() -> TypeSpec {
TypeSpec::Void
}
pub fn identifier_name() -> TypeSpec {
TypeSpec::IdentifierName
}
pub fn property_key() -> TypeSpec {
TypeSpec::PropertyKey
}
/// An `offset` type, holding a number of bytes in the binary file.
pub fn offset() -> TypeSpec {
@ -474,12 +520,9 @@ impl Obj {
return self
}
let mut fields = self.fields;
fields.push(Field {
name: name.clone(),
type_,
documentation: doc.map(str::to_string),
laziness,
});
fields.push(Field::new(name.clone(), type_)
.with_doc(doc.map(str::to_string))
.with_laziness(laziness));
Obj {
fields
}
@ -487,12 +530,16 @@ impl Obj {
}
/// Extend a structure with a field.
pub fn with_field(self, name: &FieldName, type_: Type, laziness: Laziness) -> Self {
self.with_field_aux(name, type_, laziness, None)
pub fn with_field(self, name: &FieldName, type_: Type) -> Self {
self.with_field_aux(name, type_, Laziness::Eager, None)
}
pub fn with_field_doc(self, name: &FieldName, type_: Type, laziness: Laziness, doc: &str) -> Self {
self.with_field_aux(name, type_, laziness, Some(doc))
pub fn with_field_doc(self, name: &FieldName, type_: Type, doc: &str) -> Self {
self.with_field_aux(name, type_, Laziness::Eager, Some(doc))
}
pub fn with_field_lazy(self, name: &FieldName, type_: Type) -> Self {
self.with_field_aux(name, type_, Laziness::Lazy, None)
}
}
@ -529,6 +576,8 @@ pub struct InterfaceDeclaration {
/// The contents of this interface, excluding the contents of parent interfaces.
contents: Obj,
is_scope: bool,
}
impl InterfaceDeclaration {
@ -536,18 +585,28 @@ impl InterfaceDeclaration {
let _ = self.contents.with_full_field(contents);
self
}
pub fn with_field(&mut self, name: &FieldName, type_: Type, laziness: Laziness) -> &mut Self {
self.with_field_aux(name, type_, laziness, None)
pub fn with_field(&mut self, name: &FieldName, type_: Type) -> &mut Self {
self.with_field_aux(name, type_, None, Laziness::Eager)
}
pub fn with_field_doc(&mut self, name: &FieldName, type_: Type, laziness: Laziness, doc: &str) -> &mut Self {
self.with_field_aux(name, type_, laziness, Some(doc))
pub fn with_field_lazy(&mut self, name: &FieldName, type_: Type) -> &mut Self {
self.with_field_aux(name, type_, None, Laziness::Eager)
}
fn with_field_aux(&mut self, name: &FieldName, type_: Type, laziness: Laziness, doc: Option<&str>) -> &mut Self {
pub fn with_field_laziness(&mut self, name: &FieldName, type_: Type, laziness: Laziness) -> &mut Self {
self.with_field_aux(name, type_, None, laziness)
}
pub fn with_field_doc(&mut self, name: &FieldName, type_: Type, doc: &str) -> &mut Self {
self.with_field_aux(name, type_, Some(doc), Laziness::Eager)
}
fn with_field_aux(&mut self, name: &FieldName, type_: Type, doc: Option<&str>, laziness: Laziness) -> &mut Self {
let mut contents = Obj::new();
std::mem::swap(&mut self.contents, &mut contents);
self.contents = contents.with_field_aux(name, type_, laziness, doc);
self
}
pub fn with_scope(&mut self, value: bool) -> &mut Self {
self.is_scope = value;
self
}
}
/// A data structure used to progressively construct the `Spec`.
@ -617,6 +676,7 @@ impl SpecBuilder {
let result = RefCell::new(InterfaceDeclaration {
name: name.clone(),
contents: Obj::new(),
is_scope: false,
});
self.interfaces_by_name.insert(name.clone(), result);
self.interfaces_by_name.get(name)
@ -672,10 +732,20 @@ impl SpecBuilder {
.map(|(k, v)| (k, Rc::new(RefCell::into_inner(v))))
.collect();
let mut node_names = HashMap::new();
for name in interfaces_by_name.keys().chain(string_enums_by_name.keys()).chain(typedefs_by_name.keys()) {
node_names.insert(name.to_string().clone(), name.clone());
}
let node_names: HashMap<_, _> = interfaces_by_name
.keys()
.chain(string_enums_by_name
.keys())
.chain(typedefs_by_name
.keys())
.map(|name| {
(name.to_string().clone(), name.clone())
})
.collect();
debug!(target: "spec", "Established list of node names: {:?} ({})",
node_names.keys()
.sorted(),
node_names.len());
// 2. Collect all field names.
let mut fields = HashMap::new();
@ -718,6 +788,10 @@ impl SpecBuilder {
}
}
for name in &used_typenames {
// Built-in types
if name.to_str() == "IdentifierName" || name.to_str() == "Identifier" || name.to_str() == "PropertyKey" {
continue;
}
if typedefs_by_name.contains_key(name) {
continue;
}
@ -763,7 +837,14 @@ impl SpecBuilder {
debug!(target: "spec", "classify_type => don't put me in an interface");
TypeClassification::Array
},
TypeSpec::Boolean | TypeSpec::Number | TypeSpec::UnsignedLong | TypeSpec::String | TypeSpec::Void | TypeSpec::Offset => {
TypeSpec::Boolean
| TypeSpec::Number
| TypeSpec::String
| TypeSpec::Void
| TypeSpec::Offset
| TypeSpec::UnsignedLong
| TypeSpec::IdentifierName
| TypeSpec::PropertyKey => {
debug!(target: "spec", "classify_type => don't put me in an interface");
TypeClassification::Primitive
}
@ -778,17 +859,20 @@ impl SpecBuilder {
}
// Start lookup for this name.
cache.insert(name.clone(), None);
let result = if interfaces_by_name.contains_key(name) {
let mut names = HashSet::new();
names.insert(name.clone());
TypeClassification::SumOfInterfaces(names)
} else if string_enums_by_name.contains_key(name) {
TypeClassification::StringEnum
} else {
let type_ = typedefs_by_name.get(name)
.unwrap(); // Completeness checked abover in this method.
classify_type(typedefs_by_name, string_enums_by_name, interfaces_by_name, cache, type_.spec(), name)
};
let result =
if name.to_str() == "IdentifierName" || name.to_str() == "Identifier" || name.to_str() == "PropertyKey" {
TypeClassification::Primitive
} else if interfaces_by_name.contains_key(name) {
let mut names = HashSet::new();
names.insert(name.clone());
TypeClassification::SumOfInterfaces(names)
} else if string_enums_by_name.contains_key(name) {
TypeClassification::StringEnum
} else {
let type_ = typedefs_by_name.get(name)
.unwrap_or_else(|| panic!("Type {} not found", name)); // Completeness checked abover in this method.
classify_type(typedefs_by_name, string_enums_by_name, interfaces_by_name, cache, type_.spec(), name)
};
debug!(target: "spec", "classify_type {:?} => (inserting in cache) {:?}", name, result);
cache.insert(name.clone(), Some(result.clone()));
result
@ -902,6 +986,10 @@ impl Interface {
}
None
}
pub fn is_scope(&self) -> bool {
self.declaration.is_scope
}
}
/// Immutable representation of the spec.

Просмотреть файл

@ -1 +1 @@
{"files":{".travis.yml":"e684c9479b485343f5b932e8f9de7ac046accfb4c1e3c534e6e0fb9e0c8d919b","Cargo.toml":"a30078c3db5bccf6a567ad9ae78a6258d18b990034eda7e4ce8f4b3041ff2aa9","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"d3a2993cd15ac201b30c86fe69f2bb692b386875eace571715007637d7ca7abf","deploy-docs.sh":"7b66111b124c1c7e59cb84cf110d98b5cb783bd35a676e970d9b3035e55f7dfd","src/lib.rs":"7276279f7008dd633d0bb90cc0ff73de170b89d69644fb21c35728c94e913c4d"},"package":"d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c"}
{"files":{".travis.yml":"e684c9479b485343f5b932e8f9de7ac046accfb4c1e3c534e6e0fb9e0c8d919b","Cargo.toml":"3342b785a96c022128627c03d66f701ff8f5fa3b1088f1a6282bbd7fab94d99d","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"49741b792be0800387a30bf6300d5ad4d306e15b63510301e377670489620f40","deploy-docs.sh":"7b66111b124c1c7e59cb84cf110d98b5cb783bd35a676e970d9b3035e55f7dfd","src/lib.rs":"51809e3f8799d712a740f5bd37b658fbda44a5c7e62bf33a69c255866afa61b1"},"package":"6f1efcc46c18245a69c38fcc5cc650f16d3a59d034f3106e9ed63748f695730a"}

35
third_party/rust/bit-set/Cargo.toml поставляемый
Просмотреть файл

@ -1,20 +1,33 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "bit-set"
version = "0.4.0"
version = "0.5.0"
authors = ["Alexis Beingessner <a.beingessner@gmail.com>"]
license = "MIT/Apache-2.0"
description = "A set of bits"
repository = "https://github.com/contain-rs/bit-set"
homepage = "https://github.com/contain-rs/bit-set"
documentation = "https://contain-rs.github.io/bit-set/bit_set"
keywords = ["data-structures", "bitset"]
readme = "README.md"
[dev-dependencies]
rand = "0.3"
[dependencies]
bit-vec = "0.4"
keywords = ["data-structures", "bitset"]
license = "MIT/Apache-2.0"
repository = "https://github.com/contain-rs/bit-set"
[dependencies.bit-vec]
version = "0.5.0"
default-features = false
[dev-dependencies.rand]
version = "0.3"
[features]
nightly = []
default = ["std"]
nightly = ["bit-vec/nightly"]
std = ["bit-vec/std"]

11
third_party/rust/bit-set/README.md поставляемый
Просмотреть файл

@ -1,3 +1,14 @@
**WARNING: THIS PROJECT IS IN MAINTENANCE MODE, DUE TO INSUFFICIENT MAINTAINER RESOURCES**
It works fine, but will generally no longer be improved.
We are currently only accepting changes which:
* keep this compiling with the latest versions of Rust or its dependencies.
* have minimal review requirements, such as documentation changes (so not totally new APIs).
------
A Set of bits.
Documentation is available at https://contain-rs.github.io/bit-set/bit_set.

17
third_party/rust/bit-set/src/lib.rs поставляемый
Просмотреть файл

@ -47,17 +47,23 @@
//! assert!(bv[3]);
//! ```
#![no_std]
#![cfg_attr(all(test, feature = "nightly"), feature(test))]
#[cfg(all(test, feature = "nightly"))] extern crate test;
#[cfg(all(test, feature = "nightly"))] extern crate rand;
extern crate bit_vec;
#[cfg(test)]
#[macro_use]
extern crate std;
use bit_vec::{BitVec, Blocks, BitBlock};
use std::cmp::Ordering;
use std::cmp;
use std::fmt;
use std::hash;
use std::iter::{self, Chain, Enumerate, FromIterator, Repeat, Skip, Take};
use core::cmp::Ordering;
use core::cmp;
use core::fmt;
use core::hash;
use core::iter::{self, Chain, Enumerate, FromIterator, Repeat, Skip, Take};
type MatchWords<'a, B> = Chain<Enumerate<Blocks<'a, B>>, Skip<Take<Enumerate<Repeat<B>>>>>;
@ -941,6 +947,7 @@ mod tests {
use std::cmp::Ordering::{Equal, Greater, Less};
use super::BitSet;
use bit_vec::BitVec;
use std::vec::Vec;
#[test]
fn test_bit_set_show() {

Просмотреть файл

@ -1 +1 @@
{"files":{".travis.yml":"26dbdd3f33aeefa6216804c025626b8e2bef5c05103410faa5e6e93f20331cbe","Cargo.toml":"6376bd862fc4827a77190427180ccf86cda76907bf3bd935601840cd03ab48da","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"2a42423b7acd5af0ee7f47dcc430b267cfe4661ced77131af2d6e97e6a15377a","benches/extern.rs":"30152d15cc55493d06396baf9eebb90c8f32b314f0dc77398ac8a121bd5ff917","crusader.sh":"e656dcb62d5122a64d55f837992e63cfd3beee37cf74c5ab6ff178a3c7ef943e","deploy-docs.sh":"7b66111b124c1c7e59cb84cf110d98b5cb783bd35a676e970d9b3035e55f7dfd","src/bench.rs":"a24345464fdbc70b5b877d13fa1b9da809ba4917e592d5de69f01b8b1340e8bb","src/lib.rs":"b784632ce3f6a16314d1d759310f297941fb5577192ba48a10ae3c6893dd5e24"},"package":"02b4ff8b16e6076c3e14220b39fbc1fabb6737522281a388998046859400895f"}
{"files":{".travis.yml":"26dbdd3f33aeefa6216804c025626b8e2bef5c05103410faa5e6e93f20331cbe","Cargo.toml":"0c1d447fdcff050a2c1f9e3267bdf5b2d3373e080603a5f9127167f31a169b7d","LICENSE-APACHE":"8173d5c29b4f956d532781d2b86e4e30f83e6b7878dce18c919451d6ba707c90","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"c9d3313c3cc0d55496d8c17bf950b96accd751fc67342e3b3dd3ce7756605092","benches/extern.rs":"30152d15cc55493d06396baf9eebb90c8f32b314f0dc77398ac8a121bd5ff917","crusader.sh":"e656dcb62d5122a64d55f837992e63cfd3beee37cf74c5ab6ff178a3c7ef943e","deploy-docs.sh":"7b66111b124c1c7e59cb84cf110d98b5cb783bd35a676e970d9b3035e55f7dfd","src/bench.rs":"a24345464fdbc70b5b877d13fa1b9da809ba4917e592d5de69f01b8b1340e8bb","src/lib.rs":"5162fc2658cce4d388453e73740eb1d74fbb64b0a5d714c8e7bc9a29671bbfa5"},"package":"4440d5cb623bb7390ae27fec0bb6c61111969860f8e3ae198bfa0663645e67cf"}

27
third_party/rust/bit-vec/Cargo.toml поставляемый
Просмотреть файл

@ -1,17 +1,30 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "bit-vec"
version = "0.4.4"
version = "0.5.0"
authors = ["Alexis Beingessner <a.beingessner@gmail.com>"]
license = "MIT/Apache-2.0"
description = "A vector of bits"
repository = "https://github.com/contain-rs/bit-vec"
homepage = "https://github.com/contain-rs/bit-vec"
documentation = "https://contain-rs.github.io/bit-vec/bit_vec"
keywords = ["data-structures", "bitvec", "bitmask", "bitmap", "bit"]
readme = "README.md"
[dev-dependencies]
rand = "0.3.15"
keywords = ["data-structures", "bitvec", "bitmask", "bitmap", "bit"]
license = "MIT/Apache-2.0"
repository = "https://github.com/contain-rs/bit-vec"
[dev-dependencies.rand]
version = "0.3.15"
[features]
default = ["std"]
nightly = []
std = []

12
third_party/rust/bit-vec/README.md поставляемый
Просмотреть файл

@ -1,3 +1,15 @@
**WARNING: THIS PROJECT IS IN MAINTENANCE MODE, DUE TO INSUFFICIENT MAINTAINER RESOURCES**
It works fine, but will generally no longer be improved.
We are currently only accepting changes which:
* keep this compiling with the latest versions of Rust or its dependencies.
* have minimal review requirements, such as documentation changes (so not totally new APIs).
------
A Vec of bits.
Documentation is available at https://contain-rs.github.io/bit-vec/bit_vec.

46
third_party/rust/bit-vec/src/lib.rs поставляемый
Просмотреть файл

@ -83,23 +83,38 @@
//! assert_eq!(num_primes, 1_229);
//! ```
#![no_std]
#![cfg_attr(not(feature="std"), feature(alloc))]
#![cfg_attr(all(test, feature = "nightly"), feature(test))]
#[cfg(all(test, feature = "nightly"))] extern crate test;
#[cfg(all(test, feature = "nightly"))] extern crate rand;
use std::cmp::Ordering;
use std::cmp;
use std::fmt;
use std::hash;
use std::iter::{Chain, Enumerate, Repeat, Skip, Take, repeat};
use std::iter::FromIterator;
use std::slice;
use std::{u8, usize};
#[cfg(any(test, feature = "std"))]
#[macro_use]
extern crate std;
#[cfg(feature="std")]
use std::vec::Vec;
#[cfg(not(feature="std"))]
#[macro_use]
extern crate alloc;
#[cfg(not(feature="std"))]
use alloc::Vec;
use core::cmp::Ordering;
use core::cmp;
use core::fmt;
use core::hash;
use core::iter::{Chain, Enumerate, Repeat, Skip, Take, repeat};
use core::iter::FromIterator;
use core::slice;
use core::{u8, usize};
type MutBlocks<'a, B> = slice::IterMut<'a, B>;
type MatchWords<'a, B> = Chain<Enumerate<Blocks<'a, B>>, Skip<Take<Enumerate<Repeat<B>>>>>;
use std::ops::*;
use core::ops::*;
/// Abstracts over a pile of bits (basically unsigned primitives)
pub trait BitBlock:
@ -154,7 +169,7 @@ bit_block_impl!{
(u16, 16),
(u32, 32),
(u64, 64),
(usize, std::mem::size_of::<usize>() * 8)
(usize, core::mem::size_of::<usize>() * 8)
}
@ -1091,6 +1106,16 @@ impl<B: BitBlock> BitVec<B> {
pub fn clear(&mut self) {
for w in &mut self.storage { *w = B::zero(); }
}
/// Shrinks the capacity of the underlying storage as much as
/// possible.
///
/// It will drop down as close as possible to the length but the
/// allocator may still inform the underlying storage that there
/// is space for a few more elements/bits.
pub fn shrink_to_fit(&mut self) {
self.storage.shrink_to_fit();
}
}
impl<B: BitBlock> Default for BitVec<B> {
@ -1308,6 +1333,7 @@ impl<'a, B: BitBlock> ExactSizeIterator for Blocks<'a, B> {}
#[cfg(test)]
mod tests {
use super::{BitVec, Iter};
use std::vec::Vec;
// This is stupid, but I want to differentiate from a "random" 32
const U32_BITS: usize = 32;

1
third_party/rust/block-buffer/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"373908618d7bdf561f84ddc5add92f69dab295c97ab0908d3a4ec428fad23bad","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"9e0dfd2dd4173a530e238cb6adb37aa78c34c6bc7444e0e10c1ab5d8881f63ba","src/lib.rs":"bdf23c8a00fb4d51beabeb6600fe45ebf1be618632db885013b6f60a5666c124","src/paddings.rs":"7a18850dab9dca0a3e6cc49d6a94a9566ea2473628f42f726a69f8e07f95872a"},"package":"a076c298b9ecdb530ed9d967e74a6027d6a7478924520acddcddc24c1c8ab3ab"}

27
third_party/rust/block-buffer/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,27 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "block-buffer"
version = "0.3.3"
authors = ["RustCrypto Developers"]
description = "Fixed size buffer for block processing of data"
documentation = "https://docs.rs/block-buffer"
keywords = ["block", "padding", "pkcs7", "ansix923", "iso7816"]
categories = ["cryptography", "no-std"]
license = "MIT/Apache-2.0"
repository = "https://github.com/RustCrypto/utils"
[dependencies.arrayref]
version = "0.3"
[dependencies.byte-tools]
version = "0.2"

201
third_party/rust/block-buffer/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/block-buffer/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
Copyright (c) 2017 Artyom Pavlov
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

144
third_party/rust/block-buffer/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,144 @@
#![no_std]
#[macro_use]
extern crate arrayref;
extern crate byte_tools;
use byte_tools::{zero, write_u64_le};
mod paddings;
pub use paddings::*;
/// Generates a fixed-size block buffer type.
///
/// `$name` is the generated struct's name and `$len` the block size in
/// bytes. The type accumulates input bytes and hands every complete
/// `$len`-byte block to a caller-supplied closure, which is how the hash
/// crates built on this library drive their compression functions.
macro_rules! impl_buffer {
    ($name:ident, $len:expr) => {
        /// Buffer holding up to `$len` bytes of not-yet-processed input.
        pub struct $name {
            // Backing storage for exactly one block.
            buffer: [u8; $len],
            // Number of buffered bytes; always < $len between calls.
            pos: usize,
        }

        impl Copy for $name {}

        impl Clone for $name {
            fn clone(&self) -> Self {
                *self
            }
        }

        impl Default for $name {
            /// An empty buffer: all-zero storage, position 0.
            fn default() -> Self {
                $name {buffer: [0; $len], pos: 0}
            }
        }

        impl $name {
            /// Feeds `input` into the buffer, invoking `func` once for every
            /// complete `$len`-byte block. Any leftover bytes (less than one
            /// block) are retained for the next call.
            #[inline]
            pub fn input<F: FnMut(&[u8; $len])>(&mut self, mut input: &[u8], mut func: F) {
                // If there is already data in the buffer, copy as much as we can
                // into it and process the data if the buffer becomes full.
                if self.pos != 0 {
                    let rem = self.remaining();

                    if input.len() >= rem {
                        let (l, r) = input.split_at(rem);
                        input = r;
                        self.buffer[self.pos..].copy_from_slice(l);
                        self.pos = 0;
                        func(&self.buffer);
                    } else {
                        // Not enough to complete a block: stash and bail out.
                        let end = self.pos + input.len();
                        self.buffer[self.pos..end].copy_from_slice(input);
                        self.pos = end;
                        return;
                    }
                }

                // While we have at least a full buffer size chunk's worth of data,
                // process that data without copying it into the buffer
                while input.len() >= self.size() {
                    let (l, r) = input.split_at(self.size());
                    input = r;
                    func(array_ref!(l, 0, $len));
                }

                // Copy any input data into the buffer. At this point in the method,
                // the amount of data left in the input vector will be less than
                // the buffer size and the buffer will be empty.
                self.buffer[..input.len()].copy_from_slice(input);
                self.pos = input.len();
            }

            /// Writes the 0x80 end-of-message marker, zero-fills the rest of
            /// the block, and — if fewer than `up_to` bytes remain for the
            /// trailing length field — flushes the block early and starts a
            /// fresh zeroed one.
            #[inline]
            fn digest_pad<F>(&mut self, up_to: usize, func: &mut F)
                where F: FnMut(&[u8; $len])
            {
                self.buffer[self.pos] = 0x80;
                self.pos += 1;

                zero(&mut self.buffer[self.pos..]);

                if self.remaining() < up_to {
                    func(&self.buffer);
                    zero(&mut self.buffer[..self.pos]);
                }
            }

            #[inline]
            /// Pads the message (0x80 marker + zeros) and writes `data_len`
            /// into the final 8 bytes of the block via `write_u64_le`, then
            /// flushes the block and resets the buffer.
            ///
            /// NOTE(review): the upstream comment claimed "big-endian", but
            /// the code calls `write_u64_le` (little-endian) — confirm
            /// against the hash crates consuming this buffer.
            pub fn len_padding<F>(&mut self, data_len: u64, mut func: F)
                where F: FnMut(&[u8; $len])
            {
                self.digest_pad(8, &mut func);
                let s = self.size();
                write_u64_le(&mut self.buffer[s-8..], data_len);
                func(&self.buffer);
                self.pos = 0;
            }

            #[inline]
            /// Like `len_padding`, but writes a 128-bit length as two
            /// little-endian u64 halves (`hi` then `lo`) in the final
            /// 16 bytes of the block.
            pub fn len_padding_u128<F>(&mut self, hi: u64, lo: u64, mut func: F)
                where F: FnMut(&[u8; $len])
            {
                self.digest_pad(16, &mut func);
                let s = self.size();
                write_u64_le(&mut self.buffer[s-16..s-8], hi);
                write_u64_le(&mut self.buffer[s-8..], lo);
                func(&self.buffer);
                self.pos = 0;
            }

            /// Applies padding scheme `P` in place over the unfilled tail of
            /// the block, resets the position, and returns the full block.
            #[inline]
            pub fn pad_with<P: Padding>(&mut self) -> &mut [u8; $len] {
                P::pad(&mut self.buffer[..], self.pos);
                self.pos = 0;
                &mut self.buffer
            }

            /// Block size in bytes (the `$len` this type was generated with).
            #[inline]
            pub fn size(&self) -> usize {
                $len
            }

            /// Number of bytes currently buffered.
            #[inline]
            pub fn position(&self) -> usize {
                self.pos
            }

            /// Free space left in the current block, in bytes.
            #[inline]
            pub fn remaining(&self) -> usize {
                self.size() - self.pos
            }
        }
    }
}

// Concrete buffer types for the block sizes used by common hash functions
// (e.g. 64 bytes for MD5/SHA-1/SHA-256, 128 for SHA-512, the 72..168 sizes
// for the Keccak/SHA-3 rate widths).
impl_buffer!(BlockBuffer128, 16);
impl_buffer!(BlockBuffer256, 32);
impl_buffer!(BlockBuffer512, 64);
impl_buffer!(BlockBuffer1024, 128);

impl_buffer!(BlockBuffer576, 72);
impl_buffer!(BlockBuffer832, 104);
impl_buffer!(BlockBuffer1088, 136);
impl_buffer!(BlockBuffer1152, 144);
impl_buffer!(BlockBuffer1344, 168);

129
third_party/rust/block-buffer/src/paddings.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,129 @@
use byte_tools::{zero, set};
/// Trait for padding messages divided into blocks.
pub trait Padding {
    /// Pads `block` filled with data up to `pos`: `block[..pos]` holds the
    /// message bytes and the implementation overwrites `block[pos..]` with
    /// padding in place.
    fn pad(block: &mut [u8], pos: usize);
}
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// Error for indicating failed unpadding process
pub struct UnpadError;

/// Trait for extracting the original message from a padded medium.
pub trait Unpadding {
    /// Unpads the given `data` by truncating it according to the used padding.
    /// In case of malformed padding, returns `UnpadError`.
    fn unpad(data: &[u8]) -> Result<&[u8], UnpadError>;
}
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum ZeroPadding{}
impl Padding for ZeroPadding {
#[inline]
fn pad(block: &mut [u8], pos: usize) {
zero(&mut block[pos..])
}
}
impl Unpadding for ZeroPadding {
#[inline]
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
let mut n = data.len() - 1;
while n != 0 {
if data[n] != 0 {
break;
}
n -= 1;
}
Ok(&data[..n+1])
}
}
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub enum Pkcs7{}
impl Padding for Pkcs7 {
#[inline]
fn pad(block: &mut [u8], pos: usize) {
let n = block.len() - pos;
set(&mut block[pos..], n as u8);
}
}
impl Unpadding for Pkcs7 {
    /// Validates and strips PKCS#7 padding.
    ///
    /// Returns `UnpadError` when the padding is malformed: empty input, a
    /// length byte of zero, a length byte larger than the data, or any
    /// padding byte differing from the length byte.
    #[inline]
    fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
        if data.is_empty() { return Err(UnpadError); }
        let l = data.len();
        let n = data[l-1];
        if n == 0 {
            return Err(UnpadError)
        }
        // BUG FIX: a length byte larger than the message previously made
        // `l - n as usize` underflow and panic. Unpadding runs on
        // untrusted ciphertext output, so this must be a soft error.
        if n as usize > l {
            return Err(UnpadError);
        }
        // Every padding byte must equal the length byte.
        for v in &data[l-n as usize..l-1] {
            if *v != n { return Err(UnpadError); }
        }
        Ok(&data[..l-n as usize])
    }
}
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// ANSI X9.23 padding: zero bytes followed by a final byte holding the
/// total padding length (including the length byte itself).
pub enum AnsiX923{}

impl Padding for AnsiX923 {
    #[inline]
    fn pad(block: &mut [u8], pos: usize) {
        let n = block.len() - 1;
        // Zero-fill everything between the message and the length byte.
        zero(&mut block[pos..n]);
        // BUG FIX: previously wrote `(n - pos)`, i.e. one less than the
        // number of padding bytes. `unpad` below strips `data[l-1]` bytes,
        // so every pad/unpad round trip left one stray zero byte appended
        // to the message. The length byte must count itself.
        // (Caller must ensure the padding length fits in a u8.)
        block[n] = (block.len() - pos) as u8;
    }
}
impl Unpadding for AnsiX923 {
    /// Validates and strips ANSI X9.23 padding.
    ///
    /// Returns `UnpadError` for empty input, a zero length byte, a length
    /// byte larger than the data, or a non-zero filler byte.
    #[inline]
    fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
        if data.is_empty() { return Err(UnpadError); }
        let l = data.len();
        let n = data[l-1] as usize;
        if n == 0 {
            return Err(UnpadError)
        }
        // BUG FIX: a length byte larger than the message previously made
        // `l - n` underflow and panic; treat it as malformed padding.
        if n > l {
            return Err(UnpadError);
        }
        // All filler bytes before the length byte must be zero.
        for v in &data[l-n..l-1] {
            if *v != 0 { return Err(UnpadError); }
        }
        Ok(&data[..l-n])
    }
}
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
/// ISO/IEC 7816-4 padding: a single 0x80 marker byte followed by zeros.
pub enum Iso7816{}

impl Padding for Iso7816 {
    #[inline]
    fn pad(block: &mut [u8], pos: usize) {
        block[pos] = 0x80;
        // BUG FIX: the bytes after the 0x80 marker were filled with the
        // padding length, but `unpad` in this file scans for *trailing
        // zeros* before the 0x80 marker (the ISO 7816-4 format), so any
        // padding longer than one byte failed to round-trip. Fill with
        // zeros as the format requires.
        for b in block[pos+1..].iter_mut() {
            *b = 0;
        }
    }
}
impl Unpadding for Iso7816 {
    /// Strips ISO 7816-4 padding: skips trailing zero bytes, then requires
    /// the 0x80 marker; everything before the marker is the message.
    fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
        if data.is_empty() {
            return Err(UnpadError);
        }
        // Index of the last non-zero byte (0 if the slice is all zeros);
        // that position must hold the 0x80 marker.
        let marker = data.iter().rposition(|&b| b != 0).unwrap_or(0);
        if data[marker] != 0x80 {
            return Err(UnpadError);
        }
        Ok(&data[..marker])
    }
}

1
third_party/rust/byte-tools/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"af6af6ea1dfa296af5dc58986d1afb46952328588069ec0b08723db439e9972d","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"52232c2cee3bb7d8cabe47ef367f1bf8bb607c22bdfca0219d6156cb7f446e9d","src/lib.rs":"9c96cffef7458fc7bd9e4e61270b69d539ff3a9225a0319b7996155c25ff96ab","src/read_single.rs":"3ab78b15754c2a7848a1be871ff6ee2a31a099f8f4f89be44ad210cda0dbcc9a","src/read_slice.rs":"b3790f2fd080db97e239c05c63da123ea375fb9b354dc9cacb859ed9c44f552e","src/write_single.rs":"1cee4f2f5d8690e47840ea7017539ead417a26abc0717137442a6d9d2875afe4","src/write_slice.rs":"de90e6b9cfca67125871bee7cef55c63574b1871a6584e51fc00a97e5877fe69"},"package":"560c32574a12a89ecd91f5e742165893f86e3ab98d21f8ea548658eb9eef5f40"}

Просмотреть файл

@ -11,9 +11,11 @@
# will likely look very different (and much more reasonable)
[package]
name = "lalrpop-intern"
version = "0.15.1"
authors = ["Niko Matsakis <niko@alum.mit.edu>"]
description = "Simple string interner used by LALRPOP"
license = "Apache-2.0/MIT"
repository = "https://github.com/lalrpop/lalrpop"
name = "byte-tools"
version = "0.2.0"
authors = ["The Rust-Crypto Project Developers"]
description = "Utility functions for working with bytes"
documentation = "https://docs.rs/byte-tools"
keywords = ["bytes"]
license = "MIT/Apache-2.0"
repository = "https://github.com/RustCrypto/utils"

201
third_party/rust/byte-tools/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

26
third_party/rust/byte-tools/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,26 @@
Copyright (c) 2006-2009 Graydon Hoare
Copyright (c) 2009-2013 Mozilla Foundation
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

37
third_party/rust/byte-tools/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,37 @@
#![no_std]
use core::ptr;
mod read_single;
mod write_single;
mod read_slice;
mod write_slice;
pub use read_single::*;
pub use write_single::*;
pub use read_slice::*;
pub use write_slice::*;
/// Copy all bytes of `src` into the start of `dst`.
///
/// Panics if `dst` is shorter than `src`; any bytes of `dst` past
/// `src.len()` are left untouched.
#[inline]
pub fn copy_memory(src: &[u8], dst: &mut [u8]) {
    assert!(dst.len() >= src.len());
    // Safe equivalent of the former ptr::copy_nonoverlapping: the subslice
    // has exactly src.len() bytes, so this is a single memcpy.
    dst[..src.len()].copy_from_slice(src);
}
/// Zero all bytes in `dst`.
///
/// Thin wrapper over [`set`] with a fill value of zero.
#[inline]
pub fn zero(dst: &mut [u8]) {
    set(dst, 0);
}
/// Sets all bytes in `dst` equal to `value`.
#[inline]
pub fn set(dst: &mut [u8], value: u8) {
    // Safe equivalent of the former unsafe ptr::write_bytes: the compiler
    // lowers this loop to a memset.
    for b in dst.iter_mut() {
        *b = value;
    }
}

38
third_party/rust/byte-tools/src/read_single.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,38 @@
use core::{mem, ptr};
/// Interpret `$src` (exactly `$size` bytes) as a `$ty`, normalizing with
/// `$which` (`to_le`/`to_be`) so the result is independent of host
/// endianness.
macro_rules! read_single {
    ($src:expr, $size:expr, $ty:ty, $which:ident) => ({
        assert!($size == mem::size_of::<$ty>());
        assert!($size == $src.len());
        // Copy into a fully-initialized buffer first; the previous
        // mem::uninitialized() + copy dance produced an uninitialized
        // integer, which is undefined behavior.
        let mut buf = [0u8; $size];
        buf.copy_from_slice($src);
        // SAFETY: `buf` is fully initialized and, per the assert above,
        // exactly `size_of::<$ty>()` bytes, so the reinterpretation is valid.
        let tmp: $ty = unsafe { mem::transmute(buf) };
        tmp.$which()
    });
}
/// Read the value of a vector of bytes as a u32 value in little-endian format.
#[inline]
pub fn read_u32_le(src: &[u8]) -> u32 {
    assert!(4 == src.len());
    // Fold from the most significant (last) byte down to the first.
    src.iter().rev().fold(0u32, |acc, &b| (acc << 8) | u32::from(b))
}
/// Read the value of a vector of bytes as a u32 value in big-endian format.
#[inline]
pub fn read_u32_be(src: &[u8]) -> u32 {
    assert!(4 == src.len());
    src.iter().fold(0u32, |acc, &b| (acc << 8) | u32::from(b))
}
/// Read the value of a vector of bytes as a u64 value in little-endian format.
#[inline]
pub fn read_u64_le(src: &[u8]) -> u64 {
    assert!(8 == src.len());
    src.iter().rev().fold(0u64, |acc, &b| (acc << 8) | u64::from(b))
}
/// Read the value of a vector of bytes as a u64 value in big-endian format.
#[inline]
pub fn read_u64_be(src: &[u8]) -> u64 {
    assert!(8 == src.len());
    src.iter().fold(0u64, |acc, &b| (acc << 8) | u64::from(b))
}

44
third_party/rust/byte-tools/src/read_slice.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,44 @@
use core::ptr;
/// Copy `$src` (raw bytes) into `$dst` (a slice of `$size`-byte integers),
/// then normalize each element with `$which` (`to_le`/`to_be`) so the result
/// is independent of host endianness.
macro_rules! read_slice {
    ($src:expr, $dst:expr, $size:expr, $which:ident) => ({
        // Every destination element must be backed by exactly $size bytes.
        assert_eq!($size*$dst.len(), $src.len());
        unsafe {
            // SAFETY: `$dst` provides `$size * $dst.len()` == `$src.len()`
            // writable, properly-aligned bytes, and the two slices cannot
            // overlap (shared vs. exclusive borrow).
            ptr::copy_nonoverlapping(
                $src.as_ptr(),
                $dst.as_mut_ptr() as *mut u8,
                $src.len());
        }
        // Reinterpret each element from the native-order bytes just copied.
        for v in $dst.iter_mut() {
            *v = v.$which();
        }
    });
}
/// Read a vector of bytes into a vector of u32s. The values are read in
/// little-endian format.
#[inline]
pub fn read_u32v_le(dst: &mut [u32], src: &[u8]) {
    assert_eq!(4 * dst.len(), src.len());
    for (v, chunk) in dst.iter_mut().zip(src.chunks(4)) {
        *v = chunk.iter().rev().fold(0u32, |acc, &b| (acc << 8) | u32::from(b));
    }
}
/// Read a vector of bytes into a vector of u32s. The values are read in
/// big-endian format.
#[inline]
pub fn read_u32v_be(dst: &mut [u32], src: &[u8]) {
    assert_eq!(4 * dst.len(), src.len());
    for (v, chunk) in dst.iter_mut().zip(src.chunks(4)) {
        *v = chunk.iter().fold(0u32, |acc, &b| (acc << 8) | u32::from(b));
    }
}
/// Read a vector of bytes into a vector of u64s. The values are read in
/// little-endian format.
#[inline]
pub fn read_u64v_le(dst: &mut [u64], src: &[u8]) {
    assert_eq!(8 * dst.len(), src.len());
    for (v, chunk) in dst.iter_mut().zip(src.chunks(8)) {
        *v = chunk.iter().rev().fold(0u64, |acc, &b| (acc << 8) | u64::from(b));
    }
}
/// Read a vector of bytes into a vector of u64s. The values are read in
/// big-endian format.
#[inline]
pub fn read_u64v_be(dst: &mut [u64], src: &[u8]) {
    assert_eq!(8 * dst.len(), src.len());
    for (v, chunk) in dst.iter_mut().zip(src.chunks(8)) {
        *v = chunk.iter().fold(0u64, |acc, &b| (acc << 8) | u64::from(b));
    }
}

39
third_party/rust/byte-tools/src/write_single.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,39 @@
use core::{mem, ptr};
/// Write `$n` into `$dst` (exactly `$size` bytes) after normalizing with
/// `$which` (`to_le`/`to_be`), making the written byte order independent of
/// the host's endianness.
macro_rules! write_single {
    ($dst:expr, $n:expr, $size:expr, $which:ident) => ({
        assert!($size == $dst.len());
        // SAFETY: an unsigned integer is plain old data; reinterpreting it
        // as a same-sized byte array is always valid.
        let bytes = unsafe { mem::transmute::<_, [u8; $size]>($n.$which()) };
        // Safe memcpy; replaces the former raw ptr::copy_nonoverlapping.
        $dst.copy_from_slice(&bytes);
    });
}
/// Write a u32 into a vector, which must be 4 bytes long. The value is written
/// in little-endian format.
#[inline]
pub fn write_u32_le(dst: &mut [u8], n: u32) {
    assert!(4 == dst.len());
    // Emit the least significant byte first.
    let mut rest = n;
    for b in dst.iter_mut() {
        *b = rest as u8;
        rest >>= 8;
    }
}
/// Write a u32 into a vector, which must be 4 bytes long. The value is written
/// in big-endian format.
#[inline]
pub fn write_u32_be(dst: &mut [u8], n: u32) {
    assert!(4 == dst.len());
    // Walk the buffer backwards so the most significant byte lands first.
    let mut rest = n;
    for b in dst.iter_mut().rev() {
        *b = rest as u8;
        rest >>= 8;
    }
}
/// Write a u64 into a vector, which must be 8 bytes long. The value is written
/// in little-endian format.
#[inline]
pub fn write_u64_le(dst: &mut [u8], n: u64) {
    assert!(8 == dst.len());
    let mut rest = n;
    for b in dst.iter_mut() {
        *b = rest as u8;
        rest >>= 8;
    }
}
/// Write a u64 into a vector, which must be 8 bytes long. The value is written
/// in big-endian format.
#[inline]
pub fn write_u64_be(dst: &mut [u8], n: u64) {
    assert!(8 == dst.len());
    let mut rest = n;
    for b in dst.iter_mut().rev() {
        *b = rest as u8;
        rest >>= 8;
    }
}

46
third_party/rust/byte-tools/src/write_slice.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,46 @@
use core::{ptr, mem};
/// Write the `$ty` values of `$src` into the byte slice `$dst`, normalizing
/// each element with `$which` (`to_le`/`to_be`) so the byte order is
/// independent of the host's endianness.
macro_rules! write_slice {
    ($src:expr, $dst:expr, $ty:ty, $size:expr, $which:ident) => ({
        assert!($size == mem::size_of::<$ty>());
        assert_eq!($dst.len(), $size*$src.len());
        // Convert element by element. The previous implementation transmuted
        // `&mut [u8]` into `&mut [$ty]`, which is unsound: the byte buffer
        // may be unaligned for `$ty`, and the slice length field was
        // reinterpreted as an element count without adjustment.
        for (chunk, &val) in $dst.chunks_mut($size).zip($src.iter()) {
            // SAFETY: `$ty` is plain old data of exactly `$size` bytes
            // (asserted above), so the byte-array reinterpretation is valid.
            let bytes = unsafe { mem::transmute::<$ty, [u8; $size]>(val.$which()) };
            chunk.copy_from_slice(&bytes);
        }
    });
}
/// Write a vector of u32s into a vector of bytes. The values are written in
/// little-endian format.
#[inline]
pub fn write_u32v_le(dst: &mut [u8], src: &[u32]) {
    assert_eq!(dst.len(), 4 * src.len());
    for (chunk, &v) in dst.chunks_mut(4).zip(src.iter()) {
        let mut rest = v;
        for b in chunk.iter_mut() {
            *b = rest as u8;
            rest >>= 8;
        }
    }
}
/// Write a vector of u32s into a vector of bytes. The values are written in
/// big-endian format.
#[inline]
pub fn write_u32v_be(dst: &mut [u8], src: &[u32]) {
    assert_eq!(dst.len(), 4 * src.len());
    for (chunk, &v) in dst.chunks_mut(4).zip(src.iter()) {
        let mut rest = v;
        for b in chunk.iter_mut().rev() {
            *b = rest as u8;
            rest >>= 8;
        }
    }
}
/// Write a vector of u64s into a vector of bytes. The values are written in
/// little-endian format.
#[inline]
pub fn write_u64v_le(dst: &mut [u8], src: &[u64]) {
    assert_eq!(dst.len(), 8 * src.len());
    for (chunk, &v) in dst.chunks_mut(8).zip(src.iter()) {
        let mut rest = v;
        for b in chunk.iter_mut() {
            *b = rest as u8;
            rest >>= 8;
        }
    }
}
/// Write a vector of u64s into a vector of bytes. The values are written in
/// big-endian format.
#[inline]
pub fn write_u64v_be(dst: &mut [u8], src: &[u64]) {
    assert_eq!(dst.len(), 8 * src.len());
    for (chunk, &v) in dst.chunks_mut(8).zip(src.iter()) {
        let mut rest = v;
        for b in chunk.iter_mut().rev() {
            *b = rest as u8;
            rest >>= 8;
        }
    }
}

1
third_party/rust/digest/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"5c4d89b9b833bb5681c04817ef4e799012a6252ba90021c6482010c8871b87a6","Cargo.toml":"b3667b1e1a3985dd2c9e7873f6945c2d7163ed7da95569f40c2097285a325ec4","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"9e0dfd2dd4173a530e238cb6adb37aa78c34c6bc7444e0e10c1ab5d8881f63ba","src/dev.rs":"c824f834fa8b8c729024e4ec61138e89c26a56bfb6b50295600dddb5ff8fff62","src/digest.rs":"6710ac33c80e6159a2396839794fc76a61b94ab573516a69486457b3e291c793","src/errors.rs":"cff5bf2350bc109ad4f08caacf6780ff1e7016d9995f0847e84e96a8e31ab9d5","src/lib.rs":"bf4e93ebd066513001f3d6d77024ae8addf4df4fd89f76549fd1b73df386f3e4"},"package":"03b072242a8cbaf9c145665af9d250c59af3b958f83ed6824e13533cf76d5b90"}

5
third_party/rust/digest/.cargo_vcs_info.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,5 @@
{
"git": {
"sha1": "c02ab3d77605b540fd5dc2ea1a45c184f7d9e7d8"
}
}

32
third_party/rust/digest/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "digest"
version = "0.7.6"
authors = ["RustCrypto Developers"]
description = "Traits for cryptographic hash functions"
documentation = "https://docs.rs/digest"
keywords = ["digest", "crypto", "hash"]
categories = ["cryptography", "no-std"]
license = "MIT/Apache-2.0"
repository = "https://github.com/RustCrypto/traits"
[package.metadata.docs.rs]
features = ["std"]
[dependencies.generic-array]
version = "0.9"
[features]
dev = []
std = []
[badges.travis-ci]
repository = "RustCrypto/traits"

201
third_party/rust/digest/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/digest/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,25 @@
Copyright (c) 2017 Artyom Pavlov
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

171
third_party/rust/digest/src/dev.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,171 @@
use super::{Digest, Input, VariableOutput, ExtendableOutput, XofReader};
use core::fmt::Debug;
/// A known-answer test vector: hashing `input` must yield `output`.
pub struct Test {
    /// Identifier of the vector (also the `data/` file stem used by
    /// the `new_tests!` macro).
    pub name: &'static str,
    /// Message bytes to hash.
    pub input: &'static [u8],
    /// Expected digest bytes.
    pub output: &'static [u8],
}
/// Build an array of `Test` vectors from files under `data/`: for each
/// name, `<name>.input.bin` holds the message and `<name>.output.bin` the
/// expected digest (both embedded at compile time via `include_bytes!`).
#[macro_export]
macro_rules! new_tests {
    ( $( $name:expr ),* ) => {
        [$(
            Test {
                name: $name,
                input: include_bytes!(concat!("data/", $name, ".input.bin")),
                output: include_bytes!(concat!("data/", $name, ".output.bin")),
            },
        )*]
    };
    // Trailing-comma form delegates to the base rule.
    ( $( $name:expr ),+, ) => (new_tests!($($name),+))
}
/// Run fixed-output digest `D` over each test vector, both in a single
/// `input` call and split into pieces, asserting the expected digest.
pub fn main_test<D: Digest + Debug + Clone>(tests: &[Test]) {
    // Whole-message processing.
    for t in tests.iter() {
        let mut digest = D::default();
        digest.input(t.input);
        assert_eq!(digest.result()[..], t.output[..]);
    }
    // Incremental processing: repeatedly feed half (rounded up) of the
    // remaining input, to exercise streaming updates.
    for t in tests.iter() {
        let mut digest = D::default();
        let total = t.input.len();
        let mut remaining = total;
        while remaining > 0 {
            let take = (remaining + 1) / 2;
            let start = total - remaining;
            digest.input(&t.input[start..start + take]);
            remaining -= take;
        }
        assert_eq!(digest.result()[..], t.output[..]);
    }
}
/// Run variable-output digest `D` over each test vector (output length taken
/// from the vector), both whole and in pieces, asserting the expected digest.
pub fn variable_test<D>(tests: &[Test])
    where D: Input + VariableOutput + Clone + Debug
{
    let mut buf = [0u8; 1024];
    // Whole-message processing.
    for t in tests.iter() {
        let mut hasher = D::new(t.output.len()).unwrap();
        hasher.process(t.input);
        let produced = hasher.variable_result(&mut buf[..t.output.len()]).unwrap();
        assert_eq!(produced[..], t.output[..]);
    }
    // Incremental processing: repeatedly feed half (rounded up) of the
    // remaining input.
    for t in tests.iter() {
        let mut hasher = D::new(t.output.len()).unwrap();
        let total = t.input.len();
        let mut remaining = total;
        while remaining > 0 {
            let take = (remaining + 1) / 2;
            let start = total - remaining;
            hasher.process(&t.input[start..start + take]);
            remaining -= take;
        }
        let produced = hasher.variable_result(&mut buf[..t.output.len()]).unwrap();
        assert_eq!(produced[..], t.output[..]);
    }
}
/// Known-answer test driver for extendable-output functions (XOFs).
///
/// Each vector is checked three ways: whole-message input, chunked
/// input, and reading the XOF output one byte at a time.
pub fn xof_test<D>(tests: &[Test])
    where D: Input + ExtendableOutput + Default + Debug + Clone
{
    let mut scratch = [0u8; 1024];
    // Pass 1: whole message in one call.
    for case in tests {
        let mut hasher = D::default();
        hasher.process(case.input);
        let produced = &mut scratch[..case.output.len()];
        hasher.xof_result().read(produced);
        assert_eq!(produced[..], case.output[..]);
    }
    // Pass 2: feed roughly half of what remains on each call.
    for case in tests {
        let mut hasher = D::default();
        let total = case.input.len();
        let mut remaining = total;
        while remaining > 0 {
            let chunk = (remaining + 1) / 2;
            let start = total - remaining;
            hasher.process(&case.input[start..start + chunk]);
            remaining -= chunk;
        }
        let produced = &mut scratch[..case.output.len()];
        hasher.xof_result().read(produced);
        assert_eq!(produced[..], case.output[..]);
    }
    // Pass 3: drain the output reader one byte at a time.
    for case in tests {
        let mut hasher = D::default();
        hasher.process(case.input);
        let mut reader = hasher.xof_result();
        let produced = &mut scratch[..case.output.len()];
        for byte in produced.chunks_mut(1) {
            reader.read(byte);
        }
        assert_eq!(produced[..], case.output[..]);
    }
}
/// Classic "one million 'a's" digest test: hashes 1,000,000 bytes of
/// `b'a'` and compares against `expected`. The input is fed as 50,000
/// ten-byte updates followed by one 500,000-byte update, exercising
/// both small and large input paths.
pub fn one_million_a<D: Digest + Default + Debug + Clone>(expected: &[u8]) {
    let mut hasher = D::default();
    (0..50000).for_each(|_| hasher.input(&[b'a'; 10]));
    hasher.input(&[b'a'; 500000]);
    assert_eq!(hasher.result()[..], expected[..]);
}
/// Define `#[bench]` benchmarks for a digest engine.
///
/// The three-argument form defines a single benchmark `$name` that feeds a
/// zero-filled `$bs`-byte buffer to `$engine` on every iteration and reports
/// throughput via `b.bytes`. The one-argument form expands to a standard
/// suite of six benchmarks with input sizes from 16 bytes to 16 KiB (and
/// pulls `test::Bencher` plus the `Digest` trait into the caller's scope).
#[macro_export]
macro_rules! bench_digest {
($name:ident, $engine:path, $bs:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut d = <$engine>::default();
let data = [0; $bs];
b.iter(|| {
d.input(&data);
});
b.bytes = $bs;
}
};
($engine:path) => {
extern crate test;
use test::Bencher;
use digest::Digest;
bench_digest!(bench1_16, $engine, 1<<4);
bench_digest!(bench2_64, $engine, 1<<6);
bench_digest!(bench3_256, $engine, 1<<8);
bench_digest!(bench4_1k, $engine, 1<<10);
bench_digest!(bench5_4k, $engine, 1<<12);
bench_digest!(bench6_16k, $engine, 1<<14);
}
}

86
third_party/rust/digest/src/digest.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,86 @@
use super::{Input, BlockInput, FixedOutput};
use generic_array::GenericArray;
#[cfg(feature = "std")]
use std::io;
/// Convenience alias for a digest value of `N` bytes.
type Output<N> = GenericArray<u8, N>;
/// The `Digest` trait specifies an interface common for digest functions.
///
/// It's a convenience wrapper around `Input`, `FixedOutput`, `BlockInput` and
/// `Default` traits. It also provides additional convenience methods.
pub trait Digest: Input + BlockInput + FixedOutput + Default {
/// Create new hasher instance
fn new() -> Self {
Self::default()
}
/// Digest input data. This method can be called repeatedly
/// for use with streaming messages.
fn input(&mut self, input: &[u8]) {
self.process(input);
}
/// Retrieve the digest result. This method consumes digest instance.
fn result(self) -> Output<Self::OutputSize> {
self.fixed_result()
}
/// Convenience function to compute hash of the `data`. It will handle
/// hasher creation, data feeding and finalization.
///
/// Example:
///
/// ```rust,ignore
/// println!("{:x}", sha2::Sha256::digest(b"Hello world"));
/// ```
#[inline]
fn digest(data: &[u8]) -> Output<Self::OutputSize> {
let mut hasher = Self::default();
hasher.process(data);
hasher.fixed_result()
}
/// Convenience function to compute hash of the string. It's equivalent to
/// `digest(input_string.as_bytes())`.
#[inline]
fn digest_str(str: &str) -> Output<Self::OutputSize> {
Self::digest(str.as_bytes())
}
/// Convenience function which takes `std::io::Read` as a source and computes
/// value of digest function `D`, e.g. SHA-2, SHA-3, BLAKE2, etc. using 8 KiB
/// blocks.
///
/// Usage example:
///
/// ```rust,ignore
/// use std::fs;
/// use sha2::{Sha256, Digest};
///
/// let mut file = fs::File::open("Cargo.toml")?;
/// let result = Sha256::digest_reader(&mut file)?;
/// println!("{:x}", result);
/// ```
#[cfg(feature = "std")]
#[inline]
fn digest_reader(source: &mut io::Read)
-> io::Result<Output<Self::OutputSize>>
{
let mut hasher = Self::default();
// Fixed stack buffer: the reader is drained in chunks of up to
// 8 KiB until it reports EOF (a read of 0 bytes).
let mut buf = [0u8; 8 * 1024];
loop {
let len = match source.read(&mut buf) {
// EOF: finalize and return the digest.
Ok(0) => return Ok(hasher.result()),
Ok(len) => len,
// Transient interruption: retry the read.
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => continue,
Err(e) => Err(e)?,
};
hasher.process(&buf[..len]);
}
}
}
/// Blanket implementation: any type providing the three core traits plus
/// `Default` is automatically a `Digest`.
impl<D: Input + FixedOutput + BlockInput + Default> Digest for D {}

37
third_party/rust/digest/src/errors.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,37 @@
use core::fmt;
#[cfg(feature = "std")]
use std::error;

/// The error type for variable hasher initialization.
#[derive(Clone, Copy, Debug, Default)]
pub struct InvalidOutputSize;

/// The error type for variable hasher result.
#[derive(Clone, Copy, Debug, Default)]
pub struct InvalidBufferLength;

impl fmt::Display for InvalidOutputSize {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("invalid output size")
    }
}

impl fmt::Display for InvalidBufferLength {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("invalid buffer length")
    }
}

#[cfg(feature = "std")]
impl error::Error for InvalidOutputSize {
    fn description(&self) -> &str {
        "invalid output size"
    }
}

#[cfg(feature = "std")]
impl error::Error for InvalidBufferLength {
    // Fix: previously returned "invalid buffer size", disagreeing with the
    // `Display` impl above; keep the two strings identical.
    fn description(&self) -> &str {
        "invalid buffer length"
    }
}

98
third_party/rust/digest/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,98 @@
//! This crate provides traits describing the functionality of cryptographic
//! hash functions.
//!
//! By default, std functionality in this crate is disabled (e.g. the method
//! for hashing `Read`ers). To enable it, turn on the `std` feature of this
//! crate in your `Cargo.toml`.
#![cfg_attr(not(feature = "std"), no_std)]
pub extern crate generic_array;
#[cfg(feature = "std")]
use std as core;
use generic_array::{GenericArray, ArrayLength};
mod digest;
mod errors;
#[cfg(feature = "dev")]
pub mod dev;
pub use errors::{InvalidOutputSize, InvalidBufferLength};
pub use digest::Digest;
// `process` is chosen so it does not overlap with the `input` method of the
// `Digest` trait; switch to a trait alias once that feature stabilizes.
/// Trait for processing input data
pub trait Input {
/// Digest input data. This method can be called repeatedly
/// for use with streaming messages.
fn process(&mut self, input: &[u8]);
}
/// Trait to indicate that digest function processes data in blocks of size
/// `BlockSize`. Main usage of this trait is for implementing HMAC generically.
pub trait BlockInput {
/// Size (in bytes) of the internal processing block.
type BlockSize: ArrayLength<u8>;
}
/// Trait for returning digest result with the fixed size
pub trait FixedOutput {
/// Size (in bytes) of the digest output.
type OutputSize: ArrayLength<u8>;
/// Retrieve the digest result. This method consumes digest instance.
fn fixed_result(self) -> GenericArray<u8, Self::OutputSize>;
}
/// The error type for variable digest output.
///
/// Returned by `VariableOutput::new` and `VariableOutput::variable_result`
/// when the requested or provided length is not supported.
#[derive(Clone, Copy, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct InvalidLength;
/// Trait for returning digest result with the variable size
pub trait VariableOutput: core::marker::Sized {
/// Create new hasher instance with given output size. Will return
/// `Err(InvalidLength)` if the hasher can not work with the given
/// output size. Will always return an error if output size equals zero.
fn new(output_size: usize) -> Result<Self, InvalidLength>;
/// Get output size of the hasher instance provided to the `new` method
fn output_size(&self) -> usize;
/// Retrieve the digest result into provided buffer. Length of the buffer
/// must be equal to output size provided to the `new` method, otherwise
/// `Err(InvalidLength)` will be returned
fn variable_result(self, buffer: &mut [u8]) -> Result<&[u8], InvalidLength>;
}
/// Trait describing readers which are used to extract extendable output
/// from the resulting state of a hash function.
pub trait XofReader {
/// Read output into the `buffer`. Can be called an unlimited number of times.
fn read(&mut self, buffer: &mut [u8]);
}
/// Trait which describes extendable output (XOF) of hash functions. Using this
/// trait you first need to get a structure which implements `XofReader`,
/// through which you can read the extendable output.
pub trait ExtendableOutput {
/// Reader type used to extract the output stream.
type Reader: XofReader;
/// Finalize hash function and return XOF reader
fn xof_result(self) -> Self::Reader;
}
/// Macro for defining an opaque `Debug` implementation. It will use the
/// following format: "HasherName { ... }". While it's convenient to have it
/// (e.g. for including in other structs), it could be undesirable to leak
/// internal state, which can happen for example through careless logging.
#[macro_export]
macro_rules! impl_opaque_debug {
($state:ty) => {
impl ::core::fmt::Debug for $state {
fn fmt(&self, f: &mut ::core::fmt::Formatter)
-> Result<(), ::core::fmt::Error>
{
write!(f, concat!(stringify!($state), " {{ ... }}"))
}
}
}
}

2
third_party/rust/ena/.cargo-checksum.json поставляемый
Просмотреть файл

@ -1 +1 @@
{"files":{".travis.yml":"f8e54ea908a294d46381a1bd608da3fcc7fb0a87cb15f546b93b74ee9c97bb2b","Cargo.toml":"63ff1e6e9d93ec6a81fb28f199ccbf9299e177152cd751f568623717e85ed83a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"11d2194be1dc7460ee631a32884516f78d5d95dc6e5efa9115767a8f55f55a06","measurements.txt":"46606bc04662362369479bce5c31b109984c1a3446d7f0566556257af91b86e2","src/bitvec.rs":"c6c66c348776ff480b7ff6e4a3e0f64554a4194266f614408b45b5e3c324ec0a","src/cc/mod.rs":"fc486ba406d5761b1bd63621c37981c2b43966d269f8a596595fca36f8b395a4","src/cc/test.rs":"b6805fd4f22b3a3214c9759a674647e8b1dc83118f81c83955949a7414298423","src/constraint/mod.rs":"7df86d708ba692edd5bdb26b1da20720ee5bf51f741985c8193eb54db9365b4b","src/constraint/test.rs":"6666ec1411a61462777c88e7edf73f4bf7c04d4021007cf3340fd7ee22cece95","src/debug.rs":"0c24b9d2302c66e8f3e615c2a6689d88bc1eeac8844ae1f239fd3244c7f2ce6f","src/graph/mod.rs":"3a98ddddb4650744d5462ee442405551272e6c0ff820fd85c26dfec133974671","src/graph/tests.rs":"e2afc7912203e158d37d1f951cb76e6f67eb63890573649b3b2e9ea3afe5ba01","src/lib.rs":"d4584bb7efa3269a328d1ef373fef02e177efb8874f81556a124a58ea18fad87","src/snapshot_vec.rs":"0654cf102f05e98694b74076d5b2fcb7d52cfcbd1771853db22784ea7ad50cb1","src/unify/mod.rs":"0f8a78332c43d6776c2afa93aef174d5d10fb83a5046f0b7081262b754a31da3","src/unify/tests.rs":"9dfc23f77c6fc0565d90b0f74eceeadf666cd9c728aac388b33f138fbc30b50c"},"package":"cabe5a5078ac8c506d3e4430763b1ba9b609b1286913e7d08e581d1c2de9b7e5"}
{"files":{".travis.yml":"8effd1577dc503149f0f829c9291d844ec44d155fd253aa6b644c4ccc25e8bc8","Cargo.toml":"13e445b6bc53bf1ea2379fd2ec33205daa9b1b74d5a41e4dd9ea8cb966185c5a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"4b02d7ebfb188b1f2cbef20ade3082197046ccaa89e49d2bcdef6102d48919e3","measurements.txt":"b209f98f2bc696904a48829e86952f4f09b59e4e685f7c12087c59d05ed31829","src/bitvec.rs":"c6c66c348776ff480b7ff6e4a3e0f64554a4194266f614408b45b5e3c324ec0a","src/lib.rs":"294aabf6fb846dbe35bba837d70ea9115f20cd808995a318c0fccb05f91d096f","src/snapshot_vec.rs":"abc649bb42dc8592741b02d53ba1ed5f6ad64710b971070872b0c42665d73c93","src/unify/backing_vec.rs":"7d57036ce671169893d069f94454f1c4b95104517ffd62859f180d80cbe490e5","src/unify/mod.rs":"9fc90951778be635fbbf4fba8b3a0a4eb21e2c955660f019377465ac773b9563","src/unify/tests.rs":"b18974faeebdf2c03e82035fe7281bf4db3360ab10ce34b1d3441547836b19f2"},"package":"88dc8393b3c7352f94092497f6b52019643e493b6b890eb417cdb7c46117e621"}

6
third_party/rust/ena/.travis.yml поставляемый
Просмотреть файл

@ -1,5 +1,9 @@
language: rust
rust:
- stable
- nightly
script:
- cargo test
- cargo test
- |
[ $TRAVIS_RUST_VERSION != nightly ] ||
cargo test --all-features

39
third_party/rust/ena/Cargo.toml поставляемый
Просмотреть файл

@ -1,8 +1,37 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "ena"
description = "Union-find, congruence closure, and other unification code. Based on code from rustc."
license = "MIT/Apache-2.0"
homepage = "https://github.com/nikomatsakis/ena"
repository = "https://github.com/nikomatsakis/ena"
version = "0.5.0"
version = "0.9.3"
authors = ["Niko Matsakis <niko@alum.mit.edu>"]
description = "Union-find, congruence closure, and other unification code. Based on code from rustc."
homepage = "https://github.com/nikomatsakis/ena"
readme = "README.md"
keywords = ["unification", "union-find"]
license = "MIT/Apache-2.0"
repository = "https://github.com/nikomatsakis/ena"
[dependencies.dogged]
version = "0.2.0"
optional = true
[dependencies.log]
version = "0.4"
[dependencies.petgraph]
version = "0.4.5"
optional = true
[features]
bench = []
congruence-closure = ["petgraph"]
persistent = ["dogged"]

16
third_party/rust/ena/README.md поставляемый
Просмотреть файл

@ -1,15 +1,19 @@
[![Build Status](https://travis-ci.org/nikomatsakis/rayon.svg?branch=master)](https://travis-ci.org/nikomatsakis/ena)
[![Build Status](https://travis-ci.org/nikomatsakis/ena.svg?branch=master)](https://travis-ci.org/nikomatsakis/ena)
An implementation of union-find / congruence-closure in Rust. Forked
from rustc for independent experimentation. My intention is to iterate
and improve this code and gradually bring back changes into rustc
itself, but also to enable other crates.io packages to use the same
code.
An implementation of union-find in Rust; extracted from (and used by)
rustc.
### Name
The name "ena" comes from the Greek word for "one".
### Features
By default, you just get the union-find implementation. You can also
opt-in to the following experimental features:
- `bench`: use to run benchmarks (`cargo bench --features bench`)
### License
Like rustc itself, this code is dual-licensed under the MIT and Apache

23
third_party/rust/ena/measurements.txt поставляемый
Просмотреть файл

@ -1,21 +1,6 @@
base
test unify::test::big_array_bench ... bench: 1,416,793 ns/iter (+/- 216,475)
test unify::tests::big_array_bench ... bench: 740,192 ns/iter (+/- 35,823)
test unify::tests::big_array_bench ... bench: 745,031 ns/iter (+/- 240,463)
test unify::tests::big_array_bench ... bench: 762,031 ns/iter (+/- 240,463)
test unify::tests::big_array_bench ... bench: 756,234 ns/iter (+/- 264,710)
assert -> debug_assert
test unify::test::big_array_bench ... bench: 1,420,368 ns/iter (+/- 144,433)
test unify::test::big_array_bench ... bench: 1,414,448 ns/iter (+/- 219,137)
don't copy for redirects
test unify::test::big_array_bench ... bench: 1,349,796 ns/iter (+/- 233,931)
test unify::test::big_array_bench ... bench: 1,367,082 ns/iter (+/- 301,644)
test unify::test::big_array_bench ... bench: 1,358,154 ns/iter (+/- 348,796)
copy less
test unify::test::big_array_bench ... bench: 744,775 ns/iter (+/- 51,865)
test unify::test::big_array_bench ... bench: 750,939 ns/iter (+/- 146,417)
test unify::test::big_array_bench ... bench: 754,104 ns/iter (+/- 121,968)
s/set-value/update-value/
test unify::test::big_array_bench ... bench: 731,531 ns/iter (+/- 125,685)
test unify::test::big_array_bench ... bench: 725,162 ns/iter (+/- 99,013)
test unify::test::big_array_bench ... bench: 735,473 ns/iter (+/- 121,156)

436
third_party/rust/ena/src/cc/mod.rs поставляемый
Просмотреть файл

@ -1,436 +0,0 @@
//! An implementation of the Congruence Closure algorithm based on the
//! paper "Fast Decision Procedures Based on Congruence Closure" by Nelson
//! and Oppen, JACM 1980.
use graph::{self, Graph, NodeIndex};
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::iter;
use unify::{UnifyKey, UnifyValue, InfallibleUnifyValue, UnificationTable, UnionedKeys};
#[cfg(test)]
mod test;
pub struct CongruenceClosure<K: Key> {
map: HashMap<K, Token>,
table: UnificationTable<Token>,
graph: Graph<K, ()>,
}
pub trait Key: Hash + Eq + Clone + Debug {
// If this Key has some efficient way of converting itself into a
// congruence closure `Token`, then it shold return `Some(token)`.
// Otherwise, return `None`, in which case the CC will internally
// map the key to a token. Typically, this is used by layers that
// wrap the CC, where inference variables are mapped directly to
// particular tokens.
fn to_token(&self) -> Option<Token> {
None
}
fn key_kind(&self) -> KeyKind;
fn shallow_eq(&self, key: &Self) -> bool;
fn successors(&self) -> Vec<Self>;
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum KeyKind {
Applicative,
Generative,
}
use self::KeyKind::*;
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Token {
// this is the index both for the graph and the unification table,
// since for every node there is also a slot in the unification
// table
index: u32,
}
impl Token {
fn new(index: u32) -> Token {
Token { index: index }
}
fn from_node(node: NodeIndex) -> Token {
Token { index: node.0 as u32 }
}
fn node(&self) -> NodeIndex {
NodeIndex(self.index as usize)
}
}
impl UnifyKey for Token {
type Value = KeyKind;
fn index(&self) -> u32 {
self.index
}
fn from_index(i: u32) -> Token {
Token::new(i)
}
fn tag() -> &'static str {
"CongruenceClosure"
}
fn order_roots(a: Self,
&a_value: &KeyKind,
b: Self,
&b_value: &KeyKind)
-> Option<(Self, Self)> {
if a_value == b_value {
None
} else if a_value == Generative {
Some((a, b))
} else {
debug_assert!(b_value == Generative);
Some((b, a))
}
}
}
impl UnifyValue for KeyKind {
fn unify_values(&kind1: &Self, &kind2: &Self) -> Result<Self, (Self, Self)> {
match (kind1, kind2) {
(Generative, _) => Ok(Generative),
(_, Generative) => Ok(Generative),
(Applicative, Applicative) => Ok(Applicative),
}
}
}
impl InfallibleUnifyValue for KeyKind {}
impl<K: Key> CongruenceClosure<K> {
pub fn new() -> CongruenceClosure<K> {
CongruenceClosure {
map: HashMap::new(),
table: UnificationTable::new(),
graph: Graph::new(),
}
}
/// Manually create a new CC token. You don't normally need to do
/// this, as CC tokens are automatically created for each key when
/// we first observe it. However, if you wish to have keys that
/// make use of the `to_token` method to bypass the `key -> token`
/// map, then you can use this function to make a new-token. The
/// callback `key_op` will be invoked to create the key for the
/// fresh token (typically, it will wrap the token in some kind of
/// enum indicating an inference variable).
///
/// **WARNING:** The new key **must** be a leaf (no successor
/// keys) or else things will not work right. This invariant is
/// not currently checked.
pub fn new_token<OP>(&mut self, key_kind: KeyKind, key_op: OP) -> Token
where OP: FnOnce(Token) -> K
{
let token = self.table.new_key(key_kind);
let key = key_op(token);
let node = self.graph.add_node(key);
assert_eq!(token.node(), node);
token
}
/// Return the key for a given token
pub fn key(&self, token: Token) -> &K {
self.graph.node_data(token.node())
}
/// Indicates they `key1` and `key2` are equivalent.
pub fn merge(&mut self, key1: K, key2: K) {
let token1 = self.add(key1);
let token2 = self.add(key2);
self.algorithm().merge(token1, token2);
}
/// Indicates whether `key1` and `key2` are equivalent.
pub fn merged(&mut self, key1: K, key2: K) -> bool {
// Careful: you cannot naively remove the `add` calls
// here. The reason is because of patterns like the test
// `struct_union_no_add`. If we unify X and Y, and then unify
// F(X) and F(Z), we need to be sure to figure out that F(Y)
// == F(Z). This requires a non-trivial deduction step, so
// just checking if the arguments are congruent will fail,
// because `Y == Z` does not hold.
debug!("merged: called({:?}, {:?})", key1, key2);
let token1 = self.add(key1);
let token2 = self.add(key2);
self.algorithm().unioned(token1, token2)
}
/// Returns an iterator over all keys that are known to have been
/// merged with `key`. This is a bit dubious, since the set of
/// merged keys will be dependent on what has been added, and is
/// not the full set of equivalencies that one might imagine. See the
/// test `merged_keys` for an example.
pub fn merged_keys(&mut self, key: K) -> MergedKeys<K> {
let token = self.add(key);
MergedKeys {
graph: &self.graph,
iterator: self.table.unioned_keys(token),
}
}
/// Add a key into the CC table, returning the corresponding
/// token. This is not part of the public API, though it could be
/// if we wanted.
fn add(&mut self, key: K) -> Token {
debug!("add(): key={:?}", key);
let (is_new, token) = self.get_or_add(&key);
debug!("add: key={:?} is_new={:?} token={:?}", key, is_new, token);
// if this node is already in the graph, we are done
if !is_new {
return token;
}
// Otherwise, we want to add the 'successors' also. So, for
// example, if we are adding `Box<Foo>`, the successor would
// be `Foo`. So go ahead and recursively add `Foo` if it
// doesn't already exist.
let successors: Vec<_> = key.successors()
.into_iter()
.map(|s| self.add(s))
.collect();
debug!("add: key={:?} successors={:?}", key, successors);
// Now we have to be a bit careful. It might be that we are
// adding `Box<Foo>`, but `Foo` was already present, and in
// fact equated with `Bar`. That is, maybe we had a graph like:
//
// Box<Bar> -> Bar == Foo
//
// Now we just added `Box<Foo>`, but we need to equate
// `Box<Foo>` and `Box<Bar>`.
for successor in successors {
// get set of predecessors for each successor BEFORE we add the new node;
// this would be `Box<Bar>` in the above example.
let predecessors: Vec<_> = self.algorithm().all_preds(successor);
debug!("add: key={:?} successor={:?} predecessors={:?}",
key,
successor,
predecessors);
// add edge from new node `Box<Foo>` to its successor `Foo`
self.graph.add_edge(token.node(), successor.node(), ());
// Now we have to consider merging the old predecessors,
// like `Box<Bar>`, with this new node `Box<Foo>`.
//
// Note that in other cases it might be that no merge will
// occur. For example, if we were adding `(A1, B1)` to a
// graph like this:
//
// (A, B) -> A == A1
// |
// v
// B
//
// In this case, the predecessor would be `(A, B)`; but we don't
// know that `B == B1`, so we can't merge that with `(A1, B1)`.
for predecessor in predecessors {
self.algorithm().maybe_merge(token, predecessor);
}
}
token
}
/// Gets the token for a key, if any.
fn get(&self, key: &K) -> Option<Token> {
key.to_token()
.or_else(|| self.map.get(key).cloned())
}
/// Gets the token for a key, adding one if none exists. Returns the token
/// and a boolean indicating whether it had to be added.
fn get_or_add(&mut self, key: &K) -> (bool, Token) {
if let Some(token) = self.get(key) {
return (false, token);
}
let token = self.new_token(key.key_kind(), |_| key.clone());
self.map.insert(key.clone(), token);
(true, token)
}
fn algorithm(&mut self) -> Algorithm<K> {
Algorithm {
graph: &self.graph,
table: &mut self.table,
}
}
}
// # Walking merged keys
pub struct MergedKeys<'cc, K: Key + 'cc> {
graph: &'cc Graph<K, ()>,
iterator: UnionedKeys<'cc, Token>,
}
impl<'cc, K: Key> Iterator for MergedKeys<'cc, K> {
type Item = K;
fn next(&mut self) -> Option<Self::Item> {
self.iterator
.next()
.map(|token| self.graph.node_data(token.node()).clone())
}
}
// # The core algorithm
struct Algorithm<'a, K: Key + 'a> {
graph: &'a Graph<K, ()>,
table: &'a mut UnificationTable<Token>,
}
impl<'a, K: Key> Algorithm<'a, K> {
fn merge(&mut self, u: Token, v: Token) {
debug!("merge(): u={:?} v={:?}", u, v);
if self.unioned(u, v) {
return;
}
let u_preds = self.all_preds(u);
let v_preds = self.all_preds(v);
self.union(u, v);
for &p_u in &u_preds {
for &p_v in &v_preds {
self.maybe_merge(p_u, p_v);
}
}
}
fn all_preds(&mut self, u: Token) -> Vec<Token> {
let graph = self.graph;
self.table
.unioned_keys(u)
.flat_map(|k| graph.predecessor_nodes(k.node()))
.map(|i| Token::from_node(i))
.collect()
}
fn maybe_merge(&mut self, p_u: Token, p_v: Token) {
debug!("maybe_merge(): p_u={:?} p_v={:?}",
self.key(p_u),
self.key(p_v));
if !self.unioned(p_u, p_v) && self.shallow_eq(p_u, p_v) && self.congruent(p_u, p_v) {
self.merge(p_u, p_v);
}
}
// Check whether each of the successors are unioned. So if you
// have `Box<X1>` and `Box<X2>`, this is true if `X1 == X2`. (The
// result of this fn is not really meaningful unless the two nodes
// are shallow equal here.)
fn congruent(&mut self, p_u: Token, p_v: Token) -> bool {
debug_assert!(self.shallow_eq(p_u, p_v));
debug!("congruent({:?}, {:?})", self.key(p_u), self.key(p_v));
let succs_u = self.successors(p_u);
let succs_v = self.successors(p_v);
let r = succs_u.zip(succs_v).all(|(s_u, s_v)| {
debug!("congruent: s_u={:?} s_v={:?}", s_u, s_v);
self.unioned(s_u, s_v)
});
debug!("congruent({:?}, {:?}) = {:?}",
self.key(p_u),
self.key(p_v),
r);
r
}
fn key(&self, u: Token) -> &'a K {
self.graph.node_data(u.node())
}
// Compare the local data, not considering successor nodes. So e.g
// `Box<X>` and `Box<Y>` are shallow equal for any `X` and `Y`.
fn shallow_eq(&self, u: Token, v: Token) -> bool {
let r = self.key(u).shallow_eq(self.key(v));
debug!("shallow_eq({:?}, {:?}) = {:?}", self.key(u), self.key(v), r);
r
}
fn token_kind(&self, u: Token) -> KeyKind {
self.graph.node_data(u.node()).key_kind()
}
fn unioned(&mut self, u: Token, v: Token) -> bool {
let r = self.table.unioned(u, v);
debug!("unioned(u={:?}, v={:?}) = {:?}",
self.key(u),
self.key(v),
r);
r
}
fn union(&mut self, u: Token, v: Token) {
debug!("union(u={:?}, v={:?})", self.key(u), self.key(v));
// find the roots of `u` and `v`; if `u` and `v` have been unioned
// with anything generative, these will be generative.
let u = self.table.find(u);
let v = self.table.find(v);
// u and v are now union'd
self.table.union(u, v);
// if both `u` and `v` were generative, we can now propagate
// the constraint that their successors must also be the same
if self.token_kind(u) == Generative && self.token_kind(v) == Generative {
if self.shallow_eq(u, v) {
let mut succs_u = self.successors(u);
let mut succs_v = self.successors(v);
for (succ_u, succ_v) in succs_u.by_ref().zip(succs_v.by_ref()) {
// assume # of succ is equal because types are WF (asserted below)
self.merge(succ_u, succ_v);
}
debug_assert!(succs_u.next().is_none());
debug_assert!(succs_v.next().is_none());
} else {
// error: user asked us to union i32/u32 or Vec<T>/Vec<U>;
// for now just panic.
panic!("inconsistent conclusion: {:?} vs {:?}",
self.key(u),
self.key(v));
}
}
}
fn successors(&self, token: Token) -> iter::Map<graph::AdjacentTargets<'a, K, ()>,
fn(NodeIndex) -> Token> {
self.graph
.successor_nodes(token.node())
.map(Token::from_node)
}
fn predecessors(&self, token: Token) -> iter::Map<graph::AdjacentSources<'a, K, ()>,
fn(NodeIndex) -> Token> {
self.graph
.predecessor_nodes(token.node())
.map(Token::from_node)
}
/// If `token` has been unioned with something generative, returns
/// `Ok(u)` where `u` is the generative token. Otherwise, returns
/// `Err(v)` where `v` is the root of `token`.
fn normalize_to_generative(&mut self, token: Token) -> Result<Token, Token> {
let token = self.table.find(token);
match self.token_kind(token) {
Generative => Ok(token),
Applicative => Err(token),
}
}
}

349
third_party/rust/ena/src/cc/test.rs поставляемый
Просмотреть файл

@ -1,349 +0,0 @@
// use debug::Logger;
use cc::{CongruenceClosure, Key, KeyKind, Token};
use self::TypeStruct::*;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
enum TypeStruct {
// e.g., `<T as Iterator>::Item` would be `Assoc(Iterator::Item, vec![T])`
Assoc(&'static str, Vec<Type>),
// skolemized version of in-scope generic, e.g., the `T` when checking `fn foo<T>`
Skolem(u32),
// inference variable (existentially quantified)
Variable(Token),
// a nominal type applied to arguments, e.g. `i32` or `Vec<T>`
Nominal(&'static str, Vec<Type>),
}
type Type = Box<TypeStruct>;
impl Key for Type {
fn to_token(&self) -> Option<Token> {
match **self {
TypeStruct::Variable(t) => Some(t),
_ => None,
}
}
fn key_kind(&self) -> KeyKind {
match **self {
TypeStruct::Assoc(..) |
TypeStruct::Variable(_) |
TypeStruct::Skolem(_) =>
KeyKind::Applicative,
TypeStruct::Nominal(..) =>
KeyKind::Generative,
}
}
fn shallow_eq(&self, key: &Type) -> bool {
match (&**self, &**key) {
(&Assoc(i, _), &Assoc(j, _)) => i == j,
(&Skolem(i), &Skolem(j)) => i == j,
(&Nominal(i, _), &Nominal(j, _)) => i == j,
_ => false,
}
}
fn successors(&self) -> Vec<Self> {
match **self {
Assoc(_, ref s) => s.clone(),
Skolem(_) => vec![],
Variable(_) => vec![],
Nominal(_, ref s) => s.clone(),
}
}
}
fn skolem(x: u32) -> Type {
Box::new(Skolem(x))
}
fn iterator_item(t: Type) -> Type {
Box::new(Assoc("Iterator::Item", vec![t]))
}
fn integer() -> Type {
Box::new(Nominal("integer", vec![]))
}
fn character() -> Type {
Box::new(Nominal("char", vec![]))
}
fn vec(t: Type) -> Type {
Box::new(Nominal("Vec", vec![t]))
}
fn inference_var<'tcx>(cc: &mut CongruenceClosure<Type>) -> Type {
let token = cc.new_token(KeyKind::Applicative,
move |token| Box::new(TypeStruct::Variable(token)));
cc.key(token).clone()
}
#[test]
fn simple_as_it_gets() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
assert!(cc.merged(skolem(0), skolem(0)));
assert!(!cc.merged(skolem(0), skolem(1)));
assert!(cc.merged(skolem(1), skolem(1)));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(0))));
assert!(!cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
assert!(cc.merged(iterator_item(skolem(1)), iterator_item(skolem(1))));
}
#[test]
fn union_vars() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(skolem(0), skolem(1));
assert!(cc.merged(skolem(0), skolem(1)));
}
#[test]
fn union_iterator_item_then_test_var() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(skolem(0), skolem(1));
assert!(cc.merged(skolem(0), skolem(1)));
}
#[test]
fn union_direct() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.add(iterator_item(skolem(0)));
cc.add(iterator_item(skolem(1)));
cc.add(skolem(0));
cc.add(skolem(1));
cc.merge(skolem(0), skolem(1));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
}
macro_rules! indirect_test {
($test_name:ident: $a:expr, $b:expr; $c:expr, $d:expr) => {
#[test]
fn $test_name() {
// Variant 1: call `add` explicitly
//
// This caused bugs because nodes were pre-existing.
{
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.add(iterator_item(skolem(0)));
cc.add(iterator_item(skolem(2)));
cc.add(skolem(0));
cc.add(skolem(1));
cc.add(skolem(2));
cc.merge($a, $b);
cc.merge($c, $d);
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(2))));
}
// Variant 2: never call `add` explicitly
//
// This is more how we expect library to be used in practice.
{
let mut cc2: CongruenceClosure<Type> = CongruenceClosure::new();
cc2.merge($a, $b);
cc2.merge($c, $d);
assert!(cc2.merged(iterator_item(skolem(0)), iterator_item(skolem(2))));
}
}
}
}
// The indirect tests test for the case where we merge V0 and V1, and
// we merged V1 and V2, and we want to use this to conclude that
// Assoc(V0) and Assoc(V2) are merged -- but there is no node created for
// Assoc(V1).
indirect_test! { indirect_test_1: skolem(1), skolem(2); skolem(1), skolem(0) }
indirect_test! { indirect_test_2: skolem(2), skolem(1); skolem(1), skolem(0) }
indirect_test! { indirect_test_3: skolem(1), skolem(2); skolem(0), skolem(1) }
indirect_test! { indirect_test_4: skolem(2), skolem(1); skolem(0), skolem(1) }
// Here we determine that `Assoc(V0) == Assoc(V1)` because `V0==V1`,
// but we never add nodes for `Assoc(_)`.
#[test]
fn merged_no_add() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(skolem(0), skolem(1));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
}
// Here we determine that `Assoc(V0) == Assoc(V2)` because `V0==V1==V2`,
// but we never add nodes for `Assoc(_)`.
#[test]
fn merged_no_add_indirect() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(skolem(0), skolem(1));
cc.merge(skolem(1), skolem(2));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(2))));
}
// Here we determine that `Assoc(V0) == Assoc(V2)` because `V0==V1==V2`,
// but we never add nodes for `Assoc(_)`.
#[test]
fn iterator_item_not_merged() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(iterator_item(skolem(0)), iterator_item(skolem(1)));
assert!(!cc.merged(skolem(0), skolem(1)));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
}
// Here we show that merging `Assoc(V1) == Assoc(V2)` does NOT imply that
// `V1 == V2`.
#[test]
fn merge_fns_not_inputs() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(iterator_item(skolem(0)), iterator_item(skolem(1)));
assert!(!cc.merged(skolem(0), skolem(1)));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
}
#[test]
fn inf_var_union() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
let v0 = inference_var(&mut cc);
let v1 = inference_var(&mut cc);
let v2 = inference_var(&mut cc);
let iterator_item_v0 = iterator_item(v0.clone());
let iterator_item_v1 = iterator_item(v1.clone());
let iterator_item_v2 = iterator_item(v2.clone());
cc.merge(v0.clone(), v1.clone());
assert!(cc.map.is_empty()); // inf variables don't take up map slots
assert!(cc.merged(iterator_item_v0.clone(), iterator_item_v1.clone()));
assert!(!cc.merged(iterator_item_v0.clone(), iterator_item_v2.clone()));
cc.merge(iterator_item_v0.clone(), iterator_item_v2.clone());
assert!(cc.merged(iterator_item_v0.clone(), iterator_item_v2.clone()));
assert!(cc.merged(iterator_item_v1.clone(), iterator_item_v2.clone()));
assert_eq!(cc.map.len(), 3); // each iterator_item needs an entry
}
#[test]
fn skolem_union_no_add() {
// This particular pattern of unifications exploits a potentially
// subtle bug:
// - We merge `skolem(0)` and `skolem(1)`
// and then merge `Assoc(skolem(0))` and `Assoc(skolem(2))`.
// - From this we should be able to deduce that `Assoc(skolem(1)) == Assoc(skolem(2))`.
// - However, if we are not careful with accounting for
// predecessors and so forth, this fails. For example, when
// adding `Assoc(skolem(1))`, we have to consider `Assoc(skolem(0))`
// to be a predecessor of `skolem(1)`.
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(skolem(0), skolem(1));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
assert!(!cc.merged(iterator_item(skolem(0)), iterator_item(skolem(2))));
cc.merge(iterator_item(skolem(0)), iterator_item(skolem(2)));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(2))));
assert!(cc.merged(iterator_item(skolem(1)), iterator_item(skolem(2))));
}
#[test]
fn merged_keys() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(skolem(0), skolem(1));
cc.merge(iterator_item(skolem(0)), iterator_item(skolem(2)));
// Here we don't yet see `iterator_item(skolem(1))` because it has no
// corresponding node:
let keys: Vec<Type> = cc.merged_keys(iterator_item(skolem(2))).collect();
assert_eq!(&keys[..], &[iterator_item(skolem(2)), iterator_item(skolem(0))]);
// But of course `merged` returns true (and adds a node):
assert!(cc.merged(iterator_item(skolem(1)), iterator_item(skolem(2))));
// So now we see it:
let keys: Vec<Type> = cc.merged_keys(iterator_item(skolem(2))).collect();
assert_eq!(&keys[..], &[iterator_item(skolem(2)),
iterator_item(skolem(1)),
iterator_item(skolem(0))]);
}
// Here we show that merging `Vec<V1> == Vec<V2>` DOES imply that
// `V1 == V2`.
#[test]
fn merge_vecs() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(vec(skolem(0)), vec(skolem(1)));
assert!(cc.merged(skolem(0), skolem(1)));
assert!(cc.merged(vec(skolem(0)), vec(skolem(1))));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
}
// Here we show that merging `Vec<V1::Item> == Vec<V2::Item>` does NOT imply that
// `V1 == V2`.
#[test]
fn merge_vecs_of_items() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(vec(iterator_item(skolem(0))),
vec(iterator_item(skolem(1))));
assert!(!cc.merged(skolem(0), skolem(1)));
assert!(!cc.merged(vec(skolem(0)), vec(skolem(1))));
assert!(cc.merged(vec(iterator_item(skolem(0))),
vec(iterator_item(skolem(1)))));
assert!(cc.merged(iterator_item(vec(iterator_item(skolem(0)))),
iterator_item(vec(iterator_item(skolem(1))))));
assert!(cc.merged(iterator_item(iterator_item(vec(iterator_item(skolem(0))))),
iterator_item(iterator_item(vec(iterator_item(skolem(1)))))));
assert!(cc.merged(iterator_item(skolem(0)), iterator_item(skolem(1))));
}
// Here we merge `Vec<Int>::Item` with `Int` and then merge that later
// with an inference variable, and show that we concluded that the
// variable is (indeed) `Int`.
#[test]
fn merge_iterator_item_generative() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(iterator_item(vec(integer())), integer());
let v0 = inference_var(&mut cc);
cc.merge(iterator_item(vec(integer())), v0.clone());
assert!(cc.merged(v0.clone(), integer()));
assert!(cc.merged(vec(iterator_item(vec(integer()))), vec(integer())));
}
#[test]
fn merge_ripple() {
let mut cc: CongruenceClosure<Type> = CongruenceClosure::new();
cc.merge(iterator_item(skolem(1)), vec(skolem(0)));
cc.merge(iterator_item(skolem(2)), vec(integer()));
assert!(!cc.merged(iterator_item(skolem(1)), iterator_item(skolem(2))));
println!("------------------------------");
cc.merge(skolem(0), integer());
println!("------------------------------");
assert!(cc.merged(iterator_item(skolem(1)),
iterator_item(skolem(2))));
assert!(cc.merged(iterator_item(iterator_item(skolem(1))),
iterator_item(iterator_item(skolem(2)))));
}

160
third_party/rust/ena/src/constraint/mod.rs поставляемый
Просмотреть файл

@ -1,160 +0,0 @@
//! Constraint graph.
#![allow(dead_code)]
use graph::{Graph, NodeIndex};
use std::collections::VecDeque;
use std::u32;
#[cfg(test)]
mod test;
pub trait Lattice {
type Element: Clone + Eq;
fn lub(&self, elem1: &Self::Element, elem2: &Self::Element) -> Option<Self::Element>;
}
pub struct ConstraintGraph<L: Lattice> {
graph: Graph<(), ()>,
values: Vec<L::Element>,
lattice: L,
}
#[derive(Copy, Clone)]
pub struct Var {
index: u32,
}
impl Var {
pub fn index(&self) -> usize {
self.index as usize
}
fn to_node_index(self) -> NodeIndex {
NodeIndex(self.index as usize)
}
fn from_node_index(ni: NodeIndex) -> Var {
assert!(ni.0 < (u32::MAX as usize));
Var { index: ni.0 as u32 }
}
}
impl<L> ConstraintGraph<L>
    where L: Lattice
{
    /// Creates an empty constraint graph over the given lattice.
    fn new(lattice: L) -> ConstraintGraph<L> {
        ConstraintGraph {
            graph: Graph::new(),
            values: Vec::new(),
            lattice: lattice,
        }
    }

    /// Adds a fresh variable initialized to `initial_value` and returns its
    /// handle. The node vector and the value vector are kept in lock-step.
    fn new_var(&mut self, initial_value: L::Element) -> Var {
        assert_eq!(self.graph.all_nodes().len(), self.values.len());
        let node_index = self.graph.add_node(());
        self.values.push(initial_value);
        Var::from_node_index(node_index)
    }

    /// Joins `value` into `var` (via the lattice lub) and propagates the
    /// change to every transitively reachable variable. Returns any lub
    /// failures encountered during propagation.
    pub fn constrain_var(&mut self, var: Var, value: L::Element) -> Vec<PropagationError<L>> {
        let propagation = Propagation::new(&self.lattice, &self.graph, &mut self.values);
        propagation.propagate(value, var)
    }

    /// Adds the constraint edge `source -> target`. Adding an edge that
    /// already exists is a no-op; otherwise `source`'s current value is
    /// immediately pushed into `target` and propagated onward.
    pub fn add_edge(&mut self, source: Var, target: Var) -> Vec<PropagationError<L>> {
        let source_node = source.to_node_index();
        let target_node = target.to_node_index();

        if self.graph
               .successor_nodes(source_node)
               .any(|n| n == target_node) {
            return vec![];
        }

        self.graph.add_edge(source_node, target_node, ());
        let value = self.current_value(source);
        self.constrain_var(target, value)
    }

    /// Returns a clone of `node`'s current lattice value.
    pub fn current_value(&self, node: Var) -> L::Element {
        self.values[node.index()].clone()
    }
}
/// ////////////////////////////////////////////////////////////////////////
struct Propagation<'p, L>
where L: Lattice + 'p,
L::Element: 'p
{
lattice: &'p L,
graph: &'p Graph<(), ()>,
values: &'p mut Vec<L::Element>,
queue: VecDeque<Var>,
errors: Vec<PropagationError<L>>,
}
pub struct PropagationError<L>
where L: Lattice
{
var: Var,
old_value: L::Element,
new_value: L::Element,
}
impl<'p, L> Propagation<'p, L>
    where L: Lattice,
          L::Element: 'p
{
    /// Creates a propagation pass over `graph`, mutating `values` in place.
    fn new(lattice: &'p L,
           graph: &'p Graph<(), ()>,
           values: &'p mut Vec<L::Element>)
           -> Propagation<'p, L> {
        Propagation {
            lattice: lattice,
            graph: graph,
            values: values,
            queue: VecDeque::new(),
            errors: Vec::new(),
        }
    }

    /// Joins `value` into `var`, then works the dirty queue until a fixed
    /// point: every variable whose value grew pushes its new value along
    /// all outgoing edges. Returns every lub failure encountered.
    fn propagate(mut self, value: L::Element, var: Var) -> Vec<PropagationError<L>> {
        self.update_node(value, var);

        while let Some(dirty) = self.queue.pop_front() {
            let value = self.values[dirty.index()].clone();
            for succ_node_index in self.graph.successor_nodes(dirty.to_node_index()) {
                let succ_var = Var::from_node_index(succ_node_index);
                self.update_node(value.clone(), succ_var);
            }
        }

        self.errors
    }

    /// Replaces `var`'s value with `lub(current, value)`. If the value
    /// changed, `var` is queued so its successors get re-examined; if the
    /// lub does not exist, the failure is recorded for the caller.
    fn update_node(&mut self, value: L::Element, var: Var) {
        let cur_value = self.values[var.index()].clone();
        match self.lattice.lub(&cur_value, &value) {
            Some(new_value) => {
                if cur_value != new_value {
                    // Bug fix: store the computed *join*, not the raw
                    // incoming `value`. For lattices where
                    // lub(a, b) != b (e.g. anything beyond a total
                    // order), assigning `value` here would discard the
                    // information already held in `cur_value`.
                    self.values[var.index()] = new_value;
                    self.queue.push_back(var);
                }
            }
            None => {
                // Error. Record for later.
                self.errors.push(PropagationError::<L> {
                    var: var,
                    old_value: cur_value,
                    new_value: value,
                });
            }
        }
    }
}

69
third_party/rust/ena/src/constraint/test.rs поставляемый
Просмотреть файл

@ -1,69 +0,0 @@
use super::*;
use std::cmp;
struct MaxLattice;
/// A lattice over `u32` where the least upper bound is simply the larger
/// of the two elements; the lub therefore always exists.
impl Lattice for MaxLattice {
    type Element = u32;

    fn lub(&self, elem1: &u32, elem2: &u32) -> Option<u32> {
        let larger = if *elem1 >= *elem2 { *elem1 } else { *elem2 };
        Some(larger)
    }
}
#[test]
fn basic() {
// v1 --+--> v2
// |
// v3 --+
let mut graph = ConstraintGraph::new(MaxLattice);
let v1 = graph.new_var(3);
let v2 = graph.new_var(0);
graph.add_edge(v1, v2);
assert_eq!(graph.current_value(v1), 3);
assert_eq!(graph.current_value(v2), 3);
let v3 = graph.new_var(5);
graph.add_edge(v3, v2);
assert_eq!(graph.current_value(v1), 3);
assert_eq!(graph.current_value(v2), 5);
assert_eq!(graph.current_value(v3), 5);
graph.constrain_var(v1, 10);
assert_eq!(graph.current_value(v1), 10);
assert_eq!(graph.current_value(v2), 10);
assert_eq!(graph.current_value(v3), 5);
}
#[test]
fn cycle() {
    // A four-node cycle; constraining any one node should raise them all.
    // (The diagram previously read "v3 <---- v3"; the code builds
    // v1 -> v2 -> v3 -> v4 -> v1.)
    //
    //     v1 ----> v2
    //     ^         |
    //     |         v
    //     v4 <---- v3
    let mut graph = ConstraintGraph::new(MaxLattice);
    let vars = [graph.new_var(0), graph.new_var(0), graph.new_var(0), graph.new_var(0)];
    for i in 0..4 {
        graph.add_edge(vars[i], vars[(i + 1) % vars.len()]);
    }
    graph.constrain_var(vars[1], 3);
    assert!(vars.iter().all(|&var| graph.current_value(var) == 3));
    graph.constrain_var(vars[2], 5);
    assert!(vars.iter().all(|&var| graph.current_value(var) == 5));
    graph.constrain_var(vars[3], 2);
    assert!(vars.iter().all(|&var| graph.current_value(var) == 5));
    graph.constrain_var(vars[3], 6);
    assert!(vars.iter().all(|&var| graph.current_value(var) == 6));
    graph.constrain_var(vars[0], 10);
    assert!(vars.iter().all(|&var| graph.current_value(var) == 10));
}

43
third_party/rust/ena/src/debug.rs поставляемый
Просмотреть файл

@ -1,43 +0,0 @@
// Test-only logging support: a thread-local counter gates `debug!` output,
// enabled while at least one `Logger` guard is alive.
#[cfg(test)]
use std::cell::Cell;

#[cfg(test)]
thread_local!(pub static ENABLED: Cell<u32> = Cell::new(0));

// In test builds, `debug!` prints only when logging has been switched on
// via `Logger::new()` (i.e. ENABLED > 0).
#[cfg(test)]
#[macro_export]
macro_rules! debug {
    ($($arg:tt)*) => (
        ::debug::ENABLED.with(|slot| {
            if slot.get() != 0 {
                println!("{}", format_args!($($arg)+));
            }
        })
    )
}

// In non-test builds, `debug!` compiles away to a unit expression.
#[cfg(not(test))]
#[macro_export]
macro_rules! debug {
    ($($arg:tt)*) => ( () )
}

// RAII guard: constructing a `Logger` turns debug output on; dropping it
// turns it back off. Nesting is supported by counting, not a boolean.
#[cfg(test)]
pub struct Logger {
    _x: (),
}

#[cfg(test)]
impl Logger {
    pub fn new() -> Logger {
        // Increment rather than set, so nested `Logger` scopes compose.
        ENABLED.with(|slot| slot.set(slot.get() + 1));
        Logger { _x: () }
    }
}

#[cfg(test)]
impl Drop for Logger {
    fn drop(&mut self) {
        ENABLED.with(|slot| slot.set(slot.get() - 1));
    }
}

427
third_party/rust/ena/src/graph/mod.rs поставляемый
Просмотреть файл

@ -1,427 +0,0 @@
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A graph module for use in dataflow, region resolution, and elsewhere.
//!
//! # Interface details
//!
//! You customize the graph by specifying a "node data" type `N` and an
//! "edge data" type `E`. You can then later gain access (mutable or
//! immutable) to these "user-data" bits. Currently, you can only add
//! nodes or edges to the graph. You cannot remove or modify them once
//! added. This could be changed if we have a need.
//!
//! # Implementation details
//!
//! The main tricky thing about this code is the way that edges are
//! stored. The edges are stored in a central array, but they are also
//! threaded onto two linked lists for each node, one for incoming edges
//! and one for outgoing edges. Note that every edge is a member of some
//! incoming list and some outgoing list. Basically you can load the
//! first index of the linked list from the node data structures (the
//! field `first_edge`) and then, for each edge, load the next index from
//! the field `next_edge`). Each of those fields is an array that should
//! be indexed by the direction (see the type `Direction`).
use bitvec::BitVector;
use std::fmt::{Formatter, Error, Debug};
use std::usize;
use snapshot_vec::{SnapshotVec, SnapshotVecDelegate};
#[cfg(test)]
mod tests;
pub struct Graph<N, E> {
nodes: SnapshotVec<Node<N>>,
edges: SnapshotVec<Edge<E>>,
}
pub struct Node<N> {
first_edge: [EdgeIndex; 2], // see module comment
pub data: N,
}
pub struct Edge<E> {
next_edge: [EdgeIndex; 2], // see module comment
source: NodeIndex,
target: NodeIndex,
pub data: E,
}
impl<N> SnapshotVecDelegate for Node<N> {
type Value = Node<N>;
type Undo = ();
fn reverse(_: &mut Vec<Node<N>>, _: ()) {}
}
impl<N> SnapshotVecDelegate for Edge<N> {
type Value = Edge<N>;
type Undo = ();
fn reverse(_: &mut Vec<Edge<N>>, _: ()) {}
}
impl<E: Debug> Debug for Edge<E> {
fn fmt(&self, f: &mut Formatter) -> Result<(), Error> {
write!(f,
"Edge {{ next_edge: [{:?}, {:?}], source: {:?}, target: {:?}, data: {:?} }}",
self.next_edge[0],
self.next_edge[1],
self.source,
self.target,
self.data)
}
}
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct NodeIndex(pub usize);
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
pub struct EdgeIndex(pub usize);
pub const INVALID_EDGE_INDEX: EdgeIndex = EdgeIndex(usize::MAX);
// Use a private field here to guarantee no more instances are created:
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct Direction {
repr: usize,
}
pub const OUTGOING: Direction = Direction { repr: 0 };
pub const INCOMING: Direction = Direction { repr: 1 };
impl NodeIndex {
/// Returns unique id (unique with respect to the graph holding associated node).
pub fn node_id(&self) -> usize {
self.0
}
}
impl EdgeIndex {
/// Returns unique id (unique with respect to the graph holding associated edge).
pub fn edge_id(&self) -> usize {
self.0
}
}
impl<N: Debug, E: Debug> Graph<N, E> {
pub fn new() -> Graph<N, E> {
Graph {
nodes: SnapshotVec::new(),
edges: SnapshotVec::new(),
}
}
// # Simple accessors
#[inline]
pub fn all_nodes(&self) -> &[Node<N>] {
&self.nodes
}
#[inline]
pub fn len_nodes(&self) -> usize {
self.nodes.len()
}
#[inline]
pub fn all_edges(&self) -> &[Edge<E>] {
&self.edges
}
#[inline]
pub fn len_edges(&self) -> usize {
self.edges.len()
}
// # Node construction
pub fn next_node_index(&self) -> NodeIndex {
NodeIndex(self.nodes.len())
}
pub fn add_node(&mut self, data: N) -> NodeIndex {
let idx = self.next_node_index();
self.nodes.push(Node {
first_edge: [INVALID_EDGE_INDEX, INVALID_EDGE_INDEX],
data: data,
});
idx
}
pub fn mut_node_data(&mut self, idx: NodeIndex) -> &mut N {
&mut self.nodes[idx.0].data
}
pub fn node_data(&self, idx: NodeIndex) -> &N {
&self.nodes[idx.0].data
}
pub fn node(&self, idx: NodeIndex) -> &Node<N> {
&self.nodes[idx.0]
}
// # Edge construction and queries
pub fn next_edge_index(&self) -> EdgeIndex {
EdgeIndex(self.edges.len())
}
pub fn add_edge(&mut self, source: NodeIndex, target: NodeIndex, data: E) -> EdgeIndex {
debug!("graph: add_edge({:?}, {:?}, {:?})", source, target, data);
let idx = self.next_edge_index();
// read current first of the list of edges from each node
let source_first = self.nodes[source.0].first_edge[OUTGOING.repr];
let target_first = self.nodes[target.0].first_edge[INCOMING.repr];
// create the new edge, with the previous firsts from each node
// as the next pointers
self.edges.push(Edge {
next_edge: [source_first, target_first],
source: source,
target: target,
data: data,
});
// adjust the firsts for each node target be the next object.
self.nodes[source.0].first_edge[OUTGOING.repr] = idx;
self.nodes[target.0].first_edge[INCOMING.repr] = idx;
return idx;
}
pub fn mut_edge_data(&mut self, idx: EdgeIndex) -> &mut E {
&mut self.edges[idx.0].data
}
pub fn edge_data(&self, idx: EdgeIndex) -> &E {
&self.edges[idx.0].data
}
pub fn edge(&self, idx: EdgeIndex) -> &Edge<E> {
&self.edges[idx.0]
}
pub fn first_adjacent(&self, node: NodeIndex, dir: Direction) -> EdgeIndex {
//! Accesses the index of the first edge adjacent to `node`.
//! This is useful if you wish to modify the graph while walking
//! the linked list of edges.
self.nodes[node.0].first_edge[dir.repr]
}
pub fn next_adjacent(&self, edge: EdgeIndex, dir: Direction) -> EdgeIndex {
//! Accesses the next edge in a given direction.
//! This is useful if you wish to modify the graph while walking
//! the linked list of edges.
self.edges[edge.0].next_edge[dir.repr]
}
// # Iterating over nodes, edges
pub fn each_node<'a, F>(&'a self, mut f: F) -> bool
where F: FnMut(NodeIndex, &'a Node<N>) -> bool
{
//! Iterates over all edges defined in the graph.
self.nodes.iter().enumerate().all(|(i, node)| f(NodeIndex(i), node))
}
pub fn each_edge<'a, F>(&'a self, mut f: F) -> bool
where F: FnMut(EdgeIndex, &'a Edge<E>) -> bool
{
//! Iterates over all edges defined in the graph
self.edges.iter().enumerate().all(|(i, edge)| f(EdgeIndex(i), edge))
}
pub fn outgoing_edges(&self, source: NodeIndex) -> AdjacentEdges<N, E> {
self.adjacent_edges(source, OUTGOING)
}
pub fn incoming_edges(&self, source: NodeIndex) -> AdjacentEdges<N, E> {
self.adjacent_edges(source, INCOMING)
}
pub fn adjacent_edges(&self, source: NodeIndex, direction: Direction) -> AdjacentEdges<N, E> {
let first_edge = self.node(source).first_edge[direction.repr];
AdjacentEdges {
graph: self,
direction: direction,
next: first_edge,
}
}
pub fn successor_nodes(&self, source: NodeIndex) -> AdjacentTargets<N, E> {
self.outgoing_edges(source).targets()
}
pub fn predecessor_nodes(&self, target: NodeIndex) -> AdjacentSources<N, E> {
self.incoming_edges(target).sources()
}
// # Fixed-point iteration
//
// A common use for graphs in our compiler is to perform
// fixed-point iteration. In this case, each edge represents a
// constraint, and the nodes themselves are associated with
// variables or other bitsets. This method facilitates such a
// computation.
pub fn iterate_until_fixed_point<'a, F>(&'a self, mut op: F)
where F: FnMut(usize, EdgeIndex, &'a Edge<E>) -> bool
{
let mut iteration = 0;
let mut changed = true;
while changed {
changed = false;
iteration += 1;
for (i, edge) in self.edges.iter().enumerate() {
changed |= op(iteration, EdgeIndex(i), edge);
}
}
}
pub fn depth_traverse<'a>(&'a self, start: NodeIndex) -> DepthFirstTraversal<'a, N, E> {
DepthFirstTraversal {
graph: self,
stack: vec![start],
visited: BitVector::new(self.nodes.len()),
}
}
}
// # Iterators
pub struct AdjacentEdges<'g, N, E>
where N: 'g,
E: 'g
{
graph: &'g Graph<N, E>,
direction: Direction,
next: EdgeIndex,
}
impl<'g, N, E> AdjacentEdges<'g, N, E> {
fn targets(self) -> AdjacentTargets<'g, N, E> {
AdjacentTargets { edges: self }
}
fn sources(self) -> AdjacentSources<'g, N, E> {
AdjacentSources { edges: self }
}
}
impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> {
    type Item = (EdgeIndex, &'g Edge<E>);

    /// Walks the intrusive linked list of edges for one node in one
    /// direction; `INVALID_EDGE_INDEX` is the list terminator.
    fn next(&mut self) -> Option<(EdgeIndex, &'g Edge<E>)> {
        let current = self.next;
        if current == INVALID_EDGE_INDEX {
            None
        } else {
            let edge = self.graph.edge(current);
            self.next = edge.next_edge[self.direction.repr];
            Some((current, edge))
        }
    }
}
pub struct AdjacentTargets<'g, N: 'g, E: 'g>
where N: 'g,
E: 'g
{
edges: AdjacentEdges<'g, N, E>,
}
impl<'g, N: Debug, E: Debug> Iterator for AdjacentTargets<'g, N, E> {
type Item = NodeIndex;
fn next(&mut self) -> Option<NodeIndex> {
self.edges.next().map(|(_, edge)| edge.target)
}
}
pub struct AdjacentSources<'g, N: 'g, E: 'g>
where N: 'g,
E: 'g
{
edges: AdjacentEdges<'g, N, E>,
}
impl<'g, N: Debug, E: Debug> Iterator for AdjacentSources<'g, N, E> {
type Item = NodeIndex;
fn next(&mut self) -> Option<NodeIndex> {
self.edges.next().map(|(_, edge)| edge.source)
}
}
pub struct DepthFirstTraversal<'g, N: 'g, E: 'g> {
graph: &'g Graph<N, E>,
stack: Vec<NodeIndex>,
visited: BitVector,
}
impl<'g, N: Debug, E: Debug> Iterator for DepthFirstTraversal<'g, N, E> {
type Item = NodeIndex;
fn next(&mut self) -> Option<NodeIndex> {
while let Some(idx) = self.stack.pop() {
if !self.visited.insert(idx.node_id()) {
continue;
}
for (_, edge) in self.graph.outgoing_edges(idx) {
if !self.visited.contains(edge.target().node_id()) {
self.stack.push(edge.target());
}
}
return Some(idx);
}
return None;
}
}
/// Invokes `f` on every edge index from 0 up to (but excluding)
/// `max_edge_index`, stopping early the first time `f` returns `false`.
pub fn each_edge_index<F>(max_edge_index: EdgeIndex, mut f: F)
    where F: FnMut(EdgeIndex) -> bool
{
    for i in 0..max_edge_index.0 {
        if !f(EdgeIndex(i)) {
            return;
        }
    }
}
impl<E> Edge<E> {
    /// The node this edge originates from.
    pub fn source(&self) -> NodeIndex {
        self.source
    }

    /// The node this edge points at.
    pub fn target(&self) -> NodeIndex {
        self.target
    }

    /// The node reached by following this edge in `direction`: the
    /// target for `OUTGOING`, the source for `INCOMING`.
    pub fn source_or_target(&self, direction: Direction) -> NodeIndex {
        let follows_forward = direction == OUTGOING;
        if follows_forward {
            self.target
        } else {
            self.source
        }
    }
}

141
third_party/rust/ena/src/graph/tests.rs поставляемый
Просмотреть файл

@ -1,141 +0,0 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use graph::*;
use std::fmt::Debug;
type TestNode = Node<&'static str>;
type TestEdge = Edge<&'static str>;
type TestGraph = Graph<&'static str, &'static str>;
fn create_graph() -> TestGraph {
let mut graph = Graph::new();
// Create a simple graph
//
// A -+> B --> C
// | | ^
// | v |
// F D --> E
let a = graph.add_node("A");
let b = graph.add_node("B");
let c = graph.add_node("C");
let d = graph.add_node("D");
let e = graph.add_node("E");
let f = graph.add_node("F");
graph.add_edge(a, b, "AB");
graph.add_edge(b, c, "BC");
graph.add_edge(b, d, "BD");
graph.add_edge(d, e, "DE");
graph.add_edge(e, c, "EC");
graph.add_edge(f, b, "FB");
return graph;
}
#[test]
fn each_node() {
let graph = create_graph();
let expected = ["A", "B", "C", "D", "E", "F"];
graph.each_node(|idx, node| {
assert_eq!(&expected[idx.0], graph.node_data(idx));
assert_eq!(expected[idx.0], node.data);
true
});
}
#[test]
fn each_edge() {
let graph = create_graph();
let expected = ["AB", "BC", "BD", "DE", "EC", "FB"];
graph.each_edge(|idx, edge| {
assert_eq!(&expected[idx.0], graph.edge_data(idx));
assert_eq!(expected[idx.0], edge.data);
true
});
}
fn test_adjacent_edges<N: PartialEq + Debug, E: PartialEq + Debug>(graph: &Graph<N, E>,
start_index: NodeIndex,
start_data: N,
expected_incoming: &[(E, N)],
expected_outgoing: &[(E, N)]) {
assert!(graph.node_data(start_index) == &start_data);
let mut counter = 0;
for (edge_index, edge) in graph.incoming_edges(start_index) {
assert!(graph.edge_data(edge_index) == &edge.data);
assert!(counter < expected_incoming.len());
debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}",
counter,
expected_incoming[counter],
edge_index,
edge);
match expected_incoming[counter] {
(ref e, ref n) => {
assert!(e == &edge.data);
assert!(n == graph.node_data(edge.source()));
assert!(start_index == edge.target);
}
}
counter += 1;
}
assert_eq!(counter, expected_incoming.len());
let mut counter = 0;
for (edge_index, edge) in graph.outgoing_edges(start_index) {
assert!(graph.edge_data(edge_index) == &edge.data);
assert!(counter < expected_outgoing.len());
debug!("counter={:?} expected={:?} edge_index={:?} edge={:?}",
counter,
expected_outgoing[counter],
edge_index,
edge);
match expected_outgoing[counter] {
(ref e, ref n) => {
assert!(e == &edge.data);
assert!(start_index == edge.source);
assert!(n == graph.node_data(edge.target));
}
}
counter += 1;
}
assert_eq!(counter, expected_outgoing.len());
}
#[test]
fn each_adjacent_from_a() {
let graph = create_graph();
test_adjacent_edges(&graph, NodeIndex(0), "A", &[], &[("AB", "B")]);
}
#[test]
fn each_adjacent_from_b() {
let graph = create_graph();
test_adjacent_edges(&graph,
NodeIndex(1),
"B",
&[("FB", "F"), ("AB", "A")],
&[("BD", "D"), ("BC", "C")]);
}
#[test]
fn each_adjacent_from_c() {
let graph = create_graph();
test_adjacent_edges(&graph, NodeIndex(2), "C", &[("EC", "E"), ("BC", "B")], &[]);
}
#[test]
fn each_adjacent_from_d() {
let graph = create_graph();
test_adjacent_edges(&graph, NodeIndex(3), "D", &[("BD", "B")], &[("DE", "E")]);
}

16
third_party/rust/ena/src/lib.rs поставляемый
Просмотреть файл

@ -8,16 +8,16 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![cfg_attr(all(feature = "unstable", test), feature(test))]
#![allow(dead_code)]
//! An implementation of union-find. See the `unify` module for more
//! details.
#![cfg_attr(feature = "bench", feature(test))]
#[macro_use]
mod debug;
extern crate log;
#[cfg(feature = "persistent")]
extern crate dogged;
pub mod constraint;
pub mod graph;
pub mod snapshot_vec;
#[cfg(feature = "unstable")]
pub mod cc;
pub mod unify;
pub mod bitvec;

68
third_party/rust/ena/src/snapshot_vec.rs поставляемый
Просмотреть файл

@ -18,11 +18,14 @@
//! ensure that any changes you make this with this pointer are rolled back, you must invoke
//! `record` to record any changes you make and also supplying a delegate capable of reversing
//! those changes.
use self::UndoLog::*;
use std::fmt;
use std::mem;
use std::ops;
#[derive(Debug)]
pub enum UndoLog<D: SnapshotVecDelegate> {
/// Indicates where a snapshot started.
OpenSnapshot,
@ -45,6 +48,20 @@ pub struct SnapshotVec<D: SnapshotVecDelegate> {
undo_log: Vec<UndoLog<D>>,
}
impl<D> fmt::Debug for SnapshotVec<D>
where D: SnapshotVecDelegate,
D: fmt::Debug,
D::Undo: fmt::Debug,
D::Value: fmt::Debug
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("SnapshotVec")
.field("values", &self.values)
.field("undo_log", &self.undo_log)
.finish()
}
}
// Snapshots are tokens that should be created/consumed linearly.
pub struct Snapshot {
// Length of the undo log at the time the snapshot was taken.
@ -66,6 +83,13 @@ impl<D: SnapshotVecDelegate> SnapshotVec<D> {
}
}
pub fn with_capacity(c: usize) -> SnapshotVec<D> {
SnapshotVec {
values: Vec::with_capacity(c),
undo_log: Vec::new(),
}
}
fn in_snapshot(&self) -> bool {
!self.undo_log.is_empty()
}
@ -95,6 +119,12 @@ impl<D: SnapshotVecDelegate> SnapshotVec<D> {
&self.values[index]
}
/// Reserve space for new values, just like an ordinary vec.
pub fn reserve(&mut self, additional: usize) {
// This is not affected by snapshots or anything.
self.values.reserve(additional);
}
/// Returns a mutable pointer into the vec; whatever changes you make here cannot be undone
/// automatically, so you should be sure call `record()` with some sort of suitable undo
/// action.
@ -111,8 +141,24 @@ impl<D: SnapshotVecDelegate> SnapshotVec<D> {
}
}
/// Updates all elements. Potentially more efficient -- but
/// otherwise equivalent to -- invoking `set` for each element.
pub fn set_all(&mut self, mut new_elems: impl FnMut(usize) -> D::Value) {
if !self.in_snapshot() {
for (slot, index) in self.values.iter_mut().zip(0..) {
*slot = new_elems(index);
}
} else {
for i in 0..self.values.len() {
self.set(i, new_elems(i));
}
}
}
pub fn update<OP>(&mut self, index: usize, op: OP)
where OP: FnOnce(&mut D::Value), D::Value: Clone
where
OP: FnOnce(&mut D::Value),
D::Value: Clone,
{
if self.in_snapshot() {
let old_elem = self.values[index].clone();
@ -224,8 +270,21 @@ impl<D: SnapshotVecDelegate> ops::IndexMut<usize> for SnapshotVec<D> {
}
}
impl<D: SnapshotVecDelegate> Extend<D::Value> for SnapshotVec<D> {
fn extend<T>(&mut self, iterable: T)
where
T: IntoIterator<Item = D::Value>,
{
for item in iterable {
self.push(item);
}
}
}
impl<D: SnapshotVecDelegate> Clone for SnapshotVec<D>
where D::Value: Clone, D::Undo: Clone,
where
D::Value: Clone,
D::Undo: Clone,
{
fn clone(&self) -> Self {
SnapshotVec {
@ -236,7 +295,9 @@ impl<D: SnapshotVecDelegate> Clone for SnapshotVec<D>
}
impl<D: SnapshotVecDelegate> Clone for UndoLog<D>
where D::Value: Clone, D::Undo: Clone,
where
D::Value: Clone,
D::Undo: Clone,
{
fn clone(&self) -> Self {
match *self {
@ -248,4 +309,3 @@ impl<D: SnapshotVecDelegate> Clone for UndoLog<D>
}
}
}

205
third_party/rust/ena/src/unify/backing_vec.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,205 @@
#[cfg(feature = "persistent")]
use dogged::DVec;
use snapshot_vec as sv;
use std::ops;
use std::marker::PhantomData;
use super::{VarValue, UnifyKey, UnifyValue};
#[allow(dead_code)] // rustc BUG
type Key<S> = <S as UnificationStore>::Key;
/// Largely internal trait implemented by the unification table
/// backing store types. The most common such type is `InPlace`,
/// which indicates a standard, mutable unification table.
pub trait UnificationStore: ops::Index<usize, Output = VarValue<Key<Self>>> + Clone {
type Key: UnifyKey<Value = Self::Value>;
type Value: UnifyValue;
type Snapshot;
fn new() -> Self;
fn start_snapshot(&mut self) -> Self::Snapshot;
fn rollback_to(&mut self, snapshot: Self::Snapshot);
fn commit(&mut self, snapshot: Self::Snapshot);
fn reset_unifications(
&mut self,
value: impl FnMut(u32) -> VarValue<Self::Key>,
);
fn len(&self) -> usize;
fn push(&mut self, value: VarValue<Self::Key>);
fn reserve(&mut self, num_new_values: usize);
fn update<F>(&mut self, index: usize, op: F)
where F: FnOnce(&mut VarValue<Self::Key>);
fn tag() -> &'static str {
Self::Key::tag()
}
}
/// Backing store for an in-place unification table.
/// Not typically used directly.
#[derive(Clone, Debug)]
pub struct InPlace<K: UnifyKey> {
values: sv::SnapshotVec<Delegate<K>>
}
impl<K: UnifyKey> UnificationStore for InPlace<K> {
    type Key = K;
    type Value = K::Value;
    type Snapshot = sv::Snapshot;

    /// Creates an empty in-place store.
    #[inline]
    fn new() -> Self {
        InPlace { values: sv::SnapshotVec::new() }
    }

    /// Starts a snapshot; changes made after this point can be undone.
    #[inline]
    fn start_snapshot(&mut self) -> Self::Snapshot {
        self.values.start_snapshot()
    }

    /// Undoes every change recorded since `snapshot` was taken.
    #[inline]
    fn rollback_to(&mut self, snapshot: Self::Snapshot) {
        self.values.rollback_to(snapshot);
    }

    /// Commits the changes made since `snapshot`; they can no longer be
    /// rolled back.
    #[inline]
    fn commit(&mut self, snapshot: Self::Snapshot) {
        self.values.commit(snapshot);
    }

    /// Overwrites every slot with `value(index)`, delegating to the
    /// snapshot vector's bulk `set_all`.
    #[inline]
    fn reset_unifications(
        &mut self,
        mut value: impl FnMut(u32) -> VarValue<Self::Key>,
    ) {
        self.values.set_all(|i| value(i as u32));
    }

    /// Number of keys allocated so far.
    #[inline]
    fn len(&self) -> usize {
        self.values.len()
    }

    /// Appends the slot for a newly created key.
    #[inline]
    fn push(&mut self, value: VarValue<Self::Key>) {
        self.values.push(value);
    }

    /// Pre-allocates room for `num_new_values` additional keys.
    #[inline]
    fn reserve(&mut self, num_new_values: usize) {
        self.values.reserve(num_new_values);
    }

    /// Applies `op` in place to the slot at `index`; undo information is
    /// recorded by the underlying snapshot vector when a snapshot is open.
    #[inline]
    fn update<F>(&mut self, index: usize, op: F)
        where F: FnOnce(&mut VarValue<Self::Key>)
    {
        self.values.update(index, op)
    }
}
impl<K> ops::Index<usize> for InPlace<K>
where
    K: UnifyKey,
{
    type Output = VarValue<K>;

    /// Direct read access to the record stored at position `i`.
    fn index(&self, i: usize) -> &VarValue<K> {
        &self.values[i]
    }
}
// Zero-sized delegate parameterizing `sv::SnapshotVec` over the key type.
#[derive(Copy, Clone, Debug)]
struct Delegate<K>(PhantomData<K>);
impl<K: UnifyKey> sv::SnapshotVecDelegate for Delegate<K> {
    type Value = VarValue<K>;
    type Undo = ();

    /// No custom undo actions are recorded (`Undo = ()`),
    /// so reversal is a no-op.
    fn reverse(_values: &mut Vec<VarValue<K>>, _undo: ()) {}
}
// Backing store for a persistent unification table, available behind
// the "persistent" cargo feature.
#[cfg(feature = "persistent")]
#[derive(Clone, Debug)]
pub struct Persistent<K: UnifyKey> {
    // Persistent vector from the `dogged` crate; clones share
    // structure, so copying the store is cheap.
    values: DVec<VarValue<K>>
}
#[cfg(feature = "persistent")]
impl<K: UnifyKey> UnificationStore for Persistent<K> {
    type Key = K;
    type Value = K::Value;
    // A snapshot is simply a saved copy of the whole store.
    type Snapshot = Self;

    #[inline]
    fn new() -> Self {
        let values = DVec::new();
        Persistent { values }
    }

    /// Snapshotting is just cloning; the persistent vector shares
    /// structure between the copies.
    #[inline]
    fn start_snapshot(&mut self) -> Self::Snapshot {
        self.clone()
    }

    /// Rolling back replaces the current state with the saved copy.
    #[inline]
    fn rollback_to(&mut self, snapshot: Self::Snapshot) {
        *self = snapshot;
    }

    /// Committing merely drops the saved copy: the current state
    /// already reflects every change made during the snapshot.
    #[inline]
    fn commit(&mut self, _snapshot: Self::Snapshot) {}

    #[inline]
    fn reset_unifications(
        &mut self,
        mut value: impl FnMut(u32) -> VarValue<Self::Key>,
    ) {
        // Without extending dogged, there isn't obviously a more
        // efficient way to do this. But it's pretty dumb. Maybe
        // dogged needs a `map`.
        for idx in 0..self.values.len() {
            self.values[idx] = value(idx as u32);
        }
    }

    #[inline]
    fn len(&self) -> usize {
        self.values.len()
    }

    #[inline]
    fn push(&mut self, value: VarValue<Self::Key>) {
        self.values.push(value);
    }

    /// No-op: reservation is not obviously relevant to `DVec`.
    #[inline]
    fn reserve(&mut self, _num_new_values: usize) {}

    #[inline]
    fn update<F>(&mut self, index: usize, op: F)
    where
        F: FnOnce(&mut VarValue<Self::Key>),
    {
        op(&mut self.values[index]);
    }
}
#[cfg(feature = "persistent")]
impl<K> ops::Index<usize> for Persistent<K>
where
    K: UnifyKey,
{
    type Output = VarValue<K>;

    /// Direct read access to the record stored at position `i`.
    fn index(&self, i: usize) -> &VarValue<K> {
        &self.values[i]
    }
}

454
third_party/rust/ena/src/unify/mod.rs поставляемый
Просмотреть файл

@ -8,10 +8,38 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Union-find implementation. The main type is `UnificationTable`.
//!
//! You can define your own type for the *keys* in the table, but you
//! must implement `UnifyKey` for that type. The assumption is that
//! keys will be newtyped integers, hence we require that they
//! implement `Copy`.
//!
//! Keys can have values associated with them. The assumption is that
//! these values are cheaply cloneable (ideally, `Copy`), and some of
//! the interfaces are oriented around that assumption. If you just
//! want the classical "union-find" algorithm where you group things
//! into sets, use the `Value` type of `()`.
//!
//! When you have keys with non-trivial values, you must also define
//! how those values can be merged. As part of doing this, you can
//! define the "error" type to return on error; if errors are not
//! possible, use `NoError` (an uninstantiable struct). Using this
//! type also unlocks various more ergonomic methods (e.g., `union()`
//! in place of `unify_var_var()`).
//!
//! The best way to see how it is used is to read the `tests.rs` file;
//! search for e.g. `UnitKey`.
use std::marker;
use std::fmt::Debug;
use std::marker::PhantomData;
use snapshot_vec as sv;
mod backing_vec;
pub use self::backing_vec::{InPlace, UnificationStore};
#[cfg(feature = "persistent")]
pub use self::backing_vec::Persistent;
#[cfg(test)]
mod tests;
@ -27,7 +55,7 @@ mod tests;
///
/// Clients are expected to provide implementations of this trait; you
/// can see some examples in the `test` module.
pub trait UnifyKey : Copy + Clone + Debug + PartialEq {
pub trait UnifyKey: Copy + Clone + Debug + PartialEq {
type Value: UnifyValue;
fn index(&self) -> u32;
@ -50,21 +78,72 @@ pub trait UnifyKey : Copy + Clone + Debug + PartialEq {
/// since overriding the rank can cause execution time to increase
/// dramatically.
#[allow(unused_variables)]
fn order_roots(a: Self, a_value: &Self::Value,
b: Self, b_value: &Self::Value)
-> Option<(Self, Self)> {
fn order_roots(
a: Self,
a_value: &Self::Value,
b: Self,
b_value: &Self::Value,
) -> Option<(Self, Self)> {
None
}
}
/// Trait implemented for **values** associated with a unification
/// key. This trait defines how to merge the values from two keys that
/// are unioned together. This merging can be fallible. If you attempt
/// to union two keys whose values cannot be merged, then the error is
/// propagated up and the two keys are not unioned.
///
/// This crate provides implementations of `UnifyValue` for `()`
/// (which is infallible) and `Option<T>` (where `T: UnifyValue`). The
/// option implementation merges two sum-values using the `UnifyValue`
/// implementation of `T`.
///
/// See also `EqUnifyValue`, which is a convenience trait for cases
/// where the "merge" operation succeeds only if the two values are
/// equal.
pub trait UnifyValue: Clone + Debug {
/// Defines the type to return when merging of two values fails.
/// If merging is infallible, use the special struct `NoError`
/// found in this crate, which unlocks various more convenient
/// methods on the unification table.
type Error;
/// Given two values, produce a new value that combines them.
/// If that is not possible, produce an error.
fn unify_values(value1: &Self, value2: &Self) -> Result<Self, (Self, Self)>;
fn unify_values(value1: &Self, value2: &Self) -> Result<Self, Self::Error>;
}
/// Marker trait which indicates that `UnifyValues::unify_values` will never return `Err`.
pub trait InfallibleUnifyValue: UnifyValue {
/// A convenient helper for unification values which must be equal or
/// else an error occurs. For example, if you are unifying types in a
/// simple functional language, this may be appropriate, since (e.g.)
/// you can't unify a type variable bound to `int` with one bound to
/// `float` (but you can unify two type variables both bound to
/// `int`).
///
/// Any type which implements `EqUnifyValue` automatially implements
/// `UnifyValue`; if the two values are equal, merging is permitted.
/// Otherwise, the error `(v1, v2)` is returned, where `v1` and `v2`
/// are the two unequal values.
pub trait EqUnifyValue: Eq + Clone + Debug {}
impl<T: EqUnifyValue> UnifyValue for T {
type Error = (T, T);
fn unify_values(value1: &Self, value2: &Self) -> Result<Self, Self::Error> {
if value1 == value2 {
Ok(value1.clone())
} else {
Err((value1.clone(), value2.clone()))
}
}
}
/// A struct which can never be instantiated. Used
/// for the error type for infallible cases.
#[derive(Debug)]
pub struct NoError {
_dummy: (),
}
/// Value of a unification key. We implement Tarjan's union-find
@ -75,78 +154,73 @@ pub trait InfallibleUnifyValue: UnifyValue {
/// to keep the DAG relatively balanced, which helps keep the running
/// time of the algorithm under control. For more information, see
/// <http://en.wikipedia.org/wiki/Disjoint-set_data_structure>.
#[derive(PartialEq,Clone,Debug)]
struct VarValue<K: UnifyKey> {
#[derive(PartialEq, Clone, Debug)]
pub struct VarValue<K: UnifyKey> { // FIXME pub
parent: K, // if equal to self, this is a root
value: K::Value, // value assigned (only relevant to root)
child: K, // if equal to self, no child (relevant to both root/redirect)
sibling: K, // if equal to self, no sibling (only relevant to redirect)
rank: u32, // max depth (only relevant to root)
}
/// Table of unification keys and their values.
#[derive(Clone)]
pub struct UnificationTable<K: UnifyKey> {
/// Table of unification keys and their values. You must define a key type K
/// that implements the `UnifyKey` trait. Unification tables can be used in two-modes:
///
/// - in-place (`UnificationTable<InPlace<K>>` or `InPlaceUnificationTable<K>`):
/// - This is the standard mutable mode, where the array is modified
/// in place.
/// - To do backtracking, you can employ the `snapshot` and `rollback_to`
/// methods.
/// - persistent (`UnificationTable<Persistent<K>>` or `PersistentUnificationTable<K>`):
/// - In this mode, we use a persistent vector to store the data, so that
/// cloning the table is an O(1) operation.
/// - This implies that ordinary operations are quite a bit slower though.
/// - Requires the `persistent` feature be selected in your Cargo.toml file.
#[derive(Clone, Debug)]
pub struct UnificationTable<S: UnificationStore> {
/// Indicates the current value of each key.
values: sv::SnapshotVec<Delegate<K>>,
values: S,
}
/// A unification table that uses an "in-place" vector.
pub type InPlaceUnificationTable<K> = UnificationTable<InPlace<K>>;
/// A unification table that uses a "persistent" vector.
#[cfg(feature = "persistent")]
pub type PersistentUnificationTable<K> = UnificationTable<Persistent<K>>;
/// At any time, users may snapshot a unification table. The changes
/// made during the snapshot may either be *committed* or *rolled back*.
pub struct Snapshot<K: UnifyKey> {
// Link snapshot to the key type `K` of the table.
marker: marker::PhantomData<K>,
snapshot: sv::Snapshot,
pub struct Snapshot<S: UnificationStore> {
// Link snapshot to the unification store `S` of the table.
marker: marker::PhantomData<S>,
snapshot: S::Snapshot,
}
#[derive(Copy, Clone)]
struct Delegate<K>(PhantomData<K>);
impl<K: UnifyKey> VarValue<K> {
fn new_var(key: K, value: K::Value) -> VarValue<K> {
VarValue::new(key, value, key, key, 0)
VarValue::new(key, value, 0)
}
fn new(parent: K, value: K::Value, child: K, sibling: K, rank: u32) -> VarValue<K> {
fn new(parent: K, value: K::Value, rank: u32) -> VarValue<K> {
VarValue {
parent: parent, // this is a root
value: value,
child: child,
sibling: sibling,
rank: rank,
}
}
fn redirect(&mut self, to: K, sibling: K) {
assert_eq!(self.parent, self.sibling); // ...since this used to be a root
fn redirect(&mut self, to: K) {
self.parent = to;
self.sibling = sibling;
}
fn root(&mut self, rank: u32, child: K, value: K::Value) {
fn root(&mut self, rank: u32, value: K::Value) {
self.rank = rank;
self.child = child;
self.value = value;
}
/// Returns the key of this node. Only valid if this is a root
/// node, which you yourself must ensure.
fn key(&self) -> K {
self.parent
}
fn parent(&self, self_key: K) -> Option<K> {
self.if_not_self(self.parent, self_key)
}
fn child(&self, self_key: K) -> Option<K> {
self.if_not_self(self.child, self_key)
}
fn sibling(&self, self_key: K) -> Option<K> {
self.if_not_self(self.sibling, self_key)
}
fn if_not_self(&self, key: K, self_key: K) -> Option<K> {
if key == self_key {
None
@ -161,51 +235,73 @@ impl<K: UnifyKey> VarValue<K> {
// other type parameter U, and we have no way to say
// Option<U>:LatticeValue.
impl<K: UnifyKey> UnificationTable<K> {
pub fn new() -> UnificationTable<K> {
UnificationTable { values: sv::SnapshotVec::new() }
impl<S: UnificationStore> UnificationTable<S> {
pub fn new() -> Self {
UnificationTable {
values: S::new()
}
}
/// Starts a new snapshot. Each snapshot must be either
/// rolled back or committed in a "LIFO" (stack) order.
pub fn snapshot(&mut self) -> Snapshot<K> {
pub fn snapshot(&mut self) -> Snapshot<S> {
Snapshot {
marker: marker::PhantomData::<K>,
marker: marker::PhantomData::<S>,
snapshot: self.values.start_snapshot(),
}
}
/// Reverses all changes since the last snapshot. Also
/// removes any keys that have been created since then.
pub fn rollback_to(&mut self, snapshot: Snapshot<K>) {
debug!("{}: rollback_to()", K::tag());
pub fn rollback_to(&mut self, snapshot: Snapshot<S>) {
debug!("{}: rollback_to()", S::tag());
self.values.rollback_to(snapshot.snapshot);
}
/// Commits all changes since the last snapshot. Of course, they
/// can still be undone if there is a snapshot further out.
pub fn commit(&mut self, snapshot: Snapshot<K>) {
debug!("{}: commit()", K::tag());
pub fn commit(&mut self, snapshot: Snapshot<S>) {
debug!("{}: commit()", S::tag());
self.values.commit(snapshot.snapshot);
}
pub fn new_key(&mut self, value: K::Value) -> K {
/// Creates a fresh key with the given value.
pub fn new_key(&mut self, value: S::Value) -> S::Key {
let len = self.values.len();
let key: K = UnifyKey::from_index(len as u32);
let key: S::Key = UnifyKey::from_index(len as u32);
self.values.push(VarValue::new_var(key, value));
debug!("{}: created new key: {:?}", K::tag(), key);
debug!("{}: created new key: {:?}", S::tag(), key);
key
}
pub fn unioned_keys(&mut self, key: K) -> UnionedKeys<K> {
let root_key = self.get_root_key(key);
UnionedKeys {
table: self,
stack: vec![root_key],
}
/// Reserve memory for `num_new_keys` to be created. Does not
/// actually create the new keys; you must then invoke `new_key`.
pub fn reserve(&mut self, num_new_keys: usize) {
self.values.reserve(num_new_keys);
}
fn value(&self, key: K) -> &VarValue<K> {
/// Clears all unifications that have been performed, resetting to
/// the initial state. The values of each variable are given by
/// the closure.
pub fn reset_unifications(
&mut self,
mut value: impl FnMut(S::Key) -> S::Value,
) {
self.values.reset_unifications(|i| {
let key = UnifyKey::from_index(i as u32);
let value = value(key);
VarValue::new_var(key, value)
});
}
/// Returns the number of keys created so far.
pub fn len(&self) -> usize {
self.values.len()
}
/// Obtains the current value for a particular key.
/// Not for end-users; they can use `probe_value`.
fn value(&self, key: S::Key) -> &VarValue<S::Key> {
&self.values[key.index() as usize]
}
@ -215,7 +311,7 @@ impl<K: UnifyKey> UnificationTable<K> {
///
/// NB. This is a building-block operation and you would probably
/// prefer to call `probe` below.
fn get_root_key(&mut self, vid: K) -> K {
fn get_root_key(&mut self, vid: S::Key) -> S::Key {
let redirect = {
match self.value(vid).parent(vid) {
None => return vid,
@ -223,7 +319,7 @@ impl<K: UnifyKey> UnificationTable<K> {
}
};
let root_key: K = self.get_root_key(redirect);
let root_key: S::Key = self.get_root_key(redirect);
if root_key != redirect {
// Path compression
self.update_value(vid, |value| value.parent = root_key);
@ -232,13 +328,9 @@ impl<K: UnifyKey> UnificationTable<K> {
root_key
}
fn is_root(&self, key: K) -> bool {
let index = key.index() as usize;
self.values.get(index).parent(key).is_none()
}
fn update_value<OP>(&mut self, key: K, op: OP)
where OP: FnOnce(&mut VarValue<K>)
fn update_value<OP>(&mut self, key: S::Key, op: OP)
where
OP: FnOnce(&mut VarValue<S::Key>),
{
self.values.update(key.index() as usize, op);
debug!("Updated variable {:?} to {:?}", key, self.value(key));
@ -252,24 +344,35 @@ impl<K: UnifyKey> UnificationTable<K> {
/// really more of a building block. If the values associated with
/// your key are non-trivial, you would probably prefer to call
/// `unify_var_var` below.
fn unify_roots(&mut self, key_a: K, key_b: K, new_value: K::Value) {
debug!("unify(key_a={:?}, key_b={:?})",
key_a,
key_b);
fn unify_roots(&mut self, key_a: S::Key, key_b: S::Key, new_value: S::Value) {
debug!("unify(key_a={:?}, key_b={:?})", key_a, key_b);
let rank_a = self.value(key_a).rank;
let rank_b = self.value(key_b).rank;
if let Some((new_root, redirected)) = K::order_roots(key_a, &self.value(key_a).value,
key_b, &self.value(key_b).value) {
if let Some((new_root, redirected)) =
S::Key::order_roots(
key_a,
&self.value(key_a).value,
key_b,
&self.value(key_b).value,
) {
// compute the new rank for the new root that they chose;
// this may not be the optimal choice.
let new_rank = if new_root == key_a {
debug_assert!(redirected == key_b);
if rank_a > rank_b { rank_a } else { rank_b + 1 }
if rank_a > rank_b {
rank_a
} else {
rank_b + 1
}
} else {
debug_assert!(new_root == key_b);
debug_assert!(redirected == key_a);
if rank_b > rank_a { rank_b } else { rank_a + 1 }
if rank_b > rank_a {
rank_b
} else {
rank_a + 1
}
};
self.redirect_root(new_rank, redirected, new_root, new_value);
} else if rank_a > rank_b {
@ -286,121 +389,87 @@ impl<K: UnifyKey> UnificationTable<K> {
}
}
fn redirect_root(&mut self,
new_rank: u32,
old_root_key: K,
new_root_key: K,
new_value: K::Value) {
let sibling = self.value(new_root_key).child(new_root_key)
.unwrap_or(old_root_key);
/// Internal method to redirect `old_root_key` (which is currently
/// a root) to a child of `new_root_key` (which will remain a
/// root). The rank and value of `new_root_key` will be updated to
/// `new_rank` and `new_value` respectively.
fn redirect_root(
&mut self,
new_rank: u32,
old_root_key: S::Key,
new_root_key: S::Key,
new_value: S::Value,
) {
self.update_value(old_root_key, |old_root_value| {
old_root_value.redirect(new_root_key, sibling);
old_root_value.redirect(new_root_key);
});
self.update_value(new_root_key, |new_root_value| {
new_root_value.root(new_rank, old_root_key, new_value);
new_root_value.root(new_rank, new_value);
});
}
}
impl<K: UnifyKey> sv::SnapshotVecDelegate for Delegate<K> {
type Value = VarValue<K>;
type Undo = ();
fn reverse(_: &mut Vec<VarValue<K>>, _: ()) {}
}
/// ////////////////////////////////////////////////////////////////////////
/// Iterator over keys that have been unioned together
pub struct UnionedKeys<'a, K>
where K: UnifyKey + 'a,
K::Value: 'a
{
table: &'a mut UnificationTable<K>,
stack: Vec<K>,
}
impl<'a, K> UnionedKeys<'a, K>
where K: UnifyKey,
K::Value: 'a
{
fn var_value(&self, key: K) -> VarValue<K> {
self.table.value(key).clone()
}
}
impl<'a, K: 'a> Iterator for UnionedKeys<'a, K>
where K: UnifyKey,
K::Value: 'a
{
type Item = K;
fn next(&mut self) -> Option<K> {
let key = match self.stack.last() {
Some(k) => *k,
None => {
return None;
}
};
let vv = self.var_value(key);
match vv.child(key) {
Some(child_key) => {
self.stack.push(child_key);
}
None => {
// No child, push a sibling for the current node. If
// current node has no siblings, start popping
// ancestors until we find an aunt or uncle or
// something to push. Note that we have the invariant
// that for every node N that we reach by popping
// items off of the stack, we have already visited all
// children of N.
while let Some(ancestor_key) = self.stack.pop() {
let ancestor_vv = self.var_value(ancestor_key);
match ancestor_vv.sibling(ancestor_key) {
Some(sibling) => {
self.stack.push(sibling);
break;
}
None => {}
}
}
}
}
Some(key)
}
}
/// ////////////////////////////////////////////////////////////////////////
/// Public API
impl<'tcx, K, V> UnificationTable<K>
where K: UnifyKey<Value = V>,
V: UnifyValue,
impl<'tcx, S, K, V> UnificationTable<S>
where
S: UnificationStore<Key = K, Value = V>,
K: UnifyKey<Value = V>,
V: UnifyValue,
{
/// Unions two keys without the possibility of failure; only
/// applicable to InfallibleUnifyValue.
pub fn union(&mut self, a_id: K, b_id: K)
where V: InfallibleUnifyValue
/// applicable when unify values use `NoError` as their error
/// type.
pub fn union<K1, K2>(&mut self, a_id: K1, b_id: K2)
where
K1: Into<K>,
K2: Into<K>,
V: UnifyValue<Error = NoError>,
{
self.unify_var_var(a_id, b_id).unwrap();
}
/// Unions a key and a value without the possibility of failure;
/// only applicable when unify values use `NoError` as their error
/// type.
pub fn union_value<K1>(&mut self, id: K1, value: V)
where
K1: Into<K>,
V: UnifyValue<Error = NoError>,
{
self.unify_var_value(id, value).unwrap();
}
/// Given two keys, indicates whether they have been unioned together.
pub fn unioned(&mut self, a_id: K, b_id: K) -> bool {
pub fn unioned<K1, K2>(&mut self, a_id: K1, b_id: K2) -> bool
where
K1: Into<K>,
K2: Into<K>,
{
self.find(a_id) == self.find(b_id)
}
/// Given a key, returns the (current) root key.
pub fn find(&mut self, id: K) -> K {
pub fn find<K1>(&mut self, id: K1) -> K
where
K1: Into<K>,
{
let id = id.into();
self.get_root_key(id)
}
pub fn unify_var_var(&mut self, a_id: K, b_id: K) -> Result<(), (V, V)> {
/// Unions together two variables, merging their values. If
/// merging the values fails, the error is propagated and this
/// method has no effect.
pub fn unify_var_var<K1, K2>(&mut self, a_id: K1, b_id: K2) -> Result<(), V::Error>
where
K1: Into<K>,
K2: Into<K>,
{
let a_id = a_id.into();
let b_id = b_id.into();
let root_a = self.get_root_key(a_id);
let root_b = self.get_root_key(b_id);
@ -408,21 +477,31 @@ impl<'tcx, K, V> UnificationTable<K>
return Ok(());
}
let combined = try!(V::unify_values(&self.value(root_a).value, &self.value(root_b).value));
let combined = V::unify_values(&self.value(root_a).value, &self.value(root_b).value)?;
Ok(self.unify_roots(root_a, root_b, combined))
}
/// Sets the value of the key `a_id` to `b`, attempting to merge
/// with the previous value.
pub fn unify_var_value(&mut self, a_id: K, b: V) -> Result<(), (V, V)> {
pub fn unify_var_value<K1>(&mut self, a_id: K1, b: V) -> Result<(), V::Error>
where
K1: Into<K>,
{
let a_id = a_id.into();
let root_a = self.get_root_key(a_id);
let value = try!(V::unify_values(&self.value(root_a).value, &b));
let value = V::unify_values(&self.value(root_a).value, &b)?;
self.update_value(root_a, |node| node.value = value);
Ok(())
}
pub fn probe_value(&mut self, id: K) -> V {
/// Returns the current value for the given key. If the key has
/// been union'd, this will give the value from the current root.
pub fn probe_value<K1>(&mut self, id: K1) -> V
where
K1: Into<K>,
{
let id = id.into();
let id = self.get_root_key(id);
self.value(id).value.clone()
}
@ -432,28 +511,27 @@ impl<'tcx, K, V> UnificationTable<K>
///////////////////////////////////////////////////////////////////////////
impl UnifyValue for () {
fn unify_values(_: &(), _: &()) -> Result<(), ((), ())> {
type Error = NoError;
fn unify_values(_: &(), _: &()) -> Result<(), NoError> {
Ok(())
}
}
impl InfallibleUnifyValue for () {
}
impl<V: UnifyValue> UnifyValue for Option<V> {
fn unify_values(a: &Option<V>, b: &Option<V>) -> Result<Self, (Self, Self)> {
type Error = V::Error;
fn unify_values(a: &Option<V>, b: &Option<V>) -> Result<Self, V::Error> {
match (a, b) {
(&None, &None) => Ok(None),
(&Some(ref v), &None) | (&None, &Some(ref v)) => Ok(Some(v.clone())),
(&Some(ref v), &None) |
(&None, &Some(ref v)) => Ok(Some(v.clone())),
(&Some(ref a), &Some(ref b)) => {
match V::unify_values(a, b) {
Ok(v) => Ok(Some(v)),
Err((a, b)) => Err((Some(a), Some(b))),
Err(err) => Err(err),
}
}
}
}
}
impl<V: InfallibleUnifyValue> InfallibleUnifyValue for Option<V> {
}

457
third_party/rust/ena/src/unify/tests.rs поставляемый
Просмотреть файл

@ -8,13 +8,19 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#[cfg(feature = "unstable")]
// Naming the benchmarks using uppercase letters helps them sort
// better.
#![allow(non_snake_case)]
#[cfg(feature = "bench")]
extern crate test;
#[cfg(feature = "unstable")]
#[cfg(feature = "bench")]
use self::test::Bencher;
use std::collections::HashSet;
use std::cmp;
use unify::{UnifyKey, UnifyValue, UnificationTable, InfallibleUnifyValue};
use unify::{NoError, InPlace, InPlaceUnificationTable, UnifyKey, EqUnifyValue, UnifyValue};
use unify::{UnificationStore, UnificationTable};
#[cfg(feature = "persistent")]
use unify::Persistent;
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
struct UnitKey(u32);
@ -32,41 +38,61 @@ impl UnifyKey for UnitKey {
}
}
macro_rules! all_modes {
($name:ident for $t:ty => $body:tt) => {
fn test_body<$name: UnificationStore<Key = $t, Value = <$t as UnifyKey>::Value>>() {
$body
}
test_body::<InPlace<$t>>();
#[cfg(feature = "persistent")]
test_body::<Persistent<$t>>();
}
}
#[test]
fn basic() {
let mut ut: UnificationTable<UnitKey> = UnificationTable::new();
let k1 = ut.new_key(());
let k2 = ut.new_key(());
assert_eq!(ut.unioned(k1, k2), false);
ut.union(k1, k2);
assert_eq!(ut.unioned(k1, k2), true);
all_modes! {
S for UnitKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(());
let k2 = ut.new_key(());
assert_eq!(ut.unioned(k1, k2), false);
ut.union(k1, k2);
assert_eq!(ut.unioned(k1, k2), true);
}
}
}
#[test]
fn big_array() {
let mut ut: UnificationTable<UnitKey> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 15;
all_modes! {
S for UnitKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 15;
for _ in 0..MAX {
keys.push(ut.new_key(()));
}
for _ in 0..MAX {
keys.push(ut.new_key(()));
}
for i in 1..MAX {
let l = keys[i - 1];
let r = keys[i];
ut.union(l, r);
}
for i in 1..MAX {
let l = keys[i - 1];
let r = keys[i];
ut.union(l, r);
}
for i in 0..MAX {
assert!(ut.unioned(keys[0], keys[i]));
for i in 0..MAX {
assert!(ut.unioned(keys[0], keys[i]));
}
}
}
}
#[cfg(feature = "unstable")]
#[bench]
fn big_array_bench(b: &mut Bencher) {
let mut ut: UnificationTable<UnitKey> = UnificationTable::new();
#[cfg(feature = "bench")]
fn big_array_bench_generic<S: UnificationStore<Key=UnitKey, Value=()>>(b: &mut Bencher) {
let mut ut: UnificationTable<S> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 15;
@ -87,52 +113,122 @@ fn big_array_bench(b: &mut Bencher) {
})
}
#[test]
fn even_odd() {
let mut ut: UnificationTable<UnitKey> = UnificationTable::new();
#[cfg(feature = "bench")]
#[bench]
fn big_array_bench_InPlace(b: &mut Bencher) {
big_array_bench_generic::<InPlace<UnitKey>>(b);
}
#[cfg(all(feature = "bench", feature = "persistent"))]
#[bench]
fn big_array_bench_Persistent(b: &mut Bencher) {
big_array_bench_generic::<Persistent<UnitKey>>(b);
}
#[cfg(feature = "bench")]
fn big_array_bench_in_snapshot_generic<S: UnificationStore<Key=UnitKey, Value=()>>(b: &mut Bencher) {
let mut ut: UnificationTable<S> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 10;
const MAX: usize = 1 << 15;
for i in 0..MAX {
let key = ut.new_key(());
keys.push(key);
for _ in 0..MAX {
keys.push(ut.new_key(()));
}
if i >= 2 {
ut.union(key, keys[i - 2]);
b.iter(|| {
let snapshot = ut.snapshot();
for i in 1..MAX {
let l = keys[i - 1];
let r = keys[i];
ut.union(l, r);
}
for i in 0..MAX {
assert!(ut.unioned(keys[0], keys[i]));
}
ut.rollback_to(snapshot);
})
}
#[cfg(feature = "bench")]
#[bench]
fn big_array_bench_in_snapshot_InPlace(b: &mut Bencher) {
big_array_bench_in_snapshot_generic::<InPlace<UnitKey>>(b);
}
#[cfg(all(feature = "bench", feature = "persistent"))]
#[bench]
fn big_array_bench_in_snapshot_Persistent(b: &mut Bencher) {
big_array_bench_in_snapshot_generic::<Persistent<UnitKey>>(b);
}
#[cfg(feature = "bench")]
fn big_array_bench_clone_generic<S: UnificationStore<Key=UnitKey, Value=()>>(b: &mut Bencher) {
let mut ut: UnificationTable<S> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 15;
for _ in 0..MAX {
keys.push(ut.new_key(()));
}
for i in 1..MAX {
assert!(!ut.unioned(keys[i - 1], keys[i]));
}
b.iter(|| {
let saved_table = ut.clone();
for i in 2..MAX {
assert!(ut.unioned(keys[i - 2], keys[i]));
}
for i in 1..MAX {
let l = keys[i - 1];
let r = keys[i];
ut.union(l, r);
}
for i in 0..MAX {
assert!(ut.unioned(keys[0], keys[i]));
}
ut = saved_table;
})
}
#[cfg(feature = "bench")]
#[bench]
fn big_array_bench_clone_InPlace(b: &mut Bencher) {
big_array_bench_clone_generic::<InPlace<UnitKey>>(b);
}
#[cfg(all(feature = "bench", feature = "persistent"))]
#[bench]
fn big_array_bench_clone_Persistent(b: &mut Bencher) {
big_array_bench_clone_generic::<Persistent<UnitKey>>(b);
}
#[test]
fn even_odd_iter() {
let mut ut: UnificationTable<UnitKey> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 10;
fn even_odd() {
all_modes! {
S for UnitKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let mut keys = Vec::new();
const MAX: usize = 1 << 10;
for i in 0..MAX {
let key = ut.new_key(());
keys.push(key);
for i in 0..MAX {
let key = ut.new_key(());
keys.push(key);
if i >= 2 {
ut.union(key, keys[i - 2]);
if i >= 2 {
ut.union(key, keys[i - 2]);
}
}
for i in 1..MAX {
assert!(!ut.unioned(keys[i - 1], keys[i]));
}
for i in 2..MAX {
assert!(ut.unioned(keys[i - 2], keys[i]));
}
}
}
let even_keys: HashSet<UnitKey> = ut.unioned_keys(keys[22]).collect();
assert_eq!(even_keys.len(), MAX / 2);
for key in even_keys {
assert!((key.0 & 1) == 0);
}
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
@ -151,89 +247,109 @@ impl UnifyKey for IntKey {
}
}
impl UnifyValue for i32 {
fn unify_values(&a: &i32, &b: &i32) -> Result<Self, (Self, Self)> {
if a == b {
Ok(a)
} else {
Err((a, b))
impl EqUnifyValue for i32 {}
#[test]
fn unify_same_int_twice() {
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(22)).is_ok());
assert!(ut.unify_var_var(k1, k2).is_ok());
assert_eq!(ut.probe_value(k1), Some(22));
}
}
}
#[test]
fn unify_same_int_twice() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(22)).is_ok());
assert!(ut.unify_var_var(k1, k2).is_ok());
assert_eq!(ut.probe_value(k1), Some(22));
}
#[test]
fn unify_vars_then_int_indirect() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_var(k1, k2).is_ok());
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert_eq!(ut.probe_value(k2), Some(22));
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_var(k1, k2).is_ok());
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert_eq!(ut.probe_value(k2), Some(22));
}
}
}
#[test]
fn unify_vars_different_ints_1() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_var(k1, k2).is_ok());
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(23)).is_err());
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_var(k1, k2).is_ok());
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(23)).is_err());
}
}
}
#[test]
fn unify_vars_different_ints_2() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_var(k2, k1).is_ok());
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(23)).is_err());
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_var(k2, k1).is_ok());
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(23)).is_err());
}
}
}
#[test]
fn unify_distinct_ints_then_vars() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(23)).is_ok());
assert!(ut.unify_var_var(k2, k1).is_err());
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(23)).is_ok());
assert!(ut.unify_var_var(k2, k1).is_err());
}
}
}
#[test]
fn unify_root_value_1() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
let k3 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_var(k1, k2).is_ok());
assert!(ut.unify_var_value(k3, Some(23)).is_ok());
assert!(ut.unify_var_var(k1, k3).is_err());
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
let k3 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_var(k1, k2).is_ok());
assert!(ut.unify_var_value(k3, Some(23)).is_ok());
assert!(ut.unify_var_var(k1, k3).is_err());
}
}
}
#[test]
fn unify_root_value_2() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
let k3 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_var(k2, k1).is_ok());
assert!(ut.unify_var_value(k3, Some(23)).is_ok());
assert!(ut.unify_var_var(k1, k3).is_err());
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
let k3 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_var(k2, k1).is_ok());
assert!(ut.unify_var_value(k3, Some(23)).is_ok());
assert!(ut.unify_var_var(k1, k3).is_err());
}
}
}
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
@ -253,9 +369,12 @@ impl UnifyKey for OrderedKey {
fn tag() -> &'static str {
"OrderedKey"
}
fn order_roots(a: OrderedKey, a_rank: &OrderedRank,
b: OrderedKey, b_rank: &OrderedRank)
-> Option<(OrderedKey, OrderedKey)> {
fn order_roots(
a: OrderedKey,
a_rank: &OrderedRank,
b: OrderedKey,
b_rank: &OrderedRank,
) -> Option<(OrderedKey, OrderedKey)> {
println!("{:?} vs {:?}", a_rank, b_rank);
if a_rank > b_rank {
Some((a, b))
@ -268,66 +387,90 @@ impl UnifyKey for OrderedKey {
}
impl UnifyValue for OrderedRank {
fn unify_values(value1: &Self, value2: &Self) -> Result<Self, (Self, Self)> {
type Error = NoError;
fn unify_values(value1: &Self, value2: &Self) -> Result<Self, NoError> {
Ok(OrderedRank(cmp::max(value1.0, value2.0)))
}
}
impl InfallibleUnifyValue for OrderedRank { }
#[test]
fn ordered_key() {
let mut ut: UnificationTable<OrderedKey> = UnificationTable::new();
all_modes! {
S for OrderedKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k0_1 = ut.new_key(OrderedRank(0));
let k0_2 = ut.new_key(OrderedRank(0));
let k0_3 = ut.new_key(OrderedRank(0));
let k0_4 = ut.new_key(OrderedRank(0));
let k0_1 = ut.new_key(OrderedRank(0));
let k0_2 = ut.new_key(OrderedRank(0));
let k0_3 = ut.new_key(OrderedRank(0));
let k0_4 = ut.new_key(OrderedRank(0));
ut.union(k0_1, k0_2); // rank of one of those will now be 1
ut.union(k0_3, k0_4); // rank of new root also 1
ut.union(k0_1, k0_3); // rank of new root now 2
ut.union(k0_1, k0_2); // rank of one of those will now be 1
ut.union(k0_3, k0_4); // rank of new root also 1
ut.union(k0_1, k0_3); // rank of new root now 2
let k0_5 = ut.new_key(OrderedRank(0));
let k0_6 = ut.new_key(OrderedRank(0));
ut.union(k0_5, k0_6); // rank of new root now 1
let k0_5 = ut.new_key(OrderedRank(0));
let k0_6 = ut.new_key(OrderedRank(0));
ut.union(k0_5, k0_6); // rank of new root now 1
ut.union(k0_1, k0_5); // new root rank 2, should not be k0_5 or k0_6
assert!(vec![k0_1, k0_2, k0_3, k0_4].contains(&ut.find(k0_1)));
ut.union(k0_1, k0_5); // new root rank 2, should not be k0_5 or k0_6
assert!(vec![k0_1, k0_2, k0_3, k0_4].contains(&ut.find(k0_1)));
}
}
}
#[test]
fn ordered_key_k1() {
let mut ut: UnificationTable<OrderedKey> = UnificationTable::new();
all_modes! {
S for UnitKey => {
let mut ut: InPlaceUnificationTable<OrderedKey> = UnificationTable::new();
let k0_1 = ut.new_key(OrderedRank(0));
let k0_2 = ut.new_key(OrderedRank(0));
let k0_3 = ut.new_key(OrderedRank(0));
let k0_4 = ut.new_key(OrderedRank(0));
let k0_1 = ut.new_key(OrderedRank(0));
let k0_2 = ut.new_key(OrderedRank(0));
let k0_3 = ut.new_key(OrderedRank(0));
let k0_4 = ut.new_key(OrderedRank(0));
ut.union(k0_1, k0_2); // rank of one of those will now be 1
ut.union(k0_3, k0_4); // rank of new root also 1
ut.union(k0_1, k0_3); // rank of new root now 2
ut.union(k0_1, k0_2); // rank of one of those will now be 1
ut.union(k0_3, k0_4); // rank of new root also 1
ut.union(k0_1, k0_3); // rank of new root now 2
let k1_5 = ut.new_key(OrderedRank(1));
let k1_6 = ut.new_key(OrderedRank(1));
ut.union(k1_5, k1_6); // rank of new root now 1
let k1_5 = ut.new_key(OrderedRank(1));
let k1_6 = ut.new_key(OrderedRank(1));
ut.union(k1_5, k1_6); // rank of new root now 1
ut.union(k0_1, k1_5); // even though k1 has lower rank, it wins
assert!(vec![k1_5, k1_6].contains(&ut.find(k0_1)),
"unexpected choice for root: {:?}", ut.find(k0_1));
ut.union(k0_1, k1_5); // even though k1 has lower rank, it wins
assert!(
vec![k1_5, k1_6].contains(&ut.find(k0_1)),
"unexpected choice for root: {:?}",
ut.find(k0_1)
);
}
}
}
/// Test that we *can* clone.
#[test]
fn clone_table() {
let mut ut: UnificationTable<IntKey> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(22)).is_ok());
assert!(ut.unify_var_var(k1, k2).is_ok());
all_modes! {
S for IntKey => {
let mut ut: UnificationTable<S> = UnificationTable::new();
let k1 = ut.new_key(None);
let k2 = ut.new_key(None);
let k3 = ut.new_key(None);
assert!(ut.unify_var_value(k1, Some(22)).is_ok());
assert!(ut.unify_var_value(k2, Some(22)).is_ok());
assert!(ut.unify_var_var(k1, k2).is_ok());
assert_eq!(ut.probe_value(k3), None);
let mut ut1 = ut.clone();
assert_eq!(ut1.probe_value(k1), Some(22));
let mut ut1 = ut.clone();
assert_eq!(ut1.probe_value(k1), Some(22));
assert_eq!(ut1.probe_value(k3), None);
assert!(ut.unify_var_value(k3, Some(44)).is_ok());
assert_eq!(ut1.probe_value(k1), Some(22));
assert_eq!(ut1.probe_value(k3), None);
assert_eq!(ut.probe_value(k3), Some(44));
}
}
}

1
third_party/rust/fake-simd/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"c63db0226f9aac6e001898735c81392b8f01dfc8b7245f37e290990562c3c0d8","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"52232c2cee3bb7d8cabe47ef367f1bf8bb607c22bdfca0219d6156cb7f446e9d","src/lib.rs":"2cd66d61acfb96f3425194c12695d8e55cf56c6fbd02de90033c45bdcc338c1a"},"package":"e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"}

9
third_party/rust/fake-simd/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,9 @@
[package]
name = "fake-simd"
version = "0.1.2"
authors = ["The Rust-Crypto Project Developers"]
license = "MIT/Apache-2.0"
description = "Crate for mimicking simd crate on stable Rust"
documentation = "https://docs.rs/fake-simd"
repository = "https://github.com/RustCrypto/utils"
keywords = ["simd"]

201
third_party/rust/fake-simd/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

26
third_party/rust/fake-simd/LICENSE-MIT поставляемый Normal file
Просмотреть файл

@ -0,0 +1,26 @@
Copyright (c) 2006-2009 Graydon Hoare
Copyright (c) 2009-2013 Mozilla Foundation
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

108
third_party/rust/fake-simd/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,108 @@
#![no_std]
use core::ops::{Add, BitAnd, BitOr, BitXor, Shl, Shr, Sub};
/// Four 32-bit lanes; a drop-in stand-in for the `simd` crate's `u32x4`
/// on stable Rust. Every operator is plain scalar code applied per lane.
#[derive(Clone, Copy, PartialEq, Eq)]
#[allow(non_camel_case_types)]
pub struct u32x4(pub u32, pub u32, pub u32, pub u32);
impl Add for u32x4 {
    type Output = u32x4;
    /// Lane-wise addition; overflow wraps, matching hardware SIMD.
    #[inline(always)]
    fn add(self, other: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, other);
        u32x4(
            a.wrapping_add(e),
            b.wrapping_add(f),
            c.wrapping_add(g),
            d.wrapping_add(h),
        )
    }
}
impl Sub for u32x4 {
    type Output = u32x4;
    /// Lane-wise subtraction; underflow wraps.
    #[inline(always)]
    fn sub(self, other: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, other);
        u32x4(
            a.wrapping_sub(e),
            b.wrapping_sub(f),
            c.wrapping_sub(g),
            d.wrapping_sub(h),
        )
    }
}
impl BitAnd for u32x4 {
    type Output = u32x4;
    /// Lane-wise bitwise AND.
    #[inline(always)]
    fn bitand(self, other: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, other);
        u32x4(a & e, b & f, c & g, d & h)
    }
}
impl BitOr for u32x4 {
    type Output = u32x4;
    /// Lane-wise bitwise OR.
    #[inline(always)]
    fn bitor(self, other: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, other);
        u32x4(a | e, b | f, c | g, d | h)
    }
}
impl BitXor for u32x4 {
    type Output = u32x4;
    /// Lane-wise bitwise XOR.
    #[inline(always)]
    fn bitxor(self, other: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, other);
        u32x4(a ^ e, b ^ f, c ^ g, d ^ h)
    }
}
impl Shl<usize> for u32x4 {
    type Output = u32x4;
    /// Shift every lane left by the same amount. Like the scalar `<<`,
    /// a shift of 32 or more panics in debug builds (same as original).
    #[inline(always)]
    fn shl(self, shift: usize) -> u32x4 {
        let u32x4(a, b, c, d) = self;
        u32x4(a << shift, b << shift, c << shift, d << shift)
    }
}
impl Shl<u32x4> for u32x4 {
    type Output = u32x4;
    /// Shift each lane left by the corresponding lane of `shifts`.
    #[inline(always)]
    fn shl(self, shifts: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, shifts);
        u32x4(a << e, b << f, c << g, d << h)
    }
}
impl Shr<usize> for u32x4 {
    type Output = u32x4;
    /// Shift every lane right by the same amount.
    #[inline(always)]
    fn shr(self, shift: usize) -> u32x4 {
        let u32x4(a, b, c, d) = self;
        u32x4(a >> shift, b >> shift, c >> shift, d >> shift)
    }
}
impl Shr<u32x4> for u32x4 {
    type Output = u32x4;
    /// Shift each lane right by the corresponding lane of `shifts`.
    #[inline(always)]
    fn shr(self, shifts: u32x4) -> u32x4 {
        let (u32x4(a, b, c, d), u32x4(e, f, g, h)) = (self, shifts);
        u32x4(a >> e, b >> f, c >> g, d >> h)
    }
}
/// Two 64-bit lanes, mirroring the `simd` crate's `u64x2`.
#[derive(Clone, Copy)]
#[allow(non_camel_case_types)]
pub struct u64x2(pub u64, pub u64);
impl Add for u64x2 {
    type Output = u64x2;
    /// Lane-wise wrapping addition.
    #[inline(always)]
    fn add(self, other: u64x2) -> u64x2 {
        let (u64x2(a, b), u64x2(c, d)) = (self, other);
        u64x2(a.wrapping_add(c), b.wrapping_add(d))
    }
}

1
third_party/rust/generic-array/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{".travis.yml":"e54c4d5b57dd91d875a36d2d57d68fef9c14bb0c31481f2e18877edf040f8676","Cargo.toml":"87ff65d640c137c26d338f96e21e769af1e1b2e7fa615b40a1bcc755448bb118","LICENSE":"ad4fcfaf8d5b12b97409c137a03d4a4e4b21024c65c54f976cc3b609c1bd5b0f","README.md":"9a1a45416eac57050036b13df6ec84d21d555e820726af3c782896bd9d37d94b","rustfmt.toml":"2a298b4ce1fe6e16b8f281a0035567b8eb15042ed3062729fd28224f29c2f75a","src/arr.rs":"cc1ea0a9ef6a524b90767cc8a89f6b939394a2948a645ed313c0bf5ce5a258a4","src/hex.rs":"bfbf304fb4dea6f7edc0569b38bf2ac7657ce089c5761891321722509e3b5076","src/impl_serde.rs":"805885478728b3c205b842d46deb377b7dd6dd4c4c50254064431f49f0981a2a","src/impls.rs":"8c54e294a82a2bf344bdcb9949b8a84903fb65698d6b1b1e0ab9f5e7847be64f","src/iter.rs":"e52217f04d0dc046f13ef2e3539b90eabd4d55bb85cf40f76ba0bf86d5e55ef0","src/lib.rs":"da93fa505eee94b40fce0fe98e26ed3bb4d2bc4d4869af01598b6e54fc9c0f8d","tests/hex.rs":"e909bc0564e7d52c5fcf172dfc0fac7085010c6a21d38581bf73a54ab2e256e1","tests/import_name.rs":"1235729ecbde47fc9a38b3bf35c750a53ed55e3cf967c9d2b24fd759dc9e9e0c","tests/mod.rs":"f4100c5338906c038636f98f4d2b3d272f59580662afa89d915eafb96d7bbcf9"},"package":"ef25c5683767570c2bbd7deba372926a55eaae9982d7726ee2a1050239d45b9d"}

18
third_party/rust/generic-array/.travis.yml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,18 @@
language: rust
script:
- cd $TRAVIS_BUILD_DIR
- cargo build
- cargo test
- cargo build --features serde
- cargo test --features serde
after_success: |-
[ $TRAVIS_BRANCH = master ] &&
[ $TRAVIS_PULL_REQUEST = false ] &&
cargo doc &&
echo "<meta http-equiv=refresh content=0;url=`echo $TRAVIS_REPO_SLUG | cut -d '/' -f 2`/index.html>" > target/doc/index.html &&
sudo pip install ghp-import &&
ghp-import -n target/doc &&
git push -fq https://${GH_TOKEN}@github.com/${TRAVIS_REPO_SLUG}.git gh-pages
env:
global:
secure: te+DVowxg7YWHJHKRE2eEKEg5lK8IwK4aeKZ6rsDMaTcQFzP+jzSYJiodVuDMXy45sfDMCnkWmVmpfXFI5tCLBSqTDXXOZ0UpE2f4fI0d3inH6McEoXNM43HNZqvEWj6Uc4PzTSzkywcAhg39I08PRbp5zzdj+UhB0Ty++Twwjpipr2KQMNmu9RZEwPtbyjqE69yXkDWy1oM3o51uPnpK0RUH+ZE+B0StTG6CMzVY3gW+kQX96Ow+LYkhgn/YjfubVvKO7QHz8Nd1hOxg78tn1ZTHIazN7p3bJejpsZoU92cNCcx1xM0vV/rXNN1pLxzJOBxNC9tU9FNJAaLsg5kAVGZi8Xvu62nUmkpzki71/nilHBAUxJHGIyv0H52p4DyITEN8NzR5WkqN4qBv814Dpvna1Ua3TPqiYWP/LBb+xM27DuPHKuOifePNWehE84qhQMPgArQyiNCgfKaKbaiFO+J4jiUfEV/1aztuEFyHftLoRYstmHfMkhwYHfSf683QGjlqqoL3SFClp1sKAp8WO5b5ZasT9fOGaqPWi8g28/ZGIu67wocT/hJvXxwozAycsXV36JVHs1ab/ujRYMUbcnObx8E5taKLKhWn2jYWsrJ99bUag7F6wTz1erG0eboScTD8QgVY7Zfvz0Eh1MfePOhEJGZfETR80BypC9fZhY=

32
third_party/rust/generic-array/Cargo.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,32 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g. crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "generic-array"
version = "0.9.0"
authors = ["Bartłomiej Kamiński <fizyk20@gmail.com>"]
description = "Generic types implementing functionality of arrays"
documentation = "http://fizyk20.github.io/generic-array/generic_array/"
license = "MIT"
repository = "https://github.com/fizyk20/generic-array.git"
[lib]
name = "generic_array"
[dependencies.typenum]
version = "1.9"
[dependencies.serde]
version = "1.0"
optional = true
default-features = false
[dev-dependencies.serde_json]
version = "1.0"

21
third_party/rust/generic-array/LICENSE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Bartłomiej Kamiński
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

34
third_party/rust/generic-array/README.md поставляемый Normal file
Просмотреть файл

@ -0,0 +1,34 @@
[![Crates.io](https://img.shields.io/crates/v/generic-array.svg)](https://crates.io/crates/generic-array)
[![Build Status](https://travis-ci.org/fizyk20/generic-array.svg?branch=master)](https://travis-ci.org/fizyk20/generic-array)
# generic-array
This crate implements generic array types for Rust.
[Documentation](http://fizyk20.github.io/generic-array/generic_array/)
## Usage
The Rust arrays `[T; N]` are problematic in that they can't be used generically with respect to `N`, so for example this won't work:
```rust
struct Foo<N> {
data: [i32; N]
}
```
**generic-array** defines a new trait `ArrayLength<T>` and a struct `GenericArray<T, N: ArrayLength<T>>`, which let the above be implemented as:
```rust
struct Foo<N: ArrayLength<i32>> {
data: GenericArray<i32, N>
}
```
To actually define a type implementing `ArrayLength`, you can use unsigned integer types defined in [typenum](https://github.com/paholg/typenum) crate - for example, `GenericArray<T, U5>` would work almost like `[T; 5]` :)
In version 0.1.1 an `arr!` macro was introduced, allowing for creation of arrays as shown below:
```rust
let array = arr![u32; 1, 2, 3];
assert_eq!(array[2], 3);
```

3
third_party/rust/generic-array/rustfmt.toml поставляемый Normal file
Просмотреть файл

@ -0,0 +1,3 @@
reorder_imports = true
reorder_imported_names = true
use_try_shorthand = true

57
third_party/rust/generic-array/src/arr.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,57 @@
//! Implementation for `arr!` macro.
use super::ArrayLength;
use core::ops::Add;
use typenum::U1;
/// Helper trait for `arr!` macro
pub trait AddLength<T, N: ArrayLength<T>>: ArrayLength<T> {
    /// Resulting length
    type Output: ArrayLength<T>;
}
// Blanket impl: any two type-level lengths addable via typenum's `Add`
// yield another valid array length.
impl<T, N1, N2> AddLength<T, N2> for N1
where
    N1: ArrayLength<T> + Add<N2>,
    N2: ArrayLength<T>,
    <N1 as Add<N2>>::Output: ArrayLength<T>,
{
    type Output = <N1 as Add<N2>>::Output;
}
/// Helper type for `arr!` macro
pub type Inc<T, U> = <U as AddLength<T, U1>>::Output;
// Recursive accumulator: elements move one at a time from the "pending"
// list (second brackets) to the "done" list (first brackets), incrementing
// the type-level length `$N` per element. The base case (empty pending
// list) transmutes the finished fixed-size array into `GenericArray<$T, $N>`.
#[doc(hidden)]
#[macro_export]
macro_rules! arr_impl {
    ($T:ty; $N:ty, [$($x:expr),*], []) => ({
        unsafe { $crate::transmute::<_, $crate::GenericArray<$T, $N>>([$($x),*]) }
    });
    ($T:ty; $N:ty, [], [$x1:expr]) => (
        arr_impl!($T; $crate::arr::Inc<$T, $N>, [$x1 as $T], [])
    );
    ($T:ty; $N:ty, [], [$x1:expr, $($x:expr),+]) => (
        arr_impl!($T; $crate::arr::Inc<$T, $N>, [$x1 as $T], [$($x),*])
    );
    ($T:ty; $N:ty, [$($y:expr),+], [$x1:expr]) => (
        arr_impl!($T; $crate::arr::Inc<$T, $N>, [$($y),*, $x1 as $T], [])
    );
    ($T:ty; $N:ty, [$($y:expr),+], [$x1:expr, $($x:expr),+]) => (
        arr_impl!($T; $crate::arr::Inc<$T, $N>, [$($y),*, $x1 as $T], [$($x),*])
    );
}
/// Macro allowing for easy generation of Generic Arrays.
/// Example: `let test = arr![u32; 1, 2, 3];`
#[macro_export]
macro_rules! arr {
    ($T:ty;) => ({
        unsafe { $crate::transmute::<[$T; 0], $crate::GenericArray<$T, $crate::typenum::U0>>([]) }
    });
    ($T:ty; $($x:expr),*) => (
        arr_impl!($T; $crate::typenum::U0, [], [$($x),*])
    );
    ($($x:expr,)+) => (arr![$($x),*]);
    // NOTE(review): the `"""…"` arm below is verbatim upstream. Invoking
    // `arr![]` with no type expands to adjacent string literals, which fails
    // to compile and surfaces this message. Do not "fix" vendored code.
    () => ("""Macro requires a type, e.g. `let array = arr![u32; 1, 2, 3];`")
}

101
third_party/rust/generic-array/src/hex.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,101 @@
//! Generic array are commonly used as a return value for hash digests, so
//! it's a good idea to allow to hexlify them easily. This module implements
//! `std::fmt::LowerHex` and `std::fmt::UpperHex` traits.
//!
//! Example:
//!
//! ```rust
//! # #[macro_use]
//! # extern crate generic_array;
//! # extern crate typenum;
//! # fn main() {
//! let array = arr![u8; 10, 20, 30];
//! assert_eq!(format!("{:x}", array), "0a141e");
//! # }
//! ```
//!
use {ArrayLength, GenericArray};
use core::fmt;
use core::ops::Add;
use core::str;
use typenum::*;
static LOWER_CHARS: &'static [u8] = b"0123456789abcdef";
static UPPER_CHARS: &'static [u8] = b"0123456789ABCDEF";
// `{:x}` formatting: renders the byte array as lowercase hex. A format
// precision (e.g. `{:.8x}`) limits the output to that many input bytes.
// The `T + T` bound exists so a stack buffer of 2 chars per byte can be
// sized at the type level for the small-array fast path.
impl<T: ArrayLength<u8>> fmt::LowerHex for GenericArray<u8, T>
where
    T: Add<T>,
    <T as Add<T>>::Output: ArrayLength<u8>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // NOTE(review): a precision greater than `self.len()` would make
        // `&res[..max_digits * 2]` below slice past the filled region and
        // panic — confirm upstream that oversized precisions cannot occur.
        let max_digits = f.precision().unwrap_or_else(|| self.len());
        if T::to_usize() < 1024 {
            // For small arrays use a stack allocated
            // buffer of 2x number of bytes
            let mut res = GenericArray::<u8, Sum<T, T>>::default();
            for (i, c) in self.iter().take(max_digits).enumerate() {
                res[i * 2] = LOWER_CHARS[(c >> 4) as usize];
                res[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize];
            }
            // The buffer holds only ASCII hex digits, hence valid UTF-8.
            f.write_str(
                unsafe { str::from_utf8_unchecked(&res[..max_digits * 2]) },
            )?;
        } else {
            // For large array use chunks of up to 1024 bytes (2048 hex chars)
            let mut buf = [0u8; 2048];
            for chunk in self[..max_digits].chunks(1024) {
                for (i, c) in chunk.iter().enumerate() {
                    buf[i * 2] = LOWER_CHARS[(c >> 4) as usize];
                    buf[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize];
                }
                // Again only ASCII hex digits — valid UTF-8.
                f.write_str(unsafe {
                    str::from_utf8_unchecked(&buf[..chunk.len() * 2])
                })?;
            }
        }
        Ok(())
    }
}
// `{:X}` formatting: identical to the `LowerHex` impl above except that it
// indexes the uppercase digit table.
impl<T: ArrayLength<u8>> fmt::UpperHex for GenericArray<u8, T>
where
    T: Add<T>,
    <T as Add<T>>::Output: ArrayLength<u8>,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Bytes to render; see the precision note on the LowerHex impl above.
        let max_digits = f.precision().unwrap_or_else(|| self.len());
        if T::to_usize() < 1024 {
            // For small arrays use a stack allocated
            // buffer of 2x number of bytes
            let mut res = GenericArray::<u8, Sum<T, T>>::default();
            for (i, c) in self.iter().take(max_digits).enumerate() {
                res[i * 2] = UPPER_CHARS[(c >> 4) as usize];
                res[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize];
            }
            // ASCII-only buffer — valid UTF-8.
            f.write_str(
                unsafe { str::from_utf8_unchecked(&res[..max_digits * 2]) },
            )?;
        } else {
            // For large array use chunks of up to 1024 bytes (2048 hex chars)
            let mut buf = [0u8; 2048];
            for chunk in self[..max_digits].chunks(1024) {
                for (i, c) in chunk.iter().enumerate() {
                    buf[i * 2] = UPPER_CHARS[(c >> 4) as usize];
                    buf[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize];
                }
                // ASCII-only buffer — valid UTF-8.
                f.write_str(unsafe {
                    str::from_utf8_unchecked(&buf[..chunk.len() * 2])
                })?;
            }
        }
        Ok(())
    }
}

68
third_party/rust/generic-array/src/impl_serde.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,68 @@
//! Serde serialization/deserialization implementation
use {ArrayLength, GenericArray};
use core::fmt;
use core::marker::PhantomData;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::de::{self, SeqAccess, Visitor};
// Serialize the array as a plain sequence of its elements; the length is
// implied by the type parameter `N`, not encoded separately.
impl<T, N> Serialize for GenericArray<T, N>
where
    T: Serialize,
    N: ArrayLength<T>,
{
    #[inline]
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.collect_seq(self.iter())
    }
}
// Visitor used by `Deserialize` below; the phantom fields only carry the
// element type and type-level length through to `Visitor::Value`.
struct GAVisitor<T, N> {
    _t: PhantomData<T>,
    _n: PhantomData<N>,
}
impl<'de, T, N> Visitor<'de> for GAVisitor<T, N>
where
    T: Deserialize<'de> + Default,
    N: ArrayLength<T>,
{
    type Value = GenericArray<T, N>;
    fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.write_str("struct GenericArray")
    }
    // Pre-construct a default array and fill it slot by slot; a sequence
    // shorter than `N` elements is reported as `invalid_length`. The
    // `T: Default` bound exists only to allow the pre-construction.
    // NOTE(review): elements beyond `N` are left unconsumed here — whether
    // that is an error depends on the deserializer; verify upstream.
    fn visit_seq<A>(self, mut seq: A) -> Result<GenericArray<T, N>, A::Error>
    where
        A: SeqAccess<'de>,
    {
        let mut result = GenericArray::default();
        for i in 0..N::to_usize() {
            result[i] = seq.next_element()?.ok_or_else(
                || de::Error::invalid_length(i, &self),
            )?;
        }
        Ok(result)
    }
}
// Deserialize by asking the format for a sequence and handing it to the
// visitor above.
impl<'de, T, N> Deserialize<'de> for GenericArray<T, N>
where
    T: Deserialize<'de> + Default,
    N: ArrayLength<T>,
{
    fn deserialize<D>(deserializer: D) -> Result<GenericArray<T, N>, D::Error>
    where
        D: Deserializer<'de>,
    {
        let visitor = GAVisitor {
            _t: PhantomData,
            _n: PhantomData,
        };
        deserializer.deserialize_seq(visitor)
    }
}

171
third_party/rust/generic-array/src/impls.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,171 @@
use super::{ArrayLength, GenericArray};
use core::borrow::{Borrow, BorrowMut};
use core::cmp::Ordering;
use core::fmt::{self, Debug};
use core::hash::{Hash, Hasher};
impl<T: Default, N> Default for GenericArray<T, N>
where
N: ArrayLength<T>,
{
#[inline]
fn default() -> Self {
Self::generate(|_| T::default())
}
}
impl<T: Clone, N> Clone for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Element-wise clone into a freshly generated array.
    fn clone(&self) -> GenericArray<T, N> {
        self.map_ref(Clone::clone)
    }
}
// Marker impl: the array is `Copy` when both the element type and the
// recursive backing storage (`N::ArrayType`) are `Copy`.
impl<T: Copy, N> Copy for GenericArray<T, N>
where
    N: ArrayLength<T>,
    N::ArrayType: Copy,
{
}
impl<T: PartialEq, N> PartialEq for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Delegate to slice equality over the full contents.
    fn eq(&self, other: &Self) -> bool {
        self.as_slice() == other.as_slice()
    }
}
// Marker impl: equality is total whenever the element type's is.
impl<T: Eq, N> Eq for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
}
impl<T: PartialOrd, N> PartialOrd for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Lexicographic comparison, delegated to the slice impl.
    fn partial_cmp(&self, other: &GenericArray<T, N>) -> Option<Ordering> {
        self.as_slice().partial_cmp(other.as_slice())
    }
}
impl<T: Ord, N> Ord for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Lexicographic total ordering, delegated to the slice impl.
    fn cmp(&self, other: &GenericArray<T, N>) -> Ordering {
        self.as_slice().cmp(other.as_slice())
    }
}
impl<T: Debug, N> Debug for GenericArray<T, N>
where
N: ArrayLength<T>,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self[..].fmt(fmt)
}
}
impl<T, N> Borrow<[T]> for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // View the array as a borrowed slice.
    fn borrow(&self) -> &[T] {
        self.as_slice()
    }
}
impl<T, N> BorrowMut<[T]> for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // View the array as a mutably borrowed slice.
    fn borrow_mut(&mut self) -> &mut [T] {
        self.as_mut_slice()
    }
}
impl<T, N> AsRef<[T]> for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Cheap reference conversion to a slice.
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}
impl<T, N> AsMut<[T]> for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Cheap mutable reference conversion to a slice.
    fn as_mut(&mut self) -> &mut [T] {
        self.as_mut_slice()
    }
}
impl<T: Hash, N> Hash for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // Hash identically to the equivalent slice so borrowed lookups agree.
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        self.as_slice().hash(state)
    }
}
// Generates `From<[T; N]>` impls for fixed-size native arrays.
// `[T; $n]` and `GenericArray<T, $ty>` have identical layout, so the
// conversion is a bitwise move: `transmute_copy` duplicates the bytes and
// `forget` prevents the source array from dropping them a second time.
macro_rules! impl_from {
    ($($n: expr => $ty: ty),*) => {
        $(
            impl<T> From<[T; $n]> for GenericArray<T, $ty> {
                fn from(arr: [T; $n]) -> Self {
                    use core::mem::{forget, transmute_copy};
                    // SAFETY relies on the layout equivalence above;
                    // forget(arr) keeps ownership with the copy.
                    let x = unsafe { transmute_copy(&arr) };
                    forget(arr);
                    x
                }
            }
        )*
    }
}
// `From` conversions for native arrays `[T; 1]` through `[T; 32]`,
// mirroring the std library's historical 1..=32 array impl range.
impl_from! {
    1 => ::typenum::U1,
    2 => ::typenum::U2,
    3 => ::typenum::U3,
    4 => ::typenum::U4,
    5 => ::typenum::U5,
    6 => ::typenum::U6,
    7 => ::typenum::U7,
    8 => ::typenum::U8,
    9 => ::typenum::U9,
    10 => ::typenum::U10,
    11 => ::typenum::U11,
    12 => ::typenum::U12,
    13 => ::typenum::U13,
    14 => ::typenum::U14,
    15 => ::typenum::U15,
    16 => ::typenum::U16,
    17 => ::typenum::U17,
    18 => ::typenum::U18,
    19 => ::typenum::U19,
    20 => ::typenum::U20,
    21 => ::typenum::U21,
    22 => ::typenum::U22,
    23 => ::typenum::U23,
    24 => ::typenum::U24,
    25 => ::typenum::U25,
    26 => ::typenum::U26,
    27 => ::typenum::U27,
    28 => ::typenum::U28,
    29 => ::typenum::U29,
    30 => ::typenum::U30,
    31 => ::typenum::U31,
    32 => ::typenum::U32
}

117
third_party/rust/generic-array/src/iter.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,117 @@
//! `GenericArray` iterator implementation.
use super::{ArrayLength, GenericArray};
use core::{cmp, ptr};
use core::mem::ManuallyDrop;
/// An iterator that moves out of a `GenericArray`
pub struct GenericArrayIter<T, N: ArrayLength<T>> {
    // Invariants: index <= index_back <= N
    // Only values in array[index..index_back] are alive at any given time.
    // Values from array[..index] and array[index_back..] are already moved/dropped.
    // `ManuallyDrop` prevents the array itself from dropping elements;
    // the iterator (and its Drop impl) own that responsibility.
    array: ManuallyDrop<GenericArray<T, N>>,
    index: usize,
    index_back: usize,
}
impl<T, N> IntoIterator for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    type Item = T;
    type IntoIter = GenericArrayIter<T, N>;
    // Wrap the array in `ManuallyDrop` so the iterator alone decides
    // which elements still need dropping.
    fn into_iter(self) -> Self::IntoIter {
        let len = N::to_usize();
        GenericArrayIter {
            array: ManuallyDrop::new(self),
            index: 0,
            index_back: len,
        }
    }
}
impl<T, N> Drop for GenericArrayIter<T, N>
where
    N: ArrayLength<T>,
{
    // Drops only the not-yet-yielded elements (the live window
    // array[index..index_back]); yielded ones were moved out by ptr::read.
    fn drop(&mut self) {
        // Drop values that are still alive.
        for p in &mut self.array[self.index..self.index_back] {
            unsafe {
                ptr::drop_in_place(p);
            }
        }
    }
}
impl<T, N> Iterator for GenericArrayIter<T, N>
where
    N: ArrayLength<T>,
{
    type Item = T;
    // Moves the front element out with ptr::read; advancing `index` first
    // keeps the Drop impl from double-dropping it.
    fn next(&mut self) -> Option<T> {
        if self.len() > 0 {
            unsafe {
                let p = self.array.get_unchecked(self.index);
                self.index += 1;
                Some(ptr::read(p))
            }
        } else {
            None
        }
    }
    // Exact size is always known for a fixed-length array.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();
        (len, Some(len))
    }
    fn count(self) -> usize {
        self.len()
    }
    // Skipped elements must still be dropped; `index` is bumped before each
    // drop so a panicking destructor cannot cause a double drop.
    fn nth(&mut self, n: usize) -> Option<T> {
        // First consume values prior to the nth.
        let ndrop = cmp::min(n, self.len());
        for p in &mut self.array[self.index..self.index + ndrop] {
            self.index += 1;
            unsafe {
                ptr::drop_in_place(p);
            }
        }
        self.next()
    }
    fn last(mut self) -> Option<T> {
        // Note, everything else will correctly drop first as `self` leaves scope.
        self.next_back()
    }
}
impl<T, N> DoubleEndedIterator for GenericArrayIter<T, N>
where
    N: ArrayLength<T>,
{
    // Mirror of `next`: shrink the live window from the back, then move
    // the element out. Decrementing `index_back` first preserves the
    // no-double-drop invariant.
    fn next_back(&mut self) -> Option<T> {
        if self.len() > 0 {
            self.index_back -= 1;
            unsafe {
                let p = self.array.get_unchecked(self.index_back);
                Some(ptr::read(p))
            }
        } else {
            None
        }
    }
}
impl<T, N> ExactSizeIterator for GenericArrayIter<T, N>
where
    N: ArrayLength<T>,
{
    // Size of the live window; invariant index <= index_back makes the
    // subtraction safe.
    fn len(&self) -> usize {
        self.index_back - self.index
    }
}

464
third_party/rust/generic-array/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,464 @@
//! This crate implements a structure that can be used as a generic array type.
//! Core Rust array types `[T; N]` can't be used generically with
//! respect to `N`, so for example this:
//!
//! ```{should_fail}
//! struct Foo<T, N> {
//! data: [T; N]
//! }
//! ```
//!
//! won't work.
//!
//! **generic-array** exports a `GenericArray<T,N>` type, which lets
//! the above be implemented as:
//!
//! ```
//! # use generic_array::{ArrayLength, GenericArray};
//! struct Foo<T, N: ArrayLength<T>> {
//! data: GenericArray<T,N>
//! }
//! ```
//!
//! The `ArrayLength<T>` trait is implemented by default for
//! [unsigned integer types](../typenum/uint/index.html) from
//! [typenum](../typenum/index.html).
//!
//! For ease of use, an `arr!` macro is provided - example below:
//!
//! ```
//! # #[macro_use]
//! # extern crate generic_array;
//! # extern crate typenum;
//! # fn main() {
//! let array = arr![u32; 1, 2, 3];
//! assert_eq!(array[2], 3);
//! # }
//! ```
//#![deny(missing_docs)]
#![no_std]
pub extern crate typenum;
#[cfg(feature = "serde")]
extern crate serde;
mod hex;
mod impls;
#[cfg(feature = "serde")]
pub mod impl_serde;
use core::{mem, ptr, slice};
use core::marker::PhantomData;
use core::mem::ManuallyDrop;
pub use core::mem::transmute;
use core::ops::{Deref, DerefMut};
use typenum::bit::{B0, B1};
use typenum::uint::{UInt, UTerm, Unsigned};
#[cfg_attr(test, macro_use)]
pub mod arr;
pub mod iter;
pub use iter::GenericArrayIter;
/// Trait making `GenericArray` work, marking types to be used as length of an array
///
/// Unsafe because implementations promise that `ArrayType` has exactly
/// `Self::to_usize()` contiguous elements of type `T`.
pub unsafe trait ArrayLength<T>: Unsigned {
    /// Associated type representing the array type for the number
    type ArrayType;
}
// Base case of the type-level recursion: length zero stores nothing.
unsafe impl<T> ArrayLength<T> for UTerm {
    #[doc(hidden)]
    type ArrayType = ();
}
/// Internal type used to generate a struct of appropriate size
// Even case: 2n elements stored as two adjacent n-element halves.
// `repr(C)` guarantees the halves are laid out contiguously in order.
#[allow(dead_code)]
#[repr(C)]
#[doc(hidden)]
pub struct GenericArrayImplEven<T, U> {
    parent1: U,
    parent2: U,
    _marker: PhantomData<T>,
}
impl<T: Clone, U: Clone> Clone for GenericArrayImplEven<T, U> {
    // Clone each half; the marker field is zero-sized.
    fn clone(&self) -> GenericArrayImplEven<T, U> {
        let first = self.parent1.clone();
        let second = self.parent2.clone();
        GenericArrayImplEven {
            parent1: first,
            parent2: second,
            _marker: PhantomData,
        }
    }
}
// Bitwise copy is sound when both halves are themselves Copy.
impl<T: Copy, U: Copy> Copy for GenericArrayImplEven<T, U> {}
/// Internal type used to generate a struct of appropriate size
// Odd case: 2n+1 elements stored as two n-element halves plus one `T`.
#[allow(dead_code)]
#[repr(C)]
#[doc(hidden)]
pub struct GenericArrayImplOdd<T, U> {
    parent1: U,
    parent2: U,
    data: T,
}
impl<T: Clone, U: Clone> Clone for GenericArrayImplOdd<T, U> {
    // Clone both halves plus the trailing element.
    fn clone(&self) -> GenericArrayImplOdd<T, U> {
        let first = self.parent1.clone();
        let second = self.parent2.clone();
        let tail = self.data.clone();
        GenericArrayImplOdd {
            parent1: first,
            parent2: second,
            data: tail,
        }
    }
}
// Bitwise copy is sound when both halves and the tail element are Copy.
impl<T: Copy, U: Copy> Copy for GenericArrayImplOdd<T, U> {}
// Even length 2N: storage is two copies of the N-length storage.
unsafe impl<T, N: ArrayLength<T>> ArrayLength<T> for UInt<N, B0> {
    #[doc(hidden)]
    type ArrayType = GenericArrayImplEven<T, N::ArrayType>;
}
// Odd length 2N+1: storage is two N-length halves plus one element.
unsafe impl<T, N: ArrayLength<T>> ArrayLength<T> for UInt<N, B1> {
    #[doc(hidden)]
    type ArrayType = GenericArrayImplOdd<T, N::ArrayType>;
}
/// Struct representing a generic array - `GenericArray<T, N>` works like [T; N]
// The recursive `ArrayType` expands to exactly N contiguous `T`s, which is
// what the `Deref`/`DerefMut` slice casts below rely on.
#[allow(dead_code)]
pub struct GenericArray<T, U: ArrayLength<T>> {
    data: U::ArrayType,
}
impl<T, N> Deref for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    type Target = [T];
    // SAFETY relies on the `ArrayLength` contract: the struct is exactly
    // N contiguous `T`s, so it can be reinterpreted as a slice.
    fn deref(&self) -> &[T] {
        unsafe { slice::from_raw_parts(self as *const Self as *const T, N::to_usize()) }
    }
}
impl<T, N> DerefMut for GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    // SAFETY: same layout argument as `Deref`; &mut self guarantees
    // exclusive access for the returned mutable slice.
    fn deref_mut(&mut self) -> &mut [T] {
        unsafe { slice::from_raw_parts_mut(self as *mut Self as *mut T, N::to_usize()) }
    }
}
// Panic-safe incremental initializer: slots [0, position) are initialized;
// on drop only those are destructed, the rest stay untouched.
struct ArrayBuilder<T, N: ArrayLength<T>> {
    array: ManuallyDrop<GenericArray<T, N>>,
    position: usize,
}
impl<T, N: ArrayLength<T>> ArrayBuilder<T, N> {
    // Starts with fully uninitialized storage; callers must write each slot
    // with ptr::write and advance `position` after every write.
    // NOTE(review): `mem::uninitialized` is deprecated and UB for types with
    // invalid bit patterns; `MaybeUninit` is the modern replacement — confirm
    // before taking this pattern elsewhere.
    fn new() -> ArrayBuilder<T, N> {
        ArrayBuilder {
            array: ManuallyDrop::new(unsafe { mem::uninitialized() }),
            position: 0,
        }
    }
    // Finalize: take the array out and skip the builder's Drop so the
    // now-owned elements aren't dropped a second time.
    fn into_inner(self) -> GenericArray<T, N> {
        let array = unsafe { ptr::read(&self.array) };
        mem::forget(self);
        ManuallyDrop::into_inner(array)
    }
}
impl<T, N: ArrayLength<T>> Drop for ArrayBuilder<T, N> {
    // Runs only when construction did not finish (e.g. a generator panicked):
    // drop exactly the slots that were initialized.
    fn drop(&mut self) {
        for value in self.array.iter_mut().take(self.position) {
            unsafe {
                ptr::drop_in_place(value);
            }
        }
    }
}
// Panic-safe element mover: slots [0, position) have been moved out;
// on drop only the remaining slots [position, N) are destructed.
struct ArrayConsumer<T, N: ArrayLength<T>> {
    array: ManuallyDrop<GenericArray<T, N>>,
    position: usize,
}
impl<T, N: ArrayLength<T>> ArrayConsumer<T, N> {
    // Takes ownership; callers ptr::read elements in order and advance
    // `position` after each read.
    fn new(array: GenericArray<T, N>) -> ArrayConsumer<T, N> {
        ArrayConsumer {
            array: ManuallyDrop::new(array),
            position: 0,
        }
    }
}
impl<T, N: ArrayLength<T>> Drop for ArrayConsumer<T, N> {
    // Drop the elements that were never moved out (e.g. when a mapping
    // closure panicked midway).
    fn drop(&mut self) {
        for i in self.position..N::to_usize() {
            unsafe {
                ptr::drop_in_place(self.array.get_unchecked_mut(i));
            }
        }
    }
}
// Core constructors and transformations. All unsafe code follows the same
// protocol: ArrayBuilder tracks initialized destination slots and
// ArrayConsumer tracks moved-out source slots, so a panic at any point
// drops each element exactly once.
impl<T, N> GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    /// Initializes a new `GenericArray` instance using the given function.
    ///
    /// If the generator function panics while initializing the array,
    /// any already initialized elements will be dropped.
    pub fn generate<F>(f: F) -> GenericArray<T, N>
    where
        F: Fn(usize) -> T,
    {
        let mut destination = ArrayBuilder::new();
        for (i, dst) in destination.array.iter_mut().enumerate() {
            unsafe {
                // Write into the uninitialized slot without dropping it.
                ptr::write(dst, f(i));
            }
            // Advance only after the write so a panic in f(i) is safe.
            destination.position += 1;
        }
        destination.into_inner()
    }
    /// Map a function over a slice to a `GenericArray`.
    ///
    /// The length of the slice *must* be equal to the length of the array.
    ///
    /// Panics if the lengths differ.
    #[inline]
    pub fn map_slice<S, F: Fn(&S) -> T>(s: &[S], f: F) -> GenericArray<T, N> {
        assert_eq!(s.len(), N::to_usize());
        // get_unchecked is in-bounds: i < N and the assert pinned s.len() == N.
        Self::generate(|i| f(unsafe { s.get_unchecked(i) }))
    }
    /// Maps a `GenericArray` to another `GenericArray`.
    ///
    /// If the mapping function panics, any already initialized elements in the new array
    /// will be dropped, AND any unused elements in the source array will also be dropped.
    pub fn map<U, F>(self, f: F) -> GenericArray<U, N>
    where
        F: Fn(T) -> U,
        N: ArrayLength<U>,
    {
        let mut source = ArrayConsumer::new(self);
        let mut destination = ArrayBuilder::new();
        for (dst, src) in destination.array.iter_mut().zip(source.array.iter()) {
            unsafe {
                // Move the element out of the source, map it, write it in.
                ptr::write(dst, f(ptr::read(src)));
            }
            source.position += 1;
            destination.position += 1;
        }
        destination.into_inner()
    }
    /// Maps a `GenericArray` to another `GenericArray` by reference.
    ///
    /// If the mapping function panics, any already initialized elements will be dropped.
    #[inline]
    pub fn map_ref<U, F>(&self, f: F) -> GenericArray<U, N>
    where
        F: Fn(&T) -> U,
        N: ArrayLength<U>,
    {
        // get_unchecked is in-bounds: generate only passes i < N.
        GenericArray::generate(|i| f(unsafe { self.get_unchecked(i) }))
    }
    /// Combines two `GenericArray` instances and iterates through both of them,
    /// initializing a new `GenericArray` with the result of the zipped mapping function.
    ///
    /// If the mapping function panics, any already initialized elements in the new array
    /// will be dropped, AND any unused elements in the source arrays will also be dropped.
    pub fn zip<B, U, F>(self, rhs: GenericArray<B, N>, f: F) -> GenericArray<U, N>
    where
        F: Fn(T, B) -> U,
        N: ArrayLength<B> + ArrayLength<U>,
    {
        let mut left = ArrayConsumer::new(self);
        let mut right = ArrayConsumer::new(rhs);
        let mut destination = ArrayBuilder::new();
        for (dst, (lhs, rhs)) in
            destination.array.iter_mut().zip(left.array.iter().zip(
                right.array.iter(),
            ))
        {
            unsafe {
                ptr::write(dst, f(ptr::read(lhs), ptr::read(rhs)));
            }
            destination.position += 1;
            left.position += 1;
            right.position += 1;
        }
        destination.into_inner()
    }
    /// Combines two `GenericArray` instances and iterates through both of them by reference,
    /// initializing a new `GenericArray` with the result of the zipped mapping function.
    ///
    /// If the mapping function panics, any already initialized elements will be dropped.
    pub fn zip_ref<B, U, F>(&self, rhs: &GenericArray<B, N>, f: F) -> GenericArray<U, N>
    where
        F: Fn(&T, &B) -> U,
        N: ArrayLength<B> + ArrayLength<U>,
    {
        // Both arrays have length N, so the unchecked indexing is in-bounds.
        GenericArray::generate(|i| unsafe {
            f(self.get_unchecked(i), rhs.get_unchecked(i))
        })
    }
    /// Extracts a slice containing the entire array.
    #[inline]
    pub fn as_slice(&self) -> &[T] {
        self.deref()
    }
    /// Extracts a mutable slice containing the entire array.
    #[inline]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        self.deref_mut()
    }
    /// Converts slice to a generic array reference with inferred length;
    ///
    /// Length of the slice must be equal to the length of the array.
    ///
    /// Panics if the lengths differ.
    #[inline]
    pub fn from_slice(slice: &[T]) -> &GenericArray<T, N> {
        assert_eq!(slice.len(), N::to_usize());
        // Layout of GenericArray<T, N> is exactly N contiguous Ts, so the
        // pointer cast is valid after the length check above.
        unsafe { &*(slice.as_ptr() as *const GenericArray<T, N>) }
    }
    /// Converts mutable slice to a mutable generic array reference
    ///
    /// Length of the slice must be equal to the length of the array.
    ///
    /// Panics if the lengths differ.
    #[inline]
    pub fn from_mut_slice(slice: &mut [T]) -> &mut GenericArray<T, N> {
        assert_eq!(slice.len(), N::to_usize());
        unsafe { &mut *(slice.as_mut_ptr() as *mut GenericArray<T, N>) }
    }
}
impl<T: Clone, N> GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    /// Construct a `GenericArray` from a slice by cloning its content
    ///
    /// Length of the slice must be equal to the length of the array
    ///
    /// Panics (via the `expect`) if the lengths differ.
    #[inline]
    pub fn clone_from_slice(list: &[T]) -> GenericArray<T, N> {
        Self::from_exact_iter(list.iter().cloned()).expect(
            "Slice must be the same length as the array",
        )
    }
}
impl<T, N> GenericArray<T, N>
where
    N: ArrayLength<T>,
{
    /// Builds an array from an exact-size iterator.
    ///
    /// Returns `None` if the iterator's length is not exactly `N`.
    pub fn from_exact_iter<I>(iter: I) -> Option<Self>
    where
        I: IntoIterator<Item = T>,
        <I as IntoIterator>::IntoIter: ExactSizeIterator,
    {
        let iter = iter.into_iter();
        if iter.len() == N::to_usize() {
            let mut destination = ArrayBuilder::new();
            for (dst, src) in destination.array.iter_mut().zip(iter.into_iter()) {
                unsafe {
                    ptr::write(dst, src);
                }
                // Track progress so a panicking iterator drops only the
                // already-written slots.
                destination.position += 1;
            }
            // All N slots are initialized: take the array and skip the
            // builder's Drop to avoid double-dropping.
            let array = unsafe { ptr::read(&destination.array) };
            mem::forget(destination);
            Some(ManuallyDrop::into_inner(array))
        } else {
            None
        }
    }
}
impl<T, N> ::core::iter::FromIterator<T> for GenericArray<T, N>
where
    N: ArrayLength<T>,
    T: Default,
{
    /// Builds a `GenericArray` from an iterator, padding with `T::default()`
    /// when the iterator yields fewer than `N` items. Items beyond `N` are
    /// not consumed.
    fn from_iter<I>(iter: I) -> GenericArray<T, N>
    where
        I: IntoIterator<Item = T>,
    {
        let mut destination = ArrayBuilder::new();
        // Infinite stream of defaults used to pad a short input iterator.
        let defaults = ::core::iter::repeat(()).map(|_| T::default());
        for (dst, src) in destination.array.iter_mut().zip(
            iter.into_iter().chain(defaults),
        )
        {
            unsafe {
                ptr::write(dst, src);
            }
            // Bug fix: advance `position` after each write (as `generate`
            // and `map` do) so that if the iterator or `T::default()` panics,
            // ArrayBuilder's Drop frees the already-written elements instead
            // of leaking all of them (`position` previously stayed 0).
            destination.position += 1;
        }
        destination.into_inner()
    }
}
#[cfg(test)]
mod test {
    // Compile with:
    // cargo rustc --lib --profile test --release --
    // -C target-cpu=native -C opt-level=3 --emit asm
    // and view the assembly to make sure test_assembly generates
    // SIMD instructions instead of a naive loop.
    // Opaque identity function that defeats constant folding so the
    // assembly of the zip actually materializes.
    #[inline(never)]
    pub fn black_box<T>(val: T) -> T {
        use core::{mem, ptr};
        let ret = unsafe { ptr::read_volatile(&val) };
        mem::forget(val);
        ret
    }
    #[test]
    fn test_assembly() {
        let a = black_box(arr![i32; 1, 3, 5, 7]);
        let b = black_box(arr![i32; 2, 4, 6, 8]);
        let c = a.zip_ref(&b, |l, r| l + r);
        assert_eq!(c, arr![i32; 3, 7, 11, 15]);
    }
}

44
third_party/rust/generic-array/tests/hex.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,44 @@
#[macro_use]
extern crate generic_array;
extern crate typenum;
use generic_array::GenericArray;
use std::str::from_utf8;
use typenum::U2048;
// Small arrays take the stack-buffer path of the LowerHex impl.
#[test]
fn short_lower_hex() {
    let ar = arr![u8; 10, 20, 30];
    assert_eq!(format!("{:x}", ar), "0a141e");
}
// Small arrays take the stack-buffer path of the UpperHex impl.
#[test]
fn short_upper_hex() {
    let ar = arr![u8; 30, 20, 10];
    assert_eq!(format!("{:X}", ar), "1E140A");
}
// 2048 bytes exceeds the stack buffer, exercising the chunked path.
#[test]
fn long_lower_hex() {
    let ar = GenericArray::<u8, U2048>::default();
    assert_eq!(format!("{:x}", ar), from_utf8(&[b'0'; 4096]).unwrap());
}
// Same as long_lower_hex for the UpperHex impl ('0' is case-invariant).
#[test]
fn long_upper_hex() {
    let ar = GenericArray::<u8, U2048>::default();
    assert_eq!(format!("{:X}", ar), from_utf8(&[b'0'; 4096]).unwrap());
}
// Precision (`.2`) limits output to the first 2 bytes (4 hex chars).
#[test]
fn truncated_lower_hex() {
    let ar = arr![u8; 10, 20, 30, 40, 50];
    assert_eq!(format!("{:.2x}", ar), "0a14");
}
// Precision (`.4`) limits output to the first 4 bytes (8 hex chars).
#[test]
fn truncated_upper_hex() {
    let ar = arr![u8; 30, 20, 10, 17, 0];
    assert_eq!(format!("{:.4X}", ar), "1E140A11");
}

10
third_party/rust/generic-array/tests/import_name.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,10 @@
#[macro_use]
extern crate generic_array as gen_arr;
use gen_arr::typenum;
// The arr! macro must work when the crate is imported under an alias.
#[test]
fn test_different_crate_name() {
    let _: gen_arr::GenericArray<u32, typenum::U4> = arr![u32; 0, 1, 2, 3];
    let _: gen_arr::GenericArray<u32, typenum::U0> = arr![u32;];
}

169
third_party/rust/generic-array/tests/mod.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,169 @@
#![recursion_limit="128"]
#![no_std]
#[macro_use]
extern crate generic_array;
use core::cell::Cell;
use core::ops::Drop;
use generic_array::GenericArray;
use generic_array::typenum::{U1, U3, U4, U97};
// clone_from_slice must preserve every element of a large (97) array.
#[test]
fn test() {
    let mut list97 = [0; 97];
    for i in 0..97 {
        list97[i] = i as i32;
    }
    let l: GenericArray<i32, U97> = GenericArray::clone_from_slice(&list97);
    assert_eq!(l[0], 0);
    assert_eq!(l[1], 1);
    assert_eq!(l[32], 32);
    assert_eq!(l[56], 56);
}
// Each element's destructor must run exactly once when the array drops.
#[test]
fn test_drop() {
    #[derive(Clone)]
    struct TestDrop<'a>(&'a Cell<u32>);
    impl<'a> Drop for TestDrop<'a> {
        fn drop(&mut self) {
            self.0.set(self.0.get() + 1);
        }
    }
    let drop_counter = Cell::new(0);
    {
        let _: GenericArray<TestDrop, U3> =
            arr![TestDrop; TestDrop(&drop_counter),
                 TestDrop(&drop_counter),
                 TestDrop(&drop_counter)];
    }
    assert_eq!(drop_counter.get(), 3);
}
// Basic arr! construction and indexing.
#[test]
fn test_arr() {
    let test: GenericArray<u32, U3> = arr![u32; 1, 2, 3];
    assert_eq!(test[1], 2);
}
// Using `test` after assigning it to `test2` only compiles if
// GenericArray<u32, _> is Copy.
#[test]
fn test_copy() {
    let test = arr![u32; 1, 2, 3];
    let test2 = test;
    // if GenericArray is not copy, this should fail as a use of a moved value
    assert_eq!(test[1], 2);
    assert_eq!(test2[0], 1);
}
// GenericArray's IntoIterator must compose with iterator adaptors.
#[test]
fn test_iter_flat_map() {
    assert!((0..5).flat_map(|i| arr![i32; 2 * i, 2 * i + 1]).eq(0..10));
}
// Deliberately non-Clone wrapper: proves from_slice/from_mut_slice never clone.
#[derive(Debug, PartialEq, Eq)]
struct NoClone<T>(T);
// from_slice reinterprets a borrowed slice without cloning elements.
#[test]
fn test_from_slice() {
    let arr = [1, 2, 3, 4];
    let gen_arr = GenericArray::<_, U3>::from_slice(&arr[..3]);
    assert_eq!(&arr[..3], gen_arr.as_slice());
    let arr = [NoClone(1u32), NoClone(2), NoClone(3), NoClone(4)];
    let gen_arr = GenericArray::<_, U3>::from_slice(&arr[..3]);
    assert_eq!(&arr[..3], gen_arr.as_slice());
}
// Writes through the from_mut_slice view must land in the backing array.
#[test]
fn test_from_mut_slice() {
    let mut arr = [1, 2, 3, 4];
    {
        let gen_arr = GenericArray::<_, U3>::from_mut_slice(&mut arr[..3]);
        gen_arr[2] = 10;
    }
    assert_eq!(arr, [1, 2, 10, 4]);
    let mut arr = [NoClone(1u32), NoClone(2), NoClone(3), NoClone(4)];
    {
        let gen_arr = GenericArray::<_, U3>::from_mut_slice(&mut arr[..3]);
        gen_arr[2] = NoClone(10);
    }
    assert_eq!(arr, [NoClone(1), NoClone(2), NoClone(10), NoClone(4)]);
}
// Default fills every slot with T::default() (0 for u8).
#[test]
fn test_default() {
    let arr = GenericArray::<u8, U1>::default();
    assert_eq!(arr[0], 0);
}
// The From<[T; N]> impls generated by impl_from! preserve contents.
#[test]
fn test_from() {
    let data = [(1, 2, 3), (4, 5, 6), (7, 8, 9)];
    let garray: GenericArray<(usize, usize, usize), U3> = data.into();
    assert_eq!(&data, garray.as_slice());
}
// arr! with a single element.
#[test]
fn test_unit_macro() {
    let arr = arr![f32; 3.14];
    assert_eq!(arr[0], 3.14);
}
// arr! with zero elements must produce a length-0 array.
#[test]
fn test_empty_macro() {
    let _arr = arr![f32;];
}
// Ord must be implemented (compilation-level check; result unused).
#[test]
fn test_cmp() {
    arr![u8; 0x00].cmp(&arr![u8; 0x00]);
}
/// This test should cause a helpful compile error if uncommented.
// #[test]
// fn test_empty_macro2(){
// let arr = arr![];
// }
// Round-trip through serde_json when the "serde" feature is enabled.
#[cfg(feature = "serde")]
mod impl_serde {
    extern crate serde_json;
    use generic_array::GenericArray;
    use generic_array::typenum::U6;
    #[test]
    fn test_serde_implementation() {
        let array: GenericArray<f64, U6> = arr![f64; 0.0, 5.0, 3.0, 7.07192, 76.0, -9.0];
        let string = serde_json::to_string(&array).unwrap();
        assert_eq!(string, "[0.0,5.0,3.0,7.07192,76.0,-9.0]");
        let test_array: GenericArray<f64, U6> = serde_json::from_str(&string).unwrap();
        assert_eq!(test_array, array);
    }
}
// generate + map compose element-wise.
#[test]
fn test_map() {
    let b: GenericArray<i32, U4> = GenericArray::generate(|i| i as i32 * 4).map(|x| x - 3);
    assert_eq!(b, arr![i32; -3, 1, 5, 9]);
}
// zip combines two arrays pairwise, consuming both.
#[test]
fn test_zip() {
    let a: GenericArray<_, U4> = GenericArray::generate(|i| i + 1);
    let b: GenericArray<_, U4> = GenericArray::generate(|i| i as i32 * 4);
    let c = a.zip(b, |r, l| r as i32 + l);
    assert_eq!(c, arr![i32; 1, 6, 11, 16]);
}
// FromIterator pads a short iterator with T::default().
#[test]
fn test_from_iter() {
    use core::iter::repeat;
    let a: GenericArray<_, U4> = repeat(11).take(3).collect();
    assert_eq!(a, arr![i32; 11, 11, 11, 0]);
}

Просмотреть файл

@ -1 +0,0 @@
{"files":{"Cargo.toml":"8b38b56ce5ba48b676a177c9382178b1931453fffbfbd62979b75b2b4885a963","src/lib.rs":"8b90882a6add8d03ba1b88e03348e711d2493791201619f68f9a213747545872","src/test.rs":"ae6d77f68ae1eedb4e5301c07cacdfdf088998b852854967e5ec6c477d27e899"},"package":"cc4fd87be4a815fd373e02773983940f0d75fb26fde8c098e9e45f7af03154c0"}

100
third_party/rust/lalrpop-intern/src/lib.rs поставляемый
Просмотреть файл

@ -1,100 +0,0 @@
use std::collections::HashMap;
use std::cell::RefCell;
use std::fmt::{Debug, Display, Error, Formatter};
use std::cmp::{Ord, Ordering, PartialOrd};
#[cfg(test)]
mod test;
// One interner per thread: interned strings are only comparable/resolvable
// on the thread that created them.
thread_local! {
    static INTERNER_TLS: RefCell<Interner> =
        RefCell::new(Interner::new())
}
// String interner: `map` gives string -> handle lookup, `strings` gives
// handle-index -> string resolution. The two are kept in sync by `intern`.
pub struct Interner {
    map: HashMap<String, InternedString>,
    strings: Vec<String>,
}
// Lightweight handle into the thread-local interner's `strings` table.
// Eq/Hash compare indices, which is valid only within one thread's interner.
#[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub struct InternedString {
    index: u32,
}
/// Interns `s` in the thread-local interner, returning its stable handle.
/// Repeated calls with equal strings yield equal handles.
pub fn intern(s: &str) -> InternedString {
    write(|interner| {
        // Fast path: the string has been interned before.
        if let Some(&existing) = interner.map.get(s) {
            return existing;
        }
        // Slow path: append the string and record its table index.
        let index = interner.strings.len() as u32;
        let handle = InternedString { index: index };
        interner.map.insert(s.to_string(), handle);
        interner.strings.push(s.to_string());
        handle
    })
}
pub fn read<F, R>(f: F) -> R
where
F: FnOnce(&Interner) -> R,
{
INTERNER_TLS.with(|interner| f(&*interner.borrow()))
}
// Runs `f` with exclusive access to this thread's interner.
// Panics (RefCell) if re-entered while a borrow is active.
fn write<F, R>(f: F) -> R
where
    F: FnOnce(&mut Interner) -> R,
{
    INTERNER_TLS.with(|interner| f(&mut *interner.borrow_mut()))
}
impl Interner {
    fn new() -> Interner {
        Interner {
            map: HashMap::new(),
            strings: vec![],
        }
    }
    // Resolves a handle back to its string. Panics if the handle came from
    // a different thread's interner and is out of range.
    pub fn data(&self, i: InternedString) -> &str {
        &self.strings[i.index()]
    }
}
impl InternedString {
    // Widen the stored u32 index for table lookups.
    fn index(&self) -> usize {
        self.index as usize
    }
    /// Length in bytes of the interned string.
    pub fn len(&self) -> usize {
        read(|interner| interner.data(*self).len())
    }
}
// Debug-formats as the underlying string (quoted), not the index.
impl Debug for InternedString {
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
        read(|interner| Debug::fmt(&interner.data(*self), fmt))
    }
}
// Displays the underlying string verbatim.
impl Display for InternedString {
    fn fmt(&self, fmt: &mut Formatter) -> Result<(), Error> {
        read(|interner| Display::fmt(&interner.data(*self), fmt))
    }
}
// Orders by string content (not index), so ordering matches user intuition.
impl PartialOrd<InternedString> for InternedString {
    fn partial_cmp(&self, other: &InternedString) -> Option<Ordering> {
        read(|interner| PartialOrd::partial_cmp(interner.data(*self), interner.data(*other)))
    }
}
// Total order by string content, consistent with PartialOrd above.
impl Ord for InternedString {
    fn cmp(&self, other: &InternedString) -> Ordering {
        read(|interner| Ord::cmp(interner.data(*self), interner.data(*other)))
    }
}

23
third_party/rust/lalrpop-intern/src/test.rs поставляемый
Просмотреть файл

@ -1,23 +0,0 @@
use super::intern;
// Distinct strings get distinct handles; equal strings get equal handles;
// Display round-trips the original text.
#[test]
fn basic() {
    let i = intern("hello");
    let j = intern("world");
    assert!(i != j);
    assert_eq!(intern("hello"), i);
    assert_eq!(i.to_string(), "hello");
    assert_eq!(j.to_string(), "world");
}
// Debug formatting quotes the interned string.
#[test]
fn debug() {
    let i = intern("hello");
    assert_eq!(format!("{:?}", i), "\"hello\"");
}
// Display formatting yields the raw interned string.
#[test]
fn display() {
    let i = intern("hello");
    assert_eq!(format!("{}", i), "hello");
}

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

5
third_party/rust/lalrpop-snap/.cargo_vcs_info.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1,5 @@
{
"git": {
"sha1": "d673c84fb793f6e8cf1b2300022fa0ec037c72b0"
}
}

17
third_party/rust/lalrpop-snap/Cargo.toml поставляемый
Просмотреть файл

@ -12,7 +12,7 @@
[package]
name = "lalrpop-snap"
version = "0.15.1"
version = "0.16.0"
authors = ["Niko Matsakis <niko@alum.mit.edu>"]
description = "convenient LR(1) parser generator"
readme = "../README.md"
@ -27,31 +27,28 @@ doctest = false
version = "1.0"
[dependencies.atty]
version = "0.1.2"
version = "0.2"
[dependencies.bit-set]
version = "0.4.0"
version = "0.5.0"
[dependencies.diff]
version = "0.1.9"
[dependencies.ena]
version = "0.5"
version = "0.9"
[dependencies.itertools]
version = "0.7"
[dependencies.lalrpop-intern]
version = "0.15.1"
[dependencies.lalrpop-util]
version = "0.15.1"
version = "0.16.0"
[dependencies.petgraph]
version = "0.4.4"
version = "0.4.13"
[dependencies.regex]
version = "0.2.1"
version = "1"
[dependencies.regex-syntax]
version = "0.4.0"

Просмотреть файл

@ -325,7 +325,7 @@ fn report_content(content: &Content) -> term::Result<()> {
let try_colors = match Tls::session().color_config {
ColorConfig::Yes => true,
ColorConfig::No => false,
ColorConfig::IfTty => atty::is(),
ColorConfig::IfTty => atty::is(atty::Stream::Stdout),
};
if try_colors {

Просмотреть файл

@ -357,6 +357,10 @@ impl Types {
&self.parse_error_type
}
pub fn error_recovery_type(&self) -> &TypeRepr {
&self.error_recovery_type
}
/// Returns a type `(L, T, L)` where L is the location type and T
/// is the token type.
pub fn triple_type(&self) -> TypeRepr {

Просмотреть файл

@ -59,6 +59,7 @@ pub fn compile<W: Write>(
) -> io::Result<()> {
let prefix = &grammar.prefix;
rust!(out, "#[cfg_attr(rustfmt, rustfmt_skip)]");
rust!(out, "mod {}intern_token {{", prefix);
rust!(out, "#![allow(unused_imports)]");
try!(out.write_uses("", &grammar));
@ -101,7 +102,7 @@ pub fn compile<W: Write>(
})
.map(|regex| {
// make sure all regex are anchored at the beginning of the input
format!("^{}", regex)
format!("^({})", regex)
})
.map(|regex_str| {
// create a rust string with text of the regex; the Debug impl

Просмотреть файл

@ -71,6 +71,7 @@ impl<'codegen, 'grammar, W: Write, C> CodeGenerator<'codegen, 'grammar, W, C> {
F: FnOnce(&mut Self) -> io::Result<()>,
{
rust!(self.out, "");
rust!(self.out, "#[cfg_attr(rustfmt, rustfmt_skip)]");
rust!(self.out, "mod {}parse{} {{", self.prefix, self.start_symbol);
// these stylistic lints are annoying for the generated code,

Просмотреть файл

@ -286,6 +286,7 @@ struct TableDriven<'grammar> {
variant_names: Map<Symbol, String>,
variants: Map<TypeRepr, String>,
reduce_functions: Set<usize>,
}
impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDriven<'grammar>> {
@ -374,6 +375,7 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDrive
state_type: state_type,
variant_names: Map::new(),
variants: Map::new(),
reduce_functions: Set::new(),
},
)
}
@ -387,6 +389,7 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDrive
try!(this.write_accepts_fn());
try!(this.emit_reduce_actions());
try!(this.emit_downcast_fns());
try!(this.emit_reduce_action_functions());
Ok(())
})
}
@ -667,15 +670,11 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDrive
}
rust!(
self.out,
"if let Some(r) = {}reduce({}{}action, Some(&{}lookahead.0), &mut {}states, &mut \
{}symbols, {}) {{",
self.prefix,
"if let Some(r) = {p}reduce({}{p}action, Some(&{p}lookahead.0), &mut {p}states, &mut \
{p}symbols, {}) {{",
self.grammar.user_parameter_refs(),
self.prefix,
self.prefix,
self.prefix,
self.prefix,
phantom_data_expr
phantom_data_expr,
p = self.prefix
);
rust!(self.out, "if r.is_err() {{");
rust!(self.out, "return r;");
@ -922,7 +921,7 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDrive
rust!(self.out, "{} => {{", index);
// In debug builds LLVM is not very good at reusing stack space which makes this
// reduce function take up O(number of states) space. By wrapping each reduce action in
// an immediately called closure each reduction takes place in their own function
// an immediately called function each reduction takes place in their own function
// context which ends up reducing the stack space used.
// Fallible actions and the start symbol may do early returns so we avoid wrapping
@ -931,13 +930,18 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDrive
let reduce_stack_space = !is_fallible && production.nonterminal != self.start_symbol;
if reduce_stack_space {
rust!(self.out, "(|| {{");
}
try!(self.emit_reduce_action(production));
if reduce_stack_space {
rust!(self.out, "}})()");
self.custom.reduce_functions.insert(index);
let phantom_data_expr = self.phantom_data_expr();
rust!(
self.out,
"{p}reduce{}({}{p}action, {p}lookahead_start, {p}states, {p}symbols, {})",
index,
self.grammar.user_parameter_refs(),
phantom_data_expr,
p = self.prefix
);
} else {
try!(self.emit_reduce_action(production));
}
rust!(self.out, "}}");
@ -999,6 +1003,54 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TableDrive
Ok(())
}
fn emit_reduce_action_functions(&mut self) -> io::Result<()> {
for (production, index) in self.grammar
.nonterminals
.values()
.flat_map(|nt| &nt.productions)
.zip(1..)
{
if self.custom.reduce_functions.contains(&index) {
self.emit_reduce_alternative_fn_header(index)?;
self.emit_reduce_action(production)?;
rust!(self.out, "}}");
}
}
Ok(())
}
fn emit_reduce_alternative_fn_header(&mut self, index: usize) -> io::Result<()> {
let loc_type = self.types.terminal_loc_type();
let spanned_symbol_type = self.spanned_symbol_type();
let parameters = vec![
format!("{}action: {}", self.prefix, self.custom.state_type),
format!("{}lookahead_start: Option<&{}>", self.prefix, loc_type),
format!(
"{}states: &mut ::std::vec::Vec<{}>",
self.prefix, self.custom.state_type
),
format!(
"{}symbols: &mut ::std::vec::Vec<{}>",
self.prefix, spanned_symbol_type
),
format!("_: {}", self.phantom_data_type()),
];
try!(self.out.write_fn_header(
self.grammar,
&Visibility::Pub(Some(Path::from_id(Atom::from("crate")))),
format!("{}reduce{}", self.prefix, index),
vec![],
None,
parameters,
format!("(usize, {}, usize)", spanned_symbol_type,),
vec![]
));
rust!(self.out, "{{");
Ok(())
}
fn emit_reduce_action(&mut self, production: &Production) -> io::Result<()> {
rust!(self.out, "// {:?}", production);

Просмотреть файл

@ -48,6 +48,7 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TestAll> {
self.write_parse_mod(|this| {
try!(this.write_parser_fn());
rust!(this.out, "#[cfg_attr(rustfmt, rustfmt_skip)]");
rust!(this.out, "mod {}ascent {{", this.prefix);
try!(super::ascent::compile(
this.grammar,
@ -67,6 +68,7 @@ impl<'ascent, 'grammar, W: Write> CodeGenerator<'ascent, 'grammar, W, TestAll> {
rust!(this.out, "{}", pub_use);
rust!(this.out, "}}");
rust!(this.out, "#[cfg_attr(rustfmt, rustfmt_skip)]");
rust!(this.out, "mod {}parse_table {{", this.prefix);
try!(super::parse_table::compile(
this.grammar,

Просмотреть файл

@ -1,5 +1,5 @@
use collections::{Map, Multimap, Set};
use ena::unify::UnificationTable;
use ena::unify::InPlaceUnificationTable;
use lr1::core::{Action, LR1State, StateIndex};
use lr1::lane_table::construct::state_set::StateSet;
use lr1::lane_table::table::LaneTable;
@ -27,7 +27,7 @@ pub struct Merge<'m, 'grammar: 'm> {
impl<'m, 'grammar> Merge<'m, 'grammar> {
pub fn new(
table: &'m LaneTable<'grammar>,
unify: &'m mut UnificationTable<StateSet>,
unify: &'m mut InPlaceUnificationTable<StateSet>,
states: &'m mut Vec<LR1State<'grammar>>,
state_sets: &'m mut Map<StateIndex, StateSet>,
inconsistent_state: StateIndex,
@ -191,7 +191,7 @@ impl<'m, 'grammar> Merge<'m, 'grammar> {
struct ContextSets<'m> {
state_sets: &'m mut Map<StateIndex, StateSet>,
unify: &'m mut UnificationTable<StateSet>,
unify: &'m mut InPlaceUnificationTable<StateSet>,
}
impl<'m> ContextSets<'m> {

Просмотреть файл

@ -1,7 +1,7 @@
//!
use collections::{Map, Set};
use ena::unify::UnificationTable;
use ena::unify::InPlaceUnificationTable;
use grammar::repr::*;
use lr1::build;
use lr1::core::*;
@ -184,7 +184,7 @@ impl<'grammar> LaneTableConstruct<'grammar> {
// (To handle unification, we also map each state to a
// `StateSet` that is its entry in the `ena` table.)
let rows = table.rows()?;
let mut unify = UnificationTable::<StateSet>::new();
let mut unify = InPlaceUnificationTable::<StateSet>::new();
let mut state_sets = Map::new();
for (&state_index, context_set) in &rows {
let state_set = unify.new_key(context_set.clone());

Просмотреть файл

@ -35,6 +35,8 @@ impl UnifyKey for StateSet {
// But this is easier for now, and cloning a `ContextSet` isn't THAT
// expensive, right? :)
impl UnifyValue for ContextSet {
type Error = (Self, Self);
fn unify_values(value1: &Self, value2: &Self) -> Result<Self, (Self, Self)> {
match ContextSet::union(value1, value2) {
Ok(v) => Ok(v),

Просмотреть файл

@ -323,7 +323,7 @@ impl<'grammar> TypeInferencer<'grammar> {
SymbolKind::Nonterminal(ref id) => self.nonterminal_type(id),
SymbolKind::Choose(ref s) => self.symbol_type(&s.kind),
SymbolKind::Name(_, ref s) => self.symbol_type(&s.kind),
SymbolKind::Error => Ok(self.types.parse_error_type().clone()),
SymbolKind::Error => Ok(self.types.error_recovery_type().clone()),
SymbolKind::Repeat(..)
| SymbolKind::Expr(..)

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше