Mirror of https://github.com/mozilla/gecko-dev.git

Bug 1768710 - Upgrade rust-cascade to 1.4.0 and sha2 to 0.10.2. r=keeler,webdriver-reviewers,whimboo,glandium

This also upgrades the headers crate to 0.3.7. Webdriver depends on warp 0.2, which depends on headers 0.3, but headers < 0.3.7 depends on sha-1 < 0.10. We need sha-1 and sha2 at the same minor version to avoid duplicating the block-buffer, generic-array, and digest crates.

Differential Revision: https://phabricator.services.mozilla.com/D146010

Parent: 885798c440
Commit: e76a11f1cf
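The in-tree source changes below follow the RustCrypto digest 0.10 API: the `input`/`result` methods from digest 0.8 are replaced by `update`/`finalize`. A minimal before/after sketch, assuming only the `sha2` crate; the helper function and its name are illustrative and not part of the patch:

```rust
use sha2::{Digest, Sha256};

// digest 0.8 style used before this patch:
//     let mut digest = Sha256::default();
//     digest.input(data);
//     let hash = digest.result();
//
// digest 0.10 style used after this patch:
fn sha256(data: &[u8]) -> Vec<u8> {
    let mut digest = Sha256::default();
    digest.update(data);
    digest.finalize().to_vec()
}
```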
@@ -510,25 +510,13 @@ checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"

[[package]]
name = "block-buffer"
version = "0.7.3"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"
checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"
dependencies = [
 "block-padding",
 "byte-tools",
 "byteorder",
 "generic-array",
]

[[package]]
name = "block-padding"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5"
dependencies = [
 "byte-tools",
]

[[package]]
name = "bookmark_sync"
version = "0.1.0"

@@ -564,12 +552,6 @@ version = "3.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899"

[[package]]
name = "byte-tools"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"

[[package]]
name = "byteorder"
version = "1.4.3"

@@ -887,6 +869,15 @@ dependencies = [
 "cose",
]

[[package]]
name = "cpufeatures"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b"
dependencies = [
 "libc",
]

[[package]]
name = "cranelift-bforest"
version = "0.74.0"

@@ -1071,6 +1062,16 @@ dependencies = [
 "lazy_static",
]

[[package]]
name = "crypto-common"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"
dependencies = [
 "generic-array",
 "typenum",
]

[[package]]
name = "cssparser"
version = "0.29.2"

@@ -1314,11 +1315,12 @@ dependencies = [

[[package]]
name = "digest"
version = "0.8.1"
version = "0.10.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"
checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"
dependencies = [
 "generic-array",
 "block-buffer",
 "crypto-common",
]

[[package]]

@@ -1504,12 +1506,6 @@ dependencies = [
 "serde",
]

[[package]]
name = "fake-simd"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"

[[package]]
name = "fallible-iterator"
version = "0.2.0"

@@ -1970,11 +1966,12 @@ dependencies = [

[[package]]
name = "generic-array"
version = "0.12.4"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd"
checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803"
dependencies = [
 "typenum",
 "version_check",
]

[[package]]

@@ -2344,18 +2341,18 @@ dependencies = [

[[package]]
name = "headers"
version = "0.3.3"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62689dc57c7456e69712607ffcbd0aa1dfcccf9af73727e9b25bc1825375cac3"
checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d"
dependencies = [
 "base64 0.13.0",
 "bitflags",
 "bytes 1.1.0",
 "headers-core",
 "http",
 "httpdate",
 "mime",
 "sha-1",
 "time",
]

[[package]]

@@ -2436,6 +2433,12 @@ version = "1.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503"

[[package]]
name = "httpdate"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"

[[package]]
name = "humantime"
version = "2.1.0"

@@ -3713,12 +3716,6 @@ version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5"

[[package]]
name = "opaque-debug"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c"

[[package]]
name = "ordered-float"
version = "1.1.1"

@@ -4430,13 +4427,13 @@ checksum = "8a654c5bda722c699be6b0fe4c0d90de218928da5b724c3e467fc48865c37263"

[[package]]
name = "rust_cascade"
version = "1.2.0"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d09c17a9310f1eb79a67d307adffa7fa1c5943eaadcc21d4fb7f611536d66c4f"
checksum = "ef248456c30c6607f1eb1e5d11025367b3340e235314dd33d2b31b41b35ac335"
dependencies = [
 "byteorder",
 "digest",
 "murmurhash3",
 "rand",
 "sha2",
]

@@ -4715,26 +4712,24 @@ dependencies = [

[[package]]
name = "sha-1"
version = "0.8.2"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df"
checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f"
dependencies = [
 "block-buffer",
 "cfg-if 1.0.0",
 "cpufeatures",
 "digest",
 "fake-simd",
 "opaque-debug",
]

[[package]]
name = "sha2"
version = "0.8.2"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a256f46ea78a0c0d9ff00077504903ac881a1dafdc20da66545699e7776b3e69"
checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676"
dependencies = [
 "block-buffer",
 "cfg-if 1.0.0",
 "cpufeatures",
 "digest",
 "fake-simd",
 "opaque-debug",
]

[[package]]
@@ -13,8 +13,8 @@ moz_task = { path = "../../../../xpcom/rust/moz_task" }
nserror = { path = "../../../../xpcom/rust/nserror" }
nsstring = { path = "../../../../xpcom/rust/nsstring" }
rkv = { version = "0.17", default-features = false }
rust_cascade = "1.2.0"
sha2 = "^0.8"
rust_cascade = "1.4.0"
sha2 = "0.10.2"
storage_variant = { path = "../../../../storage/variant" }
tempfile = "3"
thin-vec = { version = "0.2.1", features = ["gecko-ffi"] }

@@ -352,8 +352,8 @@ impl SecurityState {
        pub_key: &[u8],
    ) -> Result<i16, SecurityStateError> {
        let mut digest = Sha256::default();
        digest.input(pub_key);
        let pub_key_hash = digest.result();
        digest.update(pub_key);
        let pub_key_hash = digest.finalize();

        let subject_pubkey = make_key!(PREFIX_REV_SPK, subject, &pub_key_hash);
        let issuer_serial = make_key!(PREFIX_REV_IS, issuer, serial);

@@ -386,9 +386,9 @@ impl SecurityState {
    fn issuer_is_enrolled(&self, subject: &[u8], pub_key: &[u8]) -> bool {
        if let Some(crlite_enrollment) = self.crlite_enrollment.as_ref() {
            let mut digest = Sha256::default();
            digest.input(subject);
            digest.input(pub_key);
            let issuer_id = digest.result();
            digest.update(subject);
            digest.update(pub_key);
            let issuer_id = digest.finalize();
            return crlite_enrollment.contains(&issuer_id.to_vec());
        }
        return false;

@@ -548,7 +548,7 @@ impl SecurityState {
        let mut filter_file = File::open(path)?;
        let mut filter_bytes = Vec::new();
        let _ = filter_file.read_to_end(&mut filter_bytes)?;
        let crlite_filter = *Cascade::from_bytes(filter_bytes)
        let crlite_filter = Cascade::from_bytes(filter_bytes)
            .map_err(|_| SecurityStateError::from("invalid CRLite filter"))?
            .ok_or(SecurityStateError::from("expecting non-empty filter"))?;

@@ -658,8 +658,8 @@ impl SecurityState {
            None => return Ok(false),
        };
        let mut digest = Sha256::default();
        digest.input(issuer_spki);
        let lookup_key = digest.result().as_slice().to_vec();
        digest.update(issuer_spki);
        let lookup_key = digest.finalize().to_vec();
        let serials = match crlite_stash.get(&lookup_key) {
            Some(serials) => serials,
            None => return Ok(false),

@@ -684,12 +684,12 @@ impl SecurityState {
            return nsICertStorage::STATE_NOT_COVERED;
        }
        let mut digest = Sha256::default();
        digest.input(issuer_spki);
        let mut lookup_key = digest.result().as_slice().to_vec();
        digest.update(issuer_spki);
        let mut lookup_key = digest.finalize().to_vec();
        lookup_key.extend_from_slice(serial_number);
        debug!("CRLite lookup key: {:?}", lookup_key);
        let result = match &self.crlite_filter {
            Some(crlite_filter) => crlite_filter.has(&lookup_key),
            Some(crlite_filter) => crlite_filter.has(lookup_key),
            // This can only happen if the backing file was deleted or if it or our database has
            // become corrupted. In any case, we have no information.
            None => return nsICertStorage::STATE_NO_FILTER,

@@ -742,8 +742,8 @@ impl SecurityState {
            }
        };
        let mut digest = Sha256::default();
        digest.input(&cert_der);
        let cert_hash = digest.result();
        digest.update(&cert_der);
        let cert_hash = digest.finalize();
        let cert_key = make_key!(PREFIX_CERT, &cert_hash);
        let cert = Cert::new(&cert_der, &subject, *trust)?;
        env_and_store
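The hunks above also reflect the rust_cascade 1.4 API: judging from these call sites, `Cascade::from_bytes` now yields the cascade directly (no box to dereference) and `has` takes the lookup key by value. A hedged sketch of that usage; the function names and error handling are invented for illustration and are not part of cert_storage:

```rust
use rust_cascade::Cascade;

// Illustrative only: mirrors the calls in the hunks above.
fn load_filter(filter_bytes: Vec<u8>) -> Result<Cascade, String> {
    Cascade::from_bytes(filter_bytes)
        .map_err(|_| "invalid CRLite filter".to_string())?
        .ok_or_else(|| "expecting non-empty filter".to_string())
}

fn filter_contains(filter: &Cascade, mut lookup_key: Vec<u8>, serial_number: &[u8]) -> bool {
    // The lookup key is the issuer SPKI hash followed by the serial number.
    lookup_key.extend_from_slice(serial_number);
    filter.has(lookup_key)
}
```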

@@ -9,7 +9,7 @@ byteorder = "1.3"
once_cell = "1"
pkcs11 = "0.4"
rsclientcerts = { path = "../rsclientcerts" }
sha2 = "0.8"
sha2 = "0.10.2"

[lib]
crate-type = ["staticlib"]

@@ -15,7 +15,7 @@ log = "0.4"
mozilla-central-workspace-hack = { path = "../../../../build/workspace-hack" }
pkcs11 = "0.4"
rsclientcerts = { path = "../rsclientcerts" }
sha2 = "0.8"
sha2 = "0.10.2"

[target."cfg(target_os = \"macos\")".dependencies.core-foundation]
version = "0.9"

@@ -1 +1 @@
{"files":{"Cargo.toml":"e4e9e182794c2185438af0c505714df9e051d1d1b17aec7a42265be672b1d027","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"d5c22aa3118d240e877ad41c5d9fa232f9c77d757d4aac0c2f943afc0a95e0ef","src/lib.rs":"59dd4084e456153bee968153ee45e34c8e853abfb756a53571c5844ccaf18c23"},"package":"c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b"}
{"files":{"CHANGELOG.md":"6d73524a3689766ef673022d56aa675012bb48068cc39470743a984904db72cb","Cargo.toml":"66f91b099c9807c3dbabfebf3ed42eab4be69e86ae1c759941765b5226cca3b8","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"d5c22aa3118d240e877ad41c5d9fa232f9c77d757d4aac0c2f943afc0a95e0ef","README.md":"9bf3545872bdad2fb41557be5cefc21a48f0c7804f8124e24b67760429036472","src/lib.rs":"73676ca504520fb62a25bcc442674774cc79cf27f5eb23fb997fc60c93504cd8","src/sealed.rs":"65afa9015a3ddb8d1a56733ef3103b1459842f69155f00cefdb235dc73255d4e","tests/mod.rs":"58ecfa416e3d30b420975aef1d7b9d52d779696c6ba3f204130cf9e8b4743b10"},"package":"0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324"}

@@ -0,0 +1,41 @@
# Changelog
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## 0.10.2 (2021-02-08)
### Fixed
- Eliminate unreachable panic in `LazyBuffer::digest_blocks` ([#731])

[#731]: https://github.com/RustCrypto/utils/pull/731

## 0.10.1 (2021-02-05)
### Fixed
- Use `as_mut_ptr` to get a pointer for mutation in the `set_data` method ([#728])

[#728]: https://github.com/RustCrypto/utils/pull/728

## 0.10.0 (2020-12-07) [YANKED]
### Changed
- Significant reduction of number of unreachable panics. ([#671])
- Added buffer kind type parameter to `BlockBuffer`, respective marker types, and type aliases. ([#671])
- Various `BlockBuffer` method changes. ([#671])

### Removed
- `pad_with` method and dependency on `block-padding`. ([#671])

[#671]: https://github.com/RustCrypto/utils/pull/671

## 0.10.0 (2020-12-08)
### Changed
- Rename `input_block(s)` methods to `digest_block(s)`. ([#113])
- Upgrade the `block-padding` dependency to v0.3. ([#113])

### Added
- `par_xor_data`, `xor_data`, and `set_data` methods. ([#113])

### Removed
- The `input_lazy` method. ([#113])

[#113]: https://github.com/RustCrypto/utils/pull/113
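The 0.10 entries above describe a reworked buffer API (`digest_blocks`, buffer kinds, new padding helpers). A small sketch of the eager buffer, modeled on the vendored tests further down in this commit; the message bytes and the bit-length argument are arbitrary:

```rust
use block_buffer::{generic_array::typenum::U8, EagerBuffer};

fn main() {
    // Feed data in arbitrary chunks; the closure only ever sees complete 8-byte blocks.
    let mut buf = EagerBuffer::<U8>::default();
    let mut blocks_seen = 0;
    buf.digest_blocks(b"hello, block-buffer!", |blocks| blocks_seen += blocks.len());
    assert_eq!(blocks_seen, 2); // 20 bytes -> two full blocks, 4 bytes left buffered
    assert_eq!(buf.get_pos(), 4);

    // MD-style finalization: 0x80 delimiter, zero fill, 64-bit big-endian bit length.
    let mut padded = Vec::new();
    buf.len64_padding_be(20 * 8, |block| padded.extend(block));
    assert_eq!(padded.len(), 16); // buffered bytes plus the length did not fit in one block
}
```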

@@ -3,34 +3,23 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.

[package]
edition = "2018"
name = "block-buffer"
version = "0.7.3"
version = "0.10.2"
authors = ["RustCrypto Developers"]
description = "Fixed size buffer for block processing of data"
description = "Buffer type for block processing of data"
documentation = "https://docs.rs/block-buffer"
readme = "README.md"
keywords = ["block", "buffer"]
categories = ["cryptography", "no-std"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/RustCrypto/utils"
[dependencies.block-padding]
version = "0.1"

[dependencies.byte-tools]
version = "0.3"

[dependencies.byteorder]
version = "1.1"
default-features = false

[dependencies.generic-array]
version = "0.12"
[badges.travis-ci]
repository = "RustCrypto/utils"
version = "0.14"

@@ -0,0 +1,40 @@
# [RustCrypto]: Block Buffer

[![crate][crate-image]][crate-link]
[![Docs][docs-image]][docs-link]
![Apache2/MIT licensed][license-image]
![Rust Version][rustc-image]
[![Project Chat][chat-image]][chat-link]
[![Build Status][build-image]][build-link]

Buffer type for block processing of data with minimized amount of unreachable panics.

## License

Licensed under either of:

* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
* [MIT license](http://opensource.org/licenses/MIT)

at your option.

### Contribution

Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.

[//]: # (badges)

[crate-image]: https://img.shields.io/crates/v/block-buffer.svg
[crate-link]: https://crates.io/crates/block-buffer
[docs-image]: https://docs.rs/block-buffer/badge.svg
[docs-link]: https://docs.rs/block-buffer/
[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
[rustc-image]: https://img.shields.io/badge/rustc-1.41+-blue.svg
[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/260052-utils
[build-image]: https://github.com/RustCrypto/utils/workflows/block-buffer/badge.svg?branch=master&event=push
[build-link]: https://github.com/RustCrypto/utils/actions/workflows/block-buffer.yml

[//]: # (general links)

[RustCrypto]: https://github.com/rustcrypto
|
@ -1,210 +1,325 @@
|
|||
//! Fixed size buffer for block processing of data.
|
||||
#![no_std]
|
||||
pub extern crate byteorder;
|
||||
pub extern crate block_padding;
|
||||
pub extern crate generic_array;
|
||||
extern crate byte_tools;
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_root_url = "https://docs.rs/block-buffer/0.10.2"
|
||||
)]
|
||||
#![warn(missing_docs, rust_2018_idioms)]
|
||||
|
||||
use byteorder::{ByteOrder, BE};
|
||||
use byte_tools::zero;
|
||||
use block_padding::{Padding, PadError};
|
||||
use generic_array::{GenericArray, ArrayLength};
|
||||
use core::slice;
|
||||
pub use generic_array;
|
||||
|
||||
/// Buffer for block processing of data
|
||||
#[derive(Clone, Default)]
|
||||
pub struct BlockBuffer<BlockSize: ArrayLength<u8>> {
|
||||
buffer: GenericArray<u8, BlockSize>,
|
||||
pos: usize,
|
||||
use core::{marker::PhantomData, slice};
|
||||
use generic_array::{
|
||||
typenum::{IsLess, Le, NonZero, U256},
|
||||
ArrayLength, GenericArray,
|
||||
};
|
||||
|
||||
mod sealed;
|
||||
|
||||
/// Block on which `BlockBuffer` operates.
|
||||
pub type Block<BlockSize> = GenericArray<u8, BlockSize>;
|
||||
|
||||
/// Trait for buffer kinds.
|
||||
pub trait BufferKind: sealed::Sealed {}
|
||||
|
||||
/// Eager block buffer kind, which guarantees that buffer position
|
||||
/// always lies in the range of `0..BlockSize`.
|
||||
#[derive(Copy, Clone, Debug, Default)]
|
||||
pub struct Eager {}
|
||||
|
||||
/// Lazy block buffer kind, which guarantees that buffer position
|
||||
/// always lies in the range of `0..=BlockSize`.
|
||||
#[derive(Copy, Clone, Debug, Default)]
|
||||
pub struct Lazy {}
|
||||
|
||||
impl BufferKind for Eager {}
|
||||
impl BufferKind for Lazy {}
|
||||
|
||||
/// Eager block buffer.
|
||||
pub type EagerBuffer<B> = BlockBuffer<B, Eager>;
|
||||
/// Lazy block buffer.
|
||||
pub type LazyBuffer<B> = BlockBuffer<B, Lazy>;
|
||||
|
||||
/// Buffer for block processing of data.
|
||||
#[derive(Debug)]
|
||||
pub struct BlockBuffer<BlockSize, Kind>
|
||||
where
|
||||
BlockSize: ArrayLength<u8> + IsLess<U256>,
|
||||
Le<BlockSize, U256>: NonZero,
|
||||
Kind: BufferKind,
|
||||
{
|
||||
buffer: Block<BlockSize>,
|
||||
pos: u8,
|
||||
_pd: PhantomData<Kind>,
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
unsafe fn cast<N: ArrayLength<u8>>(block: &[u8]) -> &GenericArray<u8, N> {
|
||||
debug_assert_eq!(block.len(), N::to_usize());
|
||||
&*(block.as_ptr() as *const GenericArray<u8, N>)
|
||||
impl<BlockSize, Kind> Default for BlockBuffer<BlockSize, Kind>
|
||||
where
|
||||
BlockSize: ArrayLength<u8> + IsLess<U256>,
|
||||
Le<BlockSize, U256>: NonZero,
|
||||
Kind: BufferKind,
|
||||
{
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
buffer: Default::default(),
|
||||
pos: 0,
|
||||
_pd: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
impl<BlockSize: ArrayLength<u8>> BlockBuffer<BlockSize> {
|
||||
/// Process data in `input` in blocks of size `BlockSize` using function `f`.
|
||||
#[inline]
|
||||
pub fn input<F>(&mut self, mut input: &[u8], mut f: F)
|
||||
where F: FnMut(&GenericArray<u8, BlockSize>)
|
||||
{
|
||||
// If there is already data in the buffer, process it if we have
|
||||
// enough to complete the chunk.
|
||||
let rem = self.remaining();
|
||||
if self.pos != 0 && input.len() >= rem {
|
||||
let (l, r) = input.split_at(rem);
|
||||
input = r;
|
||||
self.buffer[self.pos..].copy_from_slice(l);
|
||||
self.pos = 0;
|
||||
f(&self.buffer);
|
||||
}
|
||||
|
||||
// While we have at least a full buffer size chunks's worth of data,
|
||||
// process that data without copying it into the buffer
|
||||
while input.len() >= self.size() {
|
||||
let (block, r) = input.split_at(self.size());
|
||||
input = r;
|
||||
f(unsafe { cast(block) });
|
||||
}
|
||||
|
||||
// Copy any remaining data into the buffer.
|
||||
self.buffer[self.pos..self.pos+input.len()].copy_from_slice(input);
|
||||
self.pos += input.len();
|
||||
}
|
||||
|
||||
/*
|
||||
/// Process data in `input` in blocks of size `BlockSize` using function `f`, which accepts
|
||||
/// slice of blocks.
|
||||
#[inline]
|
||||
pub fn input2<F>(&mut self, mut input: &[u8], mut f: F)
|
||||
where F: FnMut(&[GenericArray<u8, BlockSize>])
|
||||
{
|
||||
// If there is already data in the buffer, process it if we have
|
||||
// enough to complete the chunk.
|
||||
let rem = self.remaining();
|
||||
if self.pos != 0 && input.len() >= rem {
|
||||
let (l, r) = input.split_at(rem);
|
||||
input = r;
|
||||
self.buffer[self.pos..].copy_from_slice(l);
|
||||
self.pos = 0;
|
||||
f(slice::from_ref(&self.buffer));
|
||||
}
|
||||
|
||||
// While we have at least a full buffer size chunks's worth of data,
|
||||
// process it data without copying into the buffer
|
||||
let n_blocks = input.len()/self.size();
|
||||
let (left, right) = input.split_at(n_blocks*self.size());
|
||||
// safe because we guarantee that `blocks` does not point outside of `input`
|
||||
let blocks = unsafe {
|
||||
slice::from_raw_parts(
|
||||
left.as_ptr() as *const GenericArray<u8, BlockSize>,
|
||||
n_blocks,
|
||||
)
|
||||
};
|
||||
f(blocks);
|
||||
|
||||
// Copy remaining data into the buffer.
|
||||
self.buffer[self.pos..self.pos+right.len()].copy_from_slice(right);
|
||||
self.pos += right.len();
|
||||
}
|
||||
*/
|
||||
|
||||
/// Variant that doesn't flush the buffer until there's additional
|
||||
/// data to be processed. Suitable for tweakable block ciphers
|
||||
/// like Threefish that need to know whether a block is the *last*
|
||||
/// data block before processing it.
|
||||
#[inline]
|
||||
pub fn input_lazy<F>(&mut self, mut input: &[u8], mut f: F)
|
||||
where F: FnMut(&GenericArray<u8, BlockSize>)
|
||||
{
|
||||
let rem = self.remaining();
|
||||
if self.pos != 0 && input.len() > rem {
|
||||
let (l, r) = input.split_at(rem);
|
||||
input = r;
|
||||
self.buffer[self.pos..].copy_from_slice(l);
|
||||
self.pos = 0;
|
||||
f(&self.buffer);
|
||||
}
|
||||
|
||||
while input.len() > self.size() {
|
||||
let (block, r) = input.split_at(self.size());
|
||||
input = r;
|
||||
f(unsafe { cast(block) });
|
||||
}
|
||||
|
||||
self.buffer[self.pos..self.pos+input.len()].copy_from_slice(input);
|
||||
self.pos += input.len();
|
||||
}
|
||||
|
||||
/// Pad buffer with `prefix` and make sure that internall buffer
|
||||
/// has at least `up_to` free bytes. All remaining bytes get
|
||||
/// zeroed-out.
|
||||
#[inline]
|
||||
fn digest_pad<F>(&mut self, up_to: usize, f: &mut F)
|
||||
where F: FnMut(&GenericArray<u8, BlockSize>)
|
||||
{
|
||||
if self.pos == self.size() {
|
||||
f(&self.buffer);
|
||||
self.pos = 0;
|
||||
}
|
||||
self.buffer[self.pos] = 0x80;
|
||||
self.pos += 1;
|
||||
|
||||
zero(&mut self.buffer[self.pos..]);
|
||||
|
||||
if self.remaining() < up_to {
|
||||
f(&self.buffer);
|
||||
zero(&mut self.buffer[..self.pos]);
|
||||
impl<BlockSize, Kind> Clone for BlockBuffer<BlockSize, Kind>
|
||||
where
|
||||
BlockSize: ArrayLength<u8> + IsLess<U256>,
|
||||
Le<BlockSize, U256>: NonZero,
|
||||
Kind: BufferKind,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
buffer: self.buffer.clone(),
|
||||
pos: self.pos,
|
||||
_pd: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Pad message with 0x80, zeros and 64-bit message length
|
||||
/// in a byte order specified by `B`
|
||||
#[inline]
|
||||
pub fn len64_padding<B, F>(&mut self, data_len: u64, mut f: F)
|
||||
where B: ByteOrder, F: FnMut(&GenericArray<u8, BlockSize>)
|
||||
{
|
||||
// TODO: replace `F` with `impl Trait` on MSRV bump
|
||||
self.digest_pad(8, &mut f);
|
||||
let s = self.size();
|
||||
B::write_u64(&mut self.buffer[s-8..], data_len);
|
||||
f(&self.buffer);
|
||||
self.pos = 0;
|
||||
}
|
||||
|
||||
|
||||
/// Pad message with 0x80, zeros and 128-bit message length
|
||||
/// in the big-endian byte order
|
||||
#[inline]
|
||||
pub fn len128_padding_be<F>(&mut self, hi: u64, lo: u64, mut f: F)
|
||||
where F: FnMut(&GenericArray<u8, BlockSize>)
|
||||
{
|
||||
// TODO: on MSRV bump replace `F` with `impl Trait`, use `u128`, add `B`
|
||||
self.digest_pad(16, &mut f);
|
||||
let s = self.size();
|
||||
BE::write_u64(&mut self.buffer[s-16..s-8], hi);
|
||||
BE::write_u64(&mut self.buffer[s-8..], lo);
|
||||
f(&self.buffer);
|
||||
self.pos = 0;
|
||||
}
|
||||
|
||||
/// Pad message with a given padding `P`
|
||||
impl<BlockSize, Kind> BlockBuffer<BlockSize, Kind>
|
||||
where
|
||||
BlockSize: ArrayLength<u8> + IsLess<U256>,
|
||||
Le<BlockSize, U256>: NonZero,
|
||||
Kind: BufferKind,
|
||||
{
|
||||
/// Create new buffer from slice.
|
||||
///
|
||||
/// Returns `PadError` if internall buffer is full, which can only happen if
|
||||
/// `input_lazy` was used.
|
||||
#[inline]
|
||||
pub fn pad_with<P: Padding>(&mut self)
|
||||
-> Result<&mut GenericArray<u8, BlockSize>, PadError>
|
||||
{
|
||||
P::pad_block(&mut self.buffer[..], self.pos)?;
|
||||
self.pos = 0;
|
||||
Ok(&mut self.buffer)
|
||||
/// # Panics
|
||||
/// If slice length is not valid for used buffer kind.
|
||||
#[inline(always)]
|
||||
pub fn new(buf: &[u8]) -> Self {
|
||||
let pos = buf.len();
|
||||
assert!(Kind::invariant(pos, BlockSize::USIZE));
|
||||
let mut buffer = Block::<BlockSize>::default();
|
||||
buffer[..pos].copy_from_slice(buf);
|
||||
Self {
|
||||
buffer,
|
||||
pos: pos as u8,
|
||||
_pd: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Return size of the internall buffer in bytes
|
||||
/// Digest data in `input` in blocks of size `BlockSize` using
|
||||
/// the `compress` function, which accepts slice of blocks.
|
||||
#[inline]
|
||||
pub fn digest_blocks(
|
||||
&mut self,
|
||||
mut input: &[u8],
|
||||
mut compress: impl FnMut(&[Block<BlockSize>]),
|
||||
) {
|
||||
let pos = self.get_pos();
|
||||
// using `self.remaining()` for some reason
|
||||
// prevents panic elimination
|
||||
let rem = self.size() - pos;
|
||||
let n = input.len();
|
||||
// Note that checking condition `pos + n < BlockSize` is
|
||||
// equivalent to checking `n < rem`, where `rem` is equal
|
||||
// to `BlockSize - pos`. Using the latter allows us to work
|
||||
// around compiler accounting for possible overflow of
|
||||
// `pos + n` which results in it inserting unreachable
|
||||
// panic branches. Using `unreachable_unchecked` in `get_pos`
|
||||
// we convince compiler that `BlockSize - pos` never underflows.
|
||||
if Kind::invariant(n, rem) {
|
||||
// double slicing allows to remove panic branches
|
||||
self.buffer[pos..][..n].copy_from_slice(input);
|
||||
self.set_pos_unchecked(pos + n);
|
||||
return;
|
||||
}
|
||||
if pos != 0 {
|
||||
let (left, right) = input.split_at(rem);
|
||||
input = right;
|
||||
self.buffer[pos..].copy_from_slice(left);
|
||||
compress(slice::from_ref(&self.buffer));
|
||||
}
|
||||
|
||||
let (blocks, leftover) = Kind::split_blocks(input);
|
||||
if !blocks.is_empty() {
|
||||
compress(blocks);
|
||||
}
|
||||
|
||||
let n = leftover.len();
|
||||
self.buffer[..n].copy_from_slice(leftover);
|
||||
self.set_pos_unchecked(n);
|
||||
}
|
||||
|
||||
/// Reset buffer by setting cursor position to zero.
|
||||
#[inline(always)]
|
||||
pub fn reset(&mut self) {
|
||||
self.set_pos_unchecked(0);
|
||||
}
|
||||
|
||||
/// Pad remaining data with zeros and return resulting block.
|
||||
#[inline(always)]
|
||||
pub fn pad_with_zeros(&mut self) -> &mut Block<BlockSize> {
|
||||
let pos = self.get_pos();
|
||||
self.buffer[pos..].iter_mut().for_each(|b| *b = 0);
|
||||
self.set_pos_unchecked(0);
|
||||
&mut self.buffer
|
||||
}
|
||||
|
||||
/// Return current cursor position.
|
||||
#[inline(always)]
|
||||
pub fn get_pos(&self) -> usize {
|
||||
let pos = self.pos as usize;
|
||||
if !Kind::invariant(pos, BlockSize::USIZE) {
|
||||
debug_assert!(false);
|
||||
// SAFETY: `pos` never breaks the invariant
|
||||
unsafe {
|
||||
core::hint::unreachable_unchecked();
|
||||
}
|
||||
}
|
||||
pos
|
||||
}
|
||||
|
||||
/// Return slice of data stored inside the buffer.
|
||||
#[inline(always)]
|
||||
pub fn get_data(&self) -> &[u8] {
|
||||
&self.buffer[..self.get_pos()]
|
||||
}
|
||||
|
||||
/// Set buffer content and cursor position.
|
||||
///
|
||||
/// # Panics
|
||||
/// If `pos` is bigger or equal to block size.
|
||||
#[inline]
|
||||
pub fn set(&mut self, buf: Block<BlockSize>, pos: usize) {
|
||||
assert!(Kind::invariant(pos, BlockSize::USIZE));
|
||||
self.buffer = buf;
|
||||
self.set_pos_unchecked(pos);
|
||||
}
|
||||
|
||||
/// Return size of the internall buffer in bytes.
|
||||
#[inline(always)]
|
||||
pub fn size(&self) -> usize {
|
||||
BlockSize::to_usize()
|
||||
BlockSize::USIZE
|
||||
}
|
||||
|
||||
/// Return current cursor position
|
||||
#[inline]
|
||||
pub fn position(&self) -> usize {
|
||||
self.pos
|
||||
}
|
||||
|
||||
/// Return number of remaining bytes in the internall buffer
|
||||
#[inline]
|
||||
/// Return number of remaining bytes in the internall buffer.
|
||||
#[inline(always)]
|
||||
pub fn remaining(&self) -> usize {
|
||||
self.size() - self.pos
|
||||
self.size() - self.get_pos()
|
||||
}
|
||||
|
||||
/// Reset buffer by setting cursor position to zero
|
||||
#[inline]
|
||||
pub fn reset(&mut self) {
|
||||
self.pos = 0
|
||||
#[inline(always)]
|
||||
fn set_pos_unchecked(&mut self, pos: usize) {
|
||||
debug_assert!(Kind::invariant(pos, BlockSize::USIZE));
|
||||
self.pos = pos as u8;
|
||||
}
|
||||
}
|
||||
|
||||
impl<BlockSize> BlockBuffer<BlockSize, Eager>
|
||||
where
|
||||
BlockSize: ArrayLength<u8> + IsLess<U256>,
|
||||
Le<BlockSize, U256>: NonZero,
|
||||
{
|
||||
/// Set `data` to generated blocks.
|
||||
#[inline]
|
||||
pub fn set_data(
|
||||
&mut self,
|
||||
mut data: &mut [u8],
|
||||
mut process_blocks: impl FnMut(&mut [Block<BlockSize>]),
|
||||
) {
|
||||
let pos = self.get_pos();
|
||||
let r = self.remaining();
|
||||
let n = data.len();
|
||||
if pos != 0 {
|
||||
if n < r {
|
||||
// double slicing allows to remove panic branches
|
||||
data.copy_from_slice(&self.buffer[pos..][..n]);
|
||||
self.set_pos_unchecked(pos + n);
|
||||
return;
|
||||
}
|
||||
let (left, right) = data.split_at_mut(r);
|
||||
data = right;
|
||||
left.copy_from_slice(&self.buffer[pos..]);
|
||||
}
|
||||
|
||||
let (blocks, leftover) = to_blocks_mut(data);
|
||||
process_blocks(blocks);
|
||||
|
||||
let n = leftover.len();
|
||||
if n != 0 {
|
||||
let mut block = Default::default();
|
||||
process_blocks(slice::from_mut(&mut block));
|
||||
leftover.copy_from_slice(&block[..n]);
|
||||
self.buffer = block;
|
||||
}
|
||||
self.set_pos_unchecked(n);
|
||||
}
|
||||
|
||||
/// Compress remaining data after padding it with `delim`, zeros and
|
||||
/// the `suffix` bytes. If there is not enough unused space, `compress`
|
||||
/// will be called twice.
|
||||
///
|
||||
/// # Panics
|
||||
/// If suffix length is bigger than block size.
|
||||
#[inline(always)]
|
||||
pub fn digest_pad(
|
||||
&mut self,
|
||||
delim: u8,
|
||||
suffix: &[u8],
|
||||
mut compress: impl FnMut(&Block<BlockSize>),
|
||||
) {
|
||||
if suffix.len() > BlockSize::USIZE {
|
||||
panic!("suffix is too long");
|
||||
}
|
||||
let pos = self.get_pos();
|
||||
self.buffer[pos] = delim;
|
||||
for b in &mut self.buffer[pos + 1..] {
|
||||
*b = 0;
|
||||
}
|
||||
|
||||
let n = self.size() - suffix.len();
|
||||
if self.size() - pos - 1 < suffix.len() {
|
||||
compress(&self.buffer);
|
||||
let mut block = Block::<BlockSize>::default();
|
||||
block[n..].copy_from_slice(suffix);
|
||||
compress(&block);
|
||||
} else {
|
||||
self.buffer[n..].copy_from_slice(suffix);
|
||||
compress(&self.buffer);
|
||||
}
|
||||
self.set_pos_unchecked(0)
|
||||
}
|
||||
|
||||
/// Pad message with 0x80, zeros and 64-bit message length using
|
||||
/// big-endian byte order.
|
||||
#[inline]
|
||||
pub fn len64_padding_be(&mut self, data_len: u64, compress: impl FnMut(&Block<BlockSize>)) {
|
||||
self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
|
||||
}
|
||||
|
||||
/// Pad message with 0x80, zeros and 64-bit message length using
|
||||
/// little-endian byte order.
|
||||
#[inline]
|
||||
pub fn len64_padding_le(&mut self, data_len: u64, compress: impl FnMut(&Block<BlockSize>)) {
|
||||
self.digest_pad(0x80, &data_len.to_le_bytes(), compress);
|
||||
}
|
||||
|
||||
/// Pad message with 0x80, zeros and 128-bit message length using
|
||||
/// big-endian byte order.
|
||||
#[inline]
|
||||
pub fn len128_padding_be(&mut self, data_len: u128, compress: impl FnMut(&Block<BlockSize>)) {
|
||||
self.digest_pad(0x80, &data_len.to_be_bytes(), compress);
|
||||
}
|
||||
}
|
||||
|
||||
/// Split message into mutable slice of parallel blocks, blocks, and leftover bytes.
|
||||
#[inline(always)]
|
||||
fn to_blocks_mut<N: ArrayLength<u8>>(data: &mut [u8]) -> (&mut [Block<N>], &mut [u8]) {
|
||||
let nb = data.len() / N::USIZE;
|
||||
let (left, right) = data.split_at_mut(nb * N::USIZE);
|
||||
let p = left.as_mut_ptr() as *mut Block<N>;
|
||||
// SAFETY: we guarantee that `blocks` does not point outside of `data`, and `p` is valid for
|
||||
// mutation
|
||||
let blocks = unsafe { slice::from_raw_parts_mut(p, nb) };
|
||||
(blocks, right)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,67 @@
|
|||
use super::{ArrayLength, Block};
|
||||
use core::slice;
|
||||
|
||||
/// Sealed trait for buffer kinds.
|
||||
pub trait Sealed {
|
||||
/// Invariant guaranteed by a buffer kind, i.e. with correct
|
||||
/// buffer code this function always returns true.
|
||||
fn invariant(pos: usize, block_size: usize) -> bool;
|
||||
|
||||
/// Split input data into slice fo blocks and tail.
|
||||
fn split_blocks<N: ArrayLength<u8>>(data: &[u8]) -> (&[Block<N>], &[u8]);
|
||||
}
|
||||
|
||||
impl Sealed for super::Eager {
|
||||
#[inline(always)]
|
||||
fn invariant(pos: usize, block_size: usize) -> bool {
|
||||
pos < block_size
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn split_blocks<N: ArrayLength<u8>>(data: &[u8]) -> (&[Block<N>], &[u8]) {
|
||||
let nb = data.len() / N::USIZE;
|
||||
let blocks_len = nb * N::USIZE;
|
||||
let tail_len = data.len() - blocks_len;
|
||||
// SAFETY: we guarantee that created slices do not point
|
||||
// outside of `data`
|
||||
unsafe {
|
||||
let blocks_ptr = data.as_ptr() as *const Block<N>;
|
||||
let tail_ptr = data.as_ptr().add(blocks_len);
|
||||
(
|
||||
slice::from_raw_parts(blocks_ptr, nb),
|
||||
slice::from_raw_parts(tail_ptr, tail_len),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Sealed for super::Lazy {
|
||||
#[inline(always)]
|
||||
fn invariant(pos: usize, block_size: usize) -> bool {
|
||||
pos <= block_size
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn split_blocks<N: ArrayLength<u8>>(data: &[u8]) -> (&[Block<N>], &[u8]) {
|
||||
if data.is_empty() {
|
||||
return (&[], &[]);
|
||||
}
|
||||
let (nb, tail_len) = if data.len() % N::USIZE == 0 {
|
||||
(data.len() / N::USIZE - 1, N::USIZE)
|
||||
} else {
|
||||
let nb = data.len() / N::USIZE;
|
||||
(nb, data.len() - nb * N::USIZE)
|
||||
};
|
||||
let blocks_len = nb * N::USIZE;
|
||||
// SAFETY: we guarantee that created slices do not point
|
||||
// outside of `data`
|
||||
unsafe {
|
||||
let blocks_ptr = data.as_ptr() as *const Block<N>;
|
||||
let tail_ptr = data.as_ptr().add(blocks_len);
|
||||
(
|
||||
slice::from_raw_parts(blocks_ptr, nb),
|
||||
slice::from_raw_parts(tail_ptr, tail_len),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,188 @@
|
|||
use block_buffer::{
|
||||
generic_array::typenum::{U10, U16, U24, U4, U8},
|
||||
Block, EagerBuffer, LazyBuffer,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_eager_digest_pad() {
|
||||
let mut buf = EagerBuffer::<U4>::default();
|
||||
let inputs = [
|
||||
&b"01234567"[..],
|
||||
&b"89"[..],
|
||||
&b"abcdefghij"[..],
|
||||
&b"klmnopqrs"[..],
|
||||
&b"tuv"[..],
|
||||
&b"wx"[..],
|
||||
];
|
||||
let exp_blocks = [
|
||||
(0, &[b"0123", b"4567"][..]),
|
||||
(2, &[b"89ab"][..]),
|
||||
(2, &[b"cdef", b"ghij"][..]),
|
||||
(3, &[b"klmn", b"opqr"][..]),
|
||||
(4, &[b"stuv"][..]),
|
||||
];
|
||||
let exp_poses = [0, 2, 0, 1, 0, 2];
|
||||
|
||||
let mut n = 0;
|
||||
for (i, input) in inputs.iter().enumerate() {
|
||||
buf.digest_blocks(input, |b| {
|
||||
let (j, exp) = exp_blocks[n];
|
||||
n += 1;
|
||||
assert_eq!(i, j);
|
||||
assert_eq!(b.len(), exp.len());
|
||||
assert!(b.iter().zip(exp.iter()).all(|v| v.0[..] == v.1[..]));
|
||||
});
|
||||
assert_eq!(exp_poses[i], buf.get_pos());
|
||||
}
|
||||
assert_eq!(buf.pad_with_zeros()[..], b"wx\0\0"[..]);
|
||||
assert_eq!(buf.get_pos(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lazy_digest_pad() {
|
||||
let mut buf = LazyBuffer::<U4>::default();
|
||||
let inputs = [
|
||||
&b"01234567"[..],
|
||||
&b"89"[..],
|
||||
&b"abcdefghij"[..],
|
||||
&b"klmnopqrs"[..],
|
||||
];
|
||||
let expected = [
|
||||
(0, &[b"0123"][..]),
|
||||
(1, &[b"4567"][..]),
|
||||
(2, &[b"89ab"][..]),
|
||||
(2, &[b"cdef"][..]),
|
||||
(3, &[b"ghij"][..]),
|
||||
(3, &[b"klmn", b"opqr"][..]),
|
||||
];
|
||||
let exp_poses = [4, 2, 4, 1];
|
||||
|
||||
let mut n = 0;
|
||||
for (i, input) in inputs.iter().enumerate() {
|
||||
buf.digest_blocks(input, |b| {
|
||||
let (j, exp) = expected[n];
|
||||
n += 1;
|
||||
assert_eq!(i, j);
|
||||
assert_eq!(b.len(), exp.len());
|
||||
assert!(b.iter().zip(exp.iter()).all(|v| v.0[..] == v.1[..]));
|
||||
});
|
||||
assert_eq!(exp_poses[i], buf.get_pos());
|
||||
}
|
||||
assert_eq!(buf.pad_with_zeros()[..], b"s\0\0\0"[..]);
|
||||
assert_eq!(buf.get_pos(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_eager_set_data() {
|
||||
let mut buf = EagerBuffer::<U4>::default();
|
||||
|
||||
let mut n = 0u8;
|
||||
let mut gen = |blocks: &mut [Block<U4>]| {
|
||||
for block in blocks {
|
||||
block.iter_mut().for_each(|b| *b = n);
|
||||
n += 1;
|
||||
}
|
||||
};
|
||||
|
||||
let mut out = [0u8; 6];
|
||||
buf.set_data(&mut out, &mut gen);
|
||||
assert_eq!(out, [0, 0, 0, 0, 1, 1]);
|
||||
assert_eq!(buf.get_pos(), 2);
|
||||
|
||||
let mut out = [0u8; 3];
|
||||
buf.set_data(&mut out, &mut gen);
|
||||
assert_eq!(out, [1, 1, 2]);
|
||||
assert_eq!(buf.get_pos(), 1);
|
||||
|
||||
let mut out = [0u8; 3];
|
||||
buf.set_data(&mut out, &mut gen);
|
||||
assert_eq!(out, [2, 2, 2]);
|
||||
assert_eq!(n, 3);
|
||||
assert_eq!(buf.get_pos(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[rustfmt::skip]
|
||||
fn test_eager_paddings() {
|
||||
let mut buf_be = EagerBuffer::<U8>::new(&[0x42]);
|
||||
let mut buf_le = buf_be.clone();
|
||||
let mut out_be = Vec::<u8>::new();
|
||||
let mut out_le = Vec::<u8>::new();
|
||||
let len = 0x0001_0203_0405_0607;
|
||||
buf_be.len64_padding_be(len, |block| out_be.extend(block));
|
||||
buf_le.len64_padding_le(len, |block| out_le.extend(block));
|
||||
|
||||
assert_eq!(
|
||||
out_be,
|
||||
[
|
||||
0x42, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
||||
],
|
||||
);
|
||||
assert_eq!(
|
||||
out_le,
|
||||
[
|
||||
0x42, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00,
|
||||
],
|
||||
);
|
||||
|
||||
let mut buf_be = EagerBuffer::<U10>::new(&[0x42]);
|
||||
let mut buf_le = buf_be.clone();
|
||||
let mut out_be = Vec::<u8>::new();
|
||||
let mut out_le = Vec::<u8>::new();
|
||||
buf_be.len64_padding_be(len, |block| out_be.extend(block));
|
||||
buf_le.len64_padding_le(len, |block| out_le.extend(block));
|
||||
|
||||
assert_eq!(
|
||||
out_be,
|
||||
[0x42, 0x80, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07],
|
||||
);
|
||||
assert_eq!(
|
||||
out_le,
|
||||
[0x42, 0x80, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00],
|
||||
);
|
||||
|
||||
let mut buf = EagerBuffer::<U16>::new(&[0x42]);
|
||||
let mut out = Vec::<u8>::new();
|
||||
let len = 0x0001_0203_0405_0607_0809_0a0b_0c0d_0e0f;
|
||||
buf.len128_padding_be(len, |block| out.extend(block));
|
||||
assert_eq!(
|
||||
out,
|
||||
[
|
||||
0x42, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
||||
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
|
||||
],
|
||||
);
|
||||
|
||||
let mut buf = EagerBuffer::<U24>::new(&[0x42]);
|
||||
let mut out = Vec::<u8>::new();
|
||||
let len = 0x0001_0203_0405_0607_0809_0a0b_0c0d_0e0f;
|
||||
buf.len128_padding_be(len, |block| out.extend(block));
|
||||
assert_eq!(
|
||||
out,
|
||||
[
|
||||
0x42, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
||||
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
||||
0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
|
||||
],
|
||||
);
|
||||
|
||||
let mut buf = EagerBuffer::<U4>::new(&[0x42]);
|
||||
let mut out = Vec::<u8>::new();
|
||||
buf.digest_pad(0xff, &[0x10, 0x11, 0x12], |block| out.extend(block));
|
||||
assert_eq!(
|
||||
out,
|
||||
[0x42, 0xff, 0x00, 0x00, 0x00, 0x10, 0x11, 0x12],
|
||||
);
|
||||
|
||||
let mut buf = EagerBuffer::<U4>::new(&[0x42]);
|
||||
let mut out = Vec::<u8>::new();
|
||||
buf.digest_pad(0xff, &[0x10, 0x11], |block| out.extend(block));
|
||||
assert_eq!(
|
||||
out,
|
||||
[0x42, 0xff, 0x10, 0x11],
|
||||
);
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
{"files":{"Cargo.toml":"f895c4794f1c00f5f01376dae2b66870d939a01b9a366ee4d177b6b7ad99bd4a","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"d5c22aa3118d240e877ad41c5d9fa232f9c77d757d4aac0c2f943afc0a95e0ef","src/lib.rs":"962c90d43c7c2761b3c96fab2713ae357a7a8d2d048e37ce4f3354941ad41b97"},"package":"fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5"}
|
|
@ -1,26 +0,0 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "block-padding"
|
||||
version = "0.1.5"
|
||||
authors = ["RustCrypto Developers"]
|
||||
description = "Padding and unpadding of messages divided into blocks."
|
||||
documentation = "https://docs.rs/block-padding"
|
||||
keywords = ["padding", "pkcs7", "ansix923", "iso7816"]
|
||||
categories = ["cryptography", "no-std"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/RustCrypto/utils"
|
||||
[dependencies.byte-tools]
|
||||
version = "0.3"
|
||||
[badges.travis-ci]
|
||||
repository = "RustCrypto/utils"
|
|
@ -1,323 +0,0 @@
|
|||
//! Padding and unpadding of messages divided into blocks.
|
||||
//!
|
||||
//! This crate provides `Padding` trait which provides padding and unpadding
|
||||
//! operations. Additionally several common padding schemes are available out
|
||||
//! of the box.
|
||||
#![no_std]
|
||||
#![doc(html_logo_url =
|
||||
"https://raw.githubusercontent.com/RustCrypto/meta/master/logo_small.png")]
|
||||
extern crate byte_tools;
|
||||
|
||||
use byte_tools::{zero, set};
|
||||
|
||||
/// Error for indicating failed padding operation
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct PadError;
|
||||
|
||||
/// Error for indicating failed unpadding operation
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct UnpadError;
|
||||
|
||||
/// Trait for padding messages divided into blocks
|
||||
pub trait Padding {
|
||||
/// Pads `block` filled with data up to `pos`.
|
||||
///
|
||||
/// `pos` should be inside of the block and block must not be full, i.e.
|
||||
/// `pos < block.len()` must be true. Otherwise method will return
|
||||
/// `PadError`. Some potentially irreversible padding schemes can allow
|
||||
/// padding of the full block, in this case aforementioned condition is
|
||||
/// relaxed to `pos <= block.len()`.
|
||||
fn pad_block(block: &mut [u8], pos: usize) -> Result<(), PadError>;
|
||||
|
||||
/// Pads message with length `pos` in the provided buffer.
|
||||
///
|
||||
/// `&buf[..pos]` is perceived as the message, the buffer must contain
|
||||
/// enough leftover space for padding: `block_size - (pos % block_size)`
|
||||
/// extra bytes must be available. Otherwise method will return
|
||||
/// `PadError`.
|
||||
fn pad(buf: &mut [u8], pos: usize, block_size: usize)
|
||||
-> Result<&mut [u8], PadError>
|
||||
{
|
||||
let bs = block_size * (pos / block_size);
|
||||
if buf.len() < bs || buf.len() - bs < block_size { Err(PadError)? }
|
||||
Self::pad_block(&mut buf[bs..bs+block_size], pos - bs)?;
|
||||
Ok(&mut buf[..bs+block_size])
|
||||
}
|
||||
|
||||
/// Unpad given `data` by truncating it according to the used padding.
|
||||
/// In case of the malformed padding will return `UnpadError`
|
||||
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError>;
|
||||
}
|
||||
|
||||
/// Pad block with zeros.
|
||||
///
|
||||
/// ```
|
||||
/// use block_padding::{ZeroPadding, Padding};
|
||||
///
|
||||
/// let msg = b"test";
|
||||
/// let n = msg.len();
|
||||
/// let mut buffer = [0xff; 16];
|
||||
/// buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = ZeroPadding::pad(&mut buffer, n, 8).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x00\x00\x00\x00");
|
||||
/// assert_eq!(ZeroPadding::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{ZeroPadding, Padding};
|
||||
/// # let msg = b"test";
|
||||
/// # let n = msg.len();
|
||||
/// # let mut buffer = [0xff; 16];
|
||||
/// # buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = ZeroPadding::pad(&mut buffer, n, 2).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test");
|
||||
/// assert_eq!(ZeroPadding::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
///
|
||||
/// Note that zero padding may not be reversible if the original message ends
|
||||
/// with one or more zero bytes.
|
||||
pub enum ZeroPadding{}
|
||||
|
||||
impl Padding for ZeroPadding {
|
||||
fn pad_block(block: &mut [u8], pos: usize) -> Result<(), PadError> {
|
||||
if pos > block.len() { Err(PadError)? }
|
||||
zero(&mut block[pos..]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn pad(buf: &mut [u8], pos: usize, block_size: usize)
|
||||
-> Result<&mut [u8], PadError>
|
||||
{
|
||||
if pos % block_size == 0 {
|
||||
Ok(&mut buf[..pos])
|
||||
} else {
|
||||
let bs = block_size * (pos / block_size);
|
||||
let be = bs + block_size;
|
||||
if buf.len() < be { Err(PadError)? }
|
||||
Self::pad_block(&mut buf[bs..be], pos - bs)?;
|
||||
Ok(&mut buf[..be])
|
||||
}
|
||||
}
|
||||
|
||||
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
|
||||
let mut n = data.len() - 1;
|
||||
while n != 0 {
|
||||
if data[n] != 0 {
|
||||
break;
|
||||
}
|
||||
n -= 1;
|
||||
}
|
||||
Ok(&data[..n+1])
|
||||
}
|
||||
}
|
||||
|
||||
/// Pad block with bytes with value equal to the number of bytes added.
|
||||
///
|
||||
/// PKCS#7 described in the [RFC 5652](https://tools.ietf.org/html/rfc5652#section-6.3).
|
||||
///
|
||||
/// ```
|
||||
/// use block_padding::{Pkcs7, Padding};
|
||||
///
|
||||
/// let msg = b"test";
|
||||
/// let n = msg.len();
|
||||
/// let mut buffer = [0xff; 8];
|
||||
/// buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = Pkcs7::pad(&mut buffer, n, 8).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x04\x04\x04\x04");
|
||||
/// assert_eq!(Pkcs7::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{Pkcs7, Padding};
|
||||
/// # let msg = b"test";
|
||||
/// # let n = msg.len();
|
||||
/// # let mut buffer = [0xff; 8];
|
||||
/// # buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = Pkcs7::pad(&mut buffer, n, 2).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x02\x02");
|
||||
/// assert_eq!(Pkcs7::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{Pkcs7, Padding};
|
||||
/// let mut buffer = [0xff; 5];
|
||||
/// assert!(Pkcs7::pad(&mut buffer, 4, 2).is_err());
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{Pkcs7, Padding};
|
||||
/// # let buffer = [0xff; 16];
|
||||
/// assert!(Pkcs7::unpad(&buffer).is_err());
|
||||
/// ```
|
||||
///
|
||||
/// In addition to conditions stated in the `Padding` trait documentation,
|
||||
/// `pad_block` will return `PadError` if `block.len() > 255`, and in case of
|
||||
/// `pad` if `block_size > 255`.
|
||||
pub enum Pkcs7{}
|
||||
|
||||
impl Padding for Pkcs7 {
|
||||
fn pad_block(block: &mut [u8], pos: usize) -> Result<(), PadError> {
|
||||
if block.len() > 255 { Err(PadError)? }
|
||||
if pos >= block.len() { Err(PadError)? }
|
||||
let n = block.len() - pos;
|
||||
set(&mut block[pos..], n as u8);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
|
||||
if data.is_empty() { Err(UnpadError)? }
|
||||
let l = data.len();
|
||||
let n = data[l-1];
|
||||
if n == 0 || n as usize > l { Err(UnpadError)? }
|
||||
for v in &data[l-n as usize..l-1] {
|
||||
if *v != n { Err(UnpadError)? }
|
||||
}
|
||||
Ok(&data[..l - n as usize])
|
||||
}
|
||||
}
|
||||
|
||||
/// Pad block with zeros except the last byte which will be set to the number
|
||||
/// bytes.
|
||||
///
|
||||
/// ```
|
||||
/// use block_padding::{AnsiX923, Padding};
|
||||
///
|
||||
/// let msg = b"test";
|
||||
/// let n = msg.len();
|
||||
/// let mut buffer = [0xff; 16];
|
||||
/// buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = AnsiX923::pad(&mut buffer, n, 8).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x00\x00\x00\x04");
|
||||
/// assert_eq!(AnsiX923::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{AnsiX923, Padding};
|
||||
/// # let msg = b"test";
|
||||
/// # let n = msg.len();
|
||||
/// # let mut buffer = [0xff; 16];
|
||||
/// # buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = AnsiX923::pad(&mut buffer, n, 2).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x00\x02");
|
||||
/// assert_eq!(AnsiX923::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{AnsiX923, Padding};
|
||||
/// # let buffer = [0xff; 16];
|
||||
/// assert!(AnsiX923::unpad(&buffer).is_err());
|
||||
/// ```
|
||||
///
|
||||
/// In addition to conditions stated in the `Padding` trait documentation,
|
||||
/// `pad_block` will return `PadError` if `block.len() > 255`, and in case of
|
||||
/// `pad` if `block_size > 255`.
|
||||
pub enum AnsiX923{}
|
||||
|
||||
impl Padding for AnsiX923 {
|
||||
fn pad_block(block: &mut [u8], pos: usize) -> Result<(), PadError>{
|
||||
if block.len() > 255 { Err(PadError)? }
|
||||
if pos >= block.len() { Err(PadError)? }
|
||||
let bs = block.len();
|
||||
zero(&mut block[pos..bs-1]);
|
||||
block[bs-1] = (bs - pos) as u8;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
|
||||
if data.is_empty() { Err(UnpadError)? }
|
||||
let l = data.len();
|
||||
let n = data[l-1] as usize;
|
||||
if n == 0 || n > l {
|
||||
return Err(UnpadError)
|
||||
}
|
||||
for v in &data[l-n..l-1] {
|
||||
if *v != 0 { Err(UnpadError)? }
|
||||
}
|
||||
Ok(&data[..l-n])
|
||||
}
|
||||
}
|
||||
|
||||
/// Pad block with byte sequence `\x80 00...00 00`.
|
||||
///
|
||||
/// ```
|
||||
/// use block_padding::{Iso7816, Padding};
|
||||
///
|
||||
/// let msg = b"test";
|
||||
/// let n = msg.len();
|
||||
/// let mut buffer = [0xff; 16];
|
||||
/// buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = Iso7816::pad(&mut buffer, n, 8).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x80\x00\x00\x00");
|
||||
/// assert_eq!(Iso7816::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{Iso7816, Padding};
|
||||
/// # let msg = b"test";
|
||||
/// # let n = msg.len();
|
||||
/// # let mut buffer = [0xff; 16];
|
||||
/// # buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = Iso7816::pad(&mut buffer, n, 2).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test\x80\x00");
|
||||
/// assert_eq!(Iso7816::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
pub enum Iso7816{}
|
||||
|
||||
impl Padding for Iso7816 {
|
||||
fn pad_block(block: &mut [u8], pos: usize) -> Result<(), PadError> {
|
||||
if pos >= block.len() { Err(PadError)? }
|
||||
block[pos] = 0x80;
|
||||
zero(&mut block[pos+1..]);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
|
||||
if data.is_empty() { Err(UnpadError)? }
|
||||
let mut n = data.len() - 1;
|
||||
while n != 0 {
|
||||
if data[n] != 0 { break; }
|
||||
n -= 1;
|
||||
}
|
||||
if data[n] != 0x80 { Err(UnpadError)? }
|
||||
Ok(&data[..n])
|
||||
}
|
||||
}
|
||||
|
||||
/// Don't pad the data. Useful for key wrapping. Padding will fail if the data cannot be
|
||||
/// fitted into blocks without padding.
|
||||
///
|
||||
/// ```
|
||||
/// use block_padding::{NoPadding, Padding};
|
||||
///
|
||||
/// let msg = b"test";
|
||||
/// let n = msg.len();
|
||||
/// let mut buffer = [0xff; 16];
|
||||
/// buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = NoPadding::pad(&mut buffer, n, 4).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test");
|
||||
/// assert_eq!(NoPadding::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
/// ```
|
||||
/// # use block_padding::{NoPadding, Padding};
|
||||
/// # let msg = b"test";
|
||||
/// # let n = msg.len();
|
||||
/// # let mut buffer = [0xff; 16];
|
||||
/// # buffer[..n].copy_from_slice(msg);
|
||||
/// let padded_msg = NoPadding::pad(&mut buffer, n, 2).unwrap();
|
||||
/// assert_eq!(padded_msg, b"test");
|
||||
/// assert_eq!(NoPadding::unpad(&padded_msg).unwrap(), msg);
|
||||
/// ```
|
||||
pub enum NoPadding {}
|
||||
|
||||
impl Padding for NoPadding {
|
||||
fn pad_block(block: &mut [u8], pos: usize) -> Result<(), PadError> {
|
||||
if pos % block.len() != 0 {
|
||||
Err(PadError)?
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn pad(buf: &mut [u8], pos: usize, block_size: usize) -> Result<&mut [u8], PadError> {
|
||||
if pos % block_size != 0 {
|
||||
Err(PadError)?
|
||||
}
|
||||
Ok(&mut buf[..pos])
|
||||
}
|
||||
|
||||
fn unpad(data: &[u8]) -> Result<&[u8], UnpadError> {
|
||||
Ok(data)
|
||||
}
|
||||
}
|
|
@ -1 +0,0 @@
|
|||
{"files":{"Cargo.toml":"0542f7bd3605206297b6708b28bfff87a2ef7fa65ccab1474ddb53072600d7e1","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"d5c22aa3118d240e877ad41c5d9fa232f9c77d757d4aac0c2f943afc0a95e0ef","src/lib.rs":"c8fb45d44cae0b1094bf8cef5059827b28b490eb14a3865fa536b61241c2c8c4"},"package":"e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7"}
|
|
@ -1,21 +0,0 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g. crates.io) dependencies
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
|
||||
[package]
|
||||
name = "byte-tools"
|
||||
version = "0.3.1"
|
||||
authors = ["RustCrypto Developers"]
|
||||
description = "Bytes related utility functions"
|
||||
documentation = "https://docs.rs/byte-tools"
|
||||
keywords = ["bytes"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/RustCrypto/utils"
|
|
@ -1,25 +0,0 @@
|
|||
Copyright (c) 2018-2019 The RustCrypto Project Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@ -1,29 +0,0 @@
|
|||
#![no_std]
|
||||
use core::ptr;
|
||||
|
||||
/// Copy bytes from `src` to `dst`
|
||||
///
|
||||
/// Panics if `dst.len() < src.len()`
|
||||
#[inline(always)]
|
||||
pub fn copy(src: &[u8], dst: &mut [u8]) {
|
||||
assert!(dst.len() >= src.len());
|
||||
unsafe {
|
||||
ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
|
||||
}
|
||||
}
|
||||
|
||||
/// Zero all bytes in `dst`
|
||||
#[inline(always)]
|
||||
pub fn zero(dst: &mut [u8]) {
|
||||
unsafe {
|
||||
ptr::write_bytes(dst.as_mut_ptr(), 0, dst.len());
|
||||
}
|
||||
}
|
||||
|
||||
/// Sets all bytes in `dst` equal to `value`
|
||||
#[inline(always)]
|
||||
pub fn set(dst: &mut [u8], value: u8) {
|
||||
unsafe {
|
||||
ptr::write_bytes(dst.as_mut_ptr(), value, dst.len());
|
||||
}
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
{"files":{"CHANGELOG.md":"5f46ca70ecce0cf195145e4db3b3d447ec85b31f406f889f3d17e3ccd551471e","Cargo.toml":"aadb3ec41bd47386bae5d98e6adc2da07889e299083d7fb6dcfe53348958c495","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"904801faf3f1850328af8e1aa1047b9190cc22ed40df5c87f2d93d17f847ef67","README.md":"c991281c8d1525279b90dffd452bd731116eebc456ee3a23b9c997df402e8bf7","src/aarch64.rs":"697e8048929cbee0248657da956e8cc627d59ee3fc96729068fec6e7b089b7b6","src/lib.rs":"d3435ac0ff9f264baaa49db98ab954495beb6263cfc106d520ad6081a1d439b1","src/x86.rs":"45926715bc5dec0d9b37de0f0409c2c0b578dc3f7ac51f10e58cbc87cf3dcd9f","tests/aarch64.rs":"bdabbe67316c128b57003ba5faa07707b5f339b1f3e984da4bc383cc93c2bedd","tests/x86.rs":"fcf476ca6ebd0845ab547cea4fe40c2ba2a2324c024264d9a86f666586f3a480"},"package":"59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b"}
|
|
@ -0,0 +1,70 @@
|
|||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## 0.2.2 (2022-03-18)
|
||||
### Added
|
||||
- Support for Android on `aarch64` ([#752])
|
||||
|
||||
### Removed
|
||||
- Vestigial code around `crypto` target feature ([#600])
|
||||
|
||||
[#600]: https://github.com/RustCrypto/utils/pull/600
|
||||
[#752]: https://github.com/RustCrypto/utils/pull/752
|
||||
|
||||
## 0.2.1 (2021-08-26)
|
||||
### Changed
|
||||
- Revert [#583] "Use from_bytes_with_nul for string check" ([#597])
|
||||
|
||||
[#583]: https://github.com/RustCrypto/utils/pull/583
|
||||
[#597]: https://github.com/RustCrypto/utils/pull/597
|
||||
|
||||
## 0.2.0 (2021-08-26) [YANKED]
|
||||
### Removed
|
||||
- AArch64 `crypto` target feature ([#594])
|
||||
|
||||
[#594]: https://github.com/RustCrypto/utils/pull/594
|
||||
|
||||
## 0.1.5 (2021-06-21)
|
||||
### Added
|
||||
- iOS support ([#435], [#501])
|
||||
|
||||
### Changed
|
||||
- Map `aarch64` HWCAPs to target features; add `crypto` ([#456])
|
||||
|
||||
[#435]: https://github.com/RustCrypto/utils/pull/435
|
||||
[#456]: https://github.com/RustCrypto/utils/pull/456
|
||||
[#501]: https://github.com/RustCrypto/utils/pull/501
|
||||
|
||||
## 0.1.4 (2021-05-14)
|
||||
### Added
|
||||
- Support compiling on non-Linux/macOS aarch64 targets ([#408])
|
||||
|
||||
[#408]: https://github.com/RustCrypto/utils/pull/408
|
||||
|
||||
## 0.1.3 (2021-05-13)
|
||||
### Removed
|
||||
- `neon` on `aarch64` targets: already enabled by default ([#406])
|
||||
|
||||
[#406]: https://github.com/RustCrypto/utils/pull/406
|
||||
|
||||
## 0.1.2 (2021-05-13) [YANKED]
|
||||
### Added
|
||||
- `neon` feature detection on `aarch64` targets ([#403])
|
||||
|
||||
### Fixed
|
||||
- Support for `musl`-based targets ([#403])
|
||||
|
||||
[#403]: https://github.com/RustCrypto/utils/pull/403
|
||||
|
||||
## 0.1.1 (2021-05-06)
|
||||
### Added
|
||||
- `aarch64` support for Linux and macOS/M4 targets ([#393])
|
||||
|
||||
[#393]: https://github.com/RustCrypto/utils/pull/393
|
||||
|
||||
## 0.1.0 (2021-04-29)
|
||||
- Initial release
|
|
@ -0,0 +1,29 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "cpufeatures"
|
||||
version = "0.2.2"
|
||||
authors = ["RustCrypto Developers"]
|
||||
description = "Lightweight runtime CPU feature detection for x86/x86_64 and aarch64 with\nno_std support and support for mobile targets including Android and iOS\n"
|
||||
documentation = "https://docs.rs/cpufeatures"
|
||||
readme = "README.md"
|
||||
keywords = ["cpuid", "target-feature"]
|
||||
categories = ["no-std"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/RustCrypto/utils"
|
||||
[target.aarch64-apple-darwin.dependencies.libc]
|
||||
version = "0.2.68"
|
||||
[target.aarch64-linux-android.dependencies.libc]
|
||||
version = "0.2.68"
|
||||
[target."cfg(all(target_arch = \"aarch64\", target_os = \"linux\"))".dependencies.libc]
|
||||
version = "0.2.68"
|
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2018-2019 The RustCrypto Project Developers
|
||||
Copyright (c) 2020 The RustCrypto Project Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
|
@ -0,0 +1,90 @@
|
|||
# [RustCrypto]: CPU Feature Detection
|
||||
|
||||
[![crate][crate-image]][crate-link]
|
||||
[![Docs][docs-image]][docs-link]
|
||||
![Apache2/MIT licensed][license-image]
|
||||
![Rust Version][rustc-image]
|
||||
[![Project Chat][chat-image]][chat-link]
|
||||
[![Build Status][build-image]][build-link]
|
||||
|
||||
Lightweight and efficient runtime CPU feature detection for `aarch64` and
|
||||
`x86`/`x86_64` targets.
|
||||
|
||||
Supports `no_std` as well as mobile targets including iOS and Android,
|
||||
providing an alternative to the `std`-dependent `is_x86_feature_detected!`
|
||||
macro.
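
As a quick illustration (a minimal sketch; the module name `cpuid_aes` below is
arbitrary):

```rust
// Creates a `cpuid_aes` module with cached runtime detection for AES support.
cpufeatures::new!(cpuid_aes, "aes");

fn main() {
    // `init_get` runs detection once, caches the result, and returns it.
    let (token, has_aes) = cpuid_aes::init_get();
    assert_eq!(has_aes, token.get());
    println!("AES extensions available: {}", has_aes);
}
```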
|
||||
|
||||
[Documentation][docs-link]
|
||||
|
||||
## Supported architectures
|
||||
|
||||
### `aarch64`: Android, iOS, Linux, and macOS/M4 only
|
||||
|
||||
Note: ARM64 does not support OS-independent feature detection, so support must
|
||||
be implemented on an OS-by-OS basis.
|
||||
|
||||
Target features:
|
||||
|
||||
- `aes`
|
||||
- `sha2`
|
||||
- `sha3`
|
||||
|
||||
Note: please open a GitHub Issue to request support for additional features.
|
||||
|
||||
### `x86`/`x86_64`: OS independent and `no_std`-friendly
|
||||
|
||||
Target features:
|
||||
|
||||
- `adx`
|
||||
- `aes`
|
||||
- `avx`
|
||||
- `avx2`
|
||||
- `bmi1`
|
||||
- `bmi2`
|
||||
- `fma`
|
||||
- `mmx`
|
||||
- `pclmulqdq`
|
||||
- `popcnt`
|
||||
- `rdrand`
|
||||
- `rdseed`
|
||||
- `sgx`
|
||||
- `sha`
|
||||
- `sse`
|
||||
- `sse2`
|
||||
- `sse3`
|
||||
- `sse4.1`
|
||||
- `sse4.2`
|
||||
- `ssse3`
|
||||
|
||||
## License
|
||||
|
||||
Licensed under either of:
|
||||
|
||||
* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
|
||||
* [MIT license](http://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
|
||||
dual licensed as above, without any additional terms or conditions.
|
||||
|
||||
[//]: # (badges)
|
||||
|
||||
[crate-image]: https://img.shields.io/crates/v/cpufeatures.svg
|
||||
[crate-link]: https://crates.io/crates/cpufeatures
|
||||
[docs-image]: https://docs.rs/cpufeatures/badge.svg
|
||||
[docs-link]: https://docs.rs/cpufeatures/
|
||||
[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
|
||||
[rustc-image]: https://img.shields.io/badge/rustc-1.40+-blue.svg
|
||||
[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
|
||||
[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/260052-utils
|
||||
[build-image]: https://github.com/RustCrypto/utils/workflows/cpufeatures/badge.svg?branch=master&event=push
|
||||
[build-link]: https://github.com/RustCrypto/utils/actions/workflows/cpufeatures.yml
|
||||
|
||||
[//]: # (general links)
|
||||
|
||||
[RustCrypto]: https://github.com/rustcrypto
|
||||
[RustCrypto/utils#378]: https://github.com/RustCrypto/utils/issues/378
|
|
@ -0,0 +1,182 @@
|
|||
//! ARM64 CPU feature detection support.
|
||||
//!
|
||||
//! Unfortunately ARM instructions to detect CPU features cannot be called from
|
||||
//! unprivileged userspace code, so this implementation relies on OS-specific
|
||||
//! APIs for feature detection.
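// Illustrative sketch, not part of the original file, of the Linux path the
// macros below wrap: read AT_HWCAP via `getauxval` and test the HWCAP bits that
// back a target feature (here `sha2`).
#[cfg(all(target_arch = "aarch64", any(target_os = "linux", target_os = "android")))]
fn linux_has_sha2_sketch() -> bool {
    // SAFETY: getauxval only reads the auxiliary vector.
    let hwcaps = unsafe { libc::getauxval(libc::AT_HWCAP) };
    (hwcaps & libc::HWCAP_SHA2) != 0
}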
|
||||
|
||||
// Evaluate the given `$body` expression if any of the supplied target features
// are not enabled. Otherwise returns true.
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! __unless_target_features {
|
||||
($($tf:tt),+ => $body:expr ) => {
|
||||
{
|
||||
#[cfg(not(all($(target_feature=$tf,)*)))]
|
||||
$body
|
||||
|
||||
#[cfg(all($(target_feature=$tf,)*))]
|
||||
true
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Linux runtime detection of target CPU features using `getauxval`.
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! __detect_target_features {
|
||||
($($tf:tt),+) => {{
|
||||
let hwcaps = $crate::aarch64::getauxval_hwcap();
|
||||
$($crate::check!(hwcaps, $tf) & )+ true
|
||||
}};
|
||||
}
|
||||
|
||||
/// Linux helper function for calling `getauxval` to get `AT_HWCAP`.
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
pub fn getauxval_hwcap() -> u64 {
|
||||
unsafe { libc::getauxval(libc::AT_HWCAP) }
|
||||
}
|
||||
|
||||
// macOS runtime detection of target CPU features using `sysctlbyname`.
|
||||
#[cfg(target_os = "macos")]
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! __detect_target_features {
|
||||
($($tf:tt),+) => {{
|
||||
$($crate::check!($tf) & )+ true
|
||||
}};
|
||||
}
|
||||
|
||||
// Linux `expand_check_macro`
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
macro_rules! __expand_check_macro {
|
||||
($(($name:tt, $hwcap:ident)),* $(,)?) => {
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! check {
|
||||
$(
|
||||
($hwcaps:expr, $name) => {
|
||||
(($hwcaps & $crate::aarch64::hwcaps::$hwcap) != 0)
|
||||
};
|
||||
)*
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Linux `expand_check_macro`
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
__expand_check_macro! {
|
||||
("aes", AES), // Enable AES support.
|
||||
("sha2", SHA2), // Enable SHA1 and SHA256 support.
|
||||
("sha3", SHA3), // Enable SHA512 and SHA3 support.
|
||||
}
|
||||
|
||||
/// Linux hardware capabilities mapped to target features.
|
||||
///
|
||||
/// Note that LLVM target features are coarser grained than what Linux supports
|
||||
/// and imply more capabilities under each feature. This module attempts to
|
||||
/// provide that mapping accordingly.
|
||||
///
|
||||
/// See this issue for more info: <https://github.com/RustCrypto/utils/issues/395>
|
||||
#[cfg(any(target_os = "linux", target_os = "android"))]
|
||||
pub mod hwcaps {
|
||||
use libc::c_ulong;
|
||||
|
||||
pub const AES: c_ulong = libc::HWCAP_AES | libc::HWCAP_PMULL;
|
||||
pub const SHA2: c_ulong = libc::HWCAP_SHA2;
|
||||
pub const SHA3: c_ulong = libc::HWCAP_SHA3 | libc::HWCAP_SHA512;
|
||||
}
|
||||
|
||||
// macOS `check!` macro.
|
||||
//
|
||||
// NOTE: several of these instructions (e.g. `aes`, `sha2`) can be assumed to
|
||||
// be present on all Apple ARM64 hardware.
|
||||
//
|
||||
// Newer CPU instructions have nodes within sysctl's `hw.optional` namespace;
// however, the ones that do not can safely be assumed to be present on all
// Apple ARM64 devices, now and for the foreseeable future.
|
||||
//
|
||||
// See discussion on this issue for more information:
|
||||
// <https://github.com/RustCrypto/utils/issues/378>
|
||||
#[cfg(target_os = "macos")]
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! check {
|
||||
("aes") => {
|
||||
true
|
||||
};
|
||||
("sha2") => {
|
||||
true
|
||||
};
|
||||
("sha3") => {
|
||||
unsafe {
|
||||
// `sha3` target feature implies SHA-512 as well
|
||||
$crate::aarch64::sysctlbyname(b"hw.optional.armv8_2_sha512\0")
|
||||
&& $crate::aarch64::sysctlbyname(b"hw.optional.armv8_2_sha3\0")
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// macOS helper function for calling `sysctlbyname`.
|
||||
#[cfg(target_os = "macos")]
|
||||
pub unsafe fn sysctlbyname(name: &[u8]) -> bool {
|
||||
assert_eq!(
|
||||
name.last().cloned(),
|
||||
Some(0),
|
||||
"name is not NUL terminated: {:?}",
|
||||
name
|
||||
);
|
||||
|
||||
let mut value: u32 = 0;
|
||||
let mut size = core::mem::size_of::<u32>();
|
||||
|
||||
let rc = libc::sysctlbyname(
|
||||
name.as_ptr() as *const i8,
|
||||
&mut value as *mut _ as *mut libc::c_void,
|
||||
&mut size,
|
||||
core::ptr::null_mut(),
|
||||
0,
|
||||
);
|
||||
|
||||
assert_eq!(size, 4, "unexpected sysctlbyname(3) result size");
|
||||
assert_eq!(rc, 0, "sysctlbyname returned error code: {}", rc);
|
||||
value != 0
|
||||
}
|
||||
|
||||
// iOS `check!` macro.
|
||||
//
|
||||
// Unfortunately iOS does not provide access to the `sysctl(3)` API which means
|
||||
// we can only return static values for CPU features which can be assumed to
|
||||
// be present on all Apple ARM64 hardware.
|
||||
//
|
||||
// See discussion on this issue for more information:
|
||||
// <https://github.com/RustCrypto/utils/issues/378>
|
||||
#[cfg(target_os = "ios")]
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! check {
|
||||
("aes") => {
|
||||
true
|
||||
};
|
||||
("sha2") => {
|
||||
true
|
||||
};
|
||||
("sha3") => {
|
||||
false
|
||||
};
|
||||
}
|
||||
|
||||
// On other targets, runtime CPU feature detection is unavailable
|
||||
#[cfg(not(any(
|
||||
target_os = "ios",
|
||||
target_os = "linux",
|
||||
target_os = "android",
|
||||
target_os = "macos"
|
||||
)))]
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! __detect_target_features {
|
||||
($($tf:tt),+) => {
|
||||
false
|
||||
};
|
||||
}
|
|
@ -0,0 +1,135 @@
|
|||
//! This crate provides macros for runtime CPU feature detection. It's intended
//! as a stopgap until Rust [RFC 2725], which adds first-class target feature
//! detection macros to `libcore`, is implemented.
|
||||
//!
|
||||
//! Supported target architectures:
|
||||
//! - `aarch64`: Linux and macOS/M4 only (ARM64 does not support OS-independent feature detection)
|
||||
//! - Target features: `aes`, `sha2`, `sha3`
|
||||
//! - `x86`/`x86_64`: OS independent and `no_std`-friendly
|
||||
//! - Target features: `adx`, `aes`, `avx`, `avx2`, `bmi1`, `bmi2`, `fma`,
|
||||
//! `mmx`, `pclmulqdq`, `popcnt`, `rdrand`, `rdseed`, `sgx`, `sha`, `sse`,
|
||||
//! `sse2`, `sse3`, `sse4.1`, `sse4.2`, `ssse3`
|
||||
//!
|
||||
//! If you would like detection support for a target feature which is not on
|
||||
//! this list, please [open a GitHub issue][gh].
|
||||
//!
|
||||
//! # Example
|
||||
//! ```
|
||||
//! # #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
//! # {
|
||||
//! // This macro creates `cpuid_aes_sha` module
|
||||
//! cpufeatures::new!(cpuid_aes_sha, "aes", "sha");
|
||||
//!
|
||||
//! // `token` is a Zero Sized Type (ZST) value, which guarantees
//! // that the underlying static storage was properly initialized,
//! // allowing the initialization branch to be omitted
|
||||
//! let token: cpuid_aes_sha::InitToken = cpuid_aes_sha::init();
|
||||
//!
|
||||
//! if token.get() {
|
||||
//! println!("CPU supports both SHA and AES extensions");
|
||||
//! } else {
|
||||
//! println!("SHA and AES extensions are not supported");
|
||||
//! }
|
||||
//!
|
||||
//! // If the stored value is needed only once, you can read it
//! // without keeping the token
|
||||
//! let val = cpuid_aes_sha::get();
|
||||
//! assert_eq!(val, token.get());
|
||||
//!
|
||||
//! // Additionally you can get both token and value
|
||||
//! let (token, val) = cpuid_aes_sha::init_get();
|
||||
//! assert_eq!(val, token.get());
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
//! Note that if all tested target features are enabled via compiler options
//! (e.g. by using `RUSTFLAGS`), the `get` method will always return `true`
//! and `init` will not use the CPUID instruction. This behavior allows the
//! compiler to completely eliminate the fallback code.
//!
//! After the first call the macro caches the result and returns it on
//! subsequent calls, so their runtime overhead is minimal.
|
||||
//!
|
||||
//! [RFC 2725]: https://github.com/rust-lang/rfcs/pull/2725
|
||||
//! [gh]: https://github.com/RustCrypto/utils/issues/new?title=cpufeatures:%20requesting%20support%20for%20CHANGEME%20target%20feature
|
||||
|
||||
#![no_std]
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg"
|
||||
)]
|
||||
|
||||
#[cfg(all(target_arch = "aarch64"))]
|
||||
#[doc(hidden)]
|
||||
pub mod aarch64;
|
||||
|
||||
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
mod x86;
|
||||
|
||||
#[cfg(not(any(target_arch = "aarch64", target_arch = "x86", target_arch = "x86_64")))]
|
||||
compile_error!("This crate works only on `aarch64`, `x86`, and `x86-64` targets.");
|
||||
|
||||
/// Create module with CPU feature detection code.
|
||||
#[macro_export]
|
||||
macro_rules! new {
|
||||
($mod_name:ident, $($tf:tt),+ $(,)?) => {
|
||||
mod $mod_name {
|
||||
use core::sync::atomic::{AtomicU8, Ordering::Relaxed};
|
||||
|
||||
const UNINIT: u8 = u8::max_value();
|
||||
static STORAGE: AtomicU8 = AtomicU8::new(UNINIT);
|
||||
|
||||
/// Initialization token
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct InitToken(());
|
||||
|
||||
impl InitToken {
|
||||
/// Get initialized value
|
||||
#[inline(always)]
|
||||
pub fn get(&self) -> bool {
|
||||
$crate::__unless_target_features! {
|
||||
$($tf),+ => {
|
||||
STORAGE.load(Relaxed) == 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Initialize underlying storage if needed and get
|
||||
/// stored value and initialization token.
|
||||
#[inline]
|
||||
pub fn init_get() -> (InitToken, bool) {
|
||||
let res = $crate::__unless_target_features! {
|
||||
$($tf),+ => {
|
||||
// Relaxed ordering is fine, as we only have a single atomic variable.
|
||||
let val = STORAGE.load(Relaxed);
|
||||
|
||||
if val == UNINIT {
|
||||
let res = $crate::__detect_target_features!($($tf),+);
|
||||
STORAGE.store(res as u8, Relaxed);
|
||||
res
|
||||
} else {
|
||||
val == 1
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
(InitToken(()), res)
|
||||
}
|
||||
|
||||
/// Initialize underlying storage if needed and get
|
||||
/// initialization token.
|
||||
#[inline]
|
||||
pub fn init() -> InitToken {
|
||||
init_get().0
|
||||
}
|
||||
|
||||
/// Initialize underlying storage if needed and get
|
||||
/// stored value.
|
||||
#[inline]
|
||||
pub fn get() -> bool {
|
||||
init_get().1
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
//! x86/x86-64 CPU feature detection support.
|
||||
//!
|
||||
//! Portable, `no_std`-friendly implementation that relies on the x86 `CPUID`
|
||||
//! instruction for feature detection.
|
||||
|
||||
// Evaluate the given `$body` expression if any of the supplied target features
// are not enabled. Otherwise returns true.
//
// The `$body` expression is not evaluated on SGX targets; the macro returns
// false on these targets unless *all* supplied target features are enabled.
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! __unless_target_features {
|
||||
($($tf:tt),+ => $body:expr ) => {{
|
||||
#[cfg(not(all($(target_feature=$tf,)*)))]
|
||||
{
|
||||
#[cfg(not(target_env = "sgx"))]
|
||||
$body
|
||||
|
||||
// CPUID is not available on SGX targets
|
||||
#[cfg(target_env = "sgx")]
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(all($(target_feature=$tf,)*))]
|
||||
true
|
||||
}};
|
||||
}
|
||||
|
||||
// Use CPUID to detect the presence of all supplied target features.
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! __detect_target_features {
|
||||
($($tf:tt),+) => {{
|
||||
#[cfg(target_arch = "x86")]
|
||||
use core::arch::x86::{__cpuid, __cpuid_count};
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
use core::arch::x86_64::{__cpuid, __cpuid_count};
|
||||
|
||||
let cr = unsafe {
|
||||
[__cpuid(1), __cpuid_count(7, 0)]
|
||||
};
|
||||
|
||||
$($crate::check!(cr, $tf) & )+ true
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! __expand_check_macro {
|
||||
($(($name:tt, $i:expr, $reg:ident, $offset:expr)),* $(,)?) => {
|
||||
#[macro_export]
|
||||
#[doc(hidden)]
|
||||
macro_rules! check {
|
||||
$(
|
||||
($cr:expr, $name) => { ($cr[$i].$reg & (1 << $offset) != 0) };
|
||||
)*
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
__expand_check_macro! {
|
||||
("mmx", 0, edx, 23),
|
||||
("sse", 0, edx, 25),
|
||||
("sse2", 0, edx, 26),
|
||||
("sse3", 0, ecx, 0),
|
||||
("pclmulqdq", 0, ecx, 1),
|
||||
("ssse3", 0, ecx, 9),
|
||||
("fma", 0, ecx, 12),
|
||||
("sse4.1", 0, ecx, 19),
|
||||
("sse4.2", 0, ecx, 20),
|
||||
("popcnt", 0, ecx, 23),
|
||||
("aes", 0, ecx, 25),
|
||||
("avx", 0, ecx, 28),
|
||||
("rdrand", 0, ecx, 30),
|
||||
("sgx", 1, ebx, 2),
|
||||
("bmi1", 1, ebx, 3),
|
||||
("avx2", 1, ebx, 5),
|
||||
("bmi2", 1, ebx, 8),
|
||||
("rdseed", 1, ebx, 18),
|
||||
("adx", 1, ebx, 19),
|
||||
("sha", 1, ebx, 29),
|
||||
}
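// Illustrative sketch, not part of the original file, of what `check!(cr, "aes")`
// expands to per the table above: CPUID leaf 1 (`cr[0]`), ECX bit 25.
#[cfg(target_arch = "x86_64")]
fn cpuid_has_aes_sketch() -> bool {
    // SAFETY: CPUID is available on all x86_64 CPUs outside of SGX enclaves.
    let cr = unsafe { core::arch::x86_64::__cpuid(1) };
    (cr.ecx & (1 << 25)) != 0
}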
|
|
@ -0,0 +1,17 @@
|
|||
//! ARM64 tests
|
||||
|
||||
#![cfg(target_arch = "aarch64")]
|
||||
|
||||
cpufeatures::new!(armcaps, "aes", "sha2", "sha3");
|
||||
|
||||
#[test]
|
||||
fn init() {
|
||||
let token: armcaps::InitToken = armcaps::init();
|
||||
assert_eq!(token.get(), armcaps::get());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn init_get() {
|
||||
let (token, val) = armcaps::init_get();
|
||||
assert_eq!(val, token.get());
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
//! x86/x86_64 tests
|
||||
|
||||
#![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
|
||||
|
||||
cpufeatures::new!(cpuid, "aes", "sha");
|
||||
|
||||
#[test]
|
||||
fn init() {
|
||||
let token: cpuid::InitToken = cpuid::init();
|
||||
assert_eq!(token.get(), cpuid::get());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn init_get() {
|
||||
let (token, val) = cpuid::init_get();
|
||||
assert_eq!(val, token.get());
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
{"files":{"CHANGELOG.md":"77f7a114f0d472c5a2eae818ddb81d3997d8d28373bf16b996d45d07ca3765e5","Cargo.toml":"557171ee4c07cad91539344fa0f87752c3104d39706ff7549af3decaac293e11","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"3521672491a3479422d5fe1aca6645dd2984090f85da6e5205abfb18fb7a6897","README.md":"3451ce2bc0b658041902a8e792023d9874700e9292eac9153c45f203430e24a0","src/lib.rs":"a6247fa4e20bc84d5a371f76c334b57e5fed3c8758976b2a29296904fb2a0007"},"package":"57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8"}
|
|
@ -0,0 +1,29 @@
|
|||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## 0.1.3 (2022-02-16)
|
||||
### Fixed
|
||||
- Minimal versions build ([#940])
|
||||
|
||||
[#940]: https://github.com/RustCrypto/traits/pull/940
|
||||
|
||||
## 0.1.2 (2022-02-10)
|
||||
### Added
|
||||
- Re-export `generic-array` and `typenum`. Enable `more_lengths` feature on
|
||||
`generic-array`. Add `key_size`, `iv_size`, `block_size`, and `output_size`
|
||||
helper methods. ([#849])
|
||||
|
||||
[#849]: https://github.com/RustCrypto/traits/pull/849
|
||||
|
||||
## 0.1.1 (2021-12-14)
|
||||
### Added
|
||||
- `rand_core` re-export and proper exposure of key/IV generation methods on docs.rs ([#847])
|
||||
|
||||
[#847]: https://github.com/RustCrypto/traits/pull/847
|
||||
|
||||
## 0.1.0 (2021-12-07)
|
||||
- Initial release
|
|
@ -0,0 +1,39 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "crypto-common"
|
||||
version = "0.1.3"
|
||||
authors = ["RustCrypto Developers"]
|
||||
description = "Common cryptographic traits"
|
||||
documentation = "https://docs.rs/crypto-common"
|
||||
readme = "README.md"
|
||||
keywords = ["crypto", "traits"]
|
||||
categories = ["cryptography", "no-std"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/RustCrypto/traits"
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
[dependencies.generic-array]
|
||||
version = "0.14.4"
|
||||
features = ["more_lengths"]
|
||||
|
||||
[dependencies.rand_core]
|
||||
version = "0.6"
|
||||
optional = true
|
||||
|
||||
[dependencies.typenum]
|
||||
version = "1.14"
|
||||
|
||||
[features]
|
||||
std = []
|
|
@ -1,4 +1,4 @@
|
|||
Copyright (c) 2018-2019 The RustCrypto Project Developers
|
||||
Copyright (c) 2021 RustCrypto Developers
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
|
@ -0,0 +1,53 @@
|
|||
# RustCrypto: Common Cryptographic Traits
|
||||
|
||||
[![crate][crate-image]][crate-link]
|
||||
[![Docs][docs-image]][docs-link]
|
||||
![Apache2/MIT licensed][license-image]
|
||||
![Rust Version][rustc-image]
|
||||
[![Project Chat][chat-image]][chat-link]
|
||||
[![Build Status][build-image]][build-link]
|
||||
|
||||
Common traits used by cryptographic algorithms. Users should generally use
|
||||
higher-level trait crates instead of this one.
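
For orientation, a minimal sketch of how a downstream crate typically wires up
these traits (`ToyCipher` and its 16-byte key size are assumptions made only
for this example):

```rust
use crypto_common::{generic_array::GenericArray, typenum::U16, Key, KeyInit, KeySizeUser};

pub struct ToyCipher {
    _key: GenericArray<u8, U16>,
}

impl KeySizeUser for ToyCipher {
    type KeySize = U16;
}

impl KeyInit for ToyCipher {
    fn new(key: &Key<Self>) -> Self {
        // `Key<Self>` is `GenericArray<u8, U16>`.
        Self { _key: key.clone() }
    }
}

fn main() {
    // The provided `new_from_slice` method checks the key length.
    assert!(ToyCipher::new_from_slice(&[0u8; 16]).is_ok());
    assert!(ToyCipher::new_from_slice(&[0u8; 8]).is_err());
}
```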
|
||||
|
||||
[Documentation][docs-link]
|
||||
|
||||
## Minimum Supported Rust Version
|
||||
|
||||
Rust **1.41** or higher.
|
||||
|
||||
Minimum supported Rust version can be changed in the future, but it will be
|
||||
done with a minor version bump.
|
||||
|
||||
## SemVer Policy
|
||||
|
||||
- All on-by-default features of this library are covered by SemVer
|
||||
- MSRV is considered exempt from SemVer as noted above
|
||||
|
||||
## License
|
||||
|
||||
Licensed under either of:
|
||||
|
||||
* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
|
||||
* [MIT license](http://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
|
||||
dual licensed as above, without any additional terms or conditions.
|
||||
|
||||
[//]: # (badges)
|
||||
|
||||
[crate-image]: https://img.shields.io/crates/v/crypto-common.svg
|
||||
[crate-link]: https://crates.io/crates/crypto-common
|
||||
[docs-image]: https://docs.rs/crypto-common/badge.svg
|
||||
[docs-link]: https://docs.rs/crypto-common/
|
||||
[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
|
||||
[rustc-image]: https://img.shields.io/badge/rustc-1.41+-blue.svg
|
||||
[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
|
||||
[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/260041-hashes
|
||||
[build-image]: https://github.com/RustCrypto/traits/workflows/crypto-common/badge.svg?branch=master&event=push
|
||||
[build-link]: https://github.com/RustCrypto/traits/actions?query=workflow%3Acrypto-common
|
|
@ -0,0 +1,300 @@
|
|||
//! Common cryptographic traits.
|
||||
|
||||
#![no_std]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_root_url = "https://docs.rs/crypto-common/0.1.3"
|
||||
)]
|
||||
#![forbid(unsafe_code)]
|
||||
#![warn(missing_docs, rust_2018_idioms)]
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
extern crate std;
|
||||
|
||||
#[cfg(feature = "rand_core")]
|
||||
pub use rand_core;
|
||||
|
||||
pub use generic_array;
|
||||
pub use generic_array::typenum;
|
||||
|
||||
use core::fmt;
|
||||
use generic_array::{typenum::Unsigned, ArrayLength, GenericArray};
|
||||
#[cfg(feature = "rand_core")]
|
||||
use rand_core::{CryptoRng, RngCore};
|
||||
|
||||
/// Block on which [`BlockSizeUser`] implementors operate.
|
||||
pub type Block<B> = GenericArray<u8, <B as BlockSizeUser>::BlockSize>;
|
||||
/// Output array of [`OutputSizeUser`] implementors.
|
||||
pub type Output<T> = GenericArray<u8, <T as OutputSizeUser>::OutputSize>;
|
||||
/// Key used by [`KeySizeUser`] implementors.
|
||||
pub type Key<B> = GenericArray<u8, <B as KeySizeUser>::KeySize>;
|
||||
/// Initialization vector (nonce) used by [`IvSizeUser`] implementors.
|
||||
pub type Iv<B> = GenericArray<u8, <B as IvSizeUser>::IvSize>;
|
||||
|
||||
/// Types which process data in blocks.
|
||||
pub trait BlockSizeUser {
|
||||
/// Size of the block in bytes.
|
||||
type BlockSize: ArrayLength<u8> + 'static;
|
||||
|
||||
/// Return block size in bytes.
|
||||
fn block_size() -> usize {
|
||||
Self::BlockSize::USIZE
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: BlockSizeUser> BlockSizeUser for &T {
|
||||
type BlockSize = T::BlockSize;
|
||||
}
|
||||
|
||||
impl<T: BlockSizeUser> BlockSizeUser for &mut T {
|
||||
type BlockSize = T::BlockSize;
|
||||
}
|
||||
|
||||
/// Types which return data with the given size.
|
||||
pub trait OutputSizeUser {
|
||||
/// Size of the output in bytes.
|
||||
type OutputSize: ArrayLength<u8> + 'static;
|
||||
|
||||
/// Return output size in bytes.
|
||||
fn output_size() -> usize {
|
||||
Self::OutputSize::USIZE
|
||||
}
|
||||
}
|
||||
|
||||
/// Types which use key for initialization.
|
||||
///
|
||||
/// Generally it's used indirectly via [`KeyInit`] or [`KeyIvInit`].
|
||||
pub trait KeySizeUser {
|
||||
/// Key size in bytes.
|
||||
type KeySize: ArrayLength<u8> + 'static;
|
||||
|
||||
/// Return key size in bytes.
|
||||
fn key_size() -> usize {
|
||||
Self::KeySize::USIZE
|
||||
}
|
||||
}
|
||||
|
||||
/// Types which use initialization vector (nonce) for initialization.
|
||||
///
|
||||
/// Generally it's used indirectly via [`KeyIvInit`] or [`InnerIvInit`].
|
||||
pub trait IvSizeUser {
|
||||
/// Initialization vector size in bytes.
|
||||
type IvSize: ArrayLength<u8> + 'static;
|
||||
|
||||
/// Return IV size in bytes.
|
||||
fn iv_size() -> usize {
|
||||
Self::IvSize::USIZE
|
||||
}
|
||||
}
|
||||
|
||||
/// Types which use another type for initialization.
|
||||
///
|
||||
/// Generally it's used indirectly via [`InnerInit`] or [`InnerIvInit`].
|
||||
pub trait InnerUser {
|
||||
/// Inner type.
|
||||
type Inner;
|
||||
}
|
||||
|
||||
/// Resettable types.
|
||||
pub trait Reset {
|
||||
/// Reset state to its initial value.
|
||||
fn reset(&mut self);
|
||||
}
|
||||
|
||||
/// Trait which stores algorithm name constant, used in `Debug` implementations.
|
||||
pub trait AlgorithmName {
|
||||
/// Write algorithm name into `f`.
|
||||
fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result;
|
||||
}
|
||||
|
||||
/// Types which can be initialized from key.
|
||||
pub trait KeyInit: KeySizeUser + Sized {
|
||||
/// Create new value from fixed size key.
|
||||
fn new(key: &Key<Self>) -> Self;
|
||||
|
||||
/// Create new value from variable size key.
|
||||
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength> {
|
||||
if key.len() != Self::KeySize::to_usize() {
|
||||
Err(InvalidLength)
|
||||
} else {
|
||||
Ok(Self::new(Key::<Self>::from_slice(key)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate random key using the provided [`CryptoRng`].
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
#[inline]
|
||||
fn generate_key(mut rng: impl CryptoRng + RngCore) -> Key<Self> {
|
||||
let mut key = Key::<Self>::default();
|
||||
rng.fill_bytes(&mut key);
|
||||
key
|
||||
}
|
||||
}
|
||||
|
||||
/// Types which can be initialized from key and initialization vector (nonce).
|
||||
pub trait KeyIvInit: KeySizeUser + IvSizeUser + Sized {
|
||||
/// Create new value from fixed length key and nonce.
|
||||
fn new(key: &Key<Self>, iv: &Iv<Self>) -> Self;
|
||||
|
||||
/// Create new value from variable length key and nonce.
|
||||
#[inline]
|
||||
fn new_from_slices(key: &[u8], iv: &[u8]) -> Result<Self, InvalidLength> {
|
||||
let key_len = Self::KeySize::USIZE;
|
||||
let iv_len = Self::IvSize::USIZE;
|
||||
if key.len() != key_len || iv.len() != iv_len {
|
||||
Err(InvalidLength)
|
||||
} else {
|
||||
Ok(Self::new(
|
||||
Key::<Self>::from_slice(key),
|
||||
Iv::<Self>::from_slice(iv),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate random key using the provided [`CryptoRng`].
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
#[inline]
|
||||
fn generate_key(mut rng: impl CryptoRng + RngCore) -> Key<Self> {
|
||||
let mut key = Key::<Self>::default();
|
||||
rng.fill_bytes(&mut key);
|
||||
key
|
||||
}
|
||||
|
||||
/// Generate random IV using the provided [`CryptoRng`].
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
#[inline]
|
||||
fn generate_iv(mut rng: impl CryptoRng + RngCore) -> Iv<Self> {
|
||||
let mut iv = Iv::<Self>::default();
|
||||
rng.fill_bytes(&mut iv);
|
||||
iv
|
||||
}
|
||||
|
||||
/// Generate random key and nonce using the provided [`CryptoRng`].
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
#[inline]
|
||||
fn generate_key_iv(mut rng: impl CryptoRng + RngCore) -> (Key<Self>, Iv<Self>) {
|
||||
(Self::generate_key(&mut rng), Self::generate_iv(&mut rng))
|
||||
}
|
||||
}
|
||||
|
||||
/// Types which can be initialized from another type (usually block ciphers).
|
||||
///
|
||||
/// Usually used for initializing types from block ciphers.
|
||||
pub trait InnerInit: InnerUser + Sized {
|
||||
/// Initialize value from the `inner`.
|
||||
fn inner_init(inner: Self::Inner) -> Self;
|
||||
}
|
||||
|
||||
/// Types which can be initialized from another type and additional initialization
|
||||
/// vector/nonce.
|
||||
///
|
||||
/// Usually used for initializing types from block ciphers.
|
||||
pub trait InnerIvInit: InnerUser + IvSizeUser + Sized {
|
||||
/// Initialize value using `inner` and `iv` array.
|
||||
fn inner_iv_init(inner: Self::Inner, iv: &Iv<Self>) -> Self;
|
||||
|
||||
/// Initialize value using `inner` and `iv` slice.
|
||||
fn inner_iv_slice_init(inner: Self::Inner, iv: &[u8]) -> Result<Self, InvalidLength> {
|
||||
if iv.len() != Self::IvSize::to_usize() {
|
||||
Err(InvalidLength)
|
||||
} else {
|
||||
Ok(Self::inner_iv_init(inner, Iv::<Self>::from_slice(iv)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate random IV using the provided [`CryptoRng`].
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
#[inline]
|
||||
fn generate_iv(mut rng: impl CryptoRng + RngCore) -> Iv<Self> {
|
||||
let mut iv = Iv::<Self>::default();
|
||||
rng.fill_bytes(&mut iv);
|
||||
iv
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> KeySizeUser for T
|
||||
where
|
||||
T: InnerUser,
|
||||
T::Inner: KeySizeUser,
|
||||
{
|
||||
type KeySize = <T::Inner as KeySizeUser>::KeySize;
|
||||
}
|
||||
|
||||
impl<T> KeyIvInit for T
|
||||
where
|
||||
T: InnerIvInit,
|
||||
T::Inner: KeyInit,
|
||||
{
|
||||
#[inline]
|
||||
fn new(key: &Key<Self>, iv: &Iv<Self>) -> Self {
|
||||
Self::inner_iv_init(T::Inner::new(key), iv)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn new_from_slices(key: &[u8], iv: &[u8]) -> Result<Self, InvalidLength> {
|
||||
T::Inner::new_from_slice(key).and_then(|i| T::inner_iv_slice_init(i, iv))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> KeyInit for T
|
||||
where
|
||||
T: InnerInit,
|
||||
T::Inner: KeyInit,
|
||||
{
|
||||
#[inline]
|
||||
fn new(key: &Key<Self>) -> Self {
|
||||
Self::inner_init(T::Inner::new(key))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength> {
|
||||
T::Inner::new_from_slice(key)
|
||||
.map_err(|_| InvalidLength)
|
||||
.map(Self::inner_init)
|
||||
}
|
||||
}
|
||||
|
||||
// Unfortunately this blanket impl is impossible without mutually
|
||||
// exclusive traits, see: https://github.com/rust-lang/rfcs/issues/1053
|
||||
// or at the very least without: https://github.com/rust-lang/rust/issues/20400
|
||||
/*
|
||||
impl<T> KeyIvInit for T
|
||||
where
|
||||
T: InnerInit,
|
||||
T::Inner: KeyIvInit,
|
||||
{
|
||||
#[inline]
|
||||
fn new(key: &Key<Self>, iv: &Iv<Self>) -> Self {
|
||||
Self::inner_init(T::Inner::new(key, iv))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn new_from_slices(key: &[u8], iv: &[u8]) -> Result<Self, InvalidLength> {
|
||||
T::Inner::new_from_slice(key)
|
||||
.map_err(|_| InvalidLength)
|
||||
.map(Self::inner_init)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
/// The error type returned when key and/or IV used in the [`KeyInit`],
|
||||
/// [`KeyIvInit`], and [`InnerIvInit`] slice-based methods had
|
||||
/// an invalid length.
|
||||
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
|
||||
pub struct InvalidLength;
|
||||
|
||||
impl fmt::Display for InvalidLength {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
f.write_str("Invalid Length")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl std::error::Error for InvalidLength {}
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"bfdc024e55a5d9f2f415045e9083abb13159e0276c3eb3dbdca290c69f8b4824","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"9e0dfd2dd4173a530e238cb6adb37aa78c34c6bc7444e0e10c1ab5d8881f63ba","src/dev.rs":"5890305be2cd3d221d1c2ce295b911cc57017dc341966ba434def4a072f8bf1c","src/digest.rs":"73f564cb8084e61baf850948443bacdea81727dfbff5abeb520c0e5bb690da7a","src/dyn_digest.rs":"abfa9a30ed2dc71ad2042501961146c87fe3cbf9254b5b203fe24920d0e246b8","src/errors.rs":"2584007e98d691160313cc27e6237db9bd886e9774137b59a1289a20054e9375","src/lib.rs":"71d838697e87561de4b6b2fda94df44639a525d4469316d4ad21f0f0075a130d"},"package":"f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5"}
|
||||
{"files":{"CHANGELOG.md":"3acae7ce99b129f14148a93c55958aad7770dc6627dc0f0be2ae7114946d2c09","Cargo.toml":"f57aba9a99a19807a7313f2f7fc86c43ba0b4ab7fcc79dbcac66d1a2d95e5ccc","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"9e0dfd2dd4173a530e238cb6adb37aa78c34c6bc7444e0e10c1ab5d8881f63ba","README.md":"3bf6e79fb524aece1652938de1387e77cc80461d69e2e1058f609da421f641cf","src/core_api.rs":"b52728aba8a84f980f3f9cc8a94a64d3a97f1eb5f4db144904822c2f8eefb1f8","src/core_api/ct_variable.rs":"78f94f6487e1f540083c1adcc8d4e86d323876ba606229b588d7b44fece3fa81","src/core_api/rt_variable.rs":"b57f89bf3991a313e2ddde09c701375e23539e7df74d685a161707ba1fbc99e4","src/core_api/wrapper.rs":"f9fd119df19f22fc439e0e93a520fb011ba8aeaedbeff6ff04249036554550bf","src/core_api/xof_reader.rs":"f33ca7b2c17eb99d84ea460d5567af68690e4fa6c2d94069a5d6748f8c8620eb","src/dev.rs":"95046c7d95317dfdedc4d230947882770fc5602f933916ca590d7bfce858dc44","src/dev/fixed.rs":"1cbabc651645c1e781d31825791132b4e3741f426e99d7e40988e2a5ee49bddd","src/dev/mac.rs":"e8837d3b99dc8b6ddb398e7fad5731c2ed36931f851ed625d3ae59fb31244165","src/dev/rng.rs":"ff72c0d2a39a740df944d27caf4cb46b60835a4044f656876f651889d122dd5a","src/dev/variable.rs":"51939602b43f5a813fc725bc603a34246bbf76facaa7930cb7bf78c283ec94a7","src/dev/xof.rs":"b3971175e50f615247e4158cba87d77c369461eda22751d888725cec45b61985","src/digest.rs":"fd2586af06f7cd87694e0f35a9467dde7ceb577904182fc683de523d3ec20529","src/lib.rs":"969ec58f54a2bc3743d06d6aa0b3e0dfd2831390bd9d1b161f422dc260b432f6","src/mac.rs":"59ce9fa5121b1af5f762388a1f2321edacee3c112d7f488313d1b368749074b6"},"package":"f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506"}
|
|
@ -0,0 +1,112 @@
|
|||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## 0.10.3 (2022-02-16)
|
||||
### Fixed
|
||||
- Minimal versions build ([#940])
|
||||
|
||||
[#940]: https://github.com/RustCrypto/traits/pull/940
|
||||
|
||||
## 0.10.2 (2022-02-10)
|
||||
### Changed
|
||||
- Relax bounds on the `Mac` trait ([#849])
|
||||
|
||||
[#849]: https://github.com/RustCrypto/traits/pull/849
|
||||
|
||||
## 0.10.1 (2021-12-14)
|
||||
### Added
|
||||
- `Update::chain` and `Digest::new_with_prefix` methods. ([#846])
|
||||
- `Mac::generate_key` method. ([#847])
|
||||
|
||||
### Fixed
|
||||
- Doc cfg attribute for `CtOutput` and `MacError`. ([#842])
|
||||
- Expose `KeyInit::generate_key` method in docs. ([#847])
|
||||
|
||||
[#842]: https://github.com/RustCrypto/traits/pull/842
|
||||
[#846]: https://github.com/RustCrypto/traits/pull/846
|
||||
[#847]: https://github.com/RustCrypto/traits/pull/847
|
||||
|
||||
## 0.10.0 (2021-12-07)
|
||||
### Changed
|
||||
- Dirty traits are removed and instead block-level traits are introduced.
|
||||
Variable output traits reworked and now support both run and compile time selection of output size. ([#380], [#819])
|
||||
- The `crypto-mac` traits are reworked and merged in. ([#819])
|
||||
|
||||
[#819]: https://github.com/RustCrypto/traits/pull/819
|
||||
[#380]: https://github.com/RustCrypto/traits/pull/380
|
||||
|
||||
## 0.9.0 (2020-06-09)
|
||||
### Added
|
||||
- `ExtendableOutputDirty` and `VariableOutputDirty` traits ([#183])
|
||||
- `FixedOutputDirty` trait + `finalize_into*` ([#180])
|
||||
- `XofReader::read_boxed` method ([#178], [#181], [#182])
|
||||
- `alloc` feature ([#163])
|
||||
- Re-export `typenum::consts` as `consts` ([#123])
|
||||
- `Output` type alias ([#115])
|
||||
|
||||
### Changed
|
||||
- Rename `*result*` methods to `finalize` ala IUF ([#161])
|
||||
- Use `impl AsRef<[u8]>` instead of generic params on methods ([#112])
|
||||
- Rename `Input::input` to `Update::update` ala IUF ([#111])
|
||||
- Upgrade to Rust 2018 edition ([#109])
|
||||
- Bump `generic-array` to v0.14 ([#95])
|
||||
|
||||
[#183]: https://github.com/RustCrypto/traits/pull/183
|
||||
[#181]: https://github.com/RustCrypto/traits/pull/181
|
||||
[#182]: https://github.com/RustCrypto/traits/pull/182
|
||||
[#180]: https://github.com/RustCrypto/traits/pull/180
|
||||
[#178]: https://github.com/RustCrypto/traits/pull/178
|
||||
[#163]: https://github.com/RustCrypto/traits/pull/163
|
||||
[#161]: https://github.com/RustCrypto/traits/pull/161
|
||||
[#123]: https://github.com/RustCrypto/traits/pull/123
|
||||
[#115]: https://github.com/RustCrypto/traits/pull/115
|
||||
[#111]: https://github.com/RustCrypto/traits/pull/111
|
||||
[#112]: https://github.com/RustCrypto/traits/pull/112
|
||||
[#109]: https://github.com/RustCrypto/traits/pull/109
|
||||
[#95]: https://github.com/RustCrypto/traits/pull/95
|
||||
|
||||
## 0.8.1 (2019-06-30)
|
||||
|
||||
## 0.8.0 (2018-10-01)
|
||||
|
||||
## 0.7.6 (2018-09-21)
|
||||
|
||||
## 0.7.5 (2018-07-13)
|
||||
|
||||
## 0.7.4 (2018-06-21)
|
||||
|
||||
## 0.7.3 (2018-06-20)
|
||||
|
||||
## 0.7.2 (2017-11-17)
|
||||
|
||||
## 0.7.1 (2017-11-15)
|
||||
|
||||
## 0.7.0 (2017-11-14)
|
||||
|
||||
## 0.6.2 (2017-07-24)
|
||||
|
||||
## 0.6.1 (2017-06-18)
|
||||
|
||||
## 0.6.0 (2017-06-12)
|
||||
|
||||
## 0.5.2 (2017-05-02)
|
||||
|
||||
## 0.5.1 (2017-05-02)
|
||||
|
||||
## 0.5.0 (2017-04-06)
|
||||
|
||||
## 0.4.0 (2016-12-24)
|
||||
|
||||
## 0.3.1 (2016-12-16)
|
||||
|
||||
## 0.3.0 (2016-11-17)
|
||||
|
||||
## 0.2.1 (2016-10-14)
|
||||
|
||||
## 0.2.0 (2016-10-14)
|
||||
|
||||
## 0.1.0 (2016-10-06)
|
|
@ -3,34 +3,48 @@
|
|||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "digest"
|
||||
version = "0.8.1"
|
||||
version = "0.10.3"
|
||||
authors = ["RustCrypto Developers"]
|
||||
description = "Traits for cryptographic hash functions"
|
||||
documentation = "https://docs.rs/digest"
|
||||
readme = "README.md"
|
||||
keywords = ["digest", "crypto", "hash"]
|
||||
categories = ["cryptography", "no-std"]
|
||||
license = "MIT OR Apache-2.0"
|
||||
repository = "https://github.com/RustCrypto/traits"
|
||||
[package.metadata.docs.rs]
|
||||
features = ["std"]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
[dependencies.blobby]
|
||||
version = "0.1"
|
||||
version = "0.3"
|
||||
optional = true
|
||||
|
||||
[dependencies.generic-array]
|
||||
version = "0.12"
|
||||
[dependencies.block-buffer]
|
||||
version = "0.10"
|
||||
optional = true
|
||||
|
||||
[dependencies.crypto-common]
|
||||
version = "0.1.3"
|
||||
|
||||
[dependencies.subtle]
|
||||
version = "=2.4"
|
||||
optional = true
|
||||
default-features = false
|
||||
|
||||
[features]
|
||||
alloc = []
|
||||
core-api = ["block-buffer"]
|
||||
default = ["core-api"]
|
||||
dev = ["blobby"]
|
||||
std = []
|
||||
[badges.travis-ci]
|
||||
repository = "RustCrypto/traits"
|
||||
mac = ["subtle"]
|
||||
rand_core = ["crypto-common/rand_core"]
|
||||
std = ["alloc", "crypto-common/std"]
|
||||
|
|
|
@ -0,0 +1,164 @@
|
|||
# RustCrypto: Digest Algorithm Traits
|
||||
|
||||
[![crate][crate-image]][crate-link]
|
||||
[![Docs][docs-image]][docs-link]
|
||||
![Apache2/MIT licensed][license-image]
|
||||
![Rust Version][rustc-image]
|
||||
[![Project Chat][chat-image]][chat-link]
|
||||
[![Build Status][build-image]][build-link]
|
||||
|
||||
Traits which describe functionality of [cryptographic hash functions][0], a.k.a.
|
||||
digest algorithms.
|
||||
|
||||
See [RustCrypto/hashes][1] for implementations which use this trait.
|
||||
|
||||
[Documentation][docs-link]
|
||||
|
||||
## Minimum Supported Rust Version
|
||||
|
||||
Rust **1.41** or higher.
|
||||
|
||||
Minimum supported Rust version can be changed in the future, but it will be
|
||||
done with a minor version bump.
|
||||
|
||||
## SemVer Policy
|
||||
|
||||
- All on-by-default features of this library are covered by SemVer
|
||||
- MSRV is considered exempt from SemVer as noted above
|
||||
|
||||
## Usage
|
||||
|
||||
Let us demonstrate how to use crates in this repository using BLAKE2b as an
|
||||
example.
|
||||
|
||||
First add `blake2` crate to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
blake2 = "0.8"
|
||||
```
|
||||
|
||||
`blake2` and other crates re-export the `digest` crate and the `Digest` trait
for convenience, so you don't have to add `digest` as an explicit dependency.
|
||||
|
||||
Now you can write the following code:
|
||||
|
||||
```rust
|
||||
use blake2::{Blake2b, Digest};
|
||||
|
||||
let mut hasher = Blake2b::new();
|
||||
let data = b"Hello world!";
|
||||
hasher.update(data);
// `update` can be called repeatedly and is generic over `AsRef<[u8]>`
hasher.update("String data");
|
||||
// Note that calling `finalize()` consumes hasher
|
||||
let hash = hasher.finalize();
|
||||
println!("Result: {:x}", hash);
|
||||
```
|
||||
|
||||
In this example `hash` has type [`GenericArray<u8, U64>`][2], which is a generic
|
||||
alternative to `[u8; 64]`.
|
||||
|
||||
Alternatively, you can use a chained approach, which is equivalent to the
previous example:
|
||||
|
||||
```rust
|
||||
let hash = Blake2b::new()
|
||||
.chain(b"Hello world!")
|
||||
.chain("String data")
|
||||
.finalize();
|
||||
|
||||
println!("Result: {:x}", hash);
|
||||
```
|
||||
|
||||
If the whole message is available, you can also use the convenience `digest` method:
|
||||
|
||||
```rust
|
||||
let hash = Blake2b::digest(b"my message");
|
||||
println!("Result: {:x}", hash);
|
||||
```
|
||||
|
||||
### Hashing `Read`-able objects
|
||||
|
||||
If you want to hash data from the [`Read`][3] trait (e.g. from a file), you can
rely on the implementation of the [`Write`][4] trait (requires the
enabled-by-default `std` feature):
|
||||
|
||||
```rust
|
||||
use blake2::{Blake2b, Digest};
|
||||
use std::{fs, io};
|
||||
|
||||
let mut file = fs::File::open(&path)?;
|
||||
let mut hasher = Blake2b::new();
|
||||
let n = io::copy(&mut file, &mut hasher)?;
|
||||
let hash = hasher.finalize();
|
||||
|
||||
println!("Path: {}", path);
|
||||
println!("Bytes processed: {}", n);
|
||||
println!("Hash value: {:x}", hash);
|
||||
```
|
||||
|
||||
### Generic code
|
||||
|
||||
You can write code that is generic over the `Digest` trait (or other traits from
the `digest` crate) and works with different hash functions:
|
||||
|
||||
```rust
|
||||
use digest::Digest;
|
||||
|
||||
// Toy example, do not use it in practice!
|
||||
// Instead use crates from: https://github.com/RustCrypto/password-hashing
|
||||
fn hash_password<D: Digest>(password: &str, salt: &str, output: &mut [u8]) {
|
||||
let mut hasher = D::new();
|
||||
hasher.update(password.as_bytes());
hasher.update(b"$");
hasher.update(salt.as_bytes());
|
||||
output.copy_from_slice(hasher.finalize().as_slice())
|
||||
}
|
||||
|
||||
use blake2::Blake2b;
|
||||
use sha2::Sha256;
|
||||
|
||||
hash_password::<Blake2b>("my_password", "abcd", &mut buf);
|
||||
hash_password::<Sha256>("my_password", "abcd", &mut buf);
|
||||
```
|
||||
|
||||
If you want to use hash functions with trait objects, use `digest::DynDigest`
|
||||
trait.
|
||||
|
||||
## License
|
||||
|
||||
Licensed under either of:
|
||||
|
||||
* [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0)
|
||||
* [MIT license](http://opensource.org/licenses/MIT)
|
||||
|
||||
at your option.
|
||||
|
||||
### Contribution
|
||||
|
||||
Unless you explicitly state otherwise, any contribution intentionally submitted
|
||||
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be
|
||||
dual licensed as above, without any additional terms or conditions.
|
||||
|
||||
[//]: # (badges)
|
||||
|
||||
[crate-image]: https://img.shields.io/crates/v/digest.svg
|
||||
[crate-link]: https://crates.io/crates/digest
|
||||
[docs-image]: https://docs.rs/digest/badge.svg
|
||||
[docs-link]: https://docs.rs/digest/
|
||||
[license-image]: https://img.shields.io/badge/license-Apache2.0/MIT-blue.svg
|
||||
[rustc-image]: https://img.shields.io/badge/rustc-1.41+-blue.svg
|
||||
[chat-image]: https://img.shields.io/badge/zulip-join_chat-blue.svg
|
||||
[chat-link]: https://rustcrypto.zulipchat.com/#narrow/stream/260041-hashes
|
||||
[build-image]: https://github.com/RustCrypto/traits/workflows/digest/badge.svg?branch=master&event=push
|
||||
[build-link]: https://github.com/RustCrypto/traits/actions?query=workflow%3Adigest
|
||||
|
||||
[//]: # (general links)
|
||||
|
||||
[0]: https://en.wikipedia.org/wiki/Cryptographic_hash_function
|
||||
[1]: https://github.com/RustCrypto/hashes
|
||||
[2]: https://docs.rs/generic-array
|
||||
[3]: https://doc.rust-lang.org/std/io/trait.Read.html
|
||||
[4]: https://doc.rust-lang.org/std/io/trait.Write.html
|
||||
[5]: https://en.wikipedia.org/wiki/Hash-based_message_authentication_code
|
||||
[6]: https://github.com/RustCrypto/MACs
|
|
@ -0,0 +1,119 @@
|
|||
//! Low-level traits operating on blocks and wrappers around them.
|
||||
//!
|
||||
//! Usage of traits in this module in user code is discouraged. Instead, use
//! core algorithms wrapped in the wrapper types, which implement the
//! higher-level traits.
|
||||
use crate::InvalidOutputSize;
|
||||
|
||||
pub use crypto_common::{AlgorithmName, Block, BlockSizeUser, OutputSizeUser, Reset};
|
||||
|
||||
use block_buffer::{BlockBuffer, BufferKind};
|
||||
use crypto_common::{
|
||||
typenum::{IsLess, Le, NonZero, U256},
|
||||
Output,
|
||||
};
|
||||
|
||||
mod ct_variable;
|
||||
mod rt_variable;
|
||||
mod wrapper;
|
||||
mod xof_reader;
|
||||
|
||||
pub use ct_variable::CtVariableCoreWrapper;
|
||||
pub use rt_variable::RtVariableCoreWrapper;
|
||||
pub use wrapper::{CoreProxy, CoreWrapper};
|
||||
pub use xof_reader::XofReaderCoreWrapper;
|
||||
|
||||
/// Buffer type used by type which implements [`BufferKindUser`].
|
||||
pub type Buffer<S> =
|
||||
BlockBuffer<<S as BlockSizeUser>::BlockSize, <S as BufferKindUser>::BufferKind>;
|
||||
|
||||
/// Types which consume data in blocks.
|
||||
pub trait UpdateCore: BlockSizeUser {
|
||||
/// Update state using the provided data blocks.
|
||||
fn update_blocks(&mut self, blocks: &[Block<Self>]);
|
||||
}
|
||||
|
||||
/// Types which use [`BlockBuffer`] functionality.
|
||||
pub trait BufferKindUser: BlockSizeUser {
|
||||
/// Block buffer kind over which type operates.
|
||||
type BufferKind: BufferKind;
|
||||
}
|
||||
|
||||
/// Core trait for hash functions with fixed output size.
|
||||
pub trait FixedOutputCore: UpdateCore + BufferKindUser + OutputSizeUser
|
||||
where
|
||||
Self::BlockSize: IsLess<U256>,
|
||||
Le<Self::BlockSize, U256>: NonZero,
|
||||
{
|
||||
/// Finalize state using remaining data stored in the provided block buffer,
|
||||
/// write result into provided array and leave `self` in a dirty state.
|
||||
fn finalize_fixed_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>);
|
||||
}
|
||||
|
||||
/// Core trait for hash functions with extendable (XOF) output size.
|
||||
pub trait ExtendableOutputCore: UpdateCore + BufferKindUser
|
||||
where
|
||||
Self::BlockSize: IsLess<U256>,
|
||||
Le<Self::BlockSize, U256>: NonZero,
|
||||
{
|
||||
/// XOF reader core state.
|
||||
type ReaderCore: XofReaderCore;
|
||||
|
||||
/// Retrieve XOF reader using remaining data stored in the block buffer
|
||||
/// and leave hasher in a dirty state.
|
||||
fn finalize_xof_core(&mut self, buffer: &mut Buffer<Self>) -> Self::ReaderCore;
|
||||
}
|
||||
|
||||
/// Core reader trait for extendable-output function (XOF) result.
|
||||
pub trait XofReaderCore: BlockSizeUser {
|
||||
/// Read next XOF block.
|
||||
fn read_block(&mut self) -> Block<Self>;
|
||||
}
|
||||
|
||||
/// Core trait for hash functions with variable output size.
|
||||
///
|
||||
/// Maximum output size is equal to [`OutputSizeUser::OutputSize`].
|
||||
/// Users are expected to truncate result returned by the
|
||||
/// [`finalize_variable_core`] to `output_size` passed to the [`new`] method
|
||||
/// during construction. Truncation side is defined by the [`TRUNC_SIDE`]
|
||||
/// associated constant.
|
||||
///
|
||||
/// [`finalize_variable_core`]: VariableOutputCore::finalize_variable_core
|
||||
/// [`new`]: VariableOutputCore::new
|
||||
/// [`TRUNC_SIDE`]: VariableOutputCore::TRUNC_SIDE
|
||||
pub trait VariableOutputCore: UpdateCore + OutputSizeUser + BufferKindUser + Sized
|
||||
where
|
||||
Self::BlockSize: IsLess<U256>,
|
||||
Le<Self::BlockSize, U256>: NonZero,
|
||||
{
|
||||
/// Side which should be used in a truncated result.
|
||||
const TRUNC_SIDE: TruncSide;
|
||||
|
||||
/// Initialize hasher state for given output size.
|
||||
///
|
||||
/// Returns [`InvalidOutputSize`] if `output_size` is not valid for
|
||||
/// the algorithm, e.g. if it's bigger than the [`OutputSize`]
|
||||
/// associated type.
|
||||
///
|
||||
/// [`OutputSize`]: OutputSizeUser::OutputSize
|
||||
fn new(output_size: usize) -> Result<Self, InvalidOutputSize>;
|
||||
|
||||
/// Finalize hasher and write full hashing result into the `out` buffer.
|
||||
///
|
||||
/// The result must be truncated to `output_size` used during hasher
|
||||
/// construction. Truncation side is defined by the [`TRUNC_SIDE`]
|
||||
/// associated constant.
|
||||
///
|
||||
/// [`TRUNC_SIDE`]: VariableOutputCore::TRUNC_SIDE
|
||||
fn finalize_variable_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>);
|
||||
}
|
||||
|
||||
/// Type which used for defining truncation side in the [`VariableOutputCore`]
|
||||
/// trait.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub enum TruncSide {
|
||||
/// Truncate left side, i.e. `&out[..n]`.
|
||||
Left,
|
||||
/// Truncate right side, i.e. `&out[m..]`.
|
||||
Right,
|
||||
}
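
A minimal sketch of how these core traits fit together, assuming `block-buffer` as a
direct dependency; `XorHashCore`/`XorHash` are hypothetical names and the scheme is
cryptographically useless. The core type only processes whole blocks; buffering and
the user-facing `Digest` API come from `CoreWrapper`.

```rust
use block_buffer::Eager;
use digest::{
    consts::{U32, U64},
    core_api::{
        Block, Buffer, BlockSizeUser, BufferKindUser, CoreWrapper, FixedOutputCore,
        OutputSizeUser, UpdateCore,
    },
    Digest, HashMarker, Output,
};

/// Toy block-level core: XORs every input block into a 32-byte state.
#[derive(Clone, Default)]
struct XorHashCore {
    state: [u8; 32],
}

impl HashMarker for XorHashCore {}

impl BlockSizeUser for XorHashCore {
    type BlockSize = U64; // 64-byte blocks
}

impl BufferKindUser for XorHashCore {
    type BufferKind = Eager; // blocks are processed as soon as they fill up
}

impl OutputSizeUser for XorHashCore {
    type OutputSize = U32; // 32-byte digests
}

impl UpdateCore for XorHashCore {
    fn update_blocks(&mut self, blocks: &[Block<Self>]) {
        for block in blocks {
            for (s, b) in self.state.iter_mut().zip(block.iter()) {
                *s ^= *b;
            }
        }
    }
}

impl FixedOutputCore for XorHashCore {
    fn finalize_fixed_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>) {
        // Pad the final partial block with 0x80, fold it in, then emit the state.
        let state = &mut self.state;
        buffer.digest_pad(0x80, &[], |block| {
            for (s, b) in state.iter_mut().zip(block.iter()) {
                *s ^= *b;
            }
        });
        out.copy_from_slice(&self.state);
    }
}

/// User-facing hasher: `CoreWrapper` supplies `Update`, `FixedOutput`, `Digest`, etc.
type XorHash = CoreWrapper<XorHashCore>;

fn main() {
    let hash = XorHash::new().chain_update(b"hello world").finalize();
    assert_eq!(hash.len(), 32);
}
```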
|
|
@ -0,0 +1,167 @@
|
|||
use super::{
|
||||
AlgorithmName, Buffer, BufferKindUser, FixedOutputCore, Reset, TruncSide, UpdateCore,
|
||||
VariableOutputCore,
|
||||
};
|
||||
use crate::HashMarker;
|
||||
#[cfg(feature = "mac")]
|
||||
use crate::MacMarker;
|
||||
use core::{fmt, marker::PhantomData};
|
||||
use crypto_common::{
|
||||
generic_array::{ArrayLength, GenericArray},
|
||||
typenum::{IsLess, IsLessOrEqual, Le, LeEq, NonZero, U256},
|
||||
Block, BlockSizeUser, OutputSizeUser,
|
||||
};
|
||||
|
||||
/// Wrapper around [`VariableOutputCore`] which selects output size
|
||||
/// at compile time.
|
||||
#[derive(Clone)]
|
||||
pub struct CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
inner: T,
|
||||
_out: PhantomData<OutSize>,
|
||||
}
|
||||
|
||||
impl<T, OutSize> HashMarker for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore + HashMarker,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
#[cfg(feature = "mac")]
|
||||
impl<T, OutSize> MacMarker for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore + MacMarker,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T, OutSize> BlockSizeUser for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type BlockSize = T::BlockSize;
|
||||
}
|
||||
|
||||
impl<T, OutSize> UpdateCore for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn update_blocks(&mut self, blocks: &[Block<Self>]) {
|
||||
self.inner.update_blocks(blocks);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, OutSize> OutputSizeUser for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize> + 'static,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type OutputSize = OutSize;
|
||||
}
|
||||
|
||||
impl<T, OutSize> BufferKindUser for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type BufferKind = T::BufferKind;
|
||||
}
|
||||
|
||||
impl<T, OutSize> FixedOutputCore for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize> + 'static,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn finalize_fixed_core(
|
||||
&mut self,
|
||||
buffer: &mut Buffer<Self>,
|
||||
out: &mut GenericArray<u8, Self::OutputSize>,
|
||||
) {
|
||||
let mut full_res = Default::default();
|
||||
self.inner.finalize_variable_core(buffer, &mut full_res);
|
||||
let n = out.len();
|
||||
let m = full_res.len() - n;
|
||||
match T::TRUNC_SIDE {
|
||||
TruncSide::Left => out.copy_from_slice(&full_res[..n]),
|
||||
TruncSide::Right => out.copy_from_slice(&full_res[m..]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, OutSize> Default for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inner: T::new(OutSize::USIZE).unwrap(),
|
||||
_out: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, OutSize> Reset for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
*self = Default::default();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, OutSize> AlgorithmName for CtVariableCoreWrapper<T, OutSize>
|
||||
where
|
||||
T: VariableOutputCore + AlgorithmName,
|
||||
OutSize: ArrayLength<u8> + IsLessOrEqual<T::OutputSize>,
|
||||
LeEq<OutSize, T::OutputSize>: NonZero,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
T::write_alg_name(f)?;
|
||||
f.write_str("_")?;
|
||||
write!(f, "{}", OutSize::USIZE)
|
||||
}
|
||||
}
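
A short usage sketch of a compile-time-sized digest built with this wrapper (assuming
the `sha2` crate, whose `Sha224` is derived from a SHA-256-style variable core this
way):

```rust
use digest::Digest;
use sha2::{Sha224, Sha256};

fn main() {
    // Same underlying core algorithm, different compile-time output sizes.
    assert_eq!(Sha224::digest(b"abc").len(), 28);
    assert_eq!(Sha256::digest(b"abc").len(), 32);
}
```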
|
|
@ -0,0 +1,166 @@
|
|||
use super::{AlgorithmName, TruncSide, UpdateCore, VariableOutputCore};
|
||||
#[cfg(feature = "mac")]
|
||||
use crate::MacMarker;
|
||||
use crate::{HashMarker, InvalidBufferSize};
|
||||
use crate::{InvalidOutputSize, Reset, Update, VariableOutput, VariableOutputReset};
|
||||
use block_buffer::BlockBuffer;
|
||||
use core::fmt;
|
||||
use crypto_common::typenum::{IsLess, Le, NonZero, Unsigned, U256};
|
||||
|
||||
/// Wrapper around [`VariableOutputCore`] which selects output size
|
||||
/// at run time.
|
||||
#[derive(Clone)]
|
||||
pub struct RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
core: T,
|
||||
buffer: BlockBuffer<T::BlockSize, T::BufferKind>,
|
||||
output_size: usize,
|
||||
}
|
||||
|
||||
impl<T> RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn finalize_dirty(&mut self, out: &mut [u8]) -> Result<(), InvalidBufferSize> {
|
||||
let Self {
|
||||
core,
|
||||
buffer,
|
||||
output_size,
|
||||
} = self;
|
||||
if out.len() != *output_size || out.len() > Self::MAX_OUTPUT_SIZE {
|
||||
return Err(InvalidBufferSize);
|
||||
}
|
||||
let mut full_res = Default::default();
|
||||
core.finalize_variable_core(buffer, &mut full_res);
|
||||
let n = out.len();
|
||||
let m = full_res.len() - n;
|
||||
match T::TRUNC_SIDE {
|
||||
TruncSide::Left => out.copy_from_slice(&full_res[..n]),
|
||||
TruncSide::Right => out.copy_from_slice(&full_res[m..]),
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> HashMarker for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + HashMarker,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
#[cfg(feature = "mac")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
|
||||
impl<T> MacMarker for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + MacMarker,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T> Reset for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore + Reset,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
self.buffer.reset();
|
||||
self.core.reset();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Update for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn update(&mut self, input: &[u8]) {
|
||||
let Self { core, buffer, .. } = self;
|
||||
buffer.digest_blocks(input, |blocks| core.update_blocks(blocks));
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> VariableOutput for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
const MAX_OUTPUT_SIZE: usize = T::OutputSize::USIZE;
|
||||
|
||||
fn new(output_size: usize) -> Result<Self, InvalidOutputSize> {
|
||||
let buffer = Default::default();
|
||||
T::new(output_size).map(|core| Self {
|
||||
core,
|
||||
buffer,
|
||||
output_size,
|
||||
})
|
||||
}
|
||||
|
||||
fn output_size(&self) -> usize {
|
||||
self.output_size
|
||||
}
|
||||
|
||||
fn finalize_variable(mut self, out: &mut [u8]) -> Result<(), InvalidBufferSize> {
|
||||
self.finalize_dirty(out)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> VariableOutputReset for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore + Reset,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
fn finalize_variable_reset(&mut self, out: &mut [u8]) -> Result<(), InvalidBufferSize> {
|
||||
self.finalize_dirty(out)?;
|
||||
self.core.reset();
|
||||
self.buffer.reset();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore + AlgorithmName,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
T::write_alg_name(f)?;
|
||||
f.write_str(" { .. }")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
|
||||
impl<T> std::io::Write for RtVariableCoreWrapper<T>
|
||||
where
|
||||
T: VariableOutputCore + UpdateCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||
Update::update(self, buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
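
A usage sketch of the runtime-sized wrapper (assuming the `blake2` crate at v0.10,
whose `Blake2bVar` is built on it):

```rust
use blake2::Blake2bVar;
use digest::{Update, VariableOutput};

fn main() {
    // Select a 16-byte output size at run time.
    let mut hasher = Blake2bVar::new(16).expect("valid Blake2b output size");
    hasher.update(b"some data");

    let mut out = [0u8; 16];
    hasher
        .finalize_variable(&mut out)
        .expect("buffer length matches the selected output size");
}
```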
|
|
@ -0,0 +1,277 @@
|
|||
use super::{
|
||||
AlgorithmName, Buffer, BufferKindUser, ExtendableOutputCore, FixedOutputCore, OutputSizeUser,
|
||||
Reset, UpdateCore, XofReaderCoreWrapper,
|
||||
};
|
||||
use crate::{
|
||||
ExtendableOutput, ExtendableOutputReset, FixedOutput, FixedOutputReset, HashMarker, Update,
|
||||
};
|
||||
use block_buffer::BlockBuffer;
|
||||
use core::fmt;
|
||||
use crypto_common::{
|
||||
typenum::{IsLess, Le, NonZero, U256},
|
||||
BlockSizeUser, InvalidLength, Key, KeyInit, KeySizeUser, Output,
|
||||
};
|
||||
|
||||
#[cfg(feature = "mac")]
|
||||
use crate::MacMarker;
|
||||
|
||||
/// Wrapper around [`BufferKindUser`].
|
||||
///
|
||||
/// It handles data buffering and implements the slice-based traits.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
core: T,
|
||||
buffer: BlockBuffer<T::BlockSize, T::BufferKind>,
|
||||
}
|
||||
|
||||
impl<T> HashMarker for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + HashMarker,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
#[cfg(feature = "mac")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
|
||||
impl<T> MacMarker for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + MacMarker,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
// this blanket impl is needed for HMAC
|
||||
impl<T> BlockSizeUser for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + HashMarker,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type BlockSize = T::BlockSize;
|
||||
}
|
||||
|
||||
impl<T> CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
/// Create new wrapper from `core`.
|
||||
#[inline]
|
||||
pub fn from_core(core: T) -> Self {
|
||||
let buffer = Default::default();
|
||||
Self { core, buffer }
|
||||
}
|
||||
|
||||
/// Decompose wrapper into inner parts.
|
||||
#[inline]
|
||||
pub fn decompose(self) -> (T, Buffer<T>) {
|
||||
let Self { core, buffer } = self;
|
||||
(core, buffer)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> KeySizeUser for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + KeySizeUser,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type KeySize = T::KeySize;
|
||||
}
|
||||
|
||||
impl<T> KeyInit for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + KeyInit,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn new(key: &Key<Self>) -> Self {
|
||||
Self {
|
||||
core: T::new(key),
|
||||
buffer: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength> {
|
||||
Ok(Self {
|
||||
core: T::new_from_slice(key)?,
|
||||
buffer: Default::default(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + AlgorithmName,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
T::write_alg_name(f)?;
|
||||
f.write_str(" { .. }")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Reset for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + Reset,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn reset(&mut self) {
|
||||
self.core.reset();
|
||||
self.buffer.reset();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Update for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + UpdateCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn update(&mut self, input: &[u8]) {
|
||||
let Self { core, buffer } = self;
|
||||
buffer.digest_blocks(input, |blocks| core.update_blocks(blocks));
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> OutputSizeUser for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + OutputSizeUser,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type OutputSize = T::OutputSize;
|
||||
}
|
||||
|
||||
impl<T> FixedOutput for CoreWrapper<T>
|
||||
where
|
||||
T: FixedOutputCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn finalize_into(mut self, out: &mut Output<Self>) {
|
||||
let Self { core, buffer } = &mut self;
|
||||
core.finalize_fixed_core(buffer, out);
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> FixedOutputReset for CoreWrapper<T>
|
||||
where
|
||||
T: FixedOutputCore + Reset,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn finalize_into_reset(&mut self, out: &mut Output<Self>) {
|
||||
let Self { core, buffer } = self;
|
||||
core.finalize_fixed_core(buffer, out);
|
||||
core.reset();
|
||||
buffer.reset();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ExtendableOutput for CoreWrapper<T>
|
||||
where
|
||||
T: ExtendableOutputCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
<T::ReaderCore as BlockSizeUser>::BlockSize: IsLess<U256>,
|
||||
Le<<T::ReaderCore as BlockSizeUser>::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type Reader = XofReaderCoreWrapper<T::ReaderCore>;
|
||||
|
||||
#[inline]
|
||||
fn finalize_xof(self) -> Self::Reader {
|
||||
let (mut core, mut buffer) = self.decompose();
|
||||
let core = core.finalize_xof_core(&mut buffer);
|
||||
let buffer = Default::default();
|
||||
Self::Reader { core, buffer }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> ExtendableOutputReset for CoreWrapper<T>
|
||||
where
|
||||
T: ExtendableOutputCore + Reset,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
<T::ReaderCore as BlockSizeUser>::BlockSize: IsLess<U256>,
|
||||
Le<<T::ReaderCore as BlockSizeUser>::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn finalize_xof_reset(&mut self) -> Self::Reader {
|
||||
let Self { core, buffer } = self;
|
||||
let reader_core = core.finalize_xof_core(buffer);
|
||||
core.reset();
|
||||
buffer.reset();
|
||||
let buffer = Default::default();
|
||||
Self::Reader {
|
||||
core: reader_core,
|
||||
buffer,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
|
||||
impl<T> std::io::Write for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser + UpdateCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||
Update::update(self, buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A proxy trait to a core type implemented by [`CoreWrapper`]
|
||||
// TODO: replace with an inherent associated type on stabilization:
|
||||
// https://github.com/rust-lang/rust/issues/8995
|
||||
pub trait CoreProxy: sealed::Sealed {
|
||||
/// Type wrapped by [`CoreWrapper`].
|
||||
type Core;
|
||||
}
|
||||
|
||||
mod sealed {
|
||||
pub trait Sealed {}
|
||||
}
|
||||
|
||||
impl<T> sealed::Sealed for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T> CoreProxy for CoreWrapper<T>
|
||||
where
|
||||
T: BufferKindUser,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
type Core = T;
|
||||
}
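
A short usage sketch of the `std::io::Write` impl above (assuming the `sha2` crate,
whose hashers are `CoreWrapper` instances, and the default `std` feature): a hasher
can act as a sink for `io::copy`.

```rust
use std::io::{self, Write};

use digest::Digest;
use sha2::Sha256;

fn main() -> io::Result<()> {
    let mut hasher = Sha256::new();

    // Stream data in through the `Write` impl...
    let mut data: &[u8] = b"streamed input";
    io::copy(&mut data, &mut hasher)?;
    hasher.write_all(b", plus a tail")?;

    // ...then finalize through the `Digest` API.
    println!("{:x}", hasher.finalize());
    Ok(())
}
```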
|
|
@ -0,0 +1,63 @@
|
|||
use super::{AlgorithmName, XofReaderCore};
|
||||
use crate::XofReader;
|
||||
use block_buffer::EagerBuffer;
|
||||
use core::fmt;
|
||||
use crypto_common::typenum::{IsLess, Le, NonZero, U256};
|
||||
|
||||
/// Wrapper around [`XofReaderCore`] implementations.
|
||||
///
|
||||
/// It handles data buffering and implements the mid-level traits.
|
||||
#[derive(Clone, Default)]
|
||||
pub struct XofReaderCoreWrapper<T>
|
||||
where
|
||||
T: XofReaderCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
pub(super) core: T,
|
||||
pub(super) buffer: EagerBuffer<T::BlockSize>,
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for XofReaderCoreWrapper<T>
|
||||
where
|
||||
T: XofReaderCore + AlgorithmName,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
|
||||
T::write_alg_name(f)?;
|
||||
f.write_str(" { .. }")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> XofReader for XofReaderCoreWrapper<T>
|
||||
where
|
||||
T: XofReaderCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn read(&mut self, buffer: &mut [u8]) {
|
||||
let Self { core, buffer: buf } = self;
|
||||
buf.set_data(buffer, |blocks| {
|
||||
for block in blocks {
|
||||
*block = core.read_block();
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
|
||||
impl<T> std::io::Read for XofReaderCoreWrapper<T>
|
||||
where
|
||||
T: XofReaderCore,
|
||||
T::BlockSize: IsLess<U256>,
|
||||
Le<T::BlockSize, U256>: NonZero,
|
||||
{
|
||||
#[inline]
|
||||
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
|
||||
XofReader::read(self, buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
}
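
A short sketch of the XOF reader path (assuming the `sha3` crate, whose SHAKE types
use these wrappers): finalize into a reader, then pull output incrementally.

```rust
use digest::{ExtendableOutput, Update, XofReader};
use sha3::Shake128;

fn main() {
    let mut hasher = Shake128::default();
    hasher.update(b"some input");

    // Finalizing yields a reader; output can be pulled in arbitrary chunks.
    let mut reader = hasher.finalize_xof();
    let mut out = [0u8; 64];
    reader.read(&mut out[..16]);
    reader.read(&mut out[16..]);
}
```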
|
|
@ -1,218 +1,78 @@
|
|||
use super::{Input, VariableOutput, ExtendableOutput, Reset, XofReader};
|
||||
use core::fmt::Debug;
|
||||
//! Development-related functionality
|
||||
|
||||
pub use blobby;
|
||||
|
||||
mod fixed;
|
||||
mod mac;
|
||||
mod rng;
|
||||
mod variable;
|
||||
mod xof;
|
||||
|
||||
pub use fixed::*;
|
||||
pub use mac::*;
|
||||
pub use variable::*;
|
||||
pub use xof::*;
|
||||
|
||||
/// Define hash function test
|
||||
#[macro_export]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "dev")))]
|
||||
macro_rules! new_test {
|
||||
($name:ident, $test_name:expr, $hasher:ty, $test_func:ident) => {
|
||||
($name:ident, $test_name:expr, $hasher:ty, $test_func:ident $(,)?) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
use digest::blobby::Blob2Iterator;
|
||||
use digest::dev::blobby::Blob2Iterator;
|
||||
let data = include_bytes!(concat!("data/", $test_name, ".blb"));
|
||||
|
||||
for (i, row) in Blob2Iterator::new(data).unwrap().enumerate() {
|
||||
let input = row[0];
|
||||
let output = row[1];
|
||||
let [input, output] = row.unwrap();
|
||||
if let Some(desc) = $test_func::<$hasher>(input, output) {
|
||||
panic!("\n\
|
||||
Failed test №{}: {}\n\
|
||||
input:\t{:?}\n\
|
||||
output:\t{:?}\n",
|
||||
panic!(
|
||||
"\n\
|
||||
Failed test №{}: {}\n\
|
||||
input:\t{:?}\n\
|
||||
output:\t{:?}\n",
|
||||
i, desc, input, output,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// module to separate Digest from other traits
|
||||
mod foo {
|
||||
use super::super::Digest;
|
||||
use core::fmt::Debug;
|
||||
|
||||
pub fn digest_test<D>(input: &[u8], output: &[u8]) -> Option<&'static str>
|
||||
where D: Digest + Debug + Clone
|
||||
{
|
||||
let mut hasher = D::new();
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.input(input);
|
||||
let mut hasher2 = hasher.clone();
|
||||
if hasher.result().as_slice() != output {
|
||||
return Some("whole message");
|
||||
}
|
||||
|
||||
// Test if reset works correctly
|
||||
hasher2.reset();
|
||||
hasher2.input(input);
|
||||
if hasher2.result().as_slice() != output {
|
||||
return Some("whole message after reset");
|
||||
}
|
||||
|
||||
// Test that it works when accepting the message in pieces
|
||||
let mut hasher = D::new();
|
||||
let len = input.len();
|
||||
let mut left = len;
|
||||
while left > 0 {
|
||||
let take = (left + 1) / 2;
|
||||
hasher.input(&input[len - left..take + len - left]);
|
||||
left = left - take;
|
||||
}
|
||||
if hasher.result().as_slice() != output {
|
||||
return Some("message in pieces");
|
||||
}
|
||||
|
||||
// Test processing byte-by-byte
|
||||
let mut hasher = D::new();
|
||||
for chunk in input.chunks(1) {
|
||||
hasher.input(chunk)
|
||||
}
|
||||
if hasher.result().as_slice() != output {
|
||||
return Some("message byte-by-byte");
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
|
||||
pub fn one_million_a<D>(expected: &[u8])
|
||||
where D: Digest + Debug + Clone
|
||||
{
|
||||
let mut sh = D::new();
|
||||
for _ in 0..50_000 {
|
||||
sh.input(&[b'a'; 10]);
|
||||
}
|
||||
sh.input(&[b'a'; 500_000][..]);
|
||||
let out = sh.result();
|
||||
assert_eq!(out[..], expected[..]);
|
||||
}
|
||||
}
|
||||
|
||||
pub use self::foo::{digest_test, one_million_a};
|
||||
|
||||
pub fn xof_test<D>(input: &[u8], output: &[u8])
|
||||
-> Option<&'static str>
|
||||
where D: Input + ExtendableOutput + Default + Debug + Reset + Clone
|
||||
{
|
||||
let mut hasher = D::default();
|
||||
let mut buf = [0u8; 1024];
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.input(input);
|
||||
|
||||
let mut hasher2 = hasher.clone();
|
||||
{
|
||||
let out = &mut buf[..output.len()];
|
||||
hasher.xof_result().read(out);
|
||||
|
||||
if out != output { return Some("whole message"); }
|
||||
}
|
||||
|
||||
// Test if hasher resets correctly
|
||||
hasher2.reset();
|
||||
hasher2.input(input);
|
||||
|
||||
{
|
||||
let out = &mut buf[..output.len()];
|
||||
hasher2.xof_result().read(out);
|
||||
|
||||
if out != output { return Some("whole message after reset"); }
|
||||
}
|
||||
|
||||
// Test if hasher accepts message in pieces correctly
|
||||
let mut hasher = D::default();
|
||||
let len = input.len();
|
||||
let mut left = len;
|
||||
while left > 0 {
|
||||
let take = (left + 1) / 2;
|
||||
hasher.input(&input[len - left..take + len - left]);
|
||||
left = left - take;
|
||||
}
|
||||
|
||||
{
|
||||
let out = &mut buf[..output.len()];
|
||||
hasher.xof_result().read(out);
|
||||
if out != output { return Some("message in pieces"); }
|
||||
}
|
||||
|
||||
// Test reading from reader byte by byte
|
||||
let mut hasher = D::default();
|
||||
hasher.input(input);
|
||||
|
||||
let mut reader = hasher.xof_result();
|
||||
let out = &mut buf[..output.len()];
|
||||
for chunk in out.chunks_mut(1) {
|
||||
reader.read(chunk);
|
||||
}
|
||||
|
||||
if out != output { return Some("message in pieces"); }
|
||||
None
|
||||
}
|
||||
|
||||
pub fn variable_test<D>(input: &[u8], output: &[u8])
|
||||
-> Option<&'static str>
|
||||
where D: Input + VariableOutput + Reset + Debug + Clone
|
||||
{
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
let mut buf = [0u8; 128];
|
||||
let buf = &mut buf[..output.len()];
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.input(input);
|
||||
let mut hasher2 = hasher.clone();
|
||||
hasher.variable_result(|res| buf.copy_from_slice(res));
|
||||
if buf != output { return Some("whole message"); }
|
||||
|
||||
// Test if reset works correctly
|
||||
hasher2.reset();
|
||||
hasher2.input(input);
|
||||
hasher2.variable_result(|res| buf.copy_from_slice(res));
|
||||
if buf != output { return Some("whole message after reset"); }
|
||||
|
||||
// Test that it works when accepting the message in pieces
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
let len = input.len();
|
||||
let mut left = len;
|
||||
while left > 0 {
|
||||
let take = (left + 1) / 2;
|
||||
hasher.input(&input[len - left..take + len - left]);
|
||||
left = left - take;
|
||||
}
|
||||
hasher.variable_result(|res| buf.copy_from_slice(res));
|
||||
if buf != output { return Some("message in pieces"); }
|
||||
|
||||
// Test processing byte-by-byte
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
for chunk in input.chunks(1) {
|
||||
hasher.input(chunk)
|
||||
}
|
||||
hasher.variable_result(|res| buf.copy_from_slice(res));
|
||||
if buf != output { return Some("message byte-by-byte"); }
|
||||
None
|
||||
}
|
||||
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! bench {
|
||||
($name:ident, $engine:path, $bs:expr) => {
|
||||
#[bench]
|
||||
fn $name(b: &mut Bencher) {
|
||||
let mut d = <$engine>::default();
|
||||
let data = [0; $bs];
|
||||
|
||||
b.iter(|| {
|
||||
d.input(&data[..]);
|
||||
});
|
||||
|
||||
b.bytes = $bs;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
($engine:path) => {
|
||||
extern crate test;
|
||||
/// Define [`Update`][crate::Update] impl benchmark
|
||||
#[macro_export]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "dev")))]
|
||||
macro_rules! bench_update {
|
||||
(
|
||||
$init:expr;
|
||||
$($name:ident $bs:expr;)*
|
||||
) => {
|
||||
$(
|
||||
#[bench]
|
||||
fn $name(b: &mut Bencher) {
|
||||
let mut d = $init;
|
||||
let data = [0; $bs];
|
||||
|
||||
use test::Bencher;
|
||||
use digest::Digest;
|
||||
b.iter(|| {
|
||||
digest::Update::update(&mut d, &data[..]);
|
||||
});
|
||||
|
||||
bench!(bench1_10, $engine, 10);
|
||||
bench!(bench2_100, $engine, 100);
|
||||
bench!(bench3_1000, $engine, 1000);
|
||||
bench!(bench4_10000, $engine, 10000);
|
||||
b.bytes = $bs;
|
||||
}
|
||||
)*
|
||||
};
|
||||
}
|
||||
|
||||
/// Feed ~16 MiB of pseudorandom data to an updatable state.
|
||||
pub fn feed_rand_16mib<D: crate::Update>(d: &mut D) {
|
||||
let buf = &mut [0u8; 1024];
|
||||
let mut rng = rng::RNG;
|
||||
let n = 16 * (1 << 20) / buf.len();
|
||||
for _ in 0..n {
|
||||
rng.fill(buf);
|
||||
d.update(buf);
|
||||
// additional byte, so the size of the fed data
// will not be a multiple of the block size
|
||||
d.update(&[42]);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,65 @@
|
|||
use crate::{Digest, FixedOutput, FixedOutputReset, HashMarker, Update};
|
||||
use core::fmt::Debug;
|
||||
|
||||
/// Fixed-output resettable digest test via the `Digest` trait
|
||||
pub fn fixed_reset_test<D>(input: &[u8], output: &[u8]) -> Option<&'static str>
|
||||
where
|
||||
D: FixedOutputReset + Debug + Clone + Default + Update + HashMarker,
|
||||
{
|
||||
let mut hasher = D::new();
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.update(input);
|
||||
let mut hasher2 = hasher.clone();
|
||||
if hasher.finalize()[..] != output[..] {
|
||||
return Some("whole message");
|
||||
}
|
||||
|
||||
// Test if reset works correctly
|
||||
hasher2.reset();
|
||||
hasher2.update(input);
|
||||
if hasher2.finalize_reset()[..] != output[..] {
|
||||
return Some("whole message after reset");
|
||||
}
|
||||
|
||||
// Test that it works when accepting the message in chunks
|
||||
for n in 1..core::cmp::min(17, input.len()) {
|
||||
let mut hasher = D::new();
|
||||
for chunk in input.chunks(n) {
|
||||
hasher.update(chunk);
|
||||
hasher2.update(chunk);
|
||||
}
|
||||
if hasher.finalize()[..] != output[..] {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
if hasher2.finalize_reset()[..] != output[..] {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Fixed-output digest test
|
||||
pub fn fixed_test<D>(input: &[u8], output: &[u8]) -> Option<&'static str>
|
||||
where
|
||||
D: FixedOutput + Default + Debug + Clone,
|
||||
{
|
||||
let mut hasher = D::default();
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.update(input);
|
||||
if hasher.finalize_fixed()[..] != output[..] {
|
||||
return Some("whole message");
|
||||
}
|
||||
|
||||
// Test that it works when accepting the message in chunks
|
||||
for n in 1..core::cmp::min(17, input.len()) {
|
||||
let mut hasher = D::default();
|
||||
for chunk in input.chunks(n) {
|
||||
hasher.update(chunk);
|
||||
}
|
||||
if hasher.finalize_fixed()[..] != output[..] {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
|
@ -0,0 +1,159 @@
|
|||
/// Define MAC test
|
||||
#[macro_export]
|
||||
#[cfg(feature = "mac")]
|
||||
#[cfg_attr(docsrs, doc(cfg(all(feature = "dev", feature = "mac"))))]
|
||||
macro_rules! new_mac_test {
|
||||
($name:ident, $test_name:expr, $mac:ty $(,)?) => {
|
||||
digest::new_mac_test!($name, $test_name, $mac, "");
|
||||
};
|
||||
($name:ident, $test_name:expr, $mac:ty, trunc_left $(,)?) => {
|
||||
digest::new_mac_test!($name, $test_name, $mac, "left");
|
||||
};
|
||||
($name:ident, $test_name:expr, $mac:ty, trunc_right $(,)?) => {
|
||||
digest::new_mac_test!($name, $test_name, $mac, "right");
|
||||
};
|
||||
($name:ident, $test_name:expr, $mac:ty, $trunc:expr $(,)?) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
use core::cmp::min;
|
||||
use digest::dev::blobby::Blob3Iterator;
|
||||
use digest::Mac;
|
||||
|
||||
fn run_test(key: &[u8], input: &[u8], tag: &[u8]) -> Option<&'static str> {
|
||||
let mac0 = <$mac as Mac>::new_from_slice(key).unwrap();
|
||||
|
||||
let mut mac = mac0.clone();
|
||||
mac.update(input);
|
||||
let result = mac.finalize().into_bytes();
|
||||
let n = tag.len();
|
||||
let result_bytes = match $trunc {
|
||||
"left" => &result[..n],
|
||||
"right" => &result[result.len() - n..],
|
||||
_ => &result[..],
|
||||
};
|
||||
if result_bytes != tag {
|
||||
return Some("whole message");
|
||||
}
|
||||
|
||||
// test reading different chunk sizes
|
||||
for chunk_size in 1..min(64, input.len()) {
|
||||
let mut mac = mac0.clone();
|
||||
for chunk in input.chunks(chunk_size) {
|
||||
mac.update(chunk);
|
||||
}
|
||||
let res = match $trunc {
|
||||
"left" => mac.verify_truncated_left(tag),
|
||||
"right" => mac.verify_truncated_right(tag),
|
||||
_ => mac.verify_slice(tag),
|
||||
};
|
||||
if res.is_err() {
|
||||
return Some("chunked message");
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
let data = include_bytes!(concat!("data/", $test_name, ".blb"));
|
||||
|
||||
for (i, row) in Blob3Iterator::new(data).unwrap().enumerate() {
|
||||
let [key, input, tag] = row.unwrap();
|
||||
if let Some(desc) = run_test(key, input, tag) {
|
||||
panic!(
|
||||
"\n\
|
||||
Failed test №{}: {}\n\
|
||||
key:\t{:?}\n\
|
||||
input:\t{:?}\n\
|
||||
tag:\t{:?}\n",
|
||||
i, desc, key, input, tag,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Define resettable MAC test
|
||||
#[macro_export]
|
||||
#[cfg(feature = "mac")]
|
||||
#[cfg_attr(docsrs, doc(cfg(all(feature = "dev", feature = "mac"))))]
|
||||
macro_rules! new_resettable_mac_test {
|
||||
($name:ident, $test_name:expr, $mac:ty $(,)?) => {
|
||||
digest::new_resettable_mac_test!($name, $test_name, $mac, "");
|
||||
};
|
||||
($name:ident, $test_name:expr, $mac:ty, trunc_left $(,)?) => {
|
||||
digest::new_resettable_mac_test!($name, $test_name, $mac, "left");
|
||||
};
|
||||
($name:ident, $test_name:expr, $mac:ty, trunc_right $(,)?) => {
|
||||
digest::new_resettable_mac_test!($name, $test_name, $mac, "right");
|
||||
};
|
||||
($name:ident, $test_name:expr, $mac:ty, $trunc:expr $(,)?) => {
|
||||
#[test]
|
||||
fn $name() {
|
||||
use core::cmp::min;
|
||||
use digest::dev::blobby::Blob3Iterator;
|
||||
use digest::Mac;
|
||||
|
||||
fn run_test(key: &[u8], input: &[u8], tag: &[u8]) -> Option<&'static str> {
|
||||
let mac0 = <$mac as Mac>::new_from_slice(key).unwrap();
|
||||
|
||||
let mut mac = mac0.clone();
|
||||
mac.update(input);
|
||||
let result = mac.finalize_reset().into_bytes();
|
||||
let n = tag.len();
|
||||
let result_bytes = match $trunc {
|
||||
"left" => &result[..n],
|
||||
"right" => &result[result.len() - n..],
|
||||
_ => &result[..],
|
||||
};
|
||||
if result_bytes != tag {
|
||||
return Some("whole message");
|
||||
}
|
||||
|
||||
// test if reset worked correctly
|
||||
mac.update(input);
|
||||
let res = match $trunc {
|
||||
"left" => mac.verify_truncated_left(tag),
|
||||
"right" => mac.verify_truncated_right(tag),
|
||||
_ => mac.verify_slice(tag),
|
||||
};
|
||||
if res.is_err() {
|
||||
return Some("after reset");
|
||||
}
|
||||
|
||||
// test reading different chunk sizes
|
||||
for chunk_size in 1..min(64, input.len()) {
|
||||
let mut mac = mac0.clone();
|
||||
for chunk in input.chunks(chunk_size) {
|
||||
mac.update(chunk);
|
||||
}
|
||||
let res = match $trunc {
|
||||
"left" => mac.verify_truncated_left(tag),
|
||||
"right" => mac.verify_truncated_right(tag),
|
||||
_ => mac.verify_slice(tag),
|
||||
};
|
||||
if res.is_err() {
|
||||
return Some("chunked message");
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
let data = include_bytes!(concat!("data/", $test_name, ".blb"));
|
||||
|
||||
for (i, row) in Blob3Iterator::new(data).unwrap().enumerate() {
|
||||
let [key, input, tag] = row.unwrap();
|
||||
if let Some(desc) = run_test(key, input, tag) {
|
||||
panic!(
|
||||
"\n\
|
||||
Failed test №{}: {}\n\
|
||||
key:\t{:?}\n\
|
||||
input:\t{:?}\n\
|
||||
tag:\t{:?}\n",
|
||||
i, desc, key, input, tag,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
|
@ -0,0 +1,38 @@
|
|||
//! Xorshift RNG used for tests. Based on the `rand_xorshift` crate.
|
||||
use core::num::Wrapping;
|
||||
|
||||
/// Initial RNG state used in tests.
|
||||
// chosen by fair dice roll. guaranteed to be random.
|
||||
pub(crate) const RNG: XorShiftRng = XorShiftRng {
|
||||
x: Wrapping(0x0787_3B4A),
|
||||
y: Wrapping(0xFAAB_8FFE),
|
||||
z: Wrapping(0x1745_980F),
|
||||
w: Wrapping(0xB0AD_B4F3),
|
||||
};
|
||||
|
||||
/// Xorshift RNG instance.
|
||||
pub(crate) struct XorShiftRng {
|
||||
x: Wrapping<u32>,
|
||||
y: Wrapping<u32>,
|
||||
z: Wrapping<u32>,
|
||||
w: Wrapping<u32>,
|
||||
}
|
||||
|
||||
impl XorShiftRng {
|
||||
pub(crate) fn fill(&mut self, buf: &mut [u8; 1024]) {
|
||||
for chunk in buf.chunks_exact_mut(4) {
|
||||
chunk.copy_from_slice(&self.next_u32().to_le_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
fn next_u32(&mut self) -> u32 {
|
||||
let x = self.x;
|
||||
let t = x ^ (x << 11);
|
||||
self.x = self.y;
|
||||
self.y = self.z;
|
||||
self.z = self.w;
|
||||
let w = self.w;
|
||||
self.w = w ^ (w >> 19) ^ (t ^ (t >> 8));
|
||||
self.w.0
|
||||
}
|
||||
}
|
|
@ -0,0 +1,82 @@
|
|||
use crate::{VariableOutput, VariableOutputReset};
|
||||
use core::fmt::Debug;
|
||||
|
||||
/// Variable-output resettable digest test
|
||||
pub fn variable_reset_test<D>(input: &[u8], output: &[u8]) -> Option<&'static str>
|
||||
where
|
||||
D: VariableOutputReset + Debug + Clone,
|
||||
{
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
let mut buf = [0u8; 128];
|
||||
let buf = &mut buf[..output.len()];
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.update(input);
|
||||
let mut hasher2 = hasher.clone();
|
||||
hasher.finalize_variable(buf).unwrap();
|
||||
if buf != output {
|
||||
return Some("whole message");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
// Test if reset works correctly
|
||||
hasher2.reset();
|
||||
hasher2.update(input);
|
||||
hasher2.finalize_variable_reset(buf).unwrap();
|
||||
if buf != output {
|
||||
return Some("whole message after reset");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
// Test that it works when accepting the message in chunks
|
||||
for n in 1..core::cmp::min(17, input.len()) {
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
for chunk in input.chunks(n) {
|
||||
hasher.update(chunk);
|
||||
hasher2.update(chunk);
|
||||
}
|
||||
hasher.finalize_variable(buf).unwrap();
|
||||
if buf != output {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
hasher2.finalize_variable_reset(buf).unwrap();
|
||||
if buf != output {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Variable-output digest test
|
||||
pub fn variable_test<D>(input: &[u8], output: &[u8]) -> Option<&'static str>
|
||||
where
|
||||
D: VariableOutput + Debug + Clone,
|
||||
{
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
let mut buf = [0u8; 128];
|
||||
let buf = &mut buf[..output.len()];
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.update(input);
|
||||
hasher.finalize_variable(buf).unwrap();
|
||||
if buf != output {
|
||||
return Some("whole message");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
// Test that it works when accepting the message in chunks
|
||||
for n in 1..core::cmp::min(17, input.len()) {
|
||||
let mut hasher = D::new(output.len()).unwrap();
|
||||
for chunk in input.chunks(n) {
|
||||
hasher.update(chunk);
|
||||
}
|
||||
hasher.finalize_variable(buf).unwrap();
|
||||
if buf != output {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
}
|
||||
None
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
use crate::ExtendableOutputReset;
|
||||
use core::fmt::Debug;
|
||||
|
||||
/// Resettable XOF test
|
||||
pub fn xof_reset_test<D>(input: &[u8], output: &[u8]) -> Option<&'static str>
|
||||
where
|
||||
D: ExtendableOutputReset + Default + Debug + Clone,
|
||||
{
|
||||
let mut hasher = D::default();
|
||||
let mut buf = [0u8; 1024];
|
||||
let buf = &mut buf[..output.len()];
|
||||
// Test that it works when accepting the message all at once
|
||||
hasher.update(input);
|
||||
let mut hasher2 = hasher.clone();
|
||||
hasher.finalize_xof_into(buf);
|
||||
if buf != output {
|
||||
return Some("whole message");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
// Test if reset works correctly
|
||||
hasher2.reset();
|
||||
hasher2.update(input);
|
||||
hasher2.finalize_xof_reset_into(buf);
|
||||
if buf != output {
|
||||
return Some("whole message after reset");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
// Test that it works when accepting the message in chunks
|
||||
for n in 1..core::cmp::min(17, input.len()) {
|
||||
let mut hasher = D::default();
|
||||
for chunk in input.chunks(n) {
|
||||
hasher.update(chunk);
|
||||
hasher2.update(chunk);
|
||||
}
|
||||
hasher.finalize_xof_into(buf);
|
||||
if buf != output {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
|
||||
hasher2.finalize_xof_reset_into(buf);
|
||||
if buf != output {
|
||||
return Some("message in chunks");
|
||||
}
|
||||
buf.iter_mut().for_each(|b| *b = 0);
|
||||
}
|
||||
|
||||
None
|
||||
}
|
|
@ -1,86 +1,236 @@
|
|||
use super::{Input, FixedOutput, Reset};
|
||||
use generic_array::{GenericArray, ArrayLength};
|
||||
use generic_array::typenum::Unsigned;
|
||||
use super::{FixedOutput, FixedOutputReset, InvalidBufferSize, Reset, Update};
|
||||
use crypto_common::{typenum::Unsigned, Output, OutputSizeUser};
|
||||
|
||||
/// The `Digest` trait specifies an interface common for digest functions.
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
|
||||
/// Marker trait for cryptographic hash functions.
|
||||
pub trait HashMarker {}
|
||||
|
||||
/// Convenience wrapper trait covering functionality of cryptographic hash
|
||||
/// functions with fixed output size.
|
||||
///
|
||||
/// It's a convenience wrapper around `Input`, `FixedOutput`, `Reset`, `Clone`,
|
||||
/// and `Default` traits. It also provides additional convenience methods.
|
||||
pub trait Digest {
|
||||
type OutputSize: ArrayLength<u8>;
|
||||
/// Create new hasher instance
|
||||
/// This trait wraps [`Update`], [`FixedOutput`], [`Default`], and
|
||||
/// [`HashMarker`] traits and provides additional convenience methods.
|
||||
pub trait Digest: OutputSizeUser {
|
||||
/// Create new hasher instance.
|
||||
fn new() -> Self;
|
||||
|
||||
/// Create new hasher instance which has processed the provided data.
|
||||
fn new_with_prefix(data: impl AsRef<[u8]>) -> Self;
|
||||
|
||||
/// Process data, updating the internal state.
|
||||
fn update(&mut self, data: impl AsRef<[u8]>);
|
||||
|
||||
/// Process input data in a chained manner.
|
||||
#[must_use]
|
||||
fn chain_update(self, data: impl AsRef<[u8]>) -> Self;
|
||||
|
||||
/// Retrieve result and consume hasher instance.
|
||||
fn finalize(self) -> Output<Self>;
|
||||
|
||||
/// Write result into provided array and consume the hasher instance.
|
||||
fn finalize_into(self, out: &mut Output<Self>);
|
||||
|
||||
/// Retrieve result and reset hasher instance.
|
||||
fn finalize_reset(&mut self) -> Output<Self>
|
||||
where
|
||||
Self: FixedOutputReset;
|
||||
|
||||
/// Write result into provided array and reset the hasher instance.
|
||||
fn finalize_into_reset(&mut self, out: &mut Output<Self>)
|
||||
where
|
||||
Self: FixedOutputReset;
|
||||
|
||||
/// Reset hasher instance to its initial state.
|
||||
fn reset(&mut self)
|
||||
where
|
||||
Self: Reset;
|
||||
|
||||
/// Get output size of the hasher
|
||||
fn output_size() -> usize;
|
||||
|
||||
/// Compute hash of `data`.
|
||||
fn digest(data: impl AsRef<[u8]>) -> Output<Self>;
|
||||
}
|
||||
|
||||
impl<D: FixedOutput + Default + Update + HashMarker> Digest for D {
|
||||
#[inline]
|
||||
fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn new_with_prefix(data: impl AsRef<[u8]>) -> Self
|
||||
where
|
||||
Self: Default + Sized,
|
||||
{
|
||||
let mut h = Self::default();
|
||||
h.update(data.as_ref());
|
||||
h
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn update(&mut self, data: impl AsRef<[u8]>) {
|
||||
Update::update(self, data.as_ref());
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chain_update(mut self, data: impl AsRef<[u8]>) -> Self {
|
||||
Update::update(&mut self, data.as_ref());
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finalize(self) -> Output<Self> {
|
||||
FixedOutput::finalize_fixed(self)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finalize_into(self, out: &mut Output<Self>) {
|
||||
FixedOutput::finalize_into(self, out);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finalize_reset(&mut self) -> Output<Self>
|
||||
where
|
||||
Self: FixedOutputReset,
|
||||
{
|
||||
FixedOutputReset::finalize_fixed_reset(self)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finalize_into_reset(&mut self, out: &mut Output<Self>)
|
||||
where
|
||||
Self: FixedOutputReset,
|
||||
{
|
||||
FixedOutputReset::finalize_into_reset(self, out);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self)
|
||||
where
|
||||
Self: Reset,
|
||||
{
|
||||
Reset::reset(self)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn output_size() -> usize {
|
||||
Self::OutputSize::to_usize()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn digest(data: impl AsRef<[u8]>) -> Output<Self> {
|
||||
let mut hasher = Self::default();
|
||||
hasher.update(data.as_ref());
|
||||
hasher.finalize()
|
||||
}
|
||||
}
|
||||
|
||||
/// Modification of the [`Digest`] trait suitable for trait objects.
|
||||
pub trait DynDigest {
|
||||
/// Digest input data.
|
||||
///
|
||||
/// This method can be called repeatedly for use with streaming messages.
|
||||
fn input<B: AsRef<[u8]>>(&mut self, data: B);
|
||||
fn update(&mut self, data: &[u8]);
|
||||
|
||||
/// Digest input data in a chained manner.
|
||||
fn chain<B: AsRef<[u8]>>(self, data: B) -> Self where Self: Sized;
|
||||
/// Retrieve result and reset hasher instance
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn finalize_reset(&mut self) -> Box<[u8]> {
|
||||
let mut result = vec![0; self.output_size()];
|
||||
self.finalize_into_reset(&mut result).unwrap();
|
||||
result.into_boxed_slice()
|
||||
}
|
||||
|
||||
/// Retrieve result and consume hasher instance.
|
||||
fn result(self) -> GenericArray<u8, Self::OutputSize>;
|
||||
/// Retrieve result and consume boxed hasher instance
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
#[allow(clippy::boxed_local)]
|
||||
fn finalize(mut self: Box<Self>) -> Box<[u8]> {
|
||||
let mut result = vec![0; self.output_size()];
|
||||
self.finalize_into_reset(&mut result).unwrap();
|
||||
result.into_boxed_slice()
|
||||
}
|
||||
|
||||
/// Retrieve result and reset hasher instance.
|
||||
/// Write result into provided array and consume the hasher instance.
|
||||
///
|
||||
/// This method sometimes can be more efficient compared to hasher
|
||||
/// re-creation.
|
||||
fn result_reset(&mut self) -> GenericArray<u8, Self::OutputSize>;
|
||||
/// Returns error if buffer length is not equal to `output_size`.
|
||||
fn finalize_into(self, buf: &mut [u8]) -> Result<(), InvalidBufferSize>;
|
||||
|
||||
/// Write result into provided array and reset the hasher instance.
|
||||
///
|
||||
/// Returns error if buffer length is not equal to `output_size`.
|
||||
fn finalize_into_reset(&mut self, out: &mut [u8]) -> Result<(), InvalidBufferSize>;
|
||||
|
||||
/// Reset hasher instance to its initial state.
|
||||
fn reset(&mut self);
|
||||
|
||||
/// Get output size of the hasher
|
||||
fn output_size() -> usize;
|
||||
fn output_size(&self) -> usize;
|
||||
|
||||
/// Convenience function to compute hash of the `data`. It will handle
|
||||
/// hasher creation, data feeding and finalization.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```rust,ignore
|
||||
/// println!("{:x}", sha2::Sha256::digest(b"Hello world"));
|
||||
/// ```
|
||||
fn digest(data: &[u8]) -> GenericArray<u8, Self::OutputSize>;
|
||||
/// Clone hasher state into a boxed trait object
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn box_clone(&self) -> Box<dyn DynDigest>;
|
||||
}
|
||||
|
||||
impl<D: Input + FixedOutput + Reset + Clone + Default> Digest for D {
|
||||
type OutputSize = <Self as FixedOutput>::OutputSize;
|
||||
|
||||
fn new() -> Self {
|
||||
Self::default()
|
||||
impl<D: Update + FixedOutputReset + Reset + Clone + 'static> DynDigest for D {
|
||||
fn update(&mut self, data: &[u8]) {
|
||||
Update::update(self, data);
|
||||
}
|
||||
|
||||
fn input<B: AsRef<[u8]>>(&mut self, data: B) {
|
||||
Input::input(self, data);
|
||||
#[cfg(feature = "alloc")]
|
||||
fn finalize_reset(&mut self) -> Box<[u8]> {
|
||||
FixedOutputReset::finalize_fixed_reset(self)
|
||||
.to_vec()
|
||||
.into_boxed_slice()
|
||||
}
|
||||
|
||||
fn chain<B: AsRef<[u8]>>(self, data: B) -> Self where Self: Sized {
|
||||
Input::chain(self, data)
|
||||
#[cfg(feature = "alloc")]
|
||||
fn finalize(self: Box<Self>) -> Box<[u8]> {
|
||||
FixedOutput::finalize_fixed(*self)
|
||||
.to_vec()
|
||||
.into_boxed_slice()
|
||||
}
|
||||
|
||||
fn result(self) -> GenericArray<u8, Self::OutputSize> {
|
||||
self.fixed_result()
|
||||
fn finalize_into(self, buf: &mut [u8]) -> Result<(), InvalidBufferSize> {
|
||||
if buf.len() == self.output_size() {
|
||||
FixedOutput::finalize_into(self, Output::<Self>::from_mut_slice(buf));
|
||||
Ok(())
|
||||
} else {
|
||||
Err(InvalidBufferSize)
|
||||
}
|
||||
}
|
||||
|
||||
fn result_reset(&mut self) -> GenericArray<u8, Self::OutputSize> {
|
||||
let res = self.clone().fixed_result();
|
||||
self.reset();
|
||||
res
|
||||
fn finalize_into_reset(&mut self, buf: &mut [u8]) -> Result<(), InvalidBufferSize> {
|
||||
if buf.len() == self.output_size() {
|
||||
FixedOutputReset::finalize_into_reset(self, Output::<Self>::from_mut_slice(buf));
|
||||
Ok(())
|
||||
} else {
|
||||
Err(InvalidBufferSize)
|
||||
}
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
<Self as Reset>::reset(self)
|
||||
Reset::reset(self);
|
||||
}
|
||||
|
||||
fn output_size() -> usize {
|
||||
Self::OutputSize::to_usize()
|
||||
fn output_size(&self) -> usize {
|
||||
<Self as OutputSizeUser>::OutputSize::to_usize()
|
||||
}
|
||||
|
||||
fn digest(data: &[u8]) -> GenericArray<u8, Self::OutputSize> {
|
||||
let mut hasher = Self::default();
|
||||
Input::input(&mut hasher, data);
|
||||
hasher.fixed_result()
|
||||
#[cfg(feature = "alloc")]
|
||||
fn box_clone(&self) -> Box<dyn DynDigest> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
impl Clone for Box<dyn DynDigest> {
|
||||
fn clone(&self) -> Self {
|
||||
self.box_clone()
|
||||
}
|
||||
}
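
A short usage sketch of the `Digest` convenience trait defined above (assuming the
`sha2` crate):

```rust
use digest::Digest;
use sha2::Sha256;

fn main() {
    // Incremental hashing through the convenience methods...
    let mut hasher = Sha256::new();
    hasher.update(b"hello ");
    hasher.update(b"world");
    let hash = hasher.finalize();
    assert_eq!(hash.len(), 32);

    // ...or the one-shot helper for short inputs.
    assert_eq!(Sha256::digest(b"hello world")[..], hash[..]);
}
```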
|
||||
|
|
|
@ -1,63 +0,0 @@
|
|||
#![cfg(feature = "std")]
|
||||
use std::boxed::Box;
|
||||
|
||||
use super::{Input, FixedOutput, Reset};
|
||||
use generic_array::typenum::Unsigned;
|
||||
|
||||
/// The `DynDigest` trait is a modification of `Digest` trait suitable
|
||||
/// for trait objects.
|
||||
pub trait DynDigest {
|
||||
/// Digest input data.
|
||||
///
|
||||
/// This method can be called repeatedly for use with streaming messages.
|
||||
fn input(&mut self, data: &[u8]);
|
||||
|
||||
/// Retrieve result and reset hasher instance
|
||||
fn result_reset(&mut self) -> Box<[u8]>;
|
||||
|
||||
/// Retrieve result and consume boxed hasher instance
|
||||
fn result(self: Box<Self>) -> Box<[u8]>;
|
||||
|
||||
/// Reset hasher instance to its initial state.
|
||||
fn reset(&mut self);
|
||||
|
||||
/// Get output size of the hasher
|
||||
fn output_size(&self) -> usize;
|
||||
|
||||
/// Clone hasher state into a boxed trait object
|
||||
fn box_clone(&self) -> Box<DynDigest>;
|
||||
}
|
||||
|
||||
impl<D: Input + FixedOutput + Reset + Clone + 'static> DynDigest for D {
|
||||
fn input(&mut self, data: &[u8]) {
|
||||
Input::input(self, data);
|
||||
}
|
||||
|
||||
fn result_reset(&mut self) -> Box<[u8]> {
|
||||
let res = self.clone().fixed_result().to_vec().into_boxed_slice();
|
||||
Reset::reset(self);
|
||||
res
|
||||
}
|
||||
|
||||
fn result(self: Box<Self>) -> Box<[u8]> {
|
||||
self.fixed_result().to_vec().into_boxed_slice()
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
Reset::reset(self);
|
||||
}
|
||||
|
||||
fn output_size(&self) -> usize {
|
||||
<Self as FixedOutput>::OutputSize::to_usize()
|
||||
}
|
||||
|
||||
fn box_clone(&self) -> Box<DynDigest> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Clone for Box<DynDigest> {
|
||||
fn clone(&self) -> Self {
|
||||
self.box_clone()
|
||||
}
|
||||
}
|
|
@ -1,20 +0,0 @@
|
|||
use core::fmt;
|
||||
#[cfg(feature = "std")]
|
||||
use std::error;
|
||||
|
||||
/// The error type for variable hasher initialization
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
pub struct InvalidOutputSize;
|
||||
|
||||
impl fmt::Display for InvalidOutputSize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.write_str("invalid output size")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl error::Error for InvalidOutputSize {
|
||||
fn description(&self) -> &str {
|
||||
"invalid output size"
|
||||
}
|
||||
}
|
|
@ -1,141 +1,299 @@
|
|||
//! This crate provides traits which describe functionality of cryptographic hash
|
||||
//! functions.
|
||||
//! functions and Message Authentication algorithms.
|
||||
//!
|
||||
//! Traits in this repository can be separated into two levels:
|
||||
//! - Low level traits: `Input`, `BlockInput`, `Reset`, `FixedOutput`,
|
||||
//! `VariableOutput`, `ExtendableOutput`. These traits atomically describe
|
||||
//! available functionality of hash function implementations.
|
||||
//! - Convenience trait: `Digest`, `DynDigest`. They are wrappers around
|
||||
//! low level traits for most common hash-function use-cases.
|
||||
//! Traits in this repository are organized into the following levels:
|
||||
//!
|
||||
//! Additionally hash functions implement traits from `std`: `Default`, `Clone`,
|
||||
//! `Write`. (the latter depends on enabled-by-default `std` crate feature)
|
||||
//! - **High-level convenience traits**: [`Digest`], [`DynDigest`], [`Mac`].
|
||||
//! Wrappers around lower-level traits for most common use-cases. Users should
|
||||
//! usually prefer using these traits.
|
||||
//! - **Mid-level traits**: [`Update`], [`FixedOutput`], [`FixedOutputReset`],
|
||||
//! [`ExtendableOutput`], [`ExtendableOutputReset`], [`XofReader`],
|
||||
//! [`VariableOutput`], [`Reset`], [`KeyInit`], and [`InnerInit`]. These
|
||||
//! traits atomically describe available functionality of an algorithm.
|
||||
//! - **Marker traits**: [`HashMarker`], [`MacMarker`]. Used to distinguish
|
||||
//! different algorithm classes.
|
||||
//! - **Low-level traits** defined in the [`core_api`] module. These traits
|
||||
//! operate at a block-level and do not contain any built-in buffering.
|
||||
//! They are intended to be implemented by low-level algorithm providers only.
|
||||
//! Usually they should not be used in application-level code.
|
||||
//!
|
||||
//! The `Digest` trait is the most commonly used trait.
|
||||
#![no_std]
|
||||
#![doc(html_logo_url =
|
||||
"https://raw.githubusercontent.com/RustCrypto/meta/master/logo_small.png")]
|
||||
pub extern crate generic_array;
|
||||
#[cfg(feature = "std")]
|
||||
#[macro_use] extern crate std;
|
||||
#[cfg(feature = "dev")]
|
||||
pub extern crate blobby;
|
||||
use generic_array::{GenericArray, ArrayLength};
|
||||
#[cfg(feature = "std")]
|
||||
use std::vec::Vec;
|
||||
//! Additionally hash functions implement traits from the standard library:
|
||||
//! [`Default`], [`Clone`], [`Write`][std::io::Write]. The latter is
|
||||
//! feature-gated behind `std` feature, which is usually enabled by default
|
||||
//! by hash implementation crates.
|
||||
|
||||
#![no_std]
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![forbid(unsafe_code)]
|
||||
#![doc(
|
||||
html_logo_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_favicon_url = "https://raw.githubusercontent.com/RustCrypto/media/6ee8e381/logo.svg",
|
||||
html_root_url = "https://docs.rs/digest/0.10.3"
|
||||
)]
|
||||
#![warn(missing_docs, rust_2018_idioms)]
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[macro_use]
|
||||
extern crate alloc;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
extern crate std;
|
||||
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
pub use crypto_common::rand_core;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
|
||||
mod digest;
|
||||
mod dyn_digest;
|
||||
mod errors;
|
||||
#[cfg(feature = "dev")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "dev")))]
|
||||
pub mod dev;
|
||||
|
||||
pub use errors::InvalidOutputSize;
|
||||
pub use digest::Digest;
|
||||
#[cfg(feature = "std")]
|
||||
pub use dyn_digest::DynDigest;
|
||||
#[cfg(feature = "core-api")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "core-api")))]
|
||||
pub mod core_api;
|
||||
mod digest;
|
||||
#[cfg(feature = "mac")]
|
||||
mod mac;
|
||||
|
||||
/// Trait for processing input data
|
||||
pub trait Input {
|
||||
/// Digest input data.
|
||||
///
|
||||
/// This method can be called repeatedly, e.g. for processing streaming
|
||||
/// messages.
|
||||
fn input<B: AsRef<[u8]>>(&mut self, data: B);
|
||||
#[cfg(feature = "core-api")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "core-api")))]
|
||||
pub use block_buffer;
|
||||
pub use crypto_common;
|
||||
|
||||
pub use crate::digest::{Digest, DynDigest, HashMarker};
|
||||
pub use crypto_common::{generic_array, typenum, typenum::consts, Output, OutputSizeUser, Reset};
|
||||
#[cfg(feature = "mac")]
|
||||
pub use crypto_common::{InnerInit, InvalidLength, Key, KeyInit};
|
||||
#[cfg(feature = "mac")]
|
||||
pub use mac::{CtOutput, Mac, MacError, MacMarker};
|
||||
|
||||
use core::fmt;
|
||||
|
||||
/// Types which consume data with byte granularity.
|
||||
pub trait Update {
|
||||
/// Update state using the provided data.
|
||||
fn update(&mut self, data: &[u8]);
|
||||
|
||||
/// Digest input data in a chained manner.
|
||||
fn chain<B: AsRef<[u8]>>(mut self, data: B) -> Self where Self: Sized {
|
||||
self.input(data);
|
||||
#[must_use]
|
||||
fn chain(mut self, data: impl AsRef<[u8]>) -> Self
|
||||
where
|
||||
Self: Sized,
|
||||
{
|
||||
self.update(data.as_ref());
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait to indicate that digest function processes data in blocks of size
|
||||
/// `BlockSize`.
|
||||
///
|
||||
/// The main usage of this trait is for implementing HMAC generically.
|
||||
pub trait BlockInput {
|
||||
type BlockSize: ArrayLength<u8>;
|
||||
/// Trait for hash functions with fixed-size output.
|
||||
pub trait FixedOutput: Update + OutputSizeUser + Sized {
|
||||
/// Consume value and write result into provided array.
|
||||
fn finalize_into(self, out: &mut Output<Self>);
|
||||
|
||||
/// Retrieve result and consume the hasher instance.
|
||||
#[inline]
|
||||
fn finalize_fixed(self) -> Output<Self> {
|
||||
let mut out = Default::default();
|
||||
self.finalize_into(&mut out);
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for returning digest result with the fixed size
|
||||
pub trait FixedOutput {
|
||||
type OutputSize: ArrayLength<u8>;
|
||||
/// Trait for hash functions with fixed-size output able to reset themselves.
|
||||
pub trait FixedOutputReset: FixedOutput + Reset {
|
||||
/// Write result into provided array and reset the hasher state.
|
||||
fn finalize_into_reset(&mut self, out: &mut Output<Self>);
|
||||
|
||||
/// Retrieve result and consume hasher instance.
|
||||
fn fixed_result(self) -> GenericArray<u8, Self::OutputSize>;
|
||||
/// Retrieve result and reset the hasher state.
|
||||
#[inline]
|
||||
fn finalize_fixed_reset(&mut self) -> Output<Self> {
|
||||
let mut out = Default::default();
|
||||
self.finalize_into_reset(&mut out);
|
||||
out
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for returning digest result with the variable size
|
||||
pub trait VariableOutput: core::marker::Sized {
|
||||
/// Trait for reader types which are used to extract extendable output
|
||||
/// from a XOF (extendable-output function) result.
|
||||
pub trait XofReader {
|
||||
/// Read output into the `buffer`. Can be called an unlimited number of times.
|
||||
fn read(&mut self, buffer: &mut [u8]);
|
||||
|
||||
/// Read output into a boxed slice of the specified size.
|
||||
///
|
||||
/// Can be called an unlimited number of times in combination with `read`.
|
||||
///
|
||||
/// `Box<[u8]>` is used instead of `Vec<u8>` to save stack space, since
|
||||
/// they have size of 2 and 3 words respectively.
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn read_boxed(&mut self, n: usize) -> Box<[u8]> {
|
||||
let mut buf = vec![0u8; n].into_boxed_slice();
|
||||
self.read(&mut buf);
|
||||
buf
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for hash functions with extendable-output (XOF).
|
||||
pub trait ExtendableOutput: Sized + Update {
|
||||
/// Reader
|
||||
type Reader: XofReader;
|
||||
|
||||
/// Retrieve XOF reader and consume hasher instance.
|
||||
fn finalize_xof(self) -> Self::Reader;
|
||||
|
||||
/// Finalize XOF and write result into `out`.
|
||||
fn finalize_xof_into(self, out: &mut [u8]) {
|
||||
self.finalize_xof().read(out);
|
||||
}
|
||||
|
||||
/// Compute hash of `data` and write it into `output`.
|
||||
fn digest_xof(input: impl AsRef<[u8]>, output: &mut [u8])
|
||||
where
|
||||
Self: Default,
|
||||
{
|
||||
let mut hasher = Self::default();
|
||||
hasher.update(input.as_ref());
|
||||
hasher.finalize_xof().read(output);
|
||||
}
|
||||
|
||||
/// Retrieve result into a boxed slice of the specified size and consume
|
||||
/// the hasher.
|
||||
///
|
||||
/// `Box<[u8]>` is used instead of `Vec<u8>` to save stack space, since
|
||||
/// they have size of 2 and 3 words respectively.
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn finalize_boxed(self, output_size: usize) -> Box<[u8]> {
|
||||
let mut buf = vec![0u8; output_size].into_boxed_slice();
|
||||
self.finalize_xof().read(&mut buf);
|
||||
buf
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for hash functions with extendable-output (XOF) able to reset themselves.
|
||||
pub trait ExtendableOutputReset: ExtendableOutput + Reset {
|
||||
/// Retrieve XOF reader and reset hasher instance state.
|
||||
fn finalize_xof_reset(&mut self) -> Self::Reader;
|
||||
|
||||
/// Finalize XOF, write result into `out`, and reset the hasher state.
|
||||
fn finalize_xof_reset_into(&mut self, out: &mut [u8]) {
|
||||
self.finalize_xof_reset().read(out);
|
||||
}
|
||||
|
||||
/// Retrieve result into a boxed slice of the specified size and reset
|
||||
/// the hasher state.
|
||||
///
|
||||
/// `Box<[u8]>` is used instead of `Vec<u8>` to save stack space, since
|
||||
/// they have size of 2 and 3 words respectively.
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn finalize_boxed_reset(&mut self, output_size: usize) -> Box<[u8]> {
|
||||
let mut buf = vec![0u8; output_size].into_boxed_slice();
|
||||
self.finalize_xof_reset().read(&mut buf);
|
||||
buf
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for hash functions with variable-size output.
|
||||
pub trait VariableOutput: Sized + Update {
|
||||
/// Maximum size of output hash.
|
||||
const MAX_OUTPUT_SIZE: usize;
|
||||
|
||||
/// Create new hasher instance with the given output size.
|
||||
///
|
||||
/// It will return `Err(InvalidOutputSize)` in case if hasher can not return
|
||||
/// specified output size. It will always return an error if output size
|
||||
/// equals to zero.
|
||||
/// hash of the specified output size.
|
||||
fn new(output_size: usize) -> Result<Self, InvalidOutputSize>;
|
||||
|
||||
/// Get output size of the hasher instance provided to the `new` method
|
||||
fn output_size(&self) -> usize;
|
||||
|
||||
/// Retrieve result via closure and consume hasher.
|
||||
/// Write result into the output buffer.
|
||||
///
|
||||
/// Closure is guaranteed to be called, length of the buffer passed to it
|
||||
/// will be equal to `output_size`.
|
||||
fn variable_result<F: FnOnce(&[u8])>(self, f: F);
|
||||
/// Returns `Err(InvalidOutputSize)` if `out` size is not equal to
|
||||
/// `self.output_size()`.
|
||||
fn finalize_variable(self, out: &mut [u8]) -> Result<(), InvalidBufferSize>;
|
||||
|
||||
/// Retrieve result into vector and consume hasher.
|
||||
#[cfg(feature = "std")]
|
||||
fn vec_result(self) -> Vec<u8> {
|
||||
let mut buf = Vec::with_capacity(self.output_size());
|
||||
self.variable_result(|res| buf.extend_from_slice(res));
|
||||
/// Compute hash of `data` and write it to `output`.
|
||||
///
|
||||
/// Length of the output hash is determined by `output`. If `output` is
|
||||
/// bigger than `Self::MAX_OUTPUT_SIZE`, this method returns
|
||||
/// `InvalidOutputSize`.
|
||||
fn digest_variable(
|
||||
input: impl AsRef<[u8]>,
|
||||
output: &mut [u8],
|
||||
) -> Result<(), InvalidOutputSize> {
|
||||
let mut hasher = Self::new(output.len())?;
|
||||
hasher.update(input.as_ref());
|
||||
hasher
|
||||
.finalize_variable(output)
|
||||
.map_err(|_| InvalidOutputSize)
|
||||
}
|
||||
|
||||
/// Retrieve result into a boxed slice and consume hasher.
|
||||
///
|
||||
/// `Box<[u8]>` is used instead of `Vec<u8>` to save stack space, since
|
||||
/// they have size of 2 and 3 words respectively.
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn finalize_boxed(self) -> Box<[u8]> {
|
||||
let n = self.output_size();
|
||||
let mut buf = vec![0u8; n].into_boxed_slice();
|
||||
self.finalize_variable(&mut buf)
|
||||
.expect("buf length is equal to output_size");
|
||||
buf
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for describing readers which are used to extract extendable output
|
||||
/// from XOF (extendable-output function) result.
|
||||
pub trait XofReader {
|
||||
/// Read output into the `buffer`. Can be called unlimited number of times.
|
||||
fn read(&mut self, buffer: &mut [u8]);
|
||||
}
|
||||
/// Trait for hash functions with variable-size output able to reset themselves.
|
||||
pub trait VariableOutputReset: VariableOutput + Reset {
|
||||
/// Write result into the output buffer and reset the hasher state.
|
||||
///
|
||||
/// Returns `Err(InvalidOutputSize)` if `out` size is not equal to
|
||||
/// `self.output_size()`.
|
||||
fn finalize_variable_reset(&mut self, out: &mut [u8]) -> Result<(), InvalidBufferSize>;
|
||||
|
||||
/// Trait which describes extendable-output functions (XOF).
|
||||
pub trait ExtendableOutput: core::marker::Sized {
|
||||
type Reader: XofReader;
|
||||
|
||||
/// Retrieve XOF reader and consume hasher instance.
|
||||
fn xof_result(self) -> Self::Reader;
|
||||
|
||||
/// Retrieve result into vector of specified length.
|
||||
#[cfg(feature = "std")]
|
||||
fn vec_result(self, n: usize) -> Vec<u8> {
|
||||
let mut buf = vec![0u8; n];
|
||||
self.xof_result().read(&mut buf);
|
||||
/// Retrieve result into a boxed slice and reset the hasher state.
|
||||
///
|
||||
/// `Box<[u8]>` is used instead of `Vec<u8>` to save stack space, since
|
||||
/// they have size of 2 and 3 words respectively.
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "alloc")))]
|
||||
fn finalize_boxed_reset(&mut self) -> Box<[u8]> {
|
||||
let n = self.output_size();
|
||||
let mut buf = vec![0u8; n].into_boxed_slice();
|
||||
self.finalize_variable_reset(&mut buf)
|
||||
.expect("buf length is equal to output_size");
|
||||
buf
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait for resetting hash instances
|
||||
pub trait Reset {
|
||||
/// Reset hasher instance to its initial state and return current state.
|
||||
fn reset(&mut self);
|
||||
}
|
||||
/// The error type used in variable hash traits.
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
pub struct InvalidOutputSize;
|
||||
|
||||
#[macro_export]
|
||||
/// Implements `std::io::Write` trait for implementer of `Input`
|
||||
macro_rules! impl_write {
|
||||
($hasher:ident) => {
|
||||
#[cfg(feature = "std")]
|
||||
impl ::std::io::Write for $hasher {
|
||||
fn write(&mut self, buf: &[u8]) -> ::std::io::Result<usize> {
|
||||
Input::input(self, buf);
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> ::std::io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl fmt::Display for InvalidOutputSize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("invalid output size")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
|
||||
impl std::error::Error for InvalidOutputSize {}
|
||||
|
||||
/// Buffer length is not equal to hash output size.
|
||||
#[derive(Default, Debug, Copy, Clone, Eq, PartialEq)]
|
||||
pub struct InvalidBufferSize;
|
||||
|
||||
impl fmt::Display for InvalidBufferSize {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("invalid buffer length")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl std::error::Error for InvalidBufferSize {}
|
||||
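For orientation, a minimal sketch of the common fixed-output path through the high-level `Digest` convenience trait described in the crate docs above, assuming the vendored `sha2` crate as the concrete implementation (not part of this hunk):

```rust
// Sketch only: `sha2::Sha256` is an assumed concrete hasher used to
// exercise the `Digest` convenience trait from this crate.
use digest::{Digest, Output};
use sha2::Sha256;

fn main() {
    // Incremental hashing through the `Update`-style methods exposed by `Digest`.
    let mut hasher = Sha256::new();
    hasher.update(b"hello ");
    hasher.update(b"world");
    let incremental: Output<Sha256> = hasher.finalize();

    // One-shot helper plus the chaining style.
    let one_shot = Sha256::digest(b"hello world");
    let chained = Sha256::new()
        .chain_update(b"hello ")
        .chain_update(b"world")
        .finalize();

    assert_eq!(incremental, one_shot);
    assert_eq!(incremental, chained);
}
```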
|
|
|
@ -0,0 +1,261 @@
|
|||
use crate::{FixedOutput, FixedOutputReset, Update};
|
||||
use crypto_common::{InvalidLength, Key, KeyInit, Output, OutputSizeUser, Reset};
|
||||
|
||||
#[cfg(feature = "rand_core")]
|
||||
use crate::rand_core::{CryptoRng, RngCore};
|
||||
use core::fmt;
|
||||
use crypto_common::typenum::Unsigned;
|
||||
use subtle::{Choice, ConstantTimeEq};
|
||||
|
||||
/// Marker trait for Message Authentication algorithms.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
|
||||
pub trait MacMarker {}
|
||||
|
||||
/// Convenience wrapper trait covering functionality of Message Authentication algorithms.
|
||||
///
|
||||
/// This trait wraps [`KeyInit`], [`Update`], [`FixedOutput`], and [`MacMarker`]
|
||||
/// traits and provides additional convenience methods.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
|
||||
pub trait Mac: OutputSizeUser + Sized {
|
||||
/// Create new value from fixed size key.
|
||||
fn new(key: &Key<Self>) -> Self
|
||||
where
|
||||
Self: KeyInit;
|
||||
|
||||
/// Generate random key using the provided [`CryptoRng`].
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
fn generate_key(rng: impl CryptoRng + RngCore) -> Key<Self>
|
||||
where
|
||||
Self: KeyInit;
|
||||
|
||||
/// Create new value from variable size key.
|
||||
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength>
|
||||
where
|
||||
Self: KeyInit;
|
||||
|
||||
/// Update state using the provided data.
|
||||
fn update(&mut self, data: &[u8]);
|
||||
|
||||
/// Process input data in a chained manner.
|
||||
#[must_use]
|
||||
fn chain_update(self, data: impl AsRef<[u8]>) -> Self;
|
||||
|
||||
/// Obtain the result of a [`Mac`] computation as a [`CtOutput`] and consume
|
||||
/// [`Mac`] instance.
|
||||
fn finalize(self) -> CtOutput<Self>;
|
||||
|
||||
/// Obtain the result of a [`Mac`] computation as a [`CtOutput`] and reset
|
||||
/// [`Mac`] instance.
|
||||
fn finalize_reset(&mut self) -> CtOutput<Self>
|
||||
where
|
||||
Self: FixedOutputReset;
|
||||
|
||||
/// Reset MAC instance to its initial state.
|
||||
fn reset(&mut self)
|
||||
where
|
||||
Self: Reset;
|
||||
|
||||
/// Check if tag/code value is correct for the processed input.
|
||||
fn verify(self, tag: &Output<Self>) -> Result<(), MacError>;
|
||||
|
||||
/// Check truncated tag correctness using all bytes
|
||||
/// of calculated tag.
|
||||
///
|
||||
/// Returns `Error` if `tag` is not valid or not equal in length
|
||||
/// to MAC's output.
|
||||
fn verify_slice(self, tag: &[u8]) -> Result<(), MacError>;
|
||||
|
||||
/// Check truncated tag correctness using left side bytes
|
||||
/// (i.e. `tag[..n]`) of calculated tag.
|
||||
///
|
||||
/// Returns `Error` if `tag` is not valid or empty.
|
||||
fn verify_truncated_left(self, tag: &[u8]) -> Result<(), MacError>;
|
||||
|
||||
/// Check truncated tag correctness using right side bytes
|
||||
/// (i.e. `tag[n..]`) of calculated tag.
|
||||
///
|
||||
/// Returns `Error` if `tag` is not valid or empty.
|
||||
fn verify_truncated_right(self, tag: &[u8]) -> Result<(), MacError>;
|
||||
}
|
||||
|
||||
impl<T: Update + FixedOutput + MacMarker> Mac for T {
|
||||
#[inline(always)]
|
||||
fn new(key: &Key<Self>) -> Self
|
||||
where
|
||||
Self: KeyInit,
|
||||
{
|
||||
KeyInit::new(key)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn new_from_slice(key: &[u8]) -> Result<Self, InvalidLength>
|
||||
where
|
||||
Self: KeyInit,
|
||||
{
|
||||
KeyInit::new_from_slice(key)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn update(&mut self, data: &[u8]) {
|
||||
Update::update(self, data);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn chain_update(mut self, data: impl AsRef<[u8]>) -> Self {
|
||||
Update::update(&mut self, data.as_ref());
|
||||
self
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn finalize(self) -> CtOutput<Self> {
|
||||
CtOutput::new(self.finalize_fixed())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn finalize_reset(&mut self) -> CtOutput<Self>
|
||||
where
|
||||
Self: FixedOutputReset,
|
||||
{
|
||||
CtOutput::new(self.finalize_fixed_reset())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn reset(&mut self)
|
||||
where
|
||||
Self: Reset,
|
||||
{
|
||||
Reset::reset(self)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn verify(self, tag: &Output<Self>) -> Result<(), MacError> {
|
||||
if self.finalize() == tag.into() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(MacError)
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn verify_slice(self, tag: &[u8]) -> Result<(), MacError> {
|
||||
let n = tag.len();
|
||||
if n != Self::OutputSize::USIZE {
|
||||
return Err(MacError);
|
||||
}
|
||||
let choice = self.finalize_fixed().ct_eq(tag);
|
||||
if choice.unwrap_u8() == 1 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(MacError)
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_truncated_left(self, tag: &[u8]) -> Result<(), MacError> {
|
||||
let n = tag.len();
|
||||
if n == 0 || n > Self::OutputSize::USIZE {
|
||||
return Err(MacError);
|
||||
}
|
||||
let choice = self.finalize_fixed()[..n].ct_eq(tag);
|
||||
|
||||
if choice.unwrap_u8() == 1 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(MacError)
|
||||
}
|
||||
}
|
||||
|
||||
fn verify_truncated_right(self, tag: &[u8]) -> Result<(), MacError> {
|
||||
let n = tag.len();
|
||||
if n == 0 || n > Self::OutputSize::USIZE {
|
||||
return Err(MacError);
|
||||
}
|
||||
let m = Self::OutputSize::USIZE - n;
|
||||
let choice = self.finalize_fixed()[m..].ct_eq(tag);
|
||||
|
||||
if choice.unwrap_u8() == 1 {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(MacError)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "rand_core")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rand_core")))]
|
||||
#[inline]
|
||||
fn generate_key(rng: impl CryptoRng + RngCore) -> Key<Self>
|
||||
where
|
||||
Self: KeyInit,
|
||||
{
|
||||
<T as KeyInit>::generate_key(rng)
|
||||
}
|
||||
}
|
||||
|
||||
/// Fixed size output value which provides a safe [`Eq`] implementation that
|
||||
/// runs in constant time.
|
||||
///
|
||||
/// It is useful for implementing Message Authentication Codes (MACs).
|
||||
#[derive(Clone)]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
|
||||
pub struct CtOutput<T: OutputSizeUser> {
|
||||
bytes: Output<T>,
|
||||
}
|
||||
|
||||
impl<T: OutputSizeUser> CtOutput<T> {
|
||||
/// Create a new [`CtOutput`] value.
|
||||
#[inline(always)]
|
||||
pub fn new(bytes: Output<T>) -> Self {
|
||||
Self { bytes }
|
||||
}
|
||||
|
||||
/// Get the inner [`Output`] array this type wraps.
|
||||
#[inline(always)]
|
||||
pub fn into_bytes(self) -> Output<T> {
|
||||
self.bytes
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: OutputSizeUser> From<Output<T>> for CtOutput<T> {
|
||||
#[inline(always)]
|
||||
fn from(bytes: Output<T>) -> Self {
|
||||
Self { bytes }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: OutputSizeUser> From<&'a Output<T>> for CtOutput<T> {
|
||||
#[inline(always)]
|
||||
fn from(bytes: &'a Output<T>) -> Self {
|
||||
bytes.clone().into()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: OutputSizeUser> ConstantTimeEq for CtOutput<T> {
|
||||
#[inline(always)]
|
||||
fn ct_eq(&self, other: &Self) -> Choice {
|
||||
self.bytes.ct_eq(&other.bytes)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: OutputSizeUser> PartialEq for CtOutput<T> {
|
||||
#[inline(always)]
|
||||
fn eq(&self, x: &CtOutput<T>) -> bool {
|
||||
self.ct_eq(x).unwrap_u8() == 1
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: OutputSizeUser> Eq for CtOutput<T> {}
|
||||
|
||||
/// Error type for when the [`Output`] of a [`Mac`]
|
||||
/// is not equal to the expected value.
|
||||
#[derive(Default, Debug, Copy, Clone, Eq, PartialEq)]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "mac")))]
|
||||
pub struct MacError;
|
||||
|
||||
impl fmt::Display for MacError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.write_str("MAC tag mismatch")
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl std::error::Error for MacError {}
|
|
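For orientation, a minimal sketch of the `Mac` trait in use. The `hmac` crate is not part of this diff; it is only assumed here as a convenient concrete `KeyInit + Update + FixedOutput + MacMarker` type, and the `HmacSha256` alias is illustrative:

```rust
// Sketch only: `hmac` and `sha2` are assumed dependencies used to drive
// the `Mac` convenience trait defined above.
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

fn main() {
    let mut mac = HmacSha256::new_from_slice(b"secret key").expect("HMAC accepts any key length");
    mac.update(b"message");
    let tag = mac.finalize().into_bytes(); // `CtOutput::into_bytes`

    // Verification goes through the constant-time `CtOutput` comparison.
    let mut verifier = HmacSha256::new_from_slice(b"secret key").unwrap();
    verifier.update(b"message");
    assert!(verifier.verify_slice(&tag).is_ok());
}
```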
@ -1 +0,0 @@
|
|||
{"files":{"Cargo.toml":"c63db0226f9aac6e001898735c81392b8f01dfc8b7245f37e290990562c3c0d8","LICENSE-APACHE":"a9040321c3712d8fd0b09cf52b17445de04a23a10165049ae187cd39e5c86be5","LICENSE-MIT":"52232c2cee3bb7d8cabe47ef367f1bf8bb607c22bdfca0219d6156cb7f446e9d","src/lib.rs":"2cd66d61acfb96f3425194c12695d8e55cf56c6fbd02de90033c45bdcc338c1a"},"package":"e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed"}
|
|
@ -1,9 +0,0 @@
|
|||
[package]
|
||||
name = "fake-simd"
|
||||
version = "0.1.2"
|
||||
authors = ["The Rust-Crypto Project Developers"]
|
||||
license = "MIT/Apache-2.0"
|
||||
description = "Crate for mimicking simd crate on stable Rust"
|
||||
documentation = "https://docs.rs/fake-simd"
|
||||
repository = "https://github.com/RustCrypto/utils"
|
||||
keywords = ["simd"]
|
|
@ -1,201 +0,0 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -1,26 +0,0 @@
|
|||
Copyright (c) 2006-2009 Graydon Hoare
|
||||
Copyright (c) 2009-2013 Mozilla Foundation
|
||||
|
||||
Permission is hereby granted, free of charge, to any
|
||||
person obtaining a copy of this software and associated
|
||||
documentation files (the "Software"), to deal in the
|
||||
Software without restriction, including without
|
||||
limitation the rights to use, copy, modify, merge,
|
||||
publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software
|
||||
is furnished to do so, subject to the following
|
||||
conditions:
|
||||
|
||||
The above copyright notice and this permission notice
|
||||
shall be included in all copies or substantial portions
|
||||
of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
|
||||
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
|
||||
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
|
||||
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
|
||||
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
|
||||
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
DEALINGS IN THE SOFTWARE.
|
|
@ -1,108 +0,0 @@
|
|||
#![no_std]
|
||||
use core::ops::{Add, BitAnd, BitOr, BitXor, Shl, Shr, Sub};
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq)]
|
||||
#[allow(non_camel_case_types)]
|
||||
pub struct u32x4(pub u32, pub u32, pub u32, pub u32);
|
||||
|
||||
impl Add for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn add(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(
|
||||
self.0.wrapping_add(rhs.0),
|
||||
self.1.wrapping_add(rhs.1),
|
||||
self.2.wrapping_add(rhs.2),
|
||||
self.3.wrapping_add(rhs.3))
|
||||
}
|
||||
}
|
||||
|
||||
impl Sub for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn sub(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(
|
||||
self.0.wrapping_sub(rhs.0),
|
||||
self.1.wrapping_sub(rhs.1),
|
||||
self.2.wrapping_sub(rhs.2),
|
||||
self.3.wrapping_sub(rhs.3))
|
||||
}
|
||||
}
|
||||
|
||||
impl BitAnd for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn bitand(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(self.0 & rhs.0, self.1 & rhs.1, self.2 & rhs.2, self.3 & rhs.3)
|
||||
}
|
||||
}
|
||||
|
||||
impl BitOr for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn bitor(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(self.0 | rhs.0, self.1 | rhs.1, self.2 | rhs.2, self.3 | rhs.3)
|
||||
}
|
||||
}
|
||||
|
||||
impl BitXor for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn bitxor(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(self.0 ^ rhs.0, self.1 ^ rhs.1, self.2 ^ rhs.2, self.3 ^ rhs.3)
|
||||
}
|
||||
}
|
||||
|
||||
impl Shl<usize> for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn shl(self, amt: usize) -> u32x4 {
|
||||
u32x4(self.0 << amt, self.1 << amt, self.2 << amt, self.3 << amt)
|
||||
}
|
||||
}
|
||||
|
||||
impl Shl<u32x4> for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn shl(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(self.0 << rhs.0, self.1 << rhs.1, self.2 << rhs.2, self.3 << rhs.3)
|
||||
}
|
||||
}
|
||||
|
||||
impl Shr<usize> for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn shr(self, amt: usize) -> u32x4 {
|
||||
u32x4(self.0 >> amt, self.1 >> amt, self.2 >> amt, self.3 >> amt)
|
||||
}
|
||||
}
|
||||
|
||||
impl Shr<u32x4> for u32x4 {
|
||||
type Output = u32x4;
|
||||
|
||||
#[inline(always)]
|
||||
fn shr(self, rhs: u32x4) -> u32x4 {
|
||||
u32x4(self.0 >> rhs.0, self.1 >> rhs.1, self.2 >> rhs.2, self.3 >> rhs.3)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
#[allow(non_camel_case_types)]
|
||||
pub struct u64x2(pub u64, pub u64);
|
||||
|
||||
impl Add for u64x2 {
|
||||
type Output = u64x2;
|
||||
|
||||
#[inline(always)]
|
||||
fn add(self, rhs: u64x2) -> u64x2 {
|
||||
u64x2(self.0.wrapping_add(rhs.0), self.1.wrapping_add(rhs.1))
|
||||
}
|
||||
}
|
|
@ -1 +1 @@
|
|||
{"files":{"CHANGELOG.md":"ae7c1d7fcd6dceb6472568994016862441606c444d0670d4e9dffab20c5eeea3","Cargo.toml":"122428b66e56c3287df569deba1e7de4b7c316cee39e1f9e1c3e7d80c793e427","LICENSE":"ad4fcfaf8d5b12b97409c137a03d4a4e4b21024c65c54f976cc3b609c1bd5b0f","README.md":"9a1a45416eac57050036b13df6ec84d21d555e820726af3c782896bd9d37d94b","rustfmt.toml":"2a298b4ce1fe6e16b8f281a0035567b8eb15042ed3062729fd28224f29c2f75a","src/arr.rs":"d866a89232279e5602cfe80b7f4e0db2b8b1153532ca72f61d65ba4d792fa603","src/functional.rs":"a0b12be07c1cc85549a80ddf1cfa1c7d5cbc09c3710bb635a5f95c35537005dc","src/hex.rs":"45e780bf385f99eec5058cfae87f97042679b1e8cbc130c009d4c074052016aa","src/impl_serde.rs":"5556e952fd351ab0af27bb562b1b6382e96a20afe51a04300264842e1fb33747","src/impls.rs":"d3046213d058b43b7b7360a2fa9ab6794e44970f1bc1847649c42c8f1c4f2d75","src/iter.rs":"b8b130ddd52461c435b873b9435f771d6535ae835055a73d79c0f3105f6b367e","src/lib.rs":"99a3fe3bc49cbfb9f00554d4860b24abe02e8075404fdb88343f2ee389f09110","src/sequence.rs":"bdf4d8920205bb85dee95897940373991b232e7b0614ae52c03c5f4bc4e8dccc","tests/arr.rs":"97258231dfeefc52ec785c2019611cc1a339c3a13f744a26727a591f7e46a7a8","tests/generics.rs":"8da33daacab14d0fd685e0ca6292a2d19be23fa6c6e128921b554a7e1d6181a1","tests/hex.rs":"143d783defedd6609995862f8aac46b8c843272a8f877f83d2f7242de8814c02","tests/import_name.rs":"1235729ecbde47fc9a38b3bf35c750a53ed55e3cf967c9d2b24fd759dc9e9e0c","tests/iter.rs":"3e5e6a1354709e8bfa76e52969c61f3d21cb960027bb91745049c0dcdfa52bfd","tests/mod.rs":"75694855127075e14ddef490fffee3dea5a052bcdda2912878bd9995f3f2956d"},"package":"ffdf9f34f1447443d37393cc6c2b8313aebddcd96906caf34e54c68d8e57d7bd"}
|
||||
{"files":{"CHANGELOG.md":"0c2dc832f2f50aba2b59e4b28f045f5d9ed4d55763c9c9aed3891b715061aeff","Cargo.toml":"84096deb8ec081346988ab7bef7fc6a9e1e0796efcdd0ba8dcffa3beb26d1447","DESIGN.md":"8b745d89e634c48646202edfaa2151ee08a04a9c32271f4c2cc4afb63b4e952c","LICENSE":"c09aae9d3c77b531f56351a9947bc7446511d6b025b3255312d3e3442a9a7583","README.md":"9e86d03b400dc818f44df68b76dafd1d89e42a51221bcb0de4259a6529ab6d84","build.rs":"08fa30c4a2c1ad24fe5f987e721dfb20131f45ea5b5dc3e836dcf88a8e33248c","rustfmt.toml":"13d771354ddee15d5aa5a168fd6965c3c0ee7aa7ce75cdd5e3b82852cdac5123","src/arr.rs":"c115d6926deb769ced772e9c4e1c84baf1bdea4fe4b9eb2061658a63869eab62","src/functional.rs":"7dd6ddd5db3000054cbbd76959f745c7de73c8493cbfb745be80509b306e4a83","src/hex.rs":"091fb78f6d373a6ef1c467d85c461472fcdb1e91efc294039f4c870151c3ee9f","src/impl_serde.rs":"f046daba067522b4c3e79437d04f43a001e83353c81e6b2188c37a2e63dba7a3","src/impls.rs":"18b285821421eea0cdbbcfcc896eef67bd55d72f8d85b5827cca6687e9c0fc27","src/iter.rs":"fa58bf5de00c900a767b3bdc9c9cdc49424bab55b2e3e2a88bd9445b06325f55","src/lib.rs":"22af14d446ec5f67a99e350d4a0c95e070f4ff9537ac9e84ec1172f654f8b95a","src/sequence.rs":"26679cfec035bae7298f067f37e8d42a1eda8fe241e9cf2c2977ba4bddddab1d","tests/arr.rs":"22d332fcb5e0314980ddc952af0265125cf53bb9cb8b546a9dcaec2e29bfc3b0","tests/generics.rs":"491c9351fd973ff2b7bc72e78d3069cf3ed3fcd2f9180558ab027099605fa147","tests/hex.rs":"fd428c2558da2f1e2cf229af2e40e5b35a2094b3306312ac41943d25a85b7de1","tests/import_name.rs":"c9439c7d7531ce79419b0d413d729ea4321887c091bd9be8b18e6c2413021ed0","tests/iter.rs":"d9f18c7a280a938a63d382086450146206c5805804d4b62c7e55cd60ea0e2d0d","tests/mod.rs":"556a9cb6f6699c523ebfb1b167a18b30d909604339e929e9c874da92aae60bd3"},"package":"fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803"}
|
|
@ -1,51 +1,93 @@
|
|||
* **`0.12.4`**
|
||||
* Fix unsoundness in the `arr!` macro.
|
||||
|
||||
* **`0.12.0`**
|
||||
* Allow trailing commas in `arr!` macro.
|
||||
* **BREAKING**: Serialize `GenericArray` using `serde` tuples, instead of variable-length sequences. This may not be compatible with old serialized data.
|
||||
|
||||
* **`0.11.0`**
|
||||
* **BREAKING** Redesign `GenericSequence` with an emphasis on use in generic type parameters.
|
||||
* Add `MappedGenericSequence` and `FunctionalSequence`
|
||||
* Implements optimized `map`, `zip` and `fold` for `GenericArray`, `&GenericArray` and `&mut GenericArray`
|
||||
* **BREAKING** Remove `map_ref`, `zip_ref` and `map_slice`
|
||||
* `map_slice` is now equivalent to `GenericArray::from_iter(slice.iter().map(...))`
|
||||
* **`0.10.0`**
|
||||
* Add `GenericSequence`, `Lengthen`, `Shorten`, `Split` and `Concat` traits.
|
||||
* Redefine `transmute` to avert errors.
|
||||
* **`0.9.0`**
|
||||
* Rewrite construction methods to be well-defined in panic situations, correctly dropping elements.
|
||||
* `NoDrop` crate replaced by `ManuallyDrop` as it became stable in Rust core.
|
||||
* Add optimized `map`/`map_ref` and `zip`/`zip_ref` methods to `GenericArray`
|
||||
* **`0.8.0`**
|
||||
* Implement `AsRef`, `AsMut`, `Borrow`, `BorrowMut`, `Hash` for `GenericArray`
|
||||
* Update `serde` to `1.0`
|
||||
* Update `typenum`
|
||||
* Make macro `arr!` non-cloning
|
||||
* Implement `From<[T; N]>` up to `N=32`
|
||||
* Fix #45
|
||||
* **`0.7.0`**
|
||||
* Upgrade `serde` to `0.9`
|
||||
* Make `serde` with `no_std`
|
||||
* Implement `PartialOrd`/`Ord` for `GenericArray`
|
||||
* **`0.6.0`**
|
||||
* Fixed #30
|
||||
* Implement `Default` for `GenericArray`
|
||||
* Implement `LowerHex` and `UpperHex` for `GenericArray<u8, N>`
|
||||
* Use `precision` formatting field in hex representation
|
||||
* Add `as_slice`, `as_mut_slice`
|
||||
* Remove `GenericArray::new` in favor of `Default` trait
|
||||
* Add `from_slice` and `from_mut_slice`
|
||||
* `no_std` and `core` for crate.
|
||||
* **`0.5.0`**
|
||||
* Update `serde`
|
||||
* remove `no_std` feature, fixed #19
|
||||
* **`0.4.0`**
|
||||
* Re-export `typenum`
|
||||
* **`0.3.0`**
|
||||
* Implement `IntoIter` for `GenericArray`
|
||||
* Add `map` method
|
||||
* Add optional `serde` (de)serialization support feature.
|
||||
* **`< 0.3.0`**
|
||||
* Initial implementation in late 2015
|
||||
* **`0.14.5`**
|
||||
* Fix unsoundness behavior in `GenericArrayIter::clone` ([#120](https://github.com/fizyk20/generic-array/pull/120))
|
||||
|
||||
* **`0.14.4`**
|
||||
* Update `typenum` to `1.12.0`
|
||||
* Make `Drop` a no-op when the inner type does not require `Drop` (using `core::mem::needs_drop`)
|
||||
|
||||
* **`0.14.3`**
|
||||
* Improve behavior of `GenericArray::from_exact_iter` to assume `ExactIterator`s can lie.
|
||||
* Fix alignment of zero-length `GenericArray`s
|
||||
* Implement `From<&[T; N]> for &GenericArray<T, N>` and its mutable variant
|
||||
|
||||
* **`0.14.2`**
|
||||
* Lower MSRV to `1.36.0` without `From<[T; N]>` implementations.
|
||||
|
||||
* **`0.14.1`**
|
||||
* Fix element conversions in `arr!` macro.
|
||||
|
||||
* **`0.14.0`**
|
||||
* Replace `Into` implementations with the more general `From`.
|
||||
* Requires minumum Rust version of 1.41.0
|
||||
* Fix unsoundness in `arr!` macro.
|
||||
* Fix meta variable misuse
|
||||
* Fix Undefined Behavior across the crate by switching to `MaybeUninit`
|
||||
* Improve some documentation and doctests
|
||||
* Add `AsRef<[T; N]>` and `AsMut<[T; N]>` impls to `GenericArray<T, N>`
|
||||
* Add `Split` impl for `&GenericArray` and `&mut GenericArray`
|
||||
|
||||
* **`0.13.2`**
|
||||
* Add feature `more_lengths`, which adds more `From`/`Into` implementations for arrays of various lengths.
|
||||
|
||||
* **`0.13.1`**
|
||||
* Mark `GenericArray` as `#[repr(transparent)]`
|
||||
* Implement `Into<[T; N]>` for `GenericArray<T, N>` up to N=32
|
||||
|
||||
* **`0.13.0`**
|
||||
* Allow `arr!` to be imported with use syntax.
|
||||
* Requires minimum Rust version of 1.30.1
|
||||
|
||||
* **`0.12.2`**
|
||||
* Implement `FusedIterator` for `GenericArrayIter`
|
||||
|
||||
* **`0.12.1`**
|
||||
* Use internal iteration where possible and provide more efficient internal iteration methods.
|
||||
|
||||
* **`0.12.0`**
|
||||
* Allow trailing commas in `arr!` macro.
|
||||
* **BREAKING**: Serialize `GenericArray` using `serde` tuples, instead of variable-length sequences. This may not be compatible with old serialized data.
|
||||
|
||||
* **`0.11.0`**
|
||||
* **BREAKING** Redesign `GenericSequence` with an emphasis on use in generic type parameters.
|
||||
* Add `MappedGenericSequence` and `FunctionalSequence`
|
||||
* Implements optimized `map`, `zip` and `fold` for `GenericArray`, `&GenericArray` and `&mut GenericArray`
|
||||
* **BREAKING** Remove `map_ref`, `zip_ref` and `map_slice`
|
||||
* `map_slice` is now equivalent to `GenericArray::from_iter(slice.iter().map(...))`
|
||||
* **`0.10.0`**
|
||||
* Add `GenericSequence`, `Lengthen`, `Shorten`, `Split` and `Concat` traits.
|
||||
* Redefine `transmute` to avert errors.
|
||||
* **`0.9.0`**
|
||||
* Rewrite construction methods to be well-defined in panic situations, correctly dropping elements.
|
||||
* `NoDrop` crate replaced by `ManuallyDrop` as it became stable in Rust core.
|
||||
* Add optimized `map`/`map_ref` and `zip`/`zip_ref` methods to `GenericArray`
|
||||
* **`0.8.0`**
|
||||
* Implement `AsRef`, `AsMut`, `Borrow`, `BorrowMut`, `Hash` for `GenericArray`
|
||||
* Update `serde` to `1.0`
|
||||
* Update `typenum`
|
||||
* Make macro `arr!` non-cloning
|
||||
* Implement `From<[T; N]>` up to `N=32`
|
||||
* Fix #45
|
||||
* **`0.7.0`**
|
||||
* Upgrade `serde` to `0.9`
|
||||
* Make `serde` with `no_std`
|
||||
* Implement `PartialOrd`/`Ord` for `GenericArray`
|
||||
* **`0.6.0`**
|
||||
* Fixed #30
|
||||
* Implement `Default` for `GenericArray`
|
||||
* Implement `LowerHex` and `UpperHex` for `GenericArray<u8, N>`
|
||||
* Use `precision` formatting field in hex representation
|
||||
* Add `as_slice`, `as_mut_slice`
|
||||
* Remove `GenericArray::new` in favor of `Default` trait
|
||||
* Add `from_slice` and `from_mut_slice`
|
||||
* `no_std` and `core` for crate.
|
||||
* **`0.5.0`**
|
||||
* Update `serde`
|
||||
* remove `no_std` feature, fixed #19
|
||||
* **`0.4.0`**
|
||||
* Re-export `typenum`
|
||||
* **`0.3.0`**
|
||||
* Implement `IntoIter` for `GenericArray`
|
||||
* Add `map` method
|
||||
* Add optional `serde` (de)serialization support feature.
|
||||
* **`< 0.3.0`**
|
||||
* Initial implementation in late 2015
|
||||
|
|
|
@ -12,7 +12,7 @@
|
|||
|
||||
[package]
|
||||
name = "generic-array"
|
||||
version = "0.12.4"
|
||||
version = "0.14.5"
|
||||
authors = ["Bartłomiej Kamiński <fizyk20@gmail.com>", "Aaron Trent <novacrazy@gmail.com>"]
|
||||
description = "Generic types implementing functionality of arrays"
|
||||
documentation = "http://fizyk20.github.io/generic-array/generic_array/"
|
||||
|
@ -30,11 +30,16 @@ optional = true
|
|||
default-features = false
|
||||
|
||||
[dependencies.typenum]
|
||||
version = "1.10"
|
||||
version = "1.12"
|
||||
[dev-dependencies.bincode]
|
||||
version = "1.0"
|
||||
|
||||
[dev-dependencies.serde_json]
|
||||
version = "1.0"
|
||||
[build-dependencies.version_check]
|
||||
version = "0.9"
|
||||
|
||||
[features]
|
||||
more_lengths = []
|
||||
[badges.travis-ci]
|
||||
repository = "fizyk20/generic-array"
|
||||
|
|
|
@ -0,0 +1,585 @@
|
|||
Design and Usage Notes
|
||||
======================
|
||||
|
||||
## Sections
|
||||
|
||||
1. [How it Works](#how-it-works)
|
||||
2. [Initialization](#initialization)
|
||||
3. [Functional Programming](#functional-programming)
|
||||
4. [Miscellaneous Utilities](#miscellaneous-utilities)
|
||||
5. [Safety](#safety)
|
||||
6. [Optimization](#optimization)
|
||||
7. [The Future](#the-future)
|
||||
|
||||
**NOTE**: This document uses `<details>` sections, so look out for collapsible parts with an arrow on the left.
|
||||
|
||||
# How it works
|
||||
|
||||
`generic-array` is a method of achieving fixed-length fixed-size stack-allocated generic arrays without needing const generics in stable Rust.
|
||||
|
||||
That is to say this:
|
||||
|
||||
```rust
|
||||
struct Foo<const N: usize> {
|
||||
data: [i32; N],
|
||||
}
|
||||
```
|
||||
|
||||
or anything similar is not currently supported.
|
||||
|
||||
However, Rust's type system is sufficiently advanced, and a "hack" for solving this was created in the form of the `typenum` crate, which recursively defines integer values in binary as nested types, and operations which can be applied to those type-numbers, such as `Add`, `Sub`, etc.
|
||||
|
||||
e.g. `6` would be `UInt<UInt<UInt<UTerm, B1>, B1>, B0>`
|
||||
|
||||
Over time, I've come to see `typenum` as less of a hack and more as an elegant solution.
|
||||
|
||||
The recursive binary nature of `typenum` is what makes `generic-array` possible, so:
|
||||
|
||||
```rust
|
||||
struct Foo<N: ArrayLength<i32>> {
|
||||
data: GenericArray<i32, N>,
|
||||
}
|
||||
```
|
||||
|
||||
is supported.
|
||||
|
||||
I often see questions about why `ArrayLength` requires the element type `T` in its signature, even though it's not used in the inner `ArrayType`.
|
||||
|
||||
This is because `GenericArray` itself does not define the actual array. Rather, it is defined as:
|
||||
|
||||
```rust
|
||||
pub struct GenericArray<T, N: ArrayLength<T>> {
|
||||
data: N::ArrayType,
|
||||
}
|
||||
```
|
||||
|
||||
The trait `ArrayLength` does all the real heavy lifting for defining the data, with implementations on `UInt<N, B0>`, `UInt<N, B1>` and `UTerm`, which correspond to even, odd and zero numeric values, respectively.
|
||||
|
||||
`ArrayLength`'s implementations use type-level recursion to peel away each least significant bit and form sort of an opaque binary tree of contiguous data the correct physical size to store `N` elements of `T`. The tree, or block of data, is then stored inside of `GenericArray` to be reinterpreted as the array.
|
||||
|
||||
For example, `GenericArray<T, U6>` more or less expands to (at compile time):
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
```rust
|
||||
GenericArray {
|
||||
// UInt<UInt<UInt<UTerm, B1>, B1>, B0>
|
||||
data: EvenData {
|
||||
// UInt<UInt<UTerm, B1>, B1>
|
||||
left: OddData {
|
||||
// UInt<UTerm, B1>
|
||||
left: OddData {
|
||||
left: (), // UTerm
|
||||
right: (), // UTerm
|
||||
data: T, // Element 0
|
||||
},
|
||||
// UInt<UTerm, B1>
|
||||
right: OddData {
|
||||
left: (), // UTerm
|
||||
right: (), // UTerm
|
||||
data: T, // Element 1
|
||||
},
|
||||
data: T // Element 2
|
||||
},
|
||||
// UInt<UInt<UTerm, B1>, B1>
|
||||
right: OddData {
|
||||
// UInt<UTerm, B1>
|
||||
left: OddData {
|
||||
left: (), // UTerm
|
||||
right: (), // UTerm
|
||||
data: T, // Element 3
|
||||
},
|
||||
// UInt<UTerm, B1>
|
||||
right: OddData {
|
||||
left: (), // UTerm
|
||||
right: (), // UTerm
|
||||
data: T, // Element 4
|
||||
},
|
||||
data: T // Element 5
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
This has the added benefit of only being `log2(N)` deep, which is important for things like `Drop`, which we'll go into later.
|
||||
|
||||
Then, we take `data` and cast it to `*const T` or `*mut T` and use it as a slice like:
|
||||
|
||||
```rust
|
||||
unsafe {
|
||||
slice::from_raw_parts(
|
||||
self as *const Self as *const T,
|
||||
N::to_usize()
|
||||
)
|
||||
}
|
||||
```
|
||||
|
||||
It is useful to note that because `typenum` is compile-time with nested generics, `to_usize`, even if it isn't a `const fn`, *does* expand to effectively `1 + 2 + 4 + 8 + ...` and so forth, which LLVM is smart enough to reduce to a single compile-time constant. This helps hint to the optimizers about things such as bounds checks.
|
||||
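A tiny illustration of that collapse, using the `typenum` re-export (the value `6` is chosen only to match the example above):

```rust
// Illustration only: `U6` is typenum's UInt<UInt<UInt<UTerm, B1>, B1>, B0>,
// and `to_usize()` folds the bits back into an ordinary integer constant.
use generic_array::typenum::{Unsigned, U6};

fn main() {
    assert_eq!(U6::to_usize(), 6);
    assert_eq!(U6::USIZE, 6); // the equivalent associated constant
}
```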
|
||||
So, to reiterate, we're working with a raw block of contiguous memory the correct physical size to store `N` elements of `T`. It's really no different from how normal arrays are stored.
|
||||
|
||||
## Pointer Safety
|
||||
|
||||
Of course, casting pointers around and constructing blocks of data out of thin air is normal for C, but here in Rust we try to be a bit less prone to segfaults. Therefore, great care is taken to minimize casual `unsafe` usage and restrict `unsafe` to specific parts of the API, making heavy use of those exposed safe APIs internally.
|
||||
|
||||
For example, raw slice construction appears only twice in the entire library: `slice::from_raw_parts` once for `&[T]`, and `slice::from_raw_parts_mut` once for `&mut [T]`. Everything else goes through those slices.
|
||||
|
||||
# Initialization
|
||||
|
||||
## Constant
|
||||
|
||||
"Constant" initialization, that is to say - without dynamic values, can be done via the `arr![]` macro, which works almost exactly like `vec![]`, but with an additional type parameter.
|
||||
|
||||
Example:
|
||||
|
||||
```rust
|
||||
let my_arr = arr![i32; 1, 2, 3, 4, 5, 6, 7, 8];
|
||||
```
|
||||
|
||||
## Dynamic
|
||||
|
||||
Although some users have opted to use their own initializers, as of version `0.9` `generic-array` includes safe methods for initializing the elements of the array.
|
||||
|
||||
The `GenericSequence` trait defines a `generate` method which can be used like so:
|
||||
|
||||
```rust
|
||||
use generic_array::{GenericArray, sequence::GenericSequence};
use generic_array::typenum::U4;

let doubled: GenericArray<i32, U4> =
    GenericArray::generate(|i: usize| i as i32 * 2);
|
||||
```
|
||||
|
||||
and `GenericArray` additionally implements `FromIterator`, although `from_iter` ***will*** panic if the iterator yields fewer than `N` elements. Any extra items are ignored.
|
||||
|
||||
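
For example (a small sketch; the behavior follows the description above):

```rust
use generic_array::GenericArray;
use generic_array::typenum::U4;

// Ten items collected into a length-4 array: the first four fill it,
// the remaining six are ignored.
let arr: GenericArray<i32, U4> = (1..=10).collect();
assert_eq!(arr.as_slice(), &[1, 2, 3, 4]);
```
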
The safety of these operations is described later.
|
||||
|
||||
# Functional Programming
|
||||
|
||||
In addition to `GenericSequence`, this crate provides a `FunctionalSequence`, which allows extremely efficient `map`, `zip` and `fold` operations on `GenericArray`s.
|
||||
|
||||
As described at the end of the [Optimization](#optimization) section, `FunctionalSequence` uses clever specialization tactics to provide optimized methods wherever possible, while remaining perfectly safe.
|
||||
|
||||
Some examples, taken from `tests/generic.rs`:
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
These examples are this extensive to show how you can build up to processing totally arbitrary sequences, but for the most part these methods can be used on concrete `GenericArray` instances without much added complexity.
|
||||
|
||||
```rust
|
||||
/// Super-simple fixed-length i32 `GenericArray`s
|
||||
pub fn generic_array_plain_zip_sum(a: GenericArray<i32, U4>, b: GenericArray<i32, U4>) -> i32 {
|
||||
a.zip(b, |l, r| l + r)
|
||||
.map(|x| x + 1)
|
||||
.fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
pub fn generic_array_variable_length_zip_sum<N>(a: GenericArray<i32, N>, b: GenericArray<i32, N>) -> i32
|
||||
where
|
||||
N: ArrayLength<i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r)
|
||||
.map(|x| x + 1)
|
||||
.fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
pub fn generic_array_same_type_variable_length_zip_sum<T, N>(a: GenericArray<T, N>, b: GenericArray<T, N>) -> i32
|
||||
where
|
||||
N: ArrayLength<T> + ArrayLength<<T as Add<T>>::Output>,
|
||||
T: Add<T, Output=i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r)
|
||||
.map(|x| x + 1)
|
||||
.fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
/// Complex example using fully generic `GenericArray`s with the same length.
|
||||
///
|
||||
/// It's mostly just the repeated `Add` traits, which would be present in other systems anyway.
|
||||
pub fn generic_array_zip_sum<A, B, N: ArrayLength<A> + ArrayLength<B>>(a: GenericArray<A, N>, b: GenericArray<B, N>) -> i32
|
||||
where
|
||||
A: Add<B>,
|
||||
N: ArrayLength<<A as Add<B>>::Output> +
|
||||
ArrayLength<<<A as Add<B>>::Output as Add<i32>>::Output>,
|
||||
<A as Add<B>>::Output: Add<i32>,
|
||||
<<A as Add<B>>::Output as Add<i32>>::Output: Add<i32, Output=i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r)
|
||||
.map(|x| x + 1)
|
||||
.fold(0, |a, x| x + a)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
and if you really want to go off the deep end and support any arbitrary *`GenericSequence`*:
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
```rust
|
||||
/// Complex example function using generics to pass N-length sequences, zip them, and then map that result.
|
||||
///
|
||||
/// If used with `GenericArray` specifically this isn't necessary
|
||||
pub fn generic_sequence_zip_sum<A, B>(a: A, b: B) -> i32
|
||||
where
|
||||
A: FunctionalSequence<i32>, // `.zip`
|
||||
B: FunctionalSequence<i32, Length = A::Length>, // `.zip`
|
||||
A: MappedGenericSequence<i32, i32>, // `i32` -> `i32`
|
||||
B: MappedGenericSequence<i32, i32, Mapped = MappedSequence<A, i32, i32>>, // `i32` -> `i32`, prove A and B can map to the same output
|
||||
A::Item: Add<B::Item, Output = i32>, // `l + r`
|
||||
MappedSequence<A, i32, i32>: MappedGenericSequence<i32, i32> + FunctionalSequence<i32>, // `.map`
|
||||
SequenceItem<MappedSequence<A, i32, i32>>: Add<i32, Output=i32>, // `x + 1`
|
||||
MappedSequence<MappedSequence<A, i32, i32>, i32, i32>: Debug, // `println!`
|
||||
MappedSequence<MappedSequence<A, i32, i32>, i32, i32>: FunctionalSequence<i32>, // `.fold`
|
||||
SequenceItem<MappedSequence<MappedSequence<A, i32, i32>, i32, i32>>: Add<i32, Output=i32> // `x + a`, note the order
|
||||
{
|
||||
let c = a.zip(b, |l, r| l + r).map(|x| x + 1);
|
||||
|
||||
println!("{:?}", c);
|
||||
|
||||
c.fold(0, |a, x| x + a)
|
||||
}
|
||||
```
|
||||
|
||||
of course, as I stated before, that's almost never necessary, especially when you know the concrete types of all the components.
|
||||
|
||||
</details>
|
||||
|
||||
The [`numeric-array`](https://crates.io/crates/numeric-array) crate uses these to apply numeric operations across all elements in a `GenericArray`, making full use of all the optimizations described in the last section here.
|
||||
|
||||
# Miscellaneous Utilities
|
||||
|
||||
Although not usually advertised, `generic-array` contains traits for lengthening, shortening, splitting and concatenating arrays.
|
||||
|
||||
For example, these snippets are taken from `tests/mod.rs`:
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
Appending and prepending elements:
|
||||
|
||||
```rust
|
||||
use generic_array::sequence::Lengthen;
|
||||
|
||||
#[test]
|
||||
fn test_append() {
|
||||
let a = arr![i32; 1, 2, 3];
|
||||
|
||||
let b = a.append(4);
|
||||
|
||||
assert_eq!(b, arr![i32; 1, 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prepend() {
|
||||
let a = arr![i32; 1, 2, 3];
|
||||
|
||||
let b = a.prepend(4);
|
||||
|
||||
assert_eq!(b, arr![i32; 4, 1, 2, 3]);
|
||||
}
|
||||
```
|
||||
|
||||
Popping elements from the front or back of the array:
|
||||
|
||||
```rust
|
||||
use generic_array::sequence::Shorten;
|
||||
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
let (init, last) = a.pop_back();
|
||||
|
||||
assert_eq!(init, arr![i32; 1, 2, 3]);
|
||||
assert_eq!(last, 4);
|
||||
|
||||
let (head, tail) = a.pop_front();
|
||||
|
||||
assert_eq!(head, 1);
|
||||
assert_eq!(tail, arr![i32; 2, 3, 4]);
|
||||
```
|
||||
|
||||
and of course concatenating and splitting:
|
||||
|
||||
```rust
|
||||
use generic_array::sequence::{Concat, Split};
|
||||
|
||||
let a = arr![i32; 1, 2];
|
||||
let b = arr![i32; 3, 4];
|
||||
|
||||
let c = a.concat(b);
|
||||
|
||||
assert_eq!(c, arr![i32; 1, 2, 3, 4]);
|
||||
|
||||
let (d, e) = c.split();
|
||||
|
||||
assert_eq!(d, arr![i32; 1]);
|
||||
assert_eq!(e, arr![i32; 2, 3, 4]);
|
||||
```
|
||||
</details>
|
||||
|
||||
`Split` and `Concat` in these examples use type-inference to determine the lengths of the resulting arrays.
|
||||
|
||||
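
As one more sketch of that inference (assuming the `arr!` macro is in scope, e.g. via `#[macro_use] extern crate generic_array;`), annotating just one of the outputs is enough to pin down both lengths:

```rust
use generic_array::sequence::Split;
use generic_array::typenum::U2;
use generic_array::GenericArray;

let c = arr![i32; 1, 2, 3, 4];

// Annotating `head` as length 2 fixes the split point; `tail`'s length
// (4 - 2 = 2) is then inferred automatically.
let (head, tail): (GenericArray<i32, U2>, _) = c.split();

assert_eq!(head, arr![i32; 1, 2]);
assert_eq!(tail, arr![i32; 3, 4]);
```
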
# Safety
|
||||
|
||||
As stated earlier, for raw reinterpretations such as this, safety is a must even while working with unsafe code. Great care is taken to reduce or eliminate undefined behavior.
|
||||
|
||||
For most of the above code examples, the biggest source of potential undefined behavior hasn't even been applicable, for one simple reason: they all used primitive values.
|
||||
|
||||
The simplest way to lead into this is to pose these questions:
|
||||
|
||||
1. What if the element type of the array implements `Drop`?
|
||||
2. What if `GenericArray::generate` opens a bunch of files?
|
||||
3. What if halfway through opening each of the files, one is not found?
|
||||
4. What if the resulting error is unwrapped, causing the generation function to panic?
|
||||
|
||||
For a fully initialized `GenericArray`, the expanded structure as described in the [How It Works](#how-it-works) can implement `Drop` naturally, recursively dropping elements. As it is only `log2(N)` deep, the recursion is very small overall.
|
||||
|
||||
In fact, I tested it while writing this: the size of the array itself overflows the stack before any recursive calls to `drop` can.
|
||||
|
||||
However, ***partially*** initialized arrays, such as described in the above hypothetical, pose an issue where `drop` could be called on uninitialized data, which is undefined behavior.
|
||||
|
||||
To solve this, `generic-array` provides two internal components named `ArrayBuilder` and `ArrayConsumer`, which work very similarly.
|
||||
|
||||
`ArrayBuilder` creates a block of wholly uninitialized memory via `mem::uninitialized()`, and stores that in a `ManuallyDrop` wrapper. `ManuallyDrop` does exactly what it says on the tin, and simply doesn't drop the value unless manually requested to.
|
||||
|
||||
So, as we're initializing our array, `ArrayBuilder` keeps track of the current position through it, and if a panic occurs partway through, `ArrayBuilder` itself will iteratively and manually `drop` all currently initialized elements, leaving any uninitialized ones alone, since those are just raw memory.
|
||||
|
||||
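
As a rough sketch of the builder side of that idea (written with today's `MaybeUninit` and const generics for clarity, rather than the crate's actual `mem::uninitialized`-based internals):

```rust
use core::mem::MaybeUninit;
use core::ptr;

/// Hypothetical illustration: a partially initialized fixed-size buffer.
struct PartialArray<T, const N: usize> {
    data: [MaybeUninit<T>; N],
    /// How many leading slots currently hold real values.
    initialized: usize,
}

impl<T, const N: usize> Drop for PartialArray<T, N> {
    fn drop(&mut self) {
        // Drop only the elements that were actually written; the remaining
        // slots are just raw memory and must be left untouched.
        for slot in &mut self.data[..self.initialized] {
            unsafe { ptr::drop_in_place(slot.as_mut_ptr()) };
        }
    }
}
```
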
`ArrayConsumer` does almost the same, "moving" values out of the array and into something else, like user code. It uses `ptr::read` to "move" the value out, and increments a counter saying that value is no longer valid in the array.
|
||||
|
||||
If a panic occurs in the user code with that element, it's dropped naturally as it was moved into that scope. `ArrayConsumer` then proceeds to iteratively and manually `drop` all *remaining* elements.
|
||||
|
||||
Combined, these two systems provide a safe system for building and consuming `GenericArray`s. In fact, they are used extensively inside the library itself for `FromIterator`, `GenericSequence` and `FunctionalSequence`, among others.
|
||||
|
||||
Even `GenericArray`'s implementation of `Clone` makes use of this via:
|
||||
|
||||
```rust
|
||||
impl<T: Clone, N> Clone for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn clone(&self) -> GenericArray<T, N> {
|
||||
self.map(|x| x.clone())
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
where `.map` is from the `FunctionalSequence` trait, and uses those builder and consumer structures to safely move and initialize values. In this particular case, though, a consumer is not necessary because we're only working with references. More on how that is automatically deduced is described in the next section.
|
||||
|
||||
# Optimization
|
||||
|
||||
Rust and LLVM are smart. Crazy smart. However, they're not magic.
|
||||
|
||||
In my experience, most of Rust's "zero-cost" abstractions stem more from the type system, rather than explicit optimizations. Most Rust code is very easily optimizable and inlinable by design, so it can be simplified and compacted rather well, as opposed to the spaghetti code of some other languages.
|
||||
|
||||
Unfortunately, unless `rustc` or LLVM can "prove" things about a piece of code in order to simplify it, that code must still be run as written, and it can prevent further optimization.
|
||||
|
||||
A great example of this, and why I created the `GenericSequence` and `FunctionalSequence` traits, is iterators.
|
||||
|
||||
Custom iterators are slow. Not terribly slow, but slow enough to prevent some rather important optimizations.
|
||||
|
||||
Take `GenericArrayIter` for example:
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
```rust
|
||||
pub struct GenericArrayIter<T, N: ArrayLength<T>> {
|
||||
array: ManuallyDrop<GenericArray<T, N>>,
|
||||
index: usize,
|
||||
index_back: usize,
|
||||
}
|
||||
|
||||
impl<T, N> Iterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Item = T;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<T> {
|
||||
if self.index < self.index_back {
|
||||
let p = unsafe {
|
||||
Some(ptr::read(self.array.get_unchecked(self.index)))
|
||||
};
|
||||
|
||||
self.index += 1;
|
||||
|
||||
p
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
//and more
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
Seems simple enough, right? Move an element out of the array with `ptr::read` and increment the index. If the iterator is dropped, the remaining elements are dropped exactly as they would be with `ArrayConsumer`. `index_back` is provided for `DoubleEndedIterator`.
|
||||
|
||||
Unfortunately, that single `if` statement is terrible. In my mind, this is one of the biggest flaws of the iterator design. A conditional jump on a mutable variable unrelated to the data we are accessing on each call foils the optimizer and generates suboptimal code for the above iterator, even when we use `get_unchecked`.
|
||||
|
||||
The optimizer is unable to see that we are simply accessing memory sequentially. In fact, almost all iterators are like this. Granted, this is usually fine and, especially if they have to handle errors, it's perfectly acceptable.
|
||||
|
||||
However, there is one iterator in the standard library that is optimized perfectly: the slice iterator. So perfectly in fact that it allows the optimizer to do something even more special: **auto-vectorization**! We'll get to that later.
|
||||
|
||||
The reason *why* slice iterators can be optimized so perfectly is a bit involved, but it basically boils down to this: the iterator itself does not own the data the slice refers to, so it works with raw pointers into the array/sequence/etc. rather than an index into a stack-allocated, always-moving array. It can check whether the iterator is empty by comparing its `front` and `back` pointers for equality, and because those correspond directly to the position in memory of the next element, LLVM can see that and make optimizations.
|
||||
|
||||
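
A hedged sketch of that shape (illustrative only, and ignoring zero-sized types, which the real slice iterator handles specially):

```rust
use core::marker::PhantomData;

/// Illustrative pointer-pair iterator over a borrowed slice.
struct RawIter<'a, T> {
    front: *const T,
    back: *const T, // one past the last element
    _marker: PhantomData<&'a [T]>,
}

impl<'a, T> RawIter<'a, T> {
    fn new(slice: &'a [T]) -> Self {
        let front = slice.as_ptr();
        RawIter {
            front,
            back: unsafe { front.add(slice.len()) },
            _marker: PhantomData,
        }
    }
}

impl<'a, T> Iterator for RawIter<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<&'a T> {
        // "Is it empty?" is a comparison of two pointers that map directly
        // to memory positions, which the optimizer can reason about.
        if self.front == self.back {
            None
        } else {
            unsafe {
                let item = &*self.front;
                self.front = self.front.add(1);
                Some(item)
            }
        }
    }
}
```
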
So, the gist of that is: always use slice iterators where possible.
|
||||
|
||||
Here comes the most important part of all of this: `ArrayBuilder` and `ArrayConsumer` don't iterate the arrays themselves. Instead, we use slice iterators (immutable and mutable), with `zip` or `enumerate`, to apply operations to the entire array, incrementing the position in the `ArrayBuilder` or `ArrayConsumer` as we go to keep track.
|
||||
|
||||
For example, `GenericSequence::generate` for `GenericArray` is:
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
```rust
|
||||
fn generate<F>(mut f: F) -> GenericArray<T, N>
|
||||
where
|
||||
F: FnMut(usize) -> T,
|
||||
{
|
||||
unsafe {
|
||||
let mut destination = ArrayBuilder::new();
|
||||
|
||||
{
|
||||
let (destination_iter, position) = destination.iter_position();
|
||||
|
||||
for (i, dst) in destination_iter.enumerate() {
|
||||
ptr::write(dst, f(i));
|
||||
|
||||
*position += 1;
|
||||
}
|
||||
}
|
||||
|
||||
destination.into_inner()
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
where `ArrayBuilder::iter_position` is just an internal convenience function:
|
||||
|
||||
```rust
|
||||
pub unsafe fn iter_position(&mut self) -> (slice::IterMut<T>, &mut usize) {
|
||||
(self.array.iter_mut(), &mut self.position)
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
Of course, this may appear redundant: the slice iterator already keeps track of its own position, and the builder is keeping track of one as well. However, the two are decoupled.
|
||||
|
||||
If the generation function doesn't have a chance at panicking, and/or the array element type doesn't implement `Drop`, the optimizer deems the `Drop` implementation on `ArrayBuilder` (and `ArrayConsumer`) dead code, and therefore `position` is never actually read from, so it becomes dead code as well, and is removed.
|
||||
|
||||
So for simple non-`Drop`/non-panicking elements and generation functions, `generate` becomes a very simple loop that uses a slice iterator to write values to the array.
|
||||
|
||||
Next, let's take a look at a more complex example where this *really* shines: `.zip`
|
||||
|
||||
To cut down on excessively verbose code, `.zip` uses `FromIterator` for building the array, which has almost identical code to `generate`, so it will be omitted.
|
||||
|
||||
The first implementation of `.zip` is defined as:
|
||||
|
||||
<details>
|
||||
<summary>Expand for code</summary>
|
||||
|
||||
```rust
|
||||
fn inverted_zip<B, U, F>(
|
||||
self,
|
||||
lhs: GenericArray<B, Self::Length>,
|
||||
mut f: F,
|
||||
) -> MappedSequence<GenericArray<B, Self::Length>, B, U>
|
||||
where
|
||||
GenericArray<B, Self::Length>:
|
||||
GenericSequence<B, Length = Self::Length> + MappedGenericSequence<B, U>,
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
F: FnMut(B, Self::Item) -> U,
|
||||
{
|
||||
unsafe {
|
||||
let mut left = ArrayConsumer::new(lhs);
|
||||
let mut right = ArrayConsumer::new(self);
|
||||
|
||||
let (left_array_iter, left_position) = left.iter_position();
|
||||
let (right_array_iter, right_position) = right.iter_position();
|
||||
|
||||
FromIterator::from_iter(left_array_iter.zip(right_array_iter).map(|(l, r)| {
|
||||
let left_value = ptr::read(l);
|
||||
let right_value = ptr::read(r);
|
||||
|
||||
*left_position += 1;
|
||||
*right_position += 1;
|
||||
|
||||
f(left_value, right_value)
|
||||
}))
|
||||
}
|
||||
}
|
||||
```
|
||||
</details>
|
||||
|
||||
The gist of this is that we have two `GenericArray` instances that need to be zipped together and mapped to a new sequence. This employs two `ArrayConsumer`s, and more or less uses the same pattern as the previous example.
|
||||
|
||||
Again, the position values can be optimized out, and so can the slice iterator adapters.
|
||||
|
||||
We can go a step further with this, however.
|
||||
|
||||
Consider this:
|
||||
|
||||
```rust
|
||||
let a = arr![i32; 1, 3, 5, 7];
|
||||
let b = arr![i32; 2, 4, 6, 8];
|
||||
|
||||
let c = a.zip(b, |l, r| l + r);
|
||||
|
||||
assert_eq!(c, arr![i32; 3, 7, 11, 15]);
|
||||
```
|
||||
|
||||
when compiled with:
|
||||
|
||||
```
|
||||
cargo rustc --lib --profile test --release -- -C target-cpu=native -C opt-level=3 --emit asm
|
||||
```
|
||||
|
||||
will produce assembly with the following relevant instructions taken from the entire program:
|
||||
|
||||
```asm
|
||||
; Copy constant to register
|
||||
vmovaps __xmm@00000007000000050000000300000001(%rip), %xmm0
|
||||
|
||||
; Copy constant to register
|
||||
vmovaps __xmm@00000008000000060000000400000002(%rip), %xmm0
|
||||
|
||||
; Add the two values together
|
||||
vpaddd 192(%rsp), %xmm0, %xmm1
|
||||
|
||||
; Copy constant to register
|
||||
vmovaps __xmm@0000000f0000000b0000000700000003(%rip), %xmm0
|
||||
|
||||
; Compare result of the addition with the last constant
|
||||
vpcmpeqb 128(%rsp), %xmm0, %xmm0
|
||||
```
|
||||
|
||||
So, aside from a bit of obvious housekeeping around those selected instructions, the entire `.zip` call boils down to a ***SINGLE*** SIMD addition. In fact, it continues to do this for even larger arrays, although it does fall back to individual additions for fewer than four elements, as those can't fill an SSE register evenly.
|
||||
|
||||
Using this property of auto-vectorization without sacrificing safety, I created the [`numeric-array`](https://crates.io/crates/numeric-array) crate, which wraps `GenericArray` and implements numeric traits so that almost *all* operations can be auto-vectorized, even complex ones like fused multiply-add.
|
||||
|
||||
It doesn't end there, though. You may have noticed that the function name for zip above wasn't `zip`, but `inverted_zip`.
|
||||
|
||||
This is because `generic-array` employs a clever specialization tactic to ensure `.zip` works correctly with:
|
||||
|
||||
1. `a.zip(b, ...)`
|
||||
2. `(&a).zip(b, ...)`
|
||||
3. `(&a).zip(&b, ...)`
|
||||
4. `a.zip(&b, ...)`
|
||||
|
||||
wherein `GenericSequence` and `FunctionalSequence` have default implementations of the `zip` variants, with concrete implementations for `GenericArray`. As `GenericSequence` is implemented for `&GenericArray`, on which calling `into_iter` produces a slice iterator, it can use "naive" iterator adapters to the same effect, while the specialized implementations use `ArrayConsumer`.
|
||||
|
||||
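
For instance (a small illustration, again assuming the `arr!` macro and the `FunctionalSequence` trait are in scope):

```rust
let a = arr![i32; 1, 2, 3, 4];
let b = arr![i32; 5, 6, 7, 8];

// Borrowing zip: nothing is consumed, the closure receives `&i32`s.
let by_ref = (&a).zip(&b, |l, r| l + r);

// Consuming zip: both arrays are moved into the call.
let by_value = a.zip(b, |l, r| l + r);

assert_eq!(by_ref, by_value);
assert_eq!(by_value, arr![i32; 6, 8, 10, 12]);
```
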
The result is that any combination of move or reference calls to `.zip`, `.map` and `.fold` produce code that can be optimized, none of them falling back to slow non-slice iterators. All perfectly safe with the `ArrayBuilder` and `ArrayConsumer` systems.
|
||||
|
||||
Honestly, `GenericArray` is better than standard arrays at this point.
|
||||
|
||||
# The Future
|
||||
|
||||
If/when const generics land in stable Rust, my intention is to reorient this crate or create a new crate to provide traits and wrappers for standard arrays to provide the same safety and performance discussed above.
|
|
@ -1,21 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Bartłomiej Kamiński
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -1,34 +1,62 @@
|
|||
[![Crates.io](https://img.shields.io/crates/v/generic-array.svg)](https://crates.io/crates/generic-array)
|
||||
[![Build Status](https://travis-ci.org/fizyk20/generic-array.svg?branch=master)](https://travis-ci.org/fizyk20/generic-array)
|
||||
# generic-array
|
||||
|
||||
This crate implements generic array types for Rust.
|
||||
|
||||
[Documentation](http://fizyk20.github.io/generic-array/generic_array/)
|
||||
|
||||
## Usage
|
||||
|
||||
The Rust arrays `[T; N]` are problematic in that they can't be used generically with respect to `N`, so for example this won't work:
|
||||
|
||||
```rust
|
||||
struct Foo<N> {
|
||||
data: [i32; N]
|
||||
}
|
||||
```
|
||||
|
||||
**generic-array** defines a new trait `ArrayLength<T>` and a struct `GenericArray<T, N: ArrayLength<T>>`, which let the above be implemented as:
|
||||
|
||||
```rust
|
||||
struct Foo<N: ArrayLength<i32>> {
|
||||
data: GenericArray<i32, N>
|
||||
}
|
||||
```
|
||||
|
||||
To actually define a type implementing `ArrayLength`, you can use unsigned integer types defined in [typenum](https://github.com/paholg/typenum) crate - for example, `GenericArray<T, U5>` would work almost like `[T; 5]` :)
|
||||
|
||||
In version 0.1.1 an `arr!` macro was introduced, allowing for creation of arrays as shown below:
|
||||
|
||||
```rust
|
||||
let array = arr![u32; 1, 2, 3];
|
||||
assert_eq!(array[2], 3);
|
||||
```
|
||||
[![Crates.io](https://img.shields.io/crates/v/generic-array.svg)](https://crates.io/crates/generic-array)
|
||||
[![Build Status](https://travis-ci.org/fizyk20/generic-array.svg?branch=master)](https://travis-ci.org/fizyk20/generic-array)
|
||||
# generic-array
|
||||
|
||||
This crate implements generic array types for Rust.
|
||||
|
||||
**Requires a minimum Rust version of 1.36.0, or 1.41.0 for `From<[T; N]>` implementations**
|
||||
|
||||
[Documentation](http://fizyk20.github.io/generic-array/generic_array/)
|
||||
|
||||
## Usage
|
||||
|
||||
The Rust arrays `[T; N]` are problematic in that they can't be used generically with respect to `N`, so for example this won't work:
|
||||
|
||||
```rust
|
||||
struct Foo<N> {
|
||||
data: [i32; N]
|
||||
}
|
||||
```
|
||||
|
||||
**generic-array** defines a new trait `ArrayLength<T>` and a struct `GenericArray<T, N: ArrayLength<T>>`, which let the above be implemented as:
|
||||
|
||||
```rust
|
||||
struct Foo<N: ArrayLength<i32>> {
|
||||
data: GenericArray<i32, N>
|
||||
}
|
||||
```
|
||||
|
||||
The `ArrayLength<T>` trait is implemented by default for [unsigned integer types](http://fizyk20.github.io/generic-array/typenum/uint/index.html) from [typenum](http://fizyk20.github.io/generic-array/typenum/index.html) crate:
|
||||
|
||||
```rust
|
||||
use generic_array::typenum::U5;
|
||||
|
||||
struct Foo<N: ArrayLength<i32>> {
|
||||
data: GenericArray<i32, N>
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let foo = Foo::<U5>{data: GenericArray::default()};
|
||||
}
|
||||
```
|
||||
|
||||
For example, `GenericArray<T, U5>` would work almost like `[T; 5]`:
|
||||
|
||||
```rust
|
||||
use generic_array::typenum::U5;
|
||||
|
||||
struct Foo<T, N: ArrayLength<T>> {
|
||||
data: GenericArray<T, N>
|
||||
}
|
||||
|
||||
fn main() {
|
||||
let foo = Foo::<i32, U5>{data: GenericArray::default()};
|
||||
}
|
||||
```
|
||||
|
||||
In version 0.1.1 an `arr!` macro was introduced, allowing for creation of arrays as shown below:
|
||||
|
||||
```rust
|
||||
let array = arr![u32; 1, 2, 3];
|
||||
assert_eq!(array[2], 3);
|
||||
```
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
fn main() {
|
||||
if version_check::is_min_version("1.41.0").unwrap_or(false) {
|
||||
println!("cargo:rustc-cfg=relaxed_coherence");
|
||||
}
|
||||
}
|
|
@ -1,3 +1,3 @@
|
|||
reorder_imports = true
|
||||
reorder_imported_names = true
|
||||
use_try_shorthand = true
|
||||
reorder_imports = true
|
||||
reorder_imported_names = true
|
||||
use_try_shorthand = true
|
||||
|
|
|
@ -1,126 +1,125 @@
|
|||
//! Implementation for `arr!` macro.
|
||||
|
||||
use super::ArrayLength;
|
||||
use core::ops::Add;
|
||||
use typenum::U1;
|
||||
|
||||
/// Helper trait for `arr!` macro
|
||||
pub trait AddLength<T, N: ArrayLength<T>>: ArrayLength<T> {
|
||||
/// Resulting length
|
||||
type Output: ArrayLength<T>;
|
||||
}
|
||||
|
||||
impl<T, N1, N2> AddLength<T, N2> for N1
|
||||
where
|
||||
N1: ArrayLength<T> + Add<N2>,
|
||||
N2: ArrayLength<T>,
|
||||
<N1 as Add<N2>>::Output: ArrayLength<T>,
|
||||
{
|
||||
type Output = <N1 as Add<N2>>::Output;
|
||||
}
|
||||
|
||||
/// Helper type for `arr!` macro
|
||||
pub type Inc<T, U> = <U as AddLength<T, U1>>::Output;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[macro_export]
|
||||
macro_rules! arr_impl {
|
||||
(@replace_expr $e:expr)=>{
|
||||
1
|
||||
};
|
||||
($T:ty; $N:ty, [$($x:expr),*], []) => ({
|
||||
const __ARR_LENGTH:usize=0 $(+ $crate::arr_impl!(@replace_expr $x) )*;
|
||||
fn __do_transmute<'a, T, N: $crate::ArrayLength<T>>(arr: [T; __ARR_LENGTH]) -> $crate::GenericArray<T, N> {
|
||||
unsafe { $crate::transmute(arr) }
|
||||
}
|
||||
|
||||
let _:[();<$N as $crate::typenum::Unsigned>::USIZE]=[();__ARR_LENGTH];
|
||||
|
||||
__do_transmute::<$T,$N>([$($x),*])
|
||||
});
|
||||
($T:ty; $N:ty, [], [$x1:expr]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$x1], [])
|
||||
);
|
||||
($T:ty; $N:ty, [], [$x1:expr, $($x:expr),+]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$x1], [$($x),+])
|
||||
);
|
||||
($T:ty; $N:ty, [$($y:expr),+], [$x1:expr]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$($y),+, $x1], [])
|
||||
);
|
||||
($T:ty; $N:ty, [$($y:expr),+], [$x1:expr, $($x:expr),+]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$($y),+, $x1], [$($x),+])
|
||||
);
|
||||
}
|
||||
|
||||
/// Macro allowing for easy generation of Generic Arrays.
|
||||
/// Example: `let test = arr![u32; 1, 2, 3];`
|
||||
#[macro_export]
|
||||
macro_rules! arr {
|
||||
($T:ty; $(,)*) => ({
|
||||
unsafe { $crate::transmute::<[$T; 0], $crate::GenericArray<$T, $crate::typenum::U0>>([]) }
|
||||
});
|
||||
($T:ty; $($x:expr),* $(,)*) => (
|
||||
arr_impl!($T; $crate::typenum::U0, [], [$($x),*])
|
||||
);
|
||||
($($x:expr,)+) => (arr![$($x),*]);
|
||||
() => ("""Macro requires a type, e.g. `let array = arr![u32; 1, 2, 3];`")
|
||||
}
|
||||
|
||||
|
||||
mod doctests_only{
|
||||
///
|
||||
/// # With ellision
|
||||
///
|
||||
/// Testing that lifetimes aren't transmuted when they're ellided.
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'static A {
|
||||
/// arr![&A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```rust
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'a A {
|
||||
/// arr![&A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # Without ellision
|
||||
///
|
||||
/// Testing that lifetimes aren't transmuted when they're specified explicitly.
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'static A {
|
||||
/// arr![&'a A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'static A {
|
||||
/// arr![&'static A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```rust
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'a A {
|
||||
/// arr![&'a A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[allow(dead_code)]
|
||||
pub enum DocTests{}
|
||||
}
|
||||
//! Implementation for `arr!` macro.
|
||||
|
||||
use super::ArrayLength;
|
||||
use core::ops::Add;
|
||||
use typenum::U1;
|
||||
|
||||
/// Helper trait for `arr!` macro
|
||||
pub trait AddLength<T, N: ArrayLength<T>>: ArrayLength<T> {
|
||||
/// Resulting length
|
||||
type Output: ArrayLength<T>;
|
||||
}
|
||||
|
||||
impl<T, N1, N2> AddLength<T, N2> for N1
|
||||
where
|
||||
N1: ArrayLength<T> + Add<N2>,
|
||||
N2: ArrayLength<T>,
|
||||
<N1 as Add<N2>>::Output: ArrayLength<T>,
|
||||
{
|
||||
type Output = <N1 as Add<N2>>::Output;
|
||||
}
|
||||
|
||||
/// Helper type for `arr!` macro
|
||||
pub type Inc<T, U> = <U as AddLength<T, U1>>::Output;
|
||||
|
||||
#[doc(hidden)]
|
||||
#[macro_export]
|
||||
macro_rules! arr_impl {
|
||||
(@replace_expr $e:expr) => { 1 };
|
||||
($T:ty; $N:ty, [$($x:expr),*], []) => ({
|
||||
const __ARR_LENGTH: usize = 0 $(+ $crate::arr_impl!(@replace_expr $x) )*;
|
||||
|
||||
#[inline(always)]
|
||||
fn __do_transmute<T, N: $crate::ArrayLength<T>>(arr: [T; __ARR_LENGTH]) -> $crate::GenericArray<T, N> {
|
||||
unsafe { $crate::transmute(arr) }
|
||||
}
|
||||
|
||||
let _: [(); <$N as $crate::typenum::Unsigned>::USIZE] = [(); __ARR_LENGTH];
|
||||
|
||||
__do_transmute::<$T, $N>([$($x as $T),*])
|
||||
});
|
||||
($T:ty; $N:ty, [], [$x1:expr]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$x1], [])
|
||||
);
|
||||
($T:ty; $N:ty, [], [$x1:expr, $($x:expr),+]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$x1], [$($x),+])
|
||||
);
|
||||
($T:ty; $N:ty, [$($y:expr),+], [$x1:expr]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$($y),+, $x1], [])
|
||||
);
|
||||
($T:ty; $N:ty, [$($y:expr),+], [$x1:expr, $($x:expr),+]) => (
|
||||
$crate::arr_impl!($T; $crate::arr::Inc<$T, $N>, [$($y),+, $x1], [$($x),+])
|
||||
);
|
||||
}
|
||||
|
||||
/// Macro allowing for easy generation of Generic Arrays.
|
||||
/// Example: `let test = arr![u32; 1, 2, 3];`
|
||||
#[macro_export]
|
||||
macro_rules! arr {
|
||||
($T:ty; $(,)*) => ({
|
||||
unsafe { $crate::transmute::<[$T; 0], $crate::GenericArray<$T, $crate::typenum::U0>>([]) }
|
||||
});
|
||||
($T:ty; $($x:expr),* $(,)*) => (
|
||||
$crate::arr_impl!($T; $crate::typenum::U0, [], [$($x),*])
|
||||
);
|
||||
($($x:expr,)+) => (arr![$($x),+]);
|
||||
() => ("""Macro requires a type, e.g. `let array = arr![u32; 1, 2, 3];`")
|
||||
}
|
||||
|
||||
mod doctests_only {
|
||||
///
|
||||
/// # With ellision
|
||||
///
|
||||
/// Testing that lifetimes aren't transmuted when they're ellided.
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'static A {
|
||||
/// arr![&A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```rust
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'a A {
|
||||
/// arr![&A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// # Without ellision
|
||||
///
|
||||
/// Testing that lifetimes aren't transmuted when they're specified explicitly.
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'static A {
|
||||
/// arr![&'a A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```compile_fail
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'static A {
|
||||
/// arr![&'static A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// ```rust
|
||||
/// #[macro_use] extern crate generic_array;
|
||||
/// fn main() {
|
||||
/// fn unsound_lifetime_extension<'a, A>(a: &'a A) -> &'a A {
|
||||
/// arr![&'a A; a][0]
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
#[allow(dead_code)]
|
||||
pub enum DocTests {}
|
||||
}
|
||||
|
|
|
@ -1,94 +1,95 @@
|
|||
//! Functional programming with generic sequences
|
||||
//!
|
||||
//! Please see `tests/generics.rs` for examples of how to best use these in your generic functions.
|
||||
|
||||
use super::ArrayLength;
|
||||
use core::iter::FromIterator;
|
||||
use sequence::*;
|
||||
|
||||
/// Defines the relationship between one generic sequence and another,
|
||||
/// for operations such as `map` and `zip`.
|
||||
pub unsafe trait MappedGenericSequence<T, U>: GenericSequence<T>
|
||||
where
|
||||
Self::Length: ArrayLength<U>,
|
||||
{
|
||||
/// Mapped sequence type
|
||||
type Mapped: GenericSequence<U, Length = Self::Length>;
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, U, S: MappedGenericSequence<T, U>> MappedGenericSequence<T, U> for &'a S
|
||||
where
|
||||
&'a S: GenericSequence<T>,
|
||||
S: GenericSequence<T, Length = <&'a S as GenericSequence<T>>::Length>,
|
||||
<S as GenericSequence<T>>::Length: ArrayLength<U>,
|
||||
{
|
||||
type Mapped = <S as MappedGenericSequence<T, U>>::Mapped;
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, U, S: MappedGenericSequence<T, U>> MappedGenericSequence<T, U> for &'a mut S
|
||||
where
|
||||
&'a mut S: GenericSequence<T>,
|
||||
S: GenericSequence<T, Length = <&'a mut S as GenericSequence<T>>::Length>,
|
||||
<S as GenericSequence<T>>::Length: ArrayLength<U>,
|
||||
{
|
||||
type Mapped = <S as MappedGenericSequence<T, U>>::Mapped;
|
||||
}
|
||||
|
||||
/// Accessor type for a mapped generic sequence
|
||||
pub type MappedSequence<S, T, U> =
|
||||
<<S as MappedGenericSequence<T, U>>::Mapped as GenericSequence<U>>::Sequence;
|
||||
|
||||
/// Defines functional programming methods for generic sequences
|
||||
pub unsafe trait FunctionalSequence<T>: GenericSequence<T> {
|
||||
/// Maps a `GenericSequence` to another `GenericSequence`.
|
||||
///
|
||||
/// If the mapping function panics, any already initialized elements in the new sequence
|
||||
/// will be dropped, AND any unused elements in the source sequence will also be dropped.
|
||||
fn map<U, F>(self, f: F) -> MappedSequence<Self, T, U>
|
||||
where
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<U>,
|
||||
F: FnMut(Self::Item) -> U,
|
||||
{
|
||||
FromIterator::from_iter(self.into_iter().map(f))
|
||||
}
|
||||
|
||||
/// Combines two `GenericSequence` instances and iterates through both of them,
|
||||
/// initializing a new `GenericSequence` with the result of the zipped mapping function.
|
||||
///
|
||||
/// If the mapping function panics, any already initialized elements in the new sequence
|
||||
/// will be dropped, AND any unused elements in the source sequences will also be dropped.
|
||||
#[inline]
|
||||
fn zip<B, Rhs, U, F>(self, rhs: Rhs, f: F) -> MappedSequence<Self, T, U>
|
||||
where
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Rhs: MappedGenericSequence<B, U, Mapped = MappedSequence<Self, T, U>>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
Rhs: GenericSequence<B, Length = Self::Length>,
|
||||
F: FnMut(Self::Item, Rhs::Item) -> U,
|
||||
{
|
||||
rhs.inverted_zip2(self, f)
|
||||
}
|
||||
|
||||
/// Folds (or reduces) a sequence of data into a single value.
|
||||
///
|
||||
/// If the fold function panics, any unused elements will be dropped.
|
||||
fn fold<U, F>(self, init: U, f: F) -> U
|
||||
where
|
||||
F: FnMut(U, Self::Item) -> U,
|
||||
{
|
||||
self.into_iter().fold(init, f)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, S: GenericSequence<T>> FunctionalSequence<T> for &'a S
|
||||
where
|
||||
&'a S: GenericSequence<T>,
|
||||
{
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, S: GenericSequence<T>> FunctionalSequence<T> for &'a mut S
|
||||
where
|
||||
&'a mut S: GenericSequence<T>,
|
||||
{
|
||||
}
|
||||
//! Functional programming with generic sequences
|
||||
//!
|
||||
//! Please see `tests/generics.rs` for examples of how to best use these in your generic functions.
|
||||
|
||||
use super::ArrayLength;
|
||||
use core::iter::FromIterator;
|
||||
|
||||
use crate::sequence::*;
|
||||
|
||||
/// Defines the relationship between one generic sequence and another,
|
||||
/// for operations such as `map` and `zip`.
|
||||
pub unsafe trait MappedGenericSequence<T, U>: GenericSequence<T>
|
||||
where
|
||||
Self::Length: ArrayLength<U>,
|
||||
{
|
||||
/// Mapped sequence type
|
||||
type Mapped: GenericSequence<U, Length = Self::Length>;
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, U, S: MappedGenericSequence<T, U>> MappedGenericSequence<T, U> for &'a S
|
||||
where
|
||||
&'a S: GenericSequence<T>,
|
||||
S: GenericSequence<T, Length = <&'a S as GenericSequence<T>>::Length>,
|
||||
<S as GenericSequence<T>>::Length: ArrayLength<U>,
|
||||
{
|
||||
type Mapped = <S as MappedGenericSequence<T, U>>::Mapped;
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, U, S: MappedGenericSequence<T, U>> MappedGenericSequence<T, U> for &'a mut S
|
||||
where
|
||||
&'a mut S: GenericSequence<T>,
|
||||
S: GenericSequence<T, Length = <&'a mut S as GenericSequence<T>>::Length>,
|
||||
<S as GenericSequence<T>>::Length: ArrayLength<U>,
|
||||
{
|
||||
type Mapped = <S as MappedGenericSequence<T, U>>::Mapped;
|
||||
}
|
||||
|
||||
/// Accessor type for a mapped generic sequence
|
||||
pub type MappedSequence<S, T, U> =
|
||||
<<S as MappedGenericSequence<T, U>>::Mapped as GenericSequence<U>>::Sequence;
|
||||
|
||||
/// Defines functional programming methods for generic sequences
|
||||
pub unsafe trait FunctionalSequence<T>: GenericSequence<T> {
|
||||
/// Maps a `GenericSequence` to another `GenericSequence`.
|
||||
///
|
||||
/// If the mapping function panics, any already initialized elements in the new sequence
|
||||
/// will be dropped, AND any unused elements in the source sequence will also be dropped.
|
||||
fn map<U, F>(self, f: F) -> MappedSequence<Self, T, U>
|
||||
where
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<U>,
|
||||
F: FnMut(Self::Item) -> U,
|
||||
{
|
||||
FromIterator::from_iter(self.into_iter().map(f))
|
||||
}
|
||||
|
||||
/// Combines two `GenericSequence` instances and iterates through both of them,
|
||||
/// initializing a new `GenericSequence` with the result of the zipped mapping function.
|
||||
///
|
||||
/// If the mapping function panics, any already initialized elements in the new sequence
|
||||
/// will be dropped, AND any unused elements in the source sequences will also be dropped.
|
||||
#[inline]
|
||||
fn zip<B, Rhs, U, F>(self, rhs: Rhs, f: F) -> MappedSequence<Self, T, U>
|
||||
where
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Rhs: MappedGenericSequence<B, U, Mapped = MappedSequence<Self, T, U>>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
Rhs: GenericSequence<B, Length = Self::Length>,
|
||||
F: FnMut(Self::Item, Rhs::Item) -> U,
|
||||
{
|
||||
rhs.inverted_zip2(self, f)
|
||||
}
|
||||
|
||||
/// Folds (or reduces) a sequence of data into a single value.
|
||||
///
|
||||
/// If the fold function panics, any unused elements will be dropped.
|
||||
fn fold<U, F>(self, init: U, f: F) -> U
|
||||
where
|
||||
F: FnMut(U, Self::Item) -> U,
|
||||
{
|
||||
self.into_iter().fold(init, f)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, S: GenericSequence<T>> FunctionalSequence<T> for &'a S
|
||||
where
|
||||
&'a S: GenericSequence<T>,
|
||||
{
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, S: GenericSequence<T>> FunctionalSequence<T> for &'a mut S
|
||||
where
|
||||
&'a mut S: GenericSequence<T>,
|
||||
{
|
||||
}
|
||||
|
|
|
@ -1,102 +1,105 @@
|
|||
//! Generic array are commonly used as a return value for hash digests, so
|
||||
//! it's a good idea to allow to hexlify them easily. This module implements
|
||||
//! `std::fmt::LowerHex` and `std::fmt::UpperHex` traits.
|
||||
//!
|
||||
//! Example:
|
||||
//!
|
||||
//! ```rust
|
||||
//! # #[macro_use]
|
||||
//! # extern crate generic_array;
|
||||
//! # extern crate typenum;
|
||||
//! # fn main() {
|
||||
//! let array = arr![u8; 10, 20, 30];
|
||||
//! assert_eq!(format!("{:x}", array), "0a141e");
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
|
||||
use {ArrayLength, GenericArray};
|
||||
use core::cmp::min;
|
||||
use core::fmt;
|
||||
use core::ops::Add;
|
||||
use core::str;
|
||||
use typenum::*;
|
||||
|
||||
static LOWER_CHARS: &'static [u8] = b"0123456789abcdef";
|
||||
static UPPER_CHARS: &'static [u8] = b"0123456789ABCDEF";
|
||||
|
||||
impl<T: ArrayLength<u8>> fmt::LowerHex for GenericArray<u8, T>
|
||||
where
|
||||
T: Add<T>,
|
||||
<T as Add<T>>::Output: ArrayLength<u8>,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let max_digits = f.precision().unwrap_or_else(|| self.len() * 2);
|
||||
let max_hex = (max_digits >> 1) + (max_digits & 1);
|
||||
|
||||
if T::to_usize() < 1024 {
|
||||
// For small arrays use a stack allocated
|
||||
// buffer of 2x number of bytes
|
||||
let mut res = GenericArray::<u8, Sum<T, T>>::default();
|
||||
|
||||
for (i, c) in self.iter().take(max_hex).enumerate() {
|
||||
res[i * 2] = LOWER_CHARS[(c >> 4) as usize];
|
||||
res[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize];
|
||||
}
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&res[..max_digits]) })?;
|
||||
} else {
|
||||
// For large array use chunks of up to 1024 bytes (2048 hex chars)
|
||||
let mut buf = [0u8; 2048];
|
||||
let mut digits_left = max_digits;
|
||||
|
||||
for chunk in self[..max_hex].chunks(1024) {
|
||||
for (i, c) in chunk.iter().enumerate() {
|
||||
buf[i * 2] = LOWER_CHARS[(c >> 4) as usize];
|
||||
buf[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize];
|
||||
}
|
||||
let n = min(chunk.len() * 2, digits_left);
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&buf[..n]) })?;
|
||||
digits_left -= n;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ArrayLength<u8>> fmt::UpperHex for GenericArray<u8, T>
|
||||
where
|
||||
T: Add<T>,
|
||||
<T as Add<T>>::Output: ArrayLength<u8>,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
let max_digits = f.precision().unwrap_or_else(|| self.len() * 2);
|
||||
let max_hex = (max_digits >> 1) + (max_digits & 1);
|
||||
|
||||
if T::to_usize() < 1024 {
|
||||
// For small arrays use a stack allocated
|
||||
// buffer of 2x number of bytes
|
||||
let mut res = GenericArray::<u8, Sum<T, T>>::default();
|
||||
|
||||
for (i, c) in self.iter().take(max_hex).enumerate() {
|
||||
res[i * 2] = UPPER_CHARS[(c >> 4) as usize];
|
||||
res[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize];
|
||||
}
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&res[..max_digits]) })?;
|
||||
} else {
|
||||
// For large array use chunks of up to 1024 bytes (2048 hex chars)
|
||||
let mut buf = [0u8; 2048];
|
||||
let mut digits_left = max_digits;
|
||||
|
||||
for chunk in self[..max_hex].chunks(1024) {
|
||||
for (i, c) in chunk.iter().enumerate() {
|
||||
buf[i * 2] = UPPER_CHARS[(c >> 4) as usize];
|
||||
buf[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize];
|
||||
}
|
||||
let n = min(chunk.len() * 2, digits_left);
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&buf[..n]) })?;
|
||||
digits_left -= n;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
//! Generic array are commonly used as a return value for hash digests, so
|
||||
//! it's a good idea to allow to hexlify them easily. This module implements
|
||||
//! `std::fmt::LowerHex` and `std::fmt::UpperHex` traits.
|
||||
//!
|
||||
//! Example:
|
||||
//!
|
||||
//! ```rust
|
||||
//! # #[macro_use]
|
||||
//! # extern crate generic_array;
|
||||
//! # extern crate typenum;
|
||||
//! # fn main() {
|
||||
//! let array = arr![u8; 10, 20, 30];
|
||||
//! assert_eq!(format!("{:x}", array), "0a141e");
|
||||
//! # }
|
||||
//! ```
|
||||
//!
|
||||
|
||||
use core::{fmt, str, ops::Add, cmp::min};
|
||||
|
||||
use typenum::*;
|
||||
|
||||
use crate::{ArrayLength, GenericArray};
|
||||
|
||||
static LOWER_CHARS: &'static [u8] = b"0123456789abcdef";
|
||||
static UPPER_CHARS: &'static [u8] = b"0123456789ABCDEF";
|
||||
|
||||
impl<T: ArrayLength<u8>> fmt::LowerHex for GenericArray<u8, T>
|
||||
where
|
||||
T: Add<T>,
|
||||
<T as Add<T>>::Output: ArrayLength<u8>,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let max_digits = f.precision().unwrap_or_else(|| self.len() * 2);
|
||||
let max_hex = (max_digits >> 1) + (max_digits & 1);
|
||||
|
||||
if T::USIZE < 1024 {
|
||||
// For small arrays use a stack allocated
|
||||
// buffer of 2x number of bytes
|
||||
let mut res = GenericArray::<u8, Sum<T, T>>::default();
|
||||
|
||||
self.iter().take(max_hex).enumerate().for_each(|(i, c)| {
|
||||
res[i * 2] = LOWER_CHARS[(c >> 4) as usize];
|
||||
res[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize];
|
||||
});
|
||||
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&res[..max_digits]) })?;
|
||||
} else {
|
||||
// For large array use chunks of up to 1024 bytes (2048 hex chars)
|
||||
let mut buf = [0u8; 2048];
|
||||
let mut digits_left = max_digits;
|
||||
|
||||
for chunk in self[..max_hex].chunks(1024) {
|
||||
chunk.iter().enumerate().for_each(|(i, c)| {
|
||||
buf[i * 2] = LOWER_CHARS[(c >> 4) as usize];
|
||||
buf[i * 2 + 1] = LOWER_CHARS[(c & 0xF) as usize];
|
||||
});
|
||||
|
||||
let n = min(chunk.len() * 2, digits_left);
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&buf[..n]) })?;
|
||||
digits_left -= n;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ArrayLength<u8>> fmt::UpperHex for GenericArray<u8, T>
|
||||
where
|
||||
T: Add<T>,
|
||||
<T as Add<T>>::Output: ArrayLength<u8>,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let max_digits = f.precision().unwrap_or_else(|| self.len() * 2);
|
||||
let max_hex = (max_digits >> 1) + (max_digits & 1);
|
||||
|
||||
if T::USIZE < 1024 {
|
||||
// For small arrays use a stack allocated
|
||||
// buffer of 2x number of bytes
|
||||
let mut res = GenericArray::<u8, Sum<T, T>>::default();
|
||||
|
||||
self.iter().take(max_hex).enumerate().for_each(|(i, c)| {
|
||||
res[i * 2] = UPPER_CHARS[(c >> 4) as usize];
|
||||
res[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize];
|
||||
});
|
||||
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&res[..max_digits]) })?;
|
||||
} else {
|
||||
// For large array use chunks of up to 1024 bytes (2048 hex chars)
|
||||
let mut buf = [0u8; 2048];
|
||||
let mut digits_left = max_digits;
|
||||
|
||||
for chunk in self[..max_hex].chunks(1024) {
|
||||
chunk.iter().enumerate().for_each(|(i, c)| {
|
||||
buf[i * 2] = UPPER_CHARS[(c >> 4) as usize];
|
||||
buf[i * 2 + 1] = UPPER_CHARS[(c & 0xF) as usize];
|
||||
});
|
||||
|
||||
let n = min(chunk.len() * 2, digits_left);
|
||||
f.write_str(unsafe { str::from_utf8_unchecked(&buf[..n]) })?;
|
||||
digits_left -= n;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,108 +1,108 @@
|
|||
//! Serde serialization/deserialization implementation
|
||||
|
||||
use core::fmt;
|
||||
use core::marker::PhantomData;
|
||||
use serde::de::{self, SeqAccess, Visitor};
|
||||
use serde::{ser::SerializeTuple, Deserialize, Deserializer, Serialize, Serializer};
|
||||
use {ArrayLength, GenericArray};
|
||||
|
||||
impl<T, N> Serialize for GenericArray<T, N>
|
||||
where
|
||||
T: Serialize,
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline]
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut tup = serializer.serialize_tuple(N::to_usize())?;
|
||||
for el in self {
|
||||
tup.serialize_element(el)?;
|
||||
}
|
||||
|
||||
tup.end()
|
||||
}
|
||||
}
|
||||
|
||||
struct GAVisitor<T, N> {
|
||||
_t: PhantomData<T>,
|
||||
_n: PhantomData<N>,
|
||||
}
|
||||
|
||||
impl<'de, T, N> Visitor<'de> for GAVisitor<T, N>
|
||||
where
|
||||
T: Deserialize<'de> + Default,
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Value = GenericArray<T, N>;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("struct GenericArray")
|
||||
}
|
||||
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<GenericArray<T, N>, A::Error>
|
||||
where
|
||||
A: SeqAccess<'de>,
|
||||
{
|
||||
let mut result = GenericArray::default();
|
||||
for i in 0..N::to_usize() {
|
||||
result[i] = seq
|
||||
.next_element()?
|
||||
.ok_or_else(|| de::Error::invalid_length(i, &self))?;
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T, N> Deserialize<'de> for GenericArray<T, N>
|
||||
where
|
||||
T: Deserialize<'de> + Default,
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<GenericArray<T, N>, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let visitor = GAVisitor {
|
||||
_t: PhantomData,
|
||||
_n: PhantomData,
|
||||
};
|
||||
deserializer.deserialize_tuple(N::to_usize(), visitor)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bincode;
|
||||
use typenum;
|
||||
|
||||
#[test]
|
||||
fn test_serialize() {
|
||||
let array = GenericArray::<u8, typenum::U2>::default();
|
||||
let serialized = bincode::serialize(&array);
|
||||
assert!(serialized.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize() {
|
||||
let mut array = GenericArray::<u8, typenum::U2>::default();
|
||||
array[0] = 1;
|
||||
array[1] = 2;
|
||||
let serialized = bincode::serialize(&array).unwrap();
|
||||
let deserialized = bincode::deserialize::<GenericArray<u8, typenum::U2>>(&array);
|
||||
assert!(deserialized.is_ok());
|
||||
let array = deserialized.unwrap();
|
||||
assert_eq!(array[0], 1);
|
||||
assert_eq!(array[1], 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialized_size() {
|
||||
let array = GenericArray::<u8, typenum::U1>::default();
|
||||
let size = bincode::serialized_size(&array).unwrap();
|
||||
assert_eq!(size, 1);
|
||||
}
|
||||
|
||||
}
|
||||
//! Serde serialization/deserialization implementation
|
||||
|
||||
use core::fmt;
|
||||
use core::marker::PhantomData;
|
||||
use serde::de::{self, SeqAccess, Visitor};
|
||||
use serde::{ser::SerializeTuple, Deserialize, Deserializer, Serialize, Serializer};
|
||||
use {ArrayLength, GenericArray};
|
||||
|
||||
impl<T, N> Serialize for GenericArray<T, N>
|
||||
where
|
||||
T: Serialize,
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline]
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
let mut tup = serializer.serialize_tuple(N::USIZE)?;
|
||||
for el in self {
|
||||
tup.serialize_element(el)?;
|
||||
}
|
||||
|
||||
tup.end()
|
||||
}
|
||||
}
|
||||
|
||||
struct GAVisitor<T, N> {
|
||||
_t: PhantomData<T>,
|
||||
_n: PhantomData<N>,
|
||||
}
|
||||
|
||||
impl<'de, T, N> Visitor<'de> for GAVisitor<T, N>
|
||||
where
|
||||
T: Deserialize<'de> + Default,
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Value = GenericArray<T, N>;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("struct GenericArray")
|
||||
}
|
||||
|
||||
fn visit_seq<A>(self, mut seq: A) -> Result<GenericArray<T, N>, A::Error>
|
||||
where
|
||||
A: SeqAccess<'de>,
|
||||
{
|
||||
let mut result = GenericArray::default();
|
||||
for i in 0..N::USIZE {
|
||||
result[i] = seq
|
||||
.next_element()?
|
||||
.ok_or_else(|| de::Error::invalid_length(i, &self))?;
|
||||
}
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de, T, N> Deserialize<'de> for GenericArray<T, N>
|
||||
where
|
||||
T: Deserialize<'de> + Default,
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<GenericArray<T, N>, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let visitor = GAVisitor {
|
||||
_t: PhantomData,
|
||||
_n: PhantomData,
|
||||
};
|
||||
deserializer.deserialize_tuple(N::USIZE, visitor)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use bincode;
|
||||
use typenum;
|
||||
|
||||
#[test]
|
||||
fn test_serialize() {
|
||||
let array = GenericArray::<u8, typenum::U2>::default();
|
||||
let serialized = bincode::serialize(&array);
|
||||
assert!(serialized.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialize() {
|
||||
let mut array = GenericArray::<u8, typenum::U2>::default();
|
||||
array[0] = 1;
|
||||
array[1] = 2;
|
||||
let serialized = bincode::serialize(&array).unwrap();
|
||||
let deserialized = bincode::deserialize::<GenericArray<u8, typenum::U2>>(&serialized);
|
||||
assert!(deserialized.is_ok());
|
||||
let array = deserialized.unwrap();
|
||||
assert_eq!(array[0], 1);
|
||||
assert_eq!(array[1], 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialized_size() {
|
||||
let array = GenericArray::<u8, typenum::U1>::default();
|
||||
let size = bincode::serialized_size(&array).unwrap();
|
||||
assert_eq!(size, 1);
|
||||
}
|
||||
|
||||
}
|
||||
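A minimal round-trip sketch of the serde support above (illustrative only; it assumes the crate's `serde` feature and the `bincode` crate already used by the tests): because `serialize` goes through `serializer.serialize_tuple(N::USIZE)`, a `GenericArray<T, N>` encodes exactly like a fixed-size `[T; N]`, with no length prefix under bincode's default format.

use generic_array::{typenum::U4, GenericArray};

fn bincode_roundtrip() {
    let a = GenericArray::<u8, U4>::from([1, 2, 3, 4]);

    // Four u8 elements serialize to exactly four bytes (tuple encoding, no length prefix).
    let bytes = bincode::serialize(&a).unwrap();
    assert_eq!(bytes.len(), 4);

    // Deserialization rebuilds the array through the tuple visitor above.
    let b: GenericArray<u8, U4> = bincode::deserialize(&bytes).unwrap();
    assert_eq!(a, b);
}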
|
|
|
@ -1,182 +1,269 @@
|
|||
use super::{ArrayLength, GenericArray};
|
||||
use core::borrow::{Borrow, BorrowMut};
|
||||
use core::cmp::Ordering;
|
||||
use core::fmt::{self, Debug};
|
||||
use core::hash::{Hash, Hasher};
|
||||
use functional::*;
|
||||
use sequence::*;
|
||||
|
||||
impl<T: Default, N> Default for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline]
|
||||
fn default() -> Self {
|
||||
Self::generate(|_| T::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone, N> Clone for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn clone(&self) -> GenericArray<T, N> {
|
||||
self.map(Clone::clone)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy, N> Copy for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
N::ArrayType: Copy,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T: PartialEq, N> PartialEq for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
**self == **other
|
||||
}
|
||||
}
|
||||
impl<T: Eq, N> Eq for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T: PartialOrd, N> PartialOrd for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn partial_cmp(&self, other: &GenericArray<T, N>) -> Option<Ordering> {
|
||||
PartialOrd::partial_cmp(self.as_slice(), other.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Ord, N> Ord for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn cmp(&self, other: &GenericArray<T, N>) -> Ordering {
|
||||
Ord::cmp(self.as_slice(), other.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Debug, N> Debug for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
self[..].fmt(fmt)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> Borrow<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn borrow(&self) -> &[T] {
|
||||
&self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> BorrowMut<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn borrow_mut(&mut self) -> &mut [T] {
|
||||
&mut self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> AsRef<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn as_ref(&self) -> &[T] {
|
||||
&self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> AsMut<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn as_mut(&mut self) -> &mut [T] {
|
||||
&mut self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Hash, N> Hash for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn hash<H>(&self, state: &mut H)
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
Hash::hash(&self[..], state)
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_from {
|
||||
($($n: expr => $ty: ty),*) => {
|
||||
$(
|
||||
impl<T> From<[T; $n]> for GenericArray<T, $ty> {
|
||||
#[inline(always)]
|
||||
fn from(arr: [T; $n]) -> Self {
|
||||
unsafe { $crate::transmute(arr) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Into<[T; $n]> for GenericArray<T, $ty> {
|
||||
#[inline(always)]
|
||||
fn into(self) -> [T; $n] {
|
||||
unsafe { $crate::transmute(self) }
|
||||
}
|
||||
}
|
||||
)*
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
impl_from! {
|
||||
1 => ::typenum::U1,
|
||||
2 => ::typenum::U2,
|
||||
3 => ::typenum::U3,
|
||||
4 => ::typenum::U4,
|
||||
5 => ::typenum::U5,
|
||||
6 => ::typenum::U6,
|
||||
7 => ::typenum::U7,
|
||||
8 => ::typenum::U8,
|
||||
9 => ::typenum::U9,
|
||||
10 => ::typenum::U10,
|
||||
11 => ::typenum::U11,
|
||||
12 => ::typenum::U12,
|
||||
13 => ::typenum::U13,
|
||||
14 => ::typenum::U14,
|
||||
15 => ::typenum::U15,
|
||||
16 => ::typenum::U16,
|
||||
17 => ::typenum::U17,
|
||||
18 => ::typenum::U18,
|
||||
19 => ::typenum::U19,
|
||||
20 => ::typenum::U20,
|
||||
21 => ::typenum::U21,
|
||||
22 => ::typenum::U22,
|
||||
23 => ::typenum::U23,
|
||||
24 => ::typenum::U24,
|
||||
25 => ::typenum::U25,
|
||||
26 => ::typenum::U26,
|
||||
27 => ::typenum::U27,
|
||||
28 => ::typenum::U28,
|
||||
29 => ::typenum::U29,
|
||||
30 => ::typenum::U30,
|
||||
31 => ::typenum::U31,
|
||||
32 => ::typenum::U32
|
||||
}
|
||||
use core::borrow::{Borrow, BorrowMut};
|
||||
use core::cmp::Ordering;
|
||||
use core::fmt::{self, Debug};
|
||||
use core::hash::{Hash, Hasher};
|
||||
|
||||
use super::{ArrayLength, GenericArray};
|
||||
|
||||
use crate::functional::*;
|
||||
use crate::sequence::*;
|
||||
|
||||
impl<T: Default, N> Default for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn default() -> Self {
|
||||
Self::generate(|_| T::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone, N> Clone for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn clone(&self) -> GenericArray<T, N> {
|
||||
self.map(Clone::clone)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Copy, N> Copy for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
N::ArrayType: Copy,
|
||||
{
|
||||
}
|
||||
|
||||
impl<T: PartialEq, N> PartialEq for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
**self == **other
|
||||
}
|
||||
}
|
||||
impl<T: Eq, N> Eq for GenericArray<T, N> where N: ArrayLength<T> {}
|
||||
|
||||
impl<T: PartialOrd, N> PartialOrd for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn partial_cmp(&self, other: &GenericArray<T, N>) -> Option<Ordering> {
|
||||
PartialOrd::partial_cmp(self.as_slice(), other.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Ord, N> Ord for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn cmp(&self, other: &GenericArray<T, N>) -> Ordering {
|
||||
Ord::cmp(self.as_slice(), other.as_slice())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Debug, N> Debug for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
|
||||
self[..].fmt(fmt)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> Borrow<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn borrow(&self) -> &[T] {
|
||||
&self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> BorrowMut<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn borrow_mut(&mut self) -> &mut [T] {
|
||||
&mut self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> AsRef<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn as_ref(&self) -> &[T] {
|
||||
&self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> AsMut<[T]> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline(always)]
|
||||
fn as_mut(&mut self) -> &mut [T] {
|
||||
&mut self[..]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Hash, N> Hash for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn hash<H>(&self, state: &mut H)
|
||||
where
|
||||
H: Hasher,
|
||||
{
|
||||
Hash::hash(&self[..], state)
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! impl_from {
|
||||
($($n: expr => $ty: ty),*) => {
|
||||
$(
|
||||
impl<T> From<[T; $n]> for GenericArray<T, $ty> {
|
||||
#[inline(always)]
|
||||
fn from(arr: [T; $n]) -> Self {
|
||||
unsafe { $crate::transmute(arr) }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(relaxed_coherence)]
|
||||
impl<T> From<GenericArray<T, $ty>> for [T; $n] {
|
||||
#[inline(always)]
|
||||
fn from(sel: GenericArray<T, $ty>) -> [T; $n] {
|
||||
unsafe { $crate::transmute(sel) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> From<&'a [T; $n]> for &'a GenericArray<T, $ty> {
|
||||
#[inline]
|
||||
fn from(slice: &[T; $n]) -> &GenericArray<T, $ty> {
|
||||
unsafe { &*(slice.as_ptr() as *const GenericArray<T, $ty>) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T> From<&'a mut [T; $n]> for &'a mut GenericArray<T, $ty> {
|
||||
#[inline]
|
||||
fn from(slice: &mut [T; $n]) -> &mut GenericArray<T, $ty> {
|
||||
unsafe { &mut *(slice.as_mut_ptr() as *mut GenericArray<T, $ty>) }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(relaxed_coherence))]
|
||||
impl<T> Into<[T; $n]> for GenericArray<T, $ty> {
|
||||
#[inline(always)]
|
||||
fn into(self) -> [T; $n] {
|
||||
unsafe { $crate::transmute(self) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> AsRef<[T; $n]> for GenericArray<T, $ty> {
|
||||
#[inline]
|
||||
fn as_ref(&self) -> &[T; $n] {
|
||||
unsafe { $crate::transmute(self) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> AsMut<[T; $n]> for GenericArray<T, $ty> {
|
||||
#[inline]
|
||||
fn as_mut(&mut self) -> &mut [T; $n] {
|
||||
unsafe { $crate::transmute(self) }
|
||||
}
|
||||
}
|
||||
)*
|
||||
}
|
||||
}
|
||||
|
||||
impl_from! {
|
||||
1 => ::typenum::U1,
|
||||
2 => ::typenum::U2,
|
||||
3 => ::typenum::U3,
|
||||
4 => ::typenum::U4,
|
||||
5 => ::typenum::U5,
|
||||
6 => ::typenum::U6,
|
||||
7 => ::typenum::U7,
|
||||
8 => ::typenum::U8,
|
||||
9 => ::typenum::U9,
|
||||
10 => ::typenum::U10,
|
||||
11 => ::typenum::U11,
|
||||
12 => ::typenum::U12,
|
||||
13 => ::typenum::U13,
|
||||
14 => ::typenum::U14,
|
||||
15 => ::typenum::U15,
|
||||
16 => ::typenum::U16,
|
||||
17 => ::typenum::U17,
|
||||
18 => ::typenum::U18,
|
||||
19 => ::typenum::U19,
|
||||
20 => ::typenum::U20,
|
||||
21 => ::typenum::U21,
|
||||
22 => ::typenum::U22,
|
||||
23 => ::typenum::U23,
|
||||
24 => ::typenum::U24,
|
||||
25 => ::typenum::U25,
|
||||
26 => ::typenum::U26,
|
||||
27 => ::typenum::U27,
|
||||
28 => ::typenum::U28,
|
||||
29 => ::typenum::U29,
|
||||
30 => ::typenum::U30,
|
||||
31 => ::typenum::U31,
|
||||
32 => ::typenum::U32
|
||||
}
|
||||
|
||||
#[cfg(feature = "more_lengths")]
|
||||
impl_from! {
|
||||
33 => ::typenum::U33,
|
||||
34 => ::typenum::U34,
|
||||
35 => ::typenum::U35,
|
||||
36 => ::typenum::U36,
|
||||
37 => ::typenum::U37,
|
||||
38 => ::typenum::U38,
|
||||
39 => ::typenum::U39,
|
||||
40 => ::typenum::U40,
|
||||
41 => ::typenum::U41,
|
||||
42 => ::typenum::U42,
|
||||
43 => ::typenum::U43,
|
||||
44 => ::typenum::U44,
|
||||
45 => ::typenum::U45,
|
||||
46 => ::typenum::U46,
|
||||
47 => ::typenum::U47,
|
||||
48 => ::typenum::U48,
|
||||
49 => ::typenum::U49,
|
||||
50 => ::typenum::U50,
|
||||
51 => ::typenum::U51,
|
||||
52 => ::typenum::U52,
|
||||
53 => ::typenum::U53,
|
||||
54 => ::typenum::U54,
|
||||
55 => ::typenum::U55,
|
||||
56 => ::typenum::U56,
|
||||
57 => ::typenum::U57,
|
||||
58 => ::typenum::U58,
|
||||
59 => ::typenum::U59,
|
||||
60 => ::typenum::U60,
|
||||
61 => ::typenum::U61,
|
||||
62 => ::typenum::U62,
|
||||
63 => ::typenum::U63,
|
||||
64 => ::typenum::U64,
|
||||
|
||||
70 => ::typenum::U70,
|
||||
80 => ::typenum::U80,
|
||||
90 => ::typenum::U90,
|
||||
|
||||
100 => ::typenum::U100,
|
||||
200 => ::typenum::U200,
|
||||
300 => ::typenum::U300,
|
||||
400 => ::typenum::U400,
|
||||
500 => ::typenum::U500,
|
||||
|
||||
128 => ::typenum::U128,
|
||||
256 => ::typenum::U256,
|
||||
512 => ::typenum::U512,
|
||||
|
||||
1000 => ::typenum::U1000,
|
||||
1024 => ::typenum::U1024
|
||||
}
|
||||
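A small sketch of how the conversion and standard-trait impls above behave (illustrative, assuming the `arr!` macro and `typenum` re-export provided by the crate): a `GenericArray<T, UN>` converts to and from `[T; N]`, and compares, orders, and hashes exactly like its underlying slice.

use std::collections::HashSet;
use generic_array::{arr, typenum::U3, GenericArray};

fn conversions_and_std_traits() {
    // [T; N] -> GenericArray<T, UN> via the generated From impl.
    let a: GenericArray<u8, U3> = GenericArray::from([1, 2, 3]);

    // Borrow the same storage back as a plain array reference.
    let r: &[u8; 3] = a.as_ref();
    assert_eq!(r, &[1, 2, 3]);

    // PartialEq/PartialOrd/Hash all defer to the underlying slice.
    assert_eq!(a, arr![u8; 1, 2, 3]);
    assert!(a < arr![u8; 1, 2, 4]);

    let mut set = HashSet::new();
    set.insert(a); // a is Copy here, so it stays usable below

    // And back into a plain array through the Into/From direction.
    let plain: [u8; 3] = a.into();
    assert!(set.contains(&GenericArray::from(plain)));
}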
|
|
|
@ -1,190 +1,256 @@
|
|||
//! `GenericArray` iterator implementation.
|
||||
|
||||
use super::{ArrayLength, GenericArray};
|
||||
use core::{cmp, ptr, fmt, mem};
|
||||
use core::mem::ManuallyDrop;
|
||||
|
||||
/// An iterator that moves out of a `GenericArray`
|
||||
pub struct GenericArrayIter<T, N: ArrayLength<T>> {
|
||||
// Invariants: index <= index_back <= N
|
||||
// Only values in array[index..index_back] are alive at any given time.
|
||||
// Values from array[..index] and array[index_back..] are already moved/dropped.
|
||||
array: ManuallyDrop<GenericArray<T, N>>,
|
||||
index: usize,
|
||||
index_back: usize,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
fn send<I: Send>(_iter: I) {}
|
||||
|
||||
#[test]
|
||||
fn test_send_iter() {
|
||||
send(GenericArray::from([1, 2, 3, 4]).into_iter());
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
/// Returns the remaining items of this iterator as a slice
|
||||
#[inline]
|
||||
pub fn as_slice(&self) -> &[T] {
|
||||
&self.array.as_slice()[self.index..self.index_back]
|
||||
}
|
||||
|
||||
/// Returns the remaining items of this iterator as a mutable slice
|
||||
#[inline]
|
||||
pub fn as_mut_slice(&mut self) -> &mut [T] {
|
||||
&mut self.array.as_mut_slice()[self.index..self.index_back]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> IntoIterator for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Item = T;
|
||||
type IntoIter = GenericArrayIter<T, N>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
GenericArrayIter {
|
||||
array: ManuallyDrop::new(self),
|
||||
index: 0,
|
||||
index_back: N::to_usize(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Based on work in rust-lang/rust#49000
|
||||
impl<T: fmt::Debug, N> fmt::Debug for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_tuple("GenericArrayIter")
|
||||
.field(&self.as_slice())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> Drop for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
// Drop values that are still alive.
|
||||
for p in self.as_mut_slice() {
|
||||
unsafe {
|
||||
ptr::drop_in_place(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Based on work in rust-lang/rust#49000
|
||||
impl<T: Clone, N> Clone for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
// This places all cloned elements at the start of the new array iterator,
|
||||
// not at their original indices.
|
||||
unsafe {
|
||||
let mut iter = GenericArrayIter {
|
||||
array: ManuallyDrop::new(mem::uninitialized()),
|
||||
index: 0,
|
||||
index_back: 0,
|
||||
};
|
||||
|
||||
for (dst, src) in iter.array.iter_mut().zip(self.as_slice()) {
|
||||
ptr::write(dst, src.clone());
|
||||
|
||||
iter.index_back += 1;
|
||||
}
|
||||
|
||||
iter
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> Iterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Item = T;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<T> {
|
||||
if self.index < self.index_back {
|
||||
let p = unsafe { Some(ptr::read(self.array.get_unchecked(self.index))) };
|
||||
|
||||
self.index += 1;
|
||||
|
||||
p
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
let len = self.len();
|
||||
(len, Some(len))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn count(self) -> usize {
|
||||
self.len()
|
||||
}
|
||||
|
||||
fn nth(&mut self, n: usize) -> Option<T> {
|
||||
// First consume values prior to the nth.
|
||||
let ndrop = cmp::min(n, self.len());
|
||||
|
||||
for p in &mut self.array[self.index..self.index + ndrop] {
|
||||
self.index += 1;
|
||||
|
||||
unsafe {
|
||||
ptr::drop_in_place(p);
|
||||
}
|
||||
}
|
||||
|
||||
self.next()
|
||||
}
|
||||
|
||||
fn last(mut self) -> Option<T> {
|
||||
// Note, everything else will correctly drop first as `self` leaves scope.
|
||||
self.next_back()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> DoubleEndedIterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<T> {
|
||||
if self.index < self.index_back {
|
||||
self.index_back -= 1;
|
||||
|
||||
unsafe { Some(ptr::read(self.array.get_unchecked(self.index_back))) }
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> ExactSizeIterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.index_back - self.index
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Implement `FusedIterator` and `TrustedLen` when stabilized
|
||||
//! `GenericArray` iterator implementation.
|
||||
|
||||
use super::{ArrayLength, GenericArray};
|
||||
use core::iter::FusedIterator;
|
||||
use core::mem::ManuallyDrop;
|
||||
use core::{cmp, fmt, ptr, mem};
|
||||
|
||||
/// An iterator that moves out of a `GenericArray`
|
||||
pub struct GenericArrayIter<T, N: ArrayLength<T>> {
|
||||
// Invariants: index <= index_back <= N
|
||||
// Only values in array[index..index_back] are alive at any given time.
|
||||
// Values from array[..index] and array[index_back..] are already moved/dropped.
|
||||
array: ManuallyDrop<GenericArray<T, N>>,
|
||||
index: usize,
|
||||
index_back: usize,
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
|
||||
fn send<I: Send>(_iter: I) {}
|
||||
|
||||
#[test]
|
||||
fn test_send_iter() {
|
||||
send(GenericArray::from([1, 2, 3, 4]).into_iter());
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
/// Returns the remaining items of this iterator as a slice
|
||||
#[inline]
|
||||
pub fn as_slice(&self) -> &[T] {
|
||||
&self.array.as_slice()[self.index..self.index_back]
|
||||
}
|
||||
|
||||
/// Returns the remaining items of this iterator as a mutable slice
|
||||
#[inline]
|
||||
pub fn as_mut_slice(&mut self) -> &mut [T] {
|
||||
&mut self.array.as_mut_slice()[self.index..self.index_back]
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> IntoIterator for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Item = T;
|
||||
type IntoIter = GenericArrayIter<T, N>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
GenericArrayIter {
|
||||
array: ManuallyDrop::new(self),
|
||||
index: 0,
|
||||
index_back: N::USIZE,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Based on work in rust-lang/rust#49000
|
||||
impl<T: fmt::Debug, N> fmt::Debug for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_tuple("GenericArrayIter")
|
||||
.field(&self.as_slice())
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> Drop for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
#[inline]
|
||||
fn drop(&mut self) {
|
||||
if mem::needs_drop::<T>() {
|
||||
// Drop values that are still alive.
|
||||
for p in self.as_mut_slice() {
|
||||
unsafe {
|
||||
ptr::drop_in_place(p);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Based on work in rust-lang/rust#49000
|
||||
impl<T: Clone, N> Clone for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn clone(&self) -> Self {
|
||||
// This places all cloned elements at the start of the new array iterator,
|
||||
// not at their original indices.
|
||||
unsafe {
|
||||
let mut array = ptr::read(&self.array);
|
||||
let mut index_back = 0;
|
||||
|
||||
for (dst, src) in array.as_mut_slice().into_iter().zip(self.as_slice()) {
|
||||
ptr::write(dst, src.clone());
|
||||
index_back += 1;
|
||||
}
|
||||
|
||||
GenericArrayIter {
|
||||
array,
|
||||
index: 0,
|
||||
index_back
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> Iterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
type Item = T;
|
||||
|
||||
#[inline]
|
||||
fn next(&mut self) -> Option<T> {
|
||||
if self.index < self.index_back {
|
||||
let p = unsafe { Some(ptr::read(self.array.get_unchecked(self.index))) };
|
||||
|
||||
self.index += 1;
|
||||
|
||||
p
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn fold<B, F>(mut self, init: B, mut f: F) -> B
|
||||
where
|
||||
F: FnMut(B, Self::Item) -> B,
|
||||
{
|
||||
let ret = unsafe {
|
||||
let GenericArrayIter {
|
||||
ref array,
|
||||
ref mut index,
|
||||
index_back,
|
||||
} = self;
|
||||
|
||||
let remaining = &array[*index..index_back];
|
||||
|
||||
remaining.iter().fold(init, |acc, src| {
|
||||
let value = ptr::read(src);
|
||||
|
||||
*index += 1;
|
||||
|
||||
f(acc, value)
|
||||
})
|
||||
};
|
||||
|
||||
// ensure the drop happens here after iteration
|
||||
drop(self);
|
||||
|
||||
ret
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||
let len = self.len();
|
||||
(len, Some(len))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn count(self) -> usize {
|
||||
self.len()
|
||||
}
|
||||
|
||||
fn nth(&mut self, n: usize) -> Option<T> {
|
||||
// First consume values prior to the nth.
|
||||
let ndrop = cmp::min(n, self.len());
|
||||
|
||||
for p in &mut self.array[self.index..self.index + ndrop] {
|
||||
self.index += 1;
|
||||
|
||||
unsafe {
|
||||
ptr::drop_in_place(p);
|
||||
}
|
||||
}
|
||||
|
||||
self.next()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn last(mut self) -> Option<T> {
|
||||
// Note, everything else will correctly drop first as `self` leaves scope.
|
||||
self.next_back()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> DoubleEndedIterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn next_back(&mut self) -> Option<T> {
|
||||
if self.index < self.index_back {
|
||||
self.index_back -= 1;
|
||||
|
||||
unsafe { Some(ptr::read(self.array.get_unchecked(self.index_back))) }
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn rfold<B, F>(mut self, init: B, mut f: F) -> B
|
||||
where
|
||||
F: FnMut(B, Self::Item) -> B,
|
||||
{
|
||||
let ret = unsafe {
|
||||
let GenericArrayIter {
|
||||
ref array,
|
||||
index,
|
||||
ref mut index_back,
|
||||
} = self;
|
||||
|
||||
let remaining = &array[index..*index_back];
|
||||
|
||||
remaining.iter().rfold(init, |acc, src| {
|
||||
let value = ptr::read(src);
|
||||
|
||||
*index_back -= 1;
|
||||
|
||||
f(acc, value)
|
||||
})
|
||||
};
|
||||
|
||||
// ensure the drop happens here after iteration
|
||||
drop(self);
|
||||
|
||||
ret
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> ExactSizeIterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
fn len(&self) -> usize {
|
||||
self.index_back - self.index
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, N> FusedIterator for GenericArrayIter<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
{
|
||||
}
|
||||
|
||||
// TODO: Implement `TrustedLen` when stabilized
|
||||
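A short sketch of the by-value iterator above (illustrative, assuming the crate's `arr!` macro): items move out of the array one at a time, from either end, and the `index`/`index_back` invariants keep `as_slice` and `len` in sync with what is still alive.

use generic_array::arr;

fn drain_from_both_ends() {
    let mut it = arr![i32; 1, 2, 3, 4].into_iter();

    // Values are moved out (ptr::read) and the live range shrinks from both ends.
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next_back(), Some(4));

    // Only the still-alive middle remains visible.
    assert_eq!(it.as_slice(), &[2, 3]);
    assert_eq!(it.len(), 2);
}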
|
|
The diff between the files is not shown because of its large size.
|
@ -1,320 +1,380 @@
|
|||
//! Useful traits for manipulating sequences of data stored in `GenericArray`s
|
||||
|
||||
use super::*;
|
||||
use core::{mem, ptr};
|
||||
use core::ops::{Add, Sub};
|
||||
use typenum::operator_aliases::*;
|
||||
|
||||
/// Defines some sequence with an associated length and iteration capabilities.
|
||||
///
|
||||
/// This is useful for passing N-length generic arrays as generics.
|
||||
pub unsafe trait GenericSequence<T>: Sized + IntoIterator {
|
||||
/// `GenericArray` associated length
|
||||
type Length: ArrayLength<T>;
|
||||
|
||||
/// Concrete sequence type used in conjunction with reference implementations of `GenericSequence`
|
||||
type Sequence: GenericSequence<T, Length = Self::Length> + FromIterator<T>;
|
||||
|
||||
/// Initializes a new sequence instance using the given function.
|
||||
///
|
||||
/// If the generator function panics while initializing the sequence,
|
||||
/// any already initialized elements will be dropped.
|
||||
fn generate<F>(f: F) -> Self::Sequence
|
||||
where
|
||||
F: FnMut(usize) -> T;
|
||||
|
||||
#[doc(hidden)]
|
||||
fn inverted_zip<B, U, F>(
|
||||
self,
|
||||
lhs: GenericArray<B, Self::Length>,
|
||||
mut f: F,
|
||||
) -> MappedSequence<GenericArray<B, Self::Length>, B, U>
|
||||
where
|
||||
GenericArray<B, Self::Length>: GenericSequence<B, Length = Self::Length>
|
||||
+ MappedGenericSequence<B, U>,
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
F: FnMut(B, Self::Item) -> U,
|
||||
{
|
||||
unsafe {
|
||||
let mut left = ArrayConsumer::new(lhs);
|
||||
|
||||
let (left_array_iter, left_position) = left.iter_position();
|
||||
|
||||
FromIterator::from_iter(
|
||||
left_array_iter
|
||||
.zip(self.into_iter())
|
||||
.map(|(l, right_value)| {
|
||||
let left_value = ptr::read(l);
|
||||
|
||||
*left_position += 1;
|
||||
|
||||
f(left_value, right_value)
|
||||
})
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
fn inverted_zip2<B, Lhs, U, F>(self, lhs: Lhs, mut f: F) -> MappedSequence<Lhs, B, U>
|
||||
where
|
||||
Lhs: GenericSequence<B, Length = Self::Length> + MappedGenericSequence<B, U>,
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
F: FnMut(Lhs::Item, Self::Item) -> U,
|
||||
{
|
||||
FromIterator::from_iter(lhs.into_iter().zip(self.into_iter()).map(|(l, r)| f(l, r)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Accessor for `GenericSequence` item type, which is really `IntoIterator::Item`
|
||||
///
|
||||
/// For deeply nested generic mapped sequence types, like shown in `tests/generics.rs`,
|
||||
/// this can be useful for keeping things organized.
|
||||
pub type SequenceItem<T> = <T as IntoIterator>::Item;
|
||||
|
||||
unsafe impl<'a, T: 'a, S: GenericSequence<T>> GenericSequence<T> for &'a S
|
||||
where
|
||||
&'a S: IntoIterator,
|
||||
{
|
||||
type Length = S::Length;
|
||||
type Sequence = S::Sequence;
|
||||
|
||||
#[inline]
|
||||
fn generate<F>(f: F) -> Self::Sequence
|
||||
where
|
||||
F: FnMut(usize) -> T,
|
||||
{
|
||||
S::generate(f)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T: 'a, S: GenericSequence<T>> GenericSequence<T> for &'a mut S
|
||||
where
|
||||
&'a mut S: IntoIterator,
|
||||
{
|
||||
type Length = S::Length;
|
||||
type Sequence = S::Sequence;
|
||||
|
||||
#[inline]
|
||||
fn generate<F>(f: F) -> Self::Sequence
|
||||
where
|
||||
F: FnMut(usize) -> T,
|
||||
{
|
||||
S::generate(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// Defines any `GenericSequence` which can be lengthened or extended by appending
|
||||
/// or prepending an element to it.
|
||||
///
|
||||
/// Any lengthened sequence can be shortened back to the original using `pop_front` or `pop_back`
|
||||
pub unsafe trait Lengthen<T>: Sized + GenericSequence<T> {
|
||||
/// `GenericSequence` that has one more element than `Self`
|
||||
type Longer: Shorten<T, Shorter = Self>;
|
||||
|
||||
/// Returns a new array with the given element appended to the end of it.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```ignore
|
||||
/// let a = arr![i32; 1, 2, 3];
|
||||
///
|
||||
/// let b = a.append(4);
|
||||
///
|
||||
/// assert_eq!(b, arr![i32; 1, 2, 3, 4]);
|
||||
/// ```
|
||||
fn append(self, last: T) -> Self::Longer;
|
||||
|
||||
/// Returns a new array with the given element prepended to the front of it.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```ignore
|
||||
/// let a = arr![i32; 1, 2, 3];
|
||||
///
|
||||
/// let b = a.prepend(4);
|
||||
///
|
||||
/// assert_eq!(b, arr![i32; 4, 1, 2, 3]);
|
||||
/// ```
|
||||
fn prepend(self, first: T) -> Self::Longer;
|
||||
}
|
||||
|
||||
/// Defines a `GenericSequence` which can be shortened by removing the first or last element from it.
|
||||
///
|
||||
/// Additionally, any shortened sequence can be lengthened by
|
||||
/// appending or prepending an element to it.
|
||||
pub unsafe trait Shorten<T>: Sized + GenericSequence<T> {
|
||||
/// `GenericSequence` that has one less element than `Self`
|
||||
type Shorter: Lengthen<T, Longer = Self>;
|
||||
|
||||
/// Returns a new array without the last element, and the last element.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```ignore
|
||||
/// let a = arr![i32; 1, 2, 3, 4];
|
||||
///
|
||||
/// let (init, last) = a.pop_back();
|
||||
///
|
||||
/// assert_eq!(init, arr![i32; 1, 2, 3]);
|
||||
/// assert_eq!(last, 4);
|
||||
/// ```
|
||||
fn pop_back(self) -> (Self::Shorter, T);
|
||||
|
||||
/// Returns a new array without the first element, and the first element.
|
||||
/// Example:
|
||||
///
|
||||
/// ```ignore
|
||||
/// let a = arr![i32; 1, 2, 3, 4];
|
||||
///
|
||||
/// let (head, tail) = a.pop_front();
|
||||
///
|
||||
/// assert_eq!(head, 1);
|
||||
/// assert_eq!(tail, arr![i32; 2, 3, 4]);
|
||||
/// ```
|
||||
fn pop_front(self) -> (T, Self::Shorter);
|
||||
}
|
||||
|
||||
unsafe impl<T, N: ArrayLength<T>> Lengthen<T> for GenericArray<T, N>
|
||||
where
|
||||
N: Add<B1>,
|
||||
Add1<N>: ArrayLength<T>,
|
||||
Add1<N>: Sub<B1, Output = N>,
|
||||
Sub1<Add1<N>>: ArrayLength<T>,
|
||||
{
|
||||
type Longer = GenericArray<T, Add1<N>>;
|
||||
|
||||
fn append(self, last: T) -> Self::Longer {
|
||||
let mut longer: Self::Longer = unsafe { mem::uninitialized() };
|
||||
|
||||
unsafe {
|
||||
ptr::write(longer.as_mut_ptr() as *mut _, self);
|
||||
ptr::write(&mut longer[N::to_usize()], last);
|
||||
}
|
||||
|
||||
longer
|
||||
}
|
||||
|
||||
fn prepend(self, first: T) -> Self::Longer {
|
||||
let mut longer: Self::Longer = unsafe { mem::uninitialized() };
|
||||
|
||||
let longer_ptr = longer.as_mut_ptr();
|
||||
|
||||
unsafe {
|
||||
ptr::write(longer_ptr as *mut _, first);
|
||||
ptr::write(longer_ptr.offset(1) as *mut _, self);
|
||||
}
|
||||
|
||||
longer
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, N: ArrayLength<T>> Shorten<T> for GenericArray<T, N>
|
||||
where
|
||||
N: Sub<B1>,
|
||||
Sub1<N>: ArrayLength<T>,
|
||||
Sub1<N>: Add<B1, Output = N>,
|
||||
Add1<Sub1<N>>: ArrayLength<T>,
|
||||
{
|
||||
type Shorter = GenericArray<T, Sub1<N>>;
|
||||
|
||||
fn pop_back(self) -> (Self::Shorter, T) {
|
||||
let init_ptr = self.as_ptr();
|
||||
let last_ptr = unsafe { init_ptr.offset(Sub1::<N>::to_usize() as isize) };
|
||||
|
||||
let init = unsafe { ptr::read(init_ptr as _) };
|
||||
let last = unsafe { ptr::read(last_ptr as _) };
|
||||
|
||||
mem::forget(self);
|
||||
|
||||
(init, last)
|
||||
}
|
||||
|
||||
fn pop_front(self) -> (T, Self::Shorter) {
|
||||
let head_ptr = self.as_ptr();
|
||||
let tail_ptr = unsafe { head_ptr.offset(1) };
|
||||
|
||||
let head = unsafe { ptr::read(head_ptr as _) };
|
||||
let tail = unsafe { ptr::read(tail_ptr as _) };
|
||||
|
||||
mem::forget(self);
|
||||
|
||||
(head, tail)
|
||||
}
|
||||
}
|
||||
|
||||
/// Defines a `GenericSequence` that can be split into two parts at a given pivot index.
|
||||
pub unsafe trait Split<T, K>: GenericSequence<T>
|
||||
where
|
||||
K: ArrayLength<T>,
|
||||
{
|
||||
/// First part of the resulting split array
|
||||
type First: GenericSequence<T>;
|
||||
/// Second part of the resulting split array
|
||||
type Second: GenericSequence<T>;
|
||||
|
||||
/// Splits an array at the given index, returning the separate parts of the array.
|
||||
fn split(self) -> (Self::First, Self::Second);
|
||||
}
|
||||
|
||||
unsafe impl<T, N, K> Split<T, K> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
K: ArrayLength<T>,
|
||||
N: Sub<K>,
|
||||
Diff<N, K>: ArrayLength<T>,
|
||||
{
|
||||
type First = GenericArray<T, K>;
|
||||
type Second = GenericArray<T, Diff<N, K>>;
|
||||
|
||||
fn split(self) -> (Self::First, Self::Second) {
|
||||
let head_ptr = self.as_ptr();
|
||||
let tail_ptr = unsafe { head_ptr.offset(K::to_usize() as isize) };
|
||||
|
||||
let head = unsafe { ptr::read(head_ptr as _) };
|
||||
let tail = unsafe { ptr::read(tail_ptr as _) };
|
||||
|
||||
mem::forget(self);
|
||||
|
||||
(head, tail)
|
||||
}
|
||||
}
|
||||
|
||||
/// Defines `GenericSequence`s which can be joined together, forming a larger array.
|
||||
pub unsafe trait Concat<T, M>: GenericSequence<T>
|
||||
where
|
||||
M: ArrayLength<T>,
|
||||
{
|
||||
/// Sequence to be concatenated with `self`
|
||||
type Rest: GenericSequence<T, Length = M>;
|
||||
|
||||
/// Resulting sequence formed by the concatenation.
|
||||
type Output: GenericSequence<T>;
|
||||
|
||||
/// Concatenate, or join, two sequences.
|
||||
fn concat(self, rest: Self::Rest) -> Self::Output;
|
||||
}
|
||||
|
||||
unsafe impl<T, N, M> Concat<T, M> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T> + Add<M>,
|
||||
M: ArrayLength<T>,
|
||||
Sum<N, M>: ArrayLength<T>,
|
||||
{
|
||||
type Rest = GenericArray<T, M>;
|
||||
type Output = GenericArray<T, Sum<N, M>>;
|
||||
|
||||
fn concat(self, rest: Self::Rest) -> Self::Output {
|
||||
let mut output: Self::Output = unsafe { mem::uninitialized() };
|
||||
|
||||
let output_ptr = output.as_mut_ptr();
|
||||
|
||||
unsafe {
|
||||
ptr::write(output_ptr as *mut _, self);
|
||||
ptr::write(output_ptr.offset(N::to_usize() as isize) as *mut _, rest);
|
||||
}
|
||||
|
||||
output
|
||||
}
|
||||
}
|
||||
//! Useful traits for manipulating sequences of data stored in `GenericArray`s
|
||||
|
||||
use super::*;
|
||||
use core::ops::{Add, Sub};
|
||||
use core::mem::MaybeUninit;
|
||||
use core::ptr;
|
||||
use typenum::operator_aliases::*;
|
||||
|
||||
/// Defines some sequence with an associated length and iteration capabilities.
|
||||
///
|
||||
/// This is useful for passing N-length generic arrays as generics.
|
||||
pub unsafe trait GenericSequence<T>: Sized + IntoIterator {
|
||||
/// `GenericArray` associated length
|
||||
type Length: ArrayLength<T>;
|
||||
|
||||
/// Concrete sequence type used in conjunction with reference implementations of `GenericSequence`
|
||||
type Sequence: GenericSequence<T, Length = Self::Length> + FromIterator<T>;
|
||||
|
||||
/// Initializes a new sequence instance using the given function.
|
||||
///
|
||||
/// If the generator function panics while initializing the sequence,
|
||||
/// any already initialized elements will be dropped.
|
||||
fn generate<F>(f: F) -> Self::Sequence
|
||||
where
|
||||
F: FnMut(usize) -> T;
|
||||
|
||||
#[doc(hidden)]
|
||||
fn inverted_zip<B, U, F>(
|
||||
self,
|
||||
lhs: GenericArray<B, Self::Length>,
|
||||
mut f: F,
|
||||
) -> MappedSequence<GenericArray<B, Self::Length>, B, U>
|
||||
where
|
||||
GenericArray<B, Self::Length>: GenericSequence<B, Length = Self::Length>
|
||||
+ MappedGenericSequence<B, U>,
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
F: FnMut(B, Self::Item) -> U,
|
||||
{
|
||||
unsafe {
|
||||
let mut left = ArrayConsumer::new(lhs);
|
||||
|
||||
let (left_array_iter, left_position) = left.iter_position();
|
||||
|
||||
FromIterator::from_iter(left_array_iter.zip(self.into_iter()).map(
|
||||
|(l, right_value)| {
|
||||
let left_value = ptr::read(l);
|
||||
|
||||
*left_position += 1;
|
||||
|
||||
f(left_value, right_value)
|
||||
},
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
fn inverted_zip2<B, Lhs, U, F>(self, lhs: Lhs, mut f: F) -> MappedSequence<Lhs, B, U>
|
||||
where
|
||||
Lhs: GenericSequence<B, Length = Self::Length> + MappedGenericSequence<B, U>,
|
||||
Self: MappedGenericSequence<T, U>,
|
||||
Self::Length: ArrayLength<B> + ArrayLength<U>,
|
||||
F: FnMut(Lhs::Item, Self::Item) -> U,
|
||||
{
|
||||
FromIterator::from_iter(lhs.into_iter().zip(self.into_iter()).map(|(l, r)| f(l, r)))
|
||||
}
|
||||
}
|
||||
|
||||
/// Accessor for `GenericSequence` item type, which is really `IntoIterator::Item`
|
||||
///
|
||||
/// For deeply nested generic mapped sequence types, as shown in `tests/generics.rs`,
|
||||
/// this can be useful for keeping things organized.
|
||||
pub type SequenceItem<T> = <T as IntoIterator>::Item;
|
||||
|
||||
unsafe impl<'a, T: 'a, S: GenericSequence<T>> GenericSequence<T> for &'a S
|
||||
where
|
||||
&'a S: IntoIterator,
|
||||
{
|
||||
type Length = S::Length;
|
||||
type Sequence = S::Sequence;
|
||||
|
||||
#[inline]
|
||||
fn generate<F>(f: F) -> Self::Sequence
|
||||
where
|
||||
F: FnMut(usize) -> T,
|
||||
{
|
||||
S::generate(f)
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T: 'a, S: GenericSequence<T>> GenericSequence<T> for &'a mut S
|
||||
where
|
||||
&'a mut S: IntoIterator,
|
||||
{
|
||||
type Length = S::Length;
|
||||
type Sequence = S::Sequence;
|
||||
|
||||
#[inline]
|
||||
fn generate<F>(f: F) -> Self::Sequence
|
||||
where
|
||||
F: FnMut(usize) -> T,
|
||||
{
|
||||
S::generate(f)
|
||||
}
|
||||
}
|
||||
|
||||
/// Defines any `GenericSequence` which can be lengthened or extended by appending
|
||||
/// or prepending an element to it.
|
||||
///
|
||||
/// Any lengthened sequence can be shortened back to the original using `pop_front` or `pop_back`
|
||||
pub unsafe trait Lengthen<T>: Sized + GenericSequence<T> {
|
||||
/// `GenericSequence` that has one more element than `Self`
|
||||
type Longer: Shorten<T, Shorter = Self>;
|
||||
|
||||
/// Returns a new array with the given element appended to the end of it.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```rust
|
||||
/// # use generic_array::{arr, sequence::Lengthen};
|
||||
/// # fn main() {
|
||||
/// let a = arr![i32; 1, 2, 3];
|
||||
///
|
||||
/// let b = a.append(4);
|
||||
///
|
||||
/// assert_eq!(b, arr![i32; 1, 2, 3, 4]);
|
||||
/// # }
|
||||
/// ```
|
||||
fn append(self, last: T) -> Self::Longer;
|
||||
|
||||
/// Returns a new array with the given element prepended to the front of it.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```rust
|
||||
/// # use generic_array::{arr, sequence::Lengthen};
|
||||
/// # fn main() {
|
||||
/// let a = arr![i32; 1, 2, 3];
|
||||
///
|
||||
/// let b = a.prepend(4);
|
||||
///
|
||||
/// assert_eq!(b, arr![i32; 4, 1, 2, 3]);
|
||||
/// # }
|
||||
/// ```
|
||||
fn prepend(self, first: T) -> Self::Longer;
|
||||
}
|
||||
|
||||
/// Defines a `GenericSequence` which can be shortened by removing the first or last element from it.
|
||||
///
|
||||
/// Additionally, any shortened sequence can be lengthened by
|
||||
/// appending or prepending an element to it.
|
||||
pub unsafe trait Shorten<T>: Sized + GenericSequence<T> {
|
||||
/// `GenericSequence` that has one less element than `Self`
|
||||
type Shorter: Lengthen<T, Longer = Self>;
|
||||
|
||||
/// Returns a new array without the last element, and the last element.
|
||||
///
|
||||
/// Example:
|
||||
///
|
||||
/// ```rust
|
||||
/// # use generic_array::{arr, sequence::Shorten};
|
||||
/// # fn main() {
|
||||
/// let a = arr![i32; 1, 2, 3, 4];
|
||||
///
|
||||
/// let (init, last) = a.pop_back();
|
||||
///
|
||||
/// assert_eq!(init, arr![i32; 1, 2, 3]);
|
||||
/// assert_eq!(last, 4);
|
||||
/// # }
|
||||
/// ```
|
||||
fn pop_back(self) -> (Self::Shorter, T);
|
||||
|
||||
/// Returns a new array without the first element, and the first element.
|
||||
/// Example:
|
||||
///
|
||||
/// ```rust
|
||||
/// # use generic_array::{arr, sequence::Shorten};
|
||||
/// # fn main() {
|
||||
/// let a = arr![i32; 1, 2, 3, 4];
|
||||
///
|
||||
/// let (head, tail) = a.pop_front();
|
||||
///
|
||||
/// assert_eq!(head, 1);
|
||||
/// assert_eq!(tail, arr![i32; 2, 3, 4]);
|
||||
/// # }
|
||||
/// ```
|
||||
fn pop_front(self) -> (T, Self::Shorter);
|
||||
}
|
||||
|
||||
unsafe impl<T, N: ArrayLength<T>> Lengthen<T> for GenericArray<T, N>
|
||||
where
|
||||
N: Add<B1>,
|
||||
Add1<N>: ArrayLength<T>,
|
||||
Add1<N>: Sub<B1, Output = N>,
|
||||
Sub1<Add1<N>>: ArrayLength<T>,
|
||||
{
|
||||
type Longer = GenericArray<T, Add1<N>>;
|
||||
|
||||
fn append(self, last: T) -> Self::Longer {
|
||||
let mut longer: MaybeUninit<Self::Longer> = MaybeUninit::uninit();
|
||||
|
||||
// Note this is *mut Self, so add(1) increments by the whole array
|
||||
let out_ptr = longer.as_mut_ptr() as *mut Self;
|
||||
|
||||
unsafe {
|
||||
// write self first
|
||||
ptr::write(out_ptr, self);
|
||||
// increment past self, then write the last
|
||||
ptr::write(out_ptr.add(1) as *mut T, last);
|
||||
|
||||
longer.assume_init()
|
||||
}
|
||||
}
|
||||
|
||||
fn prepend(self, first: T) -> Self::Longer {
|
||||
let mut longer: MaybeUninit<Self::Longer> = MaybeUninit::uninit();
|
||||
|
||||
// Note this is *mut T, so add(1) increments by a single T
|
||||
let out_ptr = longer.as_mut_ptr() as *mut T;
|
||||
|
||||
unsafe {
|
||||
// write the first at the start
|
||||
ptr::write(out_ptr, first);
|
||||
// increment past the first, then write self
|
||||
ptr::write(out_ptr.add(1) as *mut Self, self);
|
||||
|
||||
longer.assume_init()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<T, N: ArrayLength<T>> Shorten<T> for GenericArray<T, N>
|
||||
where
|
||||
N: Sub<B1>,
|
||||
Sub1<N>: ArrayLength<T>,
|
||||
Sub1<N>: Add<B1, Output = N>,
|
||||
Add1<Sub1<N>>: ArrayLength<T>,
|
||||
{
|
||||
type Shorter = GenericArray<T, Sub1<N>>;
|
||||
|
||||
fn pop_back(self) -> (Self::Shorter, T) {
|
||||
let whole = ManuallyDrop::new(self);
|
||||
|
||||
unsafe {
|
||||
let init = ptr::read(whole.as_ptr() as _);
|
||||
let last = ptr::read(whole.as_ptr().add(Sub1::<N>::USIZE) as _);
|
||||
|
||||
(init, last)
|
||||
}
|
||||
}
|
||||
|
||||
fn pop_front(self) -> (T, Self::Shorter) {
|
||||
// ensure this doesn't get dropped
|
||||
let whole = ManuallyDrop::new(self);
|
||||
|
||||
unsafe {
|
||||
let head = ptr::read(whole.as_ptr() as _);
|
||||
let tail = ptr::read(whole.as_ptr().offset(1) as _);
|
||||
|
||||
(head, tail)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Defines a `GenericSequence` that can be split into two parts at a given pivot index.
|
||||
pub unsafe trait Split<T, K>: GenericSequence<T>
|
||||
where
|
||||
K: ArrayLength<T>,
|
||||
{
|
||||
/// First part of the resulting split array
|
||||
type First: GenericSequence<T>;
|
||||
/// Second part of the resulting split array
|
||||
type Second: GenericSequence<T>;
|
||||
|
||||
/// Splits an array at the given index, returning the separate parts of the array.
|
||||
fn split(self) -> (Self::First, Self::Second);
|
||||
}
|
||||
|
||||
unsafe impl<T, N, K> Split<T, K> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
K: ArrayLength<T>,
|
||||
N: Sub<K>,
|
||||
Diff<N, K>: ArrayLength<T>,
|
||||
{
|
||||
type First = GenericArray<T, K>;
|
||||
type Second = GenericArray<T, Diff<N, K>>;
|
||||
|
||||
fn split(self) -> (Self::First, Self::Second) {
|
||||
unsafe {
|
||||
// ensure this doesn't get dropped
|
||||
let whole = ManuallyDrop::new(self);
|
||||
|
||||
let head = ptr::read(whole.as_ptr() as *const _);
|
||||
let tail = ptr::read(whole.as_ptr().add(K::USIZE) as *const _);
|
||||
|
||||
(head, tail)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, N, K> Split<T, K> for &'a GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
K: ArrayLength<T> + 'static,
|
||||
N: Sub<K>,
|
||||
Diff<N, K>: ArrayLength<T>,
|
||||
{
|
||||
type First = &'a GenericArray<T, K>;
|
||||
type Second = &'a GenericArray<T, Diff<N, K>>;
|
||||
|
||||
fn split(self) -> (Self::First, Self::Second) {
|
||||
unsafe {
|
||||
let ptr_to_first: *const T = self.as_ptr();
|
||||
let head = &*(ptr_to_first as *const _);
|
||||
let tail = &*(ptr_to_first.add(K::USIZE) as *const _);
|
||||
(head, tail)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<'a, T, N, K> Split<T, K> for &'a mut GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T>,
|
||||
K: ArrayLength<T> + 'static,
|
||||
N: Sub<K>,
|
||||
Diff<N, K>: ArrayLength<T>,
|
||||
{
|
||||
type First = &'a mut GenericArray<T, K>;
|
||||
type Second = &'a mut GenericArray<T, Diff<N, K>>;
|
||||
|
||||
fn split(self) -> (Self::First, Self::Second) {
|
||||
unsafe {
|
||||
let ptr_to_first: *mut T = self.as_mut_ptr();
|
||||
let head = &mut *(ptr_to_first as *mut _);
|
||||
let tail = &mut *(ptr_to_first.add(K::USIZE) as *mut _);
|
||||
(head, tail)
|
||||
}
|
||||
}
|
||||
}
|
||||
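A brief sketch of `Split` as implemented above (illustrative; the pivot `K` is inferred from the annotated result types rather than passed as a runtime value):

use generic_array::sequence::Split;
use generic_array::typenum::{U2, U3};
use generic_array::{arr, GenericArray};

fn split_at_type_level() {
    let a = arr![i32; 1, 2, 3, 4, 5];

    // K = U2 is chosen by the annotation; the second half has length N - K = U3.
    let (head, tail): (GenericArray<i32, U2>, GenericArray<i32, U3>) = a.split();

    assert_eq!(head, arr![i32; 1, 2]);
    assert_eq!(tail, arr![i32; 3, 4, 5]);
}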
|
||||
/// Defines `GenericSequence`s which can be joined together, forming a larger array.
|
||||
pub unsafe trait Concat<T, M>: GenericSequence<T>
|
||||
where
|
||||
M: ArrayLength<T>,
|
||||
{
|
||||
/// Sequence to be concatenated with `self`
|
||||
type Rest: GenericSequence<T, Length = M>;
|
||||
|
||||
/// Resulting sequence formed by the concatenation.
|
||||
type Output: GenericSequence<T>;
|
||||
|
||||
/// Concatenate, or join, two sequences.
|
||||
fn concat(self, rest: Self::Rest) -> Self::Output;
|
||||
}
|
||||
|
||||
unsafe impl<T, N, M> Concat<T, M> for GenericArray<T, N>
|
||||
where
|
||||
N: ArrayLength<T> + Add<M>,
|
||||
M: ArrayLength<T>,
|
||||
Sum<N, M>: ArrayLength<T>,
|
||||
{
|
||||
type Rest = GenericArray<T, M>;
|
||||
type Output = GenericArray<T, Sum<N, M>>;
|
||||
|
||||
fn concat(self, rest: Self::Rest) -> Self::Output {
|
||||
let mut output: MaybeUninit<Self::Output> = MaybeUninit::uninit();
|
||||
|
||||
let out_ptr = output.as_mut_ptr() as *mut Self;
|
||||
|
||||
unsafe {
|
||||
// write all of self to the pointer
|
||||
ptr::write(out_ptr, self);
|
||||
// increment past self, then write the rest
|
||||
ptr::write(out_ptr.add(1) as *mut _, rest);
|
||||
|
||||
output.assume_init()
|
||||
}
|
||||
}
|
||||
}
|
||||
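And a matching sketch for `Concat` (illustrative, assuming the crate's `arr!` macro): the output length `N + M` is computed at the type level, so no runtime length checks are involved.

use generic_array::arr;
use generic_array::sequence::Concat;

fn concat_two_arrays() {
    let a = arr![u8; 1, 2];
    let b = arr![u8; 3, 4, 5];

    // Output type is GenericArray<u8, Sum<U2, U3>>, i.e. GenericArray<u8, U5>.
    let c = a.concat(b);
    assert_eq!(c, arr![u8; 1, 2, 3, 4, 5]);
}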
|
|
|
@ -1,27 +1,27 @@
|
|||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
|
||||
#[test]
|
||||
fn empty_without_trailing_comma() {
|
||||
let ar = arr![u8; ];
|
||||
assert_eq!(format!("{:x}", ar), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_with_trailing_comma() {
|
||||
let ar = arr![u8; , ];
|
||||
assert_eq!(format!("{:x}", ar), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn without_trailing_comma() {
|
||||
let ar = arr![u8; 10, 20, 30];
|
||||
assert_eq!(format!("{:x}", ar), "0a141e");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn with_trailing_comma() {
|
||||
let ar = arr![u8; 10, 20, 30, ];
|
||||
assert_eq!(format!("{:x}", ar), "0a141e");
|
||||
}
|
||||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
|
||||
#[test]
|
||||
fn empty_without_trailing_comma() {
|
||||
let ar = arr![u8; ];
|
||||
assert_eq!(format!("{:x}", ar), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_with_trailing_comma() {
|
||||
let ar = arr![u8; , ];
|
||||
assert_eq!(format!("{:x}", ar), "");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn without_trailing_comma() {
|
||||
let ar = arr![u8; 10, 20, 30];
|
||||
assert_eq!(format!("{:x}", ar), "0a141e");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn with_trailing_comma() {
|
||||
let ar = arr![u8; 10, 20, 30, ];
|
||||
assert_eq!(format!("{:x}", ar), "0a141e");
|
||||
}
|
||||
|
|
|
@ -1,98 +1,98 @@
|
|||
#![recursion_limit = "128"]
|
||||
|
||||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
|
||||
use generic_array::typenum::consts::U4;
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Add;
|
||||
|
||||
use generic_array::{GenericArray, ArrayLength};
|
||||
use generic_array::sequence::*;
|
||||
use generic_array::functional::*;
|
||||
|
||||
/// Example function using generics to pass N-length sequences and map them
|
||||
pub fn generic_map<S>(s: S)
|
||||
where
|
||||
S: FunctionalSequence<i32>, // `.map`
|
||||
S::Item: Add<i32, Output = i32>, // `x + 1`
|
||||
S: MappedGenericSequence<i32, i32>, // `i32` -> `i32`
|
||||
MappedSequence<S, i32, i32>: Debug, // println!
|
||||
{
|
||||
let a = s.map(|x| x + 1);
|
||||
|
||||
println!("{:?}", a);
|
||||
}
|
||||
|
||||
/// Complex example function using generics to pass N-length sequences, zip them, and then map that result.
|
||||
///
|
||||
/// If used with `GenericArray` specifically this isn't necessary
|
||||
pub fn generic_sequence_zip_sum<A, B>(a: A, b: B) -> i32
|
||||
where
|
||||
A: FunctionalSequence<i32>, // `.zip`
|
||||
B: FunctionalSequence<i32, Length = A::Length>, // `.zip`
|
||||
A: MappedGenericSequence<i32, i32>, // `i32` -> `i32`
|
||||
B: MappedGenericSequence<i32, i32, Mapped = MappedSequence<A, i32, i32>>, // `i32` -> `i32`, prove A and B can map to the same output
|
||||
A::Item: Add<B::Item, Output = i32>, // `l + r`
|
||||
MappedSequence<A, i32, i32>: MappedGenericSequence<i32, i32> + FunctionalSequence<i32>, // `.map`
|
||||
SequenceItem<MappedSequence<A, i32, i32>>: Add<i32, Output=i32>, // `x + 1`
|
||||
MappedSequence<MappedSequence<A, i32, i32>, i32, i32>: Debug, // `println!`
|
||||
MappedSequence<MappedSequence<A, i32, i32>, i32, i32>: FunctionalSequence<i32>, // `.fold`
|
||||
SequenceItem<MappedSequence<MappedSequence<A, i32, i32>, i32, i32>>: Add<i32, Output=i32> // `x + a`, note the order
|
||||
{
|
||||
let c = a.zip(b, |l, r| l + r).map(|x| x + 1);
|
||||
|
||||
println!("{:?}", c);
|
||||
|
||||
c.fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
/// Super-simple fixed-length i32 `GenericArray`s
|
||||
pub fn generic_array_plain_zip_sum(a: GenericArray<i32, U4>, b: GenericArray<i32, U4>) -> i32 {
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
pub fn generic_array_variable_length_zip_sum<N>(a: GenericArray<i32, N>, b: GenericArray<i32, N>) -> i32
|
||||
where
|
||||
N: ArrayLength<i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
pub fn generic_array_same_type_variable_length_zip_sum<T, N>(a: GenericArray<T, N>, b: GenericArray<T, N>) -> i32
|
||||
where
|
||||
N: ArrayLength<T> + ArrayLength<<T as Add<T>>::Output>,
|
||||
T: Add<T, Output=i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
/// Complex example using fully generic `GenericArray`s with the same length.
|
||||
///
|
||||
/// It's mostly just the repeated `Add` traits, which would be present in other systems anyway.
|
||||
pub fn generic_array_zip_sum<A, B, N: ArrayLength<A> + ArrayLength<B>>(a: GenericArray<A, N>, b: GenericArray<B, N>) -> i32
|
||||
where
|
||||
A: Add<B>,
|
||||
N: ArrayLength<<A as Add<B>>::Output> +
|
||||
ArrayLength<<<A as Add<B>>::Output as Add<i32>>::Output>,
|
||||
<A as Add<B>>::Output: Add<i32>,
|
||||
<<A as Add<B>>::Output as Add<i32>>::Output: Add<i32, Output=i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generics() {
|
||||
generic_map(arr![i32; 1, 2, 3, 4]);
|
||||
|
||||
assert_eq!(generic_sequence_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_plain_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_variable_length_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_same_type_variable_length_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
#![recursion_limit = "128"]
|
||||
|
||||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
|
||||
use generic_array::typenum::consts::U4;
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::ops::Add;
|
||||
|
||||
use generic_array::{GenericArray, ArrayLength};
|
||||
use generic_array::sequence::*;
|
||||
use generic_array::functional::*;
|
||||
|
||||
/// Example function using generics to pass N-length sequences and map them
|
||||
pub fn generic_map<S>(s: S)
|
||||
where
|
||||
S: FunctionalSequence<i32>, // `.map`
|
||||
S::Item: Add<i32, Output = i32>, // `x + 1`
|
||||
S: MappedGenericSequence<i32, i32>, // `i32` -> `i32`
|
||||
MappedSequence<S, i32, i32>: Debug, // println!
|
||||
{
|
||||
let a = s.map(|x| x + 1);
|
||||
|
||||
println!("{:?}", a);
|
||||
}
|
||||
|
||||
/// Complex example function using generics to pass N-length sequences, zip them, and then map that result.
|
||||
///
|
||||
/// If used with `GenericArray` specifically this isn't necessary
|
||||
pub fn generic_sequence_zip_sum<A, B>(a: A, b: B) -> i32
|
||||
where
|
||||
A: FunctionalSequence<i32>, // `.zip`
|
||||
B: FunctionalSequence<i32, Length = A::Length>, // `.zip`
|
||||
A: MappedGenericSequence<i32, i32>, // `i32` -> `i32`
|
||||
B: MappedGenericSequence<i32, i32, Mapped = MappedSequence<A, i32, i32>>, // `i32` -> `i32`, prove A and B can map to the same output
|
||||
A::Item: Add<B::Item, Output = i32>, // `l + r`
|
||||
MappedSequence<A, i32, i32>: MappedGenericSequence<i32, i32> + FunctionalSequence<i32>, // `.map`
|
||||
SequenceItem<MappedSequence<A, i32, i32>>: Add<i32, Output=i32>, // `x + 1`
|
||||
MappedSequence<MappedSequence<A, i32, i32>, i32, i32>: Debug, // `println!`
|
||||
MappedSequence<MappedSequence<A, i32, i32>, i32, i32>: FunctionalSequence<i32>, // `.fold`
|
||||
SequenceItem<MappedSequence<MappedSequence<A, i32, i32>, i32, i32>>: Add<i32, Output=i32> // `x + a`, note the order
|
||||
{
|
||||
let c = a.zip(b, |l, r| l + r).map(|x| x + 1);
|
||||
|
||||
println!("{:?}", c);
|
||||
|
||||
c.fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
/// Super-simple fixed-length i32 `GenericArray`s
|
||||
pub fn generic_array_plain_zip_sum(a: GenericArray<i32, U4>, b: GenericArray<i32, U4>) -> i32 {
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
pub fn generic_array_variable_length_zip_sum<N>(a: GenericArray<i32, N>, b: GenericArray<i32, N>) -> i32
|
||||
where
|
||||
N: ArrayLength<i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
pub fn generic_array_same_type_variable_length_zip_sum<T, N>(a: GenericArray<T, N>, b: GenericArray<T, N>) -> i32
|
||||
where
|
||||
N: ArrayLength<T> + ArrayLength<<T as Add<T>>::Output>,
|
||||
T: Add<T, Output=i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
/// Complex example using fully generic `GenericArray`s with the same length.
|
||||
///
|
||||
/// It's mostly just the repeated `Add` traits, which would be present in other systems anyway.
|
||||
pub fn generic_array_zip_sum<A, B, N: ArrayLength<A> + ArrayLength<B>>(a: GenericArray<A, N>, b: GenericArray<B, N>) -> i32
|
||||
where
|
||||
A: Add<B>,
|
||||
N: ArrayLength<<A as Add<B>>::Output> +
|
||||
ArrayLength<<<A as Add<B>>::Output as Add<i32>>::Output>,
|
||||
<A as Add<B>>::Output: Add<i32>,
|
||||
<<A as Add<B>>::Output as Add<i32>>::Output: Add<i32, Output=i32>,
|
||||
{
|
||||
a.zip(b, |l, r| l + r).map(|x| x + 1).fold(0, |a, x| x + a)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_generics() {
|
||||
generic_map(arr![i32; 1, 2, 3, 4]);
|
||||
|
||||
assert_eq!(generic_sequence_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_plain_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_variable_length_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_same_type_variable_length_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
|
||||
assert_eq!(generic_array_zip_sum(arr![i32; 1, 2, 3, 4], arr![i32; 2, 3, 4, 5]), 28);
|
||||
}
|
|
@ -1,61 +1,61 @@
|
|||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
extern crate typenum;
|
||||
|
||||
use generic_array::GenericArray;
|
||||
use std::str::from_utf8;
|
||||
use typenum::U2048;
|
||||
|
||||
#[test]
|
||||
fn short_lower_hex() {
|
||||
let ar = arr![u8; 10, 20, 30];
|
||||
assert_eq!(format!("{:x}", ar), "0a141e");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn short_upper_hex() {
|
||||
let ar = arr![u8; 30, 20, 10];
|
||||
assert_eq!(format!("{:X}", ar), "1E140A");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn long_lower_hex() {
|
||||
let ar = GenericArray::<u8, U2048>::default();
|
||||
assert_eq!(format!("{:x}", ar), from_utf8(&[b'0'; 4096]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn long_lower_hex_truncated() {
|
||||
let ar = GenericArray::<u8, U2048>::default();
|
||||
assert_eq!(format!("{:.3001x}", ar), from_utf8(&[b'0'; 3001]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn long_upper_hex() {
|
||||
let ar = GenericArray::<u8, U2048>::default();
|
||||
assert_eq!(format!("{:X}", ar), from_utf8(&[b'0'; 4096]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn long_upper_hex_truncated() {
|
||||
let ar = GenericArray::<u8, U2048>::default();
|
||||
assert_eq!(format!("{:.2777X}", ar), from_utf8(&[b'0'; 2777]).unwrap());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn truncated_lower_hex() {
|
||||
let ar = arr![u8; 10, 20, 30, 40, 50];
|
||||
assert_eq!(format!("{:.2x}", ar), "0a");
|
||||
assert_eq!(format!("{:.3x}", ar), "0a1");
|
||||
assert_eq!(format!("{:.4x}", ar), "0a14");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn truncated_upper_hex() {
|
||||
let ar = arr![u8; 30, 20, 10, 17, 0];
|
||||
assert_eq!(format!("{:.4X}", ar), "1E14");
|
||||
assert_eq!(format!("{:.5X}", ar), "1E140");
|
||||
assert_eq!(format!("{:.6X}", ar), "1E140A");
|
||||
assert_eq!(format!("{:.7X}", ar), "1E140A1");
|
||||
assert_eq!(format!("{:.8X}", ar), "1E140A11");
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
#[macro_use]
|
||||
extern crate generic_array as gen_arr;
|
||||
|
||||
use gen_arr::typenum;
|
||||
|
||||
#[test]
|
||||
fn test_different_crate_name() {
|
||||
let _: gen_arr::GenericArray<u32, typenum::U4> = arr![u32; 0, 1, 2, 3];
|
||||
let _: gen_arr::GenericArray<u32, typenum::U0> = arr![u32;];
|
||||
}
|
||||
|
|
|
@ -1,164 +1,199 @@
|
|||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::ops::Drop;
|
||||
|
||||
use generic_array::GenericArray;
|
||||
use generic_array::typenum::consts::U5;
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_as_slice() {
|
||||
let array = arr![char; 'a', 'b', 'c'];
|
||||
let mut into_iter = array.into_iter();
|
||||
assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
|
||||
let _ = into_iter.next().unwrap();
|
||||
assert_eq!(into_iter.as_slice(), &['b', 'c']);
|
||||
let _ = into_iter.next().unwrap();
|
||||
let _ = into_iter.next().unwrap();
|
||||
assert_eq!(into_iter.as_slice(), &[]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_as_mut_slice() {
|
||||
let array = arr![char; 'a', 'b', 'c'];
|
||||
let mut into_iter = array.into_iter();
|
||||
assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
|
||||
into_iter.as_mut_slice()[0] = 'x';
|
||||
into_iter.as_mut_slice()[1] = 'y';
|
||||
assert_eq!(into_iter.next().unwrap(), 'x');
|
||||
assert_eq!(into_iter.as_slice(), &['y', 'c']);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_debug() {
|
||||
let array = arr![char; 'a', 'b', 'c'];
|
||||
let into_iter = array.into_iter();
|
||||
let debug = format!("{:?}", into_iter);
|
||||
assert_eq!(debug, "GenericArrayIter(['a', 'b', 'c'])");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_clone() {
|
||||
fn iter_equal<I: Iterator<Item = i32>>(it: I, slice: &[i32]) {
|
||||
let v: Vec<i32> = it.collect();
|
||||
assert_eq!(&v[..], slice);
|
||||
}
|
||||
let mut it = arr![i32; 1, 2, 3].into_iter();
|
||||
iter_equal(it.clone(), &[1, 2, 3]);
|
||||
assert_eq!(it.next(), Some(1));
|
||||
let mut it = it.rev();
|
||||
iter_equal(it.clone(), &[3, 2]);
|
||||
assert_eq!(it.next(), Some(3));
|
||||
iter_equal(it.clone(), &[2]);
|
||||
assert_eq!(it.next(), Some(2));
|
||||
iter_equal(it.clone(), &[]);
|
||||
assert_eq!(it.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_nth() {
|
||||
let v = arr![i32; 0, 1, 2, 3, 4];
|
||||
for i in 0..v.len() {
|
||||
assert_eq!(v.clone().into_iter().nth(i).unwrap(), v[i]);
|
||||
}
|
||||
assert_eq!(v.clone().into_iter().nth(v.len()), None);
|
||||
|
||||
let mut iter = v.into_iter();
|
||||
assert_eq!(iter.nth(2).unwrap(), v[2]);
|
||||
assert_eq!(iter.nth(1).unwrap(), v[4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_last() {
|
||||
let v = arr![i32; 0, 1, 2, 3, 4];
|
||||
assert_eq!(v.into_iter().last().unwrap(), 4);
|
||||
assert_eq!(arr![i32; 0].into_iter().last().unwrap(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_count() {
|
||||
let v = arr![i32; 0, 1, 2, 3, 4];
|
||||
assert_eq!(v.clone().into_iter().count(), 5);
|
||||
|
||||
let mut iter2 = v.into_iter();
|
||||
iter2.next();
|
||||
iter2.next();
|
||||
assert_eq!(iter2.count(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_flat_map() {
|
||||
assert!((0..5).flat_map(|i| arr![i32; 2 * i, 2 * i + 1]).eq(0..10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_drops() {
|
||||
struct R<'a> {
|
||||
i: &'a Cell<usize>,
|
||||
}
|
||||
|
||||
impl<'a> Drop for R<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.i.set(self.i.get() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
fn r(i: &Cell<usize>) -> R {
|
||||
R {
|
||||
i: i
|
||||
}
|
||||
}
|
||||
|
||||
fn v(i: &Cell<usize>) -> GenericArray<R, U5> {
|
||||
arr![R; r(i), r(i), r(i), r(i), r(i)]
|
||||
}
|
||||
|
||||
let i = Cell::new(0);
|
||||
{
|
||||
v(&i).into_iter();
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
{
|
||||
let mut iter = v(&i).into_iter();
|
||||
let _x = iter.next();
|
||||
assert_eq!(i.get(), 0);
|
||||
assert_eq!(iter.count(), 4);
|
||||
assert_eq!(i.get(), 4);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
{
|
||||
let mut iter = v(&i).into_iter();
|
||||
let _x = iter.nth(2);
|
||||
assert_eq!(i.get(), 2);
|
||||
let _y = iter.last();
|
||||
assert_eq!(i.get(), 3);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
for (index, _x) in v(&i).into_iter().enumerate() {
|
||||
assert_eq!(i.get(), index);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
for (index, _x) in v(&i).into_iter().rev().enumerate() {
|
||||
assert_eq!(i.get(), index);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
}
|
||||
|
||||
/*
|
||||
//TODO: Cover this
|
||||
#[allow(dead_code)]
|
||||
fn assert_covariance() {
|
||||
fn into_iter<'new>(i: GenericArrayIter<&'static str, U10>) -> GenericArrayIter<&'new str, U10> {
|
||||
i
|
||||
}
|
||||
}
|
||||
*/
|
||||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::ops::Drop;
|
||||
|
||||
use generic_array::typenum::consts::U5;
|
||||
use generic_array::GenericArray;
|
||||
|
||||
#[test]
|
||||
fn test_from_iterator() {
|
||||
struct BadExact(usize);
|
||||
|
||||
impl Iterator for BadExact {
|
||||
type Item = usize;
|
||||
fn next(&mut self) -> Option<usize> {
|
||||
if self.0 == 1 {
|
||||
return None;
|
||||
}
|
||||
self.0 -= 1;
|
||||
Some(self.0)
|
||||
}
|
||||
}
|
||||
impl ExactSizeIterator for BadExact {
|
||||
fn len(&self) -> usize { self.0 }
|
||||
}
|
||||
assert!(GenericArray::<usize, U5>::from_exact_iter(BadExact(5)).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_as_slice() {
|
||||
let array = arr![char; 'a', 'b', 'c'];
|
||||
let mut into_iter = array.into_iter();
|
||||
assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
|
||||
let _ = into_iter.next().unwrap();
|
||||
assert_eq!(into_iter.as_slice(), &['b', 'c']);
|
||||
let _ = into_iter.next().unwrap();
|
||||
let _ = into_iter.next().unwrap();
|
||||
assert_eq!(into_iter.as_slice(), &[]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_as_mut_slice() {
|
||||
let array = arr![char; 'a', 'b', 'c'];
|
||||
let mut into_iter = array.into_iter();
|
||||
assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
|
||||
into_iter.as_mut_slice()[0] = 'x';
|
||||
into_iter.as_mut_slice()[1] = 'y';
|
||||
assert_eq!(into_iter.next().unwrap(), 'x');
|
||||
assert_eq!(into_iter.as_slice(), &['y', 'c']);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_debug() {
|
||||
let array = arr![char; 'a', 'b', 'c'];
|
||||
let into_iter = array.into_iter();
|
||||
let debug = format!("{:?}", into_iter);
|
||||
assert_eq!(debug, "GenericArrayIter(['a', 'b', 'c'])");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_clone() {
|
||||
fn iter_equal<I: Iterator<Item = i32>>(it: I, slice: &[i32]) {
|
||||
let v: Vec<i32> = it.collect();
|
||||
assert_eq!(&v[..], slice);
|
||||
}
|
||||
let mut it = arr![i32; 1, 2, 3].into_iter();
|
||||
iter_equal(it.clone(), &[1, 2, 3]);
|
||||
assert_eq!(it.next(), Some(1));
|
||||
let mut it = it.rev();
|
||||
iter_equal(it.clone(), &[3, 2]);
|
||||
assert_eq!(it.next(), Some(3));
|
||||
iter_equal(it.clone(), &[2]);
|
||||
assert_eq!(it.next(), Some(2));
|
||||
iter_equal(it.clone(), &[]);
|
||||
assert_eq!(it.next(), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_nth() {
|
||||
let v = arr![i32; 0, 1, 2, 3, 4];
|
||||
for i in 0..v.len() {
|
||||
assert_eq!(v.clone().into_iter().nth(i).unwrap(), v[i]);
|
||||
}
|
||||
assert_eq!(v.clone().into_iter().nth(v.len()), None);
|
||||
|
||||
let mut iter = v.into_iter();
|
||||
assert_eq!(iter.nth(2).unwrap(), v[2]);
|
||||
assert_eq!(iter.nth(1).unwrap(), v[4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_last() {
|
||||
let v = arr![i32; 0, 1, 2, 3, 4];
|
||||
assert_eq!(v.into_iter().last().unwrap(), 4);
|
||||
assert_eq!(arr![i32; 0].into_iter().last().unwrap(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_count() {
|
||||
let v = arr![i32; 0, 1, 2, 3, 4];
|
||||
assert_eq!(v.clone().into_iter().count(), 5);
|
||||
|
||||
let mut iter2 = v.into_iter();
|
||||
iter2.next();
|
||||
iter2.next();
|
||||
assert_eq!(iter2.count(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_flat_map() {
|
||||
assert!((0..5).flat_map(|i| arr![i32; 2 * i, 2 * i + 1]).eq(0..10));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_fold() {
|
||||
assert_eq!(
|
||||
arr![i32; 1, 2, 3, 4].into_iter().fold(0, |sum, x| sum + x),
|
||||
10
|
||||
);
|
||||
|
||||
let mut iter = arr![i32; 0, 1, 2, 3, 4, 5].into_iter();
|
||||
|
||||
iter.next();
|
||||
iter.next_back();
|
||||
|
||||
assert_eq!(iter.clone().fold(0, |sum, x| sum + x), 10);
|
||||
|
||||
assert_eq!(iter.rfold(0, |sum, x| sum + x), 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_into_iter_drops() {
|
||||
struct R<'a> {
|
||||
i: &'a Cell<usize>,
|
||||
}
|
||||
|
||||
impl<'a> Drop for R<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.i.set(self.i.get() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
fn r(i: &Cell<usize>) -> R {
|
||||
R { i: i }
|
||||
}
|
||||
|
||||
fn v(i: &Cell<usize>) -> GenericArray<R, U5> {
|
||||
arr![R; r(i), r(i), r(i), r(i), r(i)]
|
||||
}
|
||||
|
||||
let i = Cell::new(0);
|
||||
{
|
||||
v(&i).into_iter();
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
{
|
||||
let mut iter = v(&i).into_iter();
|
||||
let _x = iter.next();
|
||||
assert_eq!(i.get(), 0);
|
||||
assert_eq!(iter.count(), 4);
|
||||
assert_eq!(i.get(), 4);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
{
|
||||
let mut iter = v(&i).into_iter();
|
||||
let _x = iter.nth(2);
|
||||
assert_eq!(i.get(), 2);
|
||||
let _y = iter.last();
|
||||
assert_eq!(i.get(), 3);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
for (index, _x) in v(&i).into_iter().enumerate() {
|
||||
assert_eq!(i.get(), index);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
|
||||
let i = Cell::new(0);
|
||||
for (index, _x) in v(&i).into_iter().rev().enumerate() {
|
||||
assert_eq!(i.get(), index);
|
||||
}
|
||||
assert_eq!(i.get(), 5);
|
||||
}
|
||||
|
||||
/*
|
||||
//TODO: Cover this
|
||||
#[allow(dead_code)]
|
||||
fn assert_covariance() {
|
||||
fn into_iter<'new>(i: GenericArrayIter<&'static str, U10>) -> GenericArrayIter<&'new str, U10> {
|
||||
i
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
|
|
@ -1,287 +1,379 @@
|
|||
#![recursion_limit = "128"]
|
||||
#![no_std]
|
||||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
use core::cell::Cell;
|
||||
use core::ops::{Add, Drop};
|
||||
use generic_array::GenericArray;
|
||||
use generic_array::functional::*;
|
||||
use generic_array::sequence::*;
|
||||
use generic_array::typenum::{U1, U3, U4, U97};
|
||||
|
||||
#[test]
|
||||
fn test() {
|
||||
let mut list97 = [0; 97];
|
||||
for i in 0..97 {
|
||||
list97[i] = i as i32;
|
||||
}
|
||||
let l: GenericArray<i32, U97> = GenericArray::clone_from_slice(&list97);
|
||||
assert_eq!(l[0], 0);
|
||||
assert_eq!(l[1], 1);
|
||||
assert_eq!(l[32], 32);
|
||||
assert_eq!(l[56], 56);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drop() {
|
||||
#[derive(Clone)]
|
||||
struct TestDrop<'a>(&'a Cell<u32>);
|
||||
|
||||
impl<'a> Drop for TestDrop<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.0.set(self.0.get() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
let drop_counter = Cell::new(0);
|
||||
{
|
||||
let _: GenericArray<TestDrop, U3> = arr![TestDrop; TestDrop(&drop_counter),
|
||||
TestDrop(&drop_counter),
|
||||
TestDrop(&drop_counter)];
|
||||
}
|
||||
assert_eq!(drop_counter.get(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_arr() {
|
||||
let test: GenericArray<u32, U3> = arr![u32; 1, 2, 3];
|
||||
assert_eq!(test[1], 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_copy() {
|
||||
let test = arr![u32; 1, 2, 3];
|
||||
let test2 = test;
|
||||
// if GenericArray is not copy, this should fail as a use of a moved value
|
||||
assert_eq!(test[1], 2);
|
||||
assert_eq!(test2[0], 1);
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NoClone<T>(T);
|
||||
|
||||
#[test]
|
||||
fn test_from_slice() {
|
||||
let arr = [1, 2, 3, 4];
|
||||
let gen_arr = GenericArray::<_, U3>::from_slice(&arr[..3]);
|
||||
assert_eq!(&arr[..3], gen_arr.as_slice());
|
||||
let arr = [NoClone(1u32), NoClone(2), NoClone(3), NoClone(4)];
|
||||
let gen_arr = GenericArray::<_, U3>::from_slice(&arr[..3]);
|
||||
assert_eq!(&arr[..3], gen_arr.as_slice());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_mut_slice() {
|
||||
let mut arr = [1, 2, 3, 4];
|
||||
{
|
||||
let gen_arr = GenericArray::<_, U3>::from_mut_slice(&mut arr[..3]);
|
||||
gen_arr[2] = 10;
|
||||
}
|
||||
assert_eq!(arr, [1, 2, 10, 4]);
|
||||
let mut arr = [NoClone(1u32), NoClone(2), NoClone(3), NoClone(4)];
|
||||
{
|
||||
let gen_arr = GenericArray::<_, U3>::from_mut_slice(&mut arr[..3]);
|
||||
gen_arr[2] = NoClone(10);
|
||||
}
|
||||
assert_eq!(arr, [NoClone(1), NoClone(2), NoClone(10), NoClone(4)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default() {
|
||||
let arr = GenericArray::<u8, U1>::default();
|
||||
assert_eq!(arr[0], 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from() {
|
||||
let data = [(1, 2, 3), (4, 5, 6), (7, 8, 9)];
|
||||
let garray: GenericArray<(usize, usize, usize), U3> = data.into();
|
||||
assert_eq!(&data, garray.as_slice());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unit_macro() {
|
||||
let arr = arr![f32; 3.14];
|
||||
assert_eq!(arr[0], 3.14);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_macro() {
|
||||
let _arr = arr![f32;];
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cmp() {
|
||||
arr![u8; 0x00].cmp(&arr![u8; 0x00]);
|
||||
}
|
||||
|
||||
/// This test should cause a helpful compile error if uncommented.
|
||||
// #[test]
|
||||
// fn test_empty_macro2(){
|
||||
// let arr = arr![];
|
||||
// }
|
||||
#[cfg(feature = "serde")]
|
||||
mod impl_serde {
|
||||
extern crate serde_json;
|
||||
|
||||
use generic_array::GenericArray;
|
||||
use generic_array::typenum::U6;
|
||||
|
||||
#[test]
|
||||
fn test_serde_implementation() {
|
||||
let array: GenericArray<f64, U6> = arr![f64; 0.0, 5.0, 3.0, 7.07192, 76.0, -9.0];
|
||||
let string = serde_json::to_string(&array).unwrap();
|
||||
assert_eq!(string, "[0.0,5.0,3.0,7.07192,76.0,-9.0]");
|
||||
|
||||
let test_array: GenericArray<f64, U6> = serde_json::from_str(&string).unwrap();
|
||||
assert_eq!(test_array, array);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_map() {
|
||||
let b: GenericArray<i32, U4> = GenericArray::generate(|i| i as i32 * 4).map(|x| x - 3);
|
||||
|
||||
assert_eq!(b, arr![i32; -3, 1, 5, 9]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zip() {
|
||||
let a: GenericArray<_, U4> = GenericArray::generate(|i| i + 1);
|
||||
let b: GenericArray<_, U4> = GenericArray::generate(|i| i as i32 * 4);
|
||||
|
||||
// Uses reference and non-reference arguments
|
||||
let c = (&a).zip(b, |r, l| *r as i32 + l);
|
||||
|
||||
assert_eq!(c, arr![i32; 1, 6, 11, 16]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_from_iter_short() {
|
||||
use core::iter::repeat;
|
||||
|
||||
let a: GenericArray<_, U4> = repeat(11).take(3).collect();
|
||||
|
||||
assert_eq!(a, arr![i32; 11, 11, 11, 0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_iter() {
|
||||
use core::iter::{once, repeat};
|
||||
|
||||
let a: GenericArray<_, U4> = repeat(11).take(3).chain(once(0)).collect();
|
||||
|
||||
assert_eq!(a, arr![i32; 11, 11, 11, 0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sizes() {
|
||||
#![allow(dead_code)]
|
||||
use core::mem::{size_of, size_of_val};
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
#[repr(C)]
|
||||
#[repr(packed)]
|
||||
struct Test {
|
||||
t: u16,
|
||||
s: u32,
|
||||
r: u16,
|
||||
f: u16,
|
||||
o: u32,
|
||||
}
|
||||
|
||||
assert_eq!(size_of::<Test>(), 14);
|
||||
|
||||
assert_eq!(size_of_val(&arr![u8; 1, 2, 3]), size_of::<u8>() * 3);
|
||||
assert_eq!(size_of_val(&arr![u32; 1]), size_of::<u32>() * 1);
|
||||
assert_eq!(size_of_val(&arr![u64; 1, 2, 3, 4]), size_of::<u64>() * 4);
|
||||
|
||||
assert_eq!(size_of::<GenericArray<Test, U97>>(), size_of::<Test>() * 97);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_append() {
|
||||
let a = arr![i32; 1, 2, 3];
|
||||
|
||||
let b = a.append(4);
|
||||
|
||||
assert_eq!(b, arr![i32; 1, 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prepend() {
|
||||
let a = arr![i32; 1, 2, 3];
|
||||
|
||||
let b = a.prepend(4);
|
||||
|
||||
assert_eq!(b, arr![i32; 4, 1, 2, 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pop() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
let (init, last) = a.pop_back();
|
||||
|
||||
assert_eq!(init, arr![i32; 1, 2, 3]);
|
||||
assert_eq!(last, 4);
|
||||
|
||||
let (head, tail) = a.pop_front();
|
||||
|
||||
assert_eq!(head, 1);
|
||||
assert_eq!(tail, arr![i32; 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
let (b, c) = a.split();
|
||||
|
||||
assert_eq!(b, arr![i32; 1]);
|
||||
assert_eq!(c, arr![i32; 2, 3, 4]);
|
||||
|
||||
let (e, f) = a.split();
|
||||
|
||||
assert_eq!(e, arr![i32; 1, 2]);
|
||||
assert_eq!(f, arr![i32; 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_concat() {
|
||||
let a = arr![i32; 1, 2];
|
||||
let b = arr![i32; 3, 4];
|
||||
|
||||
let c = a.concat(b);
|
||||
|
||||
assert_eq!(c, arr![i32; 1, 2, 3, 4]);
|
||||
|
||||
let (d, e) = c.split();
|
||||
|
||||
assert_eq!(d, arr![i32; 1]);
|
||||
assert_eq!(e, arr![i32; 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fold() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
assert_eq!(10, a.fold(0, |a, x| a + x));
|
||||
}
|
||||
|
||||
fn sum_generic<S>(s: S) -> i32
|
||||
where
|
||||
S: FunctionalSequence<i32>,
|
||||
S::Item: Add<i32, Output = i32>, // `+`
|
||||
i32: Add<S::Item, Output = i32>, // reflexive
|
||||
{
|
||||
s.fold(0, |a, x| a + x)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum() {
|
||||
let a = sum_generic(arr![i32; 1, 2, 3, 4]);
|
||||
|
||||
assert_eq!(a, 10);
|
||||
}
|
||||
#![recursion_limit = "128"]
|
||||
#![no_std]
|
||||
#[macro_use]
|
||||
extern crate generic_array;
|
||||
use core::cell::Cell;
|
||||
use core::ops::{Add, Drop};
|
||||
use generic_array::functional::*;
|
||||
use generic_array::sequence::*;
|
||||
use generic_array::typenum::{U0, U3, U4, U97};
|
||||
use generic_array::GenericArray;
|
||||
|
||||
#[test]
|
||||
fn test() {
|
||||
let mut list97 = [0; 97];
|
||||
for i in 0..97 {
|
||||
list97[i] = i as i32;
|
||||
}
|
||||
let l: GenericArray<i32, U97> = GenericArray::clone_from_slice(&list97);
|
||||
assert_eq!(l[0], 0);
|
||||
assert_eq!(l[1], 1);
|
||||
assert_eq!(l[32], 32);
|
||||
assert_eq!(l[56], 56);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_drop() {
|
||||
#[derive(Clone)]
|
||||
struct TestDrop<'a>(&'a Cell<u32>);
|
||||
|
||||
impl<'a> Drop for TestDrop<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.0.set(self.0.get() + 1);
|
||||
}
|
||||
}
|
||||
|
||||
let drop_counter = Cell::new(0);
|
||||
{
|
||||
let _: GenericArray<TestDrop, U3> = arr![TestDrop; TestDrop(&drop_counter),
|
||||
TestDrop(&drop_counter),
|
||||
TestDrop(&drop_counter)];
|
||||
}
|
||||
assert_eq!(drop_counter.get(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_arr() {
|
||||
let test: GenericArray<u32, U3> = arr![u32; 1, 2, 3];
|
||||
assert_eq!(test[1], 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_copy() {
|
||||
let test = arr![u32; 1, 2, 3];
|
||||
let test2 = test;
|
||||
// if GenericArray is not copy, this should fail as a use of a moved value
|
||||
assert_eq!(test[1], 2);
|
||||
assert_eq!(test2[0], 1);
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
struct NoClone<T>(T);
|
||||
|
||||
#[test]
|
||||
fn test_from_slice() {
|
||||
let arr = [1, 2, 3, 4];
|
||||
let gen_arr = GenericArray::<_, U3>::from_slice(&arr[..3]);
|
||||
assert_eq!(&arr[..3], gen_arr.as_slice());
|
||||
let arr = [NoClone(1u32), NoClone(2), NoClone(3), NoClone(4)];
|
||||
let gen_arr = GenericArray::<_, U3>::from_slice(&arr[..3]);
|
||||
assert_eq!(&arr[..3], gen_arr.as_slice());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_mut_slice() {
|
||||
let mut arr = [1, 2, 3, 4];
|
||||
{
|
||||
let gen_arr = GenericArray::<_, U3>::from_mut_slice(&mut arr[..3]);
|
||||
gen_arr[2] = 10;
|
||||
}
|
||||
assert_eq!(arr, [1, 2, 10, 4]);
|
||||
let mut arr = [NoClone(1u32), NoClone(2), NoClone(3), NoClone(4)];
|
||||
{
|
||||
let gen_arr = GenericArray::<_, U3>::from_mut_slice(&mut arr[..3]);
|
||||
gen_arr[2] = NoClone(10);
|
||||
}
|
||||
assert_eq!(arr, [NoClone(1), NoClone(2), NoClone(10), NoClone(4)]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default() {
|
||||
let arr = GenericArray::<u8, U4>::default();
|
||||
assert_eq!(arr.as_slice(), &[0, 0, 0, 0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from() {
|
||||
let data = [(1, 2, 3), (4, 5, 6), (7, 8, 9)];
|
||||
let garray: GenericArray<(usize, usize, usize), U3> = data.into();
|
||||
assert_eq!(&data, garray.as_slice());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unit_macro() {
|
||||
let arr = arr![f32; 3.14];
|
||||
assert_eq!(arr[0], 3.14);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_macro() {
|
||||
let _arr = arr![f32;];
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cmp() {
|
||||
let _ = arr![u8; 0x00].cmp(&arr![u8; 0x00]);
|
||||
}
|
||||
|
||||
/// This test should cause a helpful compile error if uncommented.
|
||||
// #[test]
|
||||
// fn test_empty_macro2(){
|
||||
// let arr = arr![];
|
||||
// }
|
||||
#[cfg(feature = "serde")]
|
||||
mod impl_serde {
|
||||
extern crate serde_json;
|
||||
|
||||
use generic_array::typenum::U6;
|
||||
use generic_array::GenericArray;
|
||||
|
||||
#[test]
|
||||
fn test_serde_implementation() {
|
||||
let array: GenericArray<f64, U6> = arr![f64; 0.0, 5.0, 3.0, 7.07192, 76.0, -9.0];
|
||||
let string = serde_json::to_string(&array).unwrap();
|
||||
assert_eq!(string, "[0.0,5.0,3.0,7.07192,76.0,-9.0]");
|
||||
|
||||
let test_array: GenericArray<f64, U6> = serde_json::from_str(&string).unwrap();
|
||||
assert_eq!(test_array, array);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_map() {
|
||||
let b: GenericArray<i32, U4> = GenericArray::generate(|i| i as i32 * 4).map(|x| x - 3);
|
||||
|
||||
assert_eq!(b, arr![i32; -3, 1, 5, 9]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zip() {
|
||||
let a: GenericArray<_, U4> = GenericArray::generate(|i| i + 1);
|
||||
let b: GenericArray<_, U4> = GenericArray::generate(|i| i as i32 * 4);
|
||||
|
||||
// Uses reference and non-reference arguments
|
||||
let c = (&a).zip(b, |r, l| *r as i32 + l);
|
||||
|
||||
assert_eq!(c, arr![i32; 1, 6, 11, 16]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[should_panic]
|
||||
fn test_from_iter_short() {
|
||||
use core::iter::repeat;
|
||||
|
||||
let a: GenericArray<_, U4> = repeat(11).take(3).collect();
|
||||
|
||||
assert_eq!(a, arr![i32; 11, 11, 11, 0]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_iter() {
|
||||
use core::iter::{once, repeat};
|
||||
|
||||
let a: GenericArray<_, U4> = repeat(11).take(3).chain(once(0)).collect();
|
||||
|
||||
assert_eq!(a, arr![i32; 11, 11, 11, 0]);
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
enum E {
|
||||
V,
|
||||
V2(i32),
|
||||
V3 { h: bool, i: i32 },
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
#[repr(C)]
|
||||
#[repr(packed)]
|
||||
struct Test {
|
||||
t: u16,
|
||||
s: u32,
|
||||
mm: bool,
|
||||
r: u16,
|
||||
f: u16,
|
||||
p: (),
|
||||
o: u32,
|
||||
ff: *const extern "C" fn(*const char) -> *const core::ffi::c_void,
|
||||
l: *const core::ffi::c_void,
|
||||
w: bool,
|
||||
q: bool,
|
||||
v: E,
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sizes() {
|
||||
use core::mem::{size_of, size_of_val};
|
||||
|
||||
assert_eq!(size_of::<E>(), 8);
|
||||
|
||||
assert_eq!(size_of::<Test>(), 25 + size_of::<usize>() * 2);
|
||||
|
||||
assert_eq!(size_of_val(&arr![u8; 1, 2, 3]), size_of::<u8>() * 3);
|
||||
assert_eq!(size_of_val(&arr![u32; 1]), size_of::<u32>() * 1);
|
||||
assert_eq!(size_of_val(&arr![u64; 1, 2, 3, 4]), size_of::<u64>() * 4);
|
||||
|
||||
assert_eq!(size_of::<GenericArray<Test, U97>>(), size_of::<Test>() * 97);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_alignment() {
|
||||
use core::mem::align_of;
|
||||
|
||||
assert_eq!(align_of::<GenericArray::<u32, U0>>(), align_of::<[u32; 0]>());
|
||||
assert_eq!(align_of::<GenericArray::<u32, U3>>(), align_of::<[u32; 3]>());
|
||||
assert_eq!(align_of::<GenericArray::<Test, U3>>(), align_of::<[Test; 3]>());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_append() {
|
||||
let a = arr![i32; 1, 2, 3];
|
||||
|
||||
let b = a.append(4);
|
||||
|
||||
assert_eq!(b, arr![i32; 1, 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_prepend() {
|
||||
let a = arr![i32; 1, 2, 3];
|
||||
|
||||
let b = a.prepend(4);
|
||||
|
||||
assert_eq!(b, arr![i32; 4, 1, 2, 3]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_pop() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
let (init, last) = a.pop_back();
|
||||
|
||||
assert_eq!(init, arr![i32; 1, 2, 3]);
|
||||
assert_eq!(last, 4);
|
||||
|
||||
let (head, tail) = a.pop_front();
|
||||
|
||||
assert_eq!(head, 1);
|
||||
assert_eq!(tail, arr![i32; 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
let (b, c) = a.split();
|
||||
|
||||
assert_eq!(b, arr![i32; 1]);
|
||||
assert_eq!(c, arr![i32; 2, 3, 4]);
|
||||
|
||||
let (e, f) = a.split();
|
||||
|
||||
assert_eq!(e, arr![i32; 1, 2]);
|
||||
assert_eq!(f, arr![i32; 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_ref() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
let a_ref = &a;
|
||||
|
||||
let (b_ref, c_ref) = a_ref.split();
|
||||
|
||||
assert_eq!(b_ref, &arr![i32; 1]);
|
||||
assert_eq!(c_ref, &arr![i32; 2, 3, 4]);
|
||||
|
||||
let (e_ref, f_ref) = a_ref.split();
|
||||
|
||||
assert_eq!(e_ref, &arr![i32; 1, 2]);
|
||||
assert_eq!(f_ref, &arr![i32; 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_mut() {
|
||||
let mut a = arr![i32; 1, 2, 3, 4];
|
||||
let a_ref = &mut a;
|
||||
|
||||
let (b_ref, c_ref) = a_ref.split();
|
||||
|
||||
assert_eq!(b_ref, &mut arr![i32; 1]);
|
||||
assert_eq!(c_ref, &mut arr![i32; 2, 3, 4]);
|
||||
|
||||
let (e_ref, f_ref) = a_ref.split();
|
||||
|
||||
assert_eq!(e_ref, &mut arr![i32; 1, 2]);
|
||||
assert_eq!(f_ref, &mut arr![i32; 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_concat() {
|
||||
let a = arr![i32; 1, 2];
|
||||
let b = arr![i32; 3, 4, 5];
|
||||
|
||||
let c = a.concat(b);
|
||||
|
||||
assert_eq!(c, arr![i32; 1, 2, 3, 4, 5]);
|
||||
|
||||
let (d, e) = c.split();
|
||||
|
||||
assert_eq!(d, arr![i32; 1, 2]);
|
||||
assert_eq!(e, arr![i32; 3, 4, 5]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fold() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
|
||||
assert_eq!(10, a.fold(0, |a, x| a + x));
|
||||
}
|
||||
|
||||
fn sum_generic<S>(s: S) -> i32
|
||||
where
|
||||
S: FunctionalSequence<i32>,
|
||||
S::Item: Add<i32, Output = i32>, // `+`
|
||||
i32: Add<S::Item, Output = i32>, // reflexive
|
||||
{
|
||||
s.fold(0, |a, x| a + x)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sum() {
|
||||
let a = sum_generic(arr![i32; 1, 2, 3, 4]);
|
||||
|
||||
assert_eq!(a, 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_as_ref() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
let a_ref: &[i32; 4] = a.as_ref();
|
||||
assert_eq!(a_ref, &[1, 2, 3, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_as_mut() {
|
||||
let mut a = arr![i32; 1, 2, 3, 4];
|
||||
let a_mut: &mut [i32; 4] = a.as_mut();
|
||||
assert_eq!(a_mut, &mut [1, 2, 3, 4]);
|
||||
a_mut[2] = 0;
|
||||
assert_eq!(a_mut, &mut [1, 2, 0, 4]);
|
||||
assert_eq!(a, arr![i32; 1, 2, 0, 4]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_array_ref() {
|
||||
let a = arr![i32; 1, 2, 3, 4];
|
||||
let a_ref: &[i32; 4] = a.as_ref();
|
||||
let a_from: &GenericArray<i32, U4> = a_ref.into();
|
||||
assert_eq!(&a, a_from);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_array_mut() {
|
||||
let mut a = arr![i32; 1, 2, 3, 4];
|
||||
let mut a_copy = a;
|
||||
let a_mut: &mut [i32; 4] = a.as_mut();
|
||||
let a_from: &mut GenericArray<i32, U4> = a_mut.into();
|
||||
assert_eq!(&mut a_copy, a_from);
|
||||
}
|
||||
|
|
File diff suppressed because one or more lines are too long
|
@ -3,24 +3,29 @@
|
|||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you believe there's an error in this file please file an
|
||||
# issue against the rust-lang/cargo repository. If you're
|
||||
# editing this file be aware that the upstream Cargo.toml
|
||||
# will likely look very different (and much more reasonable)
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
name = "headers"
|
||||
version = "0.3.3"
|
||||
version = "0.3.7"
|
||||
authors = ["Sean McArthur <sean@seanmonstar.com>"]
|
||||
description = "typed HTTP headers"
|
||||
homepage = "https://hyper.rs"
|
||||
readme = "README.md"
|
||||
keywords = ["http", "headers", "hyper", "hyperium"]
|
||||
keywords = [
|
||||
"http",
|
||||
"headers",
|
||||
"hyper",
|
||||
"hyperium",
|
||||
]
|
||||
categories = ["web-programming"]
|
||||
license = "MIT"
|
||||
repository = "https://github.com/hyperium/headers"
|
||||
|
||||
[dependencies.base64]
|
||||
version = "0.13"
|
||||
|
||||
|
@ -36,14 +41,14 @@ version = "0.2"
|
|||
[dependencies.http]
|
||||
version = "0.2.0"
|
||||
|
||||
[dependencies.httpdate]
|
||||
version = "1"
|
||||
|
||||
[dependencies.mime]
|
||||
version = "0.3.14"
|
||||
|
||||
[dependencies.sha-1]
|
||||
version = "0.8"
|
||||
|
||||
[dependencies.time]
|
||||
version = "0.1.34"
|
||||
version = "0.10"
|
||||
|
||||
[features]
|
||||
nightly = []
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# rust http headers
|
||||
|
||||
[![Build Status](https://travis-ci.org/hyperium/headers.svg?branch=master)](https://travis-ci.org/hyperium/header)
|
||||
[![Build Status](https://github.com/hyperium/headers/workflows/CI/badge.svg)](https://github.com/hyperium/headers/actions?query=workflow%3ACI)
|
||||
|
||||
Typed HTTP headers.
|
||||
|
|
|
@ -1,3 +1,5 @@
|
|||
use std::convert::TryFrom;
|
||||
|
||||
use super::origin::Origin;
|
||||
use util::{IterExt, TryFromValues};
|
||||
use HeaderValue;
|
||||
|
@ -25,9 +27,11 @@ use HeaderValue;
|
|||
/// ```
|
||||
/// # extern crate headers;
|
||||
/// use headers::AccessControlAllowOrigin;
|
||||
/// use std::convert::TryFrom;
|
||||
///
|
||||
/// let any_origin = AccessControlAllowOrigin::ANY;
|
||||
/// let null_origin = AccessControlAllowOrigin::NULL;
|
||||
/// let origin = AccessControlAllowOrigin::try_from("http://web-platform.test:8000");
|
||||
/// ```
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct AccessControlAllowOrigin(OriginOrAny);
|
||||
|
@ -60,6 +64,26 @@ impl AccessControlAllowOrigin {
|
|||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&str> for AccessControlAllowOrigin {
|
||||
type Error = ::Error;
|
||||
|
||||
fn try_from(s: &str) -> Result<Self, ::Error> {
|
||||
let header_value = HeaderValue::from_str(s).map_err(|_| ::Error::invalid())?;
|
||||
let origin = OriginOrAny::try_from(&header_value)?;
|
||||
Ok(Self(origin))
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<&HeaderValue> for OriginOrAny {
|
||||
type Error = ::Error;
|
||||
|
||||
fn try_from(header_value: &HeaderValue) -> Result<Self, ::Error> {
|
||||
Origin::try_from_value(header_value)
|
||||
.map(OriginOrAny::Origin)
|
||||
.ok_or_else(::Error::invalid)
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFromValues for OriginOrAny {
|
||||
fn try_from_values<'i, I>(values: &mut I) -> Result<Self, ::Error>
|
||||
where
|
||||
|
@ -89,12 +113,14 @@ impl<'a> From<&'a OriginOrAny> for HeaderValue {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use super::super::{test_decode, test_encode};
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn origin() {
|
||||
let s = "http://web-platform.test:8000";
|
||||
|
||||
let allow_origin = test_decode::<AccessControlAllowOrigin>(&[s]).unwrap();
|
||||
{
|
||||
let origin = allow_origin.origin().unwrap();
|
||||
|
@ -107,6 +133,22 @@ mod tests {
|
|||
assert_eq!(headers["access-control-allow-origin"], s);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn try_from_origin() {
|
||||
let s = "http://web-platform.test:8000";
|
||||
|
||||
let allow_origin = AccessControlAllowOrigin::try_from(s).unwrap();
|
||||
{
|
||||
let origin = allow_origin.origin().unwrap();
|
||||
assert_eq!(origin.scheme(), "http");
|
||||
assert_eq!(origin.hostname(), "web-platform.test");
|
||||
assert_eq!(origin.port(), Some(8000));
|
||||
}
|
||||
|
||||
let headers = test_encode(allow_origin);
|
||||
assert_eq!(headers["access-control-allow-origin"], s);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn any() {
|
||||
let allow_origin = test_decode::<AccessControlAllowOrigin>(&["*"]).unwrap();
|
||||
|
|
|
@ -0,0 +1,69 @@
|
|||
use std::time::Duration;
|
||||
|
||||
use util::Seconds;
|
||||
|
||||
/// `Age` header, defined in [RFC7234](https://tools.ietf.org/html/rfc7234#section-5.1)
|
||||
///
|
||||
/// The "Age" header field conveys the sender's estimate of the amount of
|
||||
/// time since the response was generated or successfully validated at
|
||||
/// the origin server. Age values are calculated as specified in
|
||||
/// [Section 4.2.3](https://tools.ietf.org/html/rfc7234#section-4.2.3).
|
||||
///
|
||||
/// ## ABNF
|
||||
///
|
||||
/// ```text
|
||||
/// Age = delta-seconds
|
||||
/// ```
|
||||
///
|
||||
/// The Age field-value is a non-negative integer, representing time in
|
||||
/// seconds (see [Section 1.2.1](https://tools.ietf.org/html/rfc7234#section-1.2.1)).
|
||||
///
|
||||
/// The presence of an Age header field implies that the response was not
|
||||
/// generated or validated by the origin server for this request.
|
||||
/// However, lack of an Age header field does not imply the origin was
|
||||
/// contacted, since the response might have been received from an
|
||||
/// HTTP/1.0 cache that does not implement Age.
|
||||
///
|
||||
/// ## Example values
|
||||
///
|
||||
/// * `3600`
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// # extern crate headers;
|
||||
/// use headers::Age;
|
||||
///
|
||||
/// let len = Age::from_secs(60);
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub struct Age(Seconds);
|
||||
|
||||
derive_header! {
|
||||
Age(_),
|
||||
name: AGE
|
||||
}
|
||||
|
||||
impl Age {
|
||||
/// Creates a new `Age` header from the specified number of whole seconds.
|
||||
pub fn from_secs(secs: u64) -> Self {
|
||||
Self(Seconds::from_secs(secs))
|
||||
}
|
||||
|
||||
/// Returns the number of seconds for this `Age` header.
|
||||
pub fn as_secs(&self) -> u64 {
|
||||
self.0.as_u64()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Duration> for Age {
|
||||
fn from(dur: Duration) -> Self {
|
||||
Age(Seconds::from(dur))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Age> for Duration {
|
||||
fn from(age: Age) -> Self {
|
||||
age.0.into()
|
||||
}
|
||||
}
|
|
@ -45,6 +45,16 @@ impl Authorization<Basic> {
|
|||
|
||||
Authorization(Basic { decoded, colon_pos })
|
||||
}
|
||||
|
||||
/// View the decoded username.
|
||||
pub fn username(&self) -> &str {
|
||||
self.0.username()
|
||||
}
|
||||
|
||||
/// View the decoded password.
|
||||
pub fn password(&self) -> &str {
|
||||
self.0.password()
|
||||
}
|
||||
}
|
||||
|
||||
impl Authorization<Bearer> {
|
||||
|
@ -54,6 +64,11 @@ impl Authorization<Bearer> {
|
|||
.map(|val| Authorization(Bearer(val)))
|
||||
.ok_or_else(|| InvalidBearerToken { _inner: () })
|
||||
}
|
||||
|
||||
/// View the token part as a `&str`.
|
||||
pub fn token(&self) -> &str {
|
||||
self.0.token()
|
||||
}
|
||||
}
|
||||
|
||||
impl<C: Credentials> ::Header for Authorization<C> {
|
||||
|
|
|
@ -19,6 +19,7 @@ pub use self::access_control_expose_headers::AccessControlExposeHeaders;
|
|||
pub use self::access_control_max_age::AccessControlMaxAge;
|
||||
pub use self::access_control_request_headers::AccessControlRequestHeaders;
|
||||
pub use self::access_control_request_method::AccessControlRequestMethod;
|
||||
pub use self::age::Age;
|
||||
pub use self::allow::Allow;
|
||||
pub use self::authorization::Authorization;
|
||||
pub use self::cache_control::CacheControl;
|
||||
|
@ -138,6 +139,7 @@ mod access_control_expose_headers;
|
|||
mod access_control_max_age;
|
||||
mod access_control_request_headers;
|
||||
mod access_control_request_method;
|
||||
mod age;
|
||||
mod allow;
|
||||
pub mod authorization;
|
||||
mod cache_control;
|
||||
|
|
|
@ -37,9 +37,9 @@ impl From<SecWebsocketKey> for SecWebsocketAccept {
|
|||
|
||||
fn sign(key: &[u8]) -> SecWebsocketAccept {
|
||||
let mut sha1 = Sha1::default();
|
||||
sha1.input(key);
|
||||
sha1.input(&b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"[..]);
|
||||
let b64 = Bytes::from(base64::encode(&sha1.result()));
|
||||
sha1.update(key);
|
||||
sha1.update(&b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"[..]);
|
||||
let b64 = Bytes::from(base64::encode(&sha1.finalize()));
|
||||
|
||||
let val = ::HeaderValue::from_maybe_shared(b64).expect("base64 is a valid value");
|
||||
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#![deny(missing_debug_implementations)]
|
||||
#![cfg_attr(test, deny(warnings))]
|
||||
#![cfg_attr(all(test, feature = "nightly"), feature(test))]
|
||||
#![doc(html_root_url = "https://docs.rs/headers/0.3.3")]
|
||||
#![doc(html_root_url = "https://docs.rs/headers/0.3.7")]
|
||||
|
||||
//! # Typed HTTP Headers
|
||||
//!
|
||||
|
@ -78,11 +78,11 @@ extern crate bitflags;
|
|||
extern crate bytes;
|
||||
extern crate headers_core;
|
||||
extern crate http;
|
||||
extern crate httpdate;
|
||||
extern crate mime;
|
||||
extern crate sha1;
|
||||
#[cfg(all(test, feature = "nightly"))]
|
||||
extern crate test;
|
||||
extern crate time;
|
||||
|
||||
pub use headers_core::{Error, Header};
|
||||
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
use std::time::SystemTime;
|
||||
|
||||
use bytes::Bytes;
|
||||
use http::header::HeaderValue;
|
||||
use time;
|
||||
use httpdate;
|
||||
|
||||
use super::IterExt;
|
||||
|
||||
|
@ -32,7 +32,7 @@ use super::IterExt;
|
|||
// HTTP-date, the sender MUST generate those timestamps in the
|
||||
// IMF-fixdate format.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
|
||||
pub(crate) struct HttpDate(time::Tm);
|
||||
pub(crate) struct HttpDate(httpdate::HttpDate);
|
||||
|
||||
impl HttpDate {
|
||||
pub(crate) fn from_val(val: &HeaderValue) -> Option<Self> {
|
||||
|
@ -74,96 +74,74 @@ impl<'a> From<&'a HttpDate> for HeaderValue {
|
|||
impl FromStr for HttpDate {
|
||||
type Err = Error;
|
||||
fn from_str(s: &str) -> Result<HttpDate, Error> {
|
||||
time::strptime(s, "%a, %d %b %Y %T %Z")
|
||||
.or_else(|_| time::strptime(s, "%A, %d-%b-%y %T %Z"))
|
||||
.or_else(|_| time::strptime(s, "%c"))
|
||||
.map(HttpDate)
|
||||
.map_err(|_| Error(()))
|
||||
Ok(HttpDate(s.parse().map_err(|_| Error(()))?))
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for HttpDate {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt::Display::fmt(&self.0.to_utc().rfc822(), f)
|
||||
fmt::Display::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for HttpDate {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
fmt::Display::fmt(&self.0.to_utc().rfc822(), f)
|
||||
fmt::Display::fmt(&self.0, f)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SystemTime> for HttpDate {
|
||||
fn from(sys: SystemTime) -> HttpDate {
|
||||
let tmspec = match sys.duration_since(UNIX_EPOCH) {
|
||||
Ok(dur) => {
|
||||
// subsec nanos always dropped
|
||||
time::Timespec::new(dur.as_secs() as i64, 0)
|
||||
}
|
||||
Err(err) => {
|
||||
let neg = err.duration();
|
||||
// subsec nanos always dropped
|
||||
time::Timespec::new(-(neg.as_secs() as i64), 0)
|
||||
}
|
||||
};
|
||||
HttpDate(time::at_utc(tmspec))
|
||||
HttpDate(sys.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<HttpDate> for SystemTime {
|
||||
fn from(date: HttpDate) -> SystemTime {
|
||||
let spec = date.0.to_timespec();
|
||||
if spec.sec >= 0 {
|
||||
UNIX_EPOCH + Duration::new(spec.sec as u64, spec.nsec as u32)
|
||||
} else {
|
||||
UNIX_EPOCH - Duration::new(spec.sec as u64, spec.nsec as u32)
|
||||
}
|
||||
SystemTime::from(date.0)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::HttpDate;
|
||||
use time::Tm;
|
||||
|
||||
const NOV_07: HttpDate = HttpDate(Tm {
|
||||
tm_nsec: 0,
|
||||
tm_sec: 37,
|
||||
tm_min: 48,
|
||||
tm_hour: 8,
|
||||
tm_mday: 7,
|
||||
tm_mon: 10,
|
||||
tm_year: 94,
|
||||
tm_wday: 0,
|
||||
tm_isdst: 0,
|
||||
tm_yday: 0,
|
||||
tm_utcoff: 0,
|
||||
});
|
||||
use std::time::{Duration, UNIX_EPOCH};
|
||||
|
||||
// The old tests had Sunday, but 1994-11-07 is a Monday.
|
||||
// See https://github.com/pyfisch/httpdate/pull/6#issuecomment-846881001
|
||||
fn nov_07() -> HttpDate {
|
||||
HttpDate((UNIX_EPOCH + Duration::new(784198117, 0)).into())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_display_is_imf_fixdate() {
|
||||
assert_eq!("Mon, 07 Nov 1994 08:48:37 GMT", &nov_07().to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_imf_fixdate() {
|
||||
assert_eq!(
|
||||
"Sun, 07 Nov 1994 08:48:37 GMT".parse::<HttpDate>().unwrap(),
|
||||
NOV_07
|
||||
"Mon, 07 Nov 1994 08:48:37 GMT".parse::<HttpDate>().unwrap(),
|
||||
nov_07()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rfc_850() {
|
||||
assert_eq!(
|
||||
"Sunday, 07-Nov-94 08:48:37 GMT"
|
||||
"Monday, 07-Nov-94 08:48:37 GMT"
|
||||
.parse::<HttpDate>()
|
||||
.unwrap(),
|
||||
NOV_07
|
||||
nov_07()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_asctime() {
|
||||
assert_eq!(
|
||||
"Sun Nov 7 08:48:37 1994".parse::<HttpDate>().unwrap(),
|
||||
NOV_07
|
||||
"Mon Nov 7 08:48:37 1994".parse::<HttpDate>().unwrap(),
|
||||
nov_07()
|
||||
);
|
||||
}
|
||||
|
||||
|
|
|
@ -11,7 +11,11 @@ impl Seconds {
|
|||
pub(crate) fn from_val(val: &HeaderValue) -> Option<Self> {
|
||||
let secs = val.to_str().ok()?.parse().ok()?;
|
||||
|
||||
Some(Seconds(Duration::from_secs(secs)))
|
||||
Some(Self::from_secs(secs))
|
||||
}
|
||||
|
||||
pub(crate) fn from_secs(secs: u64) -> Self {
|
||||
Self::from(Duration::from_secs(secs))
|
||||
}
|
||||
|
||||
pub(crate) fn as_u64(&self) -> u64 {
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
{"files":{"Cargo.toml":"214f35d88fe24ad0bce0c89aa14912a083e7a3bd496cd6c39c915931ee32674f","LICENSE-APACHE":"4d10fe5f3aa176b05b229a248866bad70b834c173f1252a814ff4748d8a13837","LICENSE-MIT":"934887691e05d69d7c86ad3f2c360980fa30c15b035e351f3c9865e99da4debc","README.md":"26318a99a935b392b4fc7527e2376ee67e6b7bb75558882173d25c4408ce9273","benches/benchmarks.rs":"13f1208dfb86e3c02dcd67a4c08c2bae300c0a153de5df437eac4a136579ec23","src/date.rs":"bb1afb1189a9da7f8246f5fd4594d4b29c27bf8da3642fddd9bf54b7ce0b1bd8","src/lib.rs":"83e73452762adf92f4ab476276d6ad72bce9142ffbf730bd0c47549a8dff2699"},"package":"c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"}
|
|
@ -0,0 +1,27 @@
|
|||
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
|
||||
#
|
||||
# When uploading crates to the registry Cargo will automatically
|
||||
# "normalize" Cargo.toml files for maximal compatibility
|
||||
# with all versions of Cargo and also rewrite `path` dependencies
|
||||
# to registry (e.g., crates.io) dependencies.
|
||||
#
|
||||
# If you are reading this file be aware that the original Cargo.toml
|
||||
# will likely look very different (and much more reasonable).
|
||||
# See Cargo.toml.orig for the original contents.
|
||||
|
||||
[package]
|
||||
edition = "2018"
|
||||
name = "httpdate"
|
||||
version = "1.0.2"
|
||||
authors = ["Pyfisch <pyfisch@posteo.org>"]
|
||||
description = "HTTP date parsing and formatting"
|
||||
readme = "README.md"
|
||||
keywords = ["http", "date", "time", "simple", "timestamp"]
|
||||
license = "MIT/Apache-2.0"
|
||||
repository = "https://github.com/pyfisch/httpdate"
|
||||
|
||||
[[bench]]
|
||||
name = "benchmarks"
|
||||
harness = false
|
||||
[dev-dependencies.criterion]
|
||||
version = "0.3.5"
|
|
@ -0,0 +1,201 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -0,0 +1,19 @@
Copyright (c) 2016 Pyfisch

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -0,0 +1,27 @@
# Date and time utils for HTTP.

[![Build Status](https://travis-ci.org/pyfisch/httpdate.svg?branch=master)](https://travis-ci.org/pyfisch/httpdate)
[![Crates.io](https://img.shields.io/crates/v/httpdate.svg)](https://crates.io/crates/httpdate)
[![Documentation](https://docs.rs/httpdate/badge.svg)](https://docs.rs/httpdate)

Multiple HTTP header fields store timestamps.
For example, a response created on May 15, 2015 may contain the header
`Date: Fri, 15 May 2015 15:34:21 GMT`. Since the timestamp does not
contain any timezone or leap second information, it is equivalent to
the Unix timestamp 1431704061. Rust's `SystemTime` is used to store
these timestamps.

This crate provides two public functions:

* `parse_http_date` to parse an HTTP datetime string to a system time
* `fmt_http_date` to format a system time to an IMF-fixdate

In addition it exposes the `HttpDate` type, which can be used to parse
and format timestamps. Convert a system time to `HttpDate` and vice versa.
`HttpDate` (8 bytes) is smaller than `SystemTime` (16 bytes), and
using its `Display` impl avoids a temporary allocation.
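
A minimal usage sketch of the two functions and the `HttpDate` conversions
described above; the `fn main` wrapper and a dependency line such as
`httpdate = "1"` are assumptions made only to keep the example self-contained.

```rust
use std::time::SystemTime;

use httpdate::{fmt_http_date, parse_http_date, HttpDate};

fn main() {
    // Parse an IMF-fixdate string into a std SystemTime.
    let parsed: SystemTime = parse_http_date("Fri, 15 May 2015 15:34:21 GMT")
        .expect("well-formed HTTP date");

    // Format the SystemTime back into an IMF-fixdate string.
    assert_eq!(fmt_http_date(parsed), "Fri, 15 May 2015 15:34:21 GMT");

    // Convert between SystemTime and HttpDate; HttpDate's Display impl
    // formats the date without building an intermediate String first.
    let date = HttpDate::from(parsed);
    println!("{}", date);

    // Converting back yields the original SystemTime (second precision).
    let back: SystemTime = date.into();
    assert_eq!(back, parsed);
}
```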

Read the [blog post](https://pyfisch.org/blog/http-datetime-handling/) to learn
more.

Fuzz it by installing *cargo-fuzz* and running `cargo fuzz run fuzz_target_1`.