Bug 1670341 - Update mp4parse-rust to eliminate multiple versions of hashbrown crate. r=emilio

Also update servo's `style` package to use hashbrown 0.9.

Differential Revision: https://phabricator.services.mozilla.com/D93616
Jon Bauman 2020-10-16 19:49:05 +00:00
Parent 65afc76646
Commit ce053c88e0
82 changed files with 58 additions and 15752 deletions
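As the Cargo.lock diff below shows, hashbrown 0.9 pulls in ahash 0.4 as its default hasher, so converging everything on hashbrown 0.9 also drops the duplicate ahash 0.3.2 and autocfg 1.0.0 entries. A minimal sketch (not part of the patch) of the single remaining hashbrown in use:

```rust
// Minimal sketch, not part of the patch: with one hashbrown 0.9 in the tree,
// its default `ahash` feature means a single ahash build serves every consumer.
use hashbrown::HashMap;

fn main() {
    let mut counts: HashMap<&str, u32> = HashMap::new();
    *counts.entry("hashbrown").or_insert(0) += 1;
    *counts.entry("ahash").or_insert(0) += 1;
    assert_eq!(counts["hashbrown"], 1);
}
```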

View file

@ -15,7 +15,7 @@ tag = "v0.4.13"
[source."https://github.com/mozilla/mp4parse-rust"]
git = "https://github.com/mozilla/mp4parse-rust"
replace-with = "vendored-sources"
rev = "fe9028570e44f3a725dd78bbb58428909c4618bf"
rev = "f7c35a30ff25521bebe64c19d3f306569ecb5385"
[source."https://github.com/mozilla/application-services"]
git = "https://github.com/mozilla/application-services"

52
Cargo.lock generated
View file

@ -6,12 +6,6 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2"
[[package]]
name = "ahash"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0989268a37e128d4d7a8028f1c60099430113fdbc70419010601ce51a228e4fe"
[[package]]
name = "ahash"
version = "0.4.5"
@ -216,12 +210,6 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"
[[package]]
name = "autocfg"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
[[package]]
name = "baldrdash"
version = "0.1.0"
@ -874,7 +862,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac"
dependencies = [
"autocfg 0.1.6",
"autocfg",
"cfg-if",
"crossbeam-utils 0.7.0",
"lazy_static",
@ -907,7 +895,7 @@ version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4"
dependencies = [
"autocfg 0.1.6",
"autocfg",
"cfg-if",
"lazy_static",
]
@ -1386,11 +1374,11 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
[[package]]
name = "fallible_collections"
version = "0.1.3"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3301bcde54d3fc19c626ff4bf962630fe1f94cb6cdc3f18a26727a2d1f4a67"
checksum = "3bda4d04bca84e2331f0ff2ee8300064df3f467e37743d87788c1487a6dd903b"
dependencies = [
"hashbrown 0.7.2",
"hashbrown",
]
[[package]]
@ -2168,23 +2156,13 @@ dependencies = [
"tokio-util",
]
[[package]]
name = "hashbrown"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf"
dependencies = [
"ahash 0.3.2",
"autocfg 1.0.0",
]
[[package]]
name = "hashbrown"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7afe4a420e3fe79967a00898cc1f4db7c8a49a9333a29f8a4bd76a253d5cd04"
dependencies = [
"ahash 0.4.5",
"ahash",
]
[[package]]
@ -2200,7 +2178,7 @@ version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d99cf782f0dc4372d26846bec3de7804ceb5df083c2d4462c0b8d2330e894fa8"
dependencies = [
"hashbrown 0.9.1",
"hashbrown",
]
[[package]]
@ -3169,12 +3147,12 @@ dependencies = [
[[package]]
name = "mp4parse"
version = "0.11.4"
source = "git+https://github.com/mozilla/mp4parse-rust?rev=fe9028570e44f3a725dd78bbb58428909c4618bf#fe9028570e44f3a725dd78bbb58428909c4618bf"
source = "git+https://github.com/mozilla/mp4parse-rust?rev=f7c35a30ff25521bebe64c19d3f306569ecb5385#f7c35a30ff25521bebe64c19d3f306569ecb5385"
dependencies = [
"bitreader",
"byteorder",
"fallible_collections",
"hashbrown 0.7.2",
"hashbrown",
"log",
"num-traits",
"static_assertions",
@ -3187,7 +3165,7 @@ version = "0.1.0"
[[package]]
name = "mp4parse_capi"
version = "0.11.4"
source = "git+https://github.com/mozilla/mp4parse-rust?rev=fe9028570e44f3a725dd78bbb58428909c4618bf#fe9028570e44f3a725dd78bbb58428909c4618bf"
source = "git+https://github.com/mozilla/mp4parse-rust?rev=f7c35a30ff25521bebe64c19d3f306569ecb5385#f7c35a30ff25521bebe64c19d3f306569ecb5385"
dependencies = [
"byteorder",
"fallible_collections",
@ -3420,7 +3398,7 @@ version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9c3f34cdd24f334cb265d9bf8bfa8a241920d026916785747a92f0e55541a1a"
dependencies = [
"autocfg 0.1.6",
"autocfg",
"num-integer",
"num-traits",
]
@ -3442,7 +3420,7 @@ version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b85e541ef8255f6cf42bbfe4ef361305c6c135d10919ecc26126c4e5ae94bc09"
dependencies = [
"autocfg 0.1.6",
"autocfg",
"num-traits",
]
@ -3452,7 +3430,7 @@ version = "0.1.39"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76bd5272412d173d6bf9afdf98db8612bbabc9a7a830b7bfc9c188911716132e"
dependencies = [
"autocfg 0.1.6",
"autocfg",
"num-integer",
"num-traits",
]
@ -3473,7 +3451,7 @@ version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4c81ffc11c212fa327657cb19dd85eb7419e163b5b076bede2bdb5c974c07e4"
dependencies = [
"autocfg 0.1.6",
"autocfg",
]
[[package]]
@ -4761,7 +4739,7 @@ dependencies = [
"euclid",
"fallible",
"fxhash",
"hashbrown 0.7.2",
"hashbrown",
"hashglobe",
"indexmap",
"itertools 0.8.0",

View file

@ -40,7 +40,7 @@ encoding_rs = {version = "0.8", optional = true}
euclid = "0.22"
fallible = { path = "../fallible" }
fxhash = "0.2"
hashbrown = "0.7"
hashbrown = "0.9"
hashglobe = { path = "../hashglobe" }
html5ever = {version = "0.24", optional = true}
indexmap = "1.0"

View file

@ -1 +0,0 @@
{"files":{"Cargo.toml":"f0e25e3e6f839a66910d21529e8ba1912fcc86985869aa1e0bc82615bcc9c616","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"cae69bc8ba7fee044c48b7126a5facb831c29913753f7daec85a057d6d2b33fd","rustfmt.toml":"e090969e99df9360705680cc0097cfaddae10c22dc2e01470592cf3b9787fd36","src/aes_hash.rs":"e6fbcaf7d5a0153b98c7e70f8dd15cb56b1e4b9ae98747c05c12858342065ea6","src/convert.rs":"45c69b65982a95cc78d1b4e54c662be7d852aa03366acf57699e55a62ecb8930","src/fallback_hash.rs":"3d398c24197e7413461edbd79821aba118284dcdd0f1eaa01672fb717e561601","src/folded_multiply.rs":"f3a6ddc1db0655cc1ca775b8370175b3a098ed60e31bf15573879d2ab6870acb","src/hash_map.rs":"36bf0b13e334d7cedbf83e4822438098b227e7c7e381abe5e1eeac1ff6caa209","src/hash_quality_test.rs":"ecb76478989eb3b96e4a299aeccb0d75ba53d13b2c11e5a3c8de15d466a7476a","src/hash_set.rs":"4289672c142e314a0bfc6535b5e8f5e07cc78b60c0c7b308a43fa361eca6ddea","src/lib.rs":"ad3538e0a10bba5fba2f7364f2e0673a62c99b617b7c2fd96232100042a64781","src/random_state.rs":"85749d2b2c915c29c129f609cc22270ab1902a436bfc3997002adecf93c62406","tests/bench.rs":"a206178c8fb2a9756fb4292c44a3a4355143915217018e253930efd1575e99fb","tests/map_tests.rs":"68b81bcef528a83b8b786baad15db8cae13e5ca1db1e595e5aa74bdbf8d8115b","tests/nopanic.rs":"bc54a25d271c690ed49f6eb896e377bfb96217d29eb2c198052771714e972a56"},"package":"0989268a37e128d4d7a8028f1c60099430113fdbc70419010601ce51a228e4fe"}

90
third_party/rust/ahash-0.3.2/Cargo.toml vendored
View file

@ -1,90 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "ahash"
version = "0.3.2"
authors = ["Tom Kaitchuck <Tom.Kaitchuck@gmail.com>"]
description = "A non-cryprographic hash function using AES-NI for high performance"
documentation = "https://docs.rs/ahash"
readme = "README.md"
keywords = ["hash", "hashmap", "aes", "aes-ni", "no-std"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/tkaitchuck/ahash"
[package.metadata.docs.rs]
features = ["std"]
rustc-args = ["-C", "target-feature=+aes"]
rustdoc-args = ["-C", "target-feature=+aes"]
[profile.bench]
opt-level = 3
lto = "fat"
codegen-units = 1
debug = false
debug-assertions = false
[profile.release]
opt-level = 3
lto = "fat"
codegen-units = 1
debug = false
debug-assertions = false
[profile.test]
opt-level = 2
[lib]
name = "ahash"
path = "src/lib.rs"
test = true
doctest = true
bench = true
doc = true
[[bench]]
name = "ahash"
path = "tests/bench.rs"
harness = false
[[bench]]
name = "map"
path = "tests/map_tests.rs"
harness = false
[dependencies.const-random]
version = "0.1.6"
optional = true
[dev-dependencies.criterion]
version = "0.3.0"
features = ["real_blackbox"]
[dev-dependencies.fnv]
version = "1.0.5"
[dev-dependencies.fxhash]
version = "0.2.1"
[dev-dependencies.hex]
version = "0.3.2"
[dev-dependencies.no-panic]
version = "0.1.10"
[dev-dependencies.rand]
version = "0.6.5"
[dev-dependencies.seahash]
version = "3.0.5"
[features]
compile-time-rng = ["const-random"]
default = ["compile-time-rng", "std"]
std = []

201
third_party/rust/ahash-0.3.2/LICENSE-APACHE vendored
View file

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/ahash-0.3.2/LICENSE-MIT vendored
View file

@ -1,25 +0,0 @@
Copyright (c) 2016 Amanieu d'Antras
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

219
third_party/rust/ahash-0.3.2/README.md vendored
View file

@ -1,219 +0,0 @@
# aHash
AHash is a high speed keyed hashing algorithm intended for use in in-memory hashmaps. It provides a high quality 64 bit hash.
AHash is designed for performance and is *not cryptographically secure*.
When it is available aHash takes advantage of the [hardware AES instruction](https://en.wikipedia.org/wiki/AES_instruction_set)
on X86 processors. If it is not available it falls back on a lower quality (but still DOS resistant) [algorithm based on
multiplication](https://github.com/tkaitchuck/aHash/wiki/AHash-fallback-algorithm).
Similar to SipHash, aHash is a keyed hash, so two instances initialized with different keys will produce completely different
hashes and the resulting hashes cannot be predicted without knowing the keys.
This prevents DOS attacks where an attacker sends a large number of items whose hashes collide that get used as keys in a hashmap.
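A minimal sketch of the keyed behaviour described above, using the `AHasher::new_with_keys` constructor this crate exposes (shown in `src/aes_hash.rs` later in this diff); editorial example, not part of the original README:

```rust
// Sketch assuming ahash 0.3 as a dependency: the same input hashed under two
// different keys yields unrelated outputs (a collision is astronomically unlikely).
use ahash::AHasher;
use std::hash::{Hash, Hasher};

fn keyed_hash(key0: u64, key1: u64, value: &str) -> u64 {
    let mut hasher = AHasher::new_with_keys(key0, key1);
    value.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    let a = keyed_hash(1, 2, "mp4parse");
    let b = keyed_hash(3, 4, "mp4parse");
    assert_ne!(a, b);
    println!("{:016x} vs {:016x}", a, b);
}
```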
# Goals
AHash is intended to be the fastest DOS resistant hash for use in HashMaps available in the Rust language.
Failing in any of these criteria will be treated as a bug.
# Non-Goals
AHash is not:
* A cryptographically secure hash
* Intended to be a MAC
* Intended for network or persisted use
Different computers using aHash will arrive at different hashes for the same input. Similarly the same computer running
different versions of the code may hash the same input to different values.
## Speed
When it is available aHash uses two rounds of AES encryption using the AES-NI instruction per 16 bytes of input.
On an intel i5-6200u this is as fast as a 64 bit multiplication, but it has the advantages of being a much stronger
permutation and handles 16 bytes at a time. This is obviously much faster than most standard approaches to hashing,
and does a better job of scrambling data than most non-secure hashes.
On an intel i5-6200u compiled with flags `-C opt-level=3 -C target-cpu=native -C codegen-units=1`:
| Input | SipHash 1-3 time | FnvHash time|FxHash time| aHash time| aHash Fallback* |
|----------------|-----------|-----------|-----------|-----------|---------------|
| u8 | 12.766 ns | 1.1561 ns | **1.1474 ns** | 1.4607 ns | 1.2010 ns |
| u16 | 13.095 ns | 1.3030 ns | **1.1589 ns** | 1.4677 ns | 1.1991 ns |
| u32 | 12.303 ns | 2.1232 ns | **1.1491 ns** | 1.4659 ns | 1.1992 ns |
| u64 | 14.648 ns | 4.3945 ns | **1.1623 ns** | 1.4769 ns | 1.2105 ns |
| u128 | 17.207 ns | 9.5498 ns | **1.4231 ns** | 1.4613 ns | 1.7041 ns |
| 1 byte string | 16.042 ns | 1.9192 ns | 2.5481 ns | **2.1789 ns** | 2.1790 ns |
| 3 byte string | 16.775 ns | 3.5305 ns | 4.5138 ns | **2.1914 ns** | 2.1809 ns |
| 4 byte string | 15.726 ns | 3.8268 ns | **1.2745 ns** | 2.2099 ns | 2.1902 ns |
| 7 byte string | 19.970 ns | 5.9849 ns | 3.9006 ns | **2.1830 ns** | 2.1836 ns |
| 8 byte string | 18.103 ns | 4.5923 ns | 2.2808 ns | **2.1691 ns** | 2.1884 ns |
| 15 byte string | 22.637 ns | 10.361 ns | 6.0990 ns | **2.1663 ns** | 2.2695 ns |
| 16 byte string | 19.882 ns | 9.8525 ns | 2.7562 ns | **2.1658 ns** | 2.2304 ns |
| 24 byte string | 21.893 ns | 16.640 ns | 3.2014 ns | **2.1657 ns** | 4.4364 ns |
| 68 byte string | 33.370 ns | 65.900 ns | 6.4713 ns | **6.1354 ns** | 8.5719 ns |
| 132 byte string| 52.996 ns | 158.34 ns | 14.245 ns | **8.3096 ns** | 14.608 ns |
|1024 byte string| 337.01 ns | 1453.1 ns | 205.60 ns | **46.916 ns** | 98.323 ns |
* Fallback refers to the algorithm aHash would use if AES instructions are unavailable.
For reference, a hash that does nothing (not even read the input data) takes **0.844 ns**, so that represents the fastest
possible time.
As you can see above, aHash, like FxHash, provides a large speedup over SipHash-1-3, which is already nearly twice as fast as SipHash-2-4.
Rust by default uses SipHash-1-3 because faster hash functions such as FxHash are predictable and vulnerable to denial of
service attacks, while aHash has both very strong scrambling and very high performance.
AHash performs well when dealing with large inputs because it reads 8 or 16 bytes at a time (depending on the availability of AES-NI).
Because of this, and its optimized logic, aHash is able to outperform FxHash with strings.
It also provides especially good performance dealing with unaligned input.
(Notice the big performance gaps between 3 vs 4, 7 vs 8 and 15 vs 16 in FxHash above.)
For a more representative performance comparison which includes the overhead of using a HashMap, see [HashBrown's benchmarks](https://github.com/rust-lang/hashbrown#performance), as HashBrown now uses aHash as its hasher by default.
## Security
AHash is designed to [prevent an adversary that does not know the key from being able to create hash collisions or partial collisions.](https://github.com/tkaitchuck/aHash/wiki/Attacking-aHash-or-why-it's-good-enough-for-a-hashmap)
This is achieved by ensuring that:
* It obeys the '[strict avalanche criterion](https://en.wikipedia.org/wiki/Avalanche_effect#Strict_avalanche_criterion)':
Each bit of input has the potential to flip every bit of the output.
* Similarly, each bit in the key can affect every bit in the output.
* Input bits never affect just one or a very few bits in the intermediate state. This is specifically designed to prevent [differential attacks aimed to cancel previous input](https://emboss.github.io/blog/2012/12/14/breaking-murmur-hash-flooding-dos-reloaded/)
* The update function is not 'lossy'. That is, it is composed of reversible operations (such as AES), which means no entropy from the input is lost.
AHash is not designed to prevent finding the key by observing the output. It is not intended to prevent impossible differential
analysis from finding the key. Instead the security model is not to allow the hashes to be made visible. This is not a major
issue for hashmaps because the hashes aren't normally even stored. In practice this means using unique keys for each map
(RandomState does this for you by default), and not exposing the iteration order of long lived maps that an attacker could
conceivably insert elements into. (This is generally recommended anyway, regardless of hash function,
[because even without knowledge of the hash function an attack is possible](https://accidentallyquadratic.tumblr.com/post/153545455987/rust-hash-iteration-reinsertion).)
## Hash quality
It is a high quality hash that produces results that look highly random.
There should be around the same number of collisions for a small number of buckets that would be expected with random numbers.
There are no full 64 bit collisions with smaller than 64 bits of input. Notably this means the hashes are distinguishable from random data.
### aHash is not cryptographically secure
AHash should not be used for situations where cryptographic security is needed.
It is not intended for this and will likely fail to hold up for several reasons.
1. It has not been analyzed for vulnerabilities and may leak bits of the key in its output.
2. It only uses 2 rounds of AES as opposed to the standard of 10. This likely makes it possible to guess the key by observing a large number of hashes.
3. Like any cypher based hash, it will show certain statistical deviations from truly random output when comparing a (VERY) large number of hashes.
There are several efforts to build a secure hash function that uses AES-NI for acceleration, but aHash is not one of them.
## Compatibility
New versions of aHash may change the algorithm slightly resulting in the new version producing different hashes than
the old version even with the same keys. Additionally aHash does not guarantee that it won't produce different
hash values for the same data on different machines, or even on the same machine when recompiled.
For this reason aHash is not recommended for cases where hashes need to be persisted.
## Accelerated CPUs
Hardware AES instructions are built into Intel processors built after 2010 and AMD processors after 2012.
They are also available on [many other CPUs](https://en.wikipedia.org/wiki/AES_instruction_set), which should eventually
be able to run aHash as well. However, X86 and X86-64 are the only supported architectures at the moment, as currently
they are the only architectures for which Rust provides an intrinsic.
# Why use aHash over X
## SipHash
For a hashmap: Because aHash is faster.
SipHash is, however, useful in other contexts, such as for an HMAC, where aHash would be completely inappropriate.
*SipHash-2-4* is designed to provide DOS attack resistance, and has no presently known attacks
against this claim that don't involve learning bits of the key.
SipHash is also available in the "1-3" variant which is about twice as fast as the standard version.
The SipHash authors don't recommend using this variation when DOS attacks are a concern, but there are still no known
practical DOS attacks against the algorithm. Rust has opted for the "1-3" version as the default in `std::collections::HashMap`,
because the speed trade off of "2-4" was not worth it.
As you can see in the table above, aHash is **much** faster than even *SipHash-1-3*, but it also provides DOS resistance,
and any attack against the accelerated form would likely involve a weakness in AES.
## FxHash
In terms of performance, aHash is faster than FxHash for strings and byte arrays but not primitives.
So it might seem like using FxHash for hashmaps when the key is a primitive is a good idea. This is *not* the case.
When FxHash is operating on a 4 or 8 byte input such as a u32 or a u64, it reduces to multiplying the input by a fixed
constant. This is a bad hashing algorithm because it means that lower bits can never be influenced by any higher bit. In
the context of a hashmap where the low order bits are being used to determine which bucket to put an item in, this isn't
any better than the identity function. Any keys that happen to end in the same bit pattern will all collide. Some examples of where this is likely to occur are:
* Strings encoded in base64
* Null terminated strings (when working with C code)
* Integers that have their lower bits as zeros (i.e. any multiple of a small power of 2, which isn't a rare pattern in computer programs).
* For example, when taking lengths of data or locations in data it is common for values to
be a multiple of 1024; if these were used as keys in a map they would all collide and end up in the same bucket.
Like any non-keyed hash FxHash can be attacked. But FxHash is so prone to this that you may find yourself doing it accidentally.
For example it is possible to [accidentally introduce quadratic behavior by reading from one map in iteration order and writing to another.](https://accidentallyquadratic.tumblr.com/post/153545455987/rust-hash-iteration-reinsertion)
FxHash's flaws make sense when you understand it for what it is. It is a quick and dirty hash, nothing more.
It was not published and promoted by its creator, it was **found**!
Because it is error-prone, FxHash should never be used as a default. In specialized instances where the keys are understood
it makes sense, but given that aHash is faster on almost any object, it's probably not worth it.
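A minimal sketch of the failure mode described above; `fx_like` is an illustrative stand-in for FxHash's single-word multiply (using the odd constant fxhash employs on 64-bit targets), not the crate's actual API:

```rust
// Illustrative only: a single multiply by a fixed odd constant cannot move
// information *down* into the low bits, so keys that differ only above bit 9
// (multiples of 1024) all land in the same low-bit bucket.
fn fx_like(x: u64) -> u64 {
    x.wrapping_mul(0x517c_c1b7_2722_0a95)
}

fn main() {
    let buckets: u64 = 1 << 10; // a hashmap with 1024 buckets keys on the low 10 bits
    let bucket = |x: u64| fx_like(x) & (buckets - 1);
    assert!((0..8u64).all(|i| bucket(i * 1024) == bucket(0)));
    println!("all multiples of 1024 map to bucket {}", bucket(0));
}
```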
## FnvHash
FnvHash is also a poor default. It only handles one byte at a time, so its performance really suffers with large inputs.
It is also non-keyed so it is still subject to DOS attacks and [accidentally quadratic behavior.](https://accidentallyquadratic.tumblr.com/post/153545455987/rust-hash-iteration-reinsertion)
## MurmurHash, CityHash, MetroHash, FarmHash, HighwayHash, XXHash, SeaHash
Murmur, City, Metro, Farm and Highway are all related, and appear to directly replace one another. Sea and XX are independent
and compete.
They are all fine hashing algorithms, they do a good job of scrambling data, but they are all targeted at a different
use case. They are intended to work in distributed systems where the hash is expected to be the same over time and from one
computer to the next, efficiently hashing large volumes of data.
This is quite different from the needs of a Hasher used in a hashmap. In a map the typical value is under 10 bytes. None
of these algorithms scale down to handle that small of data at a competitive time. What's more the restriction that they
provide consistent output prevents them from taking advantage of different hardware capabilities on different CPUs. It makes
sense for a hashmap to work differently on a phone than on a server, or in wasm.
If you need to persist or transmit a hash of a file, then using one of these is probably a good idea. HighwayHash seems to be the preferred solution du jour. But inside a simple Hashmap, stick with aHash.
## AquaHash
AquaHash is structured very similarly to aHash (though the two were designed completely independently). It scales up
better and will start outperforming aHash with inputs larger than 5-10KB. However it does not scale down nearly as well and
does poorly with, for example, a single `i32` as input. Its only implementation at this point is in C++.
## t1ha
T1ha is fast at large sizes and the output is of high quality, but it is not clear what use case it aims for.
It has many different versions and is very complex, and uses hardware tricks, so one might infer it is meant for
hashmaps like aHash. But any hash using it takes at least **20ns**, and it doesn't outperform even SipHash until the
input sizes are larger than 128 bytes. So its uses are likely niche.
# License
Licensed under either of:
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
## Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.

1
third_party/rust/ahash-0.3.2/rustfmt.toml vendored
View file

@ -1 +0,0 @@
max_width = 120

241
third_party/rust/ahash-0.3.2/src/aes_hash.rs vendored
View file

@ -1,241 +0,0 @@
use crate::convert::*;
use core::hash::Hasher;
/// A `Hasher` for hashing an arbitrary stream of bytes.
///
/// Instances of [`AHasher`] represent state that is updated while hashing data.
///
/// Each method updates the internal state based on the new data provided. Once
/// all of the data has been provided, the resulting hash can be obtained by calling
/// `finish()`
///
/// [Clone] is also provided in case you wish to calculate hashes for two different items that
/// start with the same data.
///
#[derive(Debug, Clone)]
pub struct AHasher {
buffer: [u64; 2],
key: u128,
}
impl AHasher {
/// Creates a new hasher keyed to the provided keys.
/// # Example
///
/// ```
/// use std::hash::Hasher;
/// use ahash::AHasher;
///
/// let mut hasher = AHasher::new_with_keys(123, 456);
///
/// hasher.write_u32(1989);
/// hasher.write_u8(11);
/// hasher.write_u8(9);
/// hasher.write(b"Huh?");
///
/// println!("Hash is {:x}!", hasher.finish());
/// ```
#[inline]
pub fn new_with_keys(key0: u64, key1: u64) -> Self {
Self {
buffer: [key0, key1],
key: [key1, key0].convert(),
}
}
#[cfg(test)]
pub(crate) fn test_with_keys(key1: u64, key2: u64) -> AHasher {
use crate::random_state::scramble_keys;
let (k1, k2) = scramble_keys(key1, key2);
AHasher {
buffer: [k1, k2],
key: [k2, k1].convert(),
}
}
#[inline(always)]
fn hash_in(&mut self, new_value: u128) {
self.buffer = aeshashx2(self.buffer.convert(), new_value, self.key).convert();
}
#[inline(always)]
fn hash_in_2(&mut self, v1: u128, v2: u128) {
let updated = aeshash(self.buffer.convert(), v1);
self.buffer = aeshashx2(updated, v2, updated).convert();
}
}
/// Provides methods to hash all of the primitive types.
impl Hasher for AHasher {
#[inline]
fn write_u8(&mut self, i: u8) {
self.write_u128(i as u128);
}
#[inline]
fn write_u16(&mut self, i: u16) {
self.write_u128(i as u128);
}
#[inline]
fn write_u32(&mut self, i: u32) {
self.write_u128(i as u128);
}
#[inline]
fn write_u128(&mut self, i: u128) {
self.hash_in(i);
}
#[inline]
fn write_usize(&mut self, i: usize) {
self.write_u64(i as u64);
}
#[inline]
fn write_u64(&mut self, i: u64) {
self.write_u128(i as u128);
}
#[inline]
fn write(&mut self, input: &[u8]) {
let mut data = input;
let length = data.len() as u64;
//This will be scrambled by the first AES round in any branch.
self.buffer[1] = self.buffer[1].wrapping_add(length);
//A 'binary search' on sizes reduces the number of comparisons.
if data.len() <= 8 {
let value: [u64; 2] = if data.len() >= 2 {
if data.len() >= 4 {
//len 4-8
[data.read_u32().0 as u64, data.read_last_u32() as u64]
} else {
//len 2-3
[data.read_u16().0 as u64, data[data.len() - 1] as u64]
}
} else {
if data.len() > 0 {
[data[0] as u64, 0]
} else {
[0, 0]
}
};
self.hash_in(value.convert());
} else {
if data.len() > 32 {
if data.len() > 64 {
let tail = data.read_last_u128x4();
let mut par_block: u128 = self.buffer.convert();
while data.len() > 64 {
let (blocks, rest) = data.read_u128x4();
data = rest;
self.hash_in_2(blocks[0], blocks[1]);
par_block = aeshash(par_block, blocks[2]);
par_block = aeshashx2(par_block, blocks[3], par_block);
}
self.hash_in_2(tail[0], tail[1]);
par_block = aeshash(par_block, tail[2]);
par_block = aeshashx2(par_block, tail[3], par_block);
self.hash_in(par_block);
} else {
//len 33-64
let (head, _) = data.read_u128x2();
let tail = data.read_last_u128x2();
self.hash_in_2(head[0], head[1]);
self.hash_in_2(tail[0], tail[1]);
}
} else {
if data.len() > 16 {
//len 17-32
self.hash_in_2(data.read_u128().0, data.read_last_u128());
} else {
//len 9-16
let value: [u64; 2] = [data.read_u64().0, data.read_last_u64()];
self.hash_in(value.convert());
}
}
}
}
#[inline]
fn finish(&self) -> u64 {
let result: [u64; 2] = aeshash(self.buffer.convert(), self.key).convert();
result[0] //.wrapping_add(result[1])
}
}
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
#[inline(always)]
fn aeshash(value: u128, xor: u128) -> u128 {
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::mem::transmute;
unsafe {
let value = transmute(value);
transmute(_mm_aesdec_si128(value, transmute(xor)))
}
}
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
#[inline(always)]
fn aeshashx2(value: u128, k1: u128, k2: u128) -> u128 {
#[cfg(target_arch = "x86")]
use core::arch::x86::*;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::*;
use core::mem::transmute;
unsafe {
let value = transmute(value);
let value = _mm_aesdec_si128(value, transmute(k1));
transmute(_mm_aesdec_si128(value, transmute(k2)))
}
}
#[cfg(test)]
mod tests {
use crate::aes_hash::*;
use crate::convert::Convert;
use std::collections::HashMap;
use std::hash::BuildHasherDefault;
#[cfg(feature = "compile-time-rng")]
#[test]
fn test_builder() {
let mut map = HashMap::<u32, u64, BuildHasherDefault<AHasher>>::default();
map.insert(1, 3);
}
#[cfg(feature = "compile-time-rng")]
#[test]
fn test_default() {
let hasher_a = AHasher::default();
assert_ne!(0, hasher_a.buffer[0]);
assert_ne!(0, hasher_a.buffer[1]);
assert_ne!(hasher_a.buffer[0], hasher_a.buffer[1]);
let hasher_b = AHasher::default();
assert_eq!(hasher_a.buffer[0], hasher_b.buffer[0]);
assert_eq!(hasher_a.buffer[1], hasher_b.buffer[1]);
}
#[test]
fn test_hash() {
let mut result: [u64; 2] = [0x6c62272e07bb0142, 0x62b821756295c58d];
let value: [u64; 2] = [1 << 32, 0xFEDCBA9876543210];
result = aeshash(value.convert(), result.convert()).convert();
result = aeshash(result.convert(), result.convert()).convert();
let mut result2: [u64; 2] = [0x6c62272e07bb0142, 0x62b821756295c58d];
let value2: [u64; 2] = [1, 0xFEDCBA9876543210];
result2 = aeshash(value2.convert(), result2.convert()).convert();
result2 = aeshash(result2.convert(), result.convert()).convert();
let result: [u8; 16] = result.convert();
let result2: [u8; 16] = result2.convert();
assert_ne!(hex::encode(result), hex::encode(result2));
}
#[test]
fn test_conversion() {
let input: &[u8] = "dddddddd".as_bytes();
let bytes: u64 = as_array!(input, 8).convert();
assert_eq!(bytes, 0x6464646464646464);
}
}

174
third_party/rust/ahash-0.3.2/src/convert.rs vendored
View file

@ -1,174 +0,0 @@
use core::mem::transmute;
pub(crate) trait Convert<To> {
fn convert(self) -> To;
fn convert_ref(&self) -> &To;
fn convert_mut_ref(&mut self) -> &mut To;
}
macro_rules! convert {
($from:ty, $to:ty) => {
impl Convert<$to> for $from {
#[inline(always)]
fn convert(self) -> $to {
unsafe { transmute(self) }
}
#[inline(always)]
fn convert_ref(&self) -> &$to {
unsafe { &*(self as *const $from as *const $to) }
}
#[inline(always)]
fn convert_mut_ref(&mut self) -> &mut $to {
unsafe { &mut *(self as *mut $from as *mut $to) }
}
}
impl Convert<$from> for $to {
#[inline(always)]
fn convert(self) -> $from {
unsafe { transmute(self) }
}
#[inline(always)]
fn convert_ref(&self) -> &$from {
unsafe { &*(self as *const $to as *const $from) }
}
#[inline(always)]
fn convert_mut_ref(&mut self) -> &mut $from {
unsafe { &mut *(self as *mut $to as *mut $from) }
}
}
};
}
convert!([u128; 4], [u64; 8]);
convert!([u128; 4], [u32; 16]);
convert!([u128; 4], [u16; 32]);
convert!([u128; 4], [u8; 64]);
convert!([u128; 2], [u64; 4]);
convert!([u128; 2], [u32; 8]);
convert!([u128; 2], [u16; 16]);
convert!([u128; 2], [u8; 32]);
convert!(u128, [u64; 2]);
convert!(u128, [u32; 4]);
convert!(u128, [u16; 8]);
convert!(u128, [u8; 16]);
convert!([u64; 2], [u32; 4]);
convert!([u64; 2], [u16; 8]);
convert!([u64; 2], [u8; 16]);
convert!([u32; 4], [u16; 8]);
convert!([u32; 4], [u8; 16]);
convert!([u16; 8], [u8; 16]);
convert!(u64, [u32; 2]);
convert!(u64, [u16; 4]);
convert!(u64, [u8; 8]);
convert!([u32; 2], [u16; 4]);
convert!([u32; 2], [u8; 8]);
convert!(u32, [u16; 2]);
convert!(u32, [u8; 4]);
convert!([u16; 2], [u8; 4]);
convert!(u16, [u8; 2]);
convert!([f64; 2], [u8; 16]);
convert!([f32; 4], [u8; 16]);
convert!(f64, [u8; 8]);
convert!([f32; 2], [u8; 8]);
convert!(f32, [u8; 4]);
macro_rules! as_array {
($input:expr, $len:expr) => {{
{
#[inline(always)]
fn as_array<T>(slice: &[T]) -> &[T; $len] {
assert_eq!(slice.len(), $len);
unsafe { &*(slice.as_ptr() as *const [_; $len]) }
}
as_array($input)
}
}};
}
pub(crate) trait ReadFromSlice {
fn read_u16(&self) -> (u16, &[u8]);
fn read_u32(&self) -> (u32, &[u8]);
fn read_u64(&self) -> (u64, &[u8]);
fn read_u128(&self) -> (u128, &[u8]);
fn read_u128x2(&self) -> ([u128; 2], &[u8]);
fn read_u128x4(&self) -> ([u128; 4], &[u8]);
fn read_last_u16(&self) -> u16;
fn read_last_u32(&self) -> u32;
fn read_last_u64(&self) -> u64;
fn read_last_u128(&self) -> u128;
fn read_last_u128x2(&self) -> [u128; 2];
fn read_last_u128x4(&self) -> [u128; 4];
}
impl ReadFromSlice for [u8] {
#[inline(always)]
fn read_u16(&self) -> (u16, &[u8]) {
let (value, rest) = self.split_at(2);
(as_array!(value, 2).convert(), rest)
}
#[inline(always)]
fn read_u32(&self) -> (u32, &[u8]) {
let (value, rest) = self.split_at(4);
(as_array!(value, 4).convert(), rest)
}
#[inline(always)]
fn read_u64(&self) -> (u64, &[u8]) {
let (value, rest) = self.split_at(8);
(as_array!(value, 8).convert(), rest)
}
#[inline(always)]
fn read_u128(&self) -> (u128, &[u8]) {
let (value, rest) = self.split_at(16);
(as_array!(value, 16).convert(), rest)
}
#[inline(always)]
fn read_u128x2(&self) -> ([u128; 2], &[u8]) {
let (value, rest) = self.split_at(32);
(as_array!(value, 32).convert(), rest)
}
#[inline(always)]
fn read_u128x4(&self) -> ([u128; 4], &[u8]) {
let (value, rest) = self.split_at(64);
(as_array!(value, 64).convert(), rest)
}
#[inline(always)]
fn read_last_u16(&self) -> u16 {
let (_, value) = self.split_at(self.len() - 2);
as_array!(value, 2).convert()
}
#[inline(always)]
fn read_last_u32(&self) -> u32 {
let (_, value) = self.split_at(self.len() - 4);
as_array!(value, 4).convert()
}
#[inline(always)]
fn read_last_u64(&self) -> u64 {
let (_, value) = self.split_at(self.len() - 8);
as_array!(value, 8).convert()
}
#[inline(always)]
fn read_last_u128(&self) -> u128 {
let (_, value) = self.split_at(self.len() - 16);
as_array!(value, 16).convert()
}
#[inline(always)]
fn read_last_u128x2(&self) -> [u128; 2] {
let (_, value) = self.split_at(self.len() - 32);
as_array!(value, 32).convert()
}
#[inline(always)]
fn read_last_u128x4(&self) -> [u128; 4] {
let (_, value) = self.split_at(self.len() - 64);
as_array!(value, 64).convert()
}
}

View file

@ -1,222 +0,0 @@
use crate::convert::*;
use core::hash::Hasher;
///This constant comes from Knuth's PRNG (empirically it works better than those from splitmix32).
const MULTIPLE: u64 = crate::random_state::MULTIPLE;
const INCREMENT: u64 = 1442695040888963407;
const ROT: u32 = 23; //17
/// A `Hasher` for hashing an arbitrary stream of bytes.
///
/// Instances of [`AHasher`] represent state that is updated while hashing data.
///
/// Each method updates the internal state based on the new data provided. Once
/// all of the data has been provided, the resulting hash can be obtained by calling
/// `finish()`
///
/// [Clone] is also provided in case you wish to calculate hashes for two different items that
/// start with the same data.
///
#[derive(Debug, Clone)]
pub struct AHasher {
buffer: u64,
pad: u64,
}
impl AHasher {
/// Creates a new hasher keyed to the provided key.
#[inline]
pub fn new_with_keys(key1: u64, key2: u64) -> AHasher {
AHasher {
buffer: key1,
pad: key2,
}
}
#[cfg(test)]
pub(crate) fn test_with_keys(key1: u64, key2: u64) -> AHasher {
use crate::random_state::scramble_keys;
let (k1, k2) = scramble_keys(key1, key2);
AHasher { buffer: k1, pad: k2 }
}
/// This update function has the goal of updating the buffer with a single multiply
/// FxHash does this but is vulnerable to attack. To avoid this, the input needs to be masked with an
/// unpredictable value. Other hashes such as murmurhash have taken this approach but were found vulnerable
/// to attack. The attack was based on the idea of reversing the pre-mixing (Which is necessarily
/// reversible otherwise bits would be lost) then placing a difference in the highest bit before the
/// multiply used to mix the data. Because a multiply can never affect the bits to the right of it, a
/// subsequent update that also differed in this bit could result in a predictable collision.
///
/// This version avoids this vulnerability while still only using a single multiply. It takes advantage
/// of the fact that when a 64 bit multiply is performed the upper 64 bits are usually computed and thrown
/// away. Instead it creates two 128 bit values where the upper 64 bits are zeros and multiplies them.
/// (The compiler is smart enough to turn this into a 64 bit multiplication in the assembly)
/// Then the upper bits are added to the lower bits to produce a single 64 bit result.
///
/// To understand why this is a good scrambling function it helps to understand multiply-with-carry PRNGs:
/// https://en.wikipedia.org/wiki/Multiply-with-carry_pseudorandom_number_generator
/// If the multiple is chosen well, this creates a long period, decent quality PRNG.
/// Notice that this function is equivalent to this except the `buffer`/`state` is being xored with each
/// new block of data. In the event that data is all zeros, it is exactly equivalent to a MWC PRNG.
///
/// This is impervious to attack because every bit of the buffer at the end is dependent on every bit in
/// `new_data ^ buffer`. For example suppose two inputs differed in only the 5th bit. Then when the
/// multiplication is performed the `result` will differ in bits 5-69. More specifically it will differ by
/// 2^5 * MULTIPLE. However in the next step bits 65-128 are turned into a separate 64 bit value. So the
/// differing bits will be in the lower 6 bits of this value. The two intermediate values that differ in
/// bits 5-63 and in bits 0-5 respectively get added together. Producing an output that differs in every
/// bit. The carries in the multiplication and the addition at the end additionally mean that even if an
/// attacker somehow knew part of (but not all) the contents of the buffer before hand,
/// they would not be able to predict any of the bits in the buffer at the end.
#[inline(always)]
fn update(&mut self, new_data: u64) {
use crate::folded_multiply::FoldedMultiply;
self.buffer = (new_data ^ self.buffer).folded_multiply(&MULTIPLE);
}
/// This update function updates the buffer with the new information in a way that can't be canceled
/// with a subsequent update without knowledge of the content of the buffer prior to the update.
///
/// To achieve this the input needs to be modified in an unpredictable (to an attacker) way before it is
/// combined with the value in the buffer. This is done by xoring it with `key`.
///
/// Other hashes such as murmurhash have taken that approach but were found vulnerable to attack.
/// The attack was based on the idea of reversing any pre-mixing (Which is necessarily reversible otherwise
/// bits would be lost) then placing a difference in the highest bit before the multiply. Because a multiply
/// can never affect the bits to the right of it, a subsequent update that also only differed in the high
/// order bit could cancel out the change to `buffer` from the first update. This allowed murmurhash to be
/// attacked. In this update function aHash avoids this vulnerability by rotating and performing a second
/// multiply.
///
/// This makes it impossible for an attacker to place a single bit difference between
/// two blocks so as to cancel each other. (While the transform is still reversible if you know the key)
///
/// This is similar to the above update function but is designed to run in a loop
/// that will be unrolled and vectorized. So instead of using the buffer, it uses a 'key' that it updates
/// and returns. The buffer is only xored at the end. This structure is so that when the method is inlined,
/// the compiler will unroll any loop this gets placed in and the loop can be automatically vectorized
/// and the rotates, xors, and multiplies can be paralleled.
///
/// The key needs to be incremented between consecutive calls to prevent (a,b) from hashing the same as (b,a).
/// The adding of the increment is moved to the bottom rather than the top. This allows one less add to be
/// performed overall, but more importantly, it follows the multiply, which is expensive. So the CPU can
/// run another operation afterwards if it does not depend on the output of the multiply operation.
#[inline(always)]
fn ordered_update(&mut self, new_data: u64, key: u64) -> u64 {
self.buffer ^= (new_data ^ key)
.wrapping_mul(MULTIPLE)
.rotate_left(ROT)
.wrapping_mul(MULTIPLE);
key.wrapping_add(INCREMENT)
}
}
/// Provides methods to hash all of the primitive types.
impl Hasher for AHasher {
#[inline]
fn write_u8(&mut self, i: u8) {
self.update(i as u64);
}
#[inline]
fn write_u16(&mut self, i: u16) {
self.update(i as u64);
}
#[inline]
fn write_u32(&mut self, i: u32) {
self.update(i as u64);
}
#[inline]
fn write_u64(&mut self, i: u64) {
self.update(i as u64);
}
#[inline]
fn write_u128(&mut self, i: u128) {
let data: [u64; 2] = i.convert();
self.update(data[0]);
self.update(data[1]);
}
#[inline]
fn write_usize(&mut self, i: usize) {
self.write_u64(i as u64);
}
#[inline]
fn write(&mut self, input: &[u8]) {
let mut data = input;
let length = data.len() as u64;
//Needs to be an add rather than an xor because otherwise it could be canceled with carefully formed input.
self.buffer = self.buffer.wrapping_add(length.wrapping_mul(MULTIPLE));
//A 'binary search' on sizes reduces the number of comparisons.
if data.len() > 8 {
if data.len() > 16 {
let tail = data.read_last_u64();
let mut key: u64 = self.buffer;
while data.len() > 8 {
let (val, rest) = data.read_u64();
key = self.ordered_update(val, key);
data = rest;
}
self.update(tail);
} else {
self.update(data.read_u64().0);
self.update(data.read_last_u64());
}
} else {
if data.len() >= 2 {
if data.len() >= 4 {
let block: [u32; 2] = [data.read_u32().0, data.read_last_u32()];
self.update(block.convert());
} else {
let block: [u16; 2] = [data.read_u16().0, data.read_last_u16()];
let val: u32 = block.convert();
self.update(val as u64);
}
} else {
let value = if data.len() > 0 {
data[0] //len 1
} else {
0
};
self.update(value as u64);
}
}
}
#[inline]
fn finish(&self) -> u64 {
(self.buffer ^ self.pad)
}
}
#[cfg(test)]
mod tests {
use crate::convert::Convert;
use crate::fallback_hash::*;
#[test]
fn test_hash() {
let mut hasher = AHasher::new_with_keys(0, 0);
let value: u64 = 1 << 32;
hasher.update(value);
let result = hasher.buffer;
let mut hasher = AHasher::new_with_keys(0, 0);
let value2: u64 = 1;
hasher.update(value2);
let result2 = hasher.buffer;
let result: [u8; 8] = result.convert();
let result2: [u8; 8] = result2.convert();
assert_ne!(hex::encode(result), hex::encode(result2));
}
#[test]
fn test_conversion() {
let input: &[u8] = "dddddddd".as_bytes();
let bytes: u64 = as_array!(input, 8).convert();
assert_eq!(bytes, 0x6464646464646464);
}
}

View file

@ -1,15 +0,0 @@
use crate::convert::*;
use core::ops::Add;
use core::ops::Mul;
pub(crate) trait FoldedMultiply: Mul + Add + Sized {
fn folded_multiply(&self, by: &Self) -> Self;
}
impl FoldedMultiply for u64 {
#[inline(always)]
fn folded_multiply(&self, by: &u64) -> u64 {
let result: [u64; 2] = (*self as u128).wrapping_mul(*by as u128).convert();
result[0].wrapping_add(result[1])
}
}

177
third_party/rust/ahash-0.3.2/src/hash_map.rs vendored
View file

@ -1,177 +0,0 @@
use std::borrow::Borrow;
use std::collections::{hash_map, HashMap};
use std::fmt::{self, Debug};
use std::hash::{BuildHasher, Hash};
use std::iter::FromIterator;
use std::ops::{Deref, DerefMut, Index};
use std::panic::UnwindSafe;
/// A [`HashMap`](std::collections::HashMap) using [`RandomState`](crate::RandomState) to hash the items.
/// Requires the `std` feature to be enabled.
#[derive(Clone)]
pub struct AHashMap<K, V, S = crate::RandomState>(HashMap<K, V, S>);
impl<K, V, S> AHashMap<K, V, S>
where
K: Hash + Eq,
S: BuildHasher + Default,
{
pub fn new() -> Self {
AHashMap(HashMap::with_hasher(S::default()))
}
pub fn with_capacity(capacity: usize) -> Self {
AHashMap(HashMap::with_capacity_and_hasher(capacity, S::default()))
}
}
impl<K, V, S> AHashMap<K, V, S>
where
K: Hash + Eq,
S: BuildHasher,
{
pub fn with_hasher(hash_builder: S) -> Self {
AHashMap(HashMap::with_hasher(hash_builder))
}
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
AHashMap(HashMap::with_capacity_and_hasher(capacity, hash_builder))
}
}
impl<K, V, S> Deref for AHashMap<K, V, S> {
type Target = HashMap<K, V, S>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<K, V, S> DerefMut for AHashMap<K, V, S> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<K, V, S> UnwindSafe for AHashMap<K, V, S>
where
K: UnwindSafe,
V: UnwindSafe,
{
}
impl<K, V, S> PartialEq for AHashMap<K, V, S>
where
K: Eq + Hash,
V: PartialEq,
S: BuildHasher,
{
fn eq(&self, other: &AHashMap<K, V, S>) -> bool {
self.0.eq(&other.0)
}
}
impl<K, V, S> Eq for AHashMap<K, V, S>
where
K: Eq + Hash,
V: Eq,
S: BuildHasher,
{
}
impl<K, Q: ?Sized, V, S> Index<&Q> for AHashMap<K, V, S>
where
K: Eq + Hash + Borrow<Q>,
Q: Eq + Hash,
S: BuildHasher,
{
type Output = V;
/// Returns a reference to the value corresponding to the supplied key.
///
/// # Panics
///
/// Panics if the key is not present in the `HashMap`.
#[inline]
fn index(&self, key: &Q) -> &V {
self.0.index(key)
}
}
impl<K, V, S> Debug for AHashMap<K, V, S>
where
K: Eq + Hash + Debug,
V: Debug,
S: BuildHasher,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl<K, V, S> FromIterator<(K, V)> for AHashMap<K, V, S>
where
K: Eq + Hash,
S: BuildHasher + Default,
{
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
AHashMap(HashMap::from_iter(iter))
}
}
impl<'a, K, V, S> IntoIterator for &'a AHashMap<K, V, S> {
type Item = (&'a K, &'a V);
type IntoIter = hash_map::Iter<'a, K, V>;
fn into_iter(self) -> Self::IntoIter {
(&self.0).iter()
}
}
impl<'a, K, V, S> IntoIterator for &'a mut AHashMap<K, V, S> {
type Item = (&'a K, &'a mut V);
type IntoIter = hash_map::IterMut<'a, K, V>;
fn into_iter(self) -> Self::IntoIter {
(&mut self.0).iter_mut()
}
}
impl<K, V, S> IntoIterator for AHashMap<K, V, S> {
type Item = (K, V);
type IntoIter = hash_map::IntoIter<K, V>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<K, V, S> Extend<(K, V)> for AHashMap<K, V, S>
where
K: Eq + Hash,
S: BuildHasher,
{
#[inline]
fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
self.0.extend(iter)
}
}
impl<'a, K, V, S> Extend<(&'a K, &'a V)> for AHashMap<K, V, S>
where
K: Eq + Hash + Copy + 'a,
V: Copy + 'a,
S: BuildHasher,
{
#[inline]
fn extend<T: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: T) {
self.0.extend(iter)
}
}
impl<K, V, S> Default for AHashMap<K, V, S>
where
K: Eq + Hash,
S: BuildHasher + Default,
{
#[inline]
fn default() -> AHashMap<K, V, S> {
AHashMap::with_hasher(Default::default())
}
}
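`AHashMap` above is a thin newtype over `std::collections::HashMap` that picks `RandomState` as the default hasher and forwards everything else through `Deref`/`DerefMut`. A hedged usage sketch against the 0.3 API shown in this file (crate-root re-export and default hasher as above; the map contents are illustrative):

```rust
use ahash::AHashMap;

fn main() {
    // The default hasher parameter is ahash::RandomState, so `new()` needs no keys.
    let mut counts: AHashMap<&str, u32> = AHashMap::new();
    // `entry`, `insert`, and `get` come from the wrapped HashMap via Deref/DerefMut.
    *counts.entry("hashbrown").or_insert(0) += 1;
    counts.insert("mp4parse", 2);
    assert_eq!(counts.get("hashbrown"), Some(&1));
    println!("{:?}", counts);
}
```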


@ -1,322 +0,0 @@
use core::hash::{Hash, Hasher};
fn assert_sufficiently_different(a: u64, b: u64, tolerance: i32) {
let (same_byte_count, same_nibble_count) = count_same_bytes_and_nibbles(a, b);
assert!(same_byte_count <= tolerance, "{:x} vs {:x}: {:}", a, b, same_byte_count);
assert!(
same_nibble_count <= tolerance * 3,
"{:x} vs {:x}: {:}",
a,
b,
same_nibble_count
);
let flipped_bits = (a ^ b).count_ones();
assert!(
flipped_bits > 12 && flipped_bits < 52,
"{:x} and {:x}: {:}",
a,
b,
flipped_bits
);
for rotate in 0..64 {
let flipped_bits2 = (a ^ (b.rotate_left(rotate))).count_ones();
assert!(
flipped_bits2 > 10 && flipped_bits2 < 54,
"{:x} and {:x}: {:}",
a,
b.rotate_left(rotate),
flipped_bits2
);
}
}
fn count_same_bytes_and_nibbles(a: u64, b: u64) -> (i32, i32) {
let mut same_byte_count = 0;
let mut same_nibble_count = 0;
for byte in 0..8 {
let ba = (a >> (8 * byte)) as u8;
let bb = (b >> (8 * byte)) as u8;
if ba == bb {
same_byte_count += 1;
}
if ba & 0xF0u8 == bb & 0xF0u8 {
same_nibble_count += 1;
}
if ba & 0x0Fu8 == bb & 0x0Fu8 {
same_nibble_count += 1;
}
}
(same_byte_count, same_nibble_count)
}
fn test_keys_change_output<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
let mut a = constructor(0, 0);
let mut b = constructor(0, 1);
let mut c = constructor(1, 0);
let mut d = constructor(1, 1);
"test".hash(&mut a);
"test".hash(&mut b);
"test".hash(&mut c);
"test".hash(&mut d);
assert_sufficiently_different(a.finish(), b.finish(), 1);
assert_sufficiently_different(a.finish(), c.finish(), 1);
assert_sufficiently_different(a.finish(), d.finish(), 1);
assert_sufficiently_different(b.finish(), c.finish(), 1);
assert_sufficiently_different(b.finish(), d.finish(), 1);
assert_sufficiently_different(c.finish(), d.finish(), 1);
}
fn test_input_affect_every_byte<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
let mut base = constructor(0, 0);
0.hash(&mut base);
let base = base.finish();
for shift in 0..16 {
let mut alternitives = vec![];
for v in 0..256 {
let input = (v as u128) << (shift * 8);
let mut hasher = constructor(0, 0);
input.hash(&mut hasher);
alternitives.push(hasher.finish());
}
assert_each_byte_differes(base, alternitives);
}
}
fn test_keys_affect_every_byte<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
let mut base = constructor(0, 0);
0.hash(&mut base);
let base = base.finish();
for shift in 0..8 {
let mut alternitives1 = vec![];
let mut alternitives2 = vec![];
for v in 0..256 {
let input = (v as u64) << (shift * 8);
let mut hasher1 = constructor(input, 0);
let mut hasher2 = constructor(0, input);
0.hash(&mut hasher1);
0.hash(&mut hasher2);
alternitives1.push(hasher1.finish());
alternitives2.push(hasher2.finish());
}
assert_each_byte_differes(base, alternitives1);
assert_each_byte_differes(base, alternitives2);
}
}
fn assert_each_byte_differes(base: u64, alternitives: Vec<u64>) {
let mut changed_bits = 0_u64;
for alternitive in alternitives {
changed_bits |= base ^ alternitive
}
assert_eq!(core::u64::MAX, changed_bits, "Bits changed: {:x}", changed_bits);
}
fn test_finish_is_consistant<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
let mut hasher = constructor(1, 2);
"Foo".hash(&mut hasher);
let a = hasher.finish();
let b = hasher.finish();
assert_eq!(a, b);
}
fn test_single_key_bit_flip<T: Hasher>(constructor: impl Fn(u64, u64) -> T) {
for bit in 0..64 {
let mut a = constructor(0, 0);
let mut b = constructor(0, 1 << bit);
let mut c = constructor(1 << bit, 0);
"1234".hash(&mut a);
"1234".hash(&mut b);
"1234".hash(&mut c);
assert_sufficiently_different(a.finish(), b.finish(), 2);
assert_sufficiently_different(a.finish(), c.finish(), 2);
assert_sufficiently_different(b.finish(), c.finish(), 2);
let mut a = constructor(0, 0);
let mut b = constructor(0, 1 << bit);
let mut c = constructor(1 << bit, 0);
"12345678".hash(&mut a);
"12345678".hash(&mut b);
"12345678".hash(&mut c);
assert_sufficiently_different(a.finish(), b.finish(), 2);
assert_sufficiently_different(a.finish(), c.finish(), 2);
assert_sufficiently_different(b.finish(), c.finish(), 2);
let mut a = constructor(0, 0);
let mut b = constructor(0, 1 << bit);
let mut c = constructor(1 << bit, 0);
"1234567812345678".hash(&mut a);
"1234567812345678".hash(&mut b);
"1234567812345678".hash(&mut c);
assert_sufficiently_different(a.finish(), b.finish(), 2);
assert_sufficiently_different(a.finish(), c.finish(), 2);
assert_sufficiently_different(b.finish(), c.finish(), 2);
}
}
fn test_all_bytes_matter<T: Hasher>(hasher: impl Fn() -> T) {
let mut item = vec![0; 256];
let base_hash = hash(&item, &hasher);
for pos in 0..256 {
item[pos] = 255;
let hash = hash(&item, &hasher);
assert_ne!(base_hash, hash, "Position {} did not affect output", pos);
item[pos] = 0;
}
}
fn hash<T: Hasher>(b: &impl Hash, hasher: &dyn Fn() -> T) -> u64 {
let mut hasher = hasher();
b.hash(&mut hasher);
hasher.finish()
}
fn test_single_bit_flip<T: Hasher>(hasher: impl Fn() -> T) {
let size = 32;
let compare_value = hash(&0u32, &hasher);
for pos in 0..size {
let test_value = hash(&(1u32 << pos), &hasher);
assert_sufficiently_different(compare_value, test_value, 2);
}
let size = 64;
let compare_value = hash(&0u64, &hasher);
for pos in 0..size {
let test_value = hash(&(1u64 << pos), &hasher);
assert_sufficiently_different(compare_value, test_value, 2);
}
let size = 128;
let compare_value = hash(&0u128, &hasher);
for pos in 0..size {
let test_value = hash(&(1u128 << pos), &hasher);
assert_sufficiently_different(compare_value, test_value, 2);
}
}
fn test_padding_doesnot_collide<T: Hasher>(hasher: impl Fn() -> T) {
for c in 0..128u8 {
for string in ["", "1234", "12345678", "1234567812345678"].iter() {
let mut short = hasher();
string.hash(&mut short);
let value = short.finish();
let mut string = string.to_string();
for num in 1..=128 {
let mut long = hasher();
string.push(c as char);
string.hash(&mut long);
let (same_bytes, same_nibbles) = count_same_bytes_and_nibbles(value, long.finish());
assert!(
same_bytes <= 2,
format!("{} bytes of {} -> {:x} vs {:x}", num, c, value, long.finish())
);
assert!(
same_nibbles <= 8,
format!("{} bytes of {} -> {:x} vs {:x}", num, c, value, long.finish())
);
let flipped_bits = (value ^ long.finish()).count_ones();
assert!(flipped_bits > 10);
}
}
}
}
#[cfg(test)]
mod fallback_tests {
use crate::fallback_hash::*;
use crate::hash_quality_test::*;
#[test]
fn fallback_single_bit_flip() {
test_single_bit_flip(|| AHasher::test_with_keys(0, 0))
}
#[test]
fn fallback_single_key_bit_flip() {
test_single_key_bit_flip(AHasher::test_with_keys)
}
#[test]
fn fallback_all_bytes_matter() {
test_all_bytes_matter(|| AHasher::test_with_keys(0, 0));
}
#[test]
fn fallback_keys_change_output() {
test_keys_change_output(AHasher::test_with_keys);
}
#[test]
fn fallback_input_affect_every_byte() {
test_input_affect_every_byte(AHasher::test_with_keys);
}
#[test]
fn fallback_keys_affect_every_byte() {
test_keys_affect_every_byte(AHasher::test_with_keys);
}
#[test]
fn fallback_finish_is_consistant() {
test_finish_is_consistant(AHasher::test_with_keys)
}
#[test]
fn fallback_padding_doesnot_collide() {
test_padding_doesnot_collide(|| AHasher::test_with_keys(0, 1))
}
}
///Basic sanity tests of the crypto properties of aHash.
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
#[cfg(test)]
mod aes_tests {
use crate::aes_hash::*;
use crate::hash_quality_test::*;
use std::hash::{Hash, Hasher};
const BAD_KEY: u64 = 0x5252_5252_5252_5252; //This encrypts to 0.
#[test]
fn test_single_bit_in_byte() {
let mut hasher1 = AHasher::new_with_keys(64, 64);
8_u32.hash(&mut hasher1);
let mut hasher2 = AHasher::new_with_keys(64, 64);
0_u32.hash(&mut hasher2);
assert_sufficiently_different(hasher1.finish(), hasher2.finish(), 1);
}
#[test]
fn aes_single_bit_flip() {
test_single_bit_flip(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY))
}
#[test]
fn aes_single_key_bit_flip() {
test_single_key_bit_flip(|k1, k2| AHasher::test_with_keys(k1, k2))
}
#[test]
fn aes_all_bytes_matter() {
test_all_bytes_matter(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY));
}
#[test]
fn aes_keys_change_output() {
test_keys_change_output(AHasher::test_with_keys);
}
#[test]
fn aes_input_affect_every_byte() {
test_input_affect_every_byte(AHasher::test_with_keys);
}
#[test]
fn aes_keys_affect_every_byte() {
test_keys_affect_every_byte(AHasher::test_with_keys);
}
#[test]
fn aes_finish_is_consistant() {
test_finish_is_consistant(AHasher::test_with_keys)
}
#[test]
fn aes_padding_doesnot_collide() {
test_padding_doesnot_collide(|| AHasher::test_with_keys(BAD_KEY, BAD_KEY))
}
}

267
third_party/rust/ahash-0.3.2/src/hash_set.rs vendored

@ -1,267 +0,0 @@
use std::collections::{hash_set, HashSet};
use std::fmt::{self, Debug};
use std::hash::{BuildHasher, Hash};
use std::iter::FromIterator;
use std::ops::{BitAnd, BitOr, BitXor, Deref, DerefMut, Sub};
/// A [`HashSet`](std::collections::HashSet) using [`RandomState`](crate::RandomState) to hash the items.
/// Requires the `std` feature to be enabled.
#[derive(Clone)]
pub struct AHashSet<T, S = crate::RandomState>(HashSet<T, S>);
impl<T, S> AHashSet<T, S>
where
T: Hash + Eq,
S: BuildHasher + Default,
{
pub fn new() -> Self {
AHashSet(HashSet::with_hasher(S::default()))
}
pub fn with_capacity(capacity: usize) -> Self {
AHashSet(HashSet::with_capacity_and_hasher(capacity, S::default()))
}
}
impl<T, S> AHashSet<T, S>
where
T: Hash + Eq,
S: BuildHasher,
{
pub fn with_hasher(hash_builder: S) -> Self {
AHashSet(HashSet::with_hasher(hash_builder))
}
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
AHashSet(HashSet::with_capacity_and_hasher(capacity, hash_builder))
}
}
impl<T, S> Deref for AHashSet<T, S> {
type Target = HashSet<T, S>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<T, S> DerefMut for AHashSet<T, S> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<T, S> PartialEq for AHashSet<T, S>
where
T: Eq + Hash,
S: BuildHasher,
{
fn eq(&self, other: &AHashSet<T, S>) -> bool {
self.0.eq(&other.0)
}
}
impl<T, S> Eq for AHashSet<T, S>
where
T: Eq + Hash,
S: BuildHasher,
{
}
impl<T, S> BitOr<&AHashSet<T, S>> for &AHashSet<T, S>
where
T: Eq + Hash + Clone,
S: BuildHasher + Default,
{
type Output = AHashSet<T, S>;
/// Returns the union of `self` and `rhs` as a new `AHashSet<T, S>`.
///
/// # Examples
///
/// ```
/// use ahash::AHashSet;
///
/// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
/// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect();
///
/// let set = &a | &b;
///
/// let mut i = 0;
/// let expected = [1, 2, 3, 4, 5];
/// for x in &set {
/// assert!(expected.contains(x));
/// i += 1;
/// }
/// assert_eq!(i, expected.len());
/// ```
fn bitor(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
AHashSet(self.0.bitor(&rhs.0))
}
}
impl<T, S> BitAnd<&AHashSet<T, S>> for &AHashSet<T, S>
where
T: Eq + Hash + Clone,
S: BuildHasher + Default,
{
type Output = AHashSet<T, S>;
/// Returns the intersection of `self` and `rhs` as a new `AHashSet<T, S>`.
///
/// # Examples
///
/// ```
/// use ahash::AHashSet;
///
/// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
/// let b: AHashSet<_> = vec![2, 3, 4].into_iter().collect();
///
/// let set = &a & &b;
///
/// let mut i = 0;
/// let expected = [2, 3];
/// for x in &set {
/// assert!(expected.contains(x));
/// i += 1;
/// }
/// assert_eq!(i, expected.len());
/// ```
fn bitand(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
AHashSet(self.0.bitand(&rhs.0))
}
}
impl<T, S> BitXor<&AHashSet<T, S>> for &AHashSet<T, S>
where
T: Eq + Hash + Clone,
S: BuildHasher + Default,
{
type Output = AHashSet<T, S>;
/// Returns the symmetric difference of `self` and `rhs` as a new `AHashSet<T, S>`.
///
/// # Examples
///
/// ```
/// use ahash::AHashSet;
///
/// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
/// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect();
///
/// let set = &a ^ &b;
///
/// let mut i = 0;
/// let expected = [1, 2, 4, 5];
/// for x in &set {
/// assert!(expected.contains(x));
/// i += 1;
/// }
/// assert_eq!(i, expected.len());
/// ```
fn bitxor(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
AHashSet(self.0.bitxor(&rhs.0))
}
}
impl<T, S> Sub<&AHashSet<T, S>> for &AHashSet<T, S>
where
T: Eq + Hash + Clone,
S: BuildHasher + Default,
{
type Output = AHashSet<T, S>;
/// Returns the difference of `self` and `rhs` as a new `AHashSet<T, S>`.
///
/// # Examples
///
/// ```
/// use ahash::AHashSet;
///
/// let a: AHashSet<_> = vec![1, 2, 3].into_iter().collect();
/// let b: AHashSet<_> = vec![3, 4, 5].into_iter().collect();
///
/// let set = &a - &b;
///
/// let mut i = 0;
/// let expected = [1, 2];
/// for x in &set {
/// assert!(expected.contains(x));
/// i += 1;
/// }
/// assert_eq!(i, expected.len());
/// ```
fn sub(self, rhs: &AHashSet<T, S>) -> AHashSet<T, S> {
AHashSet(self.0.sub(&rhs.0))
}
}
impl<T, S> Debug for AHashSet<T, S>
where
T: Eq + Hash + Debug,
S: BuildHasher,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl<T, S> FromIterator<T> for AHashSet<T, S>
where
T: Eq + Hash,
S: BuildHasher + Default,
{
#[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> AHashSet<T, S> {
AHashSet(HashSet::from_iter(iter))
}
}
impl<'a, T, S> IntoIterator for &'a AHashSet<T, S> {
type Item = &'a T;
type IntoIter = hash_set::Iter<'a, T>;
fn into_iter(self) -> Self::IntoIter {
(&self.0).iter()
}
}
impl<T, S> IntoIterator for AHashSet<T, S> {
type Item = T;
type IntoIter = hash_set::IntoIter<T>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<T, S> Extend<T> for AHashSet<T, S>
where
T: Eq + Hash,
S: BuildHasher,
{
#[inline]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
self.0.extend(iter)
}
}
impl<'a, T, S> Extend<&'a T> for AHashSet<T, S>
where
T: 'a + Eq + Hash + Copy,
S: BuildHasher,
{
#[inline]
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.0.extend(iter)
}
}
impl<T, S> Default for AHashSet<T, S>
where
T: Eq + Hash,
S: BuildHasher + Default,
{
/// Creates an empty `AHashSet<T, S>` with the `Default` value for the hasher.
#[inline]
fn default() -> AHashSet<T, S> {
AHashSet(HashSet::default())
}
}

132
third_party/rust/ahash-0.3.2/src/lib.rs vendored

@ -1,132 +0,0 @@
//! # aHash
//!
//! This hashing algorithm is intended to be a high performance, (hardware specific), keyed hash function.
//! This can be seen as a DOS resistant alternative to `FxHash`, or a fast equivalent to `SipHash`.
//! It provides a high speed hash algorithm, but where the result is not predictable without knowing a Key.
//! This allows it to be used in a `HashMap` without allowing for the possibility that a malicious user can
//! induce a collision.
//!
//! # How aHash works
//!
//! aHash uses the hardware AES instruction on x86 processors to provide a keyed hash function.
//! It uses two rounds of AES per hash. So it should not be considered cryptographically secure.
#![deny(clippy::correctness, clippy::complexity, clippy::perf)]
#![allow(clippy::pedantic, clippy::cast_lossless, clippy::unreadable_literal)]
#![cfg_attr(all(not(test), not(feature = "std")), no_std)]
#[macro_use]
mod convert;
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
mod aes_hash;
mod fallback_hash;
#[cfg(test)]
mod hash_quality_test;
mod folded_multiply;
#[cfg(feature = "std")]
mod hash_map;
#[cfg(feature = "std")]
mod hash_set;
mod random_state;
#[cfg(feature = "compile-time-rng")]
use const_random::const_random;
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
pub use crate::aes_hash::AHasher;
#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes")))]
pub use crate::fallback_hash::AHasher;
pub use crate::random_state::RandomState;
#[cfg(feature = "std")]
pub use crate::hash_map::AHashMap;
#[cfg(feature = "std")]
pub use crate::hash_set::AHashSet;
/// Provides a default [Hasher] with compile time generated constants for keys.
/// This is typically used in conjunction with [`BuildHasherDefault`] to create
/// [AHasher]s in order to hash the keys of the map.
///
/// # Example
/// ```
/// use std::hash::BuildHasherDefault;
/// use ahash::{AHasher, RandomState};
/// use std::collections::HashMap;
///
/// let mut map: HashMap<i32, i32, RandomState> = HashMap::default();
/// map.insert(12, 34);
/// ```
///
/// [BuildHasherDefault]: std::hash::BuildHasherDefault
/// [Hasher]: std::hash::Hasher
/// [HashMap]: std::collections::HashMap
#[cfg(feature = "compile-time-rng")]
impl Default for AHasher {
/// Constructs a new [AHasher] with compile time generated constants for keys.
/// This means the keys will be the same from one instance to another,
/// but different from one build to the next. So if it is possible for a potential
/// attacker to have access to the compiled binary it would be better
/// to specify keys generated at runtime.
///
/// This is defined only if the `compile-time-rng` feature is enabled.
///
/// # Examples
///
/// ```
/// use ahash::AHasher;
/// use std::hash::Hasher;
///
/// let mut hasher_1 = AHasher::default();
/// let mut hasher_2 = AHasher::default();
///
/// hasher_1.write_u32(1234);
/// hasher_2.write_u32(1234);
///
/// assert_eq!(hasher_1.finish(), hasher_2.finish());
/// ```
#[inline]
fn default() -> AHasher {
AHasher::new_with_keys(const_random!(u64), const_random!(u64))
}
}
//#[inline(never)]
//pub fn hash_test(input: &[u8]) -> u64 {
// use std::hash::Hasher;
// let mut a = AHasher::new_with_keys(67, 87);
// a.write(input);
// a.finish()
//}
#[cfg(test)]
mod test {
use crate::convert::Convert;
use crate::*;
use core::hash::BuildHasherDefault;
use std::collections::HashMap;
#[test]
fn test_default_builder() {
let mut map = HashMap::<u32, u64, BuildHasherDefault<AHasher>>::default();
map.insert(1, 3);
}
#[test]
fn test_builder() {
let mut map = HashMap::<u32, u64, RandomState>::default();
map.insert(1, 3);
}
#[test]
fn test_conversion() {
let input: &[u8] = b"dddddddd";
let bytes: u64 = as_array!(input, 8).convert();
assert_eq!(bytes, 0x6464646464646464);
}
#[test]
fn test_ahasher_construction() {
let _ = AHasher::new_with_keys(1245, 5678);
}
}


@ -1,104 +0,0 @@
use crate::AHasher;
use core::hash::BuildHasher;
use core::sync::atomic::AtomicUsize;
use core::sync::atomic::Ordering;
#[cfg(feature = "compile-time-rng")]
use const_random::const_random;
///This constant comes from Knuth's PRNG
pub(crate) const MULTIPLE: u64 = 6364136223846793005;
// Const random provides randomized starting key with no runtime cost.
#[cfg(feature = "compile-time-rng")]
static SEED: AtomicUsize = AtomicUsize::new(const_random!(u64));
#[cfg(not(feature = "compile-time-rng"))]
static SEED: AtomicUsize = AtomicUsize::new(MULTIPLE as usize);
/// Provides a [Hasher] factory. This is typically used (e.g. by [`HashMap`]) to create
/// [AHasher]s in order to hash the keys of the map. See `build_hasher` below.
///
/// [build_hasher]: ahash::
/// [Hasher]: std::hash::Hasher
/// [BuildHasher]: std::hash::BuildHasher
/// [HashMap]: std::collections::HashMap
#[derive(Clone)]
pub struct RandomState {
pub(crate) k0: u64,
pub(crate) k1: u64,
}
impl RandomState {
#[inline]
pub fn new() -> RandomState {
//Using a self pointer. When running with ASLR this is a random value.
let previous = SEED.load(Ordering::Relaxed) as u64;
let stack_mem_loc = &previous as *const _ as u64;
//This is similar to the update function in the fallback.
//only one multiply is needed because memory locations are not under an attacker's control.
let current_seed = previous
.wrapping_mul(MULTIPLE)
.wrapping_add(stack_mem_loc)
.rotate_left(31);
SEED.store(current_seed as usize, Ordering::Relaxed);
let (k0, k1) = scramble_keys(&SEED as *const _ as u64, current_seed);
RandomState { k0, k1 }
}
}
pub(crate) fn scramble_keys(k0: u64, k1: u64) -> (u64, u64) {
//Scramble seeds (based on xoroshiro128+)
//This is intentionally not similar to the hash algorithm
let result1 = k0.wrapping_add(k1);
let k1 = k1 ^ k0;
let k0 = k0.rotate_left(24) ^ k1 ^ (k1.wrapping_shl(16));
let result2 = k0.wrapping_add(k1.rotate_left(37));
(result2, result1)
}
impl Default for RandomState {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl BuildHasher for RandomState {
type Hasher = AHasher;
/// Constructs a new [AHasher] with keys based on compile time generated constants** and the location
/// of this object in memory. This means that two different [BuildHasher]s will generate
/// [AHasher]s that will return different hashcodes, but [Hasher]s created from the same [BuildHasher]
/// will generate the same hashes for the same input data.
///
/// ** - only if the `compile-time-rng` feature is enabled.
///
/// # Examples
///
/// ```
/// use ahash::{AHasher, RandomState};
/// use std::hash::{Hasher, BuildHasher};
///
/// let build_hasher = RandomState::new();
/// let mut hasher_1 = build_hasher.build_hasher();
/// let mut hasher_2 = build_hasher.build_hasher();
///
/// hasher_1.write_u32(1234);
/// hasher_2.write_u32(1234);
///
/// assert_eq!(hasher_1.finish(), hasher_2.finish());
///
/// let other_build_hasher = RandomState::new();
/// let mut different_hasher = other_build_hasher.build_hasher();
/// different_hasher.write_u32(1234);
/// assert_ne!(different_hasher.finish(), hasher_1.finish());
/// ```
/// [Hasher]: std::hash::Hasher
/// [BuildHasher]: std::hash::BuildHasher
/// [HashMap]: std::collections::HashMap
#[inline]
fn build_hasher(&self) -> AHasher {
AHasher::new_with_keys(self.k0, self.k1)
}
}

226
third_party/rust/ahash-0.3.2/tests/bench.rs vendored

@ -1,226 +0,0 @@
use ahash::AHasher;
use criterion::*;
use fxhash::FxHasher;
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
fn aeshash<H: Hash>(b: H) -> u64 {
let mut hasher = AHasher::new_with_keys(1234, 5678);
b.hash(&mut hasher);
hasher.finish()
}
#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes")))]
fn aeshash<H: Hash>(_b: H) -> u64 {
panic!("aes must be enabled")
}
#[cfg(not(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes")))]
fn fallbackhash<H: Hash>(b: H) -> u64 {
let mut hasher = AHasher::new_with_keys(1234, 5678);
b.hash(&mut hasher);
hasher.finish()
}
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), target_feature = "aes"))]
fn fallbackhash<H: Hash>(_b: H) -> u64 {
panic!("aes must be disabled")
}
fn fnvhash<H: Hash>(b: H) -> u64 {
let mut hasher = fnv::FnvHasher::default();
b.hash(&mut hasher);
hasher.finish()
}
fn siphash<H: Hash>(b: H) -> u64 {
let mut hasher = DefaultHasher::default();
b.hash(&mut hasher);
hasher.finish()
}
fn fxhash<H: Hash>(b: H) -> u64 {
let mut hasher = FxHasher::default();
b.hash(&mut hasher);
hasher.finish()
}
fn seahash<H: Hash>(b: H) -> u64 {
let mut hasher = seahash::SeaHasher::default();
b.hash(&mut hasher);
hasher.finish()
}
const STRING_LENGTHS: [u32; 11] = [1, 3, 4, 7, 8, 15, 16, 24, 68, 132, 1024];
fn gen_strings() -> Vec<String> {
STRING_LENGTHS
.iter()
.map(|len| {
let mut string = String::default();
for pos in 1..=*len {
let c = (48 + (pos % 10) as u8) as char;
string.push(c);
}
string
})
.collect()
}
const U8_VALUES: [u8; 1] = [8];
const U16_VALUES: [u16; 1] = [16];
const U32_VALUES: [u32; 1] = [32];
const U64_VALUES: [u64; 1] = [64];
const U128_VALUES: [u128; 1] = [128];
fn bench_ahash(c: &mut Criterion) {
c.bench(
"aeshash",
ParameterizedBenchmark::new("u8", |b, s| b.iter(|| black_box(aeshash(s))), &U8_VALUES),
);
c.bench(
"aeshash",
ParameterizedBenchmark::new("u16", |b, s| b.iter(|| black_box(aeshash(s))), &U16_VALUES),
);
c.bench(
"aeshash",
ParameterizedBenchmark::new("u32", |b, s| b.iter(|| black_box(aeshash(s))), &U32_VALUES),
);
c.bench(
"aeshash",
ParameterizedBenchmark::new("u64", |b, s| b.iter(|| black_box(aeshash(s))), &U64_VALUES),
);
c.bench(
"aeshash",
ParameterizedBenchmark::new("u128", |b, s| b.iter(|| black_box(aeshash(s))), &U128_VALUES),
);
c.bench(
"aeshash",
ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(aeshash(s))), gen_strings()),
);
}
fn bench_fallback(c: &mut Criterion) {
c.bench(
"fallback",
ParameterizedBenchmark::new("u8", |b, s| b.iter(|| black_box(fallbackhash(s))), &U8_VALUES),
);
c.bench(
"fallback",
ParameterizedBenchmark::new("u16", |b, s| b.iter(|| black_box(fallbackhash(s))), &U16_VALUES),
);
c.bench(
"fallback",
ParameterizedBenchmark::new("u32", |b, s| b.iter(|| black_box(fallbackhash(s))), &U32_VALUES),
);
c.bench(
"fallback",
ParameterizedBenchmark::new("u64", |b, s| b.iter(|| black_box(fallbackhash(s))), &U64_VALUES),
);
c.bench(
"fallback",
ParameterizedBenchmark::new("u128", |b, s| b.iter(|| black_box(fallbackhash(s))), &U128_VALUES),
);
c.bench(
"fallback",
ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(fallbackhash(s))), gen_strings()),
);
}
fn bench_fx(c: &mut Criterion) {
c.bench(
"fx",
ParameterizedBenchmark::new("u8", |b, s| b.iter(|| black_box(fxhash(s))), &U8_VALUES),
);
c.bench(
"fx",
ParameterizedBenchmark::new("u16", |b, s| b.iter(|| black_box(fxhash(s))), &U16_VALUES),
);
c.bench(
"fx",
ParameterizedBenchmark::new("u32", |b, s| b.iter(|| black_box(fxhash(s))), &U32_VALUES),
);
c.bench(
"fx",
ParameterizedBenchmark::new("u64", |b, s| b.iter(|| black_box(fxhash(s))), &U64_VALUES),
);
c.bench(
"fx",
ParameterizedBenchmark::new("u128", |b, s| b.iter(|| black_box(fxhash(s))), &U128_VALUES),
);
c.bench(
"fx",
ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(fxhash(s))), gen_strings()),
);
}
fn bench_fnv(c: &mut Criterion) {
c.bench(
"fnv",
ParameterizedBenchmark::new("u8", |b, s| b.iter(|| black_box(fnvhash(s))), &U8_VALUES),
);
c.bench(
"fnv",
ParameterizedBenchmark::new("u16", |b, s| b.iter(|| black_box(fnvhash(s))), &U16_VALUES),
);
c.bench(
"fnv",
ParameterizedBenchmark::new("u32", |b, s| b.iter(|| black_box(fnvhash(s))), &U32_VALUES),
);
c.bench(
"fnv",
ParameterizedBenchmark::new("u64", |b, s| b.iter(|| black_box(fnvhash(s))), &U64_VALUES),
);
c.bench(
"fnv",
ParameterizedBenchmark::new("u128", |b, s| b.iter(|| black_box(fnvhash(s))), &U128_VALUES),
);
c.bench(
"fnv",
ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(fnvhash(s))), gen_strings()),
);
}
fn bench_sea(c: &mut Criterion) {
c.bench(
"sea",
ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(seahash(s))), gen_strings()),
);
}
fn bench_sip(c: &mut Criterion) {
c.bench(
"sip",
ParameterizedBenchmark::new("u8", |b, s| b.iter(|| black_box(siphash(s))), &U8_VALUES),
);
c.bench(
"sip",
ParameterizedBenchmark::new("u16", |b, s| b.iter(|| black_box(siphash(s))), &U16_VALUES),
);
c.bench(
"sip",
ParameterizedBenchmark::new("u32", |b, s| b.iter(|| black_box(siphash(s))), &U32_VALUES),
);
c.bench(
"sip",
ParameterizedBenchmark::new("u64", |b, s| b.iter(|| black_box(siphash(s))), &U64_VALUES),
);
c.bench(
"sip",
ParameterizedBenchmark::new("u128", |b, s| b.iter(|| black_box(siphash(s))), &U128_VALUES),
);
c.bench(
"sip",
ParameterizedBenchmark::new("string", |b, s| b.iter(|| black_box(siphash(s))), gen_strings()),
);
}
criterion_main!(benches);
criterion_group!(
benches,
bench_ahash,
bench_fallback,
bench_fx,
bench_fnv,
bench_sea,
bench_sip
);


@ -1,202 +0,0 @@
use std::hash::{Hash, Hasher};
use criterion::*;
use fxhash::FxHasher;
use ahash::AHasher;
fn gen_word_pairs() -> Vec<String> {
let words: Vec<_> = r#"
a, ability, able, about, above, accept, according, account, across, act, action,
activity, actually, add, address, administration, admit, adult, affect, after,
again, against, age, agency, agent, ago, agree, agreement, ahead, air, all,
allow, almost, alone, along, already, also, although, always, American, among,
amount, analysis, and, animal, another, answer, any, anyone, anything, appear,
apply, approach, area, argue, arm, around, arrive, art, article, artist, as,
ask, assume, at, attack, attention, attorney, audience, author, authority,
available, avoid, away, baby, back, bad, bag, ball, bank, bar, base, be, beat,
beautiful, because, become, bed, before, begin, behavior, behind, believe,
benefit, best, better, between, beyond, big, bill, billion, bit, black, blood,
blue, board, body, book, born, both, box, boy, break, bring, brother, budget,
build, building, business, but, buy, by, call, camera, campaign, can, cancer,
candidate, capital, car, card, care, career, carry, case, catch, cause, cell,
center, central, century, certain, certainly, chair, challenge, chance, change,
character, charge, check, child, choice, choose, church, citizen, city, civil,
claim, class, clear, clearly, close, coach, cold, collection, college, color,
come, commercial, common, community, company, compare, computer, concern,
condition, conference, Congress, consider, consumer, contain, continue, control,
cost, could, country, couple, course, court, cover, create, crime, cultural,
culture, cup, current, customer, cut, dark, data, daughter, day, dead, deal,
death, debate, decade, decide, decision, deep, defense, degree, Democrat,
democratic, describe, design, despite, detail, determine, develop, development,
die, difference, different, difficult, dinner, direction, director, discover,
discuss, discussion, disease, do, doctor, dog, door, down, draw, dream, drive,
drop, drug, during, each, early, east, easy, eat, economic, economy, edge,
education, effect, effort, eight, either, election, else, employee, end, energy,
enjoy, enough, enter, entire, environment, environmental, especially, establish,
even, evening, event, ever, every, everybody, everyone, everything, evidence,
exactly, example, executive, exist, expect, experience, expert, explain, eye,
face, fact, factor, fail, fall, family, far, fast, father, fear, federal, feel,
feeling, few, field, fight, figure, fill, film, final, finally, financial, find,
fine, finger, finish, fire, firm, first, fish, five, floor, fly, focus, follow,
food, foot, for, force, foreign, forget, form, former, forward, four, free,
friend, from, front, full, fund, future, game, garden, gas, general, generation,
get, girl, give, glass, go, goal, good, government, great, green, ground, group,
grow, growth, guess, gun, guy, hair, half, hand, hang, happen, happy, hard,
have, he, head, health, hear, heart, heat, heavy, help, her, here, herself,
high, him, himself, his, history, hit, hold, home, hope, hospital, hot, hotel,
hour, house, how, however, huge, human, hundred, husband, I, idea, identify, if,
image, imagine, impact, important, improve, in, include, including, increase,
indeed, indicate, individual, industry, information, inside, instead,
institution, interest, interesting, international, interview, into, investment,
involve, issue, it, item, its, itself, job, join, just, keep, key, kid, kill,
kind, kitchen, know, knowledge, land, language, large, last, late, later, laugh,
law, lawyer, lay, lead, leader, learn, least, leave, left, leg, legal, less,
let, letter, level, lie, life, light, like, likely, line, list, listen, little,
live, local, long, look, lose, loss, lot, love, low, machine, magazine, main,
maintain, major, majority, make, man, manage, management, manager, many, market,
marriage, material, matter, may, maybe, me, mean, measure, media, medical, meet,
meeting, member, memory, mention, message, method, middle, might, military,
million, mind, minute, miss, mission, model, modern, moment, money, month, more,
morning, most, mother, mouth, move, movement, movie, Mr, Mrs, much, music, must,
my, myself, name, nation, national, natural, nature, near, nearly, necessary,
need, network, never, new, news, newspaper, next, nice, night, no, none, nor,
north, not, note, nothing, notice, now, n't, number, occur, of, off, offer,
office, officer, official, often, oh, oil, ok, old, on, once, one, only, onto,
open, operation, opportunity, option, or, order, organization, other, others,
our, out, outside, over, own, owner, page, pain, painting, paper, parent, part,
participant, particular, particularly, partner, party, pass, past, patient,
pattern, pay, peace, people, per, perform, performance, perhaps, period, person,
personal, phone, physical, pick, picture, piece, place, plan, plant, play,
player, PM, point, police, policy, political, politics, poor, popular,
population, position, positive, possible, power, practice, prepare, present,
president, pressure, pretty, prevent, price, private, probably, problem,
process, produce, product, production, professional, professor, program,
project, property, protect, prove, provide, public, pull, purpose, push, put,
quality, question, quickly, quite, race, radio, raise, range, rate, rather,
reach, read, ready, real, reality, realize, really, reason, receive, recent,
recently, recognize, record, red, reduce, reflect, region, relate, relationship,
religious, remain, remember, remove, report, represent, Republican, require,
research, resource, respond, response, responsibility, rest, result, return,
reveal, rich, right, rise, risk, road, rock, role, room, rule, run, safe, same,
save, say, scene, school, science, scientist, score, sea, season, seat, second,
section, security, see, seek, seem, sell, send, senior, sense, series, serious,
serve, service, set, seven, several, sex, sexual, shake, share, she, shoot,
short, shot, should, shoulder, show, side, sign, significant, similar, simple,
simply, since, sing, single, sister, sit, site, situation, six, size, skill,
skin, small, smile, so, social, society, soldier, some, somebody, someone,
something, sometimes, son, song, soon, sort, sound, source, south, southern,
space, speak, special, specific, speech, spend, sport, spring, staff, stage,
stand, standard, star, start, state, statement, station, stay, step, still,
stock, stop, store, story, strategy, street, strong, structure, student, study,
stuff, style, subject, success, successful, such, suddenly, suffer, suggest,
summer, support, sure, surface, system, table, take, talk, task, tax, teach,
teacher, team, technology, television, tell, ten, tend, term, test, than, thank,
that, the, their, them, themselves, then, theory, there, these, they, thing,
think, third, this, those, though, thought, thousand, threat, three, through,
throughout, throw, thus, time, to, today, together, tonight, too, top, total,
tough, toward, town, trade, traditional, training, travel, treat, treatment,
tree, trial, trip, trouble, true, truth, try, turn, TV, two, type, under,
understand, unit, until, up, upon, us, use, usually, value, various, very,
victim, view, violence, visit, voice, vote, wait, walk, wall, want, war, watch,
water, way, we, weapon, wear, week, weight, well, west, western, what, whatever,
when, where, whether, which, while, white, who, whole, whom, whose, why, wide,
wife, will, win, wind, window, wish, with, within, without, woman, wonder, word,
work, worker, world, worry, would, write, writer, wrong, yard, yeah, year, yes,
yet, you, young, your, yourself"#
.split(',')
.map(|word| word.trim())
.collect();
let mut word_pairs: Vec<_> = Vec::new();
for word in &words {
for other_word in &words {
word_pairs.push(word.to_string() + " " + other_word);
}
}
assert_eq!(1_000_000, word_pairs.len());
word_pairs
}
fn test_hash_common_words<T: Hasher>(hasher: impl Fn() -> T) {
let word_pairs: Vec<_> = gen_word_pairs();
check_for_collisions(&hasher, &word_pairs, 32);
}
fn check_for_collisions<T: Hasher, H: Hash>(hasher: &impl Fn() -> T, items: &[H], bucket_count: usize) {
let mut buckets = vec![0; bucket_count];
for item in items {
let value = hash(item, &hasher) as usize;
println!("{:x}", value);
buckets[value % bucket_count] += 1;
}
let mean = items.len() / bucket_count;
let max = *buckets.iter().max().unwrap();
let min = *buckets.iter().min().unwrap();
assert!(
(min as f64) > (mean as f64) * 0.95,
"min: {}, max:{}, {:?}",
min,
max,
buckets
);
assert!(
(max as f64) < (mean as f64) * 1.05,
"min: {}, max:{}, {:?}",
min,
max,
buckets
);
}
fn hash<T: Hasher>(b: &impl Hash, hasher: &dyn Fn() -> T) -> u64 {
let mut hasher = hasher();
b.hash(&mut hasher);
hasher.finish()
}
#[test]
fn test_bucket_distribution() {
let hasher = || AHasher::new_with_keys(0x0123456789ABCDEF, 0x0123456789ABCDEF);
let sequence: Vec<_> = (0..320000).collect();
check_for_collisions(&hasher, &sequence, 32);
let sequence: Vec<_> = (0..2560000).collect();
check_for_collisions(&hasher, &sequence, 256);
let sequence: Vec<_> = (0..320000).map(|i| i * 1024).collect();
check_for_collisions(&hasher, &sequence, 32);
let sequence: Vec<_> = (0..2560000_u64).map(|i| i * 1024).collect();
check_for_collisions(&hasher, &sequence, 256);
}
fn ahash_vec<H: Hash>(b: &Vec<H>) -> u64 {
let mut total: u64 = 0;
for item in b {
let mut hasher = AHasher::new_with_keys(1234, 5678);
item.hash(&mut hasher);
total = total.wrapping_add(hasher.finish());
}
total
}
fn fxhash_vec<H: Hash>(b: &Vec<H>) -> u64 {
let mut total: u64 = 0;
for item in b {
let mut hasher = FxHasher::default();
item.hash(&mut hasher);
total = total.wrapping_add(hasher.finish());
}
total
}
fn bench_ahash_words(c: &mut Criterion) {
let words = gen_word_pairs();
c.bench_function("aes_words", |b| b.iter(|| black_box(ahash_vec(&words))));
}
fn bench_fx_words(c: &mut Criterion) {
let words = gen_word_pairs();
c.bench_function("fx_words", |b| b.iter(|| black_box(fxhash_vec(&words))));
}
criterion_main!(benches);
criterion_group!(benches, bench_ahash_words, bench_fx_words,);

25
third_party/rust/ahash-0.3.2/tests/nopanic.rs vendored

@ -1,25 +0,0 @@
use ahash::AHasher;
#[macro_use]
extern crate no_panic;
#[inline(never)]
#[no_panic]
fn hash_test_final(num: i32, string: &str) -> (u64, u64) {
use core::hash::Hasher;
let mut hasher1 = AHasher::new_with_keys(0, 1);
let mut hasher2 = AHasher::new_with_keys(0, 2);
hasher1.write_i32(num);
hasher2.write(string.as_bytes());
(hasher1.finish(), hasher2.finish())
}
#[inline(never)]
fn hash_test_final_wrapper(num: i32, string: &str) {
hash_test_final(num, string);
}
#[test]
fn test_no_panic() {
hash_test_final_wrapper(2, "");
}


@ -1 +0,0 @@
{"files":{"Cargo.lock":"461e5e87b13d7faf25813b08b5003060c39d8af0953f30d5b80ae0926c888022","Cargo.toml":"1eded5c9954b3bb92bb2c7403e026198e66a2a42199db06fc9cafddc8d1fd677","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"27995d58ad5c1145c1a8cd86244ce844886958a35eb2b78c6b772748669999ac","README.md":"ba9a1621483e0b9f017f07c282d00d5cf3a2d8660cca6df6b14941319d748953","examples/integers.rs":"589ff4271566dfa322becddf3e2c7b592e6e0bc97b02892ce75619b7e452e930","examples/paths.rs":"1b30e466b824ce8df7ad0a55334424131d9d2573d6cf9f7d5d50c09c8901d526","examples/traits.rs":"cbee6a3e1f7db60b02ae25b714926517144a77cb492021f492774cf0e1865a9e","examples/versions.rs":"38535e6d9f5bfae0de474a3db79a40e8f5da8ba9334c5ff4c363de9bc99d4d12","src/error.rs":"12de7dafea4a35d1dc2f0fa79bfa038386bbbea72bf083979f4ddf227999eeda","src/lib.rs":"411d8dbc48ab0f67cb10243f1e16b235407818c96556c838182e4004da995dff","src/tests.rs":"0b1353344e832553d328c47f1639ced877b5dff70fd2024d84130bd1c33eee07","src/version.rs":"175727d5f02f2fe2271ddc9b041db2a5b9c6fe0f95afd17c73a4d982612764a3"},"package":"b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"}

6
third_party/rust/autocfg-0.1.6/Cargo.lock generated vendored

@ -1,6 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
[[package]]
name = "autocfg"
version = "0.1.6"

24
third_party/rust/autocfg-0.1.6/Cargo.toml vendored

@ -1,24 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
name = "autocfg"
version = "0.1.6"
authors = ["Josh Stone <cuviper@gmail.com>"]
description = "Automatic cfg for Rust compiler features"
readme = "README.md"
keywords = ["rustc", "build", "autoconf"]
categories = ["development-tools::build-utils"]
license = "Apache-2.0/MIT"
repository = "https://github.com/cuviper/autocfg"
[dependencies]

201
third_party/rust/autocfg-0.1.6/LICENSE-APACHE vendored

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
third_party/rust/autocfg-0.1.6/LICENSE-MIT vendored

@ -1,25 +0,0 @@
Copyright (c) 2018 Josh Stone
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

81
third_party/rust/autocfg-0.1.6/README.md vendored

@ -1,81 +0,0 @@
autocfg
=======
[![autocfg crate](https://img.shields.io/crates/v/autocfg.svg)](https://crates.io/crates/autocfg)
[![autocfg documentation](https://docs.rs/autocfg/badge.svg)](https://docs.rs/autocfg)
![minimum rustc 1.0](https://img.shields.io/badge/rustc-1.0+-red.svg)
[![Travis Status](https://travis-ci.org/cuviper/autocfg.svg?branch=master)](https://travis-ci.org/cuviper/autocfg)
A Rust library for build scripts to automatically configure code based on
compiler support. Code snippets are dynamically tested to see if the `rustc`
will accept them, rather than hard-coding specific version support.
## Usage
Add this to your `Cargo.toml`:
```toml
[build-dependencies]
autocfg = "0.1"
```
Then use it in your `build.rs` script to detect compiler features. For
example, to test for 128-bit integer support, it might look like:
```rust
extern crate autocfg;
fn main() {
let ac = autocfg::new();
ac.emit_has_type("i128");
// (optional) We don't need to rerun for anything external.
autocfg::rerun_path(file!());
}
```
If the type test succeeds, this will write a `cargo:rustc-cfg=has_i128` line
for Cargo, which translates to Rust arguments `--cfg has_i128`. Then in the
rest of your Rust code, you can add `#[cfg(has_i128)]` conditions on code that
should only be used when the compiler supports it.
## Release Notes
- 0.1.6 (2019-08-19)
- Add `probe`/`emit_sysroot_crate`, by @leo60228
- 0.1.5 (2019-07-16)
- Mask some warnings from newer rustc.
- 0.1.4 (2019-05-22)
- Relax `std`/`no_std` probing to a warning instead of an error.
- Improve `rustc` bootstrap compatibility.
- 0.1.3 (2019-05-21)
- Auto-detects if `#![no_std]` is needed for the `$TARGET`
- 0.1.2 (2019-01-16)
- Add `rerun_env(ENV)` to print `cargo:rerun-if-env-changed=ENV`
- Add `rerun_path(PATH)` to print `cargo:rerun-if-changed=PATH`
## Minimum Rust version policy
This crate's minimum supported `rustc` version is `1.0.0`. Compatibility is
its entire reason for existence, so this crate will be extremely conservative
about raising this requirement. If this is ever deemed necessary, it will be
treated as a major breaking change for semver purposes.
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
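The README's example covers only the build-script half. A hedged sketch of the consuming side, where the `has_i128` cfg name comes from the README's `emit_has_type("i128")` probe and the `widening_mul` function is purely illustrative:

```rust
// Gate code on the cfg that build.rs emits when the probe succeeds
// (`cargo:rustc-cfg=has_i128` becomes `--cfg has_i128`).
#[cfg(has_i128)]
fn widening_mul(a: u64, b: u64) -> u128 {
    (a as u128) * (b as u128)
}

// Conservative fallback for compilers without 128-bit integer support.
#[cfg(not(has_i128))]
fn widening_mul(a: u64, b: u64) -> u64 {
    a.wrapping_mul(b)
}

fn main() {
    println!("{}", widening_mul(1 << 32, 3));
}
```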


@ -1,9 +0,0 @@
extern crate autocfg;
fn main() {
// Normally, cargo will set `OUT_DIR` for build scripts.
let ac = autocfg::AutoCfg::with_dir("target").unwrap();
for i in 3..8 {
ac.emit_has_type(&format!("i{}", 1 << i));
}
}


@ -1,22 +0,0 @@
extern crate autocfg;
fn main() {
// Normally, cargo will set `OUT_DIR` for build scripts.
let ac = autocfg::AutoCfg::with_dir("target").unwrap();
// since ancient times...
ac.emit_has_path("std::vec::Vec");
ac.emit_path_cfg("std::vec::Vec", "has_vec");
// rustc 1.10.0
ac.emit_has_path("std::panic::PanicInfo");
ac.emit_path_cfg("std::panic::PanicInfo", "has_panic_info");
// rustc 1.20.0
ac.emit_has_path("std::mem::ManuallyDrop");
ac.emit_path_cfg("std::mem::ManuallyDrop", "has_manually_drop");
// rustc 1.25.0
ac.emit_has_path("std::ptr::NonNull");
ac.emit_path_cfg("std::ptr::NonNull", "has_non_null");
}


@ -1,26 +0,0 @@
extern crate autocfg;
fn main() {
// Normally, cargo will set `OUT_DIR` for build scripts.
let ac = autocfg::AutoCfg::with_dir("target").unwrap();
// since ancient times...
ac.emit_has_trait("std::ops::Add");
ac.emit_trait_cfg("std::ops::Add", "has_ops");
// trait parameters have to be provided
ac.emit_has_trait("std::borrow::Borrow<str>");
ac.emit_trait_cfg("std::borrow::Borrow<str>", "has_borrow");
// rustc 1.8.0
ac.emit_has_trait("std::ops::AddAssign");
ac.emit_trait_cfg("std::ops::AddAssign", "has_assign_ops");
// rustc 1.12.0
ac.emit_has_trait("std::iter::Sum");
ac.emit_trait_cfg("std::iter::Sum", "has_sum");
// rustc 1.28.0
ac.emit_has_trait("std::alloc::GlobalAlloc");
ac.emit_trait_cfg("std::alloc::GlobalAlloc", "has_global_alloc");
}

third_party/rust/autocfg-0.1.6/examples/versions.rs (vendored)

@ -1,9 +0,0 @@
extern crate autocfg;
fn main() {
// Normally, cargo will set `OUT_DIR` for build scripts.
let ac = autocfg::AutoCfg::with_dir("target").unwrap();
for i in 0..100 {
ac.emit_rustc_version(1, i);
}
}

third_party/rust/autocfg-0.1.6/src/error.rs (vendored)

@ -1,69 +0,0 @@
use std::error;
use std::fmt;
use std::io;
use std::num;
use std::str;
/// A common error type for the `autocfg` crate.
#[derive(Debug)]
pub struct Error {
kind: ErrorKind,
}
impl error::Error for Error {
fn description(&self) -> &str {
"AutoCfg error"
}
fn cause(&self) -> Option<&error::Error> {
match self.kind {
ErrorKind::Io(ref e) => Some(e),
ErrorKind::Num(ref e) => Some(e),
ErrorKind::Utf8(ref e) => Some(e),
ErrorKind::Other(_) => None,
}
}
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
match self.kind {
ErrorKind::Io(ref e) => e.fmt(f),
ErrorKind::Num(ref e) => e.fmt(f),
ErrorKind::Utf8(ref e) => e.fmt(f),
ErrorKind::Other(s) => s.fmt(f),
}
}
}
#[derive(Debug)]
enum ErrorKind {
Io(io::Error),
Num(num::ParseIntError),
Utf8(str::Utf8Error),
Other(&'static str),
}
pub fn from_io(e: io::Error) -> Error {
Error {
kind: ErrorKind::Io(e),
}
}
pub fn from_num(e: num::ParseIntError) -> Error {
Error {
kind: ErrorKind::Num(e),
}
}
pub fn from_utf8(e: str::Utf8Error) -> Error {
Error {
kind: ErrorKind::Utf8(e),
}
}
pub fn from_str(s: &'static str) -> Error {
Error {
kind: ErrorKind::Other(s),
}
}

third_party/rust/autocfg-0.1.6/src/lib.rs (vendored)

@ -1,328 +0,0 @@
//! A Rust library for build scripts to automatically configure code based on
//! compiler support. Code snippets are dynamically tested to see if the `rustc`
//! will accept them, rather than hard-coding specific version support.
//!
//!
//! ## Usage
//!
//! Add this to your `Cargo.toml`:
//!
//! ```toml
//! [build-dependencies]
//! autocfg = "0.1"
//! ```
//!
//! Then use it in your `build.rs` script to detect compiler features. For
//! example, to test for 128-bit integer support, it might look like:
//!
//! ```rust
//! extern crate autocfg;
//!
//! fn main() {
//! # // Normally, cargo will set `OUT_DIR` for build scripts.
//! # std::env::set_var("OUT_DIR", "target");
//! let ac = autocfg::new();
//! ac.emit_has_type("i128");
//!
//! // (optional) We don't need to rerun for anything external.
//! autocfg::rerun_path(file!());
//! }
//! ```
//!
//! If the type test succeeds, this will write a `cargo:rustc-cfg=has_i128` line
//! for Cargo, which translates to Rust arguments `--cfg has_i128`. Then in the
//! rest of your Rust code, you can add `#[cfg(has_i128)]` conditions on code that
//! should only be used when the compiler supports it.
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
// allow future warnings that can't be fixed while keeping 1.0 compatibility
#![allow(unknown_lints)]
#![allow(bare_trait_objects)]
#![allow(ellipsis_inclusive_range_patterns)]
use std::env;
use std::ffi::OsString;
use std::fs;
use std::io::{stderr, Write};
use std::path::PathBuf;
use std::process::{Command, Stdio};
#[allow(deprecated)]
use std::sync::atomic::ATOMIC_USIZE_INIT;
use std::sync::atomic::{AtomicUsize, Ordering};
mod error;
pub use error::Error;
mod version;
use version::Version;
#[cfg(test)]
mod tests;
/// Helper to detect compiler features for `cfg` output in build scripts.
#[derive(Clone, Debug)]
pub struct AutoCfg {
out_dir: PathBuf,
rustc: PathBuf,
rustc_version: Version,
target: Option<OsString>,
no_std: bool,
}
/// Writes a config flag for rustc on standard out.
///
/// This looks like: `cargo:rustc-cfg=CFG`
///
/// Cargo will use this in arguments to rustc, like `--cfg CFG`.
pub fn emit(cfg: &str) {
println!("cargo:rustc-cfg={}", cfg);
}
/// Writes a line telling Cargo to rerun the build script if `path` changes.
///
/// This looks like: `cargo:rerun-if-changed=PATH`
///
/// This requires at least cargo 0.7.0, corresponding to rustc 1.6.0. Earlier
/// versions of cargo will simply ignore the directive.
pub fn rerun_path(path: &str) {
println!("cargo:rerun-if-changed={}", path);
}
/// Writes a line telling Cargo to rerun the build script if the environment
/// variable `var` changes.
///
/// This looks like: `cargo:rerun-if-env-changed=VAR`
///
/// This requires at least cargo 0.21.0, corresponding to rustc 1.20.0. Earlier
/// versions of cargo will simply ignore the directive.
pub fn rerun_env(var: &str) {
println!("cargo:rerun-if-env-changed={}", var);
}
/// Create a new `AutoCfg` instance.
///
/// # Panics
///
/// Panics if `AutoCfg::new()` returns an error.
pub fn new() -> AutoCfg {
AutoCfg::new().unwrap()
}
impl AutoCfg {
/// Create a new `AutoCfg` instance.
///
/// # Common errors
///
/// - `rustc` can't be executed, from `RUSTC` or in the `PATH`.
/// - The version output from `rustc` can't be parsed.
/// - `OUT_DIR` is not set in the environment, or is not a writable directory.
///
pub fn new() -> Result<Self, Error> {
match env::var_os("OUT_DIR") {
Some(d) => Self::with_dir(d),
None => Err(error::from_str("no OUT_DIR specified!")),
}
}
/// Create a new `AutoCfg` instance with the specified output directory.
///
/// # Common errors
///
/// - `rustc` can't be executed, from `RUSTC` or in the `PATH`.
/// - The version output from `rustc` can't be parsed.
/// - `dir` is not a writable directory.
///
pub fn with_dir<T: Into<PathBuf>>(dir: T) -> Result<Self, Error> {
let rustc = env::var_os("RUSTC").unwrap_or_else(|| "rustc".into());
let rustc: PathBuf = rustc.into();
let rustc_version = try!(Version::from_rustc(&rustc));
// Sanity check the output directory
let dir = dir.into();
let meta = try!(fs::metadata(&dir).map_err(error::from_io));
if !meta.is_dir() || meta.permissions().readonly() {
return Err(error::from_str("output path is not a writable directory"));
}
let mut ac = AutoCfg {
out_dir: dir,
rustc: rustc,
rustc_version: rustc_version,
target: env::var_os("TARGET"),
no_std: false,
};
// Sanity check with and without `std`.
if !ac.probe("").unwrap_or(false) {
ac.no_std = true;
if !ac.probe("").unwrap_or(false) {
// Neither worked, so assume nothing...
ac.no_std = false;
let warning = b"warning: autocfg could not probe for `std`\n";
stderr().write_all(warning).ok();
}
}
Ok(ac)
}
/// Test whether the current `rustc` reports a version greater than
/// or equal to "`major`.`minor`".
pub fn probe_rustc_version(&self, major: usize, minor: usize) -> bool {
self.rustc_version >= Version::new(major, minor, 0)
}
/// Sets a `cfg` value of the form `rustc_major_minor`, like `rustc_1_29`,
/// if the current `rustc` is at least that version.
pub fn emit_rustc_version(&self, major: usize, minor: usize) {
if self.probe_rustc_version(major, minor) {
emit(&format!("rustc_{}_{}", major, minor));
}
}
fn probe<T: AsRef<[u8]>>(&self, code: T) -> Result<bool, Error> {
#[allow(deprecated)]
static ID: AtomicUsize = ATOMIC_USIZE_INIT;
let id = ID.fetch_add(1, Ordering::Relaxed);
let mut command = Command::new(&self.rustc);
command
.arg("--crate-name")
.arg(format!("probe{}", id))
.arg("--crate-type=lib")
.arg("--out-dir")
.arg(&self.out_dir)
.arg("--emit=llvm-ir");
if let Some(target) = self.target.as_ref() {
command.arg("--target").arg(target);
}
command.arg("-").stdin(Stdio::piped());
let mut child = try!(command.spawn().map_err(error::from_io));
let mut stdin = child.stdin.take().expect("rustc stdin");
if self.no_std {
try!(stdin.write_all(b"#![no_std]\n").map_err(error::from_io));
}
try!(stdin.write_all(code.as_ref()).map_err(error::from_io));
drop(stdin);
let status = try!(child.wait().map_err(error::from_io));
Ok(status.success())
}
/// Tests whether the given sysroot crate can be used.
///
/// The test code is subject to change, but currently looks like:
///
/// ```ignore
/// extern crate CRATE as probe;
/// ```
pub fn probe_sysroot_crate(&self, name: &str) -> bool {
self.probe(format!("extern crate {} as probe;", name)) // `as _` wasn't stabilized until Rust 1.33
.unwrap_or(false)
}
/// Emits a config value `has_CRATE` if `probe_sysroot_crate` returns true.
pub fn emit_sysroot_crate(&self, name: &str) {
if self.probe_sysroot_crate(name) {
emit(&format!("has_{}", mangle(name)));
}
}
/// Tests whether the given path can be used.
///
/// The test code is subject to change, but currently looks like:
///
/// ```ignore
/// pub use PATH;
/// ```
pub fn probe_path(&self, path: &str) -> bool {
self.probe(format!("pub use {};", path)).unwrap_or(false)
}
/// Emits a config value `has_PATH` if `probe_path` returns true.
///
/// Any non-identifier characters in the `path` will be replaced with
/// `_` in the generated config value.
pub fn emit_has_path(&self, path: &str) {
if self.probe_path(path) {
emit(&format!("has_{}", mangle(path)));
}
}
/// Emits the given `cfg` value if `probe_path` returns true.
pub fn emit_path_cfg(&self, path: &str, cfg: &str) {
if self.probe_path(path) {
emit(cfg);
}
}
/// Tests whether the given trait can be used.
///
/// The test code is subject to change, but currently looks like:
///
/// ```ignore
/// pub trait Probe: TRAIT + Sized {}
/// ```
pub fn probe_trait(&self, name: &str) -> bool {
self.probe(format!("pub trait Probe: {} + Sized {{}}", name))
.unwrap_or(false)
}
/// Emits a config value `has_TRAIT` if `probe_trait` returns true.
///
/// Any non-identifier characters in the trait `name` will be replaced with
/// `_` in the generated config value.
pub fn emit_has_trait(&self, name: &str) {
if self.probe_trait(name) {
emit(&format!("has_{}", mangle(name)));
}
}
/// Emits the given `cfg` value if `probe_trait` returns true.
pub fn emit_trait_cfg(&self, name: &str, cfg: &str) {
if self.probe_trait(name) {
emit(cfg);
}
}
/// Tests whether the given type can be used.
///
/// The test code is subject to change, but currently looks like:
///
/// ```ignore
/// pub type Probe = TYPE;
/// ```
pub fn probe_type(&self, name: &str) -> bool {
self.probe(format!("pub type Probe = {};", name))
.unwrap_or(false)
}
/// Emits a config value `has_TYPE` if `probe_type` returns true.
///
/// Any non-identifier characters in the type `name` will be replaced with
/// `_` in the generated config value.
pub fn emit_has_type(&self, name: &str) {
if self.probe_type(name) {
emit(&format!("has_{}", mangle(name)));
}
}
/// Emits the given `cfg` value if `probe_type` returns true.
pub fn emit_type_cfg(&self, name: &str, cfg: &str) {
if self.probe_type(name) {
emit(cfg);
}
}
}
fn mangle(s: &str) -> String {
s.chars()
.map(|c| match c {
'A'...'Z' | 'a'...'z' | '0'...'9' => c,
_ => '_',
})
.collect()
}

third_party/rust/autocfg-0.1.6/src/tests.rs (vendored)

@ -1,99 +0,0 @@
use super::AutoCfg;
impl AutoCfg {
fn core_std(&self, path: &str) -> String {
let krate = if self.no_std { "core" } else { "std" };
format!("{}::{}", krate, path)
}
}
#[test]
fn autocfg_version() {
let ac = AutoCfg::with_dir("target").unwrap();
println!("version: {:?}", ac.rustc_version);
assert!(ac.probe_rustc_version(1, 0));
}
#[test]
fn version_cmp() {
use super::version::Version;
let v123 = Version::new(1, 2, 3);
assert!(Version::new(1, 0, 0) < v123);
assert!(Version::new(1, 2, 2) < v123);
assert!(Version::new(1, 2, 3) == v123);
assert!(Version::new(1, 2, 4) > v123);
assert!(Version::new(1, 10, 0) > v123);
assert!(Version::new(2, 0, 0) > v123);
}
#[test]
fn probe_add() {
let ac = AutoCfg::with_dir("target").unwrap();
let add = ac.core_std("ops::Add");
let add_rhs = ac.core_std("ops::Add<i32>");
let add_rhs_output = ac.core_std("ops::Add<i32, Output = i32>");
assert!(ac.probe_path(&add));
assert!(ac.probe_trait(&add));
assert!(ac.probe_trait(&add_rhs));
assert!(ac.probe_trait(&add_rhs_output));
assert!(ac.probe_type(&add_rhs_output));
}
#[test]
fn probe_as_ref() {
let ac = AutoCfg::with_dir("target").unwrap();
let as_ref = ac.core_std("convert::AsRef");
let as_ref_str = ac.core_std("convert::AsRef<str>");
assert!(ac.probe_path(&as_ref));
assert!(ac.probe_trait(&as_ref_str));
assert!(ac.probe_type(&as_ref_str));
}
#[test]
fn probe_i128() {
let ac = AutoCfg::with_dir("target").unwrap();
let missing = !ac.probe_rustc_version(1, 26);
let i128_path = ac.core_std("i128");
assert!(missing ^ ac.probe_path(&i128_path));
assert!(missing ^ ac.probe_type("i128"));
}
#[test]
fn probe_sum() {
let ac = AutoCfg::with_dir("target").unwrap();
let missing = !ac.probe_rustc_version(1, 12);
let sum = ac.core_std("iter::Sum");
let sum_i32 = ac.core_std("iter::Sum<i32>");
assert!(missing ^ ac.probe_path(&sum));
assert!(missing ^ ac.probe_trait(&sum));
assert!(missing ^ ac.probe_trait(&sum_i32));
assert!(missing ^ ac.probe_type(&sum_i32));
}
#[test]
fn probe_std() {
let ac = AutoCfg::with_dir("target").unwrap();
assert_eq!(ac.probe_sysroot_crate("std"), !ac.no_std);
}
#[test]
fn probe_alloc() {
let ac = AutoCfg::with_dir("target").unwrap();
let missing = !ac.probe_rustc_version(1, 36);
assert!(missing ^ ac.probe_sysroot_crate("alloc"));
}
#[test]
fn probe_bad_sysroot_crate() {
let ac = AutoCfg::with_dir("target").unwrap();
assert!(!ac.probe_sysroot_crate("doesnt_exist"));
}
#[test]
fn probe_no_std() {
let ac = AutoCfg::with_dir("target").unwrap();
assert!(ac.probe_type("i32"));
assert!(ac.probe_type("[i32]"));
assert_eq!(ac.probe_type("Vec<i32>"), !ac.no_std);
}

third_party/rust/autocfg-0.1.6/src/version.rs (vendored)

@ -1,60 +0,0 @@
use std::path::Path;
use std::process::Command;
use std::str;
use super::{error, Error};
/// A version structure for making relative comparisons.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Version {
major: usize,
minor: usize,
patch: usize,
}
impl Version {
/// Creates a `Version` instance for a specific `major.minor.patch` version.
pub fn new(major: usize, minor: usize, patch: usize) -> Self {
Version {
major: major,
minor: minor,
patch: patch,
}
}
pub fn from_rustc(rustc: &Path) -> Result<Self, Error> {
// Get rustc's verbose version
let output = try!(Command::new(rustc)
.args(&["--version", "--verbose"])
.output()
.map_err(error::from_io));
if !output.status.success() {
return Err(error::from_str("could not execute rustc"));
}
let output = try!(str::from_utf8(&output.stdout).map_err(error::from_utf8));
// Find the release line in the verbose version output.
let release = match output.lines().find(|line| line.starts_with("release: ")) {
Some(line) => &line["release: ".len()..],
None => return Err(error::from_str("could not find rustc release")),
};
// Strip off any extra channel info, e.g. "-beta.N", "-nightly"
let version = match release.find('-') {
Some(i) => &release[..i],
None => release,
};
// Split the version into semver components.
let mut iter = version.splitn(3, '.');
let major = try!(iter.next().ok_or(error::from_str("missing major version")));
let minor = try!(iter.next().ok_or(error::from_str("missing minor version")));
let patch = try!(iter.next().ok_or(error::from_str("missing patch version")));
Ok(Version::new(
try!(major.parse().map_err(error::from_num)),
try!(minor.parse().map_err(error::from_num)),
try!(patch.parse().map_err(error::from_num)),
))
}
}

third_party/rust/autocfg/.cargo-checksum.json (vendored)

@ -1 +1 @@
{"files":{"Cargo.lock":"8dc9b28e55bc55e1846909cfbb169aef0ff15bef2cfa6e27eef5adb7634eed1a","Cargo.toml":"9a97fd6cdf41c7b507d2d8954f99dbbae3450e9e67934c87e5cabe8fa795f6c5","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"27995d58ad5c1145c1a8cd86244ce844886958a35eb2b78c6b772748669999ac","README.md":"f173253f205c245806e5dd16ad1906e1a1b3f89694890236298b2126e937324a","examples/integers.rs":"589ff4271566dfa322becddf3e2c7b592e6e0bc97b02892ce75619b7e452e930","examples/paths.rs":"1b30e466b824ce8df7ad0a55334424131d9d2573d6cf9f7d5d50c09c8901d526","examples/traits.rs":"cbee6a3e1f7db60b02ae25b714926517144a77cb492021f492774cf0e1865a9e","examples/versions.rs":"38535e6d9f5bfae0de474a3db79a40e8f5da8ba9334c5ff4c363de9bc99d4d12","src/error.rs":"12de7dafea4a35d1dc2f0fa79bfa038386bbbea72bf083979f4ddf227999eeda","src/lib.rs":"4e80b48869f038be4e80e3356e4cce7e81713717ceeca095f9bb04cda2e4d224","src/tests.rs":"8197b5a6e91872d6c63731ed4b7b619068b6d13501d9aecb3652f20857edc9aa","src/version.rs":"175727d5f02f2fe2271ddc9b041db2a5b9c6fe0f95afd17c73a4d982612764a3","tests/rustflags.rs":"441fb0c6606e243c31d3817a5ae2240b65fcae0ea8ab583f80f8f6d6c267e614"},"package":"f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"}
{"files":{"Cargo.lock":"461e5e87b13d7faf25813b08b5003060c39d8af0953f30d5b80ae0926c888022","Cargo.toml":"1eded5c9954b3bb92bb2c7403e026198e66a2a42199db06fc9cafddc8d1fd677","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"27995d58ad5c1145c1a8cd86244ce844886958a35eb2b78c6b772748669999ac","README.md":"ba9a1621483e0b9f017f07c282d00d5cf3a2d8660cca6df6b14941319d748953","examples/integers.rs":"589ff4271566dfa322becddf3e2c7b592e6e0bc97b02892ce75619b7e452e930","examples/paths.rs":"1b30e466b824ce8df7ad0a55334424131d9d2573d6cf9f7d5d50c09c8901d526","examples/traits.rs":"cbee6a3e1f7db60b02ae25b714926517144a77cb492021f492774cf0e1865a9e","examples/versions.rs":"38535e6d9f5bfae0de474a3db79a40e8f5da8ba9334c5ff4c363de9bc99d4d12","src/error.rs":"12de7dafea4a35d1dc2f0fa79bfa038386bbbea72bf083979f4ddf227999eeda","src/lib.rs":"411d8dbc48ab0f67cb10243f1e16b235407818c96556c838182e4004da995dff","src/tests.rs":"0b1353344e832553d328c47f1639ced877b5dff70fd2024d84130bd1c33eee07","src/version.rs":"175727d5f02f2fe2271ddc9b041db2a5b9c6fe0f95afd17c73a4d982612764a3"},"package":"b671c8fb71b457dd4ae18c4ba1e59aa81793daacc361d82fcd410cef0d491875"}

third_party/rust/autocfg/Cargo.lock (generated, vendored)

@ -2,5 +2,5 @@
# It is not intended for manual editing.
[[package]]
name = "autocfg"
version = "1.0.0"
version = "0.1.6"

third_party/rust/autocfg/Cargo.toml (vendored)

@ -12,13 +12,13 @@
[package]
name = "autocfg"
version = "1.0.0"
version = "0.1.6"
authors = ["Josh Stone <cuviper@gmail.com>"]
description = "Automatic cfg for Rust compiler features"
readme = "README.md"
keywords = ["rustc", "build", "autoconf"]
categories = ["development-tools::build-utils"]
license = "Apache-2.0 OR MIT"
license = "Apache-2.0/MIT"
repository = "https://github.com/cuviper/autocfg"
[dependencies]

third_party/rust/autocfg/README.md (vendored)

@ -17,7 +17,7 @@ Add this to your `Cargo.toml`:
```toml
[build-dependencies]
autocfg = "1"
autocfg = "0.1"
```
Then use it in your `build.rs` script to detect compiler features. For
@ -31,7 +31,7 @@ fn main() {
ac.emit_has_type("i128");
// (optional) We don't need to rerun for anything external.
autocfg::rerun_path("build.rs");
autocfg::rerun_path(file!());
}
```
@ -43,16 +43,8 @@ should only be used when the compiler supports it.
## Release Notes
- 1.0.0 (2020-01-08)
- 🎉 Release 1.0! 🎉 (no breaking changes)
- Add `probe_expression` and `emit_expression_cfg` to test arbitrary expressions.
- Add `probe_constant` and `emit_constant_cfg` to test arbitrary constant expressions.
- 0.1.7 (2019-10-20)
- Apply `RUSTFLAGS` when probing `$TARGET != $HOST`, mainly for sysroot, by @roblabla.
- 0.1.6 (2019-08-19)
- Add `probe`/`emit_sysroot_crate`, by @leo60228.
- Add `probe`/`emit_sysroot_crate`, by @leo60228
- 0.1.5 (2019-07-16)
- Mask some warnings from newer rustc.
@ -62,11 +54,11 @@ should only be used when the compiler supports it.
- Improve `rustc` bootstrap compatibility.
- 0.1.3 (2019-05-21)
- Auto-detects if `#![no_std]` is needed for the `$TARGET`.
- Auto-detects if `#![no_std]` is needed for the `$TARGET`
- 0.1.2 (2019-01-16)
- Add `rerun_env(ENV)` to print `cargo:rerun-if-env-changed=ENV`.
- Add `rerun_path(PATH)` to print `cargo:rerun-if-changed=PATH`.
- Add `rerun_env(ENV)` to print `cargo:rerun-if-env-changed=ENV`
- Add `rerun_path(PATH)` to print `cargo:rerun-if-changed=PATH`
## Minimum Rust version policy

third_party/rust/autocfg/src/lib.rs (vendored)

@ -9,7 +9,7 @@
//!
//! ```toml
//! [build-dependencies]
//! autocfg = "1"
//! autocfg = "0.1"
//! ```
//!
//! Then use it in your `build.rs` script to detect compiler features. For
@ -25,7 +25,7 @@
//! ac.emit_has_type("i128");
//!
//! // (optional) We don't need to rerun for anything external.
//! autocfg::rerun_path("build.rs");
//! autocfg::rerun_path(file!());
//! }
//! ```
//!
@ -33,14 +33,6 @@
//! for Cargo, which translates to Rust arguments `--cfg has_i128`. Then in the
//! rest of your Rust code, you can add `#[cfg(has_i128)]` conditions on code that
//! should only be used when the compiler supports it.
//!
//! ## Caution
//!
//! Many of the probing methods of `AutoCfg` document the particular template they
//! use, **subject to change**. The inputs are not validated to make sure they are
//! semantically correct for their expected use, so it's _possible_ to escape and
//! inject something unintended. However, such abuse is unsupported and will not
//! be considered when making changes to the templates.
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
@ -49,16 +41,6 @@
#![allow(bare_trait_objects)]
#![allow(ellipsis_inclusive_range_patterns)]
/// Local macro to avoid `std::try!`, deprecated in Rust 1.39.
macro_rules! try {
($result:expr) => {
match $result {
Ok(value) => value,
Err(error) => return Err(error),
}
};
}
use std::env;
use std::ffi::OsString;
use std::fs;
@ -86,7 +68,6 @@ pub struct AutoCfg {
rustc_version: Version,
target: Option<OsString>,
no_std: bool,
rustflags: Option<Vec<String>>,
}
/// Writes a config flag for rustc on standard out.
@ -164,35 +145,12 @@ impl AutoCfg {
return Err(error::from_str("output path is not a writable directory"));
}
// Cargo only applies RUSTFLAGS for building TARGET artifact in
// cross-compilation environment. Sadly, we don't have a way to detect
// when we're building HOST artifact in a cross-compilation environment,
// so for now we only apply RUSTFLAGS when cross-compiling an artifact.
//
// See https://github.com/cuviper/autocfg/pull/10#issuecomment-527575030.
let rustflags = if env::var_os("TARGET") != env::var_os("HOST") {
env::var("RUSTFLAGS").ok().map(|rustflags| {
// This is meant to match how cargo handles the RUSTFLAG environment
// variable.
// See https://github.com/rust-lang/cargo/blob/69aea5b6f69add7c51cca939a79644080c0b0ba0/src/cargo/core/compiler/build_context/target_info.rs#L434-L441
rustflags
.split(' ')
.map(str::trim)
.filter(|s| !s.is_empty())
.map(str::to_string)
.collect::<Vec<String>>()
})
} else {
None
};
let mut ac = AutoCfg {
out_dir: dir,
rustc: rustc,
rustc_version: rustc_version,
target: env::var_os("TARGET"),
no_std: false,
rustflags: rustflags,
};
// Sanity check with and without `std`.
@ -236,10 +194,6 @@ impl AutoCfg {
.arg(&self.out_dir)
.arg("--emit=llvm-ir");
if let &Some(ref rustflags) = &self.rustflags {
command.args(rustflags);
}
if let Some(target) = self.target.as_ref() {
command.arg("--target").arg(target);
}
@ -362,44 +316,6 @@ impl AutoCfg {
emit(cfg);
}
}
/// Tests whether the given expression can be used.
///
/// The test code is subject to change, but currently looks like:
///
/// ```ignore
/// pub fn probe() { let _ = EXPR; }
/// ```
pub fn probe_expression(&self, expr: &str) -> bool {
self.probe(format!("pub fn probe() {{ let _ = {}; }}", expr))
.unwrap_or(false)
}
/// Emits the given `cfg` value if `probe_expression` returns true.
pub fn emit_expression_cfg(&self, expr: &str, cfg: &str) {
if self.probe_expression(expr) {
emit(cfg);
}
}
/// Tests whether the given constant expression can be used.
///
/// The test code is subject to change, but currently looks like:
///
/// ```ignore
/// pub const PROBE: () = ((), EXPR).0;
/// ```
pub fn probe_constant(&self, expr: &str) -> bool {
self.probe(format!("pub const PROBE: () = ((), {}).0;", expr))
.unwrap_or(false)
}
/// Emits the given `cfg` value if `probe_constant` returns true.
pub fn emit_constant_cfg(&self, expr: &str, cfg: &str) {
if self.probe_constant(expr) {
emit(cfg);
}
}
}
fn mangle(s: &str) -> String {

third_party/rust/autocfg/src/tests.rs (vendored)

@ -5,14 +5,6 @@ impl AutoCfg {
let krate = if self.no_std { "core" } else { "std" };
format!("{}::{}", krate, path)
}
fn assert_std(&self, probe_result: bool) {
assert_eq!(!self.no_std, probe_result);
}
fn assert_min(&self, major: usize, minor: usize, probe_result: bool) {
assert_eq!(self.probe_rustc_version(major, minor), probe_result);
}
}
#[test]
@ -39,59 +31,57 @@ fn version_cmp() {
fn probe_add() {
let ac = AutoCfg::with_dir("target").unwrap();
let add = ac.core_std("ops::Add");
let add_rhs = add.clone() + "<i32>";
let add_rhs_output = add.clone() + "<i32, Output = i32>";
let dyn_add_rhs_output = "dyn ".to_string() + &*add_rhs_output;
let add_rhs = ac.core_std("ops::Add<i32>");
let add_rhs_output = ac.core_std("ops::Add<i32, Output = i32>");
assert!(ac.probe_path(&add));
assert!(ac.probe_trait(&add));
assert!(ac.probe_trait(&add_rhs));
assert!(ac.probe_trait(&add_rhs_output));
ac.assert_min(1, 27, ac.probe_type(&dyn_add_rhs_output));
assert!(ac.probe_type(&add_rhs_output));
}
#[test]
fn probe_as_ref() {
let ac = AutoCfg::with_dir("target").unwrap();
let as_ref = ac.core_std("convert::AsRef");
let as_ref_str = as_ref.clone() + "<str>";
let dyn_as_ref_str = "dyn ".to_string() + &*as_ref_str;
let as_ref_str = ac.core_std("convert::AsRef<str>");
assert!(ac.probe_path(&as_ref));
assert!(ac.probe_trait(&as_ref_str));
assert!(ac.probe_type(&as_ref_str));
ac.assert_min(1, 27, ac.probe_type(&dyn_as_ref_str));
}
#[test]
fn probe_i128() {
let ac = AutoCfg::with_dir("target").unwrap();
let missing = !ac.probe_rustc_version(1, 26);
let i128_path = ac.core_std("i128");
ac.assert_min(1, 26, ac.probe_path(&i128_path));
ac.assert_min(1, 26, ac.probe_type("i128"));
assert!(missing ^ ac.probe_path(&i128_path));
assert!(missing ^ ac.probe_type("i128"));
}
#[test]
fn probe_sum() {
let ac = AutoCfg::with_dir("target").unwrap();
let missing = !ac.probe_rustc_version(1, 12);
let sum = ac.core_std("iter::Sum");
let sum_i32 = sum.clone() + "<i32>";
let dyn_sum_i32 = "dyn ".to_string() + &*sum_i32;
ac.assert_min(1, 12, ac.probe_path(&sum));
ac.assert_min(1, 12, ac.probe_trait(&sum));
ac.assert_min(1, 12, ac.probe_trait(&sum_i32));
ac.assert_min(1, 12, ac.probe_type(&sum_i32));
ac.assert_min(1, 27, ac.probe_type(&dyn_sum_i32));
let sum_i32 = ac.core_std("iter::Sum<i32>");
assert!(missing ^ ac.probe_path(&sum));
assert!(missing ^ ac.probe_trait(&sum));
assert!(missing ^ ac.probe_trait(&sum_i32));
assert!(missing ^ ac.probe_type(&sum_i32));
}
#[test]
fn probe_std() {
let ac = AutoCfg::with_dir("target").unwrap();
ac.assert_std(ac.probe_sysroot_crate("std"));
assert_eq!(ac.probe_sysroot_crate("std"), !ac.no_std);
}
#[test]
fn probe_alloc() {
let ac = AutoCfg::with_dir("target").unwrap();
ac.assert_min(1, 36, ac.probe_sysroot_crate("alloc"));
let missing = !ac.probe_rustc_version(1, 36);
assert!(missing ^ ac.probe_sysroot_crate("alloc"));
}
#[test]
@ -105,21 +95,5 @@ fn probe_no_std() {
let ac = AutoCfg::with_dir("target").unwrap();
assert!(ac.probe_type("i32"));
assert!(ac.probe_type("[i32]"));
ac.assert_std(ac.probe_type("Vec<i32>"));
}
#[test]
fn probe_expression() {
let ac = AutoCfg::with_dir("target").unwrap();
assert!(ac.probe_expression(r#""test".trim_left()"#));
ac.assert_min(1, 30, ac.probe_expression(r#""test".trim_start()"#));
ac.assert_std(ac.probe_expression("[1, 2, 3].to_vec()"));
}
#[test]
fn probe_constant() {
let ac = AutoCfg::with_dir("target").unwrap();
assert!(ac.probe_constant("1 + 2 + 3"));
ac.assert_min(1, 33, ac.probe_constant("{ let x = 1 + 2 + 3; x * x }"));
ac.assert_min(1, 39, ac.probe_constant(r#""test".len()"#));
assert_eq!(ac.probe_type("Vec<i32>"), !ac.no_std);
}

third_party/rust/autocfg/tests/rustflags.rs (vendored)

@ -1,19 +0,0 @@
extern crate autocfg;
use std::env;
/// Tests that autocfg uses the RUSTFLAGS environment variable when running
/// rustc.
#[test]
fn test_with_sysroot() {
// Use the same path as this test binary.
let dir = env::current_exe().unwrap().parent().unwrap().to_path_buf();
env::set_var("RUSTFLAGS", &format!("-L {}", dir.display()));
env::set_var("OUT_DIR", &format!("{}", dir.display()));
// Ensure HOST != TARGET.
env::set_var("HOST", "lol");
let ac = autocfg::AutoCfg::new().unwrap();
assert!(ac.probe_sysroot_crate("autocfg"));
}

third_party/rust/fallible_collections/.cargo-checksum.json (vendored)

@ -1 +1 @@
{"files":{"Cargo.toml":"63f9088664a4a87c994e96e43e096a2a9b5746339a0689057ab669364536652b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"6d74a8ddac747e9fd6d8c2000c6d0ae05bc28bb9e47e629e86d4b8d05115f546","src/arc.rs":"60742619b7404d8c46237e9a3f98c49f75f88e4c24513ebca1d0ddad0274b6d6","src/boxed.rs":"8d9b1bc84e3bfb59f59f0c7df93ccd225ffc044166db6e2aa5976cc968203712","src/btree.rs":"b83820fc2a00e2e34127b3037abde8b945f0ca2785f3def725787e6813c3d3e0","src/btree/map.rs":"4d8710cf6f00bd889045a6144de692d9f752d51089db493e859d55e5ba12430a","src/btree/node.rs":"f6b4557d30ca0e30c7c7b6752c7a2c67432aab5c18c08392a28040326620a109","src/btree/search.rs":"ae78f73f3e56ea277b0a02cc39454447b75e12a6c817ecfee00065b3ddbfff67","src/btree/set.rs":"29cc3bff736007b21e14017d880edbcc7c76c30e0c256e811cae1fff0dad13fa","src/format.rs":"cee32d75cf260b19c8db74b50852bc50b8c47189d22b7424b647d084c4a76857","src/hashmap.rs":"cf02762085d9968fc235ef2c0626358661cb21aca2c8c19961b3969225c96dce","src/lib.rs":"deaf67958a1b8ae537a04a4eca3d424d20a6d9cf08cc9dfa2f6969a586976247","src/rc.rs":"102ad49f2201b9f69b50cf5a35af1e0039094936354b12572702551970c2f53c","src/try_clone.rs":"32c790435c71dec116756c284d2b953d382292b7727675740229a6b53d8c8b41","src/vec.rs":"95900e8d2f9a8902d5505e13c0d8ec17ac3ddc2ed8ae8f25b56962cdb4d8398c"},"package":"ba3301bcde54d3fc19c626ff4bf962630fe1f94cb6cdc3f18a26727a2d1f4a67"}
{"files":{"Cargo.toml":"2a7958c74d86b964737863c4d869cb8e7ae64ec85632600fd7cef6e87ee0be47","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0621878e61f0d0fda054bcbe02df75192c28bde1ecc8289cbd86aeba2dd72720","README.md":"6d74a8ddac747e9fd6d8c2000c6d0ae05bc28bb9e47e629e86d4b8d05115f546","src/arc.rs":"60742619b7404d8c46237e9a3f98c49f75f88e4c24513ebca1d0ddad0274b6d6","src/boxed.rs":"40537576912a01ed8bb3bd6b1c8179632613f37f65459e708d4abc2286933c57","src/btree.rs":"b83820fc2a00e2e34127b3037abde8b945f0ca2785f3def725787e6813c3d3e0","src/btree/map.rs":"4d8710cf6f00bd889045a6144de692d9f752d51089db493e859d55e5ba12430a","src/btree/node.rs":"f6b4557d30ca0e30c7c7b6752c7a2c67432aab5c18c08392a28040326620a109","src/btree/search.rs":"ae78f73f3e56ea277b0a02cc39454447b75e12a6c817ecfee00065b3ddbfff67","src/btree/set.rs":"29cc3bff736007b21e14017d880edbcc7c76c30e0c256e811cae1fff0dad13fa","src/format.rs":"cee32d75cf260b19c8db74b50852bc50b8c47189d22b7424b647d084c4a76857","src/hashmap.rs":"cf02762085d9968fc235ef2c0626358661cb21aca2c8c19961b3969225c96dce","src/lib.rs":"4cd0ef055208600292ec075f600e940eafcd24bd3e74fe34bba9164842d7f380","src/rc.rs":"102ad49f2201b9f69b50cf5a35af1e0039094936354b12572702551970c2f53c","src/try_clone.rs":"32c790435c71dec116756c284d2b953d382292b7727675740229a6b53d8c8b41","src/vec.rs":"691e80f89ffd25ea3e0cb287ea9bdecb1528367f21a8b3d995e9b3915c01c8e4"},"package":"3bda4d04bca84e2331f0ff2ee8300064df3f467e37743d87788c1487a6dd903b"}

third_party/rust/fallible_collections/Cargo.toml (vendored)

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "fallible_collections"
version = "0.1.3"
version = "0.2.0"
authors = ["vcombey <vcombey@student.42.fr>"]
description = "a crate which adds fallible allocation api to std collections"
readme = "README.md"
@ -21,7 +21,7 @@ keywords = ["fallible", "collections"]
license = "MIT/Apache-2.0"
repository = "https://github.com/vcombey/fallible_collections.git"
[dependencies.hashbrown]
version = "0.7.1"
version = "0.9"
[features]
std_io = []

third_party/rust/fallible_collections/src/boxed.rs (vendored)

@ -72,7 +72,7 @@ fn alloc(layout: Layout) -> Result<NonNull<u8>, TryReserveError> {
}
1..=core::usize::MAX => {
let ptr = unsafe { alloc::alloc::alloc(layout) };
core::ptr::NonNull::new(ptr).ok_or(TryReserveError::AllocErr { layout })
core::ptr::NonNull::new(ptr).ok_or(TryReserveError::AllocError { layout })
}
_ => unreachable!("size must be non-negative"),
}

third_party/rust/fallible_collections/src/lib.rs (vendored)

@ -58,7 +58,7 @@ pub mod try_clone;
#[cfg(feature = "unstable")]
pub use alloc::collections::TryReserveError;
#[cfg(not(feature = "unstable"))]
pub use hashbrown::CollectionAllocErr as TryReserveError;
pub use hashbrown::TryReserveError;
#[cfg(feature = "std_io")]
pub use vec::std_io::*;

third_party/rust/fallible_collections/src/vec.rs (vendored)

@ -444,7 +444,7 @@ fn vec_try_extend<T>(v: &mut Vec<T>, new_cap: usize) -> Result<(), TryReserveErr
};
if new_ptr.is_null() {
return Err(TryReserveError::AllocErr { layout });
return Err(TryReserveError::AllocError { layout });
}
let new_vec = unsafe { Vec::from_raw_parts(new_ptr.cast(), old_len, new_cap) };

third_party/rust/hashbrown-0.7.2/.cargo-checksum.json (vendored)

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"bc2445e0c6178a7c0cd140fe67ab1d0006f952682bf7271ed6cf883bed77df2d","Cargo.toml":"2c552bdd41739da9161cc16f917f8401049dbac171bc589745a2f06441205f55","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"b657d6635c937634bacfb4c2e4605a175298388b36f0ab0eceb95b58661765b9","benches/bench.rs":"a3f8426559ebf68d93e37edee0bf83c28f18572b394b22e47dbff33e25cac403","build.rs":"85096ca579db79e502f0af4521b62820b2b0efcfe0f3d4de2f8ec4965f13b61f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"d69528827794524cfd9acbeacc1ac4f6131e3c7574311e6d919f818f65fbff07","src/external_trait_impls/rayon/helpers.rs":"d4fbca4db924925548f8dab8eb94cf4a3955a53c5e1ff15f59c460546c394034","src/external_trait_impls/rayon/map.rs":"116e6a9138a3e572e2cd766af220ba8c45d4ef49d8bef614e30217dc3125a51b","src/external_trait_impls/rayon/mod.rs":"156de9c1ad0123334ea3b7e5a17444faf1b8bf971aa88a1f23e2f2d1c3021141","src/external_trait_impls/rayon/raw.rs":"78fc72fd84ab29a62b28ea8064f773ff097a38653a2ee18a600015c76104c52b","src/external_trait_impls/rayon/set.rs":"59afc7b1cdc985a85952d456e34eada4ca2fedf90d2a14dccf98a69f8f496137","src/external_trait_impls/serde.rs":"9306fb6e0e339398dc23ba9e7400a9a28d713df248e8b260e3d4dc44f799e101","src/lib.rs":"5c623bce890f9c3aad17715d91df95208264c80fc94c857b6ed06df8e9c80df7","src/macros.rs":"0b1e9a55e8f5232b82f7e56f352a98904b35ddfca015377cf71daa31939baabf","src/map.rs":"d669bfc54ec1b23947552a3895369b287809c09527c7be3b455396eaa8c900b7","src/raw/bitmask.rs":"3c1f33b09b212d3828a89c11a86ab1bb18dedc20e45ff0032787ca49de2833c5","src/raw/generic.rs":"03d4d5cf5241dd32cfee55986381048bf5432957cdb7c48efd85538c0cf0523d","src/raw/mod.rs":"b39ed32849d8b2717cba9ce94c2c7d1a324578b9ee65af5f613ba96dd63f2c85","src/raw/sse2.rs":"963b7be51552eb18ea24eec8b8d8882d98ba0e798eef46f1813eb42311422ce3","src/rustc_entry.rs":"28f14ebe3c95083783026fb3f7bcb63e5be6a94a77ba053385fec774fb12a69a","src/scopeguard.rs":"337cde60c9e1109cd19d4fa53529014cef1e3d5900dffde82f647881df1505f7","src/set.rs":"ff914e8fb75effca835890da0c1cceb95402bb62bc15c57a4350ac8f379f551f","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/rayon.rs":"2286707a87b139f41902c82488c355b9fb402a3e734f392f3a73e87b9b932795","tests/serde.rs":"eed27382c0e43f67e402cd9eed20dea23ef5582e1a26a183e708ca9217a559e0","tests/set.rs":"374bd312c01a01cf8953bbbc9494f431b260c2657d7c79cc250e977b869a76ad"},"package":"96282e96bfcd3da0d3aa9938bedf1e50df3269b6db08b4876d2da0bb1a0841cf"}

third_party/rust/hashbrown-0.7.2/CHANGELOG.md (vendored)

@ -1,222 +0,0 @@
# Change Log
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/)
and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
## [v0.7.2] - 2020-04-27
### Added
- Added `or_insert_with_key` to `Entry`. (#152)
### Fixed
- Partially reverted `Clone` optimization which was unsound. (#154)
### Changed
- Disabled use of `const-random` by default, which prevented reproducible builds. (#155)
- Optimized `repeat` function. (#150)
- Use `NonNull` for buckets, which improves codegen for iterators. (#148)
## [v0.7.1] - 2020-03-16
### Added
- Added `HashMap::get_key_value_mut`. (#145)
### Changed
- Optimized `Clone` implementation. (#146)
## [v0.7.0] - 2020-01-31
### Added
- Added a `drain_filter` function to `HashMap`. (#135)
### Changed
- Updated `ahash` dependency to 0.3. (#141)
- Optimized set union and intersection. (#130)
- `raw_entry` can now be used without requiring `S: BuildHasher`. (#123)
- `RawTable::bucket_index` can now be used under the `raw` feature. (#128)
## [v0.6.3] - 2019-10-31
### Added
- Added an `ahash-compile-time-rng` feature (enabled by default) which allows disabling the
`compile-time-rng` feature in `ahash` to work around a Cargo bug. (#125)
## [v0.6.2] - 2019-10-23
### Added
- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between
runtime performance and compilation time. (#119)
## [v0.6.1] - 2019-10-04
### Added
- Added `Entry::insert` and `RawEntryMut::insert`. (#118)
### Changed
- `Group::static_empty` was changed from a `const` to a `static` (#116).
## [v0.6.0] - 2019-08-13
### Fixed
- Fixed AHash accidentally depending on `std`. (#110)
### Changed
- The minimum Rust version has been bumped to 1.32 (due to `rand` dependency).
## ~~[v0.5.1] - 2019-08-04~~
This release was _yanked_ due to a breaking change for users of `no-default-features`.
### Added
- The experimental and unsafe `RawTable` API is available under the "raw" feature. (#108)
- Added entry-like methods for `HashSet`. (#98)
### Changed
- Changed the default hasher from FxHash to AHash. (#97)
- `hashbrown` is now fully `no_std` on recent Rust versions (1.36+). (#96)
### Fixed
- We now avoid growing the table during insertions when it wasn't necessary. (#106)
- `RawOccupiedEntryMut` now properly implements `Send` and `Sync`. (#100)
- Relaxed `lazy_static` version. (#92)
## [v0.5.0] - 2019-06-12
### Fixed
- Resize with a more conservative amount of space after deletions. (#86)
### Changed
- Exposed the Layout of the failed allocation in CollectionAllocErr::AllocErr. (#89)
## [v0.4.0] - 2019-05-30
### Fixed
- Fixed `Send` trait bounds on `IterMut` not matching the libstd one. (#82)
## [v0.3.1] - 2019-05-30
### Fixed
- Fixed incorrect use of slice in unsafe code. (#80)
## [v0.3.0] - 2019-04-23
### Changed
- Changed shrink_to to not panic if min_capacity < capacity. (#67)
### Fixed
- Worked around emscripten bug emscripten-core/emscripten-fastcomp#258. (#66)
## [v0.2.2] - 2019-04-16
### Fixed
- Inlined non-nightly lowest_set_bit_nonzero. (#64)
- Fixed build on latest nightly. (#65)
## [v0.2.1] - 2019-04-14
### Changed
- Use for_each in map Extend and FromIterator. (#58)
- Improved worst-case performance of HashSet.is_subset. (#61)
### Fixed
- Removed incorrect debug_assert. (#60)
## [v0.2.0] - 2019-03-31
### Changed
- The code has been updated to Rust 2018 edition. This means that the minimum
Rust version has been bumped to 1.31 (2018 edition).
### Added
- Added `insert_with_hasher` to the raw_entry API to allow `K: !(Hash + Eq)`. (#54)
- Added support for using hashbrown as the hash table implementation in libstd. (#46)
### Fixed
- Fixed cargo build with minimal-versions. (#45)
- Fixed `#[may_dangle]` attributes to match the libstd `HashMap`. (#46)
- ZST keys and values are now handled properly. (#46)
## [v0.1.8] - 2019-01-14
### Added
- Rayon parallel iterator support (#37)
- `raw_entry` support (#31)
- `#[may_dangle]` on nightly (#31)
- `try_reserve` support (#31)
### Fixed
- Fixed variance on `IterMut`. (#31)
## [v0.1.7] - 2018-12-05
### Fixed
- Fixed non-SSE version of convert_special_to_empty_and_full_to_deleted. (#32)
- Fixed overflow in rehash_in_place. (#33)
## [v0.1.6] - 2018-11-17
### Fixed
- Fixed compile error on nightly. (#29)
## [v0.1.5] - 2018-11-08
### Fixed
- Fixed subtraction overflow in generic::Group::match_byte. (#28)
## [v0.1.4] - 2018-11-04
### Fixed
- Fixed a bug in the `erase_no_drop` implementation. (#26)
## [v0.1.3] - 2018-11-01
### Added
- Serde support. (#14)
### Fixed
- Make the compiler inline functions more aggressively. (#20)
## [v0.1.2] - 2018-10-31
### Fixed
- `clear` segfaults when called on an empty table. (#13)
## [v0.1.1] - 2018-10-30
### Fixed
- `erase_no_drop` optimization not triggering in the SSE2 implementation. (#3)
- Missing `Send` and `Sync` for hash map and iterator types. (#7)
- Bug when inserting into a table smaller than the group width. (#5)
## v0.1.0 - 2018-10-29
- Initial release
[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.7.2...HEAD
[v0.7.2]: https://github.com/rust-lang/hashbrown/compare/v0.7.1...v0.7.2
[v0.7.1]: https://github.com/rust-lang/hashbrown/compare/v0.7.0...v0.7.1
[v0.7.0]: https://github.com/rust-lang/hashbrown/compare/v0.6.3...v0.7.0
[v0.6.3]: https://github.com/rust-lang/hashbrown/compare/v0.6.2...v0.6.3
[v0.6.2]: https://github.com/rust-lang/hashbrown/compare/v0.6.1...v0.6.2
[v0.6.1]: https://github.com/rust-lang/hashbrown/compare/v0.6.0...v0.6.1
[v0.6.0]: https://github.com/rust-lang/hashbrown/compare/v0.5.1...v0.6.0
[v0.5.1]: https://github.com/rust-lang/hashbrown/compare/v0.5.0...v0.5.1
[v0.5.0]: https://github.com/rust-lang/hashbrown/compare/v0.4.0...v0.5.0
[v0.4.0]: https://github.com/rust-lang/hashbrown/compare/v0.3.1...v0.4.0
[v0.3.1]: https://github.com/rust-lang/hashbrown/compare/v0.3.0...v0.3.1
[v0.3.0]: https://github.com/rust-lang/hashbrown/compare/v0.2.2...v0.3.0
[v0.2.2]: https://github.com/rust-lang/hashbrown/compare/v0.2.1...v0.2.2
[v0.2.1]: https://github.com/rust-lang/hashbrown/compare/v0.2.0...v0.2.1
[v0.2.0]: https://github.com/rust-lang/hashbrown/compare/v0.1.8...v0.2.0
[v0.1.8]: https://github.com/rust-lang/hashbrown/compare/v0.1.7...v0.1.8
[v0.1.7]: https://github.com/rust-lang/hashbrown/compare/v0.1.6...v0.1.7
[v0.1.6]: https://github.com/rust-lang/hashbrown/compare/v0.1.5...v0.1.6
[v0.1.5]: https://github.com/rust-lang/hashbrown/compare/v0.1.4...v0.1.5
[v0.1.4]: https://github.com/rust-lang/hashbrown/compare/v0.1.3...v0.1.4
[v0.1.3]: https://github.com/rust-lang/hashbrown/compare/v0.1.2...v0.1.3
[v0.1.2]: https://github.com/rust-lang/hashbrown/compare/v0.1.1...v0.1.2
[v0.1.1]: https://github.com/rust-lang/hashbrown/compare/v0.1.0...v0.1.1

third_party/rust/hashbrown-0.7.2/Cargo.toml (vendored)

@ -1,83 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "hashbrown"
version = "0.7.2"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
build = "build.rs"
exclude = [".travis.yml", "bors.toml", "/ci/*"]
description = "A Rust port of Google's SwissTable hash map"
readme = "README.md"
keywords = ["hash", "no_std", "hashmap", "swisstable"]
categories = ["data-structures", "no-std"]
license = "Apache-2.0/MIT"
repository = "https://github.com/rust-lang/hashbrown"
[package.metadata.docs.rs]
features = ["nightly", "rayon", "serde", "raw"]
[dependencies.ahash]
version = "0.3.2"
optional = true
default-features = false
[dependencies.alloc]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-alloc"
[dependencies.compiler_builtins]
version = "0.1.2"
optional = true
[dependencies.core]
version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
[dependencies.rayon]
version = "1.0"
optional = true
[dependencies.serde]
version = "1.0.25"
optional = true
default-features = false
[dev-dependencies.doc-comment]
version = "0.3.1"
[dev-dependencies.lazy_static]
version = "1.2"
[dev-dependencies.rand]
version = "0.7.3"
features = ["small_rng"]
[dev-dependencies.rayon]
version = "1.0"
[dev-dependencies.rustc-hash]
version = "=1.0"
[dev-dependencies.serde_test]
version = "1.0"
[build-dependencies.autocfg]
version = "1"
[features]
ahash-compile-time-rng = ["ahash/compile-time-rng"]
default = ["ahash", "inline-more"]
inline-more = []
nightly = []
raw = []
rustc-dep-of-std = ["nightly", "core", "compiler_builtins", "alloc", "rustc-internal-api"]
rustc-internal-api = []

third_party/rust/hashbrown-0.7.2/LICENSE-APACHE (vendored)

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/hashbrown-0.7.2/LICENSE-MIT (vendored)

@ -1,25 +0,0 @@
Copyright (c) 2016 Amanieu d'Antras
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

third_party/rust/hashbrown-0.7.2/README.md (vendored)

@ -1,125 +0,0 @@
hashbrown
=========
[![Build Status](https://travis-ci.com/rust-lang/hashbrown.svg?branch=master)](https://travis-ci.com/rust-lang/hashbrown)
[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown)
[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown)
This crate is a Rust port of Google's high-performance [SwissTable] hash
map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
and `HashSet` types.
The original C++ version of SwissTable can be found [here], and this
[CppCon talk] gives an overview of how the algorithm works.
Since Rust 1.36, this is now the `HashMap` implementation for the Rust standard
library. However, you may still want to use this crate instead since it works
in environments without `std`, such as embedded systems and kernels.
[SwissTable]: https://abseil.io/blog/20180927-swisstables
[here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
[CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
## [Change log](CHANGELOG.md)
## Features
- Drop-in replacement for the standard library `HashMap` and `HashSet` types.
- Uses `AHash` as the default hasher, which is much faster than SipHash.
- Around 2x faster than the previous standard library `HashMap`.
- Lower memory usage: only 1 byte of overhead per entry instead of 8.
- Compatible with `#[no_std]` (but requires a global allocator with the `alloc` crate).
- Empty hash maps do not allocate any memory.
- SIMD lookups to scan multiple hash entries in parallel.
## Performance
Compared to the previous implementation of `std::collections::HashMap` (Rust 1.35).
With the hashbrown default AHash hasher (not HashDoS-resistant):
```text
name oldstdhash ns/iter hashbrown ns/iter diff ns/iter diff % speedup
insert_ahash_highbits 20,846 7,397 -13,449 -64.52% x 2.82
insert_ahash_random 20,515 7,796 -12,719 -62.00% x 2.63
insert_ahash_serial 21,668 7,264 -14,404 -66.48% x 2.98
insert_erase_ahash_highbits 29,570 17,498 -12,072 -40.83% x 1.69
insert_erase_ahash_random 39,569 17,474 -22,095 -55.84% x 2.26
insert_erase_ahash_serial 32,073 17,332 -14,741 -45.96% x 1.85
iter_ahash_highbits 1,572 2,087 515 32.76% x 0.75
iter_ahash_random 1,609 2,074 465 28.90% x 0.78
iter_ahash_serial 2,293 2,120 -173 -7.54% x 1.08
lookup_ahash_highbits 3,460 4,403 943 27.25% x 0.79
lookup_ahash_random 6,377 3,911 -2,466 -38.67% x 1.63
lookup_ahash_serial 3,629 3,586 -43 -1.18% x 1.01
lookup_fail_ahash_highbits 5,286 3,411 -1,875 -35.47% x 1.55
lookup_fail_ahash_random 12,365 4,171 -8,194 -66.27% x 2.96
lookup_fail_ahash_serial 4,902 3,240 -1,662 -33.90% x 1.51
```
With the libstd default SipHash hasher (HashDoS-resistant):
```text
name oldstdhash ns/iter hashbrown ns/iter diff ns/iter diff % speedup
insert_std_highbits 32,598 20,199 -12,399 -38.04% x 1.61
insert_std_random 29,824 20,760 -9,064 -30.39% x 1.44
insert_std_serial 33,151 17,256 -15,895 -47.95% x 1.92
insert_erase_std_highbits 74,731 48,735 -25,996 -34.79% x 1.53
insert_erase_std_random 73,828 47,649 -26,179 -35.46% x 1.55
insert_erase_std_serial 73,864 40,147 -33,717 -45.65% x 1.84
iter_std_highbits 1,518 2,264 746 49.14% x 0.67
iter_std_random 1,502 2,414 912 60.72% x 0.62
iter_std_serial 6,361 2,118 -4,243 -66.70% x 3.00
lookup_std_highbits 21,705 16,962 -4,743 -21.85% x 1.28
lookup_std_random 21,654 17,158 -4,496 -20.76% x 1.26
lookup_std_serial 18,726 14,509 -4,217 -22.52% x 1.29
lookup_fail_std_highbits 25,852 17,323 -8,529 -32.99% x 1.49
lookup_fail_std_random 25,913 17,760 -8,153 -31.46% x 1.46
lookup_fail_std_serial 22,648 14,839 -7,809 -34.48% x 1.53
```
## Usage
Add this to your `Cargo.toml`:
```toml
[dependencies]
hashbrown = "0.7"
```
Then:
```rust
use hashbrown::HashMap;
let mut map = HashMap::new();
map.insert(1, "one");
```
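The `HashSet` type works as a drop-in replacement in the same way (a minimal sketch mirroring the map example above):
```rust
use hashbrown::HashSet;

let mut set = HashSet::new();
set.insert("one");
assert!(set.contains("one"));
```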
This crate has the following Cargo features:
- `nightly`: Enables nightly-only features: `#[may_dangle]`.
- `serde`: Enables serde serialization support.
- `rayon`: Enables rayon parallel iterator support.
- `raw`: Enables access to the experimental and unsafe `RawTable` API.
- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost
of compilation time. (enabled by default)
- `ahash`: Compiles with ahash as default hasher. (enabled by default)
- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash, to increase the
DOS-resistance, but can result in issues for `no_std` builds. More details in
[issue#124](https://github.com/rust-lang/hashbrown/issues/124). (enabled by default)
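For instance, a downstream crate that wants the `serde` and `rayon` integrations listed above could opt in with a manifest entry along these lines (a hypothetical `Cargo.toml` fragment, not part of this crate):
```toml
[dependencies]
hashbrown = { version = "0.7", features = ["serde", "rayon"] }
```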
## License
Licensed under either of:
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.


@ -1,260 +0,0 @@
// This benchmark suite contains some benchmarks along a set of dimensions:
// Hasher: std default (SipHash) and crate default (AHash).
// Int key distribution: low bit heavy, top bit heavy, and random.
// Task: basic functionality: insert, insert_erase, lookup, lookup_fail, iter
#![feature(test)]
extern crate test;
use test::{black_box, Bencher};
use hashbrown::hash_map::DefaultHashBuilder;
use hashbrown::HashMap;
use std::collections::hash_map::RandomState;
const SIZE: usize = 1000;
// The default hashmap when using this crate directly.
type AHashMap<K, V> = HashMap<K, V, DefaultHashBuilder>;
// This uses the hashmap from this crate with the default hasher of the stdlib.
type StdHashMap<K, V> = HashMap<K, V, RandomState>;
// A random key iterator.
#[derive(Clone, Copy)]
struct RandomKeys {
state: usize,
}
impl RandomKeys {
fn new() -> Self {
RandomKeys { state: 0 }
}
}
impl Iterator for RandomKeys {
type Item = usize;
fn next(&mut self) -> Option<usize> {
// Add 1 then multiply by some 32 bit prime.
self.state = self.state.wrapping_add(1).wrapping_mul(3787392781);
Some(self.state)
}
}
macro_rules! bench_suite {
($bench_macro:ident, $bench_ahash_serial:ident, $bench_std_serial:ident,
$bench_ahash_highbits:ident, $bench_std_highbits:ident,
$bench_ahash_random:ident, $bench_std_random:ident) => {
$bench_macro!($bench_ahash_serial, AHashMap, 0..);
$bench_macro!($bench_std_serial, StdHashMap, 0..);
$bench_macro!(
$bench_ahash_highbits,
AHashMap,
(0..).map(usize::swap_bytes)
);
$bench_macro!(
$bench_std_highbits,
StdHashMap,
(0..).map(usize::swap_bytes)
);
$bench_macro!($bench_ahash_random, AHashMap, RandomKeys::new());
$bench_macro!($bench_std_random, StdHashMap, RandomKeys::new());
};
}
macro_rules! bench_insert {
($name:ident, $maptype:ident, $keydist:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut m = $maptype::with_capacity_and_hasher(SIZE, Default::default());
b.iter(|| {
m.clear();
for i in ($keydist).take(SIZE) {
m.insert(i, i);
}
black_box(&mut m);
})
}
};
}
bench_suite!(
bench_insert,
insert_ahash_serial,
insert_std_serial,
insert_ahash_highbits,
insert_std_highbits,
insert_ahash_random,
insert_std_random
);
macro_rules! bench_insert_erase {
($name:ident, $maptype:ident, $keydist:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut base = $maptype::default();
for i in ($keydist).take(SIZE) {
base.insert(i, i);
}
let skip = $keydist.skip(SIZE);
b.iter(|| {
let mut m = base.clone();
let mut add_iter = skip.clone();
let mut remove_iter = $keydist;
// While keeping the size constant,
// replace the first keydist with the second.
for (add, remove) in (&mut add_iter).zip(&mut remove_iter).take(SIZE) {
m.insert(add, add);
black_box(m.remove(&remove));
}
black_box(m);
})
}
};
}
bench_suite!(
bench_insert_erase,
insert_erase_ahash_serial,
insert_erase_std_serial,
insert_erase_ahash_highbits,
insert_erase_std_highbits,
insert_erase_ahash_random,
insert_erase_std_random
);
macro_rules! bench_lookup {
($name:ident, $maptype:ident, $keydist:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut m = $maptype::default();
for i in $keydist.take(SIZE) {
m.insert(i, i);
}
b.iter(|| {
for i in $keydist.take(SIZE) {
black_box(m.get(&i));
}
})
}
};
}
bench_suite!(
bench_lookup,
lookup_ahash_serial,
lookup_std_serial,
lookup_ahash_highbits,
lookup_std_highbits,
lookup_ahash_random,
lookup_std_random
);
macro_rules! bench_lookup_fail {
($name:ident, $maptype:ident, $keydist:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut m = $maptype::default();
let mut iter = $keydist;
for i in (&mut iter).take(SIZE) {
m.insert(i, i);
}
b.iter(|| {
for i in (&mut iter).take(SIZE) {
black_box(m.get(&i));
}
})
}
};
}
bench_suite!(
bench_lookup_fail,
lookup_fail_ahash_serial,
lookup_fail_std_serial,
lookup_fail_ahash_highbits,
lookup_fail_std_highbits,
lookup_fail_ahash_random,
lookup_fail_std_random
);
macro_rules! bench_iter {
($name:ident, $maptype:ident, $keydist:expr) => {
#[bench]
fn $name(b: &mut Bencher) {
let mut m = $maptype::default();
for i in ($keydist).take(SIZE) {
m.insert(i, i);
}
b.iter(|| {
for i in &m {
black_box(i);
}
})
}
};
}
bench_suite!(
bench_iter,
iter_ahash_serial,
iter_std_serial,
iter_ahash_highbits,
iter_std_highbits,
iter_ahash_random,
iter_std_random
);
#[bench]
fn clone_small(b: &mut Bencher) {
let mut m = HashMap::new();
for i in 0..10 {
m.insert(i, i);
}
b.iter(|| {
black_box(m.clone());
})
}
#[bench]
fn clone_from_small(b: &mut Bencher) {
let mut m = HashMap::new();
let mut m2 = HashMap::new();
for i in 0..10 {
m.insert(i, i);
}
b.iter(|| {
m2.clone_from(&m);
black_box(&mut m2);
})
}
#[bench]
fn clone_large(b: &mut Bencher) {
let mut m = HashMap::new();
for i in 0..1000 {
m.insert(i, i);
}
b.iter(|| {
black_box(m.clone());
})
}
#[bench]
fn clone_from_large(b: &mut Bencher) {
let mut m = HashMap::new();
let mut m2 = HashMap::new();
for i in 0..1000 {
m.insert(i, i);
}
b.iter(|| {
m2.clone_from(&m);
black_box(&mut m2);
})
}

9
third_party/rust/hashbrown-0.7.2/build.rs vendored

@ -1,9 +0,0 @@
fn main() {
println!("cargo:rerun-if-changed=build.rs");
let nightly = std::env::var_os("CARGO_FEATURE_NIGHTLY").is_some();
let has_stable_alloc = || autocfg::new().probe_rustc_version(1, 36);
if nightly || has_stable_alloc() {
autocfg::emit("has_extern_crate_alloc")
}
}


@ -1 +0,0 @@
doc-valid-idents = [ "CppCon", "SwissTable", "SipHash", "HashDoS" ]


@ -1,4 +0,0 @@
#[cfg(feature = "rayon")]
pub(crate) mod rayon;
#[cfg(feature = "serde")]
mod serde;


@ -1,26 +0,0 @@
use alloc::collections::LinkedList;
use alloc::vec::Vec;
use rayon::iter::{IntoParallelIterator, ParallelIterator};
/// Helper for collecting parallel iterators to an intermediary
pub(super) fn collect<I: IntoParallelIterator>(iter: I) -> (LinkedList<Vec<I::Item>>, usize) {
let list = iter
.into_par_iter()
.fold(Vec::new, |mut vec, elem| {
vec.push(elem);
vec
})
.map(|vec| {
let mut list = LinkedList::new();
list.push_back(vec);
list
})
.reduce(LinkedList::new, |mut list1, mut list2| {
list1.append(&mut list2);
list1
});
let len = list.iter().map(Vec::len).sum();
(list, len)
}


@ -1,676 +0,0 @@
//! Rayon extensions for `HashMap`.
use crate::hash_map::HashMap;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over shared references to entries in a map.
///
/// This iterator is created by the [`par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashMap.html#method.par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, K, V, S> {
map: &'a HashMap<K, V, S>,
}
impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParIter<'a, K, V, S> {
type Item = (&'a K, &'a V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe {
let r = x.as_ref();
(&r.0, &r.1)
})
.drive_unindexed(consumer)
}
}
impl<K, V, S> Clone for ParIter<'_, K, V, S> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
ParIter { map: self.map }
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug for ParIter<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.iter().fmt(f)
}
}
/// Parallel iterator over shared references to keys in a map.
///
/// This iterator is created by the [`par_keys`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_keys`]: /hashbrown/struct.HashMap.html#method.par_keys
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParKeys<'a, K, V, S> {
map: &'a HashMap<K, V, S>,
}
impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParKeys<'a, K, V, S> {
type Item = &'a K;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe { &x.as_ref().0 })
.drive_unindexed(consumer)
}
}
impl<K, V, S> Clone for ParKeys<'_, K, V, S> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
ParKeys { map: self.map }
}
}
impl<K: fmt::Debug + Eq + Hash, V, S: BuildHasher> fmt::Debug for ParKeys<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.keys().fmt(f)
}
}
/// Parallel iterator over shared references to values in a map.
///
/// This iterator is created by the [`par_values`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values`]: /hashbrown/struct.HashMap.html#method.par_values
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValues<'a, K, V, S> {
map: &'a HashMap<K, V, S>,
}
impl<'a, K: Sync, V: Sync, S: Sync> ParallelIterator for ParValues<'a, K, V, S> {
type Item = &'a V;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe { &x.as_ref().1 })
.drive_unindexed(consumer)
}
}
impl<K, V, S> Clone for ParValues<'_, K, V, S> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
ParValues { map: self.map }
}
}
impl<K: Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug for ParValues<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.values().fmt(f)
}
}
/// Parallel iterator over mutable references to entries in a map.
///
/// This iterator is created by the [`par_iter_mut`] method on [`HashMap`]
/// (provided by the [`IntoParallelRefMutIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter_mut`]: /hashbrown/struct.HashMap.html#method.par_iter_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
pub struct ParIterMut<'a, K, V, S> {
map: &'a mut HashMap<K, V, S>,
}
impl<'a, K: Send + Sync, V: Send, S: Send> ParallelIterator for ParIterMut<'a, K, V, S> {
type Item = (&'a K, &'a mut V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe {
let r = x.as_mut();
(&r.0, &mut r.1)
})
.drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug
for ParIterMut<'_, K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.iter().fmt(f)
}
}
/// Parallel iterator over mutable references to values in a map.
///
/// This iterator is created by the [`par_values_mut`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_values_mut`]: /hashbrown/struct.HashMap.html#method.par_values_mut
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParValuesMut<'a, K, V, S> {
map: &'a mut HashMap<K, V, S>,
}
impl<'a, K: Send, V: Send, S: Send> ParallelIterator for ParValuesMut<'a, K, V, S> {
type Item = &'a mut V;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map
.table
.par_iter()
.map(|x| unsafe { &mut x.as_mut().1 })
.drive_unindexed(consumer)
}
}
impl<K: Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug for ParValuesMut<'_, K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.values().fmt(f)
}
}
/// Parallel iterator over entries of a consumed map.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashMap`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<K, V, S> {
map: HashMap<K, V, S>,
}
impl<K: Send, V: Send, S: Send> ParallelIterator for IntoParIter<K, V, S> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map.table.into_par_iter().drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug for IntoParIter<K, V, S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.iter().fmt(f)
}
}
/// Parallel draining iterator over entries of a map.
///
/// This iterator is created by the [`par_drain`] method on [`HashMap`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain
/// [`HashMap`]: /hashbrown/struct.HashMap.html
pub struct ParDrain<'a, K, V, S> {
map: &'a mut HashMap<K, V, S>,
}
impl<K: Send, V: Send, S: Send> ParallelIterator for ParDrain<'_, K, V, S> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.map.table.par_drain().drive_unindexed(consumer)
}
}
impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, S: BuildHasher> fmt::Debug
for ParDrain<'_, K, V, S>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.map.iter().fmt(f)
}
}
impl<K: Sync, V: Sync, S: Sync> HashMap<K, V, S> {
/// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_keys(&self) -> ParKeys<'_, K, V, S> {
ParKeys { map: self }
}
/// Visits (potentially in parallel) immutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values(&self) -> ParValues<'_, K, V, S> {
ParValues { map: self }
}
}
impl<K: Send, V: Send, S: Send> HashMap<K, V, S> {
/// Visits (potentially in parallel) mutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V, S> {
ParValuesMut { map: self }
}
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the map's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, K, V, S> {
ParDrain { map: self }
}
}
impl<K, V, S> HashMap<K, V, S>
where
K: Eq + Hash + Sync,
V: PartialEq + Sync,
S: BuildHasher + Sync,
{
/// Returns `true` if the map is equal to another,
/// i.e. both maps contain the same keys mapped to the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len()
&& self
.into_par_iter()
.all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
}
}
impl<K: Send, V: Send, S: Send> IntoParallelIterator for HashMap<K, V, S> {
type Item = (K, V);
type Iter = IntoParIter<K, V, S>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter { map: self }
}
}
impl<'a, K: Sync, V: Sync, S: Sync> IntoParallelIterator for &'a HashMap<K, V, S> {
type Item = (&'a K, &'a V);
type Iter = ParIter<'a, K, V, S>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter { map: self }
}
}
impl<'a, K: Send + Sync, V: Send, S: Send> IntoParallelIterator for &'a mut HashMap<K, V, S> {
type Item = (&'a K, &'a mut V);
type Iter = ParIterMut<'a, K, V, S>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIterMut { map: self }
}
}
/// Collect (key, value) pairs from a parallel iterator into a
/// hashmap. If multiple pairs correspond to the same key, then the
/// ones produced earlier in the parallel iterator will be
/// overwritten, just as with a sequential iterator.
impl<K, V, S> FromParallelIterator<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = (K, V)>,
{
let mut map = HashMap::default();
map.par_extend(par_iter);
map
}
}
/// Extend a hash map with items from a parallel iterator.
impl<K, V, S> ParallelExtend<(K, V)> for HashMap<K, V, S>
where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (K, V)>,
{
extend(self, par_iter);
}
}
/// Extend a hash map with copied items from a parallel iterator.
impl<'a, K, V, S> ParallelExtend<(&'a K, &'a V)> for HashMap<K, V, S>
where
K: Copy + Eq + Hash + Sync,
V: Copy + Sync,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = (&'a K, &'a V)>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashMap` -- no custom advantage.
fn extend<K, V, S, I>(map: &mut HashMap<K, V, S>, par_iter: I)
where
K: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
HashMap<K, V, S>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Keys may be already present or show multiple times in the iterator.
// Reserve the entire length if the map is empty.
// Otherwise reserve half the length (rounded up), so the map
// will only resize twice in the worst case.
let reserve = if map.is_empty() { len } else { (len + 1) / 2 };
map.reserve(reserve);
for vec in list {
map.extend(vec);
}
}
#[cfg(test)]
mod test_par_map {
use alloc::vec::Vec;
use core::hash::{Hash, Hasher};
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::hash_map::HashMap;
struct Dropable<'a> {
k: usize,
counter: &'a AtomicUsize,
}
impl Dropable<'_> {
fn new(k: usize, counter: &AtomicUsize) -> Dropable<'_> {
counter.fetch_add(1, Ordering::Relaxed);
Dropable { k, counter }
}
}
impl Drop for Dropable<'_> {
fn drop(&mut self) {
self.counter.fetch_sub(1, Ordering::Relaxed);
}
}
impl Clone for Dropable<'_> {
fn clone(&self) -> Self {
Dropable::new(self.k, self.counter)
}
}
impl Hash for Dropable<'_> {
fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.k.hash(state)
}
}
impl PartialEq for Dropable<'_> {
fn eq(&self, other: &Self) -> bool {
self.k == other.k
}
}
impl Eq for Dropable<'_> {}
#[test]
fn test_into_iter_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Dropable::new(i, &key);
let d2 = Dropable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the iterator does not leak anything.
drop(hm.clone().into_par_iter());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm
.into_par_iter()
.filter(|&(ref key, _)| key.k < 50)
.collect();
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_drain_drops() {
let key = AtomicUsize::new(0);
let value = AtomicUsize::new(0);
let mut hm = {
let mut hm = HashMap::new();
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
for i in 0..100 {
let d1 = Dropable::new(i, &key);
let d2 = Dropable::new(i + 100, &value);
hm.insert(d1, d2);
}
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
hm
};
// By the way, ensure that cloning doesn't screw up the dropping.
drop(hm.clone());
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// Ensure that dropping the drain iterator does not leak anything.
drop(hm.clone().par_drain());
{
assert_eq!(key.load(Ordering::Relaxed), 100);
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect();
assert!(hm.is_empty());
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
};
assert_eq!(key.load(Ordering::Relaxed), 0);
assert_eq!(value.load(Ordering::Relaxed), 0);
}
#[test]
fn test_empty_iter() {
let mut m: HashMap<isize, bool> = HashMap::new();
assert_eq!(m.par_drain().count(), 0);
assert_eq!(m.par_keys().count(), 0);
assert_eq!(m.par_values().count(), 0);
assert_eq!(m.par_values_mut().count(), 0);
assert_eq!(m.par_iter().count(), 0);
assert_eq!(m.par_iter_mut().count(), 0);
assert_eq!(m.len(), 0);
assert!(m.is_empty());
assert_eq!(m.into_par_iter().count(), 0);
}
#[test]
fn test_iterate() {
let mut m = HashMap::with_capacity(4);
for i in 0..32 {
assert!(m.insert(i, i * 2).is_none());
}
assert_eq!(m.len(), 32);
let observed = AtomicUsize::new(0);
m.par_iter().for_each(|(k, v)| {
assert_eq!(*v, *k * 2);
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_keys() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let keys: Vec<_> = map.par_keys().cloned().collect();
assert_eq!(keys.len(), 3);
assert!(keys.contains(&1));
assert!(keys.contains(&2));
assert!(keys.contains(&3));
}
#[test]
fn test_values() {
let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
let map: HashMap<_, _> = vec.into_par_iter().collect();
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&'a'));
assert!(values.contains(&'b'));
assert!(values.contains(&'c'));
}
#[test]
fn test_values_mut() {
let vec = vec![(1, 1), (2, 2), (3, 3)];
let mut map: HashMap<_, _> = vec.into_par_iter().collect();
map.par_values_mut().for_each(|value| *value = (*value) * 2);
let values: Vec<_> = map.par_values().cloned().collect();
assert_eq!(values.len(), 3);
assert!(values.contains(&2));
assert!(values.contains(&4));
assert!(values.contains(&6));
}
#[test]
fn test_eq() {
let mut m1 = HashMap::new();
m1.insert(1, 2);
m1.insert(2, 3);
m1.insert(3, 4);
let mut m2 = HashMap::new();
m2.insert(1, 2);
m2.insert(2, 3);
assert!(!m1.par_eq(&m2));
m2.insert(3, 4);
assert!(m1.par_eq(&m2));
}
#[test]
fn test_from_iter() {
let xs = [(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6)];
let map: HashMap<_, _> = xs.par_iter().cloned().collect();
for &(k, v) in &xs {
assert_eq!(map.get(&k), Some(&v));
}
}
#[test]
fn test_extend_ref() {
let mut a = HashMap::new();
a.insert(1, "one");
let mut b = HashMap::new();
b.insert(2, "two");
b.insert(3, "three");
a.par_extend(&b);
assert_eq!(a.len(), 3);
assert_eq!(a[&1], "one");
assert_eq!(a[&2], "two");
assert_eq!(a[&3], "three");
}
}
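A minimal usage sketch of the parallel map iterators defined in this removed module, assuming hashbrown 0.7 is built with its `rayon` feature and the `rayon` crate is also a dependency (illustrative only, not part of this change):
```rust
use hashbrown::HashMap;
use rayon::prelude::*;

fn main() {
    let mut squares: HashMap<u32, u32> = (0..1_000).map(|i| (i, i * i)).collect();

    // Read-only parallel traversal (ParIter via IntoParallelIterator for &HashMap).
    let sum: u64 = squares.par_iter().map(|(_, v)| u64::from(*v)).sum();
    let expected: u64 = (0..1_000u64).map(|i| i * i).sum();
    assert_eq!(sum, expected);

    // Mutable parallel traversal (ParValuesMut).
    squares.par_values_mut().for_each(|v| *v += 1);
}
```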


@ -1,4 +0,0 @@
mod helpers;
pub(crate) mod map;
pub(crate) mod raw;
pub(crate) mod set;


@ -1,193 +0,0 @@
use crate::raw::Bucket;
use crate::raw::{RawIterRange, RawTable};
use crate::scopeguard::guard;
use alloc::alloc::dealloc;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
use rayon::iter::{
plumbing::{self, Folder, UnindexedConsumer, UnindexedProducer},
ParallelIterator,
};
/// Parallel iterator which returns a raw pointer to every full bucket in the table.
pub struct RawParIter<T> {
iter: RawIterRange<T>,
}
impl<T> ParallelIterator for RawParIter<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let producer = ParIterProducer { iter: self.iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Producer which returns a `Bucket<T>` for every element.
struct ParIterProducer<T> {
iter: RawIterRange<T>,
}
impl<T> UnindexedProducer for ParIterProducer<T> {
type Item = Bucket<T>;
#[cfg_attr(feature = "inline-more", inline)]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.split();
let left = ParIterProducer { iter: left };
let right = right.map(|right| ParIterProducer { iter: right });
(left, right)
}
#[cfg_attr(feature = "inline-more", inline)]
fn fold_with<F>(self, folder: F) -> F
where
F: Folder<Self::Item>,
{
folder.consume_iter(self.iter)
}
}
/// Parallel iterator which consumes a table and returns elements.
pub struct RawIntoParIter<T> {
table: RawTable<T>,
}
impl<T: Send> ParallelIterator for RawIntoParIter<T> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let iter = unsafe { self.table.iter().iter };
let _guard = guard(self.table.into_alloc(), |alloc| {
if let Some((ptr, layout)) = *alloc {
unsafe {
dealloc(ptr.as_ptr(), layout);
}
}
});
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
/// Parallel iterator which consumes elements without freeing the table storage.
pub struct RawParDrain<'a, T> {
// We don't use a &'a mut RawTable<T> because we want RawParDrain to be
// covariant over T.
table: NonNull<RawTable<T>>,
marker: PhantomData<&'a RawTable<T>>,
}
unsafe impl<T> Send for RawParDrain<'_, T> {}
impl<T: Send> ParallelIterator for RawParDrain<'_, T> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
let _guard = guard(self.table, |table| unsafe {
table.as_mut().clear_no_drop()
});
let iter = unsafe { self.table.as_ref().iter().iter };
mem::forget(self);
let producer = ParDrainProducer { iter };
plumbing::bridge_unindexed(producer, consumer)
}
}
impl<T> Drop for RawParDrain<'_, T> {
fn drop(&mut self) {
// If drive_unindexed is not called then simply clear the table.
unsafe { self.table.as_mut().clear() }
}
}
/// Producer which will consume all elements in the range, even if it is dropped
/// halfway through.
struct ParDrainProducer<T> {
iter: RawIterRange<T>,
}
impl<T: Send> UnindexedProducer for ParDrainProducer<T> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
fn split(self) -> (Self, Option<Self>) {
let (left, right) = self.iter.clone().split();
mem::forget(self);
let left = ParDrainProducer { iter: left };
let right = right.map(|right| ParDrainProducer { iter: right });
(left, right)
}
#[cfg_attr(feature = "inline-more", inline)]
fn fold_with<F>(mut self, mut folder: F) -> F
where
F: Folder<Self::Item>,
{
// Make sure to modify the iterator in-place so that any remaining
// elements are processed in our Drop impl.
while let Some(item) = self.iter.next() {
folder = folder.consume(unsafe { item.read() });
if folder.full() {
return folder;
}
}
// If we processed all elements then we don't need to run the drop.
mem::forget(self);
folder
}
}
impl<T> Drop for ParDrainProducer<T> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
// Drop all remaining elements
if mem::needs_drop::<T>() {
while let Some(item) = self.iter.next() {
unsafe {
item.drop();
}
}
}
}
}
impl<T> RawTable<T> {
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_iter(&self) -> RawParIter<T> {
RawParIter {
iter: unsafe { self.iter().iter },
}
}
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_par_iter(self) -> RawIntoParIter<T> {
RawIntoParIter { table: self }
}
/// Returns a parallel iterator which consumes all elements of a `RawTable`
/// without freeing its memory allocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> RawParDrain<'_, T> {
RawParDrain {
table: NonNull::from(self),
marker: PhantomData,
}
}
}


@ -1,646 +0,0 @@
//! Rayon extensions for `HashSet`.
use crate::hash_set::HashSet;
use core::hash::{BuildHasher, Hash};
use rayon::iter::plumbing::UnindexedConsumer;
use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator};
/// Parallel iterator over elements of a consumed set.
///
/// This iterator is created by the [`into_par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelIterator`] trait).
/// See its documentation for more.
///
/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
pub struct IntoParIter<T, S> {
set: HashSet<T, S>,
}
impl<T: Send, S: Send> ParallelIterator for IntoParIter<T, S> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.set
.map
.into_par_iter()
.map(|(k, _)| k)
.drive_unindexed(consumer)
}
}
/// Parallel draining iterator over entries of a set.
///
/// This iterator is created by the [`par_drain`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDrain<'a, T, S> {
set: &'a mut HashSet<T, S>,
}
impl<T: Send, S: Send> ParallelIterator for ParDrain<'_, T, S> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.set
.map
.par_drain()
.map(|(k, _)| k)
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in a set.
///
/// This iterator is created by the [`par_iter`] method on [`HashSet`]
/// (provided by the [`IntoParallelRefIterator`] trait).
/// See its documentation for more.
///
/// [`par_iter`]: /hashbrown/struct.HashSet.html#method.par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
pub struct ParIter<'a, T, S> {
set: &'a HashSet<T, S>,
}
impl<'a, T: Sync, S: Sync> ParallelIterator for ParIter<'a, T, S> {
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.set.map.par_keys().drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the difference of
/// sets.
///
/// This iterator is created by the [`par_difference`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParDifference<'a, T, S> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParDifference<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| !self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the symmetric
/// difference of sets.
///
/// This iterator is created by the [`par_symmetric_difference`] method on
/// [`HashSet`].
/// See its documentation for more.
///
/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParSymmetricDifference<'a, T, S> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParSymmetricDifference<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.par_difference(self.b)
.chain(self.b.par_difference(self.a))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the intersection of
/// sets.
///
/// This iterator is created by the [`par_intersection`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParIntersection<'a, T, S> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParIntersection<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.filter(|&x| self.b.contains(x))
.drive_unindexed(consumer)
}
}
/// Parallel iterator over shared references to elements in the union of sets.
///
/// This iterator is created by the [`par_union`] method on [`HashSet`].
/// See its documentation for more.
///
/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union
/// [`HashSet`]: /hashbrown/struct.HashSet.html
pub struct ParUnion<'a, T, S> {
a: &'a HashSet<T, S>,
b: &'a HashSet<T, S>,
}
impl<'a, T, S> ParallelIterator for ParUnion<'a, T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
type Item = &'a T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
where
C: UnindexedConsumer<Self::Item>,
{
self.a
.into_par_iter()
.chain(self.b.par_difference(self.a))
.drive_unindexed(consumer)
}
}
impl<T, S> HashSet<T, S>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
{
/// Visits (potentially in parallel) the values representing the difference,
/// i.e. the values that are in `self` but not in `other`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_difference<'a>(&'a self, other: &'a Self) -> ParDifference<'a, T, S> {
ParDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the symmetric
/// difference, i.e. the values that are in `self` or in `other` but not in both.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_symmetric_difference<'a>(
&'a self,
other: &'a Self,
) -> ParSymmetricDifference<'a, T, S> {
ParSymmetricDifference { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the
/// intersection, i.e. the values that are both in `self` and `other`.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_intersection<'a>(&'a self, other: &'a Self) -> ParIntersection<'a, T, S> {
ParIntersection { a: self, b: other }
}
/// Visits (potentially in parallel) the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_union<'a>(&'a self, other: &'a Self) -> ParUnion<'a, T, S> {
ParUnion { a: self, b: other }
}
/// Returns `true` if `self` has no elements in common with `other`.
/// This is equivalent to checking for an empty intersection.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_disjoint(&self, other: &Self) -> bool {
self.into_par_iter().all(|x| !other.contains(x))
}
/// Returns `true` if the set is a subset of another,
/// i.e. `other` contains at least all the values in `self`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_subset(&self, other: &Self) -> bool {
if self.len() <= other.len() {
self.into_par_iter().all(|x| other.contains(x))
} else {
false
}
}
/// Returns `true` if the set is a superset of another,
/// i.e. `self` contains at least all the values in `other`.
///
/// This method runs in a potentially parallel fashion.
pub fn par_is_superset(&self, other: &Self) -> bool {
other.par_is_subset(self)
}
/// Returns `true` if the set is equal to another,
/// i.e. both sets contain the same values.
///
/// This method runs in a potentially parallel fashion.
pub fn par_eq(&self, other: &Self) -> bool {
self.len() == other.len() && self.par_is_subset(other)
}
}
impl<T, S> HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher + Send,
{
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the set's allocated memory for reuse.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_drain(&mut self) -> ParDrain<'_, T, S> {
ParDrain { set: self }
}
}
impl<T: Send, S: Send> IntoParallelIterator for HashSet<T, S> {
type Item = T;
type Iter = IntoParIter<T, S>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
IntoParIter { set: self }
}
}
impl<'a, T: Sync, S: Sync> IntoParallelIterator for &'a HashSet<T, S> {
type Item = &'a T;
type Iter = ParIter<'a, T, S>;
#[cfg_attr(feature = "inline-more", inline)]
fn into_par_iter(self) -> Self::Iter {
ParIter { set: self }
}
}
/// Collect values from a parallel iterator into a hashset.
impl<T, S> FromParallelIterator<T> for HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher + Default,
{
fn from_par_iter<P>(par_iter: P) -> Self
where
P: IntoParallelIterator<Item = T>,
{
let mut set = HashSet::default();
set.par_extend(par_iter);
set
}
}
/// Extend a hash set with items from a parallel iterator.
impl<T, S> ParallelExtend<T> for HashSet<T, S>
where
T: Eq + Hash + Send,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = T>,
{
extend(self, par_iter);
}
}
/// Extend a hash set with copied items from a parallel iterator.
impl<'a, T, S> ParallelExtend<&'a T> for HashSet<T, S>
where
T: 'a + Copy + Eq + Hash + Sync,
S: BuildHasher,
{
fn par_extend<I>(&mut self, par_iter: I)
where
I: IntoParallelIterator<Item = &'a T>,
{
extend(self, par_iter);
}
}
// This is equal to the normal `HashSet` -- no custom advantage.
fn extend<T, S, I>(set: &mut HashSet<T, S>, par_iter: I)
where
T: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
HashSet<T, S>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
// Values may be already present or show multiple times in the iterator.
// Reserve the entire length if the set is empty.
// Otherwise reserve half the length (rounded up), so the set
// will only resize twice in the worst case.
let reserve = if set.is_empty() { len } else { (len + 1) / 2 };
set.reserve(reserve);
for vec in list {
set.extend(vec);
}
}
#[cfg(test)]
mod test_par_set {
use alloc::vec::Vec;
use core::sync::atomic::{AtomicUsize, Ordering};
use rayon::prelude::*;
use crate::hash_set::HashSet;
#[test]
fn test_disjoint() {
let mut xs = HashSet::new();
let mut ys = HashSet::new();
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(5));
assert!(ys.insert(11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(xs.insert(7));
assert!(xs.insert(19));
assert!(xs.insert(4));
assert!(ys.insert(2));
assert!(ys.insert(-11));
assert!(xs.par_is_disjoint(&ys));
assert!(ys.par_is_disjoint(&xs));
assert!(ys.insert(7));
assert!(!xs.par_is_disjoint(&ys));
assert!(!ys.par_is_disjoint(&xs));
}
#[test]
fn test_subset_and_superset() {
let mut a = HashSet::new();
assert!(a.insert(0));
assert!(a.insert(5));
assert!(a.insert(11));
assert!(a.insert(7));
let mut b = HashSet::new();
assert!(b.insert(0));
assert!(b.insert(7));
assert!(b.insert(19));
assert!(b.insert(250));
assert!(b.insert(11));
assert!(b.insert(200));
assert!(!a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(!b.par_is_superset(&a));
assert!(b.insert(5));
assert!(a.par_is_subset(&b));
assert!(!a.par_is_superset(&b));
assert!(!b.par_is_subset(&a));
assert!(b.par_is_superset(&a));
}
#[test]
fn test_iterate() {
let mut a = HashSet::new();
for i in 0..32 {
assert!(a.insert(i));
}
let observed = AtomicUsize::new(0);
a.par_iter().for_each(|k| {
observed.fetch_or(1 << *k, Ordering::Relaxed);
});
assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
}
#[test]
fn test_intersection() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(11));
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(77));
assert!(a.insert(103));
assert!(a.insert(5));
assert!(a.insert(-5));
assert!(b.insert(2));
assert!(b.insert(11));
assert!(b.insert(77));
assert!(b.insert(-9));
assert!(b.insert(-42));
assert!(b.insert(5));
assert!(b.insert(3));
let expected = [3, 5, 11, 77];
let i = a
.par_intersection(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(3));
assert!(b.insert(9));
let expected = [1, 5, 11];
let i = a
.par_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_symmetric_difference() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(b.insert(-2));
assert!(b.insert(3));
assert!(b.insert(9));
assert!(b.insert(14));
assert!(b.insert(22));
let expected = [-2, 1, 5, 11, 14, 22];
let i = a
.par_symmetric_difference(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_union() {
let mut a = HashSet::new();
let mut b = HashSet::new();
assert!(a.insert(1));
assert!(a.insert(3));
assert!(a.insert(5));
assert!(a.insert(9));
assert!(a.insert(11));
assert!(a.insert(16));
assert!(a.insert(19));
assert!(a.insert(24));
assert!(b.insert(-2));
assert!(b.insert(1));
assert!(b.insert(5));
assert!(b.insert(9));
assert!(b.insert(13));
assert!(b.insert(19));
let expected = [-2, 1, 3, 5, 9, 11, 13, 16, 19, 24];
let i = a
.par_union(&b)
.map(|x| {
assert!(expected.contains(x));
1
})
.sum::<usize>();
assert_eq!(i, expected.len());
}
#[test]
fn test_from_iter() {
let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
let set: HashSet<_> = xs.par_iter().cloned().collect();
for x in &xs {
assert!(set.contains(x));
}
}
#[test]
fn test_move_iter() {
let hs = {
let mut hs = HashSet::new();
hs.insert('a');
hs.insert('b');
hs
};
let v = hs.into_par_iter().collect::<Vec<char>>();
assert!(v == ['a', 'b'] || v == ['b', 'a']);
}
#[test]
fn test_eq() {
// These constants once happened to expose a bug in insert().
// I'm keeping them around to prevent a regression.
let mut s1 = HashSet::new();
s1.insert(1);
s1.insert(2);
s1.insert(3);
let mut s2 = HashSet::new();
s2.insert(1);
s2.insert(2);
assert!(!s1.par_eq(&s2));
s2.insert(3);
assert!(s1.par_eq(&s2));
}
#[test]
fn test_extend_ref() {
let mut a = HashSet::new();
a.insert(1);
a.par_extend(&[2, 3, 4][..]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
let mut b = HashSet::new();
b.insert(5);
b.insert(6);
a.par_extend(&b);
assert_eq!(a.len(), 6);
assert!(a.contains(&1));
assert!(a.contains(&2));
assert!(a.contains(&3));
assert!(a.contains(&4));
assert!(a.contains(&5));
assert!(a.contains(&6));
}
}
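A short sketch of the parallel set operations defined in this removed module, under the same assumptions as the map example above (hashbrown 0.7 with the `rayon` feature, plus the `rayon` crate):
```rust
use hashbrown::HashSet;
use rayon::prelude::*;

fn main() {
    let evens: HashSet<u32> = (0..100).filter(|n| n % 2 == 0).collect();
    let fours: HashSet<u32> = (0..100).filter(|n| n % 4 == 0).collect();

    // Every multiple of four is even, so the parallel subset check holds.
    assert!(fours.par_is_subset(&evens));

    // par_union yields each distinct value once; here it adds nothing new.
    assert_eq!(evens.par_union(&fours).count(), evens.len());
}
```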


@ -1,200 +0,0 @@
mod size_hint {
use core::cmp;
/// This presumably exists to prevent denial of service attacks.
///
/// Original discussion: https://github.com/serde-rs/serde/issues/1114.
#[cfg_attr(feature = "inline-more", inline)]
pub(super) fn cautious(hint: Option<usize>) -> usize {
cmp::min(hint.unwrap_or(0), 4096)
}
}
mod map {
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use crate::hash_map::HashMap;
use super::size_hint;
impl<K, V, H> Serialize for HashMap<K, V, H>
where
K: Serialize + Eq + Hash,
V: Serialize,
H: BuildHasher,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_map(self)
}
}
impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct MapVisitor<K, V, S> {
marker: PhantomData<HashMap<K, V, S>>,
}
impl<'de, K, V, S> Visitor<'de> for MapVisitor<K, V, S>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
{
type Value = HashMap<K, V, S>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
where
A: MapAccess<'de>,
{
let mut values = HashMap::with_capacity_and_hasher(
size_hint::cautious(map.size_hint()),
S::default(),
);
while let Some((key, value)) = map.next_entry()? {
values.insert(key, value);
}
Ok(values)
}
}
let visitor = MapVisitor {
marker: PhantomData,
};
deserializer.deserialize_map(visitor)
}
}
}
mod set {
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
use serde::ser::{Serialize, Serializer};
use crate::hash_set::HashSet;
use super::size_hint;
impl<T, H> Serialize for HashSet<T, H>
where
T: Serialize + Eq + Hash,
H: BuildHasher,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.collect_seq(self)
}
}
impl<'de, T, S> Deserialize<'de> for HashSet<T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct SeqVisitor<T, S> {
marker: PhantomData<HashSet<T, S>>,
}
impl<'de, T, S> Visitor<'de> for SeqVisitor<T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
type Value = HashSet<T, S>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
let mut values = HashSet::with_capacity_and_hasher(
size_hint::cautious(seq.size_hint()),
S::default(),
);
while let Some(value) = seq.next_element()? {
values.insert(value);
}
Ok(values)
}
}
let visitor = SeqVisitor {
marker: PhantomData,
};
deserializer.deserialize_seq(visitor)
}
fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
where
D: Deserializer<'de>,
{
struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet<T, S>);
impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
{
type Value = ();
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
where
A: SeqAccess<'de>,
{
self.0.clear();
self.0.reserve(size_hint::cautious(seq.size_hint()));
while let Some(value) = seq.next_element()? {
self.0.insert(value);
}
Ok(())
}
}
deserializer.deserialize_seq(SeqInPlaceVisitor(place))
}
}
}
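A round-trip sketch exercising the `Serialize`/`Deserialize` impls in this removed module, assuming hashbrown 0.7 with its `serde` feature and `serde_json` as an extra, hypothetical dependency:
```rust
use hashbrown::HashMap;

fn main() -> Result<(), serde_json::Error> {
    let mut scores: HashMap<String, u32> = HashMap::new();
    scores.insert("alice".to_string(), 3);
    scores.insert("bob".to_string(), 7);

    // Serialize through collect_map, then rebuild through the MapVisitor above.
    let json = serde_json::to_string(&scores)?;
    let restored: HashMap<String, u32> = serde_json::from_str(&json)?;
    assert_eq!(scores, restored);
    Ok(())
}
```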

121
third_party/rust/hashbrown-0.7.2/src/lib.rs vendored

@ -1,121 +0,0 @@
//! This crate is a Rust port of Google's high-performance [SwissTable] hash
//! map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
//! and `HashSet` types.
//!
//! The original C++ version of [SwissTable] can be found [here], and this
//! [CppCon talk] gives an overview of how the algorithm works.
//!
//! [SwissTable]: https://abseil.io/blog/20180927-swisstables
//! [here]: https://github.com/abseil/abseil-cpp/blob/master/absl/container/internal/raw_hash_set.h
//! [CppCon talk]: https://www.youtube.com/watch?v=ncHmEUmJZf4
#![no_std]
#![cfg_attr(
feature = "nightly",
feature(
alloc_layout_extra,
allocator_api,
ptr_offset_from,
test,
core_intrinsics,
dropck_eyepatch,
specialization,
)
)]
#![allow(
clippy::doc_markdown,
clippy::module_name_repetitions,
clippy::must_use_candidate
)]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(has_extern_crate_alloc)]
#[cfg_attr(test, macro_use)]
extern crate alloc;
#[cfg(not(has_extern_crate_alloc))]
extern crate std as alloc;
#[cfg(feature = "nightly")]
#[cfg(doctest)]
doc_comment::doctest!("../README.md");
#[macro_use]
mod macros;
#[cfg(feature = "raw")]
/// Experimental and unsafe `RawTable` API. This module is only available if the
/// `raw` feature is enabled.
pub mod raw {
// The RawTable API is still experimental and is not properly documented yet.
#[allow(missing_docs)]
#[path = "mod.rs"]
mod inner;
pub use inner::*;
#[cfg(feature = "rayon")]
pub mod rayon {
pub use crate::external_trait_impls::rayon::raw::*;
}
}
#[cfg(not(feature = "raw"))]
mod raw;
mod external_trait_impls;
mod map;
#[cfg(feature = "rustc-internal-api")]
mod rustc_entry;
mod scopeguard;
mod set;
pub mod hash_map {
//! A hash map implemented with quadratic probing and SIMD lookup.
pub use crate::map::*;
#[cfg(feature = "rustc-internal-api")]
pub use crate::rustc_entry::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash maps.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::map::*;
}
}
pub mod hash_set {
//! A hash set implemented as a `HashMap` where the value is `()`.
pub use crate::set::*;
#[cfg(feature = "rayon")]
/// [rayon]-based parallel iterator types for hash sets.
/// You will rarely need to interact with it directly unless you have need
/// to name one of the iterator types.
///
/// [rayon]: https://docs.rs/rayon/1.0/rayon
pub mod rayon {
pub use crate::external_trait_impls::rayon::set::*;
}
}
pub use crate::map::HashMap;
pub use crate::set::HashSet;
/// Augments `AllocErr` with a `CapacityOverflow` variant.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum CollectionAllocErr {
/// Error due to the computed capacity exceeding the collection's maximum
/// (usually `isize::MAX` bytes).
CapacityOverflow,
/// Error due to the allocator.
AllocErr {
/// The layout of the allocation request that failed.
layout: alloc::alloc::Layout,
},
}


@ -1,69 +0,0 @@
// See the cfg-if crate.
macro_rules! cfg_if {
// match if/else chains with a final `else`
($(
if #[cfg($($meta:meta),*)] { $($it:item)* }
) else * else {
$($it2:item)*
}) => {
cfg_if! {
@__items
() ;
$( ( ($($meta),*) ($($it)*) ), )*
( () ($($it2)*) ),
}
};
// match if/else chains lacking a final `else`
(
if #[cfg($($i_met:meta),*)] { $($i_it:item)* }
$(
else if #[cfg($($e_met:meta),*)] { $($e_it:item)* }
)*
) => {
cfg_if! {
@__items
() ;
( ($($i_met),*) ($($i_it)*) ),
$( ( ($($e_met),*) ($($e_it)*) ), )*
( () () ),
}
};
// Internal and recursive macro to emit all the items
//
// Collects all the negated cfgs in a list at the beginning and after the
// semicolon is all the remaining items
(@__items ($($not:meta,)*) ; ) => {};
(@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
// Emit all items within one block, applying an appropriate #[cfg]. The
// #[cfg] will require all `$m` matchers specified and must also negate
// all previous matchers.
cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* }
// Recurse to emit all other items in `$rest`, and when we do so add all
// our `$m` matchers to the list of `$not` matchers as future emissions
// will have to negate everything we just matched as well.
cfg_if! { @__items ($($not,)* $($m,)*) ; $($rest)* }
};
// Internal macro to Apply a cfg attribute to a list of items
(@__apply $m:meta, $($it:item)*) => {
$(#[$m] $it)*
};
}
// Helper macro for specialization. This also helps avoid parse errors if the
// default fn syntax for specialization changes in the future.
#[cfg(feature = "nightly")]
macro_rules! default_fn {
($($tt:tt)*) => {
default $($tt)*
}
}
#[cfg(not(feature = "nightly"))]
macro_rules! default_fn {
($($tt:tt)*) => {
$($tt)*
}
}
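For illustration, an invocation such as `cfg_if! { if #[cfg(feature = "nightly")] { ... } else { ... } }` expands to each branch's items guarded by mutually exclusive `#[cfg]` attributes, as produced by the `@__items`/`@__apply` rules above. A hand-expanded, self-contained sketch of that output (assumed feature name, not taken from the crate):
```rust
// What the macro would emit for a two-branch if/else chain:
#[cfg(all(feature = "nightly"))]
type Word = u64;

#[cfg(all(not(any(feature = "nightly"))))]
type Word = u32;

fn main() {
    println!("Word is {} bytes", core::mem::size_of::<Word>());
}
```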

3862
third_party/rust/hashbrown-0.7.2/src/map.rs vendored

Diff not shown because of its large size.


@ -1,108 +0,0 @@
use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE};
#[cfg(feature = "nightly")]
use core::intrinsics;
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through them.
///
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
/// For implementation reasons, the bits in the set may be sparsely packed, so
/// that there is only one bit-per-byte used (the high bit, 7). If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
#[derive(Copy, Clone)]
pub struct BitMask(pub BitMaskWord);
#[allow(clippy::use_self)]
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
pub fn invert(self) -> Self {
BitMask(self.0 ^ BITMASK_MASK)
}
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
pub fn remove_lowest_bit(self) -> Self {
BitMask(self.0 & (self.0 - 1))
}
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
pub fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
pub fn lowest_set_bit(self) -> Option<usize> {
if self.0 == 0 {
None
} else {
Some(unsafe { self.lowest_set_bit_nonzero() })
}
}
/// Returns the first set bit in the `BitMask`, if there is one. The
/// bitmask must not be empty.
#[inline]
#[cfg(feature = "nightly")]
pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE
}
#[inline]
#[cfg(not(feature = "nightly"))]
pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
self.trailing_zeros()
}
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
pub fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeroes instruction, and instead uses
// reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it
// instead. Since we only have 1 bit set in each byte on ARM, we can
// use swap_bytes (REV) + leading_zeroes instead.
if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
self.0.swap_bytes().leading_zeros() as usize / BITMASK_STRIDE
} else {
self.0.trailing_zeros() as usize / BITMASK_STRIDE
}
}
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
pub fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
impl IntoIterator for BitMask {
type Item = usize;
type IntoIter = BitMaskIter;
#[inline]
fn into_iter(self) -> BitMaskIter {
BitMaskIter(self)
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
pub struct BitMaskIter(BitMask);
impl Iterator for BitMaskIter {
type Item = usize;
#[inline]
fn next(&mut self) -> Option<usize> {
let bit = self.0.lowest_set_bit()?;
self.0 = self.0.remove_lowest_bit();
Some(bit)
}
}
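
A standalone arithmetic sketch (not hashbrown code) of the stride handling described above: in the portable implementation each matching byte contributes only its high bit, so with `BITMASK_STRIDE == 8` the byte index of a match is `trailing_zeros / 8`, and clearing the lowest set bit walks the remaining matches the same way `BitMaskIter` does.

fn main() {
    const STRIDE: u32 = 8; // one meaningful (high) bit per byte in the portable impl
    // Pretend bytes 1 and 3 of an 8-byte group matched: only their high bits are set.
    let mut mask: u64 = (1 << 15) | (1 << 31);
    let mut indices = Vec::new();
    while mask != 0 {
        indices.push(mask.trailing_zeros() / STRIDE); // lowest_set_bit -> byte index
        mask &= mask - 1;                             // remove_lowest_bit
    }
    assert_eq!(indices, vec![1, 3]);
    println!("matched byte indices: {:?}", indices);
}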


@ -1,151 +0,0 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
#[cfg(any(
target_pointer_width = "64",
target_arch = "aarch64",
target_arch = "x86_64",
))]
type GroupWord = u64;
#[cfg(all(
target_pointer_width = "32",
not(target_arch = "aarch64"),
not(target_arch = "x86_64"),
))]
type GroupWord = u32;
pub type BitMaskWord = GroupWord;
pub const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
fn repeat(byte: u8) -> GroupWord {
GroupWord::from_ne_bytes([byte; Group::WIDTH])
}
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
pub struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
// enable the compiler to eliminate unnecessary byte swaps if we are
// only checking whether a BitMask is empty.
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table. This value is explicitly declared as
/// a static variable to ensure the address is consistent across dylibs.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
pub fn static_empty() -> &'static [u8] {
union AlignedBytes {
_align: Group,
bytes: [u8; Group::WIDTH],
};
static ALIGNED_BYTES: AlignedBytes = AlignedBytes {
bytes: [EMPTY; Group::WIDTH],
};
unsafe { &ALIGNED_BYTES.bytes }
}
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub unsafe fn load(ptr: *const u8) -> Self {
Group(ptr::read_unaligned(ptr as *const _))
}
/// Loads a group of bytes starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(ptr::read(ptr as *const _))
}
/// Stores the group of bytes to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
ptr::write(ptr as *mut _, self.0);
}
/// Returns a `BitMask` indicating all bytes in the group which *may*
/// have the given value.
///
/// This function may return a false positive in certain cases where
/// the byte in the group differs from the searched value only in its
/// lowest bit. This is fine because:
/// - This never happens for `EMPTY` and `DELETED`, only full entries.
/// - The check for key equality will catch these.
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
pub fn match_byte(self, byte: u8) -> BitMask {
// This algorithm is derived from
// http://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
let cmp = self.0 ^ repeat(byte);
BitMask((cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
pub fn match_empty(self) -> BitMask {
// If the high bit is set, then the byte must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
BitMask((self.0 & (self.0 << 1) & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub fn match_empty_or_deleted(self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
pub fn match_full(self) -> BitMask {
self.match_empty_or_deleted().invert()
}
/// Performs the following transformation on all bytes in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let full = 1000_0000 (true) or 0000_0000 (false)
// !1000_0000 + 1 = 0111_1111 + 1 = 1000_0000 (no carry)
// !0000_0000 + 0 = 1111_1111 + 0 = 1111_1111 (no carry)
let full = !self.0 & repeat(0x80);
Group(!full + (full >> 7))
}
}
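
The `match_byte` trick above is the classic find-a-zero-byte-in-a-word hack applied to `group ^ repeat(byte)`. A plain-integer sketch of it follows, assuming 8-byte groups and omitting the `to_le()` adjustment the real code applies so that bit order matches memory order on big-endian targets:

/// Replicate `byte` across all 8 lanes of a word.
fn repeat(byte: u8) -> u64 {
    u64::from_ne_bytes([byte; 8])
}

/// Bytes of `group` equal to `byte` come back as 0x80 in the result, other
/// bytes as 0x00 (modulo the false positives documented above).
fn match_byte(group: u64, byte: u8) -> u64 {
    let cmp = group ^ repeat(byte);
    cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
}

fn main() {
    // Byte 2 of the group holds the control byte 0x3a; the rest are EMPTY (0xff).
    let group = u64::from_le_bytes([0xff, 0xff, 0x3a, 0xff, 0xff, 0xff, 0xff, 0xff]);
    let mask = match_byte(group, 0x3a);
    assert_eq!(mask.trailing_zeros() / 8, 2); // the only hit is byte 2
}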

1532
third_party/rust/hashbrown-0.7.2/src/raw/mod.rs (vendored)

Diff not shown because of its size.


@ -1,143 +0,0 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::mem;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
pub type BitMaskWord = u16;
pub const BITMASK_STRIDE: usize = 1;
pub const BITMASK_MASK: BitMaskWord = 0xffff;
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
pub struct Group(x86::__m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
pub const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table. This value is explicitly declared as
/// a static variable to ensure the address is consistent across dylibs.
///
/// This is guaranteed to be aligned to the group size.
pub fn static_empty() -> &'static [u8] {
union AlignedBytes {
_align: Group,
bytes: [u8; Group::WIDTH],
};
static ALIGNED_BYTES: AlignedBytes = AlignedBytes {
bytes: [EMPTY; Group::WIDTH],
};
unsafe { &ALIGNED_BYTES.bytes }
}
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
pub unsafe fn load(ptr: *const u8) -> Self {
Group(x86::_mm_loadu_si128(ptr as *const _))
}
/// Loads a group of bytes starting at the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(x86::_mm_load_si128(ptr as *const _))
}
/// Stores the group of bytes to the given address, which must be
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
pub unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
x86::_mm_store_si128(ptr as *mut _, self.0);
}
/// Returns a `BitMask` indicating all bytes in the group which have
/// the given value.
#[inline]
pub fn match_byte(self, byte: u8) -> BitMask {
#[allow(
clippy::cast_possible_wrap, // byte: u8 as i8
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
// upper 16-bits of the i32 are zeroed:
clippy::cast_sign_loss,
clippy::cast_possible_truncation
)]
unsafe {
let cmp = x86::_mm_cmpeq_epi8(self.0, x86::_mm_set1_epi8(byte as i8));
BitMask(x86::_mm_movemask_epi8(cmp) as u16)
}
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
pub fn match_empty(self) -> BitMask {
self.match_byte(EMPTY)
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
pub fn match_empty_or_deleted(self) -> BitMask {
#[allow(
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
// upper 16-bits of the i32 are zeroed:
clippy::cast_sign_loss,
clippy::cast_possible_truncation
)]
unsafe {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask(x86::_mm_movemask_epi8(self.0) as u16)
}
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
pub fn match_full(&self) -> BitMask {
self.match_empty_or_deleted().invert()
}
/// Performs the following transformation on all bytes in the group:
/// - `EMPTY => EMPTY`
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
// Here's this logic expanded to concrete values:
// let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false)
// 1111_1111 | 1000_0000 = 1111_1111
// 0000_0000 | 1000_0000 = 1000_0000
#[allow(
clippy::cast_possible_wrap, // byte: 0x80_u8 as i8
)]
unsafe {
let zero = x86::_mm_setzero_si128();
let special = x86::_mm_cmpgt_epi8(zero, self.0);
Group(x86::_mm_or_si128(
special,
x86::_mm_set1_epi8(0x80_u8 as i8),
))
}
}
}
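
For comparison with the portable version, here is a standalone sketch of the same match done with the stable `std::arch` SSE2 intrinsics (SSE2 is part of the x86_64 baseline, so no runtime feature detection is needed). It mirrors the logic above but is not the vendored code itself:

#[cfg(target_arch = "x86_64")]
fn match_byte_sse2(group: &[u8; 16], byte: u8) -> u16 {
    use std::arch::x86_64::*;
    unsafe {
        let g = _mm_loadu_si128(group.as_ptr() as *const __m128i);
        // Matching bytes become 0xFF; movemask gathers their high bits into a
        // 16-bit mask, one bit per byte (stride 1).
        let cmp = _mm_cmpeq_epi8(g, _mm_set1_epi8(byte as i8));
        _mm_movemask_epi8(cmp) as u16
    }
}

#[cfg(target_arch = "x86_64")]
fn main() {
    let mut group = [0xffu8; 16]; // all EMPTY
    group[5] = 0x3a;              // one full slot with control byte 0x3a
    assert_eq!(match_byte_sse2(&group, 0x3a).trailing_zeros(), 5);
}

#[cfg(not(target_arch = "x86_64"))]
fn main() {}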


@ -1,621 +0,0 @@
use self::RustcEntry::*;
use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut};
use crate::raw::{Bucket, RawTable};
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
use core::mem;
impl<K, V, S> HashMap<K, V, S>
where
K: Eq + Hash,
S: BuildHasher,
{
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut letters = HashMap::new();
///
/// for ch in "a short treatise on fungi".chars() {
/// let counter = letters.rustc_entry(ch).or_insert(0);
/// *counter += 1;
/// }
///
/// assert_eq!(letters[&'s'], 2);
/// assert_eq!(letters[&'t'], 3);
/// assert_eq!(letters[&'u'], 1);
/// assert_eq!(letters.get(&'y'), None);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V> {
let hash = make_hash(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) {
RustcEntry::Occupied(RustcOccupiedEntry {
key: Some(key),
elem,
table: &mut self.table,
})
} else {
// Ideally we would put this in VacantEntry::insert, but Entry is not
// generic over the BuildHasher and adding a generic parameter would be
// a breaking change.
self.reserve(1);
RustcEntry::Vacant(RustcVacantEntry {
hash,
key,
table: &mut self.table,
})
}
}
}
/// A view into a single entry in a map, which may either be vacant or occupied.
///
/// This `enum` is constructed from the [`entry`] method on [`HashMap`].
///
/// [`HashMap`]: struct.HashMap.html
/// [`entry`]: struct.HashMap.html#method.rustc_entry
pub enum RustcEntry<'a, K, V> {
/// An occupied entry.
Occupied(RustcOccupiedEntry<'a, K, V>),
/// A vacant entry.
Vacant(RustcVacantEntry<'a, K, V>),
}
impl<K: Debug, V: Debug> Debug for RustcEntry<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
}
}
}
/// A view into an occupied entry in a `HashMap`.
/// It is part of the [`RustcEntry`] enum.
///
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcOccupiedEntry<'a, K, V> {
key: Option<K>,
elem: Bucket<(K, V)>,
table: &'a mut RawTable<(K, V)>,
}
unsafe impl<K, V> Send for RustcOccupiedEntry<'_, K, V>
where
K: Send,
V: Send,
{
}
unsafe impl<K, V> Sync for RustcOccupiedEntry<'_, K, V>
where
K: Sync,
V: Sync,
{
}
impl<K: Debug, V: Debug> Debug for RustcOccupiedEntry<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
.field("value", self.get())
.finish()
}
}
/// A view into a vacant entry in a `HashMap`.
/// It is part of the [`RustcEntry`] enum.
///
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcVacantEntry<'a, K, V> {
hash: u64,
key: K,
table: &'a mut RawTable<(K, V)>,
}
impl<K: Debug, V> Debug for RustcVacantEntry<'_, K, V> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
impl<'a, K, V> RustcEntry<'a, K, V> {
/// Sets the value of the entry, and returns a RustcOccupiedEntry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// let entry = map.entry("horseyland").insert(37);
///
/// assert_eq!(entry.key(), &"horseyland");
/// ```
pub fn insert(self, value: V) -> RustcOccupiedEntry<'a, K, V> {
match self {
Vacant(entry) => entry.insert_entry(value),
Occupied(mut entry) => {
entry.insert(value);
entry
}
}
}
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// map.rustc_entry("poneyland").or_insert(3);
/// assert_eq!(map["poneyland"], 3);
///
/// *map.rustc_entry("poneyland").or_insert(10) *= 2;
/// assert_eq!(map["poneyland"], 6);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_insert(self, default: V) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default),
}
}
/// Ensures a value is in the entry by inserting the result of the default function if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, String> = HashMap::new();
/// let s = "hoho".to_string();
///
/// map.rustc_entry("poneyland").or_insert_with(|| s);
///
/// assert_eq!(map["poneyland"], "hoho".to_string());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(default()),
}
}
/// Returns a reference to this entry's key.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
match *self {
Occupied(ref entry) => entry.key(),
Vacant(ref entry) => entry.key(),
}
}
/// Provides in-place mutable access to an occupied entry before any
/// potential inserts into the map.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// map.rustc_entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 42);
///
/// map.rustc_entry("poneyland")
/// .and_modify(|e| { *e += 1 })
/// .or_insert(42);
/// assert_eq!(map["poneyland"], 43);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn and_modify<F>(self, f: F) -> Self
where
F: FnOnce(&mut V),
{
match self {
Occupied(mut entry) => {
f(entry.get_mut());
Occupied(entry)
}
Vacant(entry) => Vacant(entry),
}
}
}
impl<'a, K, V: Default> RustcEntry<'a, K, V> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
/// # Examples
///
/// ```
/// # fn main() {
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, Option<u32>> = HashMap::new();
/// map.rustc_entry("poneyland").or_default();
///
/// assert_eq!(map["poneyland"], None);
/// # }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn or_default(self) -> &'a mut V
where
K: Hash,
{
match self {
Occupied(entry) => entry.into_mut(),
Vacant(entry) => entry.insert(Default::default()),
}
}
}
impl<'a, K, V> RustcOccupiedEntry<'a, K, V> {
/// Gets a reference to the key in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
unsafe { &self.elem.as_ref().0 }
}
/// Take the ownership of the key and value from the map.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// // We delete the entry from the map.
/// o.remove_entry();
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
unsafe {
self.table.erase_no_drop(&self.elem);
self.elem.read()
}
}
/// Gets a reference to the value in the entry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.get(), &12);
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn get(&self) -> &V {
unsafe { &self.elem.as_ref().1 }
}
/// Gets a mutable reference to the value in the entry.
///
/// If you need a reference to the `RustcOccupiedEntry` which may outlive the
/// destruction of the `RustcEntry` value, see [`into_mut`].
///
/// [`into_mut`]: #method.into_mut
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") {
/// *o.get_mut() += 10;
/// assert_eq!(*o.get(), 22);
///
/// // We can use the same RustcEntry multiple times.
/// *o.get_mut() += 2;
/// }
///
/// assert_eq!(map["poneyland"], 24);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_mut(&mut self) -> &mut V {
unsafe { &mut self.elem.as_mut().1 }
}
/// Converts the RustcOccupiedEntry into a mutable reference to the value in the entry
/// with a lifetime bound to the map itself.
///
/// If you need multiple references to the `RustcOccupiedEntry`, see [`get_mut`].
///
/// [`get_mut`]: #method.get_mut
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// assert_eq!(map["poneyland"], 12);
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// *o.into_mut() += 10;
/// }
///
/// assert_eq!(map["poneyland"], 22);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_mut(self) -> &'a mut V {
unsafe { &mut self.elem.as_mut().1 }
}
/// Sets the value of the entry, and returns the entry's old value.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(mut o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.insert(15), 12);
/// }
///
/// assert_eq!(map["poneyland"], 15);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, mut value: V) -> V {
let old_value = self.get_mut();
mem::swap(&mut value, old_value);
value
}
/// Takes the value out of the entry, and returns it.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// map.rustc_entry("poneyland").or_insert(12);
///
/// if let RustcEntry::Occupied(o) = map.rustc_entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
self.remove_entry().1
}
/// Replaces the entry, returning the old key and value. The new key in the hash map will be
/// the key used to create this entry.
///
/// # Examples
///
/// ```
/// use hashbrown::hash_map::{RustcEntry, HashMap};
/// use std::rc::Rc;
///
/// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
/// map.insert(Rc::new("Stringthing".to_string()), 15);
///
/// let my_key = Rc::new("Stringthing".to_string());
///
/// if let RustcEntry::Occupied(entry) = map.rustc_entry(my_key) {
/// // Also replace the key with a handle to our other key.
/// let (old_key, old_value): (Rc<String>, u32) = entry.replace_entry(16);
/// }
///
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn replace_entry(self, value: V) -> (K, V) {
let entry = unsafe { self.elem.as_mut() };
let old_key = mem::replace(&mut entry.0, self.key.unwrap());
let old_value = mem::replace(&mut entry.1, value);
(old_key, old_value)
}
/// Replaces the key in the hash map with the key used to create this entry.
///
/// # Examples
///
/// ```
/// use hashbrown::hash_map::{RustcEntry, HashMap};
/// use std::rc::Rc;
///
/// let mut map: HashMap<Rc<String>, u32> = HashMap::new();
/// let mut known_strings: Vec<Rc<String>> = Vec::new();
///
/// // Initialise known strings, run program, etc.
///
/// reclaim_memory(&mut map, &known_strings);
///
/// fn reclaim_memory(map: &mut HashMap<Rc<String>, u32>, known_strings: &[Rc<String>] ) {
/// for s in known_strings {
/// if let RustcEntry::Occupied(entry) = map.rustc_entry(s.clone()) {
/// // Replaces the entry's key with our version of it in `known_strings`.
/// entry.replace_key();
/// }
/// }
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn replace_key(self) -> K {
let entry = unsafe { self.elem.as_mut() };
mem::replace(&mut entry.0, self.key.unwrap())
}
}
impl<'a, K, V> RustcVacantEntry<'a, K, V> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `RustcVacantEntry`.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
/// assert_eq!(map.rustc_entry("poneyland").key(), &"poneyland");
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn key(&self) -> &K {
&self.key
}
/// Take ownership of the key.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
/// v.into_key();
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn into_key(self) -> K {
self.key
}
/// Sets the value of the entry with the RustcVacantEntry's key,
/// and returns a mutable reference to it.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(o) = map.rustc_entry("poneyland") {
/// o.insert(37);
/// }
/// assert_eq!(map["poneyland"], 37);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(self, value: V) -> &'a mut V {
let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
unsafe { &mut bucket.as_mut().1 }
}
/// Sets the value of the entry with the RustcVacantEntry's key,
/// and returns a RustcOccupiedEntry.
///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
/// use hashbrown::hash_map::RustcEntry;
///
/// let mut map: HashMap<&str, u32> = HashMap::new();
///
/// if let RustcEntry::Vacant(v) = map.rustc_entry("poneyland") {
/// let o = v.insert_entry(37);
/// assert_eq!(o.get(), &37);
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_entry(self, value: V) -> RustcOccupiedEntry<'a, K, V> {
let bucket = self.table.insert_no_grow(self.hash, (self.key, value));
RustcOccupiedEntry {
key: None,
elem: bucket,
table: self.table,
}
}
}
impl<K, V> IterMut<'_, K, V> {
    /// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}
impl<K, V> IntoIter<K, V> {
    /// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}
impl<K, V> Drain<'_, K, V> {
    /// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_iter(&self) -> Iter<'_, K, V> {
self.iter()
}
}


@ -1,49 +0,0 @@
// Extracted from the scopeguard crate
use core::ops::{Deref, DerefMut};
pub struct ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
dropfn: F,
value: T,
}
#[cfg_attr(feature = "inline-more", inline)]
pub fn guard<T, F>(value: T, dropfn: F) -> ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
ScopeGuard { dropfn, value }
}
impl<T, F> Deref for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
type Target = T;
#[cfg_attr(feature = "inline-more", inline)]
fn deref(&self) -> &T {
&self.value
}
}
impl<T, F> DerefMut for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[cfg_attr(feature = "inline-more", inline)]
fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
}
impl<T, F> Drop for ScopeGuard<T, F>
where
F: FnMut(&mut T),
{
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
(self.dropfn)(&mut self.value)
}
}
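
A short usage sketch of the scope-guard pattern above, written against the external scopeguard crate (an assumption for illustration, since hashbrown keeps this trimmed copy private); the drop closure runs when the guard leaves scope, even on early return or unwind:

fn main() {
    let mut log: Vec<&str> = Vec::new();
    {
        let mut guarded = scopeguard::guard(&mut log, |log| {
            // The drop closure runs when `guarded` goes out of scope.
            log.push("cleanup");
        });
        guarded.push("work"); // Deref/DerefMut forward to the guarded value.
    }
    assert_eq!(log, ["work", "cleanup"]);
}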

1906
third_party/rust/hashbrown-0.7.2/src/set.rs (vendored)

Diff not shown because of its size.


@ -1,65 +0,0 @@
//! Sanity check that alternate hashers work correctly.
#![cfg(not(miri))] // FIXME: takes too long
use hashbrown::HashSet;
use std::hash::{BuildHasher, BuildHasherDefault, Hasher};
fn check<S: BuildHasher + Default>() {
let range = 0..1_000;
let mut set = HashSet::<i32, S>::default();
set.extend(range.clone());
assert!(!set.contains(&i32::min_value()));
assert!(!set.contains(&(range.start - 1)));
for i in range.clone() {
assert!(set.contains(&i));
}
assert!(!set.contains(&range.end));
assert!(!set.contains(&i32::max_value()));
}
/// Use hashbrown's default hasher.
#[test]
fn default() {
check::<hashbrown::hash_map::DefaultHashBuilder>();
}
/// Use std's default hasher.
#[test]
fn random_state() {
check::<std::collections::hash_map::RandomState>();
}
/// Use a constant 0 hash.
#[test]
fn zero() {
#[derive(Default)]
struct ZeroHasher;
impl Hasher for ZeroHasher {
fn finish(&self) -> u64 {
0
}
fn write(&mut self, _: &[u8]) {}
}
check::<BuildHasherDefault<ZeroHasher>>();
}
/// Use a constant maximum hash.
#[test]
fn max() {
#[derive(Default)]
struct MaxHasher;
impl Hasher for MaxHasher {
fn finish(&self) -> u64 {
u64::max_value()
}
fn write(&mut self, _: &[u8]) {}
}
check::<BuildHasherDefault<MaxHasher>>();
}


@ -1,533 +0,0 @@
#![cfg(feature = "rayon")]
#[macro_use]
extern crate lazy_static;
use hashbrown::{HashMap, HashSet};
use rayon::iter::{
IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelExtend,
ParallelIterator,
};
macro_rules! assert_eq3 {
($e1:expr, $e2:expr, $e3:expr) => {{
assert_eq!($e1, $e2);
assert_eq!($e1, $e3);
assert_eq!($e2, $e3);
}};
}
lazy_static! {
static ref MAP_EMPTY: HashMap<char, u32> = HashMap::new();
static ref MAP: HashMap<char, u32> = {
let mut m = HashMap::new();
m.insert('b', 20);
m.insert('a', 10);
m.insert('c', 30);
m.insert('e', 50);
m.insert('f', 60);
m.insert('d', 40);
m
};
}
#[test]
fn map_seq_par_equivalence_iter_empty() {
let vec_seq = MAP_EMPTY.iter().collect::<Vec<_>>();
let vec_par = MAP_EMPTY.par_iter().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn map_seq_par_equivalence_iter() {
let mut vec_seq = MAP.iter().collect::<Vec<_>>();
let mut vec_par = MAP.par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [
(&'a', &10),
(&'b', &20),
(&'c', &30),
(&'d', &40),
(&'e', &50),
(&'f', &60),
];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_keys_empty() {
let vec_seq = MAP_EMPTY.keys().collect::<Vec<&char>>();
let vec_par = MAP_EMPTY.par_keys().collect::<Vec<&char>>();
let expected: [&char; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn map_seq_par_equivalence_keys() {
let mut vec_seq = MAP.keys().collect::<Vec<_>>();
let mut vec_par = MAP.par_keys().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f'];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_values_empty() {
let vec_seq = MAP_EMPTY.values().collect::<Vec<_>>();
let vec_par = MAP_EMPTY.par_values().collect::<Vec<_>>();
let expected: [&u32; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn map_seq_par_equivalence_values() {
let mut vec_seq = MAP.values().collect::<Vec<_>>();
let mut vec_par = MAP.par_values().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&10, &20, &30, &40, &50, &60];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_iter_mut_empty() {
let mut map1 = MAP_EMPTY.clone();
let mut map2 = MAP_EMPTY.clone();
let vec_seq = map1.iter_mut().collect::<Vec<_>>();
let vec_par = map2.par_iter_mut().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn map_seq_par_equivalence_iter_mut() {
let mut map1 = MAP.clone();
let mut map2 = MAP.clone();
let mut vec_seq = map1.iter_mut().collect::<Vec<_>>();
let mut vec_par = map2.par_iter_mut().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [
(&'a', &mut 10),
(&'b', &mut 20),
(&'c', &mut 30),
(&'d', &mut 40),
(&'e', &mut 50),
(&'f', &mut 60),
];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_values_mut_empty() {
let mut map1 = MAP_EMPTY.clone();
let mut map2 = MAP_EMPTY.clone();
let vec_seq = map1.values_mut().collect::<Vec<_>>();
let vec_par = map2.par_values_mut().collect::<Vec<_>>();
let expected: [&u32; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn map_seq_par_equivalence_values_mut() {
let mut map1 = MAP.clone();
let mut map2 = MAP.clone();
let mut vec_seq = map1.values_mut().collect::<Vec<_>>();
let mut vec_par = map2.par_values_mut().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&mut 10, &mut 20, &mut 30, &mut 40, &mut 50, &mut 60];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn map_seq_par_equivalence_into_iter_empty() {
let vec_seq = MAP_EMPTY.clone().into_iter().collect::<Vec<_>>();
let vec_par = MAP_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn map_seq_par_equivalence_into_iter() {
let mut vec_seq = MAP.clone().into_iter().collect::<Vec<_>>();
let mut vec_par = MAP.clone().into_par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [
('a', 10),
('b', 20),
('c', 30),
('d', 40),
('e', 50),
('f', 60),
];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
lazy_static! {
static ref MAP_VEC_EMPTY: Vec<(char, u32)> = vec![];
static ref MAP_VEC: Vec<(char, u32)> = vec![
('b', 20),
('a', 10),
('c', 30),
('e', 50),
('f', 60),
('d', 40),
];
}
#[test]
fn map_seq_par_equivalence_collect_empty() {
let map_expected = MAP_EMPTY.clone();
let map_seq = MAP_VEC_EMPTY.clone().into_iter().collect::<HashMap<_, _>>();
let map_par = MAP_VEC_EMPTY
.clone()
.into_par_iter()
.collect::<HashMap<_, _>>();
assert_eq!(map_seq, map_par);
assert_eq!(map_seq, map_expected);
assert_eq!(map_par, map_expected);
}
#[test]
fn map_seq_par_equivalence_collect() {
let map_expected = MAP.clone();
let map_seq = MAP_VEC.clone().into_iter().collect::<HashMap<_, _>>();
let map_par = MAP_VEC.clone().into_par_iter().collect::<HashMap<_, _>>();
assert_eq!(map_seq, map_par);
assert_eq!(map_seq, map_expected);
assert_eq!(map_par, map_expected);
}
lazy_static! {
static ref MAP_EXISTING_EMPTY: HashMap<char, u32> = HashMap::new();
static ref MAP_EXISTING: HashMap<char, u32> = {
let mut m = HashMap::new();
m.insert('b', 20);
m.insert('a', 10);
m
};
static ref MAP_EXTENSION_EMPTY: Vec<(char, u32)> = vec![];
static ref MAP_EXTENSION: Vec<(char, u32)> = vec![('c', 30), ('e', 50), ('f', 60), ('d', 40),];
}
#[test]
fn map_seq_par_equivalence_existing_empty_extend_empty() {
let expected = HashMap::new();
let mut map_seq = MAP_EXISTING_EMPTY.clone();
let mut map_par = MAP_EXISTING_EMPTY.clone();
map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned());
map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_empty_extend() {
let expected = MAP_EXTENSION.iter().cloned().collect::<HashMap<_, _>>();
let mut map_seq = MAP_EXISTING_EMPTY.clone();
let mut map_par = MAP_EXISTING_EMPTY.clone();
map_seq.extend(MAP_EXTENSION.iter().cloned());
map_par.par_extend(MAP_EXTENSION.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_extend_empty() {
let expected = MAP_EXISTING.clone();
let mut map_seq = MAP_EXISTING.clone();
let mut map_par = MAP_EXISTING.clone();
map_seq.extend(MAP_EXTENSION_EMPTY.iter().cloned());
map_par.par_extend(MAP_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
#[test]
fn map_seq_par_equivalence_existing_extend() {
let expected = MAP.clone();
let mut map_seq = MAP_EXISTING.clone();
let mut map_par = MAP_EXISTING.clone();
map_seq.extend(MAP_EXTENSION.iter().cloned());
map_par.par_extend(MAP_EXTENSION.par_iter().cloned());
assert_eq3!(map_seq, map_par, expected);
}
lazy_static! {
static ref SET_EMPTY: HashSet<char> = HashSet::new();
static ref SET: HashSet<char> = {
let mut s = HashSet::new();
s.insert('b');
s.insert('a');
s.insert('c');
s.insert('e');
s.insert('f');
s.insert('d');
s
};
}
#[test]
fn set_seq_par_equivalence_iter_empty() {
let vec_seq = SET_EMPTY.iter().collect::<Vec<_>>();
let vec_par = SET_EMPTY.par_iter().collect::<Vec<_>>();
let expected: [&char; 0] = [];
assert_eq3!(vec_seq, vec_par, expected);
}
#[test]
fn set_seq_par_equivalence_iter() {
let mut vec_seq = SET.iter().collect::<Vec<_>>();
let mut vec_par = SET.par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = [&'a', &'b', &'c', &'d', &'e', &'f'];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
#[test]
fn set_seq_par_equivalence_into_iter_empty() {
let vec_seq = SET_EMPTY.clone().into_iter().collect::<Vec<_>>();
let vec_par = SET_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
assert_eq3!(vec_seq, vec_par, []);
}
#[test]
fn set_seq_par_equivalence_into_iter() {
let mut vec_seq = SET.clone().into_iter().collect::<Vec<_>>();
let mut vec_par = SET.clone().into_par_iter().collect::<Vec<_>>();
assert_eq!(vec_seq, vec_par);
// Do not depend on the exact order of values
let expected_sorted = ['a', 'b', 'c', 'd', 'e', 'f'];
vec_seq.sort_unstable();
vec_par.sort_unstable();
assert_eq3!(vec_seq, vec_par, expected_sorted);
}
lazy_static! {
static ref SET_VEC_EMPTY: Vec<char> = vec![];
static ref SET_VEC: Vec<char> = vec!['b', 'a', 'c', 'e', 'f', 'd',];
}
#[test]
fn set_seq_par_equivalence_collect_empty() {
let set_expected = SET_EMPTY.clone();
let set_seq = SET_VEC_EMPTY.clone().into_iter().collect::<HashSet<_>>();
let set_par = SET_VEC_EMPTY
.clone()
.into_par_iter()
.collect::<HashSet<_>>();
assert_eq!(set_seq, set_par);
assert_eq!(set_seq, set_expected);
assert_eq!(set_par, set_expected);
}
#[test]
fn set_seq_par_equivalence_collect() {
let set_expected = SET.clone();
let set_seq = SET_VEC.clone().into_iter().collect::<HashSet<_>>();
let set_par = SET_VEC.clone().into_par_iter().collect::<HashSet<_>>();
assert_eq!(set_seq, set_par);
assert_eq!(set_seq, set_expected);
assert_eq!(set_par, set_expected);
}
lazy_static! {
static ref SET_EXISTING_EMPTY: HashSet<char> = HashSet::new();
static ref SET_EXISTING: HashSet<char> = {
let mut s = HashSet::new();
s.insert('b');
s.insert('a');
s
};
static ref SET_EXTENSION_EMPTY: Vec<char> = vec![];
static ref SET_EXTENSION: Vec<char> = vec!['c', 'e', 'f', 'd',];
}
#[test]
fn set_seq_par_equivalence_existing_empty_extend_empty() {
let expected = HashSet::new();
let mut set_seq = SET_EXISTING_EMPTY.clone();
let mut set_par = SET_EXISTING_EMPTY.clone();
set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned());
set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_empty_extend() {
let expected = SET_EXTENSION.iter().cloned().collect::<HashSet<_>>();
let mut set_seq = SET_EXISTING_EMPTY.clone();
let mut set_par = SET_EXISTING_EMPTY.clone();
set_seq.extend(SET_EXTENSION.iter().cloned());
set_par.par_extend(SET_EXTENSION.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_extend_empty() {
let expected = SET_EXISTING.clone();
let mut set_seq = SET_EXISTING.clone();
let mut set_par = SET_EXISTING.clone();
set_seq.extend(SET_EXTENSION_EMPTY.iter().cloned());
set_par.par_extend(SET_EXTENSION_EMPTY.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
#[test]
fn set_seq_par_equivalence_existing_extend() {
let expected = SET.clone();
let mut set_seq = SET_EXISTING.clone();
let mut set_par = SET_EXISTING.clone();
set_seq.extend(SET_EXTENSION.iter().cloned());
set_par.par_extend(SET_EXTENSION.par_iter().cloned());
assert_eq3!(set_seq, set_par, expected);
}
lazy_static! {
static ref SET_A: HashSet<char> = ['a', 'b', 'c', 'd'].iter().cloned().collect();
static ref SET_B: HashSet<char> = ['a', 'b', 'e', 'f'].iter().cloned().collect();
static ref SET_DIFF_AB: HashSet<char> = ['c', 'd'].iter().cloned().collect();
static ref SET_DIFF_BA: HashSet<char> = ['e', 'f'].iter().cloned().collect();
static ref SET_SYMM_DIFF_AB: HashSet<char> = ['c', 'd', 'e', 'f'].iter().cloned().collect();
static ref SET_INTERSECTION_AB: HashSet<char> = ['a', 'b'].iter().cloned().collect();
static ref SET_UNION_AB: HashSet<char> =
['a', 'b', 'c', 'd', 'e', 'f'].iter().cloned().collect();
}
#[test]
fn set_seq_par_equivalence_difference() {
let diff_ab_seq = SET_A.difference(&*SET_B).cloned().collect::<HashSet<_>>();
let diff_ab_par = SET_A
.par_difference(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(diff_ab_seq, diff_ab_par, *SET_DIFF_AB);
let diff_ba_seq = SET_B.difference(&*SET_A).cloned().collect::<HashSet<_>>();
let diff_ba_par = SET_B
.par_difference(&*SET_A)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(diff_ba_seq, diff_ba_par, *SET_DIFF_BA);
}
#[test]
fn set_seq_par_equivalence_symmetric_difference() {
let symm_diff_ab_seq = SET_A
.symmetric_difference(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
let symm_diff_ab_par = SET_A
.par_symmetric_difference(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(symm_diff_ab_seq, symm_diff_ab_par, *SET_SYMM_DIFF_AB);
}
#[test]
fn set_seq_par_equivalence_intersection() {
let intersection_ab_seq = SET_A.intersection(&*SET_B).cloned().collect::<HashSet<_>>();
let intersection_ab_par = SET_A
.par_intersection(&*SET_B)
.cloned()
.collect::<HashSet<_>>();
assert_eq3!(
intersection_ab_seq,
intersection_ab_par,
*SET_INTERSECTION_AB
);
}
#[test]
fn set_seq_par_equivalence_union() {
let union_ab_seq = SET_A.union(&*SET_B).cloned().collect::<HashSet<_>>();
let union_ab_par = SET_A.par_union(&*SET_B).cloned().collect::<HashSet<_>>();
assert_eq3!(union_ab_seq, union_ab_par, *SET_UNION_AB);
}


@ -1,65 +0,0 @@
#![cfg(feature = "serde")]
use core::hash::BuildHasherDefault;
use hashbrown::{HashMap, HashSet};
use rustc_hash::FxHasher;
use serde_test::{assert_tokens, Token};
// We use FxHash for this test because we rely on the ordering
type FxHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher>>;
type FxHashSet<T> = HashSet<T, BuildHasherDefault<FxHasher>>;
#[test]
fn map_serde_tokens_empty() {
let map = FxHashMap::<char, u32>::default();
assert_tokens(&map, &[Token::Map { len: Some(0) }, Token::MapEnd]);
}
#[test]
fn map_serde_tokens() {
let mut map = FxHashMap::default();
map.insert('b', 20);
map.insert('a', 10);
map.insert('c', 30);
assert_tokens(
&map,
&[
Token::Map { len: Some(3) },
Token::Char('a'),
Token::I32(10),
Token::Char('b'),
Token::I32(20),
Token::Char('c'),
Token::I32(30),
Token::MapEnd,
],
);
}
#[test]
fn set_serde_tokens_empty() {
let set = FxHashSet::<u32>::default();
assert_tokens(&set, &[Token::Seq { len: Some(0) }, Token::SeqEnd]);
}
#[test]
fn set_serde_tokens() {
let mut set = FxHashSet::default();
set.insert(20);
set.insert(10);
set.insert(30);
assert_tokens(
&set,
&[
Token::Seq { len: Some(3) },
Token::I32(20),
Token::I32(10),
Token::I32(30),
Token::SeqEnd,
],
);
}

30
third_party/rust/hashbrown-0.7.2/tests/set.rs (vendored)

@ -1,30 +0,0 @@
#![cfg(not(miri))] // FIXME: takes too long
use hashbrown::HashSet;
use rand::{distributions::Alphanumeric, rngs::SmallRng, Rng, SeedableRng};
#[test]
fn test_hashset_insert_remove() {
let mut m: HashSet<Vec<char>> = HashSet::new();
//let num: u32 = 4096;
//let tx: Vec<Vec<u8>> = (0..num).map(|i| (i..(16 + i)).collect()).collect();
let seed: [u8; 16] = [
130, 220, 246, 217, 111, 124, 221, 189, 190, 234, 121, 93, 67, 95, 100, 43,
];
let rng = &mut SmallRng::from_seed(seed);
let tx: Vec<Vec<char>> = (0..4096)
.map(|_| (rng.sample_iter(&Alphanumeric).take(32).collect()))
.collect();
for _ in 0..32 {
for i in 0..4096 {
assert_eq!(m.contains(&tx[i].clone()), false);
assert_eq!(m.insert(tx[i].clone()), true);
}
for i in 0..4096 {
println!("removing {} {:?}", i, tx[i]);
assert_eq!(m.remove(&tx[i]), true);
}
}
}


@ -1 +1 @@
{"files":{"Cargo.toml":"3707c837d19aeefff4bf899274055db9cd1fd8c7dcbbd5067622acb0f25ddd1f","src/boxes.rs":"7f989f8e91d173f323db22a8748ea911dd143c6304f66ccaf6ebc97d68ca5536","src/lib.rs":"4d4152c72ef49ced0a467879c51ff9e2430f704cd13a8eea4dc891b5f5843280","src/macros.rs":"76c840f9299797527fe71aa5b378ffb01312767372b45cc62deddb19775400ae","src/tests.rs":"6c1b8822410f5410d991f553925d3591f9c7ce41891191da8b3da62e783ebb02","tests/bug-1655846.avif":"e0a5a06225800fadf05f5352503a4cec11af73eef705c43b4acab5f4a99dea50","tests/bug-1661347.avif":"31c26561e1d9eafb60f7c5968b82a0859d203d73f17f26b29276256acee12966","tests/overflow.rs":"16b591d8def1a155b3b997622f6ea255536870d99c3d8f97c51755b77a50de3c","tests/public.rs":"5ff2282b0f84f55e25e18dcca9acc5bffde806d885c897354e5f65292f295557"},"package":null}
{"files":{"Cargo.toml":"5f772c84750dde85555ebe9e01e30266c515402b44ac2735f183a5b143c43196","src/boxes.rs":"7f989f8e91d173f323db22a8748ea911dd143c6304f66ccaf6ebc97d68ca5536","src/lib.rs":"4d4152c72ef49ced0a467879c51ff9e2430f704cd13a8eea4dc891b5f5843280","src/macros.rs":"76c840f9299797527fe71aa5b378ffb01312767372b45cc62deddb19775400ae","src/tests.rs":"6c1b8822410f5410d991f553925d3591f9c7ce41891191da8b3da62e783ebb02","tests/bug-1655846.avif":"e0a5a06225800fadf05f5352503a4cec11af73eef705c43b4acab5f4a99dea50","tests/bug-1661347.avif":"31c26561e1d9eafb60f7c5968b82a0859d203d73f17f26b29276256acee12966","tests/overflow.rs":"16b591d8def1a155b3b997622f6ea255536870d99c3d8f97c51755b77a50de3c","tests/public.rs":"5ff2282b0f84f55e25e18dcca9acc5bffde806d885c897354e5f65292f295557"},"package":null}

4
third_party/rust/mp4parse/Cargo.toml (vendored)

@ -27,8 +27,8 @@ travis-ci = { repository = "https://github.com/mozilla/mp4parse-rust" }
[dependencies]
byteorder = "1.2.1"
bitreader = { version = "0.3.2" }
fallible_collections = { version = "0.1.3", features = ["std_io"] }
hashbrown = "0.7.1"
fallible_collections = { version = "0.2", features = ["std_io"] }
hashbrown = "0.9"
num-traits = "=0.2.10"
log = "0.4"
static_assertions = "1.1.0"


@ -1 +1 @@
{"files":{"Cargo.toml":"589072f91823f3ae58e4ccf0c2fc756c6512921497d8df648049ad620233059c","cbindgen.toml":"5c9429f271d6e914d81b63e6509c04ffe84cab11ed3a53a2ed4715e5d5ace80e","examples/dump.rs":"83462422315c22e496960bae922edb23105c0aa272d2b106edd7574ff068513a","src/lib.rs":"76fb7cd35bbe56463d43f452fd7ebaf5b4619bd661fb71fb45f69e980877b424","tests/test_chunk_out_of_range.rs":"b5da583218d98027ed973a29c67434a91a1306f2d2fb39ec4d640d4824c308ce","tests/test_encryption.rs":"ca98516ff423c03b5fcc17b05f993f13b32485e4cf3ba86faf1bea72681d75ce","tests/test_fragment.rs":"e90eb5a4418d30002655466c0c4b3125c7fd70a74b6871471eaa172f1def9db8","tests/test_rotation.rs":"fb43c2f2dfa496d151c33bdd46c0fd3252387c23cc71e2cac9ed0234de715a81","tests/test_sample_table.rs":"185755909b2f4e0ea99604bb423a07623d614a180accdaebd1c98aef2c2e3ae6","tests/test_workaround_stsc.rs":"7dd419f3d55b9a3a039cac57e58a9240a9c8166bcd4356c24f69f731c3ced83b"},"package":null}
{"files":{"Cargo.toml":"87fce2f5e05b37d95cf255301762ea20d08a772112da2ade46eb39a76e8d27e0","cbindgen.toml":"5c9429f271d6e914d81b63e6509c04ffe84cab11ed3a53a2ed4715e5d5ace80e","examples/dump.rs":"83462422315c22e496960bae922edb23105c0aa272d2b106edd7574ff068513a","src/lib.rs":"76fb7cd35bbe56463d43f452fd7ebaf5b4619bd661fb71fb45f69e980877b424","tests/test_chunk_out_of_range.rs":"b5da583218d98027ed973a29c67434a91a1306f2d2fb39ec4d640d4824c308ce","tests/test_encryption.rs":"ca98516ff423c03b5fcc17b05f993f13b32485e4cf3ba86faf1bea72681d75ce","tests/test_fragment.rs":"e90eb5a4418d30002655466c0c4b3125c7fd70a74b6871471eaa172f1def9db8","tests/test_rotation.rs":"fb43c2f2dfa496d151c33bdd46c0fd3252387c23cc71e2cac9ed0234de715a81","tests/test_sample_table.rs":"185755909b2f4e0ea99604bb423a07623d614a180accdaebd1c98aef2c2e3ae6","tests/test_workaround_stsc.rs":"7dd419f3d55b9a3a039cac57e58a9240a9c8166bcd4356c24f69f731c3ced83b"},"package":null}

2
third_party/rust/mp4parse_capi/Cargo.toml (vendored)

@ -24,7 +24,7 @@ travis-ci = { repository = "https://github.com/mozilla/mp4parse-rust" }
[dependencies]
byteorder = "1.2.1"
fallible_collections = { version = "0.1.3", features = ["std_io"] }
fallible_collections = { version = "0.2", features = ["std_io"] }
log = "0.4"
mp4parse = {version = "0.11.2", path = "../mp4parse"}
num-traits = "=0.2.10"


@ -9,7 +9,7 @@ description = "Shared Rust code for libxul"
geckoservo = { path = "../../../../servo/ports/geckolib" }
kvstore = { path = "../../../components/kvstore" }
lmdb-rkv-sys = { version = "0.11", features = ["mdb_idl_logn_9"] }
mp4parse_capi = { git = "https://github.com/mozilla/mp4parse-rust", rev = "fe9028570e44f3a725dd78bbb58428909c4618bf" }
mp4parse_capi = { git = "https://github.com/mozilla/mp4parse-rust", rev = "f7c35a30ff25521bebe64c19d3f306569ecb5385" }
nserror = { path = "../../../../xpcom/rust/nserror" }
nsstring = { path = "../../../../xpcom/rust/nsstring" }
netwerk_helper = { path = "../../../../netwerk/base/rust-helper" }