Backed out 2 changesets (bug 1628068) for ViaductRequest.cpp related bustages CLOSED TREE

Backed out changeset 928a5891c55d (bug 1628068)
Backed out changeset b6fe5d357bed (bug 1628068)
Bogdan Tara 2020-05-11 23:52:47 +03:00
Parent fcb6f227f2
Commit 3df4970bcc
113 changed files with 340 additions and 14669 deletions


@@ -25,7 +25,7 @@ rev = "0dc3e6e7c5371fe21f69b847f61c65fe6d6dc317"
[source."https://github.com/mozilla/application-services"]
git = "https://github.com/mozilla/application-services"
replace-with = "vendored-sources"
rev = "dd9bece6e205d4101c841ea5542e9b0814b29d9f"
rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8"
[source."https://github.com/mozilla-spidermonkey/jsparagus"]
git = "https://github.com/mozilla-spidermonkey/jsparagus"

Cargo.lock (generated, 65 lines changed)

@@ -39,12 +39,6 @@ dependencies = [
"winapi 0.3.7",
]
[[package]]
name = "anyhow"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff"
[[package]]
name = "app_units"
version = "0.7.0"
@@ -1261,7 +1255,7 @@ checksum = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3"
[[package]]
name = "error-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
dependencies = [
"failure",
]
@@ -1879,7 +1873,6 @@ dependencies = [
"storage",
"unic-langid",
"unic-langid-ffi",
"viaduct",
"webext_storage_bridge",
"webrender_bindings",
"wgpu_bindings",
@@ -2208,7 +2201,7 @@ dependencies = [
[[package]]
name = "interrupt-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
[[package]]
name = "intl-memoizer"
@@ -3139,7 +3132,7 @@ dependencies = [
[[package]]
name = "nss_build_common"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
[[package]]
name = "nsstring"
@@ -3260,9 +3253,9 @@ dependencies = [
[[package]]
name = "once_cell"
version = "1.3.1"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b"
checksum = "891f486f630e5c5a4916c7e16c4b24a53e78c860b646e9f8e005e4f16847bfed"
[[package]]
name = "opaque-debug"
@@ -3602,29 +3595,6 @@ dependencies = [
"uuid",
]
[[package]]
name = "prost"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212"
dependencies = [
"bytes 0.5.3",
"prost-derive",
]
[[package]]
name = "prost-derive"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72"
dependencies = [
"anyhow",
"itertools",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pulse"
version = "0.3.0"
@@ -4306,7 +4276,7 @@ dependencies = [
[[package]]
name = "sql-support"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
dependencies = [
"ffi-support",
"interrupt-support",
@@ -4503,7 +4473,7 @@ dependencies = [
[[package]]
name = "sync-guid"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
dependencies = [
"base64 0.12.0",
"rand",
@@ -4514,7 +4484,7 @@ dependencies = [
[[package]]
name = "sync15-traits"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
dependencies = [
"failure",
"ffi-support",
@@ -5127,23 +5097,6 @@ version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "078775d0255232fb988e6fccf26ddc9d1ac274299aaedcedce21c6f72cc533ce"
[[package]]
name = "viaduct"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
dependencies = [
"failure",
"failure_derive",
"ffi-support",
"log",
"once_cell",
"prost",
"prost-derive",
"serde",
"serde_json",
"url",
]
[[package]]
name = "void"
version = "1.0.2"
@@ -5246,7 +5199,7 @@ dependencies = [
[[package]]
name = "webext-storage"
version = "0.1.0"
source = "git+https://github.com/mozilla/application-services?rev=dd9bece6e205d4101c841ea5542e9b0814b29d9f#dd9bece6e205d4101c841ea5542e9b0814b29d9f"
source = "git+https://github.com/mozilla/application-services?rev=e1daa2a7e9add66c5a36a7c967495510c2e117e8#e1daa2a7e9add66c5a36a7c967495510c2e117e8"
dependencies = [
"error-support",
"failure",


@@ -8,14 +8,14 @@ edition = "2018"
[dependencies]
atomic_refcell = "0.1"
cstr = "0.1"
interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "dd9bece6e205d4101c841ea5542e9b0814b29d9f" }
interrupt-support = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
log = "0.4"
moz_task = { path = "../../../xpcom/rust/moz_task" }
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }
serde_json = "1"
storage_variant = { path = "../../../storage/variant" }
sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "dd9bece6e205d4101c841ea5542e9b0814b29d9f" }
sync15-traits = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
xpcom = { path = "../../../xpcom/rust/xpcom" }
[dependencies.thin-vec]


@@ -1 +0,0 @@
{"files":{"Cargo.toml":"d5ccbc6846ecbbd2d0497b561d62d18437dd2fc393d41563ba6f6b294b2ce008","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"485188f82d638ebe521e6ab24f775929c4c84c70eb75a580b5cbdfb2f08a217d","build.rs":"42e4fff05dc773820ee1843aba8e36389a36386d6e6082e778d2203420bd72ab","src/backtrace.rs":"a82a8ffae2c68ee385dc78d8ec8cb6f3351234f0ad6af7e87df2371593e0f6aa","src/chain.rs":"1627608ce95c3484d26e1742a1b8b533b74c6159916b717bacffae3ae53731f9","src/context.rs":"f2be36af9588c924ed087bcb7bd681d330889e54b8f98cdc2aba93512a28762e","src/error.rs":"2c6a51880c7379265f7f7b6557e7bed808ff0f3267337a0c77dbbc1d2c4662e7","src/fmt.rs":"079d7b4faaa23f42423e0bb6b4e8a80d7d6d45c38c0d46bebd7d647c8679469f","src/kind.rs":"8481a8b7835eebb3859a8c32c217bf9c73543cfc62e3916b98d39af8b063125c","src/lib.rs":"3d41b20d604d6c519171e407c9d14cac120ba069bf82ea332dcad3a8f521ed6a","src/macros.rs":"77722190b58a6106b21aefd3b5d4f136a076afcdbc0fae21562d99e2c22912e1","src/wrapper.rs":"1229beca67dbd95ca77c9ecce282272acc55276c267c58cb73a75388b4693dda","tests/common/mod.rs":"f9088c2d7afafa64ff730b629272045b776bfafc2f5957508242da630635f2e1","tests/compiletest.rs":"0a52a44786aea1c299c695bf948b2ed2081e4cc344e5c2cadceab4eb03d0010d","tests/drop/mod.rs":"464bc1ddeae307eac906928286ec3edb77057c5c1302e02150d3649e2b861f1a","tests/test_autotrait.rs":"981e792db353be2f14c7a1cabe43b5f1329c168cb7679077cc2be786a0920d48","tests/test_backtrace.rs":"0e50edbb33b6bd07ba89ff3db72fb7c688ba2a4371fccdbbb20309ab02948b6a","tests/test_boxed.rs":"98a45325b1e86d4c5d3094ab99cd1ada1f771c505d2d7322f0afcbe7bdb71cfa","tests/test_chain.rs":"f28efeae7395d1c395e6f1a647b4199c25a00410ade45248c145c6fcf2fb448a","tests/test_context.rs":"f82c915b182df1a604a4cd558a03b1a821414983d6f6af6822398104cea70676","tests/test_convert.rs":"62840be1ee8022ba5e8c0d3fc1752a1526b2c47d4cceecff2b86790524c3b3ea","tests/test_downcast.rs":"253d6f54e554965023b378b037827ec6289c4779a7a7c12706e19c2731d219fe","tests/test_fmt.rs":"17572596f257aac9aa2ec4620e292ca6a954128b94772bb948399fab53832e70","tests/test_macros.rs":"c7d3d5e0b756f59d4858035025fb341d031369c88486fd9f961ee16bae6c78bf","tests/test_repr.rs":"dbb9b04ddbe1ab31eb5331ea69f05bb3a147299da2275a3d4dcc92947b5591b9","tests/test_source.rs":"b80723cf635a4f8c4df21891b34bfab9ed2b2aa407e7a2f826d24e334cd5f88e","tests/ui/no-impl.rs":"fab6cbf2f6ea510b86f567dfb3b7c31250a9fd71ae5d110dbb9188be569ec593","tests/ui/no-impl.stderr":"7c2c3f46c266a437300591f10be330f937ac6a0a2213ed5030a9fbc895e2d100"},"package":"d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff"}

third_party/rust/anyhow/Cargo.toml (vendored, 45 lines changed)

@@ -1,45 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "anyhow"
version = "1.0.28"
authors = ["David Tolnay <dtolnay@gmail.com>"]
description = "Flexible concrete Error type built on std::error::Error"
documentation = "https://docs.rs/anyhow"
readme = "README.md"
categories = ["rust-patterns"]
license = "MIT OR Apache-2.0"
repository = "https://github.com/dtolnay/anyhow"
[package.metadata.docs.rs]
rustdoc-args = ["--cfg", "doc_cfg"]
targets = ["x86_64-unknown-linux-gnu"]
[dev-dependencies.futures]
version = "0.3"
default-features = false
[dev-dependencies.rustversion]
version = "1.0"
[dev-dependencies.thiserror]
version = "1.0"
[dev-dependencies.trybuild]
version = "1.0.19"
features = ["diff"]
[features]
default = ["std"]
std = []
[badges.travis-ci]
repository = "dtolnay/anyhow"

third_party/rust/anyhow/LICENSE-APACHE (vendored, 201 lines changed)

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

third_party/rust/anyhow/LICENSE-MIT (vendored, 23 lines changed)

@@ -1,23 +0,0 @@
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

third_party/rust/anyhow/README.md (vendored, 175 lines changed)

@@ -1,175 +0,0 @@
Anyhow&ensp;¯\\\_(ツ)\_/¯
=========================
[![Build Status](https://api.travis-ci.com/dtolnay/anyhow.svg?branch=master)](https://travis-ci.com/dtolnay/anyhow)
[![Latest Version](https://img.shields.io/crates/v/anyhow.svg)](https://crates.io/crates/anyhow)
[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/anyhow)
This library provides [`anyhow::Error`][Error], a trait object based error type
for easy idiomatic error handling in Rust applications.
[Error]: https://docs.rs/anyhow/1.0/anyhow/struct.Error.html
```toml
[dependencies]
anyhow = "1.0"
```
*Compiler support: requires rustc 1.34+*
<br>
## Details
- Use `Result<T, anyhow::Error>`, or equivalently `anyhow::Result<T>`, as the
return type of any fallible function.
Within the function, use `?` to easily propagate any error that implements the
`std::error::Error` trait.
```rust
use anyhow::Result;
fn get_cluster_info() -> Result<ClusterMap> {
let config = std::fs::read_to_string("cluster.json")?;
let map: ClusterMap = serde_json::from_str(&config)?;
Ok(map)
}
```
- Attach context to help the person troubleshooting the error understand where
things went wrong. A low-level error like "No such file or directory" can be
annoying to debug without more context about what higher level step the
application was in the middle of.
```rust
use anyhow::{Context, Result};
fn main() -> Result<()> {
...
it.detach().context("Failed to detach the important thing")?;
let content = std::fs::read(path)
.with_context(|| format!("Failed to read instrs from {}", path))?;
...
}
```
```console
Error: Failed to read instrs from ./path/to/instrs.json
Caused by:
No such file or directory (os error 2)
```
- Downcasting is supported and can be by value, by shared reference, or by
mutable reference as needed.
```rust
// If the error was caused by redaction, then return a
// tombstone instead of the content.
match root_cause.downcast_ref::<DataStoreError>() {
Some(DataStoreError::Censored(_)) => Ok(Poll::Ready(REDACTED_CONTENT)),
None => Err(error),
}
```
- If using the nightly channel, a backtrace is captured and printed with the
error if the underlying error type does not already provide its own. In order
to see backtraces, they must be enabled through the environment variables
described in [`std::backtrace`]:
- If you want panics and errors to both have backtraces, set
`RUST_BACKTRACE=1`;
- If you want only errors to have backtraces, set `RUST_LIB_BACKTRACE=1`;
- If you want only panics to have backtraces, set `RUST_BACKTRACE=1` and
`RUST_LIB_BACKTRACE=0`.
The tracking issue for this feature is [rust-lang/rust#53487].
[`std::backtrace`]: https://doc.rust-lang.org/std/backtrace/index.html#environment-variables
[rust-lang/rust#53487]: https://github.com/rust-lang/rust/issues/53487
- Anyhow works with any error type that has an impl of `std::error::Error`,
including ones defined in your crate. We do not bundle a `derive(Error)` macro
but you can write the impls yourself or use a standalone macro like
[thiserror].
```rust
use thiserror::Error;
#[derive(Error, Debug)]
pub enum FormatError {
#[error("Invalid header (expected {expected:?}, got {found:?})")]
InvalidHeader {
expected: String,
found: String,
},
#[error("Missing attribute: {0}")]
MissingAttribute(String),
}
```
- One-off error messages can be constructed using the `anyhow!` macro, which
supports string interpolation and produces an `anyhow::Error`.
```rust
return Err(anyhow!("Missing attribute: {}", missing));
```
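The bullet points above show each piece in isolation. As a minimal end-to-end sketch of how they compose (the `parse_port` function and its error messages are invented for illustration; it assumes `anyhow = "1.0"` as in the dependency snippet above):

```rust
use anyhow::{anyhow, Context, Result};

fn parse_port(raw: &str) -> Result<u16> {
    if raw.is_empty() {
        // One-off error built with the anyhow! macro.
        return Err(anyhow!("missing PORT value"));
    }
    // with_context converts the std::num::ParseIntError into anyhow::Error
    // and attaches a higher-level message; `?` then propagates it.
    let port: u16 = raw
        .parse()
        .with_context(|| format!("invalid PORT value {:?}", raw))?;
    Ok(port)
}

fn main() -> Result<()> {
    let port = parse_port("8080")?;
    println!("listening on port {}", port);
    Ok(())
}
```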
<br>
## No-std support
In no_std mode, the same API is almost all available and works the same way. To
depend on Anyhow in no_std mode, disable our default enabled "std" feature in
Cargo.toml. A global allocator is required.
```toml
[dependencies]
anyhow = { version = "1.0", default-features = false }
```
Since the `?`-based error conversions would normally rely on the
`std::error::Error` trait which is only available through std, no_std mode will
require an explicit `.map_err(Error::msg)` when working with a non-Anyhow error
type inside a function that returns Anyhow's error type.
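A rough sketch of that last point (the `DeviceError` type and the `read_config` and `load` functions are hypothetical; they stand in for an error type that implements `Display` and `Debug` but not `std::error::Error`):

```rust
use anyhow::{Error, Result};
use core::fmt;

// Hypothetical error type: Display + Debug, but no std::error::Error impl,
// which is the situation a no_std crate is typically in.
#[derive(Debug)]
struct DeviceError(&'static str);

impl fmt::Display for DeviceError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "device error: {}", self.0)
    }
}

fn read_config() -> Result<(), DeviceError> {
    Err(DeviceError("not ready"))
}

fn load() -> Result<()> {
    // Without the std-only From<E: std::error::Error> conversion, the error
    // is turned into anyhow::Error explicitly via Error::msg.
    read_config().map_err(Error::msg)?;
    Ok(())
}

fn main() {
    if let Err(err) = load() {
        println!("{:#}", err);
    }
}
```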
<br>
## Comparison to failure
The `anyhow::Error` type works something like `failure::Error`, but unlike
failure ours is built around the standard library's `std::error::Error` trait
rather than a separate trait `failure::Fail`. The standard library has adopted
the necessary improvements for this to be possible as part of [RFC 2504].
[RFC 2504]: https://github.com/rust-lang/rfcs/blob/master/text/2504-fix-error.md
<br>
## Comparison to thiserror
Use Anyhow if you don't care what error type your functions return, you just
want it to be easy. This is common in application code. Use [thiserror] if you
are a library that wants to design your own dedicated error type(s) so that on
failures the caller gets exactly the information that you choose.
[thiserror]: https://github.com/dtolnay/thiserror
<br>
#### License
<sup>
Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
</sup>
<br>
<sub>
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
be dual licensed as above, without any additional terms or conditions.
</sub>

third_party/rust/anyhow/build.rs (vendored, 62 lines changed)

@@ -1,62 +0,0 @@
use std::env;
use std::fs;
use std::path::Path;
use std::process::{Command, ExitStatus};
// This code exercises the surface area that we expect of the std Backtrace
// type. If the current toolchain is able to compile it, we go ahead and use
// backtrace in anyhow.
const PROBE: &str = r#"
#![feature(backtrace)]
#![allow(dead_code)]
use std::backtrace::{Backtrace, BacktraceStatus};
use std::error::Error;
use std::fmt::{self, Display};
#[derive(Debug)]
struct E;
impl Display for E {
fn fmt(&self, _formatter: &mut fmt::Formatter) -> fmt::Result {
unimplemented!()
}
}
impl Error for E {
fn backtrace(&self) -> Option<&Backtrace> {
let backtrace = Backtrace::capture();
match backtrace.status() {
BacktraceStatus::Captured | BacktraceStatus::Disabled | _ => {}
}
unimplemented!()
}
}
"#;
fn main() {
if !cfg!(feature = "std") {
return;
}
match compile_probe() {
Some(status) if status.success() => println!("cargo:rustc-cfg=backtrace"),
_ => {}
}
}
fn compile_probe() -> Option<ExitStatus> {
let rustc = env::var_os("RUSTC")?;
let out_dir = env::var_os("OUT_DIR")?;
let probefile = Path::new(&out_dir).join("probe.rs");
fs::write(&probefile, PROBE).ok()?;
Command::new(rustc)
.arg("--edition=2018")
.arg("--crate-name=anyhow_build")
.arg("--crate-type=lib")
.arg("--emit=metadata")
.arg("--out-dir")
.arg(out_dir)
.arg(probefile)
.status()
.ok()
}

third_party/rust/anyhow/src/backtrace.rs (vendored, 36 lines changed)

@@ -1,36 +0,0 @@
#[cfg(backtrace)]
pub(crate) use std::backtrace::Backtrace;
#[cfg(not(backtrace))]
pub(crate) enum Backtrace {}
#[cfg(backtrace)]
macro_rules! backtrace {
() => {
Some(Backtrace::capture())
};
}
#[cfg(not(backtrace))]
macro_rules! backtrace {
() => {
None
};
}
#[cfg(backtrace)]
macro_rules! backtrace_if_absent {
($err:expr) => {
match $err.backtrace() {
Some(_) => None,
None => Some(Backtrace::capture()),
}
};
}
#[cfg(all(feature = "std", not(backtrace)))]
macro_rules! backtrace_if_absent {
($err:expr) => {
None
};
}

third_party/rust/anyhow/src/chain.rs (vendored, 101 lines changed)

@@ -1,101 +0,0 @@
use self::ChainState::*;
use crate::StdError;
#[cfg(feature = "std")]
use std::vec;
#[cfg(feature = "std")]
pub(crate) use crate::Chain;
#[cfg(not(feature = "std"))]
pub(crate) struct Chain<'a> {
state: ChainState<'a>,
}
#[derive(Clone)]
pub(crate) enum ChainState<'a> {
Linked {
next: Option<&'a (dyn StdError + 'static)>,
},
#[cfg(feature = "std")]
Buffered {
rest: vec::IntoIter<&'a (dyn StdError + 'static)>,
},
}
impl<'a> Chain<'a> {
pub fn new(head: &'a (dyn StdError + 'static)) -> Self {
Chain {
state: ChainState::Linked { next: Some(head) },
}
}
}
impl<'a> Iterator for Chain<'a> {
type Item = &'a (dyn StdError + 'static);
fn next(&mut self) -> Option<Self::Item> {
match &mut self.state {
Linked { next } => {
let error = (*next)?;
*next = error.source();
Some(error)
}
#[cfg(feature = "std")]
Buffered { rest } => rest.next(),
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
}
#[cfg(feature = "std")]
impl DoubleEndedIterator for Chain<'_> {
fn next_back(&mut self) -> Option<Self::Item> {
match &mut self.state {
Linked { mut next } => {
let mut rest = Vec::new();
while let Some(cause) = next {
next = cause.source();
rest.push(cause);
}
let mut rest = rest.into_iter();
let last = rest.next_back();
self.state = Buffered { rest };
last
}
Buffered { rest } => rest.next_back(),
}
}
}
impl ExactSizeIterator for Chain<'_> {
fn len(&self) -> usize {
match &self.state {
Linked { mut next } => {
let mut len = 0;
while let Some(cause) = next {
next = cause.source();
len += 1;
}
len
}
#[cfg(feature = "std")]
Buffered { rest } => rest.len(),
}
}
}
#[cfg(feature = "std")]
impl Default for Chain<'_> {
fn default() -> Self {
Chain {
state: ChainState::Buffered {
rest: Vec::new().into_iter(),
},
}
}
}
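This iterator is what backs the `chain()` and `root_cause()` methods defined in `error.rs` below. A small hedged usage sketch (the error values are invented for illustration):

```rust
use anyhow::{anyhow, Context};

fn main() {
    // A two-level error: a root cause wrapped with one layer of context.
    let err = Err::<(), _>(anyhow!("disk is full"))
        .context("failed to save the report")
        .unwrap_err();

    // Chain walks source() links from the outermost error inward.
    for (i, cause) in err.chain().enumerate() {
        println!("{}: {}", i, cause);
    }

    // The root cause is the last element of the same chain.
    println!("root cause: {}", err.root_cause());
}
```

The `Buffered` state above exists only to support `next_back`: the `source()` links form a forward-only chain, so iterating from the back requires collecting it into a `Vec` once.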

third_party/rust/anyhow/src/context.rs (vendored, 177 lines changed)

@@ -1,177 +0,0 @@
use crate::error::ContextError;
use crate::{Context, Error, StdError};
use core::convert::Infallible;
use core::fmt::{self, Debug, Display, Write};
#[cfg(backtrace)]
use std::backtrace::Backtrace;
mod ext {
use super::*;
pub trait StdError {
fn ext_context<C>(self, context: C) -> Error
where
C: Display + Send + Sync + 'static;
}
#[cfg(feature = "std")]
impl<E> StdError for E
where
E: std::error::Error + Send + Sync + 'static,
{
fn ext_context<C>(self, context: C) -> Error
where
C: Display + Send + Sync + 'static,
{
let backtrace = backtrace_if_absent!(self);
Error::from_context(context, self, backtrace)
}
}
impl StdError for Error {
fn ext_context<C>(self, context: C) -> Error
where
C: Display + Send + Sync + 'static,
{
self.context(context)
}
}
}
impl<T, E> Context<T, E> for Result<T, E>
where
E: ext::StdError + Send + Sync + 'static,
{
fn context<C>(self, context: C) -> Result<T, Error>
where
C: Display + Send + Sync + 'static,
{
self.map_err(|error| error.ext_context(context))
}
fn with_context<C, F>(self, context: F) -> Result<T, Error>
where
C: Display + Send + Sync + 'static,
F: FnOnce() -> C,
{
self.map_err(|error| error.ext_context(context()))
}
}
/// ```
/// # type T = ();
/// #
/// use anyhow::{Context, Result};
///
/// fn maybe_get() -> Option<T> {
/// # const IGNORE: &str = stringify! {
/// ...
/// # };
/// # unimplemented!()
/// }
///
/// fn demo() -> Result<()> {
/// let t = maybe_get().context("there is no T")?;
/// # const IGNORE: &str = stringify! {
/// ...
/// # };
/// # unimplemented!()
/// }
/// ```
impl<T> Context<T, Infallible> for Option<T> {
fn context<C>(self, context: C) -> Result<T, Error>
where
C: Display + Send + Sync + 'static,
{
self.ok_or_else(|| Error::from_display(context, backtrace!()))
}
fn with_context<C, F>(self, context: F) -> Result<T, Error>
where
C: Display + Send + Sync + 'static,
F: FnOnce() -> C,
{
self.ok_or_else(|| Error::from_display(context(), backtrace!()))
}
}
impl<C, E> Debug for ContextError<C, E>
where
C: Display,
E: Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Error")
.field("context", &Quoted(&self.context))
.field("source", &self.error)
.finish()
}
}
impl<C, E> Display for ContextError<C, E>
where
C: Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.context, f)
}
}
impl<C, E> StdError for ContextError<C, E>
where
C: Display,
E: StdError + 'static,
{
#[cfg(backtrace)]
fn backtrace(&self) -> Option<&Backtrace> {
self.error.backtrace()
}
fn source(&self) -> Option<&(dyn StdError + 'static)> {
Some(&self.error)
}
}
impl<C> StdError for ContextError<C, Error>
where
C: Display,
{
#[cfg(backtrace)]
fn backtrace(&self) -> Option<&Backtrace> {
Some(self.error.backtrace())
}
fn source(&self) -> Option<&(dyn StdError + 'static)> {
Some(self.error.inner.error())
}
}
struct Quoted<C>(C);
impl<C> Debug for Quoted<C>
where
C: Display,
{
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_char('"')?;
Quoted(&mut *formatter).write_fmt(format_args!("{}", self.0))?;
formatter.write_char('"')?;
Ok(())
}
}
impl Write for Quoted<&mut fmt::Formatter<'_>> {
fn write_str(&mut self, s: &str) -> fmt::Result {
Display::fmt(&s.escape_debug(), self.0)
}
}
pub(crate) mod private {
use super::*;
pub trait Sealed {}
impl<T, E> Sealed for Result<T, E> where E: ext::StdError {}
impl<T> Sealed for Option<T> {}
}

third_party/rust/anyhow/src/error.rs (vendored, 802 lines changed)

@@ -1,802 +0,0 @@
use crate::alloc::Box;
use crate::backtrace::Backtrace;
use crate::chain::Chain;
use crate::{Error, StdError};
use core::any::TypeId;
use core::fmt::{self, Debug, Display};
use core::mem::{self, ManuallyDrop};
use core::ptr::{self, NonNull};
#[cfg(feature = "std")]
use core::ops::{Deref, DerefMut};
impl Error {
/// Create a new error object from any error type.
///
/// The error type must be threadsafe and `'static`, so that the `Error`
/// will be as well.
///
/// If the error type does not provide a backtrace, a backtrace will be
/// created here to ensure that a backtrace exists.
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub fn new<E>(error: E) -> Self
where
E: StdError + Send + Sync + 'static,
{
let backtrace = backtrace_if_absent!(error);
Error::from_std(error, backtrace)
}
/// Create a new error object from a printable error message.
///
/// If the argument implements std::error::Error, prefer `Error::new`
/// instead which preserves the underlying error's cause chain and
/// backtrace. If the argument may or may not implement std::error::Error
/// now or in the future, use `anyhow!(err)` which handles either way
/// correctly.
///
/// `Error::msg("...")` is equivalent to `anyhow!("...")` but occasionally
/// convenient in places where a function is preferable over a macro, such
/// as iterator or stream combinators:
///
/// ```
/// # mod ffi {
/// # pub struct Input;
/// # pub struct Output;
/// # pub async fn do_some_work(_: Input) -> Result<Output, &'static str> {
/// # unimplemented!()
/// # }
/// # }
/// #
/// # use ffi::{Input, Output};
/// #
/// use anyhow::{Error, Result};
/// use futures::stream::{Stream, StreamExt, TryStreamExt};
///
/// async fn demo<S>(stream: S) -> Result<Vec<Output>>
/// where
/// S: Stream<Item = Input>,
/// {
/// stream
/// .then(ffi::do_some_work) // returns Result<Output, &str>
/// .map_err(Error::msg)
/// .try_collect()
/// .await
/// }
/// ```
pub fn msg<M>(message: M) -> Self
where
M: Display + Debug + Send + Sync + 'static,
{
Error::from_adhoc(message, backtrace!())
}
#[cfg(feature = "std")]
pub(crate) fn from_std<E>(error: E, backtrace: Option<Backtrace>) -> Self
where
E: StdError + Send + Sync + 'static,
{
let vtable = &ErrorVTable {
object_drop: object_drop::<E>,
object_ref: object_ref::<E>,
#[cfg(feature = "std")]
object_mut: object_mut::<E>,
object_boxed: object_boxed::<E>,
object_downcast: object_downcast::<E>,
object_drop_rest: object_drop_front::<E>,
};
// Safety: passing vtable that operates on the right type E.
unsafe { Error::construct(error, vtable, backtrace) }
}
pub(crate) fn from_adhoc<M>(message: M, backtrace: Option<Backtrace>) -> Self
where
M: Display + Debug + Send + Sync + 'static,
{
use crate::wrapper::MessageError;
let error: MessageError<M> = MessageError(message);
let vtable = &ErrorVTable {
object_drop: object_drop::<MessageError<M>>,
object_ref: object_ref::<MessageError<M>>,
#[cfg(feature = "std")]
object_mut: object_mut::<MessageError<M>>,
object_boxed: object_boxed::<MessageError<M>>,
object_downcast: object_downcast::<M>,
object_drop_rest: object_drop_front::<M>,
};
// Safety: MessageError is repr(transparent) so it is okay for the
// vtable to allow casting the MessageError<M> to M.
unsafe { Error::construct(error, vtable, backtrace) }
}
pub(crate) fn from_display<M>(message: M, backtrace: Option<Backtrace>) -> Self
where
M: Display + Send + Sync + 'static,
{
use crate::wrapper::DisplayError;
let error: DisplayError<M> = DisplayError(message);
let vtable = &ErrorVTable {
object_drop: object_drop::<DisplayError<M>>,
object_ref: object_ref::<DisplayError<M>>,
#[cfg(feature = "std")]
object_mut: object_mut::<DisplayError<M>>,
object_boxed: object_boxed::<DisplayError<M>>,
object_downcast: object_downcast::<M>,
object_drop_rest: object_drop_front::<M>,
};
// Safety: DisplayError is repr(transparent) so it is okay for the
// vtable to allow casting the DisplayError<M> to M.
unsafe { Error::construct(error, vtable, backtrace) }
}
#[cfg(feature = "std")]
pub(crate) fn from_context<C, E>(context: C, error: E, backtrace: Option<Backtrace>) -> Self
where
C: Display + Send + Sync + 'static,
E: StdError + Send + Sync + 'static,
{
let error: ContextError<C, E> = ContextError { context, error };
let vtable = &ErrorVTable {
object_drop: object_drop::<ContextError<C, E>>,
object_ref: object_ref::<ContextError<C, E>>,
#[cfg(feature = "std")]
object_mut: object_mut::<ContextError<C, E>>,
object_boxed: object_boxed::<ContextError<C, E>>,
object_downcast: context_downcast::<C, E>,
object_drop_rest: context_drop_rest::<C, E>,
};
// Safety: passing vtable that operates on the right type.
unsafe { Error::construct(error, vtable, backtrace) }
}
#[cfg(feature = "std")]
pub(crate) fn from_boxed(
error: Box<dyn StdError + Send + Sync>,
backtrace: Option<Backtrace>,
) -> Self {
use crate::wrapper::BoxedError;
let error = BoxedError(error);
let vtable = &ErrorVTable {
object_drop: object_drop::<BoxedError>,
object_ref: object_ref::<BoxedError>,
#[cfg(feature = "std")]
object_mut: object_mut::<BoxedError>,
object_boxed: object_boxed::<BoxedError>,
object_downcast: object_downcast::<Box<dyn StdError + Send + Sync>>,
object_drop_rest: object_drop_front::<Box<dyn StdError + Send + Sync>>,
};
// Safety: BoxedError is repr(transparent) so it is okay for the vtable
// to allow casting to Box<dyn StdError + Send + Sync>.
unsafe { Error::construct(error, vtable, backtrace) }
}
// Takes backtrace as argument rather than capturing it here so that the
// user sees one fewer layer of wrapping noise in the backtrace.
//
// Unsafe because the given vtable must have sensible behavior on the error
// value of type E.
unsafe fn construct<E>(
error: E,
vtable: &'static ErrorVTable,
backtrace: Option<Backtrace>,
) -> Self
where
E: StdError + Send + Sync + 'static,
{
let inner = Box::new(ErrorImpl {
vtable,
backtrace,
_object: error,
});
// Erase the concrete type of E from the compile-time type system. This
// is equivalent to the safe unsize coercion from Box<ErrorImpl<E>> to
// Box<ErrorImpl<dyn StdError + Send + Sync + 'static>> except that the
// result is a thin pointer. The necessary behavior for manipulating the
// underlying ErrorImpl<E> is preserved in the vtable provided by the
// caller rather than a builtin fat pointer vtable.
let erased = mem::transmute::<Box<ErrorImpl<E>>, Box<ErrorImpl<()>>>(inner);
let inner = ManuallyDrop::new(erased);
Error { inner }
}
/// Wrap the error value with additional context.
///
/// For attaching context to a `Result` as it is propagated, the
/// [`Context`][crate::Context] extension trait may be more convenient than
/// this function.
///
/// The primary reason to use `error.context(...)` instead of
/// `result.context(...)` via the `Context` trait would be if the context
/// needs to depend on some data held by the underlying error:
///
/// ```
/// # use std::fmt::{self, Debug, Display};
/// #
/// # type T = ();
/// #
/// # impl std::error::Error for ParseError {}
/// # impl Debug for ParseError {
/// # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
/// # unimplemented!()
/// # }
/// # }
/// # impl Display for ParseError {
/// # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
/// # unimplemented!()
/// # }
/// # }
/// #
/// use anyhow::Result;
/// use std::fs::File;
/// use std::path::Path;
///
/// struct ParseError {
/// line: usize,
/// column: usize,
/// }
///
/// fn parse_impl(file: File) -> Result<T, ParseError> {
/// # const IGNORE: &str = stringify! {
/// ...
/// # };
/// # unimplemented!()
/// }
///
/// pub fn parse(path: impl AsRef<Path>) -> Result<T> {
/// let file = File::open(&path)?;
/// parse_impl(file).map_err(|error| {
/// let context = format!(
/// "only the first {} lines of {} are valid",
/// error.line, path.as_ref().display(),
/// );
/// anyhow::Error::new(error).context(context)
/// })
/// }
/// ```
pub fn context<C>(self, context: C) -> Self
where
C: Display + Send + Sync + 'static,
{
let error: ContextError<C, Error> = ContextError {
context,
error: self,
};
let vtable = &ErrorVTable {
object_drop: object_drop::<ContextError<C, Error>>,
object_ref: object_ref::<ContextError<C, Error>>,
#[cfg(feature = "std")]
object_mut: object_mut::<ContextError<C, Error>>,
object_boxed: object_boxed::<ContextError<C, Error>>,
object_downcast: context_chain_downcast::<C>,
object_drop_rest: context_chain_drop_rest::<C>,
};
// As the cause is anyhow::Error, we already have a backtrace for it.
let backtrace = None;
// Safety: passing vtable that operates on the right type.
unsafe { Error::construct(error, vtable, backtrace) }
}
/// Get the backtrace for this Error.
///
/// Backtraces are only available on the nightly channel. Tracking issue:
/// [rust-lang/rust#53487][tracking].
///
/// In order for the backtrace to be meaningful, one of the two environment
/// variables `RUST_LIB_BACKTRACE=1` or `RUST_BACKTRACE=1` must be defined
/// and `RUST_LIB_BACKTRACE` must not be `0`. Backtraces are somewhat
/// expensive to capture in Rust, so we don't necessarily want to be
/// capturing them all over the place all the time.
///
/// - If you want panics and errors to both have backtraces, set
/// `RUST_BACKTRACE=1`;
/// - If you want only errors to have backtraces, set
/// `RUST_LIB_BACKTRACE=1`;
/// - If you want only panics to have backtraces, set `RUST_BACKTRACE=1` and
/// `RUST_LIB_BACKTRACE=0`.
///
/// [tracking]: https://github.com/rust-lang/rust/issues/53487
#[cfg(backtrace)]
pub fn backtrace(&self) -> &Backtrace {
self.inner.backtrace()
}
/// An iterator of the chain of source errors contained by this Error.
///
/// This iterator will visit every error in the cause chain of this error
/// object, beginning with the error that this error object was created
/// from.
///
/// # Example
///
/// ```
/// use anyhow::Error;
/// use std::io;
///
/// pub fn underlying_io_error_kind(error: &Error) -> Option<io::ErrorKind> {
/// for cause in error.chain() {
/// if let Some(io_error) = cause.downcast_ref::<io::Error>() {
/// return Some(io_error.kind());
/// }
/// }
/// None
/// }
/// ```
#[cfg(feature = "std")]
pub fn chain(&self) -> Chain {
self.inner.chain()
}
/// The lowest level cause of this error &mdash; this error's cause's
/// cause's cause etc.
///
/// The root cause is the last error in the iterator produced by
/// [`chain()`][Error::chain].
#[cfg(feature = "std")]
pub fn root_cause(&self) -> &(dyn StdError + 'static) {
let mut chain = self.chain();
let mut root_cause = chain.next().unwrap();
for cause in chain {
root_cause = cause;
}
root_cause
}
/// Returns true if `E` is the type held by this error object.
///
/// For errors with context, this method returns true if `E` matches the
/// type of the context `C` **or** the type of the error on which the
/// context has been attached. For details about the interaction between
/// context and downcasting, [see here].
///
/// [see here]: trait.Context.html#effect-on-downcasting
pub fn is<E>(&self) -> bool
where
E: Display + Debug + Send + Sync + 'static,
{
self.downcast_ref::<E>().is_some()
}
/// Attempt to downcast the error object to a concrete type.
pub fn downcast<E>(self) -> Result<E, Self>
where
E: Display + Debug + Send + Sync + 'static,
{
let target = TypeId::of::<E>();
unsafe {
// Use vtable to find NonNull<()> which points to a value of type E
// somewhere inside the data structure.
let addr = match (self.inner.vtable.object_downcast)(&self.inner, target) {
Some(addr) => addr,
None => return Err(self),
};
// Prepare to read E out of the data structure. We'll drop the rest
// of the data structure separately so that E is not dropped.
let outer = ManuallyDrop::new(self);
// Read E from where the vtable found it.
let error = ptr::read(addr.cast::<E>().as_ptr());
// Read Box<ErrorImpl<()>> from self. Can't move it out because
// Error has a Drop impl which we want to not run.
let inner = ptr::read(&outer.inner);
let erased = ManuallyDrop::into_inner(inner);
// Drop rest of the data structure outside of E.
(erased.vtable.object_drop_rest)(erased, target);
Ok(error)
}
}
/// Downcast this error object by reference.
///
/// # Example
///
/// ```
/// # use anyhow::anyhow;
/// # use std::fmt::{self, Display};
/// # use std::task::Poll;
/// #
/// # #[derive(Debug)]
/// # enum DataStoreError {
/// # Censored(()),
/// # }
/// #
/// # impl Display for DataStoreError {
/// # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
/// # unimplemented!()
/// # }
/// # }
/// #
/// # impl std::error::Error for DataStoreError {}
/// #
/// # const REDACTED_CONTENT: () = ();
/// #
/// # let error = anyhow!("...");
/// # let root_cause = &error;
/// #
/// # let ret =
/// // If the error was caused by redaction, then return a tombstone instead
/// // of the content.
/// match root_cause.downcast_ref::<DataStoreError>() {
/// Some(DataStoreError::Censored(_)) => Ok(Poll::Ready(REDACTED_CONTENT)),
/// None => Err(error),
/// }
/// # ;
/// ```
pub fn downcast_ref<E>(&self) -> Option<&E>
where
E: Display + Debug + Send + Sync + 'static,
{
let target = TypeId::of::<E>();
unsafe {
// Use vtable to find NonNull<()> which points to a value of type E
// somewhere inside the data structure.
let addr = (self.inner.vtable.object_downcast)(&self.inner, target)?;
Some(&*addr.cast::<E>().as_ptr())
}
}
/// Downcast this error object by mutable reference.
pub fn downcast_mut<E>(&mut self) -> Option<&mut E>
where
E: Display + Debug + Send + Sync + 'static,
{
let target = TypeId::of::<E>();
unsafe {
// Use vtable to find NonNull<()> which points to a value of type E
// somewhere inside the data structure.
let addr = (self.inner.vtable.object_downcast)(&self.inner, target)?;
Some(&mut *addr.cast::<E>().as_ptr())
}
}
}
#[cfg(feature = "std")]
impl<E> From<E> for Error
where
E: StdError + Send + Sync + 'static,
{
fn from(error: E) -> Self {
let backtrace = backtrace_if_absent!(error);
Error::from_std(error, backtrace)
}
}
#[cfg(feature = "std")]
impl Deref for Error {
type Target = dyn StdError + Send + Sync + 'static;
fn deref(&self) -> &Self::Target {
self.inner.error()
}
}
#[cfg(feature = "std")]
impl DerefMut for Error {
fn deref_mut(&mut self) -> &mut Self::Target {
self.inner.error_mut()
}
}
impl Display for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
self.inner.display(formatter)
}
}
impl Debug for Error {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
self.inner.debug(formatter)
}
}
impl Drop for Error {
fn drop(&mut self) {
unsafe {
// Read Box<ErrorImpl<()>> from self.
let inner = ptr::read(&self.inner);
let erased = ManuallyDrop::into_inner(inner);
// Invoke the vtable's drop behavior.
(erased.vtable.object_drop)(erased);
}
}
}
struct ErrorVTable {
object_drop: unsafe fn(Box<ErrorImpl<()>>),
object_ref: unsafe fn(&ErrorImpl<()>) -> &(dyn StdError + Send + Sync + 'static),
#[cfg(feature = "std")]
object_mut: unsafe fn(&mut ErrorImpl<()>) -> &mut (dyn StdError + Send + Sync + 'static),
object_boxed: unsafe fn(Box<ErrorImpl<()>>) -> Box<dyn StdError + Send + Sync + 'static>,
object_downcast: unsafe fn(&ErrorImpl<()>, TypeId) -> Option<NonNull<()>>,
object_drop_rest: unsafe fn(Box<ErrorImpl<()>>, TypeId),
}
// Safety: requires layout of *e to match ErrorImpl<E>.
unsafe fn object_drop<E>(e: Box<ErrorImpl<()>>) {
// Cast back to ErrorImpl<E> so that the allocator receives the correct
// Layout to deallocate the Box's memory.
let unerased = mem::transmute::<Box<ErrorImpl<()>>, Box<ErrorImpl<E>>>(e);
drop(unerased);
}
// Safety: requires layout of *e to match ErrorImpl<E>.
unsafe fn object_drop_front<E>(e: Box<ErrorImpl<()>>, target: TypeId) {
// Drop the fields of ErrorImpl other than E as well as the Box allocation,
// without dropping E itself. This is used by downcast after doing a
// ptr::read to take ownership of the E.
let _ = target;
let unerased = mem::transmute::<Box<ErrorImpl<()>>, Box<ErrorImpl<ManuallyDrop<E>>>>(e);
drop(unerased);
}
// Safety: requires layout of *e to match ErrorImpl<E>.
unsafe fn object_ref<E>(e: &ErrorImpl<()>) -> &(dyn StdError + Send + Sync + 'static)
where
E: StdError + Send + Sync + 'static,
{
// Attach E's native StdError vtable onto a pointer to self._object.
&(*(e as *const ErrorImpl<()> as *const ErrorImpl<E>))._object
}
// Safety: requires layout of *e to match ErrorImpl<E>.
#[cfg(feature = "std")]
unsafe fn object_mut<E>(e: &mut ErrorImpl<()>) -> &mut (dyn StdError + Send + Sync + 'static)
where
E: StdError + Send + Sync + 'static,
{
// Attach E's native StdError vtable onto a pointer to self._object.
&mut (*(e as *mut ErrorImpl<()> as *mut ErrorImpl<E>))._object
}
// Safety: requires layout of *e to match ErrorImpl<E>.
unsafe fn object_boxed<E>(e: Box<ErrorImpl<()>>) -> Box<dyn StdError + Send + Sync + 'static>
where
E: StdError + Send + Sync + 'static,
{
// Attach ErrorImpl<E>'s native StdError vtable. The StdError impl is below.
mem::transmute::<Box<ErrorImpl<()>>, Box<ErrorImpl<E>>>(e)
}
// Safety: requires layout of *e to match ErrorImpl<E>.
unsafe fn object_downcast<E>(e: &ErrorImpl<()>, target: TypeId) -> Option<NonNull<()>>
where
E: 'static,
{
if TypeId::of::<E>() == target {
// Caller is looking for an E pointer and e is ErrorImpl<E>, take a
// pointer to its E field.
let unerased = e as *const ErrorImpl<()> as *const ErrorImpl<E>;
let addr = &(*unerased)._object as *const E as *mut ();
Some(NonNull::new_unchecked(addr))
} else {
None
}
}
// Safety: requires layout of *e to match ErrorImpl<ContextError<C, E>>.
#[cfg(feature = "std")]
unsafe fn context_downcast<C, E>(e: &ErrorImpl<()>, target: TypeId) -> Option<NonNull<()>>
where
C: 'static,
E: 'static,
{
if TypeId::of::<C>() == target {
let unerased = e as *const ErrorImpl<()> as *const ErrorImpl<ContextError<C, E>>;
let addr = &(*unerased)._object.context as *const C as *mut ();
Some(NonNull::new_unchecked(addr))
} else if TypeId::of::<E>() == target {
let unerased = e as *const ErrorImpl<()> as *const ErrorImpl<ContextError<C, E>>;
let addr = &(*unerased)._object.error as *const E as *mut ();
Some(NonNull::new_unchecked(addr))
} else {
None
}
}
// Safety: requires layout of *e to match ErrorImpl<ContextError<C, E>>.
#[cfg(feature = "std")]
unsafe fn context_drop_rest<C, E>(e: Box<ErrorImpl<()>>, target: TypeId)
where
C: 'static,
E: 'static,
{
// Called after downcasting by value to either the C or the E and doing a
// ptr::read to take ownership of that value.
if TypeId::of::<C>() == target {
let unerased = mem::transmute::<
Box<ErrorImpl<()>>,
Box<ErrorImpl<ContextError<ManuallyDrop<C>, E>>>,
>(e);
drop(unerased);
} else {
let unerased = mem::transmute::<
Box<ErrorImpl<()>>,
Box<ErrorImpl<ContextError<C, ManuallyDrop<E>>>>,
>(e);
drop(unerased);
}
}
// Safety: requires layout of *e to match ErrorImpl<ContextError<C, Error>>.
unsafe fn context_chain_downcast<C>(e: &ErrorImpl<()>, target: TypeId) -> Option<NonNull<()>>
where
C: 'static,
{
if TypeId::of::<C>() == target {
let unerased = e as *const ErrorImpl<()> as *const ErrorImpl<ContextError<C, Error>>;
let addr = &(*unerased)._object.context as *const C as *mut ();
Some(NonNull::new_unchecked(addr))
} else {
// Recurse down the context chain per the inner error's vtable.
let unerased = e as *const ErrorImpl<()> as *const ErrorImpl<ContextError<C, Error>>;
let source = &(*unerased)._object.error;
(source.inner.vtable.object_downcast)(&source.inner, target)
}
}
// Safety: requires layout of *e to match ErrorImpl<ContextError<C, Error>>.
unsafe fn context_chain_drop_rest<C>(e: Box<ErrorImpl<()>>, target: TypeId)
where
C: 'static,
{
// Called after downcasting by value to either the C or one of the causes
// and doing a ptr::read to take ownership of that value.
if TypeId::of::<C>() == target {
let unerased = mem::transmute::<
Box<ErrorImpl<()>>,
Box<ErrorImpl<ContextError<ManuallyDrop<C>, Error>>>,
>(e);
// Drop the entire rest of the data structure rooted in the next Error.
drop(unerased);
} else {
let unerased = mem::transmute::<
Box<ErrorImpl<()>>,
Box<ErrorImpl<ContextError<C, ManuallyDrop<Error>>>>,
>(e);
// Read out a ManuallyDrop<Box<ErrorImpl<()>>> from the next error.
let inner = ptr::read(&unerased._object.error.inner);
drop(unerased);
let erased = ManuallyDrop::into_inner(inner);
// Recursively drop the next error using the same target typeid.
(erased.vtable.object_drop_rest)(erased, target);
}
}
// repr C to ensure that E remains in the final position.
#[repr(C)]
pub(crate) struct ErrorImpl<E> {
vtable: &'static ErrorVTable,
backtrace: Option<Backtrace>,
// NOTE: Don't use directly. Use only through vtable. Erased type may have
// different alignment.
_object: E,
}
// repr C to ensure that ContextError<C, E> has the same layout as
// ContextError<ManuallyDrop<C>, E> and ContextError<C, ManuallyDrop<E>>.
#[repr(C)]
pub(crate) struct ContextError<C, E> {
pub context: C,
pub error: E,
}
impl<E> ErrorImpl<E> {
fn erase(&self) -> &ErrorImpl<()> {
// Erase the concrete type of E but preserve the vtable in self.vtable
// for manipulating the resulting thin pointer. This is analogous to an
// unsize coercion.
unsafe { &*(self as *const ErrorImpl<E> as *const ErrorImpl<()>) }
}
}
impl ErrorImpl<()> {
pub(crate) fn error(&self) -> &(dyn StdError + Send + Sync + 'static) {
// Use vtable to attach E's native StdError vtable for the right
// original type E.
unsafe { &*(self.vtable.object_ref)(self) }
}
#[cfg(feature = "std")]
pub(crate) fn error_mut(&mut self) -> &mut (dyn StdError + Send + Sync + 'static) {
// Use vtable to attach E's native StdError vtable for the right
// original type E.
unsafe { &mut *(self.vtable.object_mut)(self) }
}
#[cfg(backtrace)]
pub(crate) fn backtrace(&self) -> &Backtrace {
// This unwrap can only panic if the underlying error's backtrace method
// is nondeterministic, which would only happen in maliciously
// constructed code.
self.backtrace
.as_ref()
.or_else(|| self.error().backtrace())
.expect("backtrace capture failed")
}
pub(crate) fn chain(&self) -> Chain {
Chain::new(self.error())
}
}
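// A standalone sketch (hypothetical helper, not part of this file) of what the
// Chain returned by chain() iterates over: the head error followed by each
// transitive source(), in order.
fn print_chain_sketch(head: &(dyn StdError + 'static)) {
    let mut current = Some(head);
    while let Some(err) = current {
        // One line per cause, like the "Caused by:" listing built in fmt.rs.
        eprintln!("{}", err);
        current = err.source();
    }
}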
impl<E> StdError for ErrorImpl<E>
where
E: StdError,
{
#[cfg(backtrace)]
fn backtrace(&self) -> Option<&Backtrace> {
Some(self.erase().backtrace())
}
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.erase().error().source()
}
}
impl<E> Debug for ErrorImpl<E>
where
E: Debug,
{
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
self.erase().debug(formatter)
}
}
impl<E> Display for ErrorImpl<E>
where
E: Display,
{
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.erase().error(), formatter)
}
}
impl From<Error> for Box<dyn StdError + Send + Sync + 'static> {
fn from(error: Error) -> Self {
let outer = ManuallyDrop::new(error);
unsafe {
// Read Box<ErrorImpl<()>> from error. Can't move it out because
// Error has a Drop impl which we want to not run.
let inner = ptr::read(&outer.inner);
let erased = ManuallyDrop::into_inner(inner);
// Use vtable to attach ErrorImpl<E>'s native StdError vtable for
// the right original type E.
(erased.vtable.object_boxed)(erased)
}
}
}
impl From<Error> for Box<dyn StdError + 'static> {
fn from(error: Error) -> Self {
Box::<dyn StdError + Send + Sync>::from(error)
}
}
#[cfg(feature = "std")]
impl AsRef<dyn StdError + Send + Sync> for Error {
fn as_ref(&self) -> &(dyn StdError + Send + Sync + 'static) {
&**self
}
}
#[cfg(feature = "std")]
impl AsRef<dyn StdError> for Error {
fn as_ref(&self) -> &(dyn StdError + 'static) {
&**self
}
}
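// A standalone usage sketch (hypothetical names, written from a caller's point
// of view, so the crate is named as anyhow): the AsRef impls above let an
// anyhow::Error be passed to any API that accepts a plain std error reference,
// without boxing or cloning.
fn log_error_sketch(err: impl AsRef<dyn StdError>) {
    eprintln!("error: {}", err.as_ref());
}
fn report_sketch(err: &anyhow::Error) {
    // &anyhow::Error satisfies AsRef<dyn StdError> via the blanket impl for &T.
    log_error_sketch(err);
}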

149
third_party/rust/anyhow/src/fmt.rs vendored

@@ -1,149 +0,0 @@
use crate::chain::Chain;
use crate::error::ErrorImpl;
use core::fmt::{self, Debug, Write};
impl ErrorImpl<()> {
pub(crate) fn display(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.error())?;
if f.alternate() {
for cause in self.chain().skip(1) {
write!(f, ": {}", cause)?;
}
}
Ok(())
}
pub(crate) fn debug(&self, f: &mut fmt::Formatter) -> fmt::Result {
let error = self.error();
if f.alternate() {
return Debug::fmt(error, f);
}
write!(f, "{}", error)?;
if let Some(cause) = error.source() {
write!(f, "\n\nCaused by:")?;
let multiple = cause.source().is_some();
for (n, error) in Chain::new(cause).enumerate() {
writeln!(f)?;
let mut indented = Indented {
inner: f,
number: if multiple { Some(n) } else { None },
started: false,
};
write!(indented, "{}", error)?;
}
}
#[cfg(backtrace)]
{
use std::backtrace::BacktraceStatus;
let backtrace = self.backtrace();
if let BacktraceStatus::Captured = backtrace.status() {
let mut backtrace = backtrace.to_string();
if backtrace.starts_with("stack backtrace:") {
// Capitalize to match "Caused by:"
backtrace.replace_range(0..1, "S");
}
backtrace.truncate(backtrace.trim_end().len());
write!(f, "\n\n{}", backtrace)?;
}
}
Ok(())
}
}
struct Indented<'a, D> {
inner: &'a mut D,
number: Option<usize>,
started: bool,
}
impl<T> Write for Indented<'_, T>
where
T: Write,
{
fn write_str(&mut self, s: &str) -> fmt::Result {
for (i, line) in s.split('\n').enumerate() {
if !self.started {
self.started = true;
match self.number {
Some(number) => write!(self.inner, "{: >5}: ", number)?,
None => self.inner.write_str(" ")?,
}
} else if i > 0 {
self.inner.write_char('\n')?;
if self.number.is_some() {
self.inner.write_str(" ")?;
} else {
self.inner.write_str(" ")?;
}
}
self.inner.write_str(line)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn one_digit() {
let input = "verify\nthis";
let expected = " 2: verify\n this";
let mut output = String::new();
Indented {
inner: &mut output,
number: Some(2),
started: false,
}
.write_str(input)
.unwrap();
assert_eq!(expected, output);
}
#[test]
fn two_digits() {
let input = "verify\nthis";
let expected = " 12: verify\n this";
let mut output = String::new();
Indented {
inner: &mut output,
number: Some(12),
started: false,
}
.write_str(input)
.unwrap();
assert_eq!(expected, output);
}
#[test]
fn no_digits() {
let input = "verify\nthis";
let expected = " verify\n this";
let mut output = String::new();
Indented {
inner: &mut output,
number: None,
started: false,
}
.write_str(input)
.unwrap();
assert_eq!(expected, output);
}
}

116
third_party/rust/anyhow/src/kind.rs vendored

@@ -1,116 +0,0 @@
// Tagged dispatch mechanism for resolving the behavior of `anyhow!($expr)`.
//
// When anyhow! is given a single expr argument to turn into anyhow::Error, we
// want the resulting Error to pick up the input's implementation of source()
// and backtrace() if it has a std::error::Error impl, otherwise require nothing
// more than Display and Debug.
//
// Expressed in terms of specialization, we want something like:
//
// trait AnyhowNew {
// fn new(self) -> Error;
// }
//
// impl<T> AnyhowNew for T
// where
// T: Display + Debug + Send + Sync + 'static,
// {
// default fn new(self) -> Error {
// /* no std error impl */
// }
// }
//
// impl<T> AnyhowNew for T
// where
// T: std::error::Error + Send + Sync + 'static,
// {
// fn new(self) -> Error {
// /* use std error's source() and backtrace() */
// }
// }
//
// Since specialization is not stable yet, instead we rely on autoref behavior
// of method resolution to perform tagged dispatch. Here we have two traits
// AdhocKind and TraitKind that both have an anyhow_kind() method. AdhocKind is
// implemented whether or not the caller's type has a std error impl, while
// TraitKind is implemented only when a std error impl does exist. The ambiguity
// is resolved by AdhocKind requiring an extra autoref so that it has lower
// precedence.
//
// The anyhow! macro will set up the call in this form:
//
// #[allow(unused_imports)]
// use $crate::private::{AdhocKind, TraitKind};
// let error = $msg;
// (&error).anyhow_kind().new(error)
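// A standalone sketch (hypothetical names, separate from this module) of that
// autoref priority: the "special" trait is implemented on the value type
// itself, the fallback only on &T, so the special impl is found one autoref
// earlier and wins whenever both could apply. Into<u64> stands in for
// Into<Error> here.
#[cfg(test)]
mod autoref_sketch {
    use core::fmt::Display;
    pub struct Special;
    pub struct Fallback;
    pub trait SpecialKind: Sized {
        fn kind(&self) -> Special {
            Special
        }
    }
    impl<T: Into<u64>> SpecialKind for T {}
    pub trait FallbackKind {
        fn kind(&self) -> Fallback {
            Fallback
        }
    }
    impl<T: ?Sized + Display> FallbackKind for &T {}
    #[test]
    fn resolves_by_autoref() {
        // u32: Into<u64>, so the by-value trait is reached first.
        let _special: Special = (&1u32).kind();
        // String is only Display, so resolution falls through to the &T impl.
        let _fallback: Fallback = (&String::from("x")).kind();
    }
}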
use crate::Error;
use core::fmt::{Debug, Display};
#[cfg(feature = "std")]
use crate::StdError;
#[cfg(backtrace)]
use std::backtrace::Backtrace;
pub struct Adhoc;
pub trait AdhocKind: Sized {
#[inline]
fn anyhow_kind(&self) -> Adhoc {
Adhoc
}
}
impl<T> AdhocKind for &T where T: ?Sized + Display + Debug + Send + Sync + 'static {}
impl Adhoc {
pub fn new<M>(self, message: M) -> Error
where
M: Display + Debug + Send + Sync + 'static,
{
Error::from_adhoc(message, backtrace!())
}
}
pub struct Trait;
pub trait TraitKind: Sized {
#[inline]
fn anyhow_kind(&self) -> Trait {
Trait
}
}
impl<E> TraitKind for E where E: Into<Error> {}
impl Trait {
pub fn new<E>(self, error: E) -> Error
where
E: Into<Error>,
{
error.into()
}
}
#[cfg(feature = "std")]
pub struct Boxed;
#[cfg(feature = "std")]
pub trait BoxedKind: Sized {
#[inline]
fn anyhow_kind(&self) -> Boxed {
Boxed
}
}
#[cfg(feature = "std")]
impl BoxedKind for Box<dyn StdError + Send + Sync> {}
#[cfg(feature = "std")]
impl Boxed {
pub fn new(self, error: Box<dyn StdError + Send + Sync>) -> Error {
let backtrace = backtrace_if_absent!(error);
Error::from_boxed(error, backtrace)
}
}

595
third_party/rust/anyhow/src/lib.rs vendored

@@ -1,595 +0,0 @@
//! This library provides [`anyhow::Error`][Error], a trait object based error
//! type for easy idiomatic error handling in Rust applications.
//!
//! <br>
//!
//! # Details
//!
//! - Use `Result<T, anyhow::Error>`, or equivalently `anyhow::Result<T>`, as
//! the return type of any fallible function.
//!
//! Within the function, use `?` to easily propagate any error that implements
//! the `std::error::Error` trait.
//!
//! ```
//! # pub trait Deserialize {}
//! #
//! # mod serde_json {
//! # use super::Deserialize;
//! # use std::io;
//! #
//! # pub fn from_str<T: Deserialize>(json: &str) -> io::Result<T> {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # struct ClusterMap;
//! #
//! # impl Deserialize for ClusterMap {}
//! #
//! use anyhow::Result;
//!
//! fn get_cluster_info() -> Result<ClusterMap> {
//! let config = std::fs::read_to_string("cluster.json")?;
//! let map: ClusterMap = serde_json::from_str(&config)?;
//! Ok(map)
//! }
//! #
//! # fn main() {}
//! ```
//!
//! - Attach context to help the person troubleshooting the error understand
//! where things went wrong. A low-level error like "No such file or
//! directory" can be annoying to debug without more context about what higher
//! level step the application was in the middle of.
//!
//! ```
//! # struct It;
//! #
//! # impl It {
//! # fn detach(&self) -> Result<()> {
//! # unimplemented!()
//! # }
//! # }
//! #
//! use anyhow::{Context, Result};
//!
//! fn main() -> Result<()> {
//! # return Ok(());
//! #
//! # const _: &str = stringify! {
//! ...
//! # };
//! #
//! # let it = It;
//! # let path = "./path/to/instrs.json";
//! #
//! it.detach().context("Failed to detach the important thing")?;
//!
//! let content = std::fs::read(path)
//! .with_context(|| format!("Failed to read instrs from {}", path))?;
//! #
//! # const _: &str = stringify! {
//! ...
//! # };
//! #
//! # Ok(())
//! }
//! ```
//!
//! ```console
//! Error: Failed to read instrs from ./path/to/instrs.json
//!
//! Caused by:
//! No such file or directory (os error 2)
//! ```
//!
//! - Downcasting is supported and can be by value, by shared reference, or by
//! mutable reference as needed.
//!
//! ```
//! # use anyhow::anyhow;
//! # use std::fmt::{self, Display};
//! # use std::task::Poll;
//! #
//! # #[derive(Debug)]
//! # enum DataStoreError {
//! # Censored(()),
//! # }
//! #
//! # impl Display for DataStoreError {
//! # fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
//! # unimplemented!()
//! # }
//! # }
//! #
//! # impl std::error::Error for DataStoreError {}
//! #
//! # const REDACTED_CONTENT: () = ();
//! #
//! # let error = anyhow!("...");
//! # let root_cause = &error;
//! #
//! # let ret =
//! // If the error was caused by redaction, then return a
//! // tombstone instead of the content.
//! match root_cause.downcast_ref::<DataStoreError>() {
//! Some(DataStoreError::Censored(_)) => Ok(Poll::Ready(REDACTED_CONTENT)),
//! None => Err(error),
//! }
//! # ;
//! ```
//!
//! - If using the nightly channel, a backtrace is captured and printed with the
//! error if the underlying error type does not already provide its own. In
//! order to see backtraces, they must be enabled through the environment
//! variables described in [`std::backtrace`]:
//!
//! - If you want panics and errors to both have backtraces, set
//! `RUST_BACKTRACE=1`;
//! - If you want only errors to have backtraces, set `RUST_LIB_BACKTRACE=1`;
//! - If you want only panics to have backtraces, set `RUST_BACKTRACE=1` and
//! `RUST_LIB_BACKTRACE=0`.
//!
//! The tracking issue for this feature is [rust-lang/rust#53487].
//!
//! [`std::backtrace`]: https://doc.rust-lang.org/std/backtrace/index.html#environment-variables
//! [rust-lang/rust#53487]: https://github.com/rust-lang/rust/issues/53487
//!
//! - Anyhow works with any error type that has an impl of `std::error::Error`,
//! including ones defined in your crate. We do not bundle a `derive(Error)`
//! macro but you can write the impls yourself or use a standalone macro like
//! [thiserror].
//!
//! [thiserror]: https://github.com/dtolnay/thiserror
//!
//! ```
//! use thiserror::Error;
//!
//! #[derive(Error, Debug)]
//! pub enum FormatError {
//! #[error("Invalid header (expected {expected:?}, got {found:?})")]
//! InvalidHeader {
//! expected: String,
//! found: String,
//! },
//! #[error("Missing attribute: {0}")]
//! MissingAttribute(String),
//! }
//! ```
//!
//! - One-off error messages can be constructed using the `anyhow!` macro, which
//! supports string interpolation and produces an `anyhow::Error`.
//!
//! ```
//! # use anyhow::{anyhow, Result};
//! #
//! # fn demo() -> Result<()> {
//! # let missing = "...";
//! return Err(anyhow!("Missing attribute: {}", missing));
//! # Ok(())
//! # }
//! ```
//!
//! <br>
//!
//! # No-std support
//!
//! In no_std mode, almost all of the same API is available and works the same way.
//! To depend on Anyhow in no_std mode, disable our default enabled "std"
//! feature in Cargo.toml. A global allocator is required.
//!
//! ```toml
//! [dependencies]
//! anyhow = { version = "1.0", default-features = false }
//! ```
//!
//! Since the `?`-based error conversions would normally rely on the
//! `std::error::Error` trait which is only available through std, no_std mode
//! will require an explicit `.map_err(Error::msg)` when working with a
//! non-Anyhow error type inside a function that returns Anyhow's error type.
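//!
//! A brief sketch of that pattern (the function and error type here are
//! illustrative, not part of this crate's API):
//!
//! ```
//! use anyhow::{Error, Result};
//!
//! fn parse_port(s: &str) -> Result<u16> {
//!     // With the default "std" feature, `?` alone would convert the
//!     // ParseIntError; without it, route the error through Error::msg.
//!     let port = s.parse::<u16>().map_err(Error::msg)?;
//!     Ok(port)
//! }
//! ```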
#![doc(html_root_url = "https://docs.rs/anyhow/1.0.28")]
#![cfg_attr(backtrace, feature(backtrace))]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#![allow(
clippy::needless_doctest_main,
clippy::new_ret_no_self,
clippy::wrong_self_convention
)]
mod alloc {
#[cfg(not(feature = "std"))]
extern crate alloc;
#[cfg(not(feature = "std"))]
pub use alloc::boxed::Box;
#[cfg(feature = "std")]
pub use std::boxed::Box;
}
#[macro_use]
mod backtrace;
mod chain;
mod context;
mod error;
mod fmt;
mod kind;
mod macros;
mod wrapper;
use crate::alloc::Box;
use crate::error::ErrorImpl;
use core::fmt::Display;
use core::mem::ManuallyDrop;
#[cfg(not(feature = "std"))]
use core::fmt::Debug;
#[cfg(feature = "std")]
use std::error::Error as StdError;
#[cfg(not(feature = "std"))]
trait StdError: Debug + Display {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
None
}
}
pub use anyhow as format_err;
/// The `Error` type, a wrapper around a dynamic error type.
///
/// `Error` works a lot like `Box<dyn std::error::Error>`, but with these
/// differences:
///
/// - `Error` requires that the error is `Send`, `Sync`, and `'static`.
/// - `Error` guarantees that a backtrace is available, even if the underlying
/// error type does not provide one.
/// - `Error` is represented as a narrow pointer &mdash; exactly one word in
/// size instead of two.
///
/// <br>
///
/// # Display representations
///
/// When you print an error object using "{}" or to_string(), only the outermost
/// underlying error or context is printed, not any of the lower level causes.
/// This is exactly as if you had called the Display impl of the error from
/// which you constructed your anyhow::Error.
///
/// ```console
/// Failed to read instrs from ./path/to/instrs.json
/// ```
///
/// To print causes as well using anyhow's default formatting of causes, use the
/// alternate selector "{:#}".
///
/// ```console
/// Failed to read instrs from ./path/to/instrs.json: No such file or directory (os error 2)
/// ```
///
/// The Debug format "{:?}" includes your backtrace if one was captured. Note
/// that this is the representation you get by default if you return an error
/// from `fn main` instead of printing it explicitly yourself.
///
/// ```console
/// Error: Failed to read instrs from ./path/to/instrs.json
///
/// Caused by:
/// No such file or directory (os error 2)
///
/// Stack backtrace:
/// 0: <E as anyhow::context::ext::StdError>::ext_context
/// at /git/anyhow/src/backtrace.rs:26
/// 1: core::result::Result<T,E>::map_err
/// at /git/rustc/src/libcore/result.rs:596
/// 2: anyhow::context::<impl anyhow::Context<T,E> for core::result::Result<T,E>>::with_context
/// at /git/anyhow/src/context.rs:58
/// 3: testing::main
/// at src/main.rs:5
/// 4: std::rt::lang_start
/// at /git/rustc/src/libstd/rt.rs:61
/// 5: main
/// 6: __libc_start_main
/// 7: _start
/// ```
///
/// To see a conventional struct-style Debug representation, use "{:#?}".
///
/// ```console
/// Error {
/// context: "Failed to read instrs from ./path/to/instrs.json",
/// source: Os {
/// code: 2,
/// kind: NotFound,
/// message: "No such file or directory",
/// },
/// }
/// ```
///
/// If none of the built-in representations are appropriate and you would prefer
/// to render the error and its cause chain yourself, it can be done something
/// like this:
///
/// ```
/// use anyhow::{Context, Result};
///
/// fn main() {
/// if let Err(err) = try_main() {
/// eprintln!("ERROR: {}", err);
/// err.chain().skip(1).for_each(|cause| eprintln!("because: {}", cause));
/// std::process::exit(1);
/// }
/// }
///
/// fn try_main() -> Result<()> {
/// # const IGNORE: &str = stringify! {
/// ...
/// # };
/// # Ok(())
/// }
/// ```
pub struct Error {
inner: ManuallyDrop<Box<ErrorImpl<()>>>,
}
/// Iterator of a chain of source errors.
///
/// This type is the iterator returned by [`Error::chain`].
///
/// # Example
///
/// ```
/// use anyhow::Error;
/// use std::io;
///
/// pub fn underlying_io_error_kind(error: &Error) -> Option<io::ErrorKind> {
/// for cause in error.chain() {
/// if let Some(io_error) = cause.downcast_ref::<io::Error>() {
/// return Some(io_error.kind());
/// }
/// }
/// None
/// }
/// ```
#[cfg(feature = "std")]
#[derive(Clone)]
pub struct Chain<'a> {
state: crate::chain::ChainState<'a>,
}
/// `Result<T, Error>`
///
/// This is a reasonable return type to use throughout your application but also
/// for `fn main`; if you do, failures will be printed along with any
/// [context][Context] and a backtrace if one was captured.
///
/// `anyhow::Result` may be used with one *or* two type parameters.
///
/// ```rust
/// use anyhow::Result;
///
/// # const IGNORE: &str = stringify! {
/// fn demo1() -> Result<T> {...}
/// // ^ equivalent to std::result::Result<T, anyhow::Error>
///
/// fn demo2() -> Result<T, OtherError> {...}
/// // ^ equivalent to std::result::Result<T, OtherError>
/// # };
/// ```
///
/// # Example
///
/// ```
/// # pub trait Deserialize {}
/// #
/// # mod serde_json {
/// # use super::Deserialize;
/// # use std::io;
/// #
/// # pub fn from_str<T: Deserialize>(json: &str) -> io::Result<T> {
/// # unimplemented!()
/// # }
/// # }
/// #
/// # #[derive(Debug)]
/// # struct ClusterMap;
/// #
/// # impl Deserialize for ClusterMap {}
/// #
/// use anyhow::Result;
///
/// fn main() -> Result<()> {
/// # return Ok(());
/// let config = std::fs::read_to_string("cluster.json")?;
/// let map: ClusterMap = serde_json::from_str(&config)?;
/// println!("cluster info: {:#?}", map);
/// Ok(())
/// }
/// ```
pub type Result<T, E = Error> = core::result::Result<T, E>;
/// Provides the `context` method for `Result`.
///
/// This trait is sealed and cannot be implemented for types outside of
/// `anyhow`.
///
/// <br>
///
/// # Example
///
/// ```
/// use anyhow::{Context, Result};
/// use std::fs;
/// use std::path::PathBuf;
///
/// pub struct ImportantThing {
/// path: PathBuf,
/// }
///
/// impl ImportantThing {
/// # const IGNORE: &'static str = stringify! {
/// pub fn detach(&mut self) -> Result<()> {...}
/// # };
/// # fn detach(&mut self) -> Result<()> {
/// # unimplemented!()
/// # }
/// }
///
/// pub fn do_it(mut it: ImportantThing) -> Result<Vec<u8>> {
/// it.detach().context("Failed to detach the important thing")?;
///
/// let path = &it.path;
/// let content = fs::read(path)
/// .with_context(|| format!("Failed to read instrs from {}", path.display()))?;
///
/// Ok(content)
/// }
/// ```
///
/// When printed, the outermost context would be printed first and the lower
/// level underlying causes would be enumerated below.
///
/// ```console
/// Error: Failed to read instrs from ./path/to/instrs.json
///
/// Caused by:
/// No such file or directory (os error 2)
/// ```
///
/// <br>
///
/// # Effect on downcasting
///
/// After attaching context of type `C` onto an error of type `E`, the resulting
/// `anyhow::Error` may be downcast to `C` **or** to `E`.
///
/// That is, in codebases that rely on downcasting, Anyhow's context supports
/// both of the following use cases:
///
/// - **Attaching context whose type is insignificant onto errors whose type
/// is used in downcasts.**
///
/// In other error libraries whose context is not designed this way, it can
/// be risky to introduce context to existing code because new context might
/// break existing working downcasts. In Anyhow, any downcast that worked
/// before adding context will continue to work after you add a context, so
/// you should freely add human-readable context to errors wherever it would
/// be helpful.
///
/// ```
/// # use anyhow::bail;
/// # use thiserror::Error;
/// #
/// # #[derive(Error, Debug)]
/// # #[error("???")]
/// # struct SuspiciousError;
/// #
/// # fn helper() -> Result<()> {
/// # bail!(SuspiciousError);
/// # }
/// #
/// use anyhow::{Context, Result};
///
/// fn do_it() -> Result<()> {
/// helper().context("Failed to complete the work")?;
/// # const IGNORE: &str = stringify! {
/// ...
/// # };
/// # unreachable!()
/// }
///
/// fn main() {
/// let err = do_it().unwrap_err();
/// if let Some(e) = err.downcast_ref::<SuspiciousError>() {
/// // If helper() returned SuspiciousError, this downcast will
/// // correctly succeed even with the context in between.
/// # return;
/// }
/// # panic!("expected downcast to succeed");
/// }
/// ```
///
/// - **Attaching context whose type is used in downcasts onto errors whose
/// type is insignificant.**
///
/// Some codebases prefer to use machine-readable context to categorize
/// lower level errors in a way that will be actionable to higher levels of
/// the application.
///
/// ```
/// # use anyhow::bail;
/// # use thiserror::Error;
/// #
/// # #[derive(Error, Debug)]
/// # #[error("???")]
/// # struct HelperFailed;
/// #
/// # fn helper() -> Result<()> {
/// # bail!("no such file or directory");
/// # }
/// #
/// use anyhow::{Context, Result};
///
/// fn do_it() -> Result<()> {
/// helper().context(HelperFailed)?;
/// # const IGNORE: &str = stringify! {
/// ...
/// # };
/// # unreachable!()
/// }
///
/// fn main() {
/// let err = do_it().unwrap_err();
/// if let Some(e) = err.downcast_ref::<HelperFailed>() {
/// // If helper failed, this downcast will succeed because
/// // HelperFailed is the context that has been attached to
/// // that error.
/// # return;
/// }
/// # panic!("expected downcast to succeed");
/// }
/// ```
pub trait Context<T, E>: context::private::Sealed {
/// Wrap the error value with additional context.
fn context<C>(self, context: C) -> Result<T, Error>
where
C: Display + Send + Sync + 'static;
/// Wrap the error value with additional context that is evaluated lazily
/// only once an error does occur.
fn with_context<C, F>(self, f: F) -> Result<T, Error>
where
C: Display + Send + Sync + 'static,
F: FnOnce() -> C;
}
// Not public API. Referenced by macro-generated code.
#[doc(hidden)]
pub mod private {
use crate::Error;
use core::fmt::{Debug, Display};
#[cfg(backtrace)]
use std::backtrace::Backtrace;
pub use core::result::Result::Err;
#[doc(hidden)]
pub mod kind {
pub use crate::kind::{AdhocKind, TraitKind};
#[cfg(feature = "std")]
pub use crate::kind::BoxedKind;
}
pub fn new_adhoc<M>(message: M) -> Error
where
M: Display + Debug + Send + Sync + 'static,
{
Error::from_adhoc(message, backtrace!())
}
}

163
third_party/rust/anyhow/src/macros.rs vendored

@@ -1,163 +0,0 @@
/// Return early with an error.
///
/// This macro is equivalent to `return Err(From::from($err))`.
///
/// # Example
///
/// ```
/// # use anyhow::{bail, Result};
/// #
/// # fn has_permission(user: usize, resource: usize) -> bool {
/// # true
/// # }
/// #
/// # fn main() -> Result<()> {
/// # let user = 0;
/// # let resource = 0;
/// #
/// if !has_permission(user, resource) {
/// bail!("permission denied for accessing {}", resource);
/// }
/// # Ok(())
/// # }
/// ```
///
/// ```
/// # use anyhow::{bail, Result};
/// # use thiserror::Error;
/// #
/// # const MAX_DEPTH: usize = 1;
/// #
/// #[derive(Error, Debug)]
/// enum ScienceError {
/// #[error("recursion limit exceeded")]
/// RecursionLimitExceeded,
/// # #[error("...")]
/// # More = (stringify! {
/// ...
/// # }, 1).1,
/// }
///
/// # fn main() -> Result<()> {
/// # let depth = 0;
/// #
/// if depth > MAX_DEPTH {
/// bail!(ScienceError::RecursionLimitExceeded);
/// }
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! bail {
($msg:literal $(,)?) => {
return $crate::private::Err($crate::anyhow!($msg));
};
($err:expr $(,)?) => {
return $crate::private::Err($crate::anyhow!($err));
};
($fmt:expr, $($arg:tt)*) => {
return $crate::private::Err($crate::anyhow!($fmt, $($arg)*));
};
}
/// Return early with an error if a condition is not satisfied.
///
/// This macro is equivalent to `if !$cond { return Err(From::from($err)); }`.
///
/// Analogously to `assert!`, `ensure!` takes a condition and exits the function
/// if the condition fails. Unlike `assert!`, `ensure!` returns an `Error`
/// rather than panicking.
///
/// # Example
///
/// ```
/// # use anyhow::{ensure, Result};
/// #
/// # fn main() -> Result<()> {
/// # let user = 0;
/// #
/// ensure!(user == 0, "only user 0 is allowed");
/// # Ok(())
/// # }
/// ```
///
/// ```
/// # use anyhow::{ensure, Result};
/// # use thiserror::Error;
/// #
/// # const MAX_DEPTH: usize = 1;
/// #
/// #[derive(Error, Debug)]
/// enum ScienceError {
/// #[error("recursion limit exceeded")]
/// RecursionLimitExceeded,
/// # #[error("...")]
/// # More = (stringify! {
/// ...
/// # }, 1).1,
/// }
///
/// # fn main() -> Result<()> {
/// # let depth = 0;
/// #
/// ensure!(depth <= MAX_DEPTH, ScienceError::RecursionLimitExceeded);
/// # Ok(())
/// # }
/// ```
#[macro_export]
macro_rules! ensure {
($cond:expr, $msg:literal $(,)?) => {
if !$cond {
return $crate::private::Err($crate::anyhow!($msg));
}
};
($cond:expr, $err:expr $(,)?) => {
if !$cond {
return $crate::private::Err($crate::anyhow!($err));
}
};
($cond:expr, $fmt:expr, $($arg:tt)*) => {
if !$cond {
return $crate::private::Err($crate::anyhow!($fmt, $($arg)*));
}
};
}
/// Construct an ad-hoc error from a string.
///
/// This evaluates to an `Error`. It can take either just a string, or a format
/// string with arguments. It also can take any custom type which implements
/// `Debug` and `Display`.
///
/// # Example
///
/// ```
/// # type V = ();
/// #
/// use anyhow::{anyhow, Result};
///
/// fn lookup(key: &str) -> Result<V> {
/// if key.len() != 16 {
/// return Err(anyhow!("key length must be 16 characters, got {:?}", key));
/// }
///
/// // ...
/// # Ok(())
/// }
/// ```
#[macro_export]
macro_rules! anyhow {
($msg:literal $(,)?) => {
// Handle $:literal as a special case to make cargo-expanded code more
// concise in the common case.
$crate::private::new_adhoc($msg)
};
($err:expr $(,)?) => ({
use $crate::private::kind::*;
let error = $err;
(&error).anyhow_kind().new(error)
});
($fmt:expr, $($arg:tt)*) => {
$crate::private::new_adhoc(format!($fmt, $($arg)*))
};
}

78
third_party/rust/anyhow/src/wrapper.rs vendored

@@ -1,78 +0,0 @@
use crate::StdError;
use core::fmt::{self, Debug, Display};
#[repr(transparent)]
pub struct MessageError<M>(pub M);
impl<M> Debug for MessageError<M>
where
M: Display + Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(&self.0, f)
}
}
impl<M> Display for MessageError<M>
where
M: Display + Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.0, f)
}
}
impl<M> StdError for MessageError<M> where M: Display + Debug + 'static {}
#[repr(transparent)]
pub struct DisplayError<M>(pub M);
impl<M> Debug for DisplayError<M>
where
M: Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.0, f)
}
}
impl<M> Display for DisplayError<M>
where
M: Display,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.0, f)
}
}
impl<M> StdError for DisplayError<M> where M: Display + 'static {}
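// A standalone usage sketch (hypothetical functions, not part of this file):
// MessageError lends a std error impl to a value that only has Display and
// Debug, which is what lets ad-hoc message values travel through machinery
// that expects a real error type.
fn fails_sketch() -> Result<(), MessageError<String>> {
    Err(MessageError(String::from("the widget was not ready")))
}
fn run_sketch() -> Result<(), Box<dyn StdError + Send + Sync>> {
    // `?` converts here because MessageError<String> implements StdError.
    fails_sketch()?;
    Ok(())
}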
#[cfg(feature = "std")]
#[repr(transparent)]
pub struct BoxedError(pub Box<dyn StdError + Send + Sync>);
#[cfg(feature = "std")]
impl Debug for BoxedError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Debug::fmt(&self.0, f)
}
}
#[cfg(feature = "std")]
impl Display for BoxedError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.0, f)
}
}
#[cfg(feature = "std")]
impl StdError for BoxedError {
#[cfg(backtrace)]
fn backtrace(&self) -> Option<&crate::backtrace::Backtrace> {
self.0.backtrace()
}
fn source(&self) -> Option<&(dyn StdError + 'static)> {
self.0.source()
}
}

14
third_party/rust/anyhow/tests/common/mod.rs vendored

@@ -1,14 +0,0 @@
use anyhow::{bail, Result};
use std::io;
pub fn bail_literal() -> Result<()> {
bail!("oh no!");
}
pub fn bail_fmt() -> Result<()> {
bail!("{} {}!", "oh", "no");
}
pub fn bail_error() -> Result<()> {
bail!(io::Error::new(io::ErrorKind::Other, "oh no!"));
}


@@ -1,6 +0,0 @@
#[rustversion::attr(not(nightly), ignore)]
#[test]
fn ui() {
let t = trybuild::TestCases::new();
t.compile_fail("tests/ui/*.rs");
}

52
third_party/rust/anyhow/tests/drop/mod.rs vendored

@@ -1,52 +0,0 @@
use std::error::Error as StdError;
use std::fmt::{self, Display};
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering::SeqCst;
use std::sync::Arc;
#[derive(Debug)]
pub struct Flag {
atomic: Arc<AtomicBool>,
}
impl Flag {
pub fn new() -> Self {
Flag {
atomic: Arc::new(AtomicBool::new(false)),
}
}
pub fn get(&self) -> bool {
self.atomic.load(SeqCst)
}
}
#[derive(Debug)]
pub struct DetectDrop {
has_dropped: Flag,
}
impl DetectDrop {
pub fn new(has_dropped: &Flag) -> Self {
DetectDrop {
has_dropped: Flag {
atomic: Arc::clone(&has_dropped.atomic),
},
}
}
}
impl StdError for DetectDrop {}
impl Display for DetectDrop {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "oh no!")
}
}
impl Drop for DetectDrop {
fn drop(&mut self) {
let already_dropped = self.has_dropped.atomic.swap(true, SeqCst);
assert!(!already_dropped);
}
}


@@ -1,13 +0,0 @@
use anyhow::Error;
#[test]
fn test_send() {
fn assert_send<T: Send>() {}
assert_send::<Error>();
}
#[test]
fn test_sync() {
fn assert_sync<T: Sync>() {}
assert_sync::<Error>();
}


@@ -1,13 +0,0 @@
#[rustversion::not(nightly)]
#[ignore]
#[test]
fn test_backtrace() {}
#[rustversion::nightly]
#[test]
fn test_backtrace() {
use anyhow::anyhow;
let error = anyhow!("oh no!");
let _ = error.backtrace();
}

40
third_party/rust/anyhow/tests/test_boxed.rs vendored

@@ -1,40 +0,0 @@
use anyhow::anyhow;
use std::error::Error as StdError;
use std::io;
use thiserror::Error;
#[derive(Error, Debug)]
#[error("outer")]
struct MyError {
source: io::Error,
}
#[test]
fn test_boxed_str() {
let error = Box::<dyn StdError + Send + Sync>::from("oh no!");
let error = anyhow!(error);
assert_eq!("oh no!", error.to_string());
assert_eq!(
"oh no!",
error
.downcast_ref::<Box<dyn StdError + Send + Sync>>()
.unwrap()
.to_string()
);
}
#[test]
fn test_boxed_thiserror() {
let error = MyError {
source: io::Error::new(io::ErrorKind::Other, "oh no!"),
};
let error = anyhow!(error);
assert_eq!("oh no!", error.source().unwrap().to_string());
}
#[test]
fn test_boxed_anyhow() {
let error = anyhow!("oh no!").context("it failed");
let error = anyhow!(error);
assert_eq!("oh no!", error.source().unwrap().to_string());
}

45
third_party/rust/anyhow/tests/test_chain.rs vendored

@@ -1,45 +0,0 @@
use anyhow::{anyhow, Error};
fn error() -> Error {
anyhow!(0).context(1).context(2).context(3)
}
#[test]
fn test_iter() {
let e = error();
let mut chain = e.chain();
assert_eq!("3", chain.next().unwrap().to_string());
assert_eq!("2", chain.next().unwrap().to_string());
assert_eq!("1", chain.next().unwrap().to_string());
assert_eq!("0", chain.next().unwrap().to_string());
assert!(chain.next().is_none());
assert!(chain.next_back().is_none());
}
#[test]
fn test_rev() {
let e = error();
let mut chain = e.chain().rev();
assert_eq!("0", chain.next().unwrap().to_string());
assert_eq!("1", chain.next().unwrap().to_string());
assert_eq!("2", chain.next().unwrap().to_string());
assert_eq!("3", chain.next().unwrap().to_string());
assert!(chain.next().is_none());
assert!(chain.next_back().is_none());
}
#[test]
fn test_len() {
let e = error();
let mut chain = e.chain();
assert_eq!(4, chain.len());
assert_eq!("3", chain.next().unwrap().to_string());
assert_eq!(3, chain.len());
assert_eq!("0", chain.next_back().unwrap().to_string());
assert_eq!(2, chain.len());
assert_eq!("2", chain.next().unwrap().to_string());
assert_eq!(1, chain.len());
assert_eq!("1", chain.next_back().unwrap().to_string());
assert_eq!(0, chain.len());
assert!(chain.next().is_none());
}

159
third_party/rust/anyhow/tests/test_context.rs vendored

@@ -1,159 +0,0 @@
mod drop;
use crate::drop::{DetectDrop, Flag};
use anyhow::{Context, Error, Result};
use std::fmt::{self, Display};
use thiserror::Error;
// https://github.com/dtolnay/anyhow/issues/18
#[test]
fn test_inference() -> Result<()> {
let x = "1";
let y: u32 = x.parse().context("...")?;
assert_eq!(y, 1);
Ok(())
}
macro_rules! context_type {
($name:ident) => {
#[derive(Debug)]
struct $name {
message: &'static str,
drop: DetectDrop,
}
impl Display for $name {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.message)
}
}
};
}
context_type!(HighLevel);
context_type!(MidLevel);
#[derive(Error, Debug)]
#[error("{message}")]
struct LowLevel {
message: &'static str,
drop: DetectDrop,
}
struct Dropped {
low: Flag,
mid: Flag,
high: Flag,
}
impl Dropped {
fn none(&self) -> bool {
!self.low.get() && !self.mid.get() && !self.high.get()
}
fn all(&self) -> bool {
self.low.get() && self.mid.get() && self.high.get()
}
}
fn make_chain() -> (Error, Dropped) {
let dropped = Dropped {
low: Flag::new(),
mid: Flag::new(),
high: Flag::new(),
};
let low = LowLevel {
message: "no such file or directory",
drop: DetectDrop::new(&dropped.low),
};
// impl Context for Result<T, E>
let mid = Err::<(), LowLevel>(low)
.context(MidLevel {
message: "failed to load config",
drop: DetectDrop::new(&dropped.mid),
})
.unwrap_err();
// impl Context for Result<T, Error>
let high = Err::<(), Error>(mid)
.context(HighLevel {
message: "failed to start server",
drop: DetectDrop::new(&dropped.high),
})
.unwrap_err();
(high, dropped)
}
#[test]
fn test_downcast_ref() {
let (err, dropped) = make_chain();
assert!(!err.is::<String>());
assert!(err.downcast_ref::<String>().is_none());
assert!(err.is::<HighLevel>());
let high = err.downcast_ref::<HighLevel>().unwrap();
assert_eq!(high.to_string(), "failed to start server");
assert!(err.is::<MidLevel>());
let mid = err.downcast_ref::<MidLevel>().unwrap();
assert_eq!(mid.to_string(), "failed to load config");
assert!(err.is::<LowLevel>());
let low = err.downcast_ref::<LowLevel>().unwrap();
assert_eq!(low.to_string(), "no such file or directory");
assert!(dropped.none());
drop(err);
assert!(dropped.all());
}
#[test]
fn test_downcast_high() {
let (err, dropped) = make_chain();
let err = err.downcast::<HighLevel>().unwrap();
assert!(!dropped.high.get());
assert!(dropped.low.get() && dropped.mid.get());
drop(err);
assert!(dropped.all());
}
#[test]
fn test_downcast_mid() {
let (err, dropped) = make_chain();
let err = err.downcast::<MidLevel>().unwrap();
assert!(!dropped.mid.get());
assert!(dropped.low.get() && dropped.high.get());
drop(err);
assert!(dropped.all());
}
#[test]
fn test_downcast_low() {
let (err, dropped) = make_chain();
let err = err.downcast::<LowLevel>().unwrap();
assert!(!dropped.low.get());
assert!(dropped.mid.get() && dropped.high.get());
drop(err);
assert!(dropped.all());
}
#[test]
fn test_unsuccessful_downcast() {
let (err, dropped) = make_chain();
let err = err.downcast::<String>().unwrap_err();
assert!(dropped.none());
drop(err);
assert!(dropped.all());
}

24
third_party/rust/anyhow/tests/test_convert.rs vendored

@@ -1,24 +0,0 @@
mod drop;
use self::drop::{DetectDrop, Flag};
use anyhow::{Error, Result};
use std::error::Error as StdError;
#[test]
fn test_convert() {
let has_dropped = Flag::new();
let error = Error::new(DetectDrop::new(&has_dropped));
let box_dyn = Box::<dyn StdError + Send + Sync>::from(error);
assert_eq!("oh no!", box_dyn.to_string());
drop(box_dyn);
assert!(has_dropped.get());
}
#[test]
fn test_question_mark() -> Result<(), Box<dyn StdError>> {
fn f() -> Result<()> {
Ok(())
}
f()?;
Ok(())
}

106
third_party/rust/anyhow/tests/test_downcast.rs vendored

@@ -1,106 +0,0 @@
mod common;
mod drop;
use self::common::*;
use self::drop::{DetectDrop, Flag};
use anyhow::Error;
use std::error::Error as StdError;
use std::fmt::{self, Display};
use std::io;
#[test]
fn test_downcast() {
assert_eq!(
"oh no!",
bail_literal().unwrap_err().downcast::<&str>().unwrap(),
);
assert_eq!(
"oh no!",
bail_fmt().unwrap_err().downcast::<String>().unwrap(),
);
assert_eq!(
"oh no!",
bail_error()
.unwrap_err()
.downcast::<io::Error>()
.unwrap()
.to_string(),
);
}
#[test]
fn test_downcast_ref() {
assert_eq!(
"oh no!",
*bail_literal().unwrap_err().downcast_ref::<&str>().unwrap(),
);
assert_eq!(
"oh no!",
bail_fmt().unwrap_err().downcast_ref::<String>().unwrap(),
);
assert_eq!(
"oh no!",
bail_error()
.unwrap_err()
.downcast_ref::<io::Error>()
.unwrap()
.to_string(),
);
}
#[test]
fn test_downcast_mut() {
assert_eq!(
"oh no!",
*bail_literal().unwrap_err().downcast_mut::<&str>().unwrap(),
);
assert_eq!(
"oh no!",
bail_fmt().unwrap_err().downcast_mut::<String>().unwrap(),
);
assert_eq!(
"oh no!",
bail_error()
.unwrap_err()
.downcast_mut::<io::Error>()
.unwrap()
.to_string(),
);
}
#[test]
fn test_drop() {
let has_dropped = Flag::new();
let error = Error::new(DetectDrop::new(&has_dropped));
drop(error.downcast::<DetectDrop>().unwrap());
assert!(has_dropped.get());
}
#[test]
fn test_large_alignment() {
#[repr(align(64))]
#[derive(Debug)]
struct LargeAlignedError(&'static str);
impl Display for LargeAlignedError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(self.0)
}
}
impl StdError for LargeAlignedError {}
let error = Error::new(LargeAlignedError("oh no!"));
assert_eq!(
"oh no!",
error.downcast_ref::<LargeAlignedError>().unwrap().0
);
}
#[test]
fn test_unsuccessful_downcast() {
let mut error = bail_error().unwrap_err();
assert!(error.downcast_ref::<&str>().is_none());
assert!(error.downcast_mut::<&str>().is_none());
assert!(error.downcast::<&str>().is_err());
}

94
third_party/rust/anyhow/tests/test_fmt.rs vendored

@@ -1,94 +0,0 @@
use anyhow::{bail, Context, Result};
use std::io;
fn f() -> Result<()> {
bail!(io::Error::new(io::ErrorKind::PermissionDenied, "oh no!"));
}
fn g() -> Result<()> {
f().context("f failed")
}
fn h() -> Result<()> {
g().context("g failed")
}
const EXPECTED_ALTDISPLAY_F: &str = "oh no!";
const EXPECTED_ALTDISPLAY_G: &str = "f failed: oh no!";
const EXPECTED_ALTDISPLAY_H: &str = "g failed: f failed: oh no!";
const EXPECTED_DEBUG_F: &str = "oh no!";
const EXPECTED_DEBUG_G: &str = "\
f failed
Caused by:
oh no!\
";
const EXPECTED_DEBUG_H: &str = "\
g failed
Caused by:
0: f failed
1: oh no!\
";
const EXPECTED_ALTDEBUG_F: &str = "\
Custom {
kind: PermissionDenied,
error: \"oh no!\",
}\
";
const EXPECTED_ALTDEBUG_G: &str = "\
Error {
context: \"f failed\",
source: Custom {
kind: PermissionDenied,
error: \"oh no!\",
},
}\
";
const EXPECTED_ALTDEBUG_H: &str = "\
Error {
context: \"g failed\",
source: Error {
context: \"f failed\",
source: Custom {
kind: PermissionDenied,
error: \"oh no!\",
},
},
}\
";
#[test]
fn test_display() {
assert_eq!("g failed", h().unwrap_err().to_string());
}
#[test]
fn test_altdisplay() {
assert_eq!(EXPECTED_ALTDISPLAY_F, format!("{:#}", f().unwrap_err()));
assert_eq!(EXPECTED_ALTDISPLAY_G, format!("{:#}", g().unwrap_err()));
assert_eq!(EXPECTED_ALTDISPLAY_H, format!("{:#}", h().unwrap_err()));
}
#[test]
#[cfg_attr(not(backtrace), ignore)]
fn test_debug() {
assert_eq!(EXPECTED_DEBUG_F, format!("{:?}", f().unwrap_err()));
assert_eq!(EXPECTED_DEBUG_G, format!("{:?}", g().unwrap_err()));
assert_eq!(EXPECTED_DEBUG_H, format!("{:?}", h().unwrap_err()));
}
#[test]
fn test_altdebug() {
assert_eq!(EXPECTED_ALTDEBUG_F, format!("{:#?}", f().unwrap_err()));
assert_eq!(EXPECTED_ALTDEBUG_G, format!("{:#?}", g().unwrap_err()));
assert_eq!(EXPECTED_ALTDEBUG_H, format!("{:#?}", h().unwrap_err()));
}

33
third_party/rust/anyhow/tests/test_macros.rs vendored

@@ -1,33 +0,0 @@
mod common;
use self::common::*;
use anyhow::ensure;
#[test]
fn test_messages() {
assert_eq!("oh no!", bail_literal().unwrap_err().to_string());
assert_eq!("oh no!", bail_fmt().unwrap_err().to_string());
assert_eq!("oh no!", bail_error().unwrap_err().to_string());
}
#[test]
fn test_ensure() {
let f = || {
ensure!(1 + 1 == 2, "This is correct");
Ok(())
};
assert!(f().is_ok());
let v = 1;
let f = || {
ensure!(v + v == 2, "This is correct, v: {}", v);
Ok(())
};
assert!(f().is_ok());
let f = || {
ensure!(v + v == 1, "This is not correct, v: {}", v);
Ok(())
};
assert!(f().is_err());
}

29
third_party/rust/anyhow/tests/test_repr.rs vendored

@@ -1,29 +0,0 @@
mod drop;
use self::drop::{DetectDrop, Flag};
use anyhow::Error;
use std::marker::Unpin;
use std::mem;
#[test]
fn test_error_size() {
assert_eq!(mem::size_of::<Error>(), mem::size_of::<usize>());
}
#[test]
fn test_null_pointer_optimization() {
assert_eq!(mem::size_of::<Result<(), Error>>(), mem::size_of::<usize>());
}
#[test]
fn test_autotraits() {
fn assert<E: Unpin + Send + Sync + 'static>() {}
assert::<Error>();
}
#[test]
fn test_drop() {
let has_dropped = Flag::new();
drop(Error::new(DetectDrop::new(&has_dropped)));
assert!(has_dropped.get());
}

62
third_party/rust/anyhow/tests/test_source.rs vendored

@@ -1,62 +0,0 @@
use anyhow::anyhow;
use std::error::Error as StdError;
use std::fmt::{self, Display};
use std::io;
#[derive(Debug)]
enum TestError {
Io(io::Error),
}
impl Display for TestError {
fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
match self {
TestError::Io(e) => Display::fmt(e, formatter),
}
}
}
impl StdError for TestError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
TestError::Io(io) => Some(io),
}
}
}
#[test]
fn test_literal_source() {
let error = anyhow!("oh no!");
assert!(error.source().is_none());
}
#[test]
fn test_variable_source() {
let msg = "oh no!";
let error = anyhow!(msg);
assert!(error.source().is_none());
let msg = msg.to_owned();
let error = anyhow!(msg);
assert!(error.source().is_none());
}
#[test]
fn test_fmt_source() {
let error = anyhow!("{} {}!", "oh", "no");
assert!(error.source().is_none());
}
#[test]
fn test_io_source() {
let io = io::Error::new(io::ErrorKind::Other, "oh no!");
let error = anyhow!(TestError::Io(io));
assert_eq!("oh no!", error.source().unwrap().to_string());
}
#[test]
fn test_anyhow_from_anyhow() {
let error = anyhow!("oh no!").context("context");
let error = anyhow!(error);
assert_eq!("oh no!", error.source().unwrap().to_string());
}

8
third_party/rust/anyhow/tests/ui/no-impl.rs vendored

@@ -1,8 +0,0 @@
use anyhow::anyhow;
#[derive(Debug)]
struct Error;
fn main() {
let _ = anyhow!(Error);
}


@@ -1,21 +0,0 @@
error[E0599]: no method named `anyhow_kind` found for reference `&Error` in the current scope
--> $DIR/no-impl.rs:7:13
|
4 | struct Error;
| -------------
| |
| doesn't satisfy `Error: anyhow::kind::TraitKind`
| doesn't satisfy `Error: std::convert::Into<anyhow::Error>`
| doesn't satisfy `Error: std::fmt::Display`
...
7 | let _ = anyhow!(Error);
| ^^^^^^^^^^^^^^ method not found in `&Error`
|
= note: the method `anyhow_kind` exists but the following trait bounds were not satisfied:
`Error: std::convert::Into<anyhow::Error>`
which is required by `Error: anyhow::kind::TraitKind`
`Error: std::fmt::Display`
which is required by `&Error: anyhow::kind::AdhocKind`
`&Error: std::convert::Into<anyhow::Error>`
which is required by `&Error: anyhow::kind::TraitKind`
= note: this error originates in a macro (in Nightly builds, run with -Z macro-backtrace for more info)
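// A hedged sketch (not part of the vendored test suite) of one way to satisfy
// the bounds listed above: giving the type a Display impl lets anyhow!(Error)
// resolve through the ad-hoc path; an Into<anyhow::Error> or std::error::Error
// impl would enable the other dispatch paths instead.
use std::fmt;
#[derive(Debug)]
struct Error;
impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("something went wrong")
    }
}
fn main() {
    let _ = anyhow::anyhow!(Error);
}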


@@ -1 +1 @@
{"files":{"Cargo.toml":"21b095cc85324ada8cf714deb719e3e892f2f5222538e063db56fde0f81bd17c","src/lib.rs":"4581b12eb58f9fb5275c7af74fbc4521b82ef224b6ba81f0e785c372ba95f8c6"},"package":null}
{"files":{"Cargo.toml":"9ba6f30454cfbe5cc844824a89f31b65d607df6aec569d093eb6307d902c5159","src/lib.rs":"4581b12eb58f9fb5275c7af74fbc4521b82ef224b6ba81f0e785c372ba95f8c6"},"package":null}

2
third_party/rust/error-support/Cargo.toml vendored

@@ -6,5 +6,5 @@ edition = "2018"
license = "MPL-2.0"
[dependencies]
failure = "0.1"
failure = "0.1.6"


@@ -1 +1 @@
{"files":{"CHANGELOG.md":"30fa41d2cd20250b4d4defdef99cd18843b38f7991e8eed40801b94b8542ce13","Cargo.lock":"147a02b888dc8555251ba130e61c87afbd3c2af8ccac923b22773904ddb0a85e","Cargo.toml":"79f3565fce8df325c6096c50594228bfd7bfb9cef2dba1fc25ea48ba2fe9e5a0","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"d7203c0e18700de8e1408c3f4bb96075df271fea9ab678f6ad2e09855aefa7ed","examples/bench.rs":"1597a52529f75d6c5ad0b86759a775b1d723dfa810e2016317283b13594219da","examples/bench_acquire.rs":"3956f01158abaa1e15f1c633e6f96caf1257bca7bb4311c9568fdbc8347906f9","examples/bench_vs_lazy_static.rs":"d527294a2e73b53ac5faed8b316dfd1ae2a06adb31384134af21f10ce76333a5","examples/lazy_static.rs":"90541b093ed1d1cbb73f4097ff02cf80657e28264d281d6a31d96a708fdfea90","examples/reentrant_init_deadlocks.rs":"ff84929de27a848e5b155549caa96db5db5f030afca975f8ba3f3da640083001","examples/regex.rs":"4a2e0fb093c7f5bbe0fff8689fc0c670c5334344a1bfda376f5faa98a05d459f","examples/test_synchronization.rs":"1fe6828a2bfe5b5fbcaf287fcf02d746e757d89db81a2e32f24b849272dd1e90","src/imp_pl.rs":"2ec567e4a0f3b5adc7399822ced03243aa065b61ac50d81c485e952ddf676c7e","src/imp_std.rs":"bfd6e77afa84defffcc7a8b6bc0226353f95c12fd373866884cd8421c9326446","src/lib.rs":"20f7f610a6ad51dbe59b3dff328c6539520d00cec6d60bcfd3b3c0deb5efd06c","tests/test.rs":"d41dcc82bc03a52a1d1985b155d08de991919ae190a424012981a1e6f395eb20"},"package":"b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b"}
{"files":{"CHANGELOG.md":"e8d8f83b01058deafb7db4e6ed23443393bc721098975c3162d4897915480466","Cargo.lock":"07ab6e70f2cffcc635b1b90b271e063087faf220a4f9329860e4422548246caf","Cargo.toml":"cd3cfed92955b145400903fd12ec2b5e02e34b18f1dfc4c4aab160db9f6e3f91","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"be4ea928065420dba72c551d8c565844e28760b33168f8baa00b50505d0713db","examples/bench.rs":"1597a52529f75d6c5ad0b86759a775b1d723dfa810e2016317283b13594219da","examples/bench_vs_lazy_static.rs":"d527294a2e73b53ac5faed8b316dfd1ae2a06adb31384134af21f10ce76333a5","examples/lazy_static.rs":"90541b093ed1d1cbb73f4097ff02cf80657e28264d281d6a31d96a708fdfea90","examples/reentrant_init_deadlocks.rs":"735005c65e2926a4cd210ad385f50927a8b73e009838fee682e0a5700ca84dd2","examples/regex.rs":"4a2e0fb093c7f5bbe0fff8689fc0c670c5334344a1bfda376f5faa98a05d459f","src/imp_pl.rs":"686ab515374e152622e9c47206e26f96bf063f22de9ea6fb4b4306d20427cc7a","src/imp_std.rs":"4c0ceb6b4af03f9328ffe1f014d42d8a2179745b22d4d943e151ef4fccbf84ab","src/lib.rs":"a5abd4f3e33294aa336a9f6503cad255546202ee147253c46852e7b1adcb3874","tests/test.rs":"eacada5b5ca427dcfa803593002c86af5389811db39d410799fab95920383ec7"},"package":"891f486f630e5c5a4916c7e16c4b24a53e78c860b646e9f8e005e4f16847bfed"}

9
third_party/rust/once_cell/CHANGELOG.md vendored

@@ -1,14 +1,5 @@
# Changelog
## 1.3.1
- remove unnecessary `F: fmt::Debug` bound from `impl fmt::Debug for Lazy<T, F>`.
## 1.3.0
- `Lazy<T>` now implements `DerefMut`.
- update implementation according to the latest changes in `std`.
## 1.2.0
- add `sync::OnceCell::get_unchecked`.

69
third_party/rust/once_cell/Cargo.lock generated vendored

@@ -5,17 +5,17 @@ name = "aho-corasick"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "bitflags"
version = "1.2.1"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "cfg-if"
version = "0.1.10"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -23,7 +23,7 @@ name = "cloudabi"
version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -31,7 +31,7 @@ name = "crossbeam-utils"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -42,35 +42,30 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "libc"
version = "0.2.66"
version = "0.2.62"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "lock_api"
version = "0.3.3"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "maybe-uninit"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memchr"
version = "2.3.0"
version = "2.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "once_cell"
version = "1.3.1"
version = "1.2.0"
dependencies = [
"crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@@ -78,7 +73,7 @@ name = "parking_lot"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
"lock_api 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -88,12 +83,12 @@ name = "parking_lot_core"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)",
"cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)",
"cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)",
"redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
@@ -104,18 +99,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "regex"
version = "1.3.3"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
"memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
"thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "regex-syntax"
version = "0.6.13"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@@ -146,15 +141,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "smallvec"
version = "0.6.13"
version = "0.6.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "thread_local"
version = "1.0.1"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -181,26 +173,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[metadata]
"checksum aho-corasick 0.7.6 (registry+https://github.com/rust-lang/crates.io-index)" = "58fb5e95d83b38284460a5fda7d6470aa0b8844d283a0b614b8535e880800d2d"
"checksum bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
"checksum cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)" = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd"
"checksum cfg-if 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)" = "b486ce3ccf7ffd79fdeb678eac06a9e6c09fc88d33836340becb8fffe87c5e33"
"checksum cloudabi 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f"
"checksum crossbeam-utils 0.6.6 (registry+https://github.com/rust-lang/crates.io-index)" = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6"
"checksum lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
"checksum libc 0.2.66 (registry+https://github.com/rust-lang/crates.io-index)" = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558"
"checksum lock_api 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b"
"checksum maybe-uninit 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00"
"checksum memchr 2.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223"
"checksum libc 0.2.62 (registry+https://github.com/rust-lang/crates.io-index)" = "34fcd2c08d2f832f376f4173a231990fa5aef4e99fb569867318a227ef4c06ba"
"checksum lock_api 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f8912e782533a93a167888781b836336a6ca5da6175c05944c86cf28c31104dc"
"checksum memchr 2.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88579771288728879b57485cc7d6b07d648c9f0141eb955f8ab7f9d45394468e"
"checksum parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252"
"checksum parking_lot_core 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b"
"checksum redox_syscall 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)" = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84"
"checksum regex 1.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "b5508c1941e4e7cb19965abef075d35a9a8b5cdf0846f30b4050e9b55dc55e87"
"checksum regex-syntax 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "e734e891f5b408a29efbf8309e656876276f49ab6a6ac208600b4419bd893d90"
"checksum regex 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "88c3d9193984285d544df4a30c23a4e62ead42edf70a4452ceb76dac1ce05c26"
"checksum regex-syntax 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "b143cceb2ca5e56d5671988ef8b15615733e7ee16cd348e064333b251b89343f"
"checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
"checksum scopeguard 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d"
"checksum semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
"checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
"checksum smallvec 0.6.13 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6"
"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
"checksum smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)" = "ab606a9c5e214920bb66c458cd7be8ef094f813f20fe77a54cc7dbfff220d4b7"
"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
"checksum winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"

third_party/rust/once_cell/Cargo.toml (vendored, 14 lines changed)

@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "once_cell"
version = "1.3.1"
version = "1.2.0"
authors = ["Aleksey Kladov <aleksey.kladov@gmail.com>"]
exclude = ["*.png", "*.svg", "/Cargo.lock.min", "/.travis.yml", "/run-miri-tests.sh", "rustfmt.toml"]
description = "Single assignment cells and lazy values."
@ -25,11 +25,11 @@ license = "MIT OR Apache-2.0"
repository = "https://github.com/matklad/once_cell"
[[example]]
name = "bench"
name = "reentrant_init_deadlocks"
required-features = ["std"]
[[example]]
name = "bench_acquire"
name = "bench"
required-features = ["std"]
[[example]]
@ -40,17 +40,9 @@ required-features = ["std"]
name = "lazy_static"
required-features = ["std"]
[[example]]
name = "reentrant_init_deadlocks"
required-features = ["std"]
[[example]]
name = "regex"
required-features = ["std"]
[[example]]
name = "test_synchronization"
required-features = ["std"]
[dependencies.parking_lot]
version = "0.9.0"
optional = true

third_party/rust/once_cell/README.md (vendored, 3 lines changed)

@ -51,6 +51,3 @@ More patterns and use-cases are in the [docs](https://docs.rs/once_cell/)!
* [lazycell](https://crates.io/crates/lazycell)
* [mitochondria](https://crates.io/crates/mitochondria)
* [lazy_static](https://crates.io/crates/lazy_static)
The API of `once_cell` is being proposed for inclusion in
[`std`](https://github.com/rust-lang/rfcs/pull/2788).


@ -1,39 +0,0 @@
/// Benchmark the overhead that the synchronization of `OnceCell::get` causes.
/// We do some other operations that write to memory to get an imprecise but somewhat realistic
/// measurement.
use once_cell::sync::OnceCell;
use std::sync::atomic::{AtomicUsize, Ordering};
const N_THREADS: usize = 16;
const N_ROUNDS: usize = 1_000_000;
static CELL: OnceCell<usize> = OnceCell::new();
static OTHER: AtomicUsize = AtomicUsize::new(0);
fn main() {
let start = std::time::Instant::now();
let threads =
(0..N_THREADS).map(|i| std::thread::spawn(move || thread_main(i))).collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
println!("{:?}", start.elapsed());
println!("{:?}", OTHER.load(Ordering::Relaxed));
}
#[inline(never)]
fn thread_main(i: usize) {
// The operations we do here don't really matter, as long as we do multiple writes, and
// everything is messy enough to prevent the compiler from optimizing the loop away.
let mut data = [i; 128];
let mut accum = 0usize;
for _ in 0..N_ROUNDS {
let _value = CELL.get_or_init(|| i+1);
let k = OTHER.fetch_add(data[accum & 0x7F] as usize, Ordering::Relaxed);
for j in data.iter_mut() {
*j = (*j).wrapping_add(accum);
accum = accum.wrapping_add(k);
}
}
}


@ -5,10 +5,3 @@ fn main() {
2
});
}
/// Dummy test to make it seem hang when compiled as `--test`
/// See https://github.com/matklad/once_cell/issues/79
#[test]
fn dummy_test() {
std::thread::sleep(std::time::Duration::from_secs(4));
}


@ -1,38 +0,0 @@
/// Test if the OnceCell properly synchronizes.
/// Needs to be run in release mode.
///
/// We create a `Vec` with `N_ROUNDS` of `OnceCell`s. All threads will walk the `Vec`, and race to
/// be the first one to initialize a cell.
/// Every thread adds the results of the cells it sees to an accumulator, which is compared at the
/// end.
/// All threads should end up with the same result.
use once_cell::sync::OnceCell;
const N_THREADS: usize = 32;
const N_ROUNDS: usize = 1_000_000;
static CELLS: OnceCell<Vec<OnceCell<usize>>> = OnceCell::new();
static RESULT: OnceCell<usize> = OnceCell::new();
fn main() {
let start = std::time::Instant::now();
CELLS.get_or_init(|| vec![OnceCell::new(); N_ROUNDS]);
let threads =
(0..N_THREADS).map(|i| std::thread::spawn(move || thread_main(i))).collect::<Vec<_>>();
for thread in threads {
thread.join().unwrap();
}
println!("{:?}", start.elapsed());
println!("No races detected");
}
fn thread_main(i: usize) {
let cells = CELLS.get().unwrap();
let mut accum = 0;
for cell in cells.iter() {
let &value = cell.get_or_init(|| i);
accum += value;
}
assert_eq!(RESULT.get_or_init(|| accum), &accum);
}

third_party/rust/once_cell/src/imp_pl.rs (vendored, 4 lines changed)

@ -92,9 +92,9 @@ impl Drop for MutexGuard<'_> {
}
#[test]
#[cfg(target_pointer_width = "64")]
#[cfg(pointer_width = "64")]
fn test_size() {
use std::mem::size_of;
assert_eq!(size_of::<OnceCell<u32>>(), 3 * size_of::<u32>());
assert_eq!(size_of::<OnceCell<u32>>(), 2 * size_of::<u32>());
}

third_party/rust/once_cell/src/imp_std.rs (vendored, 177 lines changed)

@ -4,9 +4,10 @@
// * init function can fail
use std::{
cell::{Cell, UnsafeCell},
cell::UnsafeCell,
marker::PhantomData,
panic::{RefUnwindSafe, UnwindSafe},
ptr,
sync::atomic::{AtomicBool, AtomicUsize, Ordering},
thread::{self, Thread},
};
@ -15,11 +16,11 @@ use std::{
pub(crate) struct OnceCell<T> {
// This `state` word is actually an encoded version of just a pointer to a
// `Waiter`, so we add the `PhantomData` appropriately.
state_and_queue: AtomicUsize,
state: AtomicUsize,
_marker: PhantomData<*mut Waiter>,
// FIXME: switch to `std::mem::MaybeUninit` once we are ready to bump MSRV
// that far. It was stabilized in 1.36.0, so, if you are reading this and
// it's higher than 1.46.0 outside, please send a PR! ;) (and do the same
// it's higher than 1.46.0 outside, please send a PR! ;) (and to the same
// for `Lazy`, while we are at it).
pub(crate) value: UnsafeCell<Option<T>>,
}
@ -46,25 +47,23 @@ const COMPLETE: usize = 0x2;
const STATE_MASK: usize = 0x3;
// Representation of a node in the linked list of waiters in the RUNNING state.
#[repr(align(4))] // Ensure the two lower bits are free to use as state bits.
struct Waiter {
thread: Cell<Option<Thread>>,
thread: Option<Thread>,
signaled: AtomicBool,
next: *const Waiter,
next: *mut Waiter,
}
// Head of a linked list of waiters.
// Every node is a struct on the stack of a waiting thread.
// Will wake up the waiters when it gets dropped, i.e. also on panic.
struct WaiterQueue<'a> {
state_and_queue: &'a AtomicUsize,
set_state_on_drop_to: usize,
// Helper struct used to clean up after a closure call with a `Drop`
// implementation to also run on panic.
struct Finish<'a> {
failed: bool,
my_state: &'a AtomicUsize,
}
impl<T> OnceCell<T> {
pub(crate) const fn new() -> OnceCell<T> {
OnceCell {
state_and_queue: AtomicUsize::new(INCOMPLETE),
state: AtomicUsize::new(INCOMPLETE),
_marker: PhantomData,
value: UnsafeCell::new(None),
}
@ -77,7 +76,7 @@ impl<T> OnceCell<T> {
// operations visible to us, and, this being a fast path, weaker
// ordering helps with performance. This `Acquire` synchronizes with
// `SeqCst` operations on the slow path.
self.state_and_queue.load(Ordering::Acquire) == COMPLETE
self.state.load(Ordering::Acquire) == COMPLETE
}
/// Safety: synchronizes with store to value via SeqCst read from state,
@ -91,7 +90,7 @@ impl<T> OnceCell<T> {
let mut f = Some(f);
let mut res: Result<(), E> = Ok(());
let slot = &self.value;
initialize_inner(&self.state_and_queue, &mut || {
initialize_inner(&self.state, &mut || {
let f = f.take().unwrap();
match f() {
Ok(value) => {
@ -108,86 +107,102 @@ impl<T> OnceCell<T> {
}
}
// Corresponds to `std::sync::Once::call_inner`
// Note: this is intentionally monomorphic
fn initialize_inner(my_state_and_queue: &AtomicUsize, init: &mut dyn FnMut() -> bool) -> bool {
let mut state_and_queue = my_state_and_queue.load(Ordering::Acquire);
fn initialize_inner(my_state: &AtomicUsize, init: &mut dyn FnMut() -> bool) -> bool {
// This cold path uses SeqCst consistently because the
// performance difference really does not matter there, and
// SeqCst minimizes the chances of something going wrong.
let mut state = my_state.load(Ordering::SeqCst);
loop {
match state_and_queue {
'outer: loop {
match state {
// If we're complete, then there's nothing to do, we just
// jettison out as we shouldn't run the closure.
COMPLETE => return true,
// Otherwise if we see an incomplete state we will attempt to
// move ourselves into the RUNNING state. If we succeed, then
// the queue of waiters starts at null (all 0 bits).
INCOMPLETE => {
let old = my_state_and_queue.compare_and_swap(
state_and_queue,
RUNNING,
Ordering::Acquire,
);
if old != state_and_queue {
state_and_queue = old;
let old = my_state.compare_and_swap(state, RUNNING, Ordering::SeqCst);
if old != state {
state = old;
continue;
}
let mut waiter_queue = WaiterQueue {
state_and_queue: my_state_and_queue,
set_state_on_drop_to: INCOMPLETE, // Difference, std uses `POISONED`
};
let success = init();
// Difference, std always uses `COMPLETE`
waiter_queue.set_state_on_drop_to = if success { COMPLETE } else { INCOMPLETE };
// Run the initialization routine, letting it know if we're
// poisoned or not. The `Finish` struct is then dropped, and
// the `Drop` implementation here is responsible for waking
// up other waiters both in the normal return and panicking
// case.
let mut complete = Finish { failed: true, my_state };
let success = init();
// Difference from std: abort if `init` errored.
complete.failed = !success;
return success;
}
// All other values we find should correspond to the RUNNING
// state with an encoded waiter list in the more significant
// bits. We attempt to enqueue ourselves by moving us to the
// head of the list and bail out if we ever see a state that's
// not RUNNING.
_ => {
assert!(state_and_queue & STATE_MASK == RUNNING);
wait(&my_state_and_queue, state_and_queue);
state_and_queue = my_state_and_queue.load(Ordering::Acquire);
assert!(state & STATE_MASK == RUNNING);
let mut node = Waiter {
thread: Some(thread::current()),
signaled: AtomicBool::new(false),
next: ptr::null_mut(),
};
let me = &mut node as *mut Waiter as usize;
assert!(me & STATE_MASK == 0);
while state & STATE_MASK == RUNNING {
node.next = (state & !STATE_MASK) as *mut Waiter;
let old = my_state.compare_and_swap(state, me | RUNNING, Ordering::SeqCst);
if old != state {
state = old;
continue;
}
// Once we've enqueued ourselves, wait in a loop.
// Afterwards reload the state and continue with what we
// were doing from before.
while !node.signaled.load(Ordering::SeqCst) {
thread::park();
}
state = my_state.load(Ordering::SeqCst);
continue 'outer;
}
}
}
}
}
// Copy-pasted from std exactly.
fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
loop {
if current_state & STATE_MASK != RUNNING {
return;
}
let node = Waiter {
thread: Cell::new(Some(thread::current())),
signaled: AtomicBool::new(false),
next: (current_state & !STATE_MASK) as *const Waiter,
};
let me = &node as *const Waiter as usize;
let old = state_and_queue.compare_and_swap(current_state, me | RUNNING, Ordering::Release);
if old != current_state {
current_state = old;
continue;
}
while !node.signaled.load(Ordering::Acquire) {
thread::park();
}
break;
}
}
// Copy-pasted from std exactly.
impl Drop for WaiterQueue<'_> {
impl Drop for Finish<'_> {
fn drop(&mut self) {
let state_and_queue =
self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
assert_eq!(state_and_queue & STATE_MASK, RUNNING);
// Swap out our state with however we finished. We should only ever see
// an old state which was RUNNING.
let queue = if self.failed {
// Difference from std: flip back to INCOMPLETE rather than POISONED.
self.my_state.swap(INCOMPLETE, Ordering::SeqCst)
} else {
self.my_state.swap(COMPLETE, Ordering::SeqCst)
};
assert_eq!(queue & STATE_MASK, RUNNING);
// Decode the RUNNING to a list of waiters, then walk that entire list
// and wake them up. Note that it is crucial that after we store `true`
// in the node it can be free'd! As a result we load the `thread` to
// signal ahead of time and then unpark it after the store.
unsafe {
let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter;
let mut queue = (queue & !STATE_MASK) as *mut Waiter;
while !queue.is_null() {
let next = (*queue).next;
let thread = (*queue).thread.replace(None).unwrap();
(*queue).signaled.store(true, Ordering::Release);
queue = next;
let thread = (*queue).thread.take().unwrap();
(*queue).signaled.store(true, Ordering::SeqCst);
thread.unpark();
queue = next;
}
}
}
@ -197,6 +212,7 @@ impl Drop for WaiterQueue<'_> {
#[cfg(test)]
mod tests {
use std::panic;
#[cfg(not(miri))] // miri doesn't support threads
use std::{sync::mpsc::channel, thread};
use super::OnceCell;
@ -219,7 +235,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)] // miri doesn't support threads
#[cfg(not(miri))] // miri doesn't support threads
fn stampede_once() {
static O: OnceCell<()> = OnceCell::new();
static mut RUN: bool = false;
@ -256,6 +272,7 @@ mod tests {
}
#[test]
#[cfg(not(miri))] // miri doesn't support panics
fn poison_bad() {
static O: OnceCell<()> = OnceCell::new();
@ -277,7 +294,7 @@ mod tests {
}
#[test]
#[cfg_attr(miri, ignore)] // miri doesn't support threads
#[cfg(not(miri))] // miri doesn't support panics
fn wait_for_force_to_finish() {
static O: OnceCell<()> = OnceCell::new();
@ -313,12 +330,4 @@ mod tests {
assert!(t1.join().is_ok());
assert!(t2.join().is_ok());
}
#[test]
#[cfg(target_pointer_width = "64")]
fn test_size() {
use std::mem::size_of;
assert_eq!(size_of::<OnceCell<u32>>(), 4 * size_of::<u32>());
}
}

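Both versions of imp_std.rs above rely on the same trick: a single AtomicUsize whose two low bits hold the INCOMPLETE/RUNNING/COMPLETE state while the remaining bits hold a pointer to the head of the waiter list, which is possible because Waiter is forced to 4-byte alignment. The following is a minimal stand-alone sketch of that encoding only; the pack/unpack helpers are illustrative names and are not part of the vendored code.

// Sketch of the state-word encoding used by imp_std.rs (illustrative, not vendored code).
const INCOMPLETE: usize = 0x0;
const RUNNING: usize = 0x1;
const COMPLETE: usize = 0x2;
const STATE_MASK: usize = 0x3;

#[repr(align(4))] // alignment >= 4 keeps the two low bits free for the state
struct Waiter {
    next: *const Waiter,
}

// Pack a waiter-list head and a state into one word; the head must be 4-byte aligned.
fn pack(head: *const Waiter, state: usize) -> usize {
    debug_assert_eq!(head as usize & STATE_MASK, 0);
    (head as usize) | (state & STATE_MASK)
}

// Recover the waiter-list head and the state from the packed word.
fn unpack(word: usize) -> (*const Waiter, usize) {
    ((word & !STATE_MASK) as *const Waiter, word & STATE_MASK)
}

fn main() {
    let w = Waiter { next: std::ptr::null() };
    let word = pack(&w as *const Waiter, RUNNING);
    let (head, state) = unpack(word);
    assert_eq!(head, &w as *const Waiter);
    assert_eq!(state, RUNNING);
    assert_ne!(state, COMPLETE);
    assert_ne!(word & STATE_MASK, INCOMPLETE);
    // In the real code the head pointer is walked (unsafely) to unpark each waiter.
    assert!(unsafe { (*head).next.is_null() });
}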
third_party/rust/once_cell/src/lib.rs (vendored, 48 lines changed)

@ -155,7 +155,7 @@ This macro can be useful to avoid "compile regex on every loop iteration" proble
|`!Sync` types | Access Mode | Drawbacks |
|----------------------|------------------------|-----------------------------------------------|
|`Cell<T>` | `T` | requires `T: Copy` for `get` |
|`RefCell<T>` | `RefMut<T>` / `Ref<T>` | may panic at runtime |
|`RefCel<T>` | `RefMut<T>` / `Ref<T>` | may panic at runtime |
|`unsync::OnceCell<T>` | `&T` | assignable only once |
|`Sync` types | Access Mode | Drawbacks |
@ -237,7 +237,7 @@ pub mod unsync {
use core::{
cell::{Cell, UnsafeCell},
fmt,
ops::{Deref, DerefMut},
ops::Deref,
};
#[cfg(feature = "std")]
@ -435,10 +435,6 @@ pub mod unsync {
return Ok(val);
}
let val = f()?;
// Note that *some* forms of reentrant initialization might lead to
// UB (see `reentrant_init` test). I believe that just removing this
// `assert`, while keeping `set/get` would be sound, but it seems
// better to panic, rather than to silently use an old value.
assert!(self.set(val).is_ok(), "reentrant init");
Ok(self.get().unwrap())
}
@ -494,7 +490,7 @@ pub mod unsync {
#[cfg(feature = "std")]
impl<T, F: RefUnwindSafe> RefUnwindSafe for Lazy<T, F> where OnceCell<T>: RefUnwindSafe {}
impl<T: fmt::Debug, F> fmt::Debug for Lazy<T, F> {
impl<T: fmt::Debug, F: fmt::Debug> fmt::Debug for Lazy<T, F> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
}
@ -550,13 +546,6 @@ pub mod unsync {
}
}
impl<T, F: FnOnce() -> T> DerefMut for Lazy<T, F> {
fn deref_mut(&mut self) -> &mut T {
Lazy::force(self);
self.cell.get_mut().unwrap_or_else(|| unreachable!())
}
}
impl<T: Default> Default for Lazy<T> {
/// Creates a new lazy value using `Default` as the initializing function.
fn default() -> Lazy<T> {
@ -567,25 +556,14 @@ pub mod unsync {
#[cfg(feature = "std")]
pub mod sync {
use std::{
cell::Cell,
fmt,
hint::unreachable_unchecked,
ops::{Deref, DerefMut},
panic::RefUnwindSafe,
};
use std::{cell::Cell, fmt, hint::unreachable_unchecked, panic::RefUnwindSafe};
use crate::imp::OnceCell as Imp;
/// A thread-safe cell which can be written to only once.
///
/// `OnceCell` provides `&` references to the contents without RAII guards.
///
/// Reading a non-`None` value out of `OnceCell` establishes a
/// happens-before relationship with a corresponding write. For example, if
/// thread A initializes the cell with `get_or_init(f)`, and thread B
/// subsequently reads the result of this call, B also observes all the side
/// effects of `f`.
/// Unlike `std::sync::Mutex`, a `OnceCell` provides simple `&` references
/// to the contents.
///
/// # Example
/// ```
@ -683,8 +661,7 @@ pub mod sync {
///
/// Safety:
///
/// Caller must ensure that the cell is in initialized state, and that
/// the contents are acquired by (synchronized to) this thread.
/// Caller must ensure that the cell is in initialized state.
pub unsafe fn get_unchecked(&self) -> &T {
debug_assert!(self.0.is_initialized());
let slot: &Option<T> = &*self.0.value.get();
@ -866,7 +843,7 @@ pub mod sync {
init: Cell<Option<F>>,
}
impl<T: fmt::Debug, F> fmt::Debug for Lazy<T, F> {
impl<T: fmt::Debug, F: fmt::Debug> fmt::Debug for Lazy<T, F> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
}
@ -913,20 +890,13 @@ pub mod sync {
}
}
impl<T, F: FnOnce() -> T> Deref for Lazy<T, F> {
impl<T, F: FnOnce() -> T> ::std::ops::Deref for Lazy<T, F> {
type Target = T;
fn deref(&self) -> &T {
Lazy::force(self)
}
}
impl<T, F: FnOnce() -> T> DerefMut for Lazy<T, F> {
fn deref_mut(&mut self) -> &mut T {
Lazy::force(self);
self.cell.get_mut().unwrap_or_else(|| unreachable!())
}
}
impl<T: Default> Default for Lazy<T> {
/// Creates a new lazy value using `Default` as the initializing function.
fn default() -> Lazy<T> {

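For context, the documentation touched above boils down to this contract: get_or_init runs its closure at most once, and any later read of the value observes everything the initializer did. A minimal usage sketch, assuming once_cell is available as a dependency (the snippet itself is not part of the diff):

// Usage sketch for the sync API documented above (illustrative only).
use once_cell::sync::{Lazy, OnceCell};

static GREETING: Lazy<String> = Lazy::new(|| "initialized exactly once".to_string());

fn main() {
    let cell: OnceCell<u32> = OnceCell::new();
    assert!(cell.get().is_none());
    assert_eq!(*cell.get_or_init(|| 92), 92);
    // A second initializer is never run; the first stored value wins.
    assert_eq!(*cell.get_or_init(|| 100), 92);
    assert_eq!(&*GREETING, "initialized exactly once");
}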
third_party/rust/once_cell/tests/test.rs (vendored, 75 lines changed)

@ -111,25 +111,6 @@ mod unsync {
assert_eq!(called.get(), 1);
}
#[test]
fn lazy_deref_mut() {
let called = Cell::new(0);
let mut x = Lazy::new(|| {
called.set(called.get() + 1);
92
});
assert_eq!(called.get(), 0);
let y = *x - 30;
assert_eq!(y, 62);
assert_eq!(called.get(), 1);
*x /= 2;
assert_eq!(*x, 46);
assert_eq!(called.get(), 1);
}
#[test]
fn lazy_default() {
static CALLED: AtomicUsize = AtomicUsize::new(0);
@ -156,6 +137,7 @@ mod unsync {
}
#[test]
#[cfg(not(miri))] // miri doesn't support panics
#[cfg(feature = "std")]
fn lazy_poisoning() {
let x: Lazy<String> = Lazy::new(|| panic!("kaboom"));
@ -167,25 +149,12 @@ mod unsync {
#[test]
fn aliasing_in_get() {
let x = OnceCell::new();
let x = once_cell::unsync::OnceCell::new();
x.set(42).unwrap();
let at_x = x.get().unwrap(); // --- (shared) borrow of inner `Option<T>` --+
let _ = x.set(27); // <-- temporary (unique) borrow of inner `Option<T>` |
println!("{}", at_x); // <------- up until here ---------------------------+
}
#[test]
#[should_panic(expected = "reentrant init")]
fn reentrant_init() {
let x: OnceCell<Box<i32>> = OnceCell::new();
let dangling_ref: Cell<Option<&i32>> = Cell::new(None);
x.get_or_init(|| {
let r = x.get_or_init(|| Box::new(92));
dangling_ref.set(Some(r));
Box::new(62)
});
eprintln!("use after free: {:?}", dangling_ref.get().unwrap());
}
}
#[cfg(feature = "std")]
@ -199,14 +168,14 @@ mod sync {
pub(super) use crossbeam_utils::thread::scope;
}
#[cfg(miri)] // "stub threads" for Miri
#[cfg(miri)]
mod scope {
pub(super) struct Scope;
#[cfg(miri)]
impl Scope {
pub(super) fn spawn<R>(&self, f: impl FnOnce(()) -> R) -> R {
f(())
pub(super) fn spawn(&self, f: impl FnOnce(())) {
f(());
}
}
@ -292,6 +261,7 @@ mod sync {
}
#[test]
#[cfg(not(miri))] // miri doesn't support panics
fn get_or_try_init() {
let cell: OnceCell<String> = OnceCell::new();
assert!(cell.get().is_none());
@ -351,7 +321,7 @@ mod sync {
}
#[test]
#[cfg_attr(miri, ignore)] // miri doesn't support processes
#[cfg(not(miri))] // miri doesn't support processes
fn reentrant_init() {
let examples_dir = {
let mut exe = std::env::current_exe().unwrap();
@ -403,25 +373,6 @@ mod sync {
assert_eq!(called.load(SeqCst), 1);
}
#[test]
fn lazy_deref_mut() {
let called = AtomicUsize::new(0);
let mut x = Lazy::new(|| {
called.fetch_add(1, SeqCst);
92
});
assert_eq!(called.load(SeqCst), 0);
let y = *x - 30;
assert_eq!(y, 62);
assert_eq!(called.load(SeqCst), 1);
*x /= 2;
assert_eq!(*x, 46);
assert_eq!(called.load(SeqCst), 1);
}
#[test]
fn lazy_default() {
static CALLED: AtomicUsize = AtomicUsize::new(0);
@ -448,7 +399,7 @@ mod sync {
}
#[test]
#[cfg_attr(miri, ignore)] // leaks memory
#[cfg(not(miri))] // leaks memory
fn static_lazy() {
static XS: Lazy<Vec<i32>> = Lazy::new(|| {
let mut xs = Vec::new();
@ -467,7 +418,7 @@ mod sync {
}
#[test]
#[cfg_attr(miri, ignore)] // leaks memory
#[cfg(not(miri))] // leaks memory
fn static_lazy_via_fn() {
fn xs() -> &'static Vec<i32> {
static XS: OnceCell<Vec<i32>> = OnceCell::new();
@ -483,6 +434,7 @@ mod sync {
}
#[test]
#[cfg(not(miri))] // miri doesn't support panics
fn lazy_poisoning() {
let x: Lazy<String> = Lazy::new(|| panic!("kaboom"));
for _ in 0..2 {
@ -499,6 +451,7 @@ mod sync {
}
#[test]
#[cfg(not(miri))] // leaks memory
fn eval_once_macro() {
macro_rules! eval_once {
(|| -> $ty:ty {
@ -526,7 +479,7 @@ mod sync {
}
#[test]
#[cfg_attr(miri, ignore)] // deadlocks without real threads
#[cfg(not(miri))] // deadlocks without real threads
fn once_cell_does_not_leak_partially_constructed_boxes() {
let n_tries = 100;
let n_readers = 10;
@ -545,7 +498,7 @@ mod sync {
});
}
for _ in 0..n_writers {
let _ = scope.spawn(|_| cell.set(MSG.to_owned()));
scope.spawn(|_| cell.set(MSG.to_owned()));
}
})
.unwrap()
@ -553,7 +506,7 @@ mod sync {
}
#[test]
#[cfg_attr(miri, ignore)] // miri doesn't support Barrier
#[cfg(not(miri))] // miri doesn't support Barrier
fn get_does_not_block() {
use std::sync::Barrier;


@ -1 +0,0 @@
{"files":{"Cargo.toml":"900356d96cde812c45a50f156f334e1ef79ea6b58f855b904b15245262d0595c","README.md":"6c67fa1e48f14adfaf834f520f798ddfb79f90804f46cc215ee391a7d57913a4","src/field/group.rs":"d64fb4673ddc705b3ee10b2df7d086e654bd78759ba61aa65b90d2d99a704bf5","src/field/map.rs":"80a555ef40c1193bcc3865d004513792af07f8701a39b5ca3d2b0204fad2df3d","src/field/message.rs":"eef8581d9df42e1f91932dad42622349e6888da60a54f5ab4efe82bca777bdbc","src/field/mod.rs":"5c0862f2d1ae8fcf0bc87b5f770abf2b88bd2198eb6fddd0aecdf18f8cea9845","src/field/oneof.rs":"50efef18c895abfe2074001971fdcb92ae9eb81b36b77a499a7844fd673d44bd","src/field/scalar.rs":"765a6464d6e291ccb3153fc172539409d6cebde84d319095ea3ccce3094d434e","src/lib.rs":"598d1119877b2cc4903c1c4a37b3a5013e3e41d471aec79cfcb801b7dc60d310"},"package":"537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72"}

third_party/rust/prost-derive/Cargo.toml (vendored, 40 lines changed)

@ -1,40 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "prost-derive"
version = "0.6.1"
authors = ["Dan Burkert <dan@danburkert.com>"]
description = "A Protocol Buffers implementation for the Rust Language."
documentation = "https://docs.rs/prost-derive"
readme = "README.md"
license = "Apache-2.0"
repository = "https://github.com/danburkert/prost"
[lib]
proc_macro = true
[dependencies.anyhow]
version = "1"
[dependencies.itertools]
version = "0.8"
[dependencies.proc-macro2]
version = "1"
[dependencies.quote]
version = "1"
[dependencies.syn]
version = "1"
features = ["extra-traits"]

third_party/rust/prost-derive/README.md (vendored, 16 lines changed)

@ -1,16 +0,0 @@
[![Documentation](https://docs.rs/prost-derive/badge.svg)](https://docs.rs/prost-derive/)
[![Crate](https://img.shields.io/crates/v/prost-derive.svg)](https://crates.io/crates/prost-derive)
# prost-derive
`prost-derive` handles generating encoding and decoding implementations for Rust
types annotated with `prost` annotation. For the most part, users of `prost`
shouldn't need to interact with `prost-derive` directly.
## License
`prost-derive` is distributed under the terms of the Apache License (Version 2.0).
See [LICENSE](../LICENSE) for details.
Copyright 2017 Dan Burkert

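As the README above notes, consumers rarely touch prost-derive directly; they annotate types and derive Message through the prost facade. A hedged sketch of that usage follows (it assumes prost as a dependency; the Person type and its fields are invented for illustration):

// Illustrative prost-derive usage via the `prost` crate (not part of the diff).
use prost::Message;

#[derive(Clone, PartialEq, Message)]
pub struct Person {
    #[prost(string, tag = "1")]
    pub name: String,
    #[prost(int32, tag = "2")]
    pub id: i32,
}

fn main() {
    let p = Person { name: "ada".to_string(), id: 1 };
    let mut buf: Vec<u8> = Vec::new();
    p.encode(&mut buf).unwrap(); // encode/decode impls are generated by prost-derive
    let decoded = Person::decode(buf.as_slice()).unwrap();
    assert_eq!(p, decoded);
}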

@ -1,134 +0,0 @@
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::Meta;
use crate::field::{set_bool, set_option, tag_attr, word_attr, Label};
#[derive(Clone)]
pub struct Field {
pub label: Label,
pub tag: u32,
}
impl Field {
pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
let mut group = false;
let mut label = None;
let mut tag = None;
let mut boxed = false;
let mut unknown_attrs = Vec::new();
for attr in attrs {
if word_attr("group", attr) {
set_bool(&mut group, "duplicate group attributes")?;
} else if word_attr("boxed", attr) {
set_bool(&mut boxed, "duplicate boxed attributes")?;
} else if let Some(t) = tag_attr(attr)? {
set_option(&mut tag, t, "duplicate tag attributes")?;
} else if let Some(l) = Label::from_attr(attr) {
set_option(&mut label, l, "duplicate label attributes")?;
} else {
unknown_attrs.push(attr);
}
}
if !group {
return Ok(None);
}
match unknown_attrs.len() {
0 => (),
1 => bail!("unknown attribute for group field: {:?}", unknown_attrs[0]),
_ => bail!("unknown attributes for group field: {:?}", unknown_attrs),
}
let tag = match tag.or(inferred_tag) {
Some(tag) => tag,
None => bail!("group field is missing a tag attribute"),
};
Ok(Some(Field {
label: label.unwrap_or(Label::Optional),
tag: tag,
}))
}
pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
if let Some(mut field) = Field::new(attrs, None)? {
if let Some(attr) = attrs.iter().find(|attr| Label::from_attr(attr).is_some()) {
bail!(
"invalid attribute for oneof field: {}",
attr.path().into_token_stream()
);
}
field.label = Label::Required;
Ok(Some(field))
} else {
Ok(None)
}
}
pub fn encode(&self, ident: TokenStream) -> TokenStream {
let tag = self.tag;
match self.label {
Label::Optional => quote! {
if let Some(ref msg) = #ident {
::prost::encoding::group::encode(#tag, msg, buf);
}
},
Label::Required => quote! {
::prost::encoding::group::encode(#tag, &#ident, buf);
},
Label::Repeated => quote! {
for msg in &#ident {
::prost::encoding::group::encode(#tag, msg, buf);
}
},
}
}
pub fn merge(&self, ident: TokenStream) -> TokenStream {
match self.label {
Label::Optional => quote! {
::prost::encoding::group::merge(
tag,
wire_type,
#ident.get_or_insert_with(Default::default),
buf,
ctx,
)
},
Label::Required => quote! {
::prost::encoding::group::merge(tag, wire_type, #ident, buf, ctx)
},
Label::Repeated => quote! {
::prost::encoding::group::merge_repeated(tag, wire_type, #ident, buf, ctx)
},
}
}
pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
let tag = self.tag;
match self.label {
Label::Optional => quote! {
#ident.as_ref().map_or(0, |msg| ::prost::encoding::group::encoded_len(#tag, msg))
},
Label::Required => quote! {
::prost::encoding::group::encoded_len(#tag, &#ident)
},
Label::Repeated => quote! {
::prost::encoding::group::encoded_len_repeated(#tag, &#ident)
},
}
}
pub fn clear(&self, ident: TokenStream) -> TokenStream {
match self.label {
Label::Optional => quote!(#ident = ::std::option::Option::None),
Label::Required => quote!(#ident.clear()),
Label::Repeated => quote!(#ident.clear()),
}
}
}

third_party/rust/prost-derive/src/field/map.rs (vendored, 386 lines changed)

@ -1,386 +0,0 @@
use anyhow::{bail, Error};
use proc_macro2::{Span, TokenStream};
use quote::quote;
use syn::{Ident, Lit, Meta, MetaNameValue, NestedMeta};
use crate::field::{scalar, set_option, tag_attr};
#[derive(Clone, Debug)]
pub enum MapTy {
HashMap,
BTreeMap,
}
impl MapTy {
fn from_str(s: &str) -> Option<MapTy> {
match s {
"map" | "hash_map" => Some(MapTy::HashMap),
"btree_map" => Some(MapTy::BTreeMap),
_ => None,
}
}
fn module(&self) -> Ident {
match *self {
MapTy::HashMap => Ident::new("hash_map", Span::call_site()),
MapTy::BTreeMap => Ident::new("btree_map", Span::call_site()),
}
}
}
fn fake_scalar(ty: scalar::Ty) -> scalar::Field {
let kind = scalar::Kind::Plain(scalar::DefaultValue::new(&ty));
scalar::Field {
ty,
kind,
tag: 0, // Not used here
}
}
#[derive(Clone)]
pub struct Field {
pub map_ty: MapTy,
pub key_ty: scalar::Ty,
pub value_ty: ValueTy,
pub tag: u32,
}
impl Field {
pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
let mut types = None;
let mut tag = None;
for attr in attrs {
if let Some(t) = tag_attr(attr)? {
set_option(&mut tag, t, "duplicate tag attributes")?;
} else if let Some(map_ty) = attr
.path()
.get_ident()
.and_then(|i| MapTy::from_str(&i.to_string()))
{
let (k, v): (String, String) = match *attr {
Meta::NameValue(MetaNameValue {
lit: Lit::Str(ref lit),
..
}) => {
let items = lit.value();
let mut items = items.split(',').map(ToString::to_string);
let k = items.next().unwrap();
let v = match items.next() {
Some(k) => k,
None => bail!("invalid map attribute: must have key and value types"),
};
if items.next().is_some() {
bail!("invalid map attribute: {:?}", attr);
}
(k, v)
}
Meta::List(ref meta_list) => {
// TODO(rustlang/rust#23121): slice pattern matching would make this much nicer.
if meta_list.nested.len() != 2 {
bail!("invalid map attribute: must contain key and value types");
}
let k = match &meta_list.nested[0] {
&NestedMeta::Meta(Meta::Path(ref k)) if k.get_ident().is_some() => {
k.get_ident().unwrap().to_string()
}
_ => bail!("invalid map attribute: key must be an identifier"),
};
let v = match &meta_list.nested[1] {
&NestedMeta::Meta(Meta::Path(ref v)) if v.get_ident().is_some() => {
v.get_ident().unwrap().to_string()
}
_ => bail!("invalid map attribute: value must be an identifier"),
};
(k, v)
}
_ => return Ok(None),
};
set_option(
&mut types,
(map_ty, key_ty_from_str(&k)?, ValueTy::from_str(&v)?),
"duplicate map type attribute",
)?;
} else {
return Ok(None);
}
}
Ok(match (types, tag.or(inferred_tag)) {
(Some((map_ty, key_ty, val_ty)), Some(tag)) => Some(Field {
map_ty: map_ty,
key_ty: key_ty,
value_ty: val_ty,
tag: tag,
}),
_ => None,
})
}
pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
Field::new(attrs, None)
}
/// Returns a statement which encodes the map field.
pub fn encode(&self, ident: TokenStream) -> TokenStream {
let tag = self.tag;
let key_mod = self.key_ty.module();
let ke = quote!(::prost::encoding::#key_mod::encode);
let kl = quote!(::prost::encoding::#key_mod::encoded_len);
let module = self.map_ty.module();
match self.value_ty {
ValueTy::Scalar(scalar::Ty::Enumeration(ref ty)) => {
let default = quote!(#ty::default() as i32);
quote! {
::prost::encoding::#module::encode_with_default(
#ke,
#kl,
::prost::encoding::int32::encode,
::prost::encoding::int32::encoded_len,
&(#default),
#tag,
&#ident,
buf,
);
}
}
ValueTy::Scalar(ref value_ty) => {
let val_mod = value_ty.module();
let ve = quote!(::prost::encoding::#val_mod::encode);
let vl = quote!(::prost::encoding::#val_mod::encoded_len);
quote! {
::prost::encoding::#module::encode(
#ke,
#kl,
#ve,
#vl,
#tag,
&#ident,
buf,
);
}
}
ValueTy::Message => quote! {
::prost::encoding::#module::encode(
#ke,
#kl,
::prost::encoding::message::encode,
::prost::encoding::message::encoded_len,
#tag,
&#ident,
buf,
);
},
}
}
/// Returns an expression which evaluates to the result of merging a decoded key value pair
/// into the map.
pub fn merge(&self, ident: TokenStream) -> TokenStream {
let key_mod = self.key_ty.module();
let km = quote!(::prost::encoding::#key_mod::merge);
let module = self.map_ty.module();
match self.value_ty {
ValueTy::Scalar(scalar::Ty::Enumeration(ref ty)) => {
let default = quote!(#ty::default() as i32);
quote! {
::prost::encoding::#module::merge_with_default(
#km,
::prost::encoding::int32::merge,
#default,
&mut #ident,
buf,
ctx,
)
}
}
ValueTy::Scalar(ref value_ty) => {
let val_mod = value_ty.module();
let vm = quote!(::prost::encoding::#val_mod::merge);
quote!(::prost::encoding::#module::merge(#km, #vm, &mut #ident, buf, ctx))
}
ValueTy::Message => quote! {
::prost::encoding::#module::merge(
#km,
::prost::encoding::message::merge,
&mut #ident,
buf,
ctx,
)
},
}
}
/// Returns an expression which evaluates to the encoded length of the map.
pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
let tag = self.tag;
let key_mod = self.key_ty.module();
let kl = quote!(::prost::encoding::#key_mod::encoded_len);
let module = self.map_ty.module();
match self.value_ty {
ValueTy::Scalar(scalar::Ty::Enumeration(ref ty)) => {
let default = quote!(#ty::default() as i32);
quote! {
::prost::encoding::#module::encoded_len_with_default(
#kl,
::prost::encoding::int32::encoded_len,
&(#default),
#tag,
&#ident,
)
}
}
ValueTy::Scalar(ref value_ty) => {
let val_mod = value_ty.module();
let vl = quote!(::prost::encoding::#val_mod::encoded_len);
quote!(::prost::encoding::#module::encoded_len(#kl, #vl, #tag, &#ident))
}
ValueTy::Message => quote! {
::prost::encoding::#module::encoded_len(
#kl,
::prost::encoding::message::encoded_len,
#tag,
&#ident,
)
},
}
}
pub fn clear(&self, ident: TokenStream) -> TokenStream {
quote!(#ident.clear())
}
/// Returns methods to embed in the message.
pub fn methods(&self, ident: &Ident) -> Option<TokenStream> {
if let ValueTy::Scalar(scalar::Ty::Enumeration(ref ty)) = self.value_ty {
let key_ty = self.key_ty.rust_type();
let key_ref_ty = self.key_ty.rust_ref_type();
let get = Ident::new(&format!("get_{}", ident), Span::call_site());
let insert = Ident::new(&format!("insert_{}", ident), Span::call_site());
let take_ref = if self.key_ty.is_numeric() {
quote!(&)
} else {
quote!()
};
let get_doc = format!(
"Returns the enum value for the corresponding key in `{}`, \
or `None` if the entry does not exist or it is not a valid enum value.",
ident,
);
let insert_doc = format!("Inserts a key value pair into `{}`.", ident);
Some(quote! {
#[doc=#get_doc]
pub fn #get(&self, key: #key_ref_ty) -> ::std::option::Option<#ty> {
self.#ident.get(#take_ref key).cloned().and_then(#ty::from_i32)
}
#[doc=#insert_doc]
pub fn #insert(&mut self, key: #key_ty, value: #ty) -> ::std::option::Option<#ty> {
self.#ident.insert(key, value as i32).and_then(#ty::from_i32)
}
})
} else {
None
}
}
/// Returns a newtype wrapper around the map, implementing nicer Debug
///
/// The Debug tries to convert any enumerations met into the variants if possible, instead of
/// outputting the raw numbers.
pub fn debug(&self, wrapper_name: TokenStream) -> TokenStream {
let type_name = match self.map_ty {
MapTy::HashMap => Ident::new("HashMap", Span::call_site()),
MapTy::BTreeMap => Ident::new("BTreeMap", Span::call_site()),
};
// A fake field for generating the debug wrapper
let key_wrapper = fake_scalar(self.key_ty.clone()).debug(quote!(KeyWrapper));
let key = self.key_ty.rust_type();
let value_wrapper = self.value_ty.debug();
let fmt = quote! {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
#key_wrapper
#value_wrapper
let mut builder = f.debug_map();
for (k, v) in self.0 {
builder.entry(&KeyWrapper(k), &ValueWrapper(v));
}
builder.finish()
}
};
match self.value_ty {
ValueTy::Scalar(ref ty) => {
let value = ty.rust_type();
quote! {
struct #wrapper_name<'a>(&'a ::std::collections::#type_name<#key, #value>);
impl<'a> ::std::fmt::Debug for #wrapper_name<'a> {
#fmt
}
}
}
ValueTy::Message => quote! {
struct #wrapper_name<'a, V: 'a>(&'a ::std::collections::#type_name<#key, V>);
impl<'a, V> ::std::fmt::Debug for #wrapper_name<'a, V>
where
V: ::std::fmt::Debug + 'a,
{
#fmt
}
},
}
}
}
fn key_ty_from_str(s: &str) -> Result<scalar::Ty, Error> {
let ty = scalar::Ty::from_str(s)?;
match ty {
scalar::Ty::Int32
| scalar::Ty::Int64
| scalar::Ty::Uint32
| scalar::Ty::Uint64
| scalar::Ty::Sint32
| scalar::Ty::Sint64
| scalar::Ty::Fixed32
| scalar::Ty::Fixed64
| scalar::Ty::Sfixed32
| scalar::Ty::Sfixed64
| scalar::Ty::Bool
| scalar::Ty::String => Ok(ty),
_ => bail!("invalid map key type: {}", s),
}
}
/// A map value type.
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum ValueTy {
Scalar(scalar::Ty),
Message,
}
impl ValueTy {
fn from_str(s: &str) -> Result<ValueTy, Error> {
if let Ok(ty) = scalar::Ty::from_str(s) {
Ok(ValueTy::Scalar(ty))
} else if s.trim() == "message" {
Ok(ValueTy::Message)
} else {
bail!("invalid map value type: {}", s);
}
}
/// Returns a newtype wrapper around the ValueTy for nicer debug.
///
/// If the contained value is enumeration, it tries to convert it to the variant. If not, it
/// just forwards the implementation.
fn debug(&self) -> TokenStream {
match *self {
ValueTy::Scalar(ref ty) => fake_scalar(ty.clone()).debug(quote!(ValueWrapper)),
ValueTy::Message => quote!(
fn ValueWrapper<T>(v: T) -> T {
v
}
),
}
}
}

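The map field module above accepts attributes of the form map = "key_ty, value_ty", with hash_map and btree_map as container-selecting aliases. A hedged sketch of a field it would handle (type and field names invented; assumes prost as a dependency):

// Illustrative map field matching the attribute forms parsed above (not part of the diff).
use std::collections::HashMap;

use prost::Message;

#[derive(Clone, PartialEq, Message)]
pub struct Scores {
    #[prost(map = "string, int32", tag = "1")]
    pub by_player: HashMap<String, i32>,
}

fn main() {
    let mut s = Scores::default();
    s.by_player.insert("ada".to_string(), 3);
    let mut buf: Vec<u8> = Vec::new();
    s.encode(&mut buf).unwrap();
    assert_eq!(Scores::decode(buf.as_slice()).unwrap(), s);
}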

@ -1,134 +0,0 @@
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::{quote, ToTokens};
use syn::Meta;
use crate::field::{set_bool, set_option, tag_attr, word_attr, Label};
#[derive(Clone)]
pub struct Field {
pub label: Label,
pub tag: u32,
}
impl Field {
pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
let mut message = false;
let mut label = None;
let mut tag = None;
let mut boxed = false;
let mut unknown_attrs = Vec::new();
for attr in attrs {
if word_attr("message", attr) {
set_bool(&mut message, "duplicate message attribute")?;
} else if word_attr("boxed", attr) {
set_bool(&mut boxed, "duplicate boxed attribute")?;
} else if let Some(t) = tag_attr(attr)? {
set_option(&mut tag, t, "duplicate tag attributes")?;
} else if let Some(l) = Label::from_attr(attr) {
set_option(&mut label, l, "duplicate label attributes")?;
} else {
unknown_attrs.push(attr);
}
}
if !message {
return Ok(None);
}
match unknown_attrs.len() {
0 => (),
1 => bail!(
"unknown attribute for message field: {:?}",
unknown_attrs[0]
),
_ => bail!("unknown attributes for message field: {:?}", unknown_attrs),
}
let tag = match tag.or(inferred_tag) {
Some(tag) => tag,
None => bail!("message field is missing a tag attribute"),
};
Ok(Some(Field {
label: label.unwrap_or(Label::Optional),
tag: tag,
}))
}
pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
if let Some(mut field) = Field::new(attrs, None)? {
if let Some(attr) = attrs.iter().find(|attr| Label::from_attr(attr).is_some()) {
bail!(
"invalid attribute for oneof field: {}",
attr.path().into_token_stream()
);
}
field.label = Label::Required;
Ok(Some(field))
} else {
Ok(None)
}
}
pub fn encode(&self, ident: TokenStream) -> TokenStream {
let tag = self.tag;
match self.label {
Label::Optional => quote! {
if let Some(ref msg) = #ident {
::prost::encoding::message::encode(#tag, msg, buf);
}
},
Label::Required => quote! {
::prost::encoding::message::encode(#tag, &#ident, buf);
},
Label::Repeated => quote! {
for msg in &#ident {
::prost::encoding::message::encode(#tag, msg, buf);
}
},
}
}
pub fn merge(&self, ident: TokenStream) -> TokenStream {
match self.label {
Label::Optional => quote! {
::prost::encoding::message::merge(wire_type,
#ident.get_or_insert_with(Default::default),
buf,
ctx)
},
Label::Required => quote! {
::prost::encoding::message::merge(wire_type, #ident, buf, ctx)
},
Label::Repeated => quote! {
::prost::encoding::message::merge_repeated(wire_type, #ident, buf, ctx)
},
}
}
pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
let tag = self.tag;
match self.label {
Label::Optional => quote! {
#ident.as_ref().map_or(0, |msg| ::prost::encoding::message::encoded_len(#tag, msg))
},
Label::Required => quote! {
::prost::encoding::message::encoded_len(#tag, &#ident)
},
Label::Repeated => quote! {
::prost::encoding::message::encoded_len_repeated(#tag, &#ident)
},
}
}
pub fn clear(&self, ident: TokenStream) -> TokenStream {
match self.label {
Label::Optional => quote!(#ident = ::std::option::Option::None),
Label::Required => quote!(#ident.clear()),
Label::Repeated => quote!(#ident.clear()),
}
}
}

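The message field code above expects optional fields to live in an Option<T> and repeated fields in a Vec<T>, mirroring what generated prost code typically looks like. A hedged sketch of both shapes (names invented; assumes prost as a dependency):

// Illustrative optional and repeated message fields (not part of the diff).
use prost::Message;

#[derive(Clone, PartialEq, Message)]
pub struct Point {
    #[prost(int32, tag = "1")]
    pub x: i32,
    #[prost(int32, tag = "2")]
    pub y: i32,
}

#[derive(Clone, PartialEq, Message)]
pub struct Path {
    #[prost(message, optional, tag = "1")]
    pub start: Option<Point>,
    #[prost(message, repeated, tag = "2")]
    pub waypoints: Vec<Point>,
}

fn main() {
    let p = Path {
        start: Some(Point { x: 1, y: 2 }),
        waypoints: vec![Point { x: 3, y: 4 }],
    };
    let mut buf: Vec<u8> = Vec::new();
    p.encode(&mut buf).unwrap();
    assert_eq!(Path::decode(buf.as_slice()).unwrap(), p);
}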
third_party/rust/prost-derive/src/field/mod.rs (vendored, 366 lines changed)

@ -1,366 +0,0 @@
mod group;
mod map;
mod message;
mod oneof;
mod scalar;
use std::fmt;
use std::slice;
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::quote;
use syn::{Attribute, Ident, Lit, LitBool, Meta, MetaList, MetaNameValue, NestedMeta};
#[derive(Clone)]
pub enum Field {
/// A scalar field.
Scalar(scalar::Field),
/// A message field.
Message(message::Field),
/// A map field.
Map(map::Field),
/// A oneof field.
Oneof(oneof::Field),
/// A group field.
Group(group::Field),
}
impl Field {
/// Creates a new `Field` from an iterator of field attributes.
///
/// If the meta items are invalid, an error will be returned.
/// If the field should be ignored, `None` is returned.
pub fn new(attrs: Vec<Attribute>, inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
let attrs = prost_attrs(attrs)?;
// TODO: check for ignore attribute.
let field = if let Some(field) = scalar::Field::new(&attrs, inferred_tag)? {
Field::Scalar(field)
} else if let Some(field) = message::Field::new(&attrs, inferred_tag)? {
Field::Message(field)
} else if let Some(field) = map::Field::new(&attrs, inferred_tag)? {
Field::Map(field)
} else if let Some(field) = oneof::Field::new(&attrs)? {
Field::Oneof(field)
} else if let Some(field) = group::Field::new(&attrs, inferred_tag)? {
Field::Group(field)
} else {
bail!("no type attribute");
};
Ok(Some(field))
}
/// Creates a new oneof `Field` from an iterator of field attributes.
///
/// If the meta items are invalid, an error will be returned.
/// If the field should be ignored, `None` is returned.
pub fn new_oneof(attrs: Vec<Attribute>) -> Result<Option<Field>, Error> {
let attrs = prost_attrs(attrs)?;
// TODO: check for ignore attribute.
let field = if let Some(field) = scalar::Field::new_oneof(&attrs)? {
Field::Scalar(field)
} else if let Some(field) = message::Field::new_oneof(&attrs)? {
Field::Message(field)
} else if let Some(field) = map::Field::new_oneof(&attrs)? {
Field::Map(field)
} else if let Some(field) = group::Field::new_oneof(&attrs)? {
Field::Group(field)
} else {
bail!("no type attribute for oneof field");
};
Ok(Some(field))
}
pub fn tags(&self) -> Vec<u32> {
match *self {
Field::Scalar(ref scalar) => vec![scalar.tag],
Field::Message(ref message) => vec![message.tag],
Field::Map(ref map) => vec![map.tag],
Field::Oneof(ref oneof) => oneof.tags.clone(),
Field::Group(ref group) => vec![group.tag],
}
}
/// Returns a statement which encodes the field.
pub fn encode(&self, ident: TokenStream) -> TokenStream {
match *self {
Field::Scalar(ref scalar) => scalar.encode(ident),
Field::Message(ref message) => message.encode(ident),
Field::Map(ref map) => map.encode(ident),
Field::Oneof(ref oneof) => oneof.encode(ident),
Field::Group(ref group) => group.encode(ident),
}
}
/// Returns an expression which evaluates to the result of merging a decoded
/// value into the field.
pub fn merge(&self, ident: TokenStream) -> TokenStream {
match *self {
Field::Scalar(ref scalar) => scalar.merge(ident),
Field::Message(ref message) => message.merge(ident),
Field::Map(ref map) => map.merge(ident),
Field::Oneof(ref oneof) => oneof.merge(ident),
Field::Group(ref group) => group.merge(ident),
}
}
/// Returns an expression which evaluates to the encoded length of the field.
pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
match *self {
Field::Scalar(ref scalar) => scalar.encoded_len(ident),
Field::Map(ref map) => map.encoded_len(ident),
Field::Message(ref msg) => msg.encoded_len(ident),
Field::Oneof(ref oneof) => oneof.encoded_len(ident),
Field::Group(ref group) => group.encoded_len(ident),
}
}
/// Returns a statement which clears the field.
pub fn clear(&self, ident: TokenStream) -> TokenStream {
match *self {
Field::Scalar(ref scalar) => scalar.clear(ident),
Field::Message(ref message) => message.clear(ident),
Field::Map(ref map) => map.clear(ident),
Field::Oneof(ref oneof) => oneof.clear(ident),
Field::Group(ref group) => group.clear(ident),
}
}
pub fn default(&self) -> TokenStream {
match *self {
Field::Scalar(ref scalar) => scalar.default(),
_ => quote!(::std::default::Default::default()),
}
}
/// Produces the fragment implementing debug for the given field.
pub fn debug(&self, ident: TokenStream) -> TokenStream {
match *self {
Field::Scalar(ref scalar) => {
let wrapper = scalar.debug(quote!(ScalarWrapper));
quote! {
{
#wrapper
ScalarWrapper(&#ident)
}
}
}
Field::Map(ref map) => {
let wrapper = map.debug(quote!(MapWrapper));
quote! {
{
#wrapper
MapWrapper(&#ident)
}
}
}
_ => quote!(&#ident),
}
}
pub fn methods(&self, ident: &Ident) -> Option<TokenStream> {
match *self {
Field::Scalar(ref scalar) => scalar.methods(ident),
Field::Map(ref map) => map.methods(ident),
_ => None,
}
}
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum Label {
/// An optional field.
Optional,
/// A required field.
Required,
/// A repeated field.
Repeated,
}
impl Label {
fn as_str(&self) -> &'static str {
match *self {
Label::Optional => "optional",
Label::Required => "required",
Label::Repeated => "repeated",
}
}
fn variants() -> slice::Iter<'static, Label> {
const VARIANTS: &'static [Label] = &[Label::Optional, Label::Required, Label::Repeated];
VARIANTS.iter()
}
/// Parses a string into a field label.
/// If the string doesn't match a field label, `None` is returned.
fn from_attr(attr: &Meta) -> Option<Label> {
if let Meta::Path(ref path) = *attr {
for &label in Label::variants() {
if path.is_ident(label.as_str()) {
return Some(label);
}
}
}
None
}
}
impl fmt::Debug for Label {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
impl fmt::Display for Label {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
/// Get the items belonging to the 'prost' list attribute, e.g. `#[prost(foo, bar="baz")]`.
pub(super) fn prost_attrs(attrs: Vec<Attribute>) -> Result<Vec<Meta>, Error> {
Ok(attrs
.iter()
.flat_map(Attribute::parse_meta)
.flat_map(|meta| match meta {
Meta::List(MetaList { path, nested, .. }) => {
if path.is_ident("prost") {
nested.into_iter().collect()
} else {
Vec::new()
}
}
_ => Vec::new(),
})
.flat_map(|attr| -> Result<_, _> {
match attr {
NestedMeta::Meta(attr) => Ok(attr),
NestedMeta::Lit(lit) => bail!("invalid prost attribute: {:?}", lit),
}
})
.collect())
}
pub fn set_option<T>(option: &mut Option<T>, value: T, message: &str) -> Result<(), Error>
where
T: fmt::Debug,
{
if let Some(ref existing) = *option {
bail!("{}: {:?} and {:?}", message, existing, value);
}
*option = Some(value);
Ok(())
}
pub fn set_bool(b: &mut bool, message: &str) -> Result<(), Error> {
if *b {
bail!("{}", message);
} else {
*b = true;
Ok(())
}
}
/// Unpacks an attribute into a (key, boolean) pair, returning the boolean value.
/// If the key doesn't match the attribute, `None` is returned.
fn bool_attr(key: &str, attr: &Meta) -> Result<Option<bool>, Error> {
if !attr.path().is_ident(key) {
return Ok(None);
}
match *attr {
Meta::Path(..) => Ok(Some(true)),
Meta::List(ref meta_list) => {
// TODO(rustlang/rust#23121): slice pattern matching would make this much nicer.
if meta_list.nested.len() == 1 {
if let NestedMeta::Lit(Lit::Bool(LitBool { value, .. })) = meta_list.nested[0] {
return Ok(Some(value));
}
}
bail!("invalid {} attribute", key);
}
Meta::NameValue(MetaNameValue {
lit: Lit::Str(ref lit),
..
}) => lit
.value()
.parse::<bool>()
.map_err(Error::from)
.map(Option::Some),
Meta::NameValue(MetaNameValue {
lit: Lit::Bool(LitBool { value, .. }),
..
}) => Ok(Some(value)),
_ => bail!("invalid {} attribute", key),
}
}
/// Checks if an attribute matches a word.
fn word_attr(key: &str, attr: &Meta) -> bool {
if let Meta::Path(ref path) = *attr {
path.is_ident(key)
} else {
false
}
}
pub(super) fn tag_attr(attr: &Meta) -> Result<Option<u32>, Error> {
if !attr.path().is_ident("tag") {
return Ok(None);
}
match *attr {
Meta::List(ref meta_list) => {
// TODO(rustlang/rust#23121): slice pattern matching would make this much nicer.
if meta_list.nested.len() == 1 {
if let NestedMeta::Lit(Lit::Int(ref lit)) = meta_list.nested[0] {
return Ok(Some(lit.base10_parse()?));
}
}
bail!("invalid tag attribute: {:?}", attr);
}
Meta::NameValue(ref meta_name_value) => match meta_name_value.lit {
Lit::Str(ref lit) => lit
.value()
.parse::<u32>()
.map_err(Error::from)
.map(Option::Some),
Lit::Int(ref lit) => Ok(Some(lit.base10_parse()?)),
_ => bail!("invalid tag attribute: {:?}", attr),
},
_ => bail!("invalid tag attribute: {:?}", attr),
}
}
fn tags_attr(attr: &Meta) -> Result<Option<Vec<u32>>, Error> {
if !attr.path().is_ident("tags") {
return Ok(None);
}
match *attr {
Meta::List(ref meta_list) => {
let mut tags = Vec::with_capacity(meta_list.nested.len());
for item in &meta_list.nested {
if let NestedMeta::Lit(Lit::Int(ref lit)) = *item {
tags.push(lit.base10_parse()?);
} else {
bail!("invalid tag attribute: {:?}", attr);
}
}
return Ok(Some(tags));
}
Meta::NameValue(MetaNameValue {
lit: Lit::Str(ref lit),
..
}) => lit
.value()
.split(',')
.map(|s| s.trim().parse::<u32>().map_err(Error::from))
.collect::<Result<Vec<u32>, _>>()
.map(|tags| Some(tags)),
_ => bail!("invalid tag attribute: {:?}", attr),
}
}

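prost_attrs above collects every meta item out of #[prost(...)] list attributes, and tag_attr accepts the tag as either a string literal or an integer literal. A hedged sketch showing both spellings side by side (names invented; assumes prost as a dependency):

// Both tag spellings below reach tag_attr: the string form goes through str::parse::<u32>,
// the integer form through base10_parse (illustrative, not part of the diff).
use prost::Message;

#[derive(Clone, PartialEq, Message)]
pub struct Tagged {
    #[prost(uint32, tag = "1")]
    pub a: u32,
    #[prost(uint32, tag = 2)]
    pub b: u32,
}

fn main() {
    let t = Tagged { a: 7, b: 9 };
    let mut buf: Vec<u8> = Vec::new();
    t.encode(&mut buf).unwrap();
    assert_eq!(Tagged::decode(buf.as_slice()).unwrap(), t);
}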

@ -1,99 +0,0 @@
use anyhow::{bail, Error};
use proc_macro2::TokenStream;
use quote::quote;
use syn::{parse_str, Lit, Meta, MetaNameValue, NestedMeta, Path};
use crate::field::{set_option, tags_attr};
#[derive(Clone)]
pub struct Field {
pub ty: Path,
pub tags: Vec<u32>,
}
impl Field {
pub fn new(attrs: &[Meta]) -> Result<Option<Field>, Error> {
let mut ty = None;
let mut tags = None;
let mut unknown_attrs = Vec::new();
for attr in attrs {
if attr.path().is_ident("oneof") {
let t = match *attr {
Meta::NameValue(MetaNameValue {
lit: Lit::Str(ref lit),
..
}) => parse_str::<Path>(&lit.value())?,
Meta::List(ref list) if list.nested.len() == 1 => {
// TODO(rustlang/rust#23121): slice pattern matching would make this much nicer.
if let NestedMeta::Meta(Meta::Path(ref path)) = list.nested[0] {
if let Some(ident) = path.get_ident() {
Path::from(ident.clone())
} else {
bail!("invalid oneof attribute: item must be an identifier");
}
} else {
bail!("invalid oneof attribute: item must be an identifier");
}
}
_ => bail!("invalid oneof attribute: {:?}", attr),
};
set_option(&mut ty, t, "duplicate oneof attribute")?;
} else if let Some(t) = tags_attr(attr)? {
set_option(&mut tags, t, "duplicate tags attributes")?;
} else {
unknown_attrs.push(attr);
}
}
let ty = match ty {
Some(ty) => ty,
None => return Ok(None),
};
match unknown_attrs.len() {
0 => (),
1 => bail!(
"unknown attribute for message field: {:?}",
unknown_attrs[0]
),
_ => bail!("unknown attributes for message field: {:?}", unknown_attrs),
}
let tags = match tags {
Some(tags) => tags,
None => bail!("oneof field is missing a tags attribute"),
};
Ok(Some(Field { ty: ty, tags: tags }))
}
/// Returns a statement which encodes the oneof field.
pub fn encode(&self, ident: TokenStream) -> TokenStream {
quote! {
if let Some(ref oneof) = #ident {
oneof.encode(buf)
}
}
}
/// Returns an expression which evaluates to the result of decoding the oneof field.
pub fn merge(&self, ident: TokenStream) -> TokenStream {
let ty = &self.ty;
quote! {
#ty::merge(#ident, tag, wire_type, buf, ctx)
}
}
/// Returns an expression which evaluates to the encoded length of the oneof field.
pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
let ty = &self.ty;
quote! {
#ident.as_ref().map_or(0, #ty::encoded_len)
}
}
pub fn clear(&self, ident: TokenStream) -> TokenStream {
quote!(#ident = ::std::option::Option::None)
}
}

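The oneof field above expects the message field to name the oneof enum and list its tags, while each enum variant carries its own tag. A hedged sketch of that pairing (type and field names invented; assumes prost as a dependency, which re-exports the Oneof derive):

// Illustrative oneof wiring matching the attributes parsed above (not part of the diff).
use prost::{Message, Oneof};

#[derive(Clone, PartialEq, Oneof)]
pub enum Shape {
    #[prost(uint32, tag = "1")]
    Circle(u32),
    #[prost(string, tag = "2")]
    Label(String),
}

#[derive(Clone, PartialEq, Message)]
pub struct Drawing {
    #[prost(oneof = "Shape", tags = "1, 2")]
    pub shape: Option<Shape>,
}

fn main() {
    let d = Drawing { shape: Some(Shape::Circle(4)) };
    let mut buf: Vec<u8> = Vec::new();
    d.encode(&mut buf).unwrap();
    assert_eq!(Drawing::decode(buf.as_slice()).unwrap(), d);
}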

@ -1,793 +0,0 @@
use std::convert::TryFrom;
use std::fmt;
use anyhow::{anyhow, bail, Error};
use proc_macro2::{Span, TokenStream};
use quote::{quote, ToTokens};
use syn::{
self, parse_str, Ident, Lit, LitByteStr, Meta, MetaList, MetaNameValue, NestedMeta, Path,
};
use crate::field::{bool_attr, set_option, tag_attr, Label};
/// A scalar protobuf field.
#[derive(Clone)]
pub struct Field {
pub ty: Ty,
pub kind: Kind,
pub tag: u32,
}
impl Field {
pub fn new(attrs: &[Meta], inferred_tag: Option<u32>) -> Result<Option<Field>, Error> {
let mut ty = None;
let mut label = None;
let mut packed = None;
let mut default = None;
let mut tag = None;
let mut unknown_attrs = Vec::new();
for attr in attrs {
if let Some(t) = Ty::from_attr(attr)? {
set_option(&mut ty, t, "duplicate type attributes")?;
} else if let Some(p) = bool_attr("packed", attr)? {
set_option(&mut packed, p, "duplicate packed attributes")?;
} else if let Some(t) = tag_attr(attr)? {
set_option(&mut tag, t, "duplicate tag attributes")?;
} else if let Some(l) = Label::from_attr(attr) {
set_option(&mut label, l, "duplicate label attributes")?;
} else if let Some(d) = DefaultValue::from_attr(attr)? {
set_option(&mut default, d, "duplicate default attributes")?;
} else {
unknown_attrs.push(attr);
}
}
let ty = match ty {
Some(ty) => ty,
None => return Ok(None),
};
match unknown_attrs.len() {
0 => (),
1 => bail!("unknown attribute: {:?}", unknown_attrs[0]),
_ => bail!("unknown attributes: {:?}", unknown_attrs),
}
let tag = match tag.or(inferred_tag) {
Some(tag) => tag,
None => bail!("missing tag attribute"),
};
let has_default = default.is_some();
let default = default.map_or_else(
|| Ok(DefaultValue::new(&ty)),
|lit| DefaultValue::from_lit(&ty, lit),
)?;
let kind = match (label, packed, has_default) {
(None, Some(true), _)
| (Some(Label::Optional), Some(true), _)
| (Some(Label::Required), Some(true), _) => {
bail!("packed attribute may only be applied to repeated fields");
}
(Some(Label::Repeated), Some(true), _) if !ty.is_numeric() => {
bail!("packed attribute may only be applied to numeric types");
}
(Some(Label::Repeated), _, true) => {
bail!("repeated fields may not have a default value");
}
(None, _, _) => Kind::Plain(default),
(Some(Label::Optional), _, _) => Kind::Optional(default),
(Some(Label::Required), _, _) => Kind::Required(default),
(Some(Label::Repeated), packed, false) if packed.unwrap_or(ty.is_numeric()) => {
Kind::Packed
}
(Some(Label::Repeated), _, false) => Kind::Repeated,
};
Ok(Some(Field {
ty: ty,
kind: kind,
tag: tag,
}))
}
pub fn new_oneof(attrs: &[Meta]) -> Result<Option<Field>, Error> {
if let Some(mut field) = Field::new(attrs, None)? {
match field.kind {
Kind::Plain(default) => {
field.kind = Kind::Required(default);
Ok(Some(field))
}
Kind::Optional(..) => bail!("invalid optional attribute on oneof field"),
Kind::Required(..) => bail!("invalid required attribute on oneof field"),
Kind::Packed | Kind::Repeated => bail!("invalid repeated attribute on oneof field"),
}
} else {
Ok(None)
}
}
pub fn encode(&self, ident: TokenStream) -> TokenStream {
let module = self.ty.module();
let encode_fn = match self.kind {
Kind::Plain(..) | Kind::Optional(..) | Kind::Required(..) => quote!(encode),
Kind::Repeated => quote!(encode_repeated),
Kind::Packed => quote!(encode_packed),
};
let encode_fn = quote!(::prost::encoding::#module::#encode_fn);
let tag = self.tag;
match self.kind {
Kind::Plain(ref default) => {
let default = default.typed();
quote! {
if #ident != #default {
#encode_fn(#tag, &#ident, buf);
}
}
}
Kind::Optional(..) => quote! {
if let ::std::option::Option::Some(ref value) = #ident {
#encode_fn(#tag, value, buf);
}
},
Kind::Required(..) | Kind::Repeated | Kind::Packed => quote! {
#encode_fn(#tag, &#ident, buf);
},
}
}
/// Returns an expression which evaluates to the result of merging a decoded
/// scalar value into the field.
pub fn merge(&self, ident: TokenStream) -> TokenStream {
let module = self.ty.module();
let merge_fn = match self.kind {
Kind::Plain(..) | Kind::Optional(..) | Kind::Required(..) => quote!(merge),
Kind::Repeated | Kind::Packed => quote!(merge_repeated),
};
let merge_fn = quote!(::prost::encoding::#module::#merge_fn);
match self.kind {
Kind::Plain(..) | Kind::Required(..) | Kind::Repeated | Kind::Packed => quote! {
#merge_fn(wire_type, #ident, buf, ctx)
},
Kind::Optional(..) => quote! {
#merge_fn(wire_type,
#ident.get_or_insert_with(Default::default),
buf,
ctx)
},
}
}
/// Returns an expression which evaluates to the encoded length of the field.
pub fn encoded_len(&self, ident: TokenStream) -> TokenStream {
let module = self.ty.module();
let encoded_len_fn = match self.kind {
Kind::Plain(..) | Kind::Optional(..) | Kind::Required(..) => quote!(encoded_len),
Kind::Repeated => quote!(encoded_len_repeated),
Kind::Packed => quote!(encoded_len_packed),
};
let encoded_len_fn = quote!(::prost::encoding::#module::#encoded_len_fn);
let tag = self.tag;
match self.kind {
Kind::Plain(ref default) => {
let default = default.typed();
quote! {
if #ident != #default {
#encoded_len_fn(#tag, &#ident)
} else {
0
}
}
}
Kind::Optional(..) => quote! {
#ident.as_ref().map_or(0, |value| #encoded_len_fn(#tag, value))
},
Kind::Required(..) | Kind::Repeated | Kind::Packed => quote! {
#encoded_len_fn(#tag, &#ident)
},
}
}
pub fn clear(&self, ident: TokenStream) -> TokenStream {
match self.kind {
Kind::Plain(ref default) | Kind::Required(ref default) => {
let default = default.typed();
match self.ty {
Ty::String | Ty::Bytes => quote!(#ident.clear()),
_ => quote!(#ident = #default),
}
}
Kind::Optional(_) => quote!(#ident = ::std::option::Option::None),
Kind::Repeated | Kind::Packed => quote!(#ident.clear()),
}
}
/// Returns an expression which evaluates to the default value of the field.
pub fn default(&self) -> TokenStream {
match self.kind {
Kind::Plain(ref value) | Kind::Required(ref value) => value.owned(),
Kind::Optional(_) => quote!(::std::option::Option::None),
Kind::Repeated | Kind::Packed => quote!(::std::vec::Vec::new()),
}
}
    /// An inner debug wrapper around the base type.
fn debug_inner(&self, wrap_name: TokenStream) -> TokenStream {
if let Ty::Enumeration(ref ty) = self.ty {
quote! {
struct #wrap_name<'a>(&'a i32);
impl<'a> ::std::fmt::Debug for #wrap_name<'a> {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match #ty::from_i32(*self.0) {
None => ::std::fmt::Debug::fmt(&self.0, f),
Some(en) => ::std::fmt::Debug::fmt(&en, f),
}
}
}
}
} else {
quote! {
fn #wrap_name<T>(v: T) -> T { v }
}
}
}
/// Returns a fragment for formatting the field `ident` in `Debug`.
pub fn debug(&self, wrapper_name: TokenStream) -> TokenStream {
let wrapper = self.debug_inner(quote!(Inner));
let inner_ty = self.ty.rust_type();
match self.kind {
Kind::Plain(_) | Kind::Required(_) => self.debug_inner(wrapper_name),
Kind::Optional(_) => quote! {
struct #wrapper_name<'a>(&'a ::std::option::Option<#inner_ty>);
impl<'a> ::std::fmt::Debug for #wrapper_name<'a> {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
#wrapper
::std::fmt::Debug::fmt(&self.0.as_ref().map(Inner), f)
}
}
},
Kind::Repeated | Kind::Packed => {
quote! {
struct #wrapper_name<'a>(&'a ::std::vec::Vec<#inner_ty>);
impl<'a> ::std::fmt::Debug for #wrapper_name<'a> {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
let mut vec_builder = f.debug_list();
for v in self.0 {
#wrapper
vec_builder.entry(&Inner(v));
}
vec_builder.finish()
}
}
}
}
}
}
/// Returns methods to embed in the message.
pub fn methods(&self, ident: &Ident) -> Option<TokenStream> {
let mut ident_str = ident.to_string();
if ident_str.starts_with("r#") {
ident_str = ident_str[2..].to_owned();
}
if let Ty::Enumeration(ref ty) = self.ty {
let set = Ident::new(&format!("set_{}", ident_str), Span::call_site());
let set_doc = format!("Sets `{}` to the provided enum value.", ident_str);
Some(match self.kind {
Kind::Plain(ref default) | Kind::Required(ref default) => {
let get_doc = format!(
"Returns the enum value of `{}`, \
or the default if the field is set to an invalid enum value.",
ident_str,
);
quote! {
#[doc=#get_doc]
pub fn #ident(&self) -> #ty {
#ty::from_i32(self.#ident).unwrap_or(#default)
}
#[doc=#set_doc]
pub fn #set(&mut self, value: #ty) {
self.#ident = value as i32;
}
}
}
Kind::Optional(ref default) => {
let get_doc = format!(
"Returns the enum value of `{}`, \
or the default if the field is unset or set to an invalid enum value.",
ident_str,
);
quote! {
#[doc=#get_doc]
pub fn #ident(&self) -> #ty {
self.#ident.and_then(#ty::from_i32).unwrap_or(#default)
}
#[doc=#set_doc]
pub fn #set(&mut self, value: #ty) {
self.#ident = ::std::option::Option::Some(value as i32);
}
}
}
Kind::Repeated | Kind::Packed => {
let iter_doc = format!(
"Returns an iterator which yields the valid enum values contained in `{}`.",
ident_str,
);
let push = Ident::new(&format!("push_{}", ident_str), Span::call_site());
let push_doc = format!("Appends the provided enum value to `{}`.", ident_str);
quote! {
#[doc=#iter_doc]
pub fn #ident(&self) -> ::std::iter::FilterMap<
::std::iter::Cloned<::std::slice::Iter<i32>>,
fn(i32) -> ::std::option::Option<#ty>,
> {
self.#ident.iter().cloned().filter_map(#ty::from_i32)
}
#[doc=#push_doc]
pub fn #push(&mut self, value: #ty) {
self.#ident.push(value as i32);
}
}
}
})
} else if let Kind::Optional(ref default) = self.kind {
let ty = self.ty.rust_ref_type();
let match_some = if self.ty.is_numeric() {
quote!(::std::option::Option::Some(val) => val,)
} else {
quote!(::std::option::Option::Some(ref val) => &val[..],)
};
let get_doc = format!(
"Returns the value of `{0}`, or the default value if `{0}` is unset.",
ident_str,
);
Some(quote! {
#[doc=#get_doc]
pub fn #ident(&self) -> #ty {
match self.#ident {
#match_some
::std::option::Option::None => #default,
}
}
})
} else {
None
}
}
}
/// A scalar protobuf field type.
#[derive(Clone, PartialEq, Eq)]
pub enum Ty {
Double,
Float,
Int32,
Int64,
Uint32,
Uint64,
Sint32,
Sint64,
Fixed32,
Fixed64,
Sfixed32,
Sfixed64,
Bool,
String,
Bytes,
Enumeration(Path),
}
impl Ty {
pub fn from_attr(attr: &Meta) -> Result<Option<Ty>, Error> {
let ty = match *attr {
Meta::Path(ref name) if name.is_ident("float") => Ty::Float,
Meta::Path(ref name) if name.is_ident("double") => Ty::Double,
Meta::Path(ref name) if name.is_ident("int32") => Ty::Int32,
Meta::Path(ref name) if name.is_ident("int64") => Ty::Int64,
Meta::Path(ref name) if name.is_ident("uint32") => Ty::Uint32,
Meta::Path(ref name) if name.is_ident("uint64") => Ty::Uint64,
Meta::Path(ref name) if name.is_ident("sint32") => Ty::Sint32,
Meta::Path(ref name) if name.is_ident("sint64") => Ty::Sint64,
Meta::Path(ref name) if name.is_ident("fixed32") => Ty::Fixed32,
Meta::Path(ref name) if name.is_ident("fixed64") => Ty::Fixed64,
Meta::Path(ref name) if name.is_ident("sfixed32") => Ty::Sfixed32,
Meta::Path(ref name) if name.is_ident("sfixed64") => Ty::Sfixed64,
Meta::Path(ref name) if name.is_ident("bool") => Ty::Bool,
Meta::Path(ref name) if name.is_ident("string") => Ty::String,
Meta::Path(ref name) if name.is_ident("bytes") => Ty::Bytes,
Meta::NameValue(MetaNameValue {
ref path,
lit: Lit::Str(ref l),
..
}) if path.is_ident("enumeration") => Ty::Enumeration(parse_str::<Path>(&l.value())?),
Meta::List(MetaList {
ref path,
ref nested,
..
}) if path.is_ident("enumeration") => {
// TODO(rustlang/rust#23121): slice pattern matching would make this much nicer.
if nested.len() == 1 {
if let NestedMeta::Meta(Meta::Path(ref path)) = nested[0] {
Ty::Enumeration(path.clone())
} else {
bail!("invalid enumeration attribute: item must be an identifier");
}
} else {
bail!("invalid enumeration attribute: only a single identifier is supported");
}
}
_ => return Ok(None),
};
Ok(Some(ty))
}
pub fn from_str(s: &str) -> Result<Ty, Error> {
let enumeration_len = "enumeration".len();
let error = Err(anyhow!("invalid type: {}", s));
let ty = match s.trim() {
"float" => Ty::Float,
"double" => Ty::Double,
"int32" => Ty::Int32,
"int64" => Ty::Int64,
"uint32" => Ty::Uint32,
"uint64" => Ty::Uint64,
"sint32" => Ty::Sint32,
"sint64" => Ty::Sint64,
"fixed32" => Ty::Fixed32,
"fixed64" => Ty::Fixed64,
"sfixed32" => Ty::Sfixed32,
"sfixed64" => Ty::Sfixed64,
"bool" => Ty::Bool,
"string" => Ty::String,
"bytes" => Ty::Bytes,
s if s.len() > enumeration_len && &s[..enumeration_len] == "enumeration" => {
let s = &s[enumeration_len..].trim();
match s.chars().next() {
Some('<') | Some('(') => (),
_ => return error,
}
match s.chars().next_back() {
Some('>') | Some(')') => (),
_ => return error,
}
Ty::Enumeration(parse_str::<Path>(s[1..s.len() - 1].trim())?)
}
_ => return error,
};
Ok(ty)
}
/// Returns the type as it appears in protobuf field declarations.
pub fn as_str(&self) -> &'static str {
match *self {
Ty::Double => "double",
Ty::Float => "float",
Ty::Int32 => "int32",
Ty::Int64 => "int64",
Ty::Uint32 => "uint32",
Ty::Uint64 => "uint64",
Ty::Sint32 => "sint32",
Ty::Sint64 => "sint64",
Ty::Fixed32 => "fixed32",
Ty::Fixed64 => "fixed64",
Ty::Sfixed32 => "sfixed32",
Ty::Sfixed64 => "sfixed64",
Ty::Bool => "bool",
Ty::String => "string",
Ty::Bytes => "bytes",
Ty::Enumeration(..) => "enum",
}
}
// TODO: rename to 'owned_type'.
pub fn rust_type(&self) -> TokenStream {
match *self {
Ty::String => quote!(::std::string::String),
Ty::Bytes => quote!(::std::vec::Vec<u8>),
_ => self.rust_ref_type(),
}
}
// TODO: rename to 'ref_type'
pub fn rust_ref_type(&self) -> TokenStream {
match *self {
Ty::Double => quote!(f64),
Ty::Float => quote!(f32),
Ty::Int32 => quote!(i32),
Ty::Int64 => quote!(i64),
Ty::Uint32 => quote!(u32),
Ty::Uint64 => quote!(u64),
Ty::Sint32 => quote!(i32),
Ty::Sint64 => quote!(i64),
Ty::Fixed32 => quote!(u32),
Ty::Fixed64 => quote!(u64),
Ty::Sfixed32 => quote!(i32),
Ty::Sfixed64 => quote!(i64),
Ty::Bool => quote!(bool),
Ty::String => quote!(&str),
Ty::Bytes => quote!(&[u8]),
Ty::Enumeration(..) => quote!(i32),
}
}
pub fn module(&self) -> Ident {
match *self {
Ty::Enumeration(..) => Ident::new("int32", Span::call_site()),
_ => Ident::new(self.as_str(), Span::call_site()),
}
}
    /// Returns `true` unless the scalar type is length delimited (i.e., `string` or `bytes`).
pub fn is_numeric(&self) -> bool {
*self != Ty::String && *self != Ty::Bytes
}
}
impl fmt::Debug for Ty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
impl fmt::Display for Ty {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.as_str())
}
}
/// The kind of a scalar Protobuf field: plain, optional, required, repeated, or packed.
#[derive(Clone)]
pub enum Kind {
/// A plain proto3 scalar field.
Plain(DefaultValue),
/// An optional scalar field.
Optional(DefaultValue),
/// A required proto2 scalar field.
Required(DefaultValue),
/// A repeated scalar field.
Repeated,
/// A packed repeated scalar field.
Packed,
}
/// Scalar Protobuf field default value.
#[derive(Clone, Debug)]
pub enum DefaultValue {
F64(f64),
F32(f32),
I32(i32),
I64(i64),
U32(u32),
U64(u64),
Bool(bool),
String(String),
Bytes(Vec<u8>),
Enumeration(TokenStream),
Path(Path),
}
impl DefaultValue {
pub fn from_attr(attr: &Meta) -> Result<Option<Lit>, Error> {
if !attr.path().is_ident("default") {
return Ok(None);
} else if let Meta::NameValue(ref name_value) = *attr {
Ok(Some(name_value.lit.clone()))
} else {
bail!("invalid default value attribute: {:?}", attr)
}
}
pub fn from_lit(ty: &Ty, lit: Lit) -> Result<DefaultValue, Error> {
let is_i32 = *ty == Ty::Int32 || *ty == Ty::Sint32 || *ty == Ty::Sfixed32;
let is_i64 = *ty == Ty::Int64 || *ty == Ty::Sint64 || *ty == Ty::Sfixed64;
let is_u32 = *ty == Ty::Uint32 || *ty == Ty::Fixed32;
let is_u64 = *ty == Ty::Uint64 || *ty == Ty::Fixed64;
let empty_or_is = |expected, actual: &str| expected == actual || actual.is_empty();
let default = match lit {
Lit::Int(ref lit) if is_i32 && empty_or_is("i32", lit.suffix()) => {
DefaultValue::I32(lit.base10_parse()?)
}
Lit::Int(ref lit) if is_i64 && empty_or_is("i64", lit.suffix()) => {
DefaultValue::I64(lit.base10_parse()?)
}
Lit::Int(ref lit) if is_u32 && empty_or_is("u32", lit.suffix()) => {
DefaultValue::U32(lit.base10_parse()?)
}
Lit::Int(ref lit) if is_u64 && empty_or_is("u64", lit.suffix()) => {
DefaultValue::U64(lit.base10_parse()?)
}
Lit::Float(ref lit) if *ty == Ty::Float && empty_or_is("f32", lit.suffix()) => {
DefaultValue::F32(lit.base10_parse()?)
}
Lit::Int(ref lit) if *ty == Ty::Float => DefaultValue::F32(lit.base10_parse()?),
Lit::Float(ref lit) if *ty == Ty::Double && empty_or_is("f64", lit.suffix()) => {
DefaultValue::F64(lit.base10_parse()?)
}
Lit::Int(ref lit) if *ty == Ty::Double => DefaultValue::F64(lit.base10_parse()?),
Lit::Bool(ref lit) if *ty == Ty::Bool => DefaultValue::Bool(lit.value),
Lit::Str(ref lit) if *ty == Ty::String => DefaultValue::String(lit.value()),
Lit::ByteStr(ref lit) if *ty == Ty::Bytes => DefaultValue::Bytes(lit.value()),
Lit::Str(ref lit) => {
let value = lit.value();
let value = value.trim();
if let Ty::Enumeration(ref path) = *ty {
let variant = Ident::new(value, Span::call_site());
return Ok(DefaultValue::Enumeration(quote!(#path::#variant)));
}
// Parse special floating point values.
if *ty == Ty::Float {
match value {
"inf" => {
return Ok(DefaultValue::Path(parse_str::<Path>(
"::std::f32::INFINITY",
)?));
}
"-inf" => {
return Ok(DefaultValue::Path(parse_str::<Path>(
"::std::f32::NEG_INFINITY",
)?));
}
"nan" => {
return Ok(DefaultValue::Path(parse_str::<Path>("::std::f32::NAN")?));
}
_ => (),
}
}
if *ty == Ty::Double {
match value {
"inf" => {
return Ok(DefaultValue::Path(parse_str::<Path>(
"::std::f64::INFINITY",
)?));
}
"-inf" => {
return Ok(DefaultValue::Path(parse_str::<Path>(
"::std::f64::NEG_INFINITY",
)?));
}
"nan" => {
return Ok(DefaultValue::Path(parse_str::<Path>("::std::f64::NAN")?));
}
_ => (),
}
}
                // Rust doesn't have negative literals, so they have to be parsed specially.
if value.chars().next() == Some('-') {
if let Ok(lit) = syn::parse_str::<Lit>(&value[1..]) {
match lit {
Lit::Int(ref lit) if is_i32 && empty_or_is("i32", lit.suffix()) => {
// Initially parse into an i64, so that i32::MIN does not overflow.
let value: i64 = -lit.base10_parse()?;
return Ok(i32::try_from(value).map(DefaultValue::I32)?);
}
Lit::Int(ref lit) if is_i64 && empty_or_is("i64", lit.suffix()) => {
// Initially parse into an i128, so that i64::MIN does not overflow.
let value: i128 = -lit.base10_parse()?;
return Ok(i64::try_from(value).map(DefaultValue::I64)?);
}
Lit::Float(ref lit)
if *ty == Ty::Float && empty_or_is("f32", lit.suffix()) =>
{
return Ok(DefaultValue::F32(-lit.base10_parse()?));
}
Lit::Float(ref lit)
if *ty == Ty::Double && empty_or_is("f64", lit.suffix()) =>
{
return Ok(DefaultValue::F64(-lit.base10_parse()?));
}
Lit::Int(ref lit) if *ty == Ty::Float && lit.suffix().is_empty() => {
return Ok(DefaultValue::F32(-lit.base10_parse()?));
}
Lit::Int(ref lit) if *ty == Ty::Double && lit.suffix().is_empty() => {
return Ok(DefaultValue::F64(-lit.base10_parse()?));
}
_ => (),
}
}
}
match syn::parse_str::<Lit>(&value) {
Ok(Lit::Str(_)) => (),
Ok(lit) => return DefaultValue::from_lit(ty, lit),
_ => (),
}
bail!("invalid default value: {}", quote!(#value));
}
_ => bail!("invalid default value: {}", quote!(#lit)),
};
Ok(default)
}
pub fn new(ty: &Ty) -> DefaultValue {
match *ty {
Ty::Float => DefaultValue::F32(0.0),
Ty::Double => DefaultValue::F64(0.0),
Ty::Int32 | Ty::Sint32 | Ty::Sfixed32 => DefaultValue::I32(0),
Ty::Int64 | Ty::Sint64 | Ty::Sfixed64 => DefaultValue::I64(0),
Ty::Uint32 | Ty::Fixed32 => DefaultValue::U32(0),
Ty::Uint64 | Ty::Fixed64 => DefaultValue::U64(0),
Ty::Bool => DefaultValue::Bool(false),
Ty::String => DefaultValue::String(String::new()),
Ty::Bytes => DefaultValue::Bytes(Vec::new()),
Ty::Enumeration(ref path) => {
return DefaultValue::Enumeration(quote!(#path::default()))
}
}
}
pub fn owned(&self) -> TokenStream {
match *self {
DefaultValue::String(ref value) if value.is_empty() => {
quote!(::std::string::String::new())
}
DefaultValue::String(ref value) => quote!(#value.to_owned()),
DefaultValue::Bytes(ref value) if value.is_empty() => quote!(::std::vec::Vec::new()),
DefaultValue::Bytes(ref value) => {
let lit = LitByteStr::new(value, Span::call_site());
quote!(#lit.to_owned())
}
ref other => other.typed(),
}
}
pub fn typed(&self) -> TokenStream {
if let DefaultValue::Enumeration(_) = *self {
quote!(#self as i32)
} else {
quote!(#self)
}
}
}
impl ToTokens for DefaultValue {
fn to_tokens(&self, tokens: &mut TokenStream) {
match *self {
DefaultValue::F64(value) => value.to_tokens(tokens),
DefaultValue::F32(value) => value.to_tokens(tokens),
DefaultValue::I32(value) => value.to_tokens(tokens),
DefaultValue::I64(value) => value.to_tokens(tokens),
DefaultValue::U32(value) => value.to_tokens(tokens),
DefaultValue::U64(value) => value.to_tokens(tokens),
DefaultValue::Bool(value) => value.to_tokens(tokens),
DefaultValue::String(ref value) => value.to_tokens(tokens),
DefaultValue::Bytes(ref value) => {
LitByteStr::new(value, Span::call_site()).to_tokens(tokens)
}
DefaultValue::Enumeration(ref value) => value.to_tokens(tokens),
DefaultValue::Path(ref value) => value.to_tokens(tokens),
}
}
}

476
third_party/rust/prost-derive/src/lib.rs (vendored)

@@ -1,476 +0,0 @@
#![doc(html_root_url = "https://docs.rs/prost-derive/0.6.1")]
// The `quote!` macro requires deep recursion.
#![recursion_limit = "4096"]
extern crate proc_macro;
use anyhow::{bail, Error};
use itertools::Itertools;
use quote::quote;
use proc_macro::TokenStream;
use proc_macro2::Span;
use syn::{
punctuated::Punctuated, Data, DataEnum, DataStruct, DeriveInput, Expr, Fields, FieldsNamed,
FieldsUnnamed, Ident, Variant,
};
mod field;
use crate::field::Field;
fn try_message(input: TokenStream) -> Result<TokenStream, Error> {
let input: DeriveInput = syn::parse(input)?;
let ident = input.ident;
let variant_data = match input.data {
Data::Struct(variant_data) => variant_data,
Data::Enum(..) => bail!("Message can not be derived for an enum"),
Data::Union(..) => bail!("Message can not be derived for a union"),
};
if !input.generics.params.is_empty() || input.generics.where_clause.is_some() {
bail!("Message may not be derived for generic type");
}
let fields = match variant_data {
DataStruct {
fields: Fields::Named(FieldsNamed { named: fields, .. }),
..
}
| DataStruct {
fields:
Fields::Unnamed(FieldsUnnamed {
unnamed: fields, ..
}),
..
} => fields.into_iter().collect(),
DataStruct {
fields: Fields::Unit,
..
} => Vec::new(),
};
let mut next_tag: u32 = 1;
let mut fields = fields
.into_iter()
.enumerate()
.flat_map(|(idx, field)| {
let field_ident = field
.ident
.unwrap_or_else(|| Ident::new(&idx.to_string(), Span::call_site()));
match Field::new(field.attrs, Some(next_tag)) {
Ok(Some(field)) => {
next_tag = field.tags().iter().max().map(|t| t + 1).unwrap_or(next_tag);
Some(Ok((field_ident, field)))
}
Ok(None) => None,
Err(err) => Some(Err(
err.context(format!("invalid message field {}.{}", ident, field_ident))
)),
}
})
.collect::<Result<Vec<_>, _>>()?;
// We want Debug to be in declaration order
let unsorted_fields = fields.clone();
// Sort the fields by tag number so that fields will be encoded in tag order.
// TODO: This encodes oneof fields in the position of their lowest tag,
// regardless of the currently occupied variant, is that consequential?
// See: https://developers.google.com/protocol-buffers/docs/encoding#order
fields.sort_by_key(|&(_, ref field)| field.tags().into_iter().min().unwrap());
let fields = fields;
let mut tags = fields
.iter()
.flat_map(|&(_, ref field)| field.tags())
.collect::<Vec<_>>();
let num_tags = tags.len();
tags.sort();
tags.dedup();
if tags.len() != num_tags {
bail!("message {} has fields with duplicate tags", ident);
}
let encoded_len = fields
.iter()
.map(|&(ref field_ident, ref field)| field.encoded_len(quote!(self.#field_ident)));
let encode = fields
.iter()
.map(|&(ref field_ident, ref field)| field.encode(quote!(self.#field_ident)));
let merge = fields.iter().map(|&(ref field_ident, ref field)| {
let merge = field.merge(quote!(value));
let tags = field
.tags()
.into_iter()
.map(|tag| quote!(#tag))
.intersperse(quote!(|));
quote! {
#(#tags)* => {
let mut value = &mut self.#field_ident;
#merge.map_err(|mut error| {
error.push(STRUCT_NAME, stringify!(#field_ident));
error
})
},
}
});
let struct_name = if fields.is_empty() {
quote!()
} else {
quote!(
const STRUCT_NAME: &'static str = stringify!(#ident);
)
};
// TODO
let is_struct = true;
let clear = fields
.iter()
.map(|&(ref field_ident, ref field)| field.clear(quote!(self.#field_ident)));
let default = fields.iter().map(|&(ref field_ident, ref field)| {
let value = field.default();
quote!(#field_ident: #value,)
});
let methods = fields
.iter()
.flat_map(|&(ref field_ident, ref field)| field.methods(field_ident))
.collect::<Vec<_>>();
let methods = if methods.is_empty() {
quote!()
} else {
quote! {
#[allow(dead_code)]
impl #ident {
#(#methods)*
}
}
};
let debugs = unsorted_fields.iter().map(|&(ref field_ident, ref field)| {
let wrapper = field.debug(quote!(self.#field_ident));
let call = if is_struct {
quote!(builder.field(stringify!(#field_ident), &wrapper))
} else {
quote!(builder.field(&wrapper))
};
quote! {
let builder = {
let wrapper = #wrapper;
#call
};
}
});
let debug_builder = if is_struct {
quote!(f.debug_struct(stringify!(#ident)))
} else {
quote!(f.debug_tuple(stringify!(#ident)))
};
let expanded = quote! {
impl ::prost::Message for #ident {
#[allow(unused_variables)]
fn encode_raw<B>(&self, buf: &mut B) where B: ::prost::bytes::BufMut {
#(#encode)*
}
#[allow(unused_variables)]
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: ::prost::encoding::WireType,
buf: &mut B,
ctx: ::prost::encoding::DecodeContext,
) -> ::std::result::Result<(), ::prost::DecodeError>
where B: ::prost::bytes::Buf {
#struct_name
match tag {
#(#merge)*
_ => ::prost::encoding::skip_field(wire_type, tag, buf, ctx),
}
}
#[inline]
fn encoded_len(&self) -> usize {
0 #(+ #encoded_len)*
}
fn clear(&mut self) {
#(#clear;)*
}
}
impl Default for #ident {
fn default() -> #ident {
#ident {
#(#default)*
}
}
}
impl ::std::fmt::Debug for #ident {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
let mut builder = #debug_builder;
#(#debugs;)*
builder.finish()
}
}
#methods
};
Ok(expanded.into())
}
#[proc_macro_derive(Message, attributes(prost))]
pub fn message(input: TokenStream) -> TokenStream {
try_message(input).unwrap()
}
fn try_enumeration(input: TokenStream) -> Result<TokenStream, Error> {
let input: DeriveInput = syn::parse(input)?;
let ident = input.ident;
if !input.generics.params.is_empty() || input.generics.where_clause.is_some() {
bail!("Message may not be derived for generic type");
}
let punctuated_variants = match input.data {
Data::Enum(DataEnum { variants, .. }) => variants,
Data::Struct(_) => bail!("Enumeration can not be derived for a struct"),
Data::Union(..) => bail!("Enumeration can not be derived for a union"),
};
// Map the variants into 'fields'.
let mut variants: Vec<(Ident, Expr)> = Vec::new();
for Variant {
ident,
fields,
discriminant,
..
} in punctuated_variants
{
match fields {
Fields::Unit => (),
Fields::Named(_) | Fields::Unnamed(_) => {
bail!("Enumeration variants may not have fields")
}
}
match discriminant {
Some((_, expr)) => variants.push((ident, expr)),
            None => bail!("Enumeration variants must have a discriminant"),
}
}
if variants.is_empty() {
panic!("Enumeration must have at least one variant");
}
let default = variants[0].0.clone();
let is_valid = variants
.iter()
.map(|&(_, ref value)| quote!(#value => true));
let from = variants.iter().map(
|&(ref variant, ref value)| quote!(#value => ::std::option::Option::Some(#ident::#variant)),
);
let is_valid_doc = format!("Returns `true` if `value` is a variant of `{}`.", ident);
let from_i32_doc = format!(
"Converts an `i32` to a `{}`, or `None` if `value` is not a valid variant.",
ident
);
let expanded = quote! {
impl #ident {
#[doc=#is_valid_doc]
pub fn is_valid(value: i32) -> bool {
match value {
#(#is_valid,)*
_ => false,
}
}
#[doc=#from_i32_doc]
pub fn from_i32(value: i32) -> ::std::option::Option<#ident> {
match value {
#(#from,)*
_ => ::std::option::Option::None,
}
}
}
impl ::std::default::Default for #ident {
fn default() -> #ident {
#ident::#default
}
}
impl ::std::convert::From<#ident> for i32 {
fn from(value: #ident) -> i32 {
value as i32
}
}
};
Ok(expanded.into())
}
#[proc_macro_derive(Enumeration, attributes(prost))]
pub fn enumeration(input: TokenStream) -> TokenStream {
try_enumeration(input).unwrap()
}
fn try_oneof(input: TokenStream) -> Result<TokenStream, Error> {
let input: DeriveInput = syn::parse(input)?;
let ident = input.ident;
let variants = match input.data {
Data::Enum(DataEnum { variants, .. }) => variants,
Data::Struct(..) => bail!("Oneof can not be derived for a struct"),
Data::Union(..) => bail!("Oneof can not be derived for a union"),
};
if !input.generics.params.is_empty() || input.generics.where_clause.is_some() {
bail!("Message may not be derived for generic type");
}
// Map the variants into 'fields'.
let mut fields: Vec<(Ident, Field)> = Vec::new();
for Variant {
attrs,
ident: variant_ident,
fields: variant_fields,
..
} in variants
{
let variant_fields = match variant_fields {
Fields::Unit => Punctuated::new(),
Fields::Named(FieldsNamed { named: fields, .. })
| Fields::Unnamed(FieldsUnnamed {
unnamed: fields, ..
}) => fields,
};
if variant_fields.len() != 1 {
bail!("Oneof enum variants must have a single field");
}
match Field::new_oneof(attrs)? {
Some(field) => fields.push((variant_ident, field)),
None => bail!("invalid oneof variant: oneof variants may not be ignored"),
}
}
let mut tags = fields
.iter()
.flat_map(|&(ref variant_ident, ref field)| -> Result<u32, Error> {
if field.tags().len() > 1 {
bail!(
"invalid oneof variant {}::{}: oneof variants may only have a single tag",
ident,
variant_ident
);
}
Ok(field.tags()[0])
})
.collect::<Vec<_>>();
tags.sort();
tags.dedup();
if tags.len() != fields.len() {
panic!("invalid oneof {}: variants have duplicate tags", ident);
}
let encode = fields.iter().map(|&(ref variant_ident, ref field)| {
let encode = field.encode(quote!(*value));
quote!(#ident::#variant_ident(ref value) => { #encode })
});
let merge = fields.iter().map(|&(ref variant_ident, ref field)| {
let tag = field.tags()[0];
let merge = field.merge(quote!(value));
quote! {
#tag => {
match field {
::std::option::Option::Some(#ident::#variant_ident(ref mut value)) => {
#merge
},
_ => {
let mut owned_value = ::std::default::Default::default();
let value = &mut owned_value;
#merge.map(|_| *field = ::std::option::Option::Some(#ident::#variant_ident(owned_value)))
},
}
}
}
});
let encoded_len = fields.iter().map(|&(ref variant_ident, ref field)| {
let encoded_len = field.encoded_len(quote!(*value));
quote!(#ident::#variant_ident(ref value) => #encoded_len)
});
let debug = fields.iter().map(|&(ref variant_ident, ref field)| {
let wrapper = field.debug(quote!(*value));
quote!(#ident::#variant_ident(ref value) => {
let wrapper = #wrapper;
f.debug_tuple(stringify!(#variant_ident))
.field(&wrapper)
.finish()
})
});
let expanded = quote! {
impl #ident {
pub fn encode<B>(&self, buf: &mut B) where B: ::prost::bytes::BufMut {
match *self {
#(#encode,)*
}
}
pub fn merge<B>(
field: &mut ::std::option::Option<#ident>,
tag: u32,
wire_type: ::prost::encoding::WireType,
buf: &mut B,
ctx: ::prost::encoding::DecodeContext,
) -> ::std::result::Result<(), ::prost::DecodeError>
where B: ::prost::bytes::Buf {
match tag {
#(#merge,)*
_ => unreachable!(concat!("invalid ", stringify!(#ident), " tag: {}"), tag),
}
}
#[inline]
pub fn encoded_len(&self) -> usize {
match *self {
#(#encoded_len,)*
}
}
}
impl ::std::fmt::Debug for #ident {
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
match *self {
#(#debug,)*
}
}
}
};
Ok(expanded.into())
}
#[proc_macro_derive(Oneof, attributes(prost))]
pub fn oneof(input: TokenStream) -> TokenStream {
try_oneof(input).unwrap()
}

1
third_party/rust/prost/.cargo-checksum.json (vendored)

@@ -1 +0,0 @@
{"files":{"Cargo.toml":"25f581813a108fec2782701f7a94e4b65c2ff7473ea81d4c8cf9f1ada0d55ffa","LICENSE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","README.md":"1ac86558e6510f6870e2b4f77933756adb866ff8ec89bd29d6904a9d1bb44b7b","benches/varint.rs":"e387c86b72077859cd779ec7e695a2797d7bdb3e23a45b0822257234a1a8fdb3","prepare-release.sh":"67f42e0649d33269c88272e69a9bf48d02de42126a9942ac6298b6995adea8df","publish-release.sh":"a9ff9a5a65a6772fbe115b64051b1284b0b81825f839a65594d6834c53d7a78f","src/encoding.rs":"0a907de22fe59b7655325cdf01891ad2dd8718d98e46b9e7f60dd85629b9584c","src/error.rs":"269c303ee68828a24768354ff820d4dc94c2409d84bb2233ac93f4be3e2e5d9d","src/lib.rs":"c123928cc7b629ad83e4571cdd9f8467872eb7867ccc50d4f5e4209a49250ad7","src/message.rs":"bd59bb3988c20be163e1246bef4f3e92fa7b8c3c88e6d3f7715379f23de195ee","src/types.rs":"a427a28ab0c0908e3d0754e5824327b5b6b09c2357b1e25c7d34fa523c3d2348"},"package":"ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212"}

63
third_party/rust/prost/Cargo.toml (vendored)

@@ -1,63 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "prost"
version = "0.6.1"
authors = ["Dan Burkert <dan@danburkert.com>"]
description = "A Protocol Buffers implementation for the Rust Language."
documentation = "https://docs.rs/prost"
readme = "README.md"
keywords = ["protobuf", "serialization"]
categories = ["encoding"]
license = "Apache-2.0"
repository = "https://github.com/danburkert/prost"
[profile.bench]
debug = true
[lib]
bench = false
[[bench]]
name = "varint"
harness = false
[dependencies.bytes]
version = "0.5"
[dependencies.prost-derive]
version = "0.6.1"
optional = true
[dev-dependencies.criterion]
version = "0.3"
[dev-dependencies.env_logger]
version = "0.7"
default-features = false
[dev-dependencies.log]
version = "0.4"
[dev-dependencies.quickcheck]
version = "0.9"
[dev-dependencies.rand]
version = "0.7"
[features]
default = ["prost-derive"]
no-recursion-limit = []
[badges.appveyor]
repository = "danburkert/prost"
[badges.travis-ci]
repository = "danburkert/prost"

201
third_party/rust/prost/LICENSE (vendored)

@@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

345
third_party/rust/prost/README.md (vendored)

@@ -1,345 +0,0 @@
[![Build Status](https://travis-ci.org/danburkert/prost.svg?branch=master)](https://travis-ci.org/danburkert/prost)
[![Windows Build Status](https://ci.appveyor.com/api/projects/status/24rpba3x2vqe8lje/branch/master?svg=true)](https://ci.appveyor.com/project/danburkert/prost/branch/master)
[![Documentation](https://docs.rs/prost/badge.svg)](https://docs.rs/prost/)
[![Crate](https://img.shields.io/crates/v/prost.svg)](https://crates.io/crates/prost)
[![Dependency Status](https://deps.rs/repo/github/danburkert/prost/status.svg)](https://deps.rs/repo/github/danburkert/prost)
# *PROST!*
`prost` is a [Protocol Buffers](https://developers.google.com/protocol-buffers/)
implementation for the [Rust Language](https://www.rust-lang.org/). `prost`
generates simple, idiomatic Rust code from `proto2` and `proto3` files.
Compared to other Protocol Buffers implementations, `prost`
* Generates simple, idiomatic, and readable Rust types by taking advantage of
Rust `derive` attributes.
* Retains comments from `.proto` files in generated Rust code.
* Allows existing Rust types (not generated from a `.proto`) to be serialized
and deserialized by adding attributes.
* Uses the [`bytes::{Buf, BufMut}`](https://github.com/carllerche/bytes)
abstractions for serialization instead of `std::io::{Read, Write}`.
* Respects the Protobuf `package` specifier when organizing generated code
into Rust modules.
* Preserves unknown enum values during deserialization.
* Does not include support for runtime reflection or message descriptors.
## Using `prost` in a Cargo Project
First, add `prost` and its public dependencies to your `Cargo.toml`:
```
[dependencies]
prost = "0.6"
# Only necessary if using Protobuf well-known types:
prost-types = "0.6"
```
The recommended way to add `.proto` compilation to a Cargo project is to use the
`prost-build` library. See the [`prost-build` documentation](prost-build) for
more details and examples.
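As a minimal sketch (the file name `src/items.proto` and its `package items;` declaration are assumptions for illustration, not taken from this README), a `build.rs` that uses `prost-build` looks roughly like this:

```rust
// build.rs -- assumes a `prost-build` entry under [build-dependencies]
// and a hypothetical proto file at src/items.proto declaring `package items;`.
fn main() -> Result<(), std::io::Error> {
    // Generates $OUT_DIR/items.rs from the listed .proto files.
    prost_build::compile_protos(&["src/items.proto"], &["src/"])?;
    Ok(())
}
```

The generated module can then be pulled into the crate with something like `include!(concat!(env!("OUT_DIR"), "/items.rs"));` inside a `pub mod items { ... }` block.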
## Generated Code
`prost` generates Rust code from source `.proto` files using the `proto2` or
`proto3` syntax. `prost`'s goal is to make the generated code as simple as
possible.
### Packages
All `.proto` files used with `prost` must contain a
[`package` specifier][package]. `prost` will translate the Protobuf package into
a Rust module. For example, given the `package` specifier:
[package]: https://developers.google.com/protocol-buffers/docs/proto#packages
```proto
package foo.bar;
```
All Rust types generated from the file will be in the `foo::bar` module.
### Messages
Given a simple message declaration:
```proto
// Sample message.
message Foo {
}
```
`prost` will generate the following Rust struct:
```rust
/// Sample message.
#[derive(Clone, Debug, PartialEq, Message)]
pub struct Foo {
}
```
### Fields
Fields in Protobuf messages are translated into Rust as public struct fields of the
corresponding type.
#### Scalar Values
Scalar value types are converted as follows:
| Protobuf Type | Rust Type |
| --- | --- |
| `double` | `f64` |
| `float` | `f32` |
| `int32` | `i32` |
| `int64` | `i64` |
| `uint32` | `u32` |
| `uint64` | `u64` |
| `sint32` | `i32` |
| `sint64` | `i64` |
| `fixed32` | `u32` |
| `fixed64` | `u64` |
| `sfixed32` | `i32` |
| `sfixed64` | `i64` |
| `bool` | `bool` |
| `string` | `String` |
| `bytes` | `Vec<u8>` |
#### Enumerations
All `.proto` enumeration types convert to the Rust `i32` type. Additionally,
each enumeration type gets a corresponding Rust `enum` type, with helper methods
to convert `i32` values to the enum type. The `enum` type isn't used directly as
a field, because the Protobuf spec mandates that enumeration values are 'open',
and decoding unrecognized enumeration values must be possible.
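As a rough sketch (the message and field names below are invented; the attribute syntax mirrors the generated-code example later in this README), an enumeration field keeps the raw `i32` on the struct and gains typed helpers:

```rust
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Call {
    // The field itself stays an i32 so unknown values survive decoding.
    #[prost(enumeration = "PhoneType", tag = "1")]
    pub phone_type: i32,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, ::prost::Enumeration)]
pub enum PhoneType {
    Mobile = 0,
    Home = 1,
}

fn example(call: &mut Call) {
    call.set_phone_type(PhoneType::Home);           // generated setter stores the i32 value
    assert_eq!(call.phone_type(), PhoneType::Home); // generated getter converts back to the enum
    assert!(PhoneType::is_valid(call.phone_type));  // the raw i32 remains accessible
}
```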
#### Field Modifiers
Protobuf scalar value and enumeration message fields can have a modifier
depending on the Protobuf version. Modifiers change the corresponding type of
the Rust field:
| `.proto` Version | Modifier | Rust Type |
| --- | --- | --- |
| `proto2` | `optional` | `Option<T>` |
| `proto2` | `required` | `T` |
| `proto3` | default | `T` |
| `proto2`/`proto3` | repeated | `Vec<T>` |
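A small sketch of how those modifiers surface in the struct (the type and field names below are invented for illustration; the attributes follow prost's derive conventions):

```rust
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Modifiers {
    // proto2 `optional int32` -> Option<i32>
    #[prost(int32, optional, tag = "1")]
    pub maybe: ::std::option::Option<i32>,
    // proto2 `required int32` -> i32
    #[prost(int32, required, tag = "2")]
    pub always: i32,
    // `repeated int32` -> Vec<i32> (packed encoding for numeric types)
    #[prost(int32, repeated, tag = "3")]
    pub many: ::std::vec::Vec<i32>,
}
```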
#### Map Fields
Map fields are converted to a Rust `HashMap` with key and value type converted
from the Protobuf key and value types.
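For instance (a sketch with made-up names, assuming prost's `map` field attribute), a `map<string, int32> scores = 1;` field comes out roughly as:

```rust
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Stats {
    // Keys and values follow the scalar conversion table above.
    #[prost(map = "string, int32", tag = "1")]
    pub scores: ::std::collections::HashMap<::std::string::String, i32>,
}
```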
#### Message Fields
Message fields are converted to the corresponding struct type. The table of
field modifiers above applies to message fields, except that `proto3` message
fields without a modifier (the default) will be wrapped in an `Option`.
Typically message fields are unboxed. `prost` will automatically box a message
field if the field type and the parent type are recursively nested in order to
avoid an infinite sized struct.
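A rough sketch of both behaviours (names invented; the `boxed` attribute is assumed from prost's derive support for recursive messages):

```rust
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Payload {
    #[prost(string, tag = "1")]
    pub data: ::std::string::String,
}

#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Node {
    // proto3 message field without a modifier: wrapped in Option.
    #[prost(message, optional, tag = "1")]
    pub payload: ::std::option::Option<Payload>,
    // Recursive nesting: the field is boxed so `Node` has a finite size.
    #[prost(message, optional, boxed, tag = "2")]
    pub next: ::std::option::Option<::std::boxed::Box<Node>>,
}
```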
#### Oneof Fields
Oneof fields convert to a Rust enum. Protobuf `oneof`s types are not named, so
`prost` uses the name of the `oneof` field for the resulting Rust enum, and
defines the enum in a module under the struct. For example, a `proto3` message
such as:
```proto
message Foo {
oneof widget {
int32 quux = 1;
string bar = 2;
}
}
```
generates the following Rust[1]:
```rust
pub struct Foo {
pub widget: Option<foo::Widget>,
}
pub mod foo {
pub enum Widget {
Quux(i32),
Bar(String),
}
}
```
`oneof` fields are always wrapped in an `Option`.
[1] Annotations have been elided for clarity. See below for a full example.
### Services
`prost-build` allows a custom code-generator to be used for processing `service`
definitions. This can be used to output Rust traits according to an
application's specific needs.
### Generated Code Example
Example `.proto` file:
```proto
syntax = "proto3";
package tutorial;
message Person {
string name = 1;
int32 id = 2; // Unique ID number for this person.
string email = 3;
enum PhoneType {
MOBILE = 0;
HOME = 1;
WORK = 2;
}
message PhoneNumber {
string number = 1;
PhoneType type = 2;
}
repeated PhoneNumber phones = 4;
}
// Our address book file is just one of these.
message AddressBook {
repeated Person people = 1;
}
```
and the generated Rust code (`tutorial.rs`):
```rust
#[derive(Clone, Debug, PartialEq, Message)]
pub struct Person {
#[prost(string, tag="1")]
pub name: String,
/// Unique ID number for this person.
#[prost(int32, tag="2")]
pub id: i32,
#[prost(string, tag="3")]
pub email: String,
#[prost(message, repeated, tag="4")]
pub phones: Vec<person::PhoneNumber>,
}
pub mod person {
#[derive(Clone, Debug, PartialEq, Message)]
pub struct PhoneNumber {
#[prost(string, tag="1")]
pub number: String,
#[prost(enumeration="PhoneType", tag="2")]
pub type_: i32,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Enumeration)]
pub enum PhoneType {
Mobile = 0,
Home = 1,
Work = 2,
}
}
/// Our address book file is just one of these.
#[derive(Clone, Debug, PartialEq, Message)]
pub struct AddressBook {
#[prost(message, repeated, tag="1")]
pub people: Vec<Person>,
}
```
## Serializing Existing Types
`prost` uses a custom derive macro to handle encoding and decoding types, which
means that if your existing Rust type is compatible with Protobuf types, you can
serialize and deserialize it by adding the appropriate derive and field
annotations.
Currently the best documentation on adding annotations is to look at the
generated code examples above.
### Tag Inference for Existing Types
Prost automatically infers tags for struct fields: fields are tagged sequentially
in the order they are declared, starting with `1`.
To skip tags that have been reserved, or to leave gaps between tag values,
set the `tag` attribute on the first field after the gap to the tag number to
skip to. The following fields are then tagged sequentially starting from the
next number.
```rust
#[derive(Clone, Debug, PartialEq, Message)]
struct Person {
pub id: String, // tag=1
// NOTE: Old "name" field has been removed
// pub name: String, // tag=2 (Removed)
#[prost(tag="6")]
pub given_name: String, // tag=6
pub family_name: String, // tag=7
pub formatted_name: String, // tag=8
#[prost(tag="3")]
pub age: u32, // tag=3
pub height: u32, // tag=4
#[prost(enumeration="Gender")]
pub gender: i32, // tag=5
// NOTE: Skip to less commonly occurring fields
#[prost(tag="16")]
pub name_prefix: String, // tag=16 (eg. mr/mrs/ms)
pub name_suffix: String, // tag=17 (eg. jr/esq)
pub maiden_name: String, // tag=18
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Enumeration)]
pub enum Gender {
Unknown = 0,
Female = 1,
Male = 2,
}
```
## FAQ
1. **Could `prost` be implemented as a serializer for [Serde](https://serde.rs/)?**
Probably not, however I would like to hear from a Serde expert on the matter.
There are two complications with trying to serialize Protobuf messages with
Serde:
- Protobuf fields require a numbered tag, and currently there appears to be no
mechanism suitable for this in `serde`.
- The mapping of Protobuf type to Rust type is not 1-to-1. As a result,
trait-based approaches to dispatching don't work very well. Example: six
different Protobuf field types correspond to a Rust `Vec<i32>`: `repeated
int32`, `repeated sint32`, `repeated sfixed32`, and their packed
counterparts.
But it is possible to place `serde` derive tags onto the generated types, so
the same structure can support both `prost` and `Serde`.
2. **I get errors when trying to run `cargo test` on MacOS**
If the errors are about missing `autoreconf` or similar, you can probably fix
them by running
```
brew install automake
brew install libtool
```
## License
`prost` is distributed under the terms of the Apache License (Version 2.0).
See [LICENSE](LICENSE) for details.
Copyright 2017 Dan Burkert

95
third_party/rust/prost/benches/varint.rs (vendored)

@@ -1,95 +0,0 @@
use std::mem;
use bytes::Buf;
use criterion::{Benchmark, Criterion, Throughput};
use prost::encoding::{decode_varint, encode_varint, encoded_len_varint};
use rand::{rngs::StdRng, seq::SliceRandom, SeedableRng};
fn benchmark_varint(criterion: &mut Criterion, name: &str, mut values: Vec<u64>) {
// Shuffle the values in a stable order.
values.shuffle(&mut StdRng::seed_from_u64(0));
let encoded_len = values
.iter()
.cloned()
.map(encoded_len_varint)
.sum::<usize>() as u64;
let decoded_len = (values.len() * mem::size_of::<u64>()) as u64;
let encode_values = values.clone();
let encode = Benchmark::new("encode", move |b| {
let mut buf = Vec::<u8>::with_capacity(encode_values.len() * 10);
b.iter(|| {
buf.clear();
for &value in &encode_values {
encode_varint(value, &mut buf);
}
criterion::black_box(&buf);
})
})
.throughput(Throughput::Bytes(encoded_len));
let mut decode_values = values.clone();
let decode = Benchmark::new("decode", move |b| {
let mut buf = Vec::with_capacity(decode_values.len() * 10);
for &value in &decode_values {
encode_varint(value, &mut buf);
}
b.iter(|| {
decode_values.clear();
let mut buf = &mut buf.as_slice();
while buf.has_remaining() {
let value = decode_varint(&mut buf).unwrap();
decode_values.push(value);
}
criterion::black_box(&decode_values);
})
})
.throughput(Throughput::Bytes(decoded_len));
let encoded_len_values = values.clone();
let encoded_len = Benchmark::new("encoded_len", move |b| {
b.iter(|| {
let mut sum = 0;
for &value in &encoded_len_values {
sum += encoded_len_varint(value);
}
criterion::black_box(sum);
})
})
.throughput(Throughput::Bytes(decoded_len));
let name = format!("varint/{}", name);
criterion
.bench(&name, encode)
.bench(&name, decode)
.bench(&name, encoded_len);
}
fn main() {
let mut criterion = Criterion::default().configure_from_args();
// Benchmark encoding and decoding 100 small (1 byte) varints.
benchmark_varint(&mut criterion, "small", (0..100).collect());
// Benchmark encoding and decoding 100 medium (5 byte) varints.
benchmark_varint(&mut criterion, "medium", (1 << 28..).take(100).collect());
// Benchmark encoding and decoding 100 large (10 byte) varints.
benchmark_varint(&mut criterion, "large", (1 << 63..).take(100).collect());
// Benchmark encoding and decoding 100 varints of mixed width (average 5.5 bytes).
benchmark_varint(
&mut criterion,
"mixed",
(0..10)
.flat_map(move |width| {
let exponent = width * 7;
(0..10).map(move |offset| offset + (1 << exponent))
})
.collect(),
);
criterion.final_summary();
}

47
third_party/rust/prost/prepare-release.sh (vendored)

@@ -1,47 +0,0 @@
#!/bin/bash
# Script which automates modifying source version fields, and creating a release
# commit and tag. The commit and tag are not automatically pushed, nor are the
# crates published (see publish-release.sh).
set -ex
if [ "$#" -ne 1 ]
then
echo "Usage: $0 <version>"
exit 1
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
VERSION="$1"
MINOR="$( echo ${VERSION} | cut -d\. -f1-2 )"
VERSION_MATCHER="([a-z0-9\\.-]+)"
PROST_CRATE_MATCHER="(prost|prost-[a-z]+)"
# Update the README.md.
sed -i -E "s/${PROST_CRATE_MATCHER} = \"${VERSION_MATCHER}\"/\1 = \"${MINOR}\"/" "$DIR/README.md"
# Update html_root_url attributes.
sed -i -E "s~html_root_url = \"https://docs\.rs/${PROST_CRATE_MATCHER}/$VERSION_MATCHER\"~html_root_url = \"https://docs.rs/\1/${VERSION}\"~" \
"$DIR/src/lib.rs" \
"$DIR/prost-derive/src/lib.rs" \
"$DIR/prost-build/src/lib.rs" \
"$DIR/prost-types/src/lib.rs"
# Update Cargo.toml version fields.
sed -i -E "s/^version = \"${VERSION_MATCHER}\"$/version = \"${VERSION}\"/" \
"$DIR/Cargo.toml" \
"$DIR/prost-derive/Cargo.toml" \
"$DIR/prost-build/Cargo.toml" \
"$DIR/prost-types/Cargo.toml"
# Update Cargo.toml dependency versions.
sed -i -E "s/^${PROST_CRATE_MATCHER} = \{ version = \"${VERSION_MATCHER}\"/\1 = { version = \"${VERSION}\"/" \
"$DIR/Cargo.toml" \
"$DIR/prost-derive/Cargo.toml" \
"$DIR/prost-build/Cargo.toml" \
"$DIR/prost-types/Cargo.toml"
git commit -a -m "release ${VERSION}"
git tag -a "v${VERSION}" -m "release ${VERSION}"

26
third_party/rust/prost/publish-release.sh (vendored)

@@ -1,26 +0,0 @@
#!/bin/bash
# Script which automates publishing a crates.io release of the prost crates.
set -ex
if [ "$#" -ne 0 ]
then
echo "Usage: $0"
exit 1
fi
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
CRATES=( \
"prost-derive" \
"." \
"prost-types" \
"prost-build" \
)
for CRATE in "${CRATES[@]}"; do
pushd "$DIR/$CRATE"
cargo publish
popd
done

1650
third_party/rust/prost/src/encoding.rs (vendored)

Diff not shown because of its large size.

112
third_party/rust/prost/src/error.rs (vendored)

@@ -1,112 +0,0 @@
//! Protobuf encoding and decoding errors.
use std::borrow::Cow;
use std::error;
use std::fmt;
use std::io;
/// A Protobuf message decoding error.
///
/// `DecodeError` indicates that the input buffer does not contain a valid
/// Protobuf message. The error details should be considered 'best effort': in
/// general it is not possible to exactly pinpoint why data is malformed.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct DecodeError {
/// A 'best effort' root cause description.
description: Cow<'static, str>,
/// A stack of (message, field) name pairs, which identify the specific
/// message type and field where decoding failed. The stack contains an
/// entry per level of nesting.
stack: Vec<(&'static str, &'static str)>,
}
impl DecodeError {
/// Creates a new `DecodeError` with a 'best effort' root cause description.
///
/// Meant to be used only by `Message` implementations.
#[doc(hidden)]
pub fn new<S>(description: S) -> DecodeError
where
S: Into<Cow<'static, str>>,
{
DecodeError {
description: description.into(),
stack: Vec::new(),
}
}
/// Pushes a (message, field) name location pair on to the location stack.
///
/// Meant to be used only by `Message` implementations.
#[doc(hidden)]
pub fn push(&mut self, message: &'static str, field: &'static str) {
self.stack.push((message, field));
}
}
impl fmt::Display for DecodeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("failed to decode Protobuf message: ")?;
for &(message, field) in &self.stack {
write!(f, "{}.{}: ", message, field)?;
}
f.write_str(&self.description)
}
}
impl error::Error for DecodeError {}
impl From<DecodeError> for io::Error {
fn from(error: DecodeError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidData, error)
}
}
/// A Protobuf message encoding error.
///
/// `EncodeError` always indicates that a message failed to encode because the
/// provided buffer had insufficient capacity. Message encoding is otherwise
/// infallible.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct EncodeError {
required: usize,
remaining: usize,
}
impl EncodeError {
/// Creates a new `EncodeError`.
pub(crate) fn new(required: usize, remaining: usize) -> EncodeError {
EncodeError {
required,
remaining,
}
}
/// Returns the required buffer capacity to encode the message.
pub fn required_capacity(&self) -> usize {
self.required
}
/// Returns the remaining length in the provided buffer at the time of encoding.
pub fn remaining(&self) -> usize {
self.remaining
}
}
impl fmt::Display for EncodeError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"failed to encode Protobuf messsage; insufficient buffer capacity (required: {}, remaining: {})",
self.required, self.remaining
)
}
}
impl error::Error for EncodeError {}
impl From<EncodeError> for io::Error {
fn from(error: EncodeError) -> io::Error {
io::Error::new(io::ErrorKind::InvalidInput, error)
}
}
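As a quick illustration of the Display format above (the message and field names here are made up, purely for the sketch), a nested decode failure surfaces like this:
let mut err = DecodeError::new("invalid varint");
err.push("Envelope", "payload"); // hypothetical (message, field) location
// Display writes the fixed prefix, then each stack entry, then the description:
assert_eq!(
    err.to_string(),
    "failed to decode Protobuf message: Envelope.payload: invalid varint"
);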

87
third_party/rust/prost/src/lib.rs vendored

@ -1,87 +0,0 @@
#![doc(html_root_url = "https://docs.rs/prost/0.6.1")]
mod error;
mod message;
mod types;
#[doc(hidden)]
pub mod encoding;
pub use crate::error::{DecodeError, EncodeError};
pub use crate::message::Message;
use bytes::{Buf, BufMut};
use crate::encoding::{decode_varint, encode_varint, encoded_len_varint};
// See `encoding::DecodeContext` for more info.
// 100 is the default recursion limit in the C++ implementation.
#[cfg(not(feature = "no-recursion-limit"))]
const RECURSION_LIMIT: u32 = 100;
/// Encodes a length delimiter to the buffer.
///
/// See [Message.encode_length_delimited] for more info.
///
/// An error will be returned if the buffer does not have sufficient capacity to encode the
/// delimiter.
pub fn encode_length_delimiter<B>(length: usize, buf: &mut B) -> Result<(), EncodeError>
where
B: BufMut,
{
let length = length as u64;
let required = encoded_len_varint(length);
let remaining = buf.remaining_mut();
if required > remaining {
return Err(EncodeError::new(required, remaining));
}
encode_varint(length, buf);
Ok(())
}
/// Returns the encoded length of a length delimiter.
///
/// Applications may use this method to ensure sufficient buffer capacity before calling
/// `encode_length_delimiter`. The returned size will be between 1 and 10, inclusive.
pub fn length_delimiter_len(length: usize) -> usize {
encoded_len_varint(length as u64)
}
/// Decodes a length delimiter from the buffer.
///
/// This method allows the length delimiter to be decoded independently of the message, when the
/// message is encoded with [Message.encode_length_delimited].
///
/// An error may be returned in two cases:
///
/// * If the supplied buffer contains fewer than 10 bytes, then an error indicates that more
/// input is required to decode the full delimiter.
/// * If the supplied buffer contains more than 10 bytes, then the buffer contains an invalid
/// delimiter, and typically the buffer should be considered corrupt.
pub fn decode_length_delimiter<B>(mut buf: B) -> Result<usize, DecodeError>
where
B: Buf,
{
let length = decode_varint(&mut buf)?;
if length > usize::max_value() as u64 {
return Err(DecodeError::new(
"length delimiter exceeds maximum usize value",
));
}
Ok(length as usize)
}
// Re-export #[derive(Message, Enumeration, Oneof)].
// Based on serde's equivalent re-export [1], but enabled by default.
//
// [1]: https://github.com/serde-rs/serde/blob/v1.0.89/serde/src/lib.rs#L245-L256
#[cfg(feature = "prost-derive")]
#[allow(unused_imports)]
#[macro_use]
extern crate prost_derive;
#[cfg(feature = "prost-derive")]
#[doc(hidden)]
pub use bytes;
#[cfg(feature = "prost-derive")]
#[doc(hidden)]
pub use prost_derive::*;
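A minimal sketch of how the delimiter helpers above fit together, assuming the `bytes` crate's `BufMut` impl for `Vec<u8>` and `Buf` impl for `&[u8]`:
use prost::{decode_length_delimiter, encode_length_delimiter, length_delimiter_len};

fn delimiter_round_trip() -> Result<(), prost::DecodeError> {
    let len = 300usize;
    // A varint for 300 needs two bytes; length_delimiter_len reports that up front.
    let mut buf: Vec<u8> = Vec::with_capacity(length_delimiter_len(len));
    // Vec<u8> grows on demand, so encoding into it cannot run out of capacity.
    encode_length_delimiter(len, &mut buf).expect("insufficient capacity");
    assert_eq!(decode_length_delimiter(&buf[..])?, len);
    Ok(())
}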

166
third_party/rust/prost/src/message.rs vendored

@ -1,166 +0,0 @@
use std::fmt::Debug;
use std::usize;
use bytes::{Buf, BufMut};
use crate::encoding::{
decode_key, encode_varint, encoded_len_varint, message, DecodeContext, WireType,
};
use crate::DecodeError;
use crate::EncodeError;
/// A Protocol Buffers message.
pub trait Message: Debug + Send + Sync {
/// Encodes the message to a buffer.
///
/// This method will panic if the buffer has insufficient capacity.
///
/// Meant to be used only by `Message` implementations.
#[doc(hidden)]
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
Self: Sized;
/// Decodes a field from a buffer, and merges it into `self`.
///
/// Meant to be used only by `Message` implementations.
#[doc(hidden)]
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
Self: Sized;
/// Returns the encoded length of the message without a length delimiter.
fn encoded_len(&self) -> usize;
/// Encodes the message to a buffer.
///
/// An error will be returned if the buffer does not have sufficient capacity.
fn encode<B>(&self, buf: &mut B) -> Result<(), EncodeError>
where
B: BufMut,
Self: Sized,
{
let required = self.encoded_len();
let remaining = buf.remaining_mut();
if required > remaining {
return Err(EncodeError::new(required, remaining));
}
self.encode_raw(buf);
Ok(())
}
/// Encodes the message with a length-delimiter to a buffer.
///
/// An error will be returned if the buffer does not have sufficient capacity.
fn encode_length_delimited<B>(&self, buf: &mut B) -> Result<(), EncodeError>
where
B: BufMut,
Self: Sized,
{
let len = self.encoded_len();
let required = len + encoded_len_varint(len as u64);
let remaining = buf.remaining_mut();
if required > remaining {
return Err(EncodeError::new(required, remaining));
}
encode_varint(len as u64, buf);
self.encode_raw(buf);
Ok(())
}
/// Decodes an instance of the message from a buffer.
///
/// The entire buffer will be consumed.
fn decode<B>(mut buf: B) -> Result<Self, DecodeError>
where
B: Buf,
Self: Default,
{
let mut message = Self::default();
Self::merge(&mut message, &mut buf).map(|_| message)
}
/// Decodes a length-delimited instance of the message from the buffer.
fn decode_length_delimited<B>(buf: B) -> Result<Self, DecodeError>
where
B: Buf,
Self: Default,
{
let mut message = Self::default();
message.merge_length_delimited(buf)?;
Ok(message)
}
/// Decodes an instance of the message from a buffer, and merges it into `self`.
///
/// The entire buffer will be consumed.
fn merge<B>(&mut self, mut buf: B) -> Result<(), DecodeError>
where
B: Buf,
Self: Sized,
{
let ctx = DecodeContext::default();
while buf.has_remaining() {
let (tag, wire_type) = decode_key(&mut buf)?;
self.merge_field(tag, wire_type, &mut buf, ctx.clone())?;
}
Ok(())
}
/// Decodes a length-delimited instance of the message from buffer, and
/// merges it into `self`.
fn merge_length_delimited<B>(&mut self, mut buf: B) -> Result<(), DecodeError>
where
B: Buf,
Self: Sized,
{
message::merge(
WireType::LengthDelimited,
self,
&mut buf,
DecodeContext::default(),
)
}
/// Clears the message, resetting all fields to their default.
fn clear(&mut self);
}
impl<M> Message for Box<M>
where
M: Message,
{
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
(**self).encode_raw(buf)
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
(**self).merge_field(tag, wire_type, buf, ctx)
}
fn encoded_len(&self) -> usize {
(**self).encoded_len()
}
fn clear(&mut self) {
(**self).clear()
}
}
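For orientation, a sketch of how this trait is normally used together with the `prost-derive` re-exports from `lib.rs`; the `Person` type is invented for the example, with field attributes in the same style as the generated code elsewhere in this tree:
use prost::Message;

#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Person {
    #[prost(string, tag = "1")]
    pub name: std::string::String,
    #[prost(uint32, tag = "2")]
    pub id: u32,
}

fn round_trip(person: &Person) -> Result<Person, prost::DecodeError> {
    // encoded_len gives the exact size, so the Vec never reallocates here.
    let mut buf: Vec<u8> = Vec::with_capacity(person.encoded_len());
    person.encode(&mut buf).expect("Vec<u8> has effectively unlimited capacity");
    // decode consumes the whole buffer and builds a fresh Person.
    Person::decode(&buf[..])
}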

383
third_party/rust/prost/src/types.rs vendored

@ -1,383 +0,0 @@
//! Protocol Buffers well-known wrapper types.
//!
//! This module provides implementations of `Message` for Rust standard library types which
//! correspond to a Protobuf well-known wrapper type. The remaining well-known types are defined in
//! the `prost-types` crate in order to avoid a cyclic dependency between `prost` and
//! `prost-build`.
use ::bytes::{Buf, BufMut};
use crate::{
encoding::{
bool, bytes, double, float, int32, int64, skip_field, string, uint32, uint64,
DecodeContext, WireType,
},
DecodeError, Message,
};
/// `google.protobuf.BoolValue`
impl Message for bool {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self {
bool::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
bool::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self {
2
} else {
0
}
}
fn clear(&mut self) {
*self = false;
}
}
/// `google.protobuf.UInt32Value`
impl Message for u32 {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self != 0 {
uint32::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
uint32::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 {
uint32::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.UInt64Value`
impl Message for u64 {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self != 0 {
uint64::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
uint64::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 {
uint64::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.Int32Value`
impl Message for i32 {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self != 0 {
int32::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
int32::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 {
int32::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.Int64Value`
impl Message for i64 {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self != 0 {
int64::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
int64::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self != 0 {
int64::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
*self = 0;
}
}
/// `google.protobuf.FloatValue`
impl Message for f32 {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self != 0.0 {
float::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
float::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self != 0.0 {
float::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
*self = 0.0;
}
}
/// `google.protobuf.DoubleValue`
impl Message for f64 {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if *self != 0.0 {
double::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
double::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if *self != 0.0 {
double::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
*self = 0.0;
}
}
/// `google.protobuf.StringValue`
impl Message for String {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if !self.is_empty() {
string::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
string::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if !self.is_empty() {
string::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
self.clear();
}
}
/// `google.protobuf.BytesValue`
impl Message for Vec<u8> {
fn encode_raw<B>(&self, buf: &mut B)
where
B: BufMut,
{
if !self.is_empty() {
bytes::encode(1, self, buf)
}
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
if tag == 1 {
bytes::merge(wire_type, self, buf, ctx)
} else {
skip_field(wire_type, tag, buf, ctx)
}
}
fn encoded_len(&self) -> usize {
if !self.is_empty() {
bytes::encoded_len(1, self)
} else {
0
}
}
fn clear(&mut self) {
self.clear();
}
}
/// `google.protobuf.Empty`
impl Message for () {
fn encode_raw<B>(&self, _buf: &mut B)
where
B: BufMut,
{
}
fn merge_field<B>(
&mut self,
tag: u32,
wire_type: WireType,
buf: &mut B,
ctx: DecodeContext,
) -> Result<(), DecodeError>
where
B: Buf,
{
skip_field(wire_type, tag, buf, ctx)
}
fn encoded_len(&self) -> usize {
0
}
fn clear(&mut self) {}
}
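In practice this means plain Rust scalars can stand in for the corresponding wrapper messages directly; a small sketch (again relying on the `bytes` impls for `Vec<u8>` and `&[u8]`):
use prost::Message;

fn wrapper_round_trip() -> Result<(), prost::DecodeError> {
    // `u32` acts as `google.protobuf.UInt32Value`: field 1, varint encoded.
    let value: u32 = 42;
    let mut buf: Vec<u8> = Vec::new();
    value.encode(&mut buf).expect("Vec<u8> never lacks capacity");
    assert_eq!(u32::decode(&buf[..])?, 42);
    // The default value is simply omitted on the wire.
    assert_eq!(0u32.encoded_len(), 0);
    Ok(())
}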


@ -1 +1 @@
{"files":{"Cargo.toml":"206fef066f785b22aa1239362d1340b966807af10d43e611b99dad72194b23b3","src/lib.rs":"729e562be4e63ec7db2adc00753a019ae77c11ce82637a893ea18122580c3c98","src/rusqlite_support.rs":"827d314605d8c741efdf238a0780a891c88bc56026a3e6dcfa534772a4852fb3","src/serde_support.rs":"519b5eb59ca7be555d522f2186909db969069dc9586a5fe4047d4ec176b2368a"},"package":null}
{"files":{"Cargo.toml":"fec1d023581c5e34b5669c1b42efd11819eba4c3c29eca1f6095f6044a1fa5ae","src/lib.rs":"729e562be4e63ec7db2adc00753a019ae77c11ce82637a893ea18122580c3c98","src/rusqlite_support.rs":"827d314605d8c741efdf238a0780a891c88bc56026a3e6dcfa534772a4852fb3","src/serde_support.rs":"519b5eb59ca7be555d522f2186909db969069dc9586a5fe4047d4ec176b2368a"},"package":null}

4
third_party/rust/sync-guid/Cargo.toml vendored

@ -7,7 +7,7 @@ edition = "2018"
[dependencies]
rusqlite = { version = "0.23.1", optional = true }
serde = { version = "1", optional = true }
serde = { version = "1.0.104", optional = true }
rand = { version = "0.7", optional = true }
base64 = { version = "0.12.0", optional = true }
@ -19,4 +19,4 @@ serde_support = ["serde"]
default = ["serde_support"]
[dev-dependencies]
serde_test = "1"
serde_test = "1.0.104"


@ -1 +1 @@
{"files":{"Cargo.toml":"317a24dd2667266ebed53a3a6b65b0c0840d0f96ce362a543af154ab862b0ce6","README.md":"396105211d8ce7f40b05d8062d7ab55d99674555f3ac81c061874ae26656ed7e","src/bridged_engine.rs":"dffaea14d677bae1d95305b90b809cdb0b64e4fa889f1e3c3f4b5d85609991d6","src/changeset.rs":"442aa92b5130ec0f8f2b0054acb399c547380e0060015cbf4ca7a72027440d54","src/client.rs":"6be4f550ade823fafc350c5490e031f90a4af833a9bba9739b05568464255a74","src/lib.rs":"a64802fb56b1fd066c4cfdf18874347e80fc9ef4a1975bdbbd76541b0fa1744c","src/payload.rs":"09db1a444e7893990a4f03cb16263b9c15abc9e48ec4f1343227be1b490865a5","src/request.rs":"9e656ec487e53c7485643687e605d73bb25e138056e920d6f4b7d63fc6a8c460","src/server_timestamp.rs":"43d1b98a90e55e49380a0b66c209c9eb393e2aeaa27d843a4726d93cdd4cea02","src/store.rs":"10e215dd24270b6bec10903ac1d5274ce997eb437134f43be7de44e36fb9d1e4","src/telemetry.rs":"027befb099a6fcded3457f7e566296548a0898ff613267190621856b9ef288f6"},"package":null}
{"files":{"Cargo.toml":"656c4c4af39bcf924098be33996360250f9610ee3a4090b8152b68bdad03c46e","README.md":"396105211d8ce7f40b05d8062d7ab55d99674555f3ac81c061874ae26656ed7e","src/bridged_engine.rs":"dffaea14d677bae1d95305b90b809cdb0b64e4fa889f1e3c3f4b5d85609991d6","src/changeset.rs":"442aa92b5130ec0f8f2b0054acb399c547380e0060015cbf4ca7a72027440d54","src/client.rs":"6be4f550ade823fafc350c5490e031f90a4af833a9bba9739b05568464255a74","src/lib.rs":"a64802fb56b1fd066c4cfdf18874347e80fc9ef4a1975bdbbd76541b0fa1744c","src/payload.rs":"09db1a444e7893990a4f03cb16263b9c15abc9e48ec4f1343227be1b490865a5","src/request.rs":"9e656ec487e53c7485643687e605d73bb25e138056e920d6f4b7d63fc6a8c460","src/server_timestamp.rs":"43d1b98a90e55e49380a0b66c209c9eb393e2aeaa27d843a4726d93cdd4cea02","src/store.rs":"10e215dd24270b6bec10903ac1d5274ce997eb437134f43be7de44e36fb9d1e4","src/telemetry.rs":"027befb099a6fcded3457f7e566296548a0898ff613267190621856b9ef288f6"},"package":null}

6
third_party/rust/sync15-traits/Cargo.toml vendored

@ -10,11 +10,11 @@ random-guid = ["sync-guid/random"]
[dependencies]
sync-guid = { path = "../guid" }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
log = "0.4"
ffi-support = "0.4"
url = "2.1"
failure = "0.1"
failure = "0.1.6"
interrupt-support = { path = "../interrupt" }


@ -1 +0,0 @@
{"files":{"Cargo.toml":"9fd0a19d4dd0ebec89c118e09d430fd75b178513d3d63034acb72ab16b199e4a","README.md":"a6856d0f86aaade17cb9fa61c153aca085903d0676fae953022aeab235996cb7","src/backend.rs":"ee89fbb451ffafa82b561cf12cfbef59cd25acdcf5d5953a17099eb8dff4ec24","src/backend/ffi.rs":"f040c9dd47c8a6834a0895ee8b3ff2f252ed984c58332cda278939000780cdd2","src/error.rs":"129aa1f35a1435dc7a1f8a04eb59ef6ebc2e2229614209aa16463f7e2e01c042","src/fetch_msg_types.proto":"ba1aee0b6aaaec42fe20d3431c11dec20984a7c1278c039b90d63677d993db34","src/headers.rs":"2a666eb19b65274dd1e8bdd4c7074619f6a59c62f8df54caeead2f081800e5f2","src/headers/name.rs":"d6b54cb134f3c72e7dd24b7009d34e207a4a6c3fa48b2557287f38f7e8d643b0","src/lib.rs":"d710e4e7ebe2f01e4439f4fe7dd5cd1153c98a0438c6cb266ac32501743d53ab","src/mozilla.appservices.httpconfig.protobuf.rs":"d6e0873c1e4a0e5117ea4f9e37e1621dd5050a4159fae6672364aa60da517d7a","src/settings.rs":"5da12ad4a407d50999ffa499bf2ab27904af03d32f61c2f63b3f86e7b9883b7a"},"package":null}

25
third_party/rust/viaduct/Cargo.toml vendored

@ -1,25 +0,0 @@
[package]
name = "viaduct"
version = "0.1.0"
authors = ["Thom Chiovoloni <tchiovoloni@mozilla.com>"]
edition = "2018"
license = "MPL-2.0"
exclude = ["/android", "/ios"]
[lib]
crate-type = ["lib"]
[features]
default = []
[dependencies]
failure = "0.1"
failure_derive = "0.1"
url = "2.1"
log = "0.4"
serde = "1"
serde_json = "1"
once_cell = "1.3.1"
prost = "0.6.1"
prost-derive = "0.6.1"
ffi-support = "0.4"

59
third_party/rust/viaduct/README.md vendored

@ -1,59 +0,0 @@
# Viaduct
Viaduct is our HTTP request library, which can make requests either via a
rust-based (reqwest) networking stack (used on iOS and for local desktop use,
for tests and the like), or using a stack that calls a function passed into it
over the FFI (on android).
For usage info, you can run `cargo +nightly doc -p viaduct` (the `+nightly` is
optional, although some intra-doc links require it); the generated docs include several examples.
## Android/FFI Backend overview
On Android, the backend works as follows:
1. During megazord initialization, we are passed a `Lazy<Client>` (`Client` comes
from the [concept-fetch](https://github.com/mozilla-mobile/android-components/tree/master/components/concept/fetch)
android component, and `Lazy` is from the Kotlin stdlib).
- It also sets a flag indicating that, even if the FFI backend never gets
fully initialized (e.g. with a callback), we should error rather than use
the reqwest backend (which should not be compiled in; we've had trouble
ensuring this in the past, but at this point we have checks in CI to
ensure it is not present).
2. At this point, a JNA `Callback` instance is created and passed into Rust.
- This serves to proxy the request made by Rust to the `Client`.
- The `Callback` instance is never allowed to be GCed.
- To Rust, it's just an `extern "C"` function pointer that gets stored in an
atomic variable and can never be unset.
3. When Rust makes a request:
1. We serialize the request info into a protobuf record
2. This record is passed into the function pointer we should have by this
point (erroring if it has not been set yet).
3. The callback (on the Java side now) deserializes the protobuf record,
converts it to a concept-fetch Request instance, and passes it to the
client.
4. The response (or error) is then converted into a protobuf record. The
java code then asks Rust for a buffer big enough to hold the serialized
response (or error).
5. The response is written to the buffer, and returned to Rust.
6. Rust then decodes the protobuf, and converts it to a
`viaduct::Response` object that it returns to the caller.
Some notes:
- This "request flow" is entirely synchronous, simplifying the implementation
considerably.
- Cookies are explicitly not supported at the moment; adding them would
require a separate security review.
- Generally, this is the way the FFI backend is expected to work on any
platform, but for concreteness (and because it's the only one currently using
the FFI backend), we explained it for Android.
- Most of the code in `viaduct` defines an ergonomic HTTP facade and is
unrelated to this (or to the reqwest backend). The FFI-specific code lives
more or less entirely in the Kotlin layer and in `src/backend/ffi.rs`.
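For a caller-side feel, a rough usage sketch (the endpoint URL is made up, and this assumes a backend has already been installed as described above):
use viaduct::{header_names, Request};

fn fetch_greeting() -> Result<String, viaduct::Error> {
    let url = url::Url::parse("https://example.com/api/greeting").unwrap();
    let response = Request::get(url)
        .header(header_names::ACCEPT, "application/json")?
        .send()?;
    // `text()` lossily decodes the body as UTF-8; `response.body` holds the raw bytes.
    Ok(response.text().into_owned())
}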

97
third_party/rust/viaduct/src/backend.rs vendored

@ -1,97 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ffi::FfiBackend;
use once_cell::sync::OnceCell;
mod ffi;
pub fn note_backend(which: &str) {
// If trace logs are enabled: log on every request. Otherwise, just log on
// the first request at `info` level. We remember if the Once was triggered
// to avoid logging twice in the first case.
static NOTE_BACKEND_ONCE: std::sync::Once = std::sync::Once::new();
let mut called = false;
NOTE_BACKEND_ONCE.call_once(|| {
log::info!("Using HTTP backend {}", which);
called = true;
});
if !called {
log::trace!("Using HTTP backend {}", which);
}
}
pub trait Backend: Send + Sync + 'static {
fn send(&self, request: crate::Request) -> Result<crate::Response, crate::Error>;
}
static BACKEND: OnceCell<&'static dyn Backend> = OnceCell::new();
pub fn set_backend(b: &'static dyn Backend) -> Result<(), crate::Error> {
BACKEND
.set(b)
.map_err(|_| crate::error::Error::SetBackendError)
}
pub(crate) fn get_backend() -> &'static dyn Backend {
*BACKEND.get_or_init(|| Box::leak(Box::new(FfiBackend)))
}
pub fn send(request: crate::Request) -> Result<crate::Response, crate::Error> {
validate_request(&request)?;
get_backend().send(request)
}
pub fn validate_request(request: &crate::Request) -> Result<(), crate::Error> {
if request.url.scheme() != "https"
&& request.url.host_str() != Some("localhost")
&& request.url.host_str() != Some("127.0.0.1")
{
return Err(crate::Error::NonTlsUrl);
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::validate_request;
#[test]
fn test_validate_request() {
let _https_request = crate::Request::new(
crate::Method::Get,
url::Url::parse("https://www.example.com").unwrap(),
);
assert!(validate_request(&_https_request).is_ok());
let _http_request = crate::Request::new(
crate::Method::Get,
url::Url::parse("http://www.example.com").unwrap(),
);
assert!(validate_request(&_http_request).is_err());
let _localhost_https_request = crate::Request::new(
crate::Method::Get,
url::Url::parse("https://127.0.0.1/index.html").unwrap(),
);
assert!(validate_request(&_localhost_https_request).is_ok());
let _localhost_https_request_2 = crate::Request::new(
crate::Method::Get,
url::Url::parse("https://localhost:4242/").unwrap(),
);
assert!(validate_request(&_localhost_https_request_2).is_ok());
let _localhost_http_request = crate::Request::new(
crate::Method::Get,
url::Url::parse("http://localhost:4242/").unwrap(),
);
assert!(validate_request(&_localhost_http_request).is_ok());
let localhost_request = crate::Request::new(
crate::Method::Get,
url::Url::parse("localhost:4242/").unwrap(),
);
assert!(validate_request(&localhost_request).is_err());
}
}
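For local testing it is also possible to plug in a backend directly instead of going through the FFI; a rough sketch (the `NoOpBackend` type is hypothetical, and the `Response` fields match the struct in `src/lib.rs` below):
use viaduct::{Backend, Error, Headers, Request, Response};

struct NoOpBackend;

impl Backend for NoOpBackend {
    fn send(&self, request: Request) -> Result<Response, Error> {
        // Answer every request with an empty 200, echoing back the method and URL.
        Ok(Response {
            request_method: request.method,
            url: request.url,
            status: 200,
            headers: Headers::new(),
            body: Vec::new(),
        })
    }
}

fn install_for_tests() -> Result<(), Error> {
    // set_backend wants a `&'static dyn Backend`; leaking a Box is one simple way to get one.
    viaduct::set_backend(Box::leak(Box::new(NoOpBackend)))
}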

199
third_party/rust/viaduct/src/backend/ffi.rs vendored

@ -1,199 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use crate::{backend::Backend, settings::GLOBAL_SETTINGS};
use crate::{msg_types, Error};
use ffi_support::{ByteBuffer, FfiStr};
ffi_support::implement_into_ffi_by_protobuf!(msg_types::Request);
impl From<crate::Request> for msg_types::Request {
fn from(request: crate::Request) -> Self {
msg_types::Request {
url: request.url.into_string(),
body: request.body,
// Real weird that this needs to be specified as an i32, but
// it certainly makes it convenient for us...
method: request.method as i32,
headers: request.headers.into(),
follow_redirects: GLOBAL_SETTINGS.follow_redirects,
use_caches: GLOBAL_SETTINGS.use_caches,
connect_timeout_secs: GLOBAL_SETTINGS
.connect_timeout
.map_or(0, |d| d.as_secs() as i32),
read_timeout_secs: GLOBAL_SETTINGS
.read_timeout
.map_or(0, |d| d.as_secs() as i32),
}
}
}
macro_rules! backend_error {
($($args:tt)*) => {{
let msg = format!($($args)*);
log::error!("{}", msg);
Error::BackendError(msg)
}};
}
pub struct FfiBackend;
impl Backend for FfiBackend {
fn send(&self, request: crate::Request) -> Result<crate::Response, Error> {
use ffi_support::IntoFfi;
use prost::Message;
super::note_backend("FFI (trusted)");
let method = request.method;
let fetch = callback_holder::get_callback().ok_or_else(|| Error::BackendNotInitialized)?;
let proto_req: msg_types::Request = request.into();
let buf = proto_req.into_ffi_value();
let response = unsafe { fetch(buf) };
// This way we'll Drop it if we panic, unlike if we just got a slice into
// it. Besides, we already own it.
let response_bytes = response.into_vec();
let response: msg_types::Response = match Message::decode(response_bytes.as_slice()) {
Ok(v) => v,
Err(e) => {
panic!(
"Failed to parse protobuf returned from fetch callback! {}",
e
);
}
};
if let Some(exn) = response.exception_message {
log::error!(
// Well, we caught *something* java wanted to tell us about, anyway.
"Caught network error (presumably). Message: {:?}",
exn
);
return Err(Error::NetworkError(format!("Java error: {:?}", exn)));
}
let status = response
.status
.ok_or_else(|| backend_error!("Missing HTTP status"))?;
if status < 0 || status > i32::from(u16::max_value()) {
return Err(backend_error!("Illegal HTTP status: {}", status));
}
let mut headers = crate::Headers::with_capacity(response.headers.len());
for (name, val) in response.headers {
let hname = match crate::HeaderName::new(name) {
Ok(name) => name,
Err(e) => {
// Ignore headers with invalid names, since nobody can look for them anyway.
log::warn!("Server sent back invalid header name: '{}'", e);
continue;
}
};
// Not using Header::new since the error it returns is for request headers.
headers.insert_header(crate::Header::new_unchecked(hname, val));
}
let url = url::Url::parse(
&response
.url
.ok_or_else(|| backend_error!("Response has no URL"))?,
)
.map_err(|e| backend_error!("Response has illegal URL: {}", e))?;
Ok(crate::Response {
url,
request_method: method,
body: response.body.unwrap_or_default(),
status: status as u16,
headers,
})
}
}
/// Type of the callback we need callers on the other side of the FFI to
/// provide.
///
/// Takes and returns a ffi_support::ByteBuffer. (TODO: it would be nice if we could
/// make this take/return pointers, so that we could use JNA direct mapping. Maybe
/// we need some kind of ThinBuffer?)
///
/// This is a bit weird, since it requires us to allow code on the other side of
/// the FFI to allocate a ByteBuffer from us, but it works.
///
/// The code on the other side of the FFI is responsible for freeing the ByteBuffer
/// it's passed using `viaduct_destroy_bytebuffer`.
type FetchCallback = unsafe extern "C" fn(ByteBuffer) -> ByteBuffer;
/// Module that manages get/set of the global fetch callback pointer.
mod callback_holder {
use super::FetchCallback;
use std::sync::atomic::{AtomicUsize, Ordering};
/// Note: We only assign to this once.
static CALLBACK_PTR: AtomicUsize = AtomicUsize::new(0);
// Overly-paranoid sanity checking to ensure that these types are
// convertible between each other. `transmute` actually should check this for
// us too, but this helps document the invariants we rely on in this code.
//
// Note that these are guaranteed by
// https://rust-lang.github.io/unsafe-code-guidelines/layout/function-pointers.html
// and thus this is a little paranoid.
ffi_support::static_assert!(
STATIC_ASSERT_USIZE_EQ_FUNC_SIZE,
std::mem::size_of::<usize>() == std::mem::size_of::<FetchCallback>()
);
ffi_support::static_assert!(
STATIC_ASSERT_USIZE_EQ_OPT_FUNC_SIZE,
std::mem::size_of::<usize>() == std::mem::size_of::<Option<FetchCallback>>()
);
/// Get the function pointer to the FetchCallback, if it has been set.
/// Returns `None` if the callback has not yet been initialized.
pub(super) fn get_callback() -> Option<FetchCallback> {
let ptr_value = CALLBACK_PTR.load(Ordering::SeqCst);
unsafe { std::mem::transmute::<usize, Option<FetchCallback>>(ptr_value) }
}
/// Set the function pointer to the FetchCallback. Returns false if we did nothing because the callback had already been initialized
pub(super) fn set_callback(h: FetchCallback) -> bool {
let as_usize = h as usize;
let old_ptr = CALLBACK_PTR.compare_and_swap(0, as_usize, Ordering::SeqCst);
if old_ptr != 0 {
// This is an internal bug, the other side of the FFI should ensure
// it sets this only once. Note that this is actually going to be
// before logging is initialized in practice, so there's not a lot
// we can actually do here.
log::error!("Bug: Initialized CALLBACK_PTR multiple times");
}
old_ptr == 0
}
}
/// Return a ByteBuffer of the requested size. This is used to store the
/// response from the callback.
#[no_mangle]
pub extern "C" fn viaduct_alloc_bytebuffer(sz: i32) -> ByteBuffer {
let mut error = ffi_support::ExternError::default();
let buffer =
ffi_support::call_with_output(&mut error, || ByteBuffer::new_with_size(sz.max(0) as usize));
error.consume_and_log_if_error();
buffer
}
#[no_mangle]
pub extern "C" fn viaduct_log_error(s: FfiStr<'_>) {
let mut error = ffi_support::ExternError::default();
ffi_support::call_with_output(&mut error, || {
log::error!("Viaduct Ffi Error: {}", s.as_str())
});
error.consume_and_log_if_error();
}
#[no_mangle]
pub extern "C" fn viaduct_initialize(callback: FetchCallback) -> u8 {
ffi_support::abort_on_panic::call_with_output(|| callback_holder::set_callback(callback))
}
ffi_support::define_bytebuffer_destructor!(viaduct_destroy_bytebuffer);

50
third_party/rust/viaduct/src/error.rs vendored

@ -1,50 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use failure::Fail;
#[derive(Debug, Fail)]
pub enum Error {
#[fail(display = "Illegal characters in request header '{}'", _0)]
RequestHeaderError(crate::HeaderName),
#[fail(display = "Backend error: {}", _0)]
BackendError(String),
#[fail(display = "Network error: {}", _0)]
NetworkError(String),
#[fail(display = "The rust-components network backend must be initialized before use!")]
BackendNotInitialized,
#[fail(display = "Backend already initialized.")]
SetBackendError,
/// Note: we return this if the server returns a bad URL with
/// its response. This *probably* should never happen, but who knows.
#[fail(display = "URL Parse Error: {}", _0)]
UrlError(#[fail(cause)] url::ParseError),
#[fail(display = "Validation error: URL does not use TLS protocol.")]
NonTlsUrl,
}
impl From<url::ParseError> for Error {
fn from(u: url::ParseError) -> Self {
Error::UrlError(u)
}
}
/// This error is returned as the `Err` result from
/// [`Response::require_success`].
///
/// Note that it's not a variant on `Error` to distinguish between errors
/// caused by the network, and errors returned from the server.
#[derive(failure::Fail, Debug, Clone, PartialEq)]
#[fail(display = "Error: {} {} returned {}", method, url, status)]
pub struct UnexpectedStatus {
pub status: u16,
pub method: crate::Method,
pub url: url::Url,
}


@ -1,41 +0,0 @@
syntax = "proto2";
// Note: this file name must be unique due to how the iOS megazord works :(
package mozilla.appservices.httpconfig.protobuf;
option java_package = "mozilla.appservices.httpconfig";
option java_outer_classname = "MsgTypes";
option swift_prefix = "MsgTypes_";
option optimize_for = LITE_RUNTIME;
message Request {
enum Method {
GET = 0;
HEAD = 1;
POST = 2;
PUT = 3;
DELETE = 4;
CONNECT = 5;
OPTIONS = 6;
TRACE = 7;
}
required Method method = 1;
required string url = 2;
optional bytes body = 3;
map<string, string> headers = 4;
required bool follow_redirects = 5;
required bool use_caches = 6;
required int32 connect_timeout_secs = 7;
required int32 read_timeout_secs = 8;
}
message Response {
// If this is present, nothing else is.
optional string exception_message = 1;
optional string url = 2;
optional int32 status = 3;
optional bytes body = 4;
map<string, string> headers = 5;
}

414
third_party/rust/viaduct/src/headers.rs vendored

@ -1,414 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
pub use name::{HeaderName, InvalidHeaderName};
use std::collections::HashMap;
use std::iter::FromIterator;
use std::str::FromStr;
mod name;
/// A single header. Headers have a name (case insensitive) and a value. The
/// character sets for header names and values are both restrictive.
/// - Names must only contain a-zA-Z0-9 and ('!' | '#' | '$' | '%' | '&' |
/// '\'' | '*' | '+' | '-' | '.' | '^' | '_' | '`' | '|' | '~') characters
/// (the field-name token production defined at
/// https://tools.ietf.org/html/rfc7230#section-3.2).
/// For request headers, we expect these to all be specified statically,
/// and so we panic if you provide an invalid one. (For response headers, we
/// ignore headers with invalid names, but emit a warning).
///
/// Header names are case insensitive, and we have several pre-defined ones in
/// the [`header_names`] module.
///
/// - Values may only contain printable ascii characters, and may not contain
/// \r or \n. Strictly speaking, HTTP is more flexible for header values,
/// however we don't need to support binary header values, and so we do not.
///
/// Note that typically you should not interact with this directly, and instead
/// use the methods on [`Request`] or [`Headers`] to manipulate these.
#[derive(Clone, Debug, PartialEq, PartialOrd, Hash, Eq, Ord)]
pub struct Header {
pub(crate) name: HeaderName,
pub(crate) value: String,
}
// Trim `s` without copying if it can be avoided.
fn trim_string<S: AsRef<str> + Into<String>>(s: S) -> String {
let sr = s.as_ref();
let trimmed = sr.trim();
if sr.len() != trimmed.len() {
trimmed.into()
} else {
s.into()
}
}
fn is_valid_header_value(value: &str) -> bool {
value.bytes().all(|b| (32 <= b && b < 127) || b == b'\t')
}
impl Header {
pub fn new<Name, Value>(name: Name, value: Value) -> Result<Self, crate::Error>
where
Name: Into<HeaderName>,
Value: AsRef<str> + Into<String>,
{
let name = name.into();
let value = trim_string(value);
if !is_valid_header_value(&value) {
return Err(crate::Error::RequestHeaderError(name));
}
Ok(Self { name, value })
}
pub fn new_unchecked<Value>(name: HeaderName, value: Value) -> Self
where
Value: AsRef<str> + Into<String>,
{
Self {
name,
value: value.into(),
}
}
#[inline]
pub fn name(&self) -> &HeaderName {
&self.name
}
#[inline]
pub fn value(&self) -> &str {
&self.value
}
#[inline]
fn set_value<V: AsRef<str>>(&mut self, s: V) -> Result<(), crate::Error> {
let value = s.as_ref();
if !is_valid_header_value(&value) {
Err(crate::Error::RequestHeaderError(self.name.clone()))
} else {
self.value.clear();
self.value.push_str(s.as_ref().trim());
Ok(())
}
}
}
impl std::fmt::Display for Header {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}: {}", self.name, self.value)
}
}
/// A list of headers.
#[derive(Clone, Debug, PartialEq, Default)]
pub struct Headers {
headers: Vec<Header>,
}
impl Headers {
/// Initialize an empty list of headers.
#[inline]
pub fn new() -> Self {
Default::default()
}
/// Initialize an empty list of headers backed by a vector with the provided
/// capacity.
pub fn with_capacity(c: usize) -> Self {
Self {
headers: Vec::with_capacity(c),
}
}
/// Convert this list of headers to a Vec<Header>
#[inline]
pub fn into_vec(self) -> Vec<Header> {
self.headers
}
/// Returns the number of headers.
#[inline]
pub fn len(&self) -> usize {
self.headers.len()
}
/// Returns true if `len()` is zero.
#[inline]
pub fn is_empty(&self) -> bool {
self.headers.is_empty()
}
/// Clear this set of headers.
#[inline]
pub fn clear(&mut self) {
self.headers.clear();
}
/// Insert or update a new header.
///
/// This returns an error if you attempt to specify a header with an
/// invalid value (values must be printable ASCII and may not contain
/// \r or \n)
///
/// ## Example
/// ```
/// # use viaduct::Headers;
/// # fn main() -> Result<(), viaduct::Error> {
/// let mut h = Headers::new();
/// h.insert("My-Cool-Header", "example")?;
/// assert_eq!(h.get("My-Cool-Header"), Some("example"));
///
/// // Note: names are case insensitive
/// assert_eq!(h.get("my-cool-header"), Some("example"));
///
/// // Also note, constants for headers are in `viaduct::header_names`, and
/// // you can chain the result of this function.
/// h.insert(viaduct::header_names::CONTENT_TYPE, "something...")?
/// .insert("Something-Else", "etc")?;
/// # Ok(())
/// # }
/// ```
pub fn insert<N, V>(&mut self, name: N, value: V) -> Result<&mut Self, crate::Error>
where
N: Into<HeaderName> + PartialEq<HeaderName>,
V: Into<String> + AsRef<str>,
{
if let Some(entry) = self.headers.iter_mut().find(|h| name == h.name) {
entry.set_value(value)?;
} else {
self.headers.push(Header::new(name, value)?);
}
Ok(self)
}
/// Insert the provided header unless a header is already specified.
/// Mostly used internally, e.g. to set "Content-Type: application/json"
/// in `Request::json()` unless it has been set specifically.
pub fn insert_if_missing<N, V>(&mut self, name: N, value: V) -> Result<&mut Self, crate::Error>
where
N: Into<HeaderName> + PartialEq<HeaderName>,
V: Into<String> + AsRef<str>,
{
if !self.headers.iter_mut().any(|h| name == h.name) {
self.headers.push(Header::new(name, value)?);
}
Ok(self)
}
/// Insert or update a header directly. Typically you will want to use
/// `insert` over this, as it performs less work if the header needs
/// updating instead of insertion.
pub fn insert_header(&mut self, new: Header) -> &mut Self {
if let Some(entry) = self.headers.iter_mut().find(|h| h.name == new.name) {
entry.value = new.value;
} else {
self.headers.push(new);
}
self
}
/// Add all the headers in the provided iterator to this list of headers.
pub fn extend<I>(&mut self, iter: I) -> &mut Self
where
I: IntoIterator<Item = Header>,
{
let it = iter.into_iter();
self.headers.reserve(it.size_hint().0);
for h in it {
self.insert_header(h);
}
self
}
/// Add all the headers in the provided iterator, unless any of them are Err.
pub fn try_extend<I, E>(&mut self, iter: I) -> Result<&mut Self, E>
where
I: IntoIterator<Item = Result<Header, E>>,
{
// Not the most efficient but avoids leaving us in an unspecified state
// if one returns Err.
self.extend(iter.into_iter().collect::<Result<Vec<_>, E>>()?);
Ok(self)
}
/// Get the header object with the requested name. Usually, you will
/// want to use `get()` or `get_as::<T>()` instead.
pub fn get_header<S>(&self, name: S) -> Option<&Header>
where
S: PartialEq<HeaderName>,
{
self.headers.iter().find(|h| name == h.name)
}
/// Get the value of the header with the provided name.
///
/// See also `get_as`.
///
/// ## Example
/// ```
/// # use viaduct::{Headers, header_names::CONTENT_TYPE};
/// # fn main() -> Result<(), viaduct::Error> {
/// let mut h = Headers::new();
/// h.insert(CONTENT_TYPE, "application/json")?;
/// assert_eq!(h.get(CONTENT_TYPE), Some("application/json"));
/// assert_eq!(h.get("Something-Else"), None);
/// # Ok(())
/// # }
/// ```
pub fn get<S>(&self, name: S) -> Option<&str>
where
S: PartialEq<HeaderName>,
{
self.get_header(name).map(|h| h.value.as_str())
}
/// Get the value of the header with the provided name, and
/// attempt to parse it using [`std::str::FromStr`].
///
/// - If the header is missing, it returns None.
/// - If the header is present but parsing failed, returns
/// `Some(Err(<error returned by parsing>))`.
/// - Otherwise, returns `Some(Ok(result))`.
///
/// Note that if `Option<Result<T, E>>` is inconvenient for you,
/// and you wish this returned `Result<Option<T>, E>`, you may use
/// the built-in `transpose()` method to convert between them.
///
/// ```
/// # use viaduct::Headers;
/// # fn main() -> Result<(), viaduct::Error> {
/// let mut h = Headers::new();
/// h.insert("Example", "1234")?.insert("Illegal", "abcd")?;
/// let v: Option<Result<i64, _>> = h.get_as("Example");
/// assert_eq!(v, Some(Ok(1234)));
/// assert_eq!(h.get_as::<i64, _>("Example"), Some(Ok(1234)));
/// assert_eq!(h.get_as::<i64, _>("Illegal"), Some("abcd".parse::<i64>()));
/// assert_eq!(h.get_as::<i64, _>("Something-Else"), None);
/// # Ok(())
/// # }
/// ```
pub fn get_as<T, S>(&self, name: S) -> Option<Result<T, <T as FromStr>::Err>>
where
T: FromStr,
S: PartialEq<HeaderName>,
{
self.get(name).map(str::parse)
}
/// Get the value of the header with the provided name, and
/// attempt to parse it using [`std::str::FromStr`].
///
/// This is a variant of `get_as` that returns None on error,
/// intended to be used for cases where missing and invalid
/// headers should be treated the same. (With `get_as` this
/// requires `h.get_as(...).and_then(|r| r.ok())`, which is
/// somewhat opaque.)
pub fn try_get<T, S>(&self, name: S) -> Option<T>
where
T: FromStr,
S: PartialEq<HeaderName>,
{
self.get(name).and_then(|val| val.parse::<T>().ok())
}
/// Get an iterator over the headers in no particular order.
///
/// Note that we also implement IntoIterator.
pub fn iter(&self) -> <&Headers as IntoIterator>::IntoIter {
self.into_iter()
}
}
impl std::iter::IntoIterator for Headers {
type IntoIter = <Vec<Header> as IntoIterator>::IntoIter;
type Item = Header;
fn into_iter(self) -> Self::IntoIter {
self.headers.into_iter()
}
}
impl<'a> std::iter::IntoIterator for &'a Headers {
type IntoIter = <&'a [Header] as IntoIterator>::IntoIter;
type Item = &'a Header;
fn into_iter(self) -> Self::IntoIter {
(&self.headers[..]).iter()
}
}
impl FromIterator<Header> for Headers {
fn from_iter<T>(iter: T) -> Self
where
T: IntoIterator<Item = Header>,
{
let mut v = iter.into_iter().collect::<Vec<Header>>();
v.sort_by(|a, b| a.name.cmp(&b.name));
v.reverse();
v.dedup_by(|a, b| a.name == b.name);
Headers { headers: v }
}
}
#[allow(clippy::implicit_hasher)] // https://github.com/rust-lang/rust-clippy/issues/3899
impl From<Headers> for HashMap<String, String> {
fn from(headers: Headers) -> HashMap<String, String> {
headers
.into_iter()
.map(|h| (String::from(h.name), h.value))
.collect()
}
}
pub mod consts {
use super::name::HeaderName;
macro_rules! def_header_consts {
($(($NAME:ident, $string:literal)),* $(,)?) => {
$(pub const $NAME: HeaderName = HeaderName(std::borrow::Cow::Borrowed($string));)*
};
}
macro_rules! headers {
($(($NAME:ident, $string:literal)),* $(,)?) => {
def_header_consts!($(($NAME, $string)),*);
// Unused except for tests.
const _ALL: &[&str] = &[$($string),*];
};
}
// Predefined header names, for convenience.
// Feel free to add to these.
headers!(
(ACCEPT_ENCODING, "accept-encoding"),
(ACCEPT, "accept"),
(AUTHORIZATION, "authorization"),
(CONTENT_TYPE, "content-type"),
(ETAG, "etag"),
(IF_NONE_MATCH, "if-none-match"),
(USER_AGENT, "user-agent"),
// non-standard, but it's convenient to have these.
(RETRY_AFTER, "retry-after"),
(X_IF_UNMODIFIED_SINCE, "x-if-unmodified-since"),
(X_KEYID, "x-keyid"),
(X_LAST_MODIFIED, "x-last-modified"),
(X_TIMESTAMP, "x-timestamp"),
(X_WEAVE_NEXT_OFFSET, "x-weave-next-offset"),
(X_WEAVE_RECORDS, "x-weave-records"),
(X_WEAVE_TIMESTAMP, "x-weave-timestamp"),
(X_WEAVE_BACKOFF, "x-weave-backoff"),
);
#[test]
fn test_predefined() {
for &name in _ALL {
assert!(
HeaderName::new(name).is_ok(),
"Invalid header name in predefined header constants: {}",
name
);
assert_eq!(
name.to_ascii_lowercase(),
name,
"Non-lowercase name in predefined header constants: {}",
name
);
}
}
}

229
third_party/rust/viaduct/src/headers/name.rs vendored

@ -1,229 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::borrow::Cow;
/// Represents a header name that we know to be both valid and lowercase.
/// Internally, this avoids allocating for headers that are constant strings,
/// like the predefined ones in this crate, however even without that
/// optimization, we would still likely have an equivalent of this for use
/// as a case-insensitive string guaranteed to only have valid characters.
#[derive(Debug, Clone, PartialEq, PartialOrd, Hash, Eq, Ord)]
pub struct HeaderName(pub(super) Cow<'static, str>);
/// Indicates an invalid header name. Note that we only emit
/// this for response headers; for request headers, we panic
/// instead. This is because it would likely come through as
/// a network error if we emitted it for local headers, when
/// it's actually a bug that we'd need to fix.
#[derive(failure::Fail, Debug, Clone, PartialEq)]
#[fail(display = "Invalid header name: {:?}", _0)]
pub struct InvalidHeaderName(Cow<'static, str>);
impl From<&'static str> for HeaderName {
fn from(s: &'static str) -> HeaderName {
match HeaderName::new(s) {
Ok(v) => v,
Err(e) => {
panic!("Illegal locally specified header {}", e);
}
}
}
}
impl From<String> for HeaderName {
fn from(s: String) -> HeaderName {
match HeaderName::new(s) {
Ok(v) => v,
Err(e) => {
panic!("Illegal locally specified header {}", e);
}
}
}
}
impl From<Cow<'static, str>> for HeaderName {
fn from(s: Cow<'static, str>) -> HeaderName {
match HeaderName::new(s) {
Ok(v) => v,
Err(e) => {
panic!("Illegal locally specified header {}", e);
}
}
}
}
impl InvalidHeaderName {
pub fn name(&self) -> &str {
&self.0[..]
}
}
fn validate_header(mut name: Cow<'static, str>) -> Result<HeaderName, InvalidHeaderName> {
if name.len() == 0 {
return Err(invalid_header_name(name));
}
let mut need_lower_case = false;
for b in name.bytes() {
let validity = VALID_HEADER_LUT[b as usize];
if validity == 0 {
return Err(invalid_header_name(name));
}
if validity == 2 {
need_lower_case = true;
}
}
if need_lower_case {
// Only do this if needed, since it causes us to own the header.
name.to_mut().make_ascii_lowercase();
}
Ok(HeaderName(name))
}
impl HeaderName {
/// Create a new header. In general you likely want to use `HeaderName::from(s)`
/// instead for headers being specified locally (This will panic instead of
/// returning a Result, since we have control over headers we specify locally,
/// and want to know if we specify an illegal one).
#[inline]
pub fn new<S: Into<Cow<'static, str>>>(s: S) -> Result<Self, InvalidHeaderName> {
validate_header(s.into())
}
#[inline]
pub fn as_str(&self) -> &str {
&self.0[..]
}
}
impl std::fmt::Display for HeaderName {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
// Separate for dumb micro-optimization reasons.
#[cold]
#[inline(never)]
fn invalid_header_name(s: Cow<'static, str>) -> InvalidHeaderName {
log::warn!("Invalid header name: {}", s);
InvalidHeaderName(s)
}
// Note: 0 = invalid, 1 = valid, 2 = valid but needs lowercasing. I'd use an
// enum for this, but it would make this LUT *way* harder to look at. This
// includes 0-9, a-z, A-Z (as 2), and ('!' | '#' | '$' | '%' | '&' | '\'' | '*'
// | '+' | '-' | '.' | '^' | '_' | '`' | '|' | '~'), matching the field-name
// token production defined at https://tools.ietf.org/html/rfc7230#section-3.2.
static VALID_HEADER_LUT: [u8; 256] = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, 0, 0, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
];
impl std::ops::Deref for HeaderName {
type Target = str;
#[inline]
fn deref(&self) -> &str {
self.as_str()
}
}
impl AsRef<str> for HeaderName {
#[inline]
fn as_ref(&self) -> &str {
self.as_str()
}
}
impl AsRef<[u8]> for HeaderName {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_str().as_bytes()
}
}
impl From<HeaderName> for String {
#[inline]
fn from(h: HeaderName) -> Self {
h.0.into()
}
}
impl From<HeaderName> for Cow<'static, str> {
#[inline]
fn from(h: HeaderName) -> Self {
h.0
}
}
impl From<HeaderName> for Vec<u8> {
#[inline]
fn from(h: HeaderName) -> Self {
String::from(h.0).into()
}
}
macro_rules! partialeq_boilerplate {
($T0:ty, $T1:ty) => {
impl<'a> PartialEq<$T0> for $T1 {
fn eq(&self, other: &$T0) -> bool {
// The &* should invoke Deref::deref if it exists, no-op otherwise.
(&*self).eq_ignore_ascii_case(&*other)
}
}
impl<'a> PartialEq<$T1> for $T0 {
fn eq(&self, other: &$T1) -> bool {
PartialEq::eq(other, self)
}
}
};
}
partialeq_boilerplate!(HeaderName, str);
partialeq_boilerplate!(HeaderName, &'a str);
partialeq_boilerplate!(HeaderName, String);
partialeq_boilerplate!(HeaderName, &'a String);
partialeq_boilerplate!(HeaderName, Cow<'a, str>);
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_lut() {
let mut expect = [0u8; 256];
for b in b'0'..=b'9' {
expect[b as usize] = 1;
}
for b in b'a'..=b'z' {
expect[b as usize] = 1;
}
for b in b'A'..=b'Z' {
expect[b as usize] = 2;
}
for b in b"!#$%&'*+-.^_`|~" {
expect[*b as usize] = 1;
}
assert_eq!(&VALID_HEADER_LUT[..], &expect[..]);
}
#[test]
fn test_validate() {
assert!(validate_header("".into()).is_err());
assert!(validate_header(" foo ".into()).is_err());
assert!(validate_header("a=b".into()).is_err());
assert_eq!(
validate_header("content-type".into()),
Ok(HeaderName("content-type".into()))
);
assert_eq!(
validate_header("Content-Type".into()),
Ok(HeaderName("content-type".into()))
);
}
}

361
third_party/rust/viaduct/src/lib.rs vendored

@ -1,361 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![allow(unknown_lints)]
#![warn(rust_2018_idioms)]
use url::Url;
#[macro_use]
mod headers;
mod backend;
pub mod error;
pub mod settings;
pub use error::*;
pub use backend::{note_backend, set_backend, Backend};
pub use headers::{consts as header_names, Header, HeaderName, Headers, InvalidHeaderName};
pub use settings::GLOBAL_SETTINGS;
pub(crate) mod msg_types {
include!("mozilla.appservices.httpconfig.protobuf.rs");
}
/// HTTP Methods.
///
/// The supported methods are the limited to what's supported by android-components.
#[derive(Clone, Debug, Copy, PartialEq, PartialOrd, Eq, Ord, Hash)]
#[repr(u8)]
pub enum Method {
Get,
Head,
Post,
Put,
Delete,
Connect,
Options,
Trace,
}
impl Method {
pub fn as_str(self) -> &'static str {
match self {
Method::Get => "GET",
Method::Head => "HEAD",
Method::Post => "POST",
Method::Put => "PUT",
Method::Delete => "DELETE",
Method::Connect => "CONNECT",
Method::Options => "OPTIONS",
Method::Trace => "TRACE",
}
}
}
impl std::fmt::Display for Method {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(self.as_str())
}
}
#[must_use = "`Request`'s \"builder\" functions take by move, not by `&mut self`"]
#[derive(Clone, Debug, PartialEq)]
pub struct Request {
pub method: Method,
pub url: Url,
pub headers: Headers,
pub body: Option<Vec<u8>>,
}
impl Request {
/// Construct a new request to the given `url` using the given `method`.
/// Note that the request is not made until `send()` is called.
pub fn new(method: Method, url: Url) -> Self {
Self {
method,
url,
headers: Headers::new(),
body: None,
}
}
pub fn send(self) -> Result<Response, Error> {
crate::backend::send(self)
}
/// Alias for `Request::new(Method::Get, url)`, for convenience.
pub fn get(url: Url) -> Self {
Self::new(Method::Get, url)
}
/// Alias for `Request::new(Method::Post, url)`, for convenience.
pub fn post(url: Url) -> Self {
Self::new(Method::Post, url)
}
/// Alias for `Request::new(Method::Put, url)`, for convenience.
pub fn put(url: Url) -> Self {
Self::new(Method::Put, url)
}
/// Alias for `Request::new(Method::Delete, url)`, for convenience.
pub fn delete(url: Url) -> Self {
Self::new(Method::Delete, url)
}
/// Append the provided query parameters to the URL
///
/// ## Example
/// ```
/// # use viaduct::{Request, header_names};
/// # use url::Url;
/// let some_url = url::Url::parse("https://www.example.com/xyz").unwrap();
///
/// let req = Request::post(some_url).query(&[("a", "1234"), ("b", "qwerty")]);
/// assert_eq!(req.url.as_str(), "https://www.example.com/xyz?a=1234&b=qwerty");
///
/// // This appends to the query string instead of replacing `a`.
/// let req = req.query(&[("a", "5678")]);
/// assert_eq!(req.url.as_str(), "https://www.example.com/xyz?a=1234&b=qwerty&a=5678");
/// ```
pub fn query(mut self, pairs: &[(&str, &str)]) -> Self {
let mut append_to = self.url.query_pairs_mut();
for (k, v) in pairs {
append_to.append_pair(k, v);
}
drop(append_to);
self
}
/// Set the query string of the URL. Note that `req.set_query(None)` will
/// clear the query.
///
/// See also `Request::query` which appends a slice of query pairs, which is
/// typically more ergonomic when usable.
///
/// ## Example
/// ```
/// # use viaduct::{Request, header_names};
/// # use url::Url;
/// let some_url = url::Url::parse("https://www.example.com/xyz").unwrap();
///
/// let req = Request::post(some_url).set_query("a=b&c=d");
/// assert_eq!(req.url.as_str(), "https://www.example.com/xyz?a=b&c=d");
///
/// let req = req.set_query(None);
/// assert_eq!(req.url.as_str(), "https://www.example.com/xyz");
/// ```
pub fn set_query<'a, Q: Into<Option<&'a str>>>(mut self, query: Q) -> Self {
self.url.set_query(query.into());
self
}
/// Add all the provided headers to the list of headers to send with this
/// request.
pub fn headers<I>(mut self, to_add: I) -> Self
where
I: IntoIterator<Item = Header>,
{
self.headers.extend(to_add);
self
}
/// Add the provided header to the list of headers to send with this request.
///
/// This returns `Err` if `val` contains characters that may not appear in
/// the body of a header.
///
/// ## Example
/// ```
/// # use viaduct::{Request, header_names};
/// # use url::Url;
/// # fn main() -> Result<(), viaduct::Error> {
/// # let some_url = url::Url::parse("https://www.example.com").unwrap();
/// Request::post(some_url)
/// .header(header_names::CONTENT_TYPE, "application/json")?
/// .header("My-Header", "Some special value")?;
/// // ...
/// # Ok(())
/// # }
/// ```
pub fn header<Name, Val>(mut self, name: Name, val: Val) -> Result<Self, crate::Error>
where
Name: Into<HeaderName> + PartialEq<HeaderName>,
Val: Into<String> + AsRef<str>,
{
self.headers.insert(name, val)?;
Ok(self)
}
/// Set this request's body.
pub fn body(mut self, body: impl Into<Vec<u8>>) -> Self {
self.body = Some(body.into());
self
}
/// Set body to the result of serializing `val`, and, unless it has already
/// been set, set the Content-Type header to "application/json".
///
/// Note: This panics if serde_json::to_vec fails. This can only happen
/// in a couple cases:
///
/// 1. Trying to serialize a map with non-string keys.
/// 2. We wrote a custom serializer that fails.
///
/// Neither of these are things we do. If they happen, it seems better for
/// this to fail hard with an easy to track down panic, than for e.g. `sync`
/// to fail with a JSON parse error (which we'd probably attribute to
/// corrupt data on the server, or something).
pub fn json<T: ?Sized + serde::Serialize>(mut self, val: &T) -> Self {
self.body =
Some(serde_json::to_vec(val).expect("Rust component bug: serde_json::to_vec failure"));
self.headers
.insert_if_missing(header_names::CONTENT_TYPE, "application/json")
.unwrap(); // We know this has to be valid.
self
}
}
/// A response from the server.
#[derive(Clone, Debug, PartialEq)]
pub struct Response {
/// The method used to request this response.
pub request_method: Method,
/// The URL of this response.
pub url: Url,
/// The HTTP Status code of this response.
pub status: u16,
/// The headers returned with this response.
pub headers: Headers,
/// The body of the response. Note that responses with binary bodies are
/// currently unsupported.
pub body: Vec<u8>,
}
impl Response {
/// Parse the body as JSON.
pub fn json<'a, T>(&'a self) -> Result<T, serde_json::Error>
where
T: serde::Deserialize<'a>,
{
serde_json::from_slice(&self.body)
}
    /// Get the body as a string. Assumes UTF-8 encoding. Any non-UTF-8 bytes
    /// are replaced with the replacement character.
pub fn text(&self) -> std::borrow::Cow<'_, str> {
String::from_utf8_lossy(&self.body)
}
/// Returns true if the status code is in the interval `[200, 300)`.
#[inline]
pub fn is_success(&self) -> bool {
status_codes::is_success_code(self.status)
}
/// Returns true if the status code is in the interval `[500, 600)`.
#[inline]
pub fn is_server_error(&self) -> bool {
status_codes::is_server_error_code(self.status)
}
/// Returns true if the status code is in the interval `[400, 500)`.
#[inline]
pub fn is_client_error(&self) -> bool {
status_codes::is_client_error_code(self.status)
}
/// Returns an [`UnexpectedStatus`] error if `self.is_success()` is false,
/// otherwise returns `Ok(self)`.
#[inline]
pub fn require_success(self) -> Result<Self, UnexpectedStatus> {
if self.is_success() {
Ok(self)
} else {
Err(UnexpectedStatus {
method: self.request_method,
// XXX We probably should try and sanitize this. Replace the user id
// if it's a sync token server URL, for example.
url: self.url,
status: self.status,
})
}
}
}
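An illustrative sketch of the usual "require success, then parse JSON" flow; the `Bookmark` type is hypothetical and assumes serde's derive feature.
#[derive(serde::Deserialize)]
struct Bookmark {
    title: String,
}

fn read_bookmark(resp: Response) -> Option<Bookmark> {
    // Reject any non-2xx response, then decode the body as JSON.
    let resp = resp.require_success().ok()?;
    resp.json::<Bookmark>().ok()
}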
/// A module containing constants for all HTTP status codes.
pub mod status_codes {
/// Is it a 2xx status?
#[inline]
pub fn is_success_code(c: u16) -> bool {
200 <= c && c < 300
}
/// Is it a 4xx error?
#[inline]
pub fn is_client_error_code(c: u16) -> bool {
400 <= c && c < 500
}
/// Is it a 5xx error?
#[inline]
pub fn is_server_error_code(c: u16) -> bool {
500 <= c && c < 600
}
macro_rules! define_status_codes {
($(($val:expr, $NAME:ident)),* $(,)?) => {
$(pub const $NAME: u16 = $val;)*
};
}
// From https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
define_status_codes![
(100, CONTINUE),
(101, SWITCHING_PROTOCOLS),
// 2xx
(200, OK),
(201, CREATED),
(202, ACCEPTED),
(203, NONAUTHORITATIVE_INFORMATION),
(204, NO_CONTENT),
(205, RESET_CONTENT),
(206, PARTIAL_CONTENT),
// 3xx
(300, MULTIPLE_CHOICES),
(301, MOVED_PERMANENTLY),
(302, FOUND),
(303, SEE_OTHER),
(304, NOT_MODIFIED),
(305, USE_PROXY),
// no 306
(307, TEMPORARY_REDIRECT),
// 4xx
(400, BAD_REQUEST),
(401, UNAUTHORIZED),
(402, PAYMENT_REQUIRED),
(403, FORBIDDEN),
(404, NOT_FOUND),
(405, METHOD_NOT_ALLOWED),
(406, NOT_ACCEPTABLE),
(407, PROXY_AUTHENTICATION_REQUIRED),
(408, REQUEST_TIMEOUT),
(409, CONFLICT),
(410, GONE),
(411, LENGTH_REQUIRED),
(412, PRECONDITION_FAILED),
(413, REQUEST_ENTITY_TOO_LARGE),
(414, REQUEST_URI_TOO_LONG),
(415, UNSUPPORTED_MEDIA_TYPE),
(416, REQUESTED_RANGE_NOT_SATISFIABLE),
(417, EXPECTATION_FAILED),
// 5xx
(500, INTERNAL_SERVER_ERROR),
(501, NOT_IMPLEMENTED),
(502, BAD_GATEWAY),
(503, SERVICE_UNAVAILABLE),
(504, GATEWAY_TIMEOUT),
(505, HTTP_VERSION_NOT_SUPPORTED),
];
}
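A small illustrative helper, not part of the module, showing how the predicates and constants above compose; `describe_status` is hypothetical.
fn describe_status(status: u16) -> &'static str {
    if status == status_codes::OK {
        "ok"
    } else if status_codes::is_client_error_code(status) {
        "client error"
    } else if status_codes::is_server_error_code(status) {
        "server error"
    } else {
        "informational, redirect, or unknown"
    }
}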


@ -1,47 +0,0 @@
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Request {
#[prost(enumeration="request::Method", required, tag="1")]
pub method: i32,
#[prost(string, required, tag="2")]
pub url: std::string::String,
#[prost(bytes, optional, tag="3")]
pub body: ::std::option::Option<std::vec::Vec<u8>>,
#[prost(map="string, string", tag="4")]
pub headers: ::std::collections::HashMap<std::string::String, std::string::String>,
#[prost(bool, required, tag="5")]
pub follow_redirects: bool,
#[prost(bool, required, tag="6")]
pub use_caches: bool,
#[prost(int32, required, tag="7")]
pub connect_timeout_secs: i32,
#[prost(int32, required, tag="8")]
pub read_timeout_secs: i32,
}
pub mod request {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum Method {
Get = 0,
Head = 1,
Post = 2,
Put = 3,
Delete = 4,
Connect = 5,
Options = 6,
Trace = 7,
}
}
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct Response {
/// If this is present, nothing else is.
#[prost(string, optional, tag="1")]
pub exception_message: ::std::option::Option<std::string::String>,
#[prost(string, optional, tag="2")]
pub url: ::std::option::Option<std::string::String>,
#[prost(int32, optional, tag="3")]
pub status: ::std::option::Option<i32>,
#[prost(bytes, optional, tag="4")]
pub body: ::std::option::Option<std::vec::Vec<u8>>,
#[prost(map="string, string", tag="5")]
pub headers: ::std::collections::HashMap<std::string::String, std::string::String>,
}

third_party/rust/viaduct/src/settings.rs (vendored, 38 lines changed)

@ -1,38 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use std::time::Duration;
/// Note: reqwest only allows these to be specified per-Client, whereas
/// concept-fetch allows them to be specified on each call to fetch. I think
/// it's worth keeping a single global reqwest::Client in the reqwest backend,
/// to simplify the way we abstract away from these.
///
/// In the future, should we need it, we might be able to add a CustomClient type
/// with custom settings. In the reqwest backend this would store a Client, and
/// in the concept-fetch backend it would only store the settings, and populate
/// things on the fly.
#[derive(Debug, PartialEq)]
pub struct Settings {
pub read_timeout: Option<Duration>,
pub connect_timeout: Option<Duration>,
pub follow_redirects: bool,
pub use_caches: bool,
_priv: (),
}
#[cfg(target_os = "ios")]
const TIMEOUT_DURATION: Duration = Duration::from_secs(7);
#[cfg(not(target_os = "ios"))]
const TIMEOUT_DURATION: Duration = Duration::from_secs(10);
// The singleton instance of our settings.
pub static GLOBAL_SETTINGS: &Settings = &Settings {
read_timeout: Some(TIMEOUT_DURATION),
connect_timeout: Some(TIMEOUT_DURATION),
follow_redirects: true,
use_caches: false,
_priv: (),
};
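A hedged sketch of how a backend might consume `GLOBAL_SETTINGS`; the `BackendConfig` type and helper are invented here purely for illustration.
struct BackendConfig {
    connect_timeout: Option<Duration>,
    read_timeout: Option<Duration>,
    follow_redirects: bool,
    use_caches: bool,
}

fn backend_config_from_global() -> BackendConfig {
    // Copy the process-wide defaults into whatever shape a backend needs.
    let s = GLOBAL_SETTINGS;
    BackendConfig {
        connect_timeout: s.connect_timeout,
        read_timeout: s.read_timeout,
        follow_redirects: s.follow_redirects,
        use_caches: s.use_caches,
    }
}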


@ -1 +1 @@
{"files":{"Cargo.toml":"d5782ce7188018b6e8fa8a99298472d403b6f11d9b7c67b0fd28acbcbdf37109","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2b827a62155a3d724cdb4c198270ea467439e537403f82fa873321ac55a69a63","sql/create_schema.sql":"cbb6d432e578c69614199f9e82f8103da5c1f6df5d7af4f77ea1be5869000b26","sql/create_sync_temp_tables.sql":"3e7f113899745e1d2af162a520300fc74b1b32202f69f928353854bc1f7b1b8e","src/api.rs":"0ee733b8bff50aa419dd893853647d2252cf57823e87aad3ba5e30127df5b82f","src/db.rs":"e04f19ab2e3da4423ce49d43b6c9a6d86be4e573f54421061bea04ef875afb2a","src/error.rs":"c956152633ad6c787f8b7322b619f807354d4c3cb2ecc35c549c3b4bcd98079e","src/lib.rs":"acd7469c70b7094004d8065dcc0f71fd3da300e457f3ab09657d3e93fac3c7c2","src/schema.rs":"cd5a03c2d2dc1eebdea30054c6f6a7a6b302184e9ad1f40de659f6b972c481cf","src/store.rs":"b8433d086131021d696dfc112528abbd205281744900a3d1828f04f1f7443067","src/sync/bridge.rs":"e60fec0f8f167f893bb2055f4623c9cc4dc6921acafd13dcc4e6cfd228b3cb42","src/sync/incoming.rs":"76e494dbe0583bdc3cb9567cbfd202072e80304dded75da9b57c7662e489bc2e","src/sync/mod.rs":"c9ef7561b3ba898e5f5036efc15bcbc95a2975cabbf82ef1d1623349f4897ce7","src/sync/outgoing.rs":"2e0434359ba5005d730aebdac2e29980005f56e62003d89e7def78fcf8d13c5a","src/sync/sync_tests.rs":"e2c665046f3ad2a665eee2b5b33a89ae8804af42bf93fe7b2694280eb5b2a9cc"},"package":null}
{"files":{"Cargo.toml":"0ff7bf4f2ec3250d226f5ca234925fb3d292ef2d3377d89e7dda548789d6b02a","README.md":"1fd617294339930ee1ad5172377648b268cce0216fc3971facbfe7c6839e9ab1","build.rs":"2b827a62155a3d724cdb4c198270ea467439e537403f82fa873321ac55a69a63","sql/create_schema.sql":"cbb6d432e578c69614199f9e82f8103da5c1f6df5d7af4f77ea1be5869000b26","sql/create_sync_temp_tables.sql":"3e7f113899745e1d2af162a520300fc74b1b32202f69f928353854bc1f7b1b8e","src/api.rs":"56d3b1ec95723bfce295728d4dd84cc7712aadb52787a8264871cadd0849a04a","src/db.rs":"3639a0cb8310fb9ff12fcfa251b4f12d39ab69730c77b35f5483fa88747d63ed","src/error.rs":"67d9f32a58cc9232a49a3fbc43da01526eca50deffe7f9ec62f3c2667bb0baab","src/lib.rs":"01e5cc7f4a235409cc893c0275e544289f82d1eca3d93b0e81f523bbe78789f6","src/schema.rs":"cd5a03c2d2dc1eebdea30054c6f6a7a6b302184e9ad1f40de659f6b972c481cf","src/store.rs":"a000751ed6eafcaa87fcf44f6391f2c417fb29917b79ec2812524132c24092a8","src/sync/bridge.rs":"4f7037ab7ae6ad042933056d40f0bbc3ceb988b7dc5b0c43527ec516eb9d3550","src/sync/incoming.rs":"6e70749577b4e037b8a7baa755b8973695ca5926062c0567de8c3585b95a7b66","src/sync/mod.rs":"e2941761b0b20c3ebc6e2b6a7992eeffbab7e1bc49e113b8bd75f304f00f3878","src/sync/outgoing.rs":"2e0434359ba5005d730aebdac2e29980005f56e62003d89e7def78fcf8d13c5a","src/sync/sync_tests.rs":"7fb335ec1a2288529247761df075ccc51280653c0ca1a655115cd1a09eaea540"},"package":null}

third_party/rust/webext-storage/Cargo.toml (vendored, 4 lines changed)

@ -11,7 +11,7 @@ default = []
[dependencies]
error-support = { path = "../support/error" }
failure = "0.1"
failure = "0.1.6"
interrupt-support = { path = "../support/interrupt" }
lazy_static = "1.4.0"
log = "0.4"
@ -28,7 +28,7 @@ version = "0.23.1"
features = ["functions", "bundled", "serde_json"]
[dev-dependencies]
env_logger = "0.7"
env_logger = "0.7.0"
prettytable-rs = "0.8"
# A *direct* dep on the -sys crate is required for our build.rs

third_party/rust/webext-storage/src/api.rs (vendored, 48 lines changed)

@ -294,6 +294,21 @@ pub fn clear(tx: &Transaction<'_>, ext_id: &str) -> Result<StorageChanges> {
Ok(result)
}
/// While this API isn't available to extensions, Firefox wants a way to wipe
/// all data for all addons but not sync the deletions. We also don't report
/// the changes caused by the deletion.
/// That means that after doing this, the next sync is likely to drag some data
/// back in - which is fine.
/// This is much like what the sync support for other components calls a "wipe",
/// so we name it similarly.
pub fn wipe_all(tx: &Transaction<'_>) -> Result<()> {
// We assume the meta table is only used by sync.
tx.execute_batch(
"DELETE FROM storage_sync_data; DELETE FROM storage_sync_mirror; DELETE FROM meta;",
)?;
Ok(())
}
// TODO - get_bytes_in_use()
#[cfg(test)]
@ -529,4 +544,37 @@ mod tests {
};
Ok(())
}
fn query_count(conn: &Connection, table: &str) -> u32 {
conn.query_row_and_then(
&format!("SELECT COUNT(*) FROM {};", table),
rusqlite::NO_PARAMS,
|row| row.get::<_, u32>(0),
)
.expect("should work")
}
#[test]
fn test_wipe() -> Result<()> {
use crate::db::put_meta;
let mut db = new_mem_db();
let tx = db.transaction()?;
set(&tx, "ext-a", json!({ "x": "y" }))?;
set(&tx, "ext-b", json!({ "y": "x" }))?;
put_meta(&tx, "meta", &"meta-meta".to_string())?;
tx.execute(
"INSERT INTO storage_sync_mirror (guid, ext_id, data)
VALUES ('guid', 'ext-a', null)",
rusqlite::NO_PARAMS,
)?;
assert_eq!(query_count(&tx, "storage_sync_data"), 2);
assert_eq!(query_count(&tx, "storage_sync_mirror"), 1);
assert_eq!(query_count(&tx, "meta"), 1);
wipe_all(&tx)?;
assert_eq!(query_count(&tx, "storage_sync_data"), 0);
assert_eq!(query_count(&tx, "storage_sync_mirror"), 0);
assert_eq!(query_count(&tx, "meta"), 0);
Ok(())
}
}

third_party/rust/webext-storage/src/db.rs (vendored, 3 lines changed)

@ -172,6 +172,7 @@ pub(crate) mod sql_fns {
}
// These should be somewhere else...
#[allow(dead_code)]
pub fn put_meta(db: &Connection, key: &str, value: &dyn ToSql) -> Result<()> {
db.conn().execute_named_cached(
"REPLACE INTO meta (key, value) VALUES (:key, :value)",
@ -180,6 +181,7 @@ pub fn put_meta(db: &Connection, key: &str, value: &dyn ToSql) -> Result<()> {
Ok(())
}
#[allow(dead_code)]
pub fn get_meta<T: FromSql>(db: &Connection, key: &str) -> Result<Option<T>> {
let res = db.conn().try_query_one(
"SELECT value FROM meta WHERE key = :key",
@ -189,6 +191,7 @@ pub fn get_meta<T: FromSql>(db: &Connection, key: &str) -> Result<Option<T>> {
Ok(res)
}
#[allow(dead_code)]
pub fn delete_meta(db: &Connection, key: &str) -> Result<()> {
db.conn()
.execute_named_cached("DELETE FROM meta WHERE key = :key", &[(":key", &key)])?;


@ -56,6 +56,9 @@ pub enum ErrorKind {
#[fail(display = "{}", _0)]
IncomingPayloadError(#[fail(cause)] bridged_engine::PayloadError),
#[fail(display = "This operation isn't implemented yet")]
NotImplemented,
}
error_support::define_error! {

third_party/rust/webext-storage/src/lib.rs (vendored, 5 lines changed)

@ -12,9 +12,6 @@ mod schema;
pub mod store;
mod sync;
// We publish this constant from a non-public module.
pub use sync::STORAGE_VERSION;
// This is what we roughly expect the "bridge" used by desktop to do.
// It's primarily here to avoid dead-code warnings (but I don't want to disable
// those warnings, as stuff that remains after this is suspect!)
@ -26,5 +23,7 @@ pub fn delme_demo_usage() -> error::Result<()> {
store.get("ext-id", json!({}))?;
store.remove("ext-id", json!({}))?;
store.clear("ext-id")?;
// and it might even...
store.wipe_all()?;
Ok(())
}


@ -100,6 +100,15 @@ impl Store {
Ok(result)
}
/// Wipe all local data without syncing or returning any information about
/// the deletion.
pub fn wipe_all(&self) -> Result<()> {
let tx = self.db.unchecked_transaction()?;
api::wipe_all(&tx)?;
tx.commit()?;
Ok(())
}
/// Returns a bridged sync engine for Desktop for this store.
pub fn bridged_engine(&self) -> sync::BridgedEngine<'_> {
sync::BridgedEngine::new(&self.db)


@ -2,19 +2,16 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use rusqlite::Transaction;
use sync15_traits::{self, ApplyResults, IncomingEnvelope, OutgoingEnvelope};
use sync_guid::Guid as SyncGuid;
use crate::db::{delete_meta, get_meta, put_meta, StorageDb};
use crate::error::{Error, Result};
use crate::api;
use crate::db::StorageDb;
use crate::error::{Error, ErrorKind, Result};
use crate::schema;
use crate::sync::incoming::{apply_actions, get_incoming, plan_incoming, stage_incoming};
use crate::sync::outgoing::{get_outgoing, record_uploaded, stage_outgoing};
const LAST_SYNC_META_KEY: &str = "last_sync_time";
const SYNC_ID_META_KEY: &str = "sync_id";
/// A bridged engine implements all the methods needed to make the
/// `storage.sync` store work with Desktop's Sync implementation.
/// Conceptually, it's similar to `sync15_traits::Store`, which we
@ -28,55 +25,29 @@ impl<'a> BridgedEngine<'a> {
pub fn new(db: &'a StorageDb) -> Self {
BridgedEngine { db }
}
fn do_reset(&self, tx: &Transaction<'_>) -> Result<()> {
tx.execute_batch(
"DELETE FROM storage_sync_mirror;
UPDATE storage_sync_data SET sync_change_counter = 1;",
)?;
delete_meta(tx, LAST_SYNC_META_KEY)?;
Ok(())
}
}
impl<'a> sync15_traits::BridgedEngine for BridgedEngine<'a> {
type Error = Error;
fn last_sync(&self) -> Result<i64> {
Ok(get_meta(self.db, LAST_SYNC_META_KEY)?.unwrap_or(0))
Err(ErrorKind::NotImplemented.into())
}
fn set_last_sync(&self, last_sync_millis: i64) -> Result<()> {
put_meta(self.db, LAST_SYNC_META_KEY, &last_sync_millis)?;
Ok(())
fn set_last_sync(&self, _last_sync_millis: i64) -> Result<()> {
Err(ErrorKind::NotImplemented.into())
}
fn sync_id(&self) -> Result<Option<String>> {
Ok(get_meta(self.db, SYNC_ID_META_KEY)?)
Err(ErrorKind::NotImplemented.into())
}
fn reset_sync_id(&self) -> Result<String> {
let tx = self.db.unchecked_transaction()?;
let new_id = SyncGuid::random().to_string();
self.do_reset(&tx)?;
put_meta(self.db, SYNC_ID_META_KEY, &new_id)?;
tx.commit()?;
Ok(new_id)
Err(ErrorKind::NotImplemented.into())
}
fn ensure_current_sync_id(&self, sync_id: &str) -> Result<String> {
let current: Option<String> = get_meta(self.db, SYNC_ID_META_KEY)?;
Ok(match current {
Some(current) if current == sync_id => current,
_ => {
let tx = self.db.unchecked_transaction()?;
self.do_reset(&tx)?;
let result = sync_id.to_string();
put_meta(self.db, SYNC_ID_META_KEY, &result)?;
tx.commit()?;
result
}
})
fn ensure_current_sync_id(&self, _new_sync_id: &str) -> Result<String> {
Err(ErrorKind::NotImplemented.into())
}
fn sync_started(&self) -> Result<()> {
@ -134,187 +105,13 @@ impl<'a> sync15_traits::BridgedEngine for BridgedEngine<'a> {
}
fn reset(&self) -> Result<()> {
let tx = self.db.unchecked_transaction()?;
self.do_reset(&tx)?;
delete_meta(&tx, SYNC_ID_META_KEY)?;
tx.commit()?;
Ok(())
Err(ErrorKind::NotImplemented.into())
}
fn wipe(&self) -> Result<()> {
let tx = self.db.unchecked_transaction()?;
// We assume the meta table is only used by sync.
tx.execute_batch(
"DELETE FROM storage_sync_data; DELETE FROM storage_sync_mirror; DELETE FROM meta;",
)?;
api::wipe_all(&tx)?;
tx.commit()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::db::test::new_mem_db;
use sync15_traits::bridged_engine::BridgedEngine;
fn query_count(conn: &StorageDb, table: &str) -> u32 {
conn.query_row_and_then(
&format!("SELECT COUNT(*) FROM {};", table),
rusqlite::NO_PARAMS,
|row| row.get::<_, u32>(0),
)
.expect("should work")
}
// Sets up mock data for the tests here.
fn setup_mock_data(engine: &super::BridgedEngine<'_>) -> Result<()> {
engine.db.execute(
"INSERT INTO storage_sync_data (ext_id, data, sync_change_counter)
VALUES ('ext-a', 'invalid-json', 2)",
rusqlite::NO_PARAMS,
)?;
engine.db.execute(
"INSERT INTO storage_sync_mirror (guid, ext_id, data)
VALUES ('guid', 'ext-a', null)",
rusqlite::NO_PARAMS,
)?;
engine.set_last_sync(1)?;
// and assert we wrote what we think we did.
assert_eq!(query_count(engine.db, "storage_sync_data"), 1);
assert_eq!(query_count(engine.db, "storage_sync_mirror"), 1);
assert_eq!(query_count(engine.db, "meta"), 1);
Ok(())
}
// Assuming a DB setup with setup_mock_data, assert it was correctly reset.
fn assert_reset(engine: &super::BridgedEngine<'_>) -> Result<()> {
// A reset never wipes data...
assert_eq!(query_count(engine.db, "storage_sync_data"), 1);
// But did reset the change counter.
let cc = engine.db.query_row_and_then(
"SELECT sync_change_counter FROM storage_sync_data WHERE ext_id = 'ext-a';",
rusqlite::NO_PARAMS,
|row| row.get::<_, u32>(0),
)?;
assert_eq!(cc, 1);
// But did wipe the mirror...
assert_eq!(query_count(engine.db, "storage_sync_mirror"), 0);
// And the last_sync should have been wiped.
assert!(get_meta::<i64>(engine.db, LAST_SYNC_META_KEY)?.is_none());
Ok(())
}
// Assuming a DB setup with setup_mock_data, assert it has not been reset.
fn assert_not_reset(engine: &super::BridgedEngine<'_>) -> Result<()> {
assert_eq!(query_count(engine.db, "storage_sync_data"), 1);
let cc = engine.db.query_row_and_then(
"SELECT sync_change_counter FROM storage_sync_data WHERE ext_id = 'ext-a';",
rusqlite::NO_PARAMS,
|row| row.get::<_, u32>(0),
)?;
assert_eq!(cc, 2);
assert_eq!(query_count(engine.db, "storage_sync_mirror"), 1);
// And the last_sync should remain.
assert!(get_meta::<i64>(engine.db, LAST_SYNC_META_KEY)?.is_some());
Ok(())
}
#[test]
fn test_wipe() -> Result<()> {
let db = new_mem_db();
let engine = super::BridgedEngine::new(&db);
setup_mock_data(&engine)?;
engine.wipe()?;
assert_eq!(query_count(engine.db, "storage_sync_data"), 0);
assert_eq!(query_count(engine.db, "storage_sync_mirror"), 0);
assert_eq!(query_count(engine.db, "meta"), 0);
Ok(())
}
#[test]
fn test_reset() -> Result<()> {
let db = new_mem_db();
let engine = super::BridgedEngine::new(&db);
setup_mock_data(&engine)?;
put_meta(engine.db, SYNC_ID_META_KEY, &"sync-id".to_string())?;
engine.reset()?;
assert_reset(&engine)?;
// Only an explicit reset kills the sync-id, so check that here.
assert_eq!(get_meta::<String>(engine.db, SYNC_ID_META_KEY)?, None);
Ok(())
}
#[test]
fn test_ensure_missing_sync_id() -> Result<()> {
let db = new_mem_db();
let engine = super::BridgedEngine::new(&db);
setup_mock_data(&engine)?;
assert_eq!(engine.sync_id()?, None);
// We don't have a sync ID - so setting one should reset.
engine.ensure_current_sync_id("new-id")?;
// should have caused a reset.
assert_reset(&engine)?;
Ok(())
}
#[test]
fn test_ensure_new_sync_id() -> Result<()> {
let db = new_mem_db();
let engine = super::BridgedEngine::new(&db);
setup_mock_data(&engine)?;
put_meta(engine.db, SYNC_ID_META_KEY, &"old-id".to_string())?;
assert_not_reset(&engine)?;
assert_eq!(engine.sync_id()?, Some("old-id".to_string()));
engine.ensure_current_sync_id("new-id")?;
// should have caused a reset.
assert_reset(&engine)?;
// should have the new id.
assert_eq!(engine.sync_id()?, Some("new-id".to_string()));
Ok(())
}
#[test]
fn test_ensure_same_sync_id() -> Result<()> {
let db = new_mem_db();
let engine = super::BridgedEngine::new(&db);
setup_mock_data(&engine)?;
assert_not_reset(&engine)?;
put_meta(engine.db, SYNC_ID_META_KEY, &"sync-id".to_string())?;
engine.ensure_current_sync_id("sync-id")?;
// should not have reset.
assert_not_reset(&engine)?;
Ok(())
}
#[test]
fn test_reset_sync_id() -> Result<()> {
let db = new_mem_db();
let engine = super::BridgedEngine::new(&db);
setup_mock_data(&engine)?;
put_meta(engine.db, SYNC_ID_META_KEY, &"sync-id".to_string())?;
assert_eq!(engine.sync_id()?, Some("sync-id".to_string()));
let new_id = engine.reset_sync_id()?;
// should have caused a reset.
assert_reset(&engine)?;
assert_eq!(engine.sync_id()?, Some(new_id));
Ok(())
}
}


@ -13,7 +13,7 @@ use sync_guid::Guid as SyncGuid;
use crate::error::*;
use super::{merge, remove_matching_keys, JsonMap, Record};
use super::{merge, JsonMap, Record};
/// The state data can be in. Could be represented as Option<JsonMap>, but this
/// is clearer and independent of how the data is stored.
@ -216,31 +216,11 @@ pub fn plan_incoming(s: IncomingState) -> IncomingAction {
data: incoming_data,
}
}
(DataState::Deleted, DataState::Exists(local_data), DataState::Exists(mirror)) => {
// Deleted remotely.
// Treat this as a delete of every key that we
// know was present at the time.
let result = remove_matching_keys(local_data, &mirror);
if result.is_empty() {
// If there were no more keys left, we can
// delete our version too.
IncomingAction::DeleteLocally
} else {
IncomingAction::Merge { data: result }
}
}
(DataState::Deleted, DataState::Exists(local_data), DataState::Deleted) => {
// Perhaps another client created and then deleted
// the whole object for this extension since the
// last time we synced.
// Treat this as a delete of every key that we
// knew was present. Unfortunately, we don't know
// any keys that were present, so we delete no keys.
IncomingAction::Merge { data: local_data }
}
(DataState::Deleted, DataState::Deleted, _) => {
// We agree with the remote (regardless of what we
// have mirrored).
(DataState::Deleted, _, _) => {
// Deleted remotely. Server wins.
// XXX - WRONG - we still want to do a 3-way merge here!
// E.g., if the final key was removed remotely and a different key was added
// locally, the new key should still be added.
IncomingAction::DeleteLocally
}
}
@ -256,18 +236,14 @@ pub fn plan_incoming(s: IncomingState) -> IncomingAction {
// just a 2-way merge...
merge(incoming_data, local_data, None)
}
(DataState::Deleted, DataState::Exists(local_data)) => {
(DataState::Exists(_), DataState::Deleted) => {
// We've data locally, but there's an incoming deletion.
// We would normally remove keys that we knew were
// present on the server, but we don't know what
// was on the server, so we don't remove anything.
IncomingAction::Merge { data: local_data }
// Remote wins.
IncomingAction::DeleteLocally
}
(DataState::Exists(incoming_data), DataState::Deleted) => {
(DataState::Deleted, DataState::Exists(local_data)) => {
// No data locally, but some is incoming - take it.
IncomingAction::TakeRemote {
data: incoming_data,
}
IncomingAction::TakeRemote { data: local_data }
}
(DataState::Deleted, DataState::Deleted) => {
// Nothing anywhere - odd, but OK.
@ -316,12 +292,11 @@ pub fn apply_actions(
// We want to update the local record with 'data' and after this update the item no longer is considered dirty.
IncomingAction::TakeRemote { data } => {
tx.execute_named_cached(
"INSERT OR REPLACE INTO storage_sync_data(ext_id, data, sync_change_counter)
VALUES (:ext_id, :data, 0)",
"UPDATE storage_sync_data SET data = :data, sync_change_counter = 0 WHERE ext_id = :ext_id",
&[
(":ext_id", &item.ext_id),
(":data", &serde_json::Value::Object(data)),
],
]
)?;
}


@ -17,8 +17,6 @@ use incoming::IncomingAction;
type JsonMap = serde_json::Map<String, serde_json::Value>;
pub const STORAGE_VERSION: usize = 1;
/// For use with `#[serde(skip_serializing_if = )]`
#[inline]
pub fn is_default<T: PartialEq + Default>(v: &T) -> bool {
@ -36,66 +34,58 @@ pub struct Record {
}
// Perform a 2-way or 3-way merge, where the incoming value wins on conflict.
fn merge(mut other: JsonMap, mut ours: JsonMap, parent: Option<JsonMap>) -> IncomingAction {
// XXX - this needs more thought, and probably needs significant changes.
// Main problem is that it doesn't handle deletions - but to do that, we need
// something other than a simple Option<JsonMap> - we need to differentiate
// "doesn't exist" from "removed".
// TODO!
fn merge(other: JsonMap, mut ours: JsonMap, parent: Option<JsonMap>) -> IncomingAction {
if other == ours {
return IncomingAction::Same;
}
let old_incoming = other.clone();
if let Some(parent) = parent {
// Perform 3-way merge. First, for every key in parent,
// compare the parent value with the incoming value to compute
// an implicit "diff".
for (key, parent_value) in parent.into_iter() {
if let Some(incoming_value) = other.remove(&key) {
if incoming_value != parent_value {
log::trace!(
"merge: key {} was updated in incoming - copying value locally",
key
);
ours.insert(key, incoming_value);
// Server wins. Iterate over incoming - if incoming and the parent are
// identical, then we will take our local value.
for (key, incoming_value) in other.into_iter() {
let our_value = ours.get(&key);
match our_value {
Some(our_value) => {
if *our_value != incoming_value {
// So we have a discrepancy between 'ours' and 'other' - use the parent
// to resolve.
let can_take_local = match parent {
Some(ref pm) => {
if let Some(pv) = pm.get(&key) {
// parent has a value - we can only take our local
// value if the parent and incoming have the same.
*pv == incoming_value
} else {
// Value doesn't exist in the parent - can't take local
false
}
}
None => {
// 2 way merge because there's no parent. We always
// prefer incoming here.
false
}
};
if can_take_local {
log::trace!("merge: no remote change in key {} - taking local", key);
} else {
log::trace!("merge: conflict in existing key {} - taking remote", key);
ours.insert(key, incoming_value);
}
} else {
log::trace!("merge: local and incoming same for key {}", key);
}
} else {
// Key was not present in incoming value.
// Another client must have deleted it.
log::trace!(
"merge: key {} no longer present in incoming - removing it locally",
key
);
ours.remove(&key);
}
None => {
log::trace!("merge: incoming new value for key {}", key);
ours.insert(key, incoming_value);
}
}
// Then, go through every remaining key in incoming. These are
// the ones where a corresponding key does not exist in
// parent, so it is a new key, and we need to add it.
for (key, incoming_value) in other.into_iter() {
log::trace!(
"merge: key {} doesn't occur in parent - copying from incoming",
key
);
ours.insert(key, incoming_value);
}
} else {
// No parent. Server wins. Overwrite every key in ours with
// the corresponding value in other.
log::trace!("merge: no parent - copying all keys from incoming");
for (key, incoming_value) in other.into_iter() {
ours.insert(key, incoming_value);
}
}
if ours == old_incoming {
IncomingAction::TakeRemote { data: old_incoming }
} else {
IncomingAction::Merge { data: ours }
}
}
fn remove_matching_keys(mut ours: JsonMap, blacklist: &JsonMap) -> JsonMap {
for key in blacklist.keys() {
ours.remove(key);
}
ours
IncomingAction::Merge { data: ours }
}
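To make the server-wins merge semantics above concrete, here is a hypothetical test-style sketch (not part of the original suite); it relies only on the `merge` function and the `JsonMap` alias defined in this file.
#[cfg(test)]
#[test]
fn merge_semantics_sketch() {
    // "a" changed only locally (parent == incoming), so the local value is kept;
    // "b" changed remotely, so the incoming value wins.
    let to_map = |v: serde_json::Value| -> JsonMap {
        serde_json::from_value(v).expect("object literal")
    };
    let parent = to_map(serde_json::json!({"a": 1, "b": 2}));
    let incoming = to_map(serde_json::json!({"a": 1, "b": 3}));
    let ours = to_map(serde_json::json!({"a": 9, "b": 2}));
    assert_eq!(
        merge(incoming, ours, Some(parent)),
        IncomingAction::Merge {
            data: to_map(serde_json::json!({"a": 9, "b": 3}))
        }
    );
}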
// Helpers for tests
@ -168,51 +158,8 @@ mod tests {
data: map!({"other_only": "other", "ours_only": "ours", "common": "new_value"})
}
);
// Field was removed remotely.
assert_eq!(
merge(
map!({"other_only": "other"}),
map!({"common": "old_value"}),
Some(map!({"common": "old_value"})),
),
IncomingAction::TakeRemote {
data: map!({"other_only": "other"}),
}
);
// Field was removed remotely but we added another one.
assert_eq!(
merge(
map!({"other_only": "other"}),
map!({"common": "old_value", "new_key": "new_value"}),
Some(map!({"common": "old_value"})),
),
IncomingAction::Merge {
data: map!({"other_only": "other", "new_key": "new_value"}),
}
);
// Field was removed both remotely and locally.
assert_eq!(
merge(
map!({}),
map!({"new_key": "new_value"}),
Some(map!({"common": "old_value"})),
),
IncomingAction::Merge {
data: map!({"new_key": "new_value"}),
}
);
Ok(())
}
#[test]
fn test_remove_matching_keys() -> Result<()> {
assert_eq!(
remove_matching_keys(
map!({"key1": "value1", "key2": "value2"}),
&map!({"key1": "ignored", "key3": "ignored"})
),
map!({"key2": "value2"})
);
Ok(())
}
// XXX - add `fn test_2way_merging() -> Result<()> {`!!
}


@ -64,15 +64,6 @@ fn check_finished_with(conn: &Connection, ext_id: &str, val: serde_json::Value)
Ok(())
}
fn get_mirror_guid(conn: &Connection, extid: &str) -> Result<String> {
let guid = conn.query_row_and_then(
"SELECT m.guid FROM storage_sync_mirror m WHERE m.ext_id = ?;",
vec![extid],
|row| row.get::<_, String>(0),
)?;
Ok(guid)
}
#[derive(Debug, PartialEq)]
enum DbData {
NoRow,
@ -132,23 +123,6 @@ fn test_simple_outgoing_sync() -> Result<()> {
Ok(())
}
#[test]
fn test_simple_incoming_sync() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
data: Some(data.to_string()),
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
let key1_from_api = get(&tx, "ext-id", json!("key1"))?;
assert_eq!(key1_from_api, json!({"key1": "key1-value"}));
check_finished_with(&tx, "ext-id", data)?;
Ok(())
}
#[test]
fn test_simple_tombstone() -> Result<()> {
// Tombstones are only kept when the mirror has that record - so first
@ -179,13 +153,34 @@ fn test_simple_tombstone() -> Result<()> {
Ok(())
}
#[test]
fn test_merged() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
// Incoming payload without 'key1' and conflicting for 'key2'
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
data: Some(json!({"key2": "key2-value"}).to_string()),
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
json!({"key1": "key1-value", "key2": "key2-value"}),
)?;
Ok(())
}
#[test]
fn test_reconciled() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
// Incoming payload with the same data
// Incoming payload without 'key1' and conflicting for 'key2'
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
@ -197,302 +192,18 @@ fn test_reconciled() -> Result<()> {
Ok(())
}
/// Tests that we handle things correctly if we get a payload that is
/// identical to what is in the mirrored table.
#[test]
fn test_reconcile_with_null_payload() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data.clone())?;
// We try to push this change on the next sync.
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(
get_mirror_data(&tx, "ext-id"),
DbData::Data(data.to_string())
);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Incoming payload with the same data.
// This could happen if, for example, another client changed the
// key and then put it back the way it was.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: Some(data.to_string()),
})?;
// Should be no outgoing records as we reconciled.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
check_finished_with(&tx, "ext-id", data)?;
Ok(())
}
#[test]
fn test_accept_incoming_when_local_is_deleted() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
// We only record an extension as deleted locally if it has been
// uploaded before being deleted.
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
clear(&tx, "ext-id")?;
// Incoming payload without 'key1'. Because we previously uploaded
// key1, this means another client deleted it.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: Some(json!({"key2": "key2-value"}).to_string()),
})?;
// We completely accept the incoming record.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
#[test]
fn test_accept_incoming_when_local_is_deleted_no_mirror() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
clear(&tx, "ext-id")?;
let payload = Payload::from_record(Record {
// Use a random guid so that we don't find the mirrored data.
// This test is somewhat bad because deduping might obviate
// the need for it.
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
data: Some(json!({"key2": "key2-value"}).to_string()),
})?;
// We completely accept the incoming record.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
#[test]
fn test_accept_deleted_key_mirrored() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Incoming payload without 'key1'. Because we previously uploaded
// key1, this means another client deleted it.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: Some(json!({"key2": "key2-value"}).to_string()),
})?;
// We completely accept the incoming record.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
#[test]
fn test_merged_no_mirror() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value"});
set(&tx, "ext-id", data)?;
// Incoming payload without 'key1' and some data for 'key2'.
// Because we never uploaded 'key1', we merge our local values
// with the remote.
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
data: Some(json!({"key2": "key2-value"}).to_string()),
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
json!({"key1": "key1-value", "key2": "key2-value"}),
)?;
Ok(())
}
#[test]
fn test_merged_incoming() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let old_data = json!({"key1": "key1-value", "key2": "key2-value", "doomed_key": "deletable"});
set(&tx, "ext-id", old_data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// We update 'key1' locally.
let local_data = json!({"key1": "key1-new", "key2": "key2-value", "doomed_key": "deletable"});
set(&tx, "ext-id", local_data)?;
// Incoming payload where another client set 'key2' and removed
// the 'doomed_key'.
// Because we never uploaded our data, we'll merge our
// key1 in, but otherwise keep the server's changes.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: Some(json!({"key1": "key1-value", "key2": "key2-incoming"}).to_string()),
})?;
// We should send our 'key1'
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
json!({"key1": "key1-new", "key2": "key2-incoming"}),
)?;
Ok(())
}
#[test]
fn test_merged_with_null_payload() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let old_data = json!({"key1": "key1-value"});
set(&tx, "ext-id", old_data.clone())?;
// Push this change remotely.
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
assert_eq!(
get_mirror_data(&tx, "ext-id"),
DbData::Data(old_data.to_string())
);
let guid = get_mirror_guid(&tx, "ext-id")?;
let local_data = json!({"key1": "key1-new", "key2": "key2-value"});
set(&tx, "ext-id", local_data.clone())?;
// Incoming payload with the same old data.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: Some(old_data.to_string()),
})?;
// Three-way-merge will not detect any change in key1, so we
// should keep our entire new value.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(&tx, "ext-id", local_data)?;
Ok(())
}
#[test]
fn test_deleted_mirrored_object_accept() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Incoming payload with data deleted.
// We synchronize this deletion by deleting the keys we think
// were on the server.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: None,
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(get_local_data(&tx, "ext-id"), DbData::NullRow);
assert_eq!(get_mirror_data(&tx, "ext-id"), DbData::NullRow);
Ok(())
}
#[test]
fn test_deleted_mirrored_object_merged() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
set(
&tx,
"ext-id",
json!({"key1": "key1-new", "key2": "key2-value"}),
)?;
// Incoming payload with data deleted.
// We synchronize this deletion by deleting the keys we think
// were on the server.
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: None,
})?;
// This overrides the change to 'key1', but we still upload 'key2'.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
/// Like the above test, but with a mirrored tombstone.
#[test]
fn test_deleted_mirrored_tombstone_merged() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
// Sync some data so we can get the guid for this extension.
set(&tx, "ext-id", json!({"key1": "key1-value"}))?;
assert_eq!(do_sync(&tx, vec![])?.len(), 1);
let guid = get_mirror_guid(&tx, "ext-id")?;
// Sync a delete for this data so we have a tombstone in the mirror.
let payload = Payload::from_record(Record {
guid: Guid::from(guid.clone()),
ext_id: "ext-id".to_string(),
data: None,
})?;
assert_eq!(do_sync(&tx, vec![payload])?.len(), 0);
assert_eq!(get_mirror_data(&tx, "ext-id"), DbData::NullRow);
// Set some data and sync it simultaneously with another incoming delete.
set(&tx, "ext-id", json!({"key2": "key2-value"}))?;
let payload = Payload::from_record(Record {
guid: Guid::from(guid),
ext_id: "ext-id".to_string(),
data: None,
})?;
// We cannot delete any matching keys because there are no
// matching keys. Instead we push our data.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(&tx, "ext-id", json!({"key2": "key2-value"}))?;
Ok(())
}
#[test]
fn test_deleted_not_mirrored_object_merged() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
// Incoming payload with data deleted.
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
data: None,
})?;
// We normally delete the keys we think were on the server, but
// here we have no information about what was on the server, so we
// don't delete anything. We merge in all undeleted keys.
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(
&tx,
"ext-id",
json!({"key1": "key1-value", "key2": "key2-value"}),
)?;
Ok(())
}
#[test]
fn test_conflicting_incoming() -> Result<()> {
let mut db = new_syncable_mem_db();
let tx = db.transaction()?;
let data = json!({"key1": "key1-value", "key2": "key2-value"});
set(&tx, "ext-id", data)?;
// Incoming payload without 'key1' and conflicting for 'key2'.
// Because we never uploaded either of our keys, we'll merge our
// key1 in, but the server key2 wins.
// Incoming payload without 'key1' and conflicting for 'key2'
let payload = Payload::from_record(Record {
guid: Guid::from("guid"),
ext_id: "ext-id".to_string(),
data: Some(json!({"key2": "key2-incoming"}).to_string()),
})?;
// We should send our 'key1'
assert_eq!(do_sync(&tx, vec![payload])?.len(), 1);
check_finished_with(
&tx,
@ -501,3 +212,6 @@ fn test_conflicting_incoming() -> Result<()> {
)?;
Ok(())
}
// There are lots more we could add here, particularly around the resolution of
// deletion of keys and deletions of the entire value.


@ -18,5 +18,5 @@ xpcom = { path = "../../../../../xpcom/rust/xpcom" }
serde = "1"
serde_json = "1"
storage_variant = { path = "../../../../../storage/variant" }
sql-support = { git = "https://github.com/mozilla/application-services", rev = "dd9bece6e205d4101c841ea5542e9b0814b29d9f" }
webext-storage = { git = "https://github.com/mozilla/application-services", rev = "dd9bece6e205d4101c841ea5542e9b0814b29d9f" }
sql-support = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }
webext-storage = { git = "https://github.com/mozilla/application-services", rev = "e1daa2a7e9add66c5a36a7c967495510c2e117e8" }


@ -78,7 +78,6 @@ DIRS += [
'utils',
'url-classifier',
'urlformatter',
'viaduct',
'viewconfig',
'viewsource',
'windowcreator',


@ -26,5 +26,3 @@ regenerate gfx/layers/protobuf/ LayerScopePacket.proto
regenerate devtools/shared/heapsnapshot/ CoreDump.proto
regenerate toolkit/components/reputationservice/chromium/chrome/common/safe_browsing/ csd.proto
regenerate toolkit/components/url-classifier/chromium/ safebrowsing.proto
command cp third_party/rust/viaduct/src/fetch_msg_types.proto toolkit/components/viaduct/fetch_msg_types.proto
regenerate toolkit/components/viaduct/ fetch_msg_types.proto

Some files were not shown because too many files changed in this diff.