Backed out 2 changesets (bug 1710421) for causing web-platform failures on context.rs. CLOSED TREE

Backed out changeset bdd6f29ebeeb (bug 1710421)
Backed out changeset 1739da34411f (bug 1710421)
Marian-Vasile Laza 2022-06-01 10:34:41 +03:00
Parent bef8f04748
Commit 5d7edc0d2e
1233 changed files with 95463 additions and 83055 deletions

Cargo.lock generated

@ -554,6 +554,12 @@ dependencies = [
"iovec",
]
[[package]]
name = "bytes"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"
[[package]]
name = "bytes"
version = "1.1.0"
@ -601,7 +607,7 @@ dependencies = [
"storage_variant",
"tempfile",
"thin-vec",
"time 0.1.43",
"time",
"wr_malloc_size_of",
"xpcom",
]
@ -669,7 +675,7 @@ dependencies = [
"num-integer",
"num-traits",
"serde",
"time 0.1.43",
"time",
"winapi",
]
@ -739,12 +745,11 @@ dependencies = [
[[package]]
name = "cookie"
version = "0.16.0"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05"
checksum = "888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5"
dependencies = [
"time 0.3.9",
"version_check",
"time",
]
[[package]]
@ -1735,16 +1740,6 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"
dependencies = [
"matches",
"percent-encoding",
]
[[package]]
name = "freetype"
version = "0.7.0"
@ -1870,7 +1865,7 @@ dependencies = [
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-project-lite 0.2.9",
"pin-utils",
"slab",
]
@ -2130,7 +2125,7 @@ dependencies = [
"serde",
"serde_json",
"thiserror",
"time 0.1.43",
"time",
"uuid",
"whatsys",
]
@ -2150,7 +2145,7 @@ dependencies = [
"rkv",
"serde",
"serde_json",
"time 0.1.43",
"time",
"uuid",
"zeitstempel",
]
@ -2306,21 +2301,21 @@ dependencies = [
[[package]]
name = "h2"
version = "0.3.13"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff"
dependencies = [
"bytes 1.1.0",
"bytes 0.5.6",
"fnv",
"futures-core",
"futures-sink",
"futures-util",
"http",
"indexmap",
"log",
"slab",
"tokio 1.17.0",
"tokio-util 0.7.2",
"tracing",
"tokio 0.2.25",
"tokio-util",
]
[[package]]
@ -2390,24 +2385,23 @@ checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df"
[[package]]
name = "http"
version = "0.2.7"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb"
checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b"
dependencies = [
"bytes 1.1.0",
"fnv",
"itoa 1.0.2",
"itoa 0.4.999",
]
[[package]]
name = "http-body"
version = "0.4.5"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b"
dependencies = [
"bytes 1.1.0",
"bytes 0.5.6",
"http",
"pin-project-lite",
]
[[package]]
@ -2457,11 +2451,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
version = "0.14.18"
version = "0.13.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2"
checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f"
dependencies = [
"bytes 1.1.0",
"bytes 0.5.6",
"futures-channel",
"futures-core",
"futures-util",
@ -2469,13 +2463,13 @@ dependencies = [
"http",
"http-body",
"httparse",
"httpdate",
"itoa 1.0.2",
"pin-project-lite",
"itoa 0.4.999",
"log",
"pin-project",
"socket2",
"tokio 1.17.0",
"time",
"tokio 0.2.25",
"tower-service",
"tracing",
"want",
]
@ -2783,7 +2777,7 @@ dependencies = [
"fluent-fallback",
"fluent-testing",
"futures 0.3.21",
"pin-project-lite",
"pin-project-lite 0.2.9",
"replace_with",
"rustc-hash",
"serial_test",
@ -3431,7 +3425,7 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aad9dfe950c057b1bfe9c1f2aa51583a8468ef2a5baba2ebbe06d775efeb7729"
dependencies = [
"time 0.1.43",
"time",
"winapi",
]
@ -3691,15 +3685,6 @@ dependencies = [
"libc",
]
[[package]]
name = "num_threads"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44"
dependencies = [
"libc",
]
[[package]]
name = "objc"
version = "0.2.7"
@ -3942,24 +3927,30 @@ dependencies = [
[[package]]
name = "pin-project"
version = "1.0.10"
version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e"
checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
version = "1.0.10"
version = "0.4.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb"
checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pin-project-lite"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
[[package]]
name = "pin-project-lite"
version = "0.2.9"
@ -4601,14 +4592,14 @@ dependencies = [
[[package]]
name = "serde_urlencoded"
version = "0.7.1"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97"
dependencies = [
"form_urlencoded",
"itoa 1.0.2",
"ryu",
"dtoa",
"itoa 0.4.999",
"serde",
"url",
]
[[package]]
@ -4756,10 +4747,11 @@ dependencies = [
[[package]]
name = "socket2"
version = "0.4.4"
version = "0.3.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e"
dependencies = [
"cfg-if 1.0.0",
"libc",
"winapi",
]
@ -4879,7 +4871,7 @@ dependencies = [
"static_prefs",
"style_derive",
"style_traits",
"time 0.1.43",
"time",
"to_shmem",
"to_shmem_derive",
"toml",
@ -5082,24 +5074,6 @@ dependencies = [
"winapi",
]
[[package]]
name = "time"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2702e08a7a860f005826c6815dcac101b19b5eb330c27fe4a5928fec1d20ddd"
dependencies = [
"itoa 1.0.2",
"libc",
"num_threads",
"time-macros",
]
[[package]]
name = "time-macros"
version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792"
[[package]]
name = "tinystr"
version = "0.3.4"
@ -5151,6 +5125,23 @@ dependencies = [
"tokio-uds",
]
[[package]]
name = "tokio"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092"
dependencies = [
"bytes 0.5.6",
"fnv",
"futures-core",
"iovec",
"lazy_static",
"memchr",
"mio 0.6.23",
"pin-project-lite 0.1.12",
"slab",
]
[[package]]
name = "tokio"
version = "1.17.0"
@ -5158,14 +5149,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
dependencies = [
"bytes 1.1.0",
"libc",
"memchr",
"mio 0.8.0",
"num_cpus",
"pin-project-lite",
"socket2",
"pin-project-lite 0.2.9",
"tokio-macros",
"winapi",
]
[[package]]
@ -5246,17 +5233,6 @@ dependencies = [
"tokio-io",
]
[[package]]
name = "tokio-stream"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio 1.17.0",
]
[[package]]
name = "tokio-tcp"
version = "0.1.4"
@ -5335,23 +5311,16 @@ dependencies = [
[[package]]
name = "tokio-util"
version = "0.6.999"
dependencies = [
"tokio-util 0.7.2",
]
[[package]]
name = "tokio-util"
version = "0.7.2"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c"
checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"
dependencies = [
"bytes 1.1.0",
"bytes 0.5.6",
"futures-core",
"futures-sink",
"pin-project-lite",
"tokio 1.17.0",
"tracing",
"log",
"pin-project-lite 0.1.12",
"tokio 0.2.25",
]
[[package]]
@ -5375,39 +5344,6 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6"
[[package]]
name = "tracing"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09"
dependencies = [
"cfg-if 1.0.0",
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f"
dependencies = [
"lazy_static",
]
[[package]]
name = "tracy-rs"
version = "0.1.2"
@ -5573,6 +5509,12 @@ dependencies = [
"serde",
]
[[package]]
name = "urlencoding"
version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a1f0175e03a0973cf4afd476bef05c26e228520400eb1fd473ad417b1c00ffb"
[[package]]
name = "uuid"
version = "0.8.1"
@ -5638,30 +5580,26 @@ dependencies = [
[[package]]
name = "warp"
version = "0.3.2"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e"
checksum = "0e95175b7a927258ecbb816bdada3cc469cb68593e7940b96a60f4af366a9970"
dependencies = [
"bytes 1.1.0",
"futures-channel",
"futures-util",
"bytes 0.5.6",
"futures 0.3.21",
"headers",
"http",
"hyper",
"log",
"mime",
"mime_guess",
"percent-encoding",
"pin-project",
"scoped-tls",
"serde",
"serde_json",
"serde_urlencoded",
"tokio 1.17.0",
"tokio-stream",
"tokio-util 0.6.999",
"tokio 0.2.25",
"tower-service",
"tracing",
"urlencoding",
]
[[package]]
@ -5730,16 +5668,15 @@ name = "webdriver"
version = "0.45.0"
dependencies = [
"base64 0.12.3",
"bytes 1.1.0",
"bytes 0.5.6",
"cookie",
"http",
"log",
"serde",
"serde_derive",
"serde_json",
"time 0.3.9",
"tokio 1.17.0",
"tokio-stream",
"time",
"tokio 0.2.25",
"unicode-segmentation",
"url",
"warp",
@ -5821,7 +5758,7 @@ dependencies = [
"smallvec",
"svg_fmt",
"swgl",
"time 0.1.43",
"time",
"topological-sort",
"tracy-rs",
"webrender_api",
@ -5844,7 +5781,7 @@ dependencies = [
"serde",
"serde_bytes",
"serde_derive",
"time 0.1.43",
"time",
"wr_malloc_size_of",
]
@ -6153,5 +6090,5 @@ dependencies = [
"flate2",
"msdos_time",
"podio",
"time 0.1.43",
"time",
]


@ -117,9 +117,6 @@ rand = { path = "build/rust/rand" }
# Patch hashbrown 0.9 to 0.11
hashbrown = { path = "build/rust/hashbrown" }
# Patch tokio-util 0.6 to 0.7
tokio-util = { path = "build/rust/tokio-util" }
# Patch autocfg to hide rustc output. Workaround for https://github.com/cuviper/autocfg/issues/30
autocfg = { path = "third_party/rust/autocfg" }


@ -1,23 +0,0 @@
[package]
name = "tokio-util"
version = "0.6.999"
edition = "2018"
license = "MPL-2.0"
[lib]
path = "lib.rs"
[dependencies]
tokio-util = "0.7"
[features]
__docs_rs = ["tokio-util/__docs_rs"]
codec = ["tokio-util/codec"]
compat = ["tokio-util/compat"]
default = ["tokio-util/default"]
full = ["tokio-util/full"]
io = ["tokio-util/io"]
io-util = ["tokio-util/io-util"]
net = ["tokio-util/net"]
rt = ["tokio-util/rt"]
time = ["tokio-util/time"]


@ -1,5 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
pub use tokio_util::*;


@ -9,5 +9,5 @@ byteorder = "1.3.1"
dns-parser = "0.8.0"
gecko-profiler = { path = "../../../../../tools/profiler/rust-api" }
log = "0.4"
socket2 = { version = "0.4", features = ["all"] }
socket2 = { version = "0.3.9", features = ["reuseport"] }
uuid = { version = "0.8", features = ["v4"] }


@ -412,7 +412,7 @@ impl MDNSService {
let mdns_addr = std::net::Ipv4Addr::new(224, 0, 0, 251);
let port = 5353;
let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
let socket = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
socket.set_reuse_address(true)?;
#[cfg(not(target_os = "windows"))]
@ -422,7 +422,7 @@ impl MDNSService {
port,
))))?;
let socket = std::net::UdpSocket::from(socket);
let socket = socket.into_udp_socket();
socket.set_multicast_loop_v4(true)?;
socket.set_read_timeout(Some(time::Duration::from_millis(1)))?;
socket.set_write_timeout(Some(time::Duration::from_millis(1)))?;
@ -658,7 +658,7 @@ mod tests {
fn listen_until(addr: &std::net::Ipv4Addr, stop: u64) -> thread::JoinHandle<Vec<String>> {
let port = 5353;
let socket = Socket::new(Domain::IPV4, Type::DGRAM, None).unwrap();
let socket = Socket::new(Domain::ipv4(), Type::dgram(), None).unwrap();
socket.set_reuse_address(true).unwrap();
#[cfg(not(target_os = "windows"))]
@ -670,7 +670,7 @@ mod tests {
))))
.unwrap();
let socket = std::net::UdpSocket::from(socket);
let socket = socket.into_udp_socket();
socket.set_multicast_loop_v4(true).unwrap();
socket
.set_read_timeout(Some(time::Duration::from_millis(10)))
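The MDNSService hunks above are the functional core of the socket2 downgrade: 0.4's associated constants (`Domain::IPV4`, `Type::DGRAM`) and its `From<Socket>` conversion to `std::net::UdpSocket` go back to 0.3's constructor functions and `into_udp_socket()`. A minimal sketch of the 0.3-flavoured setup path, assuming socket2 0.3; the port and timeout values mirror the diff, the helper itself is illustrative only:

```rust
use socket2::{Domain, SockAddr, Socket, Type};
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4, UdpSocket};
use std::time::Duration;

// Sketch of the socket2 0.3 API used after the backout (hypothetical helper).
fn open_mdns_socket(addr: Ipv4Addr) -> std::io::Result<UdpSocket> {
    // 0.4 spelling: Socket::new(Domain::IPV4, Type::DGRAM, None)
    let socket = Socket::new(Domain::ipv4(), Type::dgram(), None)?;
    socket.set_reuse_address(true)?;
    socket.bind(&SockAddr::from(SocketAddr::from(SocketAddrV4::new(addr, 5353))))?;
    // 0.4 spelling: std::net::UdpSocket::from(socket)
    let socket = socket.into_udp_socket();
    socket.set_multicast_loop_v4(true)?;
    socket.set_read_timeout(Some(Duration::from_millis(1)))?;
    Ok(socket)
}
```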


@ -74,7 +74,7 @@ PACKAGES_WE_ALWAYS_WANT_AN_OVERRIDE_OF = [
# add a comment as to why.
TOLERATED_DUPES = {
"base64": 2,
"bytes": 2,
"bytes": 3,
"crossbeam-deque": 2,
"crossbeam-epoch": 2,
"crossbeam-utils": 3,
@ -82,11 +82,8 @@ TOLERATED_DUPES = {
"libloading": 2,
"memoffset": 2,
"mio": 2,
# Transition from time 0.1 to 0.3 underway, but chrono is stuck on 0.1
# and hasn't been updated in 1.5 years (an hypothetical update is
# expected to remove the dependency on time altogether).
"time": 2,
"tokio": 2,
"pin-project-lite": 2,
"tokio": 3,
}


@ -13,7 +13,7 @@ edition = "2018"
base64 = "0.12"
chrono = "0.4.6"
clap = { version = "3.1", default-features = false, features = ["cargo", "std", "suggestions", "wrap_help"] }
hyper = "0.14"
hyper = "0.13"
lazy_static = "1.0"
log = { version = "0.4", features = ["std"] }
marionette = { path = "./marionette", version="0.2.0" }


@ -12,20 +12,19 @@ edition = "2018"
[features]
default = ["server"]
server = ["tokio", "tokio-stream", "warp"]
server = ["tokio", "warp"]
[dependencies]
base64 = "0.12"
bytes = "1.0"
cookie = { version = "0.16", default-features = false }
bytes = "0.5"
cookie = { version = "0.12", default-features = false }
http = "0.2"
log = "0.4"
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
time = "0.3"
tokio = { version = "1.0", features = ["rt", "net"], optional = true}
tokio-stream = { version = "0.1", features = ["net"], optional = true}
time = "0.1"
tokio = { version = "0.2", features = ["rt-core"], optional = true}
unicode-segmentation = "1.2"
url = "2.0"
warp = { version = "0.3", default-features = false, optional = true }
warp = { version = "0.2", default-features = false, optional = true }


@ -17,7 +17,6 @@ use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use tokio::net::TcpListener;
use tokio_stream::wrappers::TcpListenerStream;
use url::{Host, Url};
use warp::{self, Buf, Filter, Rejection};
@ -219,11 +218,14 @@ where
let builder = thread::Builder::new().name("webdriver server".to_string());
let handle = builder.spawn(move || {
let rt = tokio::runtime::Builder::new_current_thread()
let mut rt = tokio::runtime::Builder::new()
.basic_scheduler()
.enable_io()
.build()
.unwrap();
let listener = TcpListener::from_std(listener).unwrap();
let mut listener = rt
.handle()
.enter(|| TcpListener::from_std(listener).unwrap());
let wroutes = build_warp_routes(
address,
allow_hosts,
@ -231,7 +233,7 @@ where
&extension_routes,
msg_send.clone(),
);
let fut = warp::serve(wroutes).run_incoming(TcpListenerStream::new(listener));
let fut = warp::serve(wroutes).run_incoming(listener.incoming());
rt.block_on(fut);
})?;
@ -496,7 +498,7 @@ fn build_route<U: 'static + WebDriverExtensionRoute + Send + Sync>(
Some(_) | None => {}
}
}
let body = String::from_utf8(body.chunk().to_vec());
let body = String::from_utf8(body.bytes().to_vec());
if body.is_err() {
let err = WebDriverError::new(
ErrorStatus::UnknownError,
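The webdriver server hunks above are where the tokio/warp downgrade actually changes behaviour: on tokio 1.x the thread built a current-thread runtime and wrapped the listener in `tokio_stream::wrappers::TcpListenerStream`, whereas on tokio 0.2 the basic scheduler is used, the std listener is converted inside `Handle::enter` (so it registers with that runtime's reactor), and `listener.incoming()` is fed directly to warp 0.2's `run_incoming`. Likewise, the 0.2-era `Buf` exposes the request body via `bytes()` rather than the 1.x `chunk()`. A minimal, self-contained sketch of that 0.2-era startup pattern, with a placeholder filter standing in for the real `build_warp_routes` output:

```rust
use warp::Filter;

// Sketch only (tokio 0.2 + warp 0.2); `std_listener` is assumed to be already bound.
fn run_server(std_listener: std::net::TcpListener) {
    let routes = warp::any().map(|| "ok"); // placeholder for build_warp_routes(...)

    // tokio 0.2: basic_scheduler() is the counterpart of 1.x new_current_thread().
    let mut rt = tokio::runtime::Builder::new()
        .basic_scheduler()
        .enable_io()
        .build()
        .unwrap();

    // tokio 0.2: from_std must run inside the runtime context, hence Handle::enter.
    let mut listener = rt
        .handle()
        .enter(|| tokio::net::TcpListener::from_std(std_listener).unwrap());

    // warp 0.2 consumes a Stream of connections; tokio 0.2's TcpListener provides
    // one via incoming(), so no TcpListenerStream wrapper is needed.
    let fut = warp::serve(routes).run_incoming(listener.incoming());
    rt.block_on(fut);
}
```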

third_party/rust/bytes-0.5.6/.cargo-checksum.json vendored Normal file

@ -0,0 +1 @@
{"files":{"CHANGELOG.md":"7c1c6fe9fa6aa8a155d4a04dab5d4e3abadb349121886b2f24252db0e45fba51","Cargo.toml":"bb5072cd9bad83919ed35f49f3a7f88b608a0150d6ccdcbb4bf17dfb3c64ef3f","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"2c2f6f1a240ad375f9dbd8e7f023510b645d98e327ea0a42ba339c94fd9baaa9","benches/buf.rs":"b0f4f1130081680f6f99d1efd49a75bd1d97d9a30117b7ad9525c96b7c8968e6","benches/bytes.rs":"dc5289a9ce82be35e71ed5853ab33aa108a30460e481135f6058fe4d2f7dc15e","benches/bytes_mut.rs":"1326fe6224b26826228e02b4133151e756f38152c2d9cfe66adf83af76c3ec98","ci/test-stable.sh":"6e010f1a95b72fea7bebdd217fda78427f3eb07b1e753f79507c71d982b2d38a","ci/tsan.sh":"466b86b19225dd26c756cf2252cb1973f87a145642c99364b462ed7ceb55c7dd","src/buf/buf_impl.rs":"fe1bc64bb9aef5b57d83901268f89bf148490e71bebc340c7ecc40ff95bcfb70","src/buf/buf_mut.rs":"d226189d9db76c9023537dcca0687aa5dd25851a9052d19154de8ee9b25bdee3","src/buf/ext/chain.rs":"337f58e1a8da5b4768e55921ff394f4ba3a0c6d476448fd5bceab6f3c1db1b3e","src/buf/ext/limit.rs":"a705d7cf38f9a11a904d6ee5e7afea83e9abdf8f454bb8e16b407b0e055dc11a","src/buf/ext/mod.rs":"ba2fa392c61b7429530c71797114e3f09d9b6b750b6f77f57fde964d2b218bc4","src/buf/ext/reader.rs":"ee4733fa2c2d893c6df8151c2333a46171619e8a45ec9bae863edc8deb438ac5","src/buf/ext/take.rs":"e92be765539b8b0c1cb67a01b691319cccd35fc098f2bb59ced3bbbe41ee0257","src/buf/ext/writer.rs":"3c52df6e73d09935d37bed9a05689c1966952f980b85b40aaab05081ec7ef6d8","src/buf/iter.rs":"a0de69367fa61d0d1c6c2ff4b4d337de9c5f4213d0c86e083226cf409666d860","src/buf/mod.rs":"4f8e3b4c4b69b7d004306d458ad835801e53659b38ca08312d7217d82da4c64f","src/buf/vec_deque.rs":"5a4063961d10380c1ab3681f8b3f6201112766d9f57a63e2861dc9f2b134668d","src/bytes.rs":"8c3aa5fe425604206ffc1b85a8bff5a9be38917786453450955984523f829cec","src/bytes_mut.rs":"e276f74da841ab65ca681cb09820de98aa2e9837dd975ed564b1a9be40440cf3","src/fmt/debug.rs":"19ebe7e5516e40ab712995f3ec2e0ba78ddfa905cce117e6d01e8eb330f3970a","src/fmt/hex.rs":"13755ec6f1b79923e1f1a05c51b179a38c03c40bb8ed2db0210e8901812e61e7","src/fmt/mod.rs":"176da4e359da99b8e5cf16e480cb7b978f574876827f1b9bb9c08da4d74ac0f5","src/lib.rs":"9b96e2a011a782ceb82428e6b71fd212a46bc186bd152102018c7b6428a0d441","src/loom.rs":"5dc97a5afce14875a66e44cbf0afa67e084c8b6b8c560bc14e7a70ef73aee96e","src/serde.rs":"3ecd7e828cd4c2b7db93c807cb1548fad209e674df493edf7cda69a7b04d405d","tests/test_buf.rs":"3ca99c58f470e7c4beb18e5dc69250ce541dd8ac96b88fb1162640510a735ada","tests/test_buf_mut.rs":"56636e439cb07af2fabdfb60a08995829680c9730a8ebe5c6ad2f54dbf208e32","tests/test_bytes.rs":"3ec0a82ce98fea633ed7d635caca21cd8035d0c9ea4287d1cc0199e167a4a3c1","tests/test_bytes_odd_alloc.rs":"87d51d4ab6ad98193b140ea8158f6631eba985a204c2ea94d34b3bb157791a16","tests/test_bytes_vec_alloc.rs":"2b686b6ab44f924e69d8270a4f256eb3626a3b4db8c1919b74bc422c10124899","tests/test_chain.rs":"71772fbc0bab72a697bd85c6c1be0eddfe7d7dc4f4737a0cd53be4ad191d076b","tests/test_debug.rs":"13299107172809e8cbbd823964ac9450cd0d6b6de79f2e6a2e0f44b9225a0593","tests/test_iter.rs":"c1f46823df26a90139645fd8728a03138edd95b2849dfec830452a80ddd9726d","tests/test_reader.rs":"9c94e164aa7de4c10966f8084ad04d06f4e9c66e156d017d194a1dac3dfc6619","tests/test_serde.rs":"2691f891796ba259de0ecf926de05c514f4912cc5fcd3e6a1591efbcd23ed4d0","tests/test_take.rs":"975aa2e216b6a3c939b31e41ecfbb3a90938096413a14a2ae986c842d2250180"},"package":"0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"}

third_party/rust/bytes-0.5.6/CHANGELOG.md vendored Normal file

@ -0,0 +1,166 @@
# 0.5.6 (July 13, 2020)
- Improve `BytesMut` to reuse buffer when fully `advance`d.
- Mark `BytesMut::{as_mut, set_len}` with `#[inline]`.
- Relax synchronization when cloning in shared vtable of `Bytes`.
- Move `loom` to `dev-dependencies`.
# 0.5.5 (June 18, 2020)
### Added
- Allow using the `serde` feature in `no_std` environments (#385).
### Fix
- Fix `BufMut::advance_mut` to panic if advanced passed the capacity (#354)..
- Fix `BytesMut::freeze` ignoring amount previously `advance`d (#352).
# 0.5.4 (January 23, 2020)
### Added
- Make `Bytes::new` a `const fn`.
- Add `From<BytesMut>` for `Bytes`.
### Fix
- Fix reversed arguments in `PartialOrd` for `Bytes`.
- Fix `Bytes::truncate` losing original capacity when repr is an unshared `Vec`.
- Fix `Bytes::from(Vec)` when allocator gave `Vec` a pointer with LSB set.
- Fix panic in `Bytes::slice_ref` if argument is an empty slice.
# 0.5.3 (December 12, 2019)
### Added
- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337).
### Fix
- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec<u8>` (#341, #342).
- Calling `Bytes::truncate` with a size large than the length will no longer clear the `Bytes` (#333).
# 0.5.2 (November 27, 2019)
### Added
- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325).
# 0.5.1 (November 25, 2019)
### Fix
- Growth documentation for `BytesMut` (#321)
# 0.5.0 (November 25, 2019)
### Fix
- Potential overflow in `copy_to_slice`
### Changed
- Increased minimum supported Rust version to 1.39.
- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298)
- `BytesMut` implicitly grows internal storage. `remaining_mut()` returns
`usize::MAX` (#316).
- `BufMut::bytes_mut` returns `&mut [MaybeUninit<u8>]` to reflect the unknown
initialization state (#305).
- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]`
respectively (#261).
- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306).
- `BufMutExt::limit` (#309).
- `Bytes::slice` takes a `RangeBounds` argument (#265).
- `Bytes::from_static` is now a `const fn` (#311).
- A multitude of smaller performance optimizations.
### Added
- `no_std` support (#281).
- `get_*`, `put_*`, `get_*_le`, and `put_*le` accessors for handling byte order.
- `BorrowMut` implementation for `BytesMut` (#185).
### Removed
- `IntoBuf` (#288).
- `Buf` implementation for `&str` (#301).
- `byteorder` dependency (#280).
- `iovec` dependency, use `std::IoSlice` instead (#263).
- optional `either` dependency (#315).
- optional `i128` feature -- now available on stable. (#276).
# 0.4.12 (March 6, 2019)
### Added
- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244).
- Implement `Buf` for `VecDeque` (#249).
# 0.4.11 (November 17, 2018)
* Use raw pointers for potentially racy loads (#233).
* Implement `BufRead` for `buf::Reader` (#232).
* Documentation tweaks (#234).
# 0.4.10 (September 4, 2018)
* impl `Buf` and `BufMut` for `Either` (#225).
* Add `Bytes::slice_ref` (#208).
# 0.4.9 (July 12, 2018)
* Add 128 bit number support behind a feature flag (#209).
* Implement `IntoBuf` for `&mut [u8]`
# 0.4.8 (May 25, 2018)
* Fix panic in `BytesMut` `FromIterator` implementation.
* Bytes: Recycle space when reserving space in vec mode (#197).
* Bytes: Add resize fn (#203).
# 0.4.7 (April 27, 2018)
* Make `Buf` and `BufMut` usable as trait objects (#186).
* impl BorrowMut for BytesMut (#185).
* Improve accessor performance (#195).
# 0.4.6 (Janary 8, 2018)
* Implement FromIterator for Bytes/BytesMut (#148).
* Add `advance` fn to Bytes/BytesMut (#166).
* Add `unsplit` fn to `BytesMut` (#162, #173).
* Improvements to Bytes split fns (#92).
# 0.4.5 (August 12, 2017)
* Fix range bug in `Take::bytes`
* Misc performance improvements
* Add extra `PartialEq` implementations.
* Add `Bytes::with_capacity`
* Implement `AsMut[u8]` for `BytesMut`
# 0.4.4 (May 26, 2017)
* Add serde support behind feature flag
* Add `extend_from_slice` on `Bytes` and `BytesMut`
* Add `truncate` and `clear` on `Bytes`
* Misc additional std trait implementations
* Misc performance improvements
# 0.4.3 (April 30, 2017)
* Fix Vec::advance_mut bug
* Bump minimum Rust version to 1.15
* Misc performance tweaks
# 0.4.2 (April 5, 2017)
* Misc performance tweaks
* Improved `Debug` implementation for `Bytes`
* Avoid some incorrect assert panics
# 0.4.1 (March 15, 2017)
* Expose `buf` module and have most types available from there vs. root.
* Implement `IntoBuf` for `T: Buf`.
* Add `FromBuf` and `Buf::collect`.
* Add iterator adapter for `Buf`.
* Add scatter/gather support to `Buf` and `BufMut`.
* Add `Buf::chain`.
* Reduce allocations on repeated calls to `BytesMut::reserve`.
* Implement `Debug` for more types.
* Remove `Source` in favor of `IntoBuf`.
* Implement `Extend` for `BytesMut`.
# 0.4.0 (February 24, 2017)
* Initial release

third_party/rust/bytes-0.5.6/Cargo.toml vendored Normal file

@ -0,0 +1,37 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "bytes"
version = "0.5.6"
authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"]
description = "Types and traits for working with bytes"
documentation = "https://docs.rs/bytes"
readme = "README.md"
keywords = ["buffers", "zero-copy", "io"]
categories = ["network-programming", "data-structures"]
license = "MIT"
repository = "https://github.com/tokio-rs/bytes"
[dependencies.serde]
version = "1.0.60"
features = ["alloc"]
optional = true
default-features = false
[dev-dependencies.serde_test]
version = "1.0"
[features]
default = ["std"]
std = []
[target."cfg(loom)".dev-dependencies.loom]
version = "0.3"


@ -1,4 +1,4 @@
Copyright (c) 2019 Tokio Contributors
Copyright (c) 2018 Carl Lerche
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

third_party/rust/bytes-0.5.6/README.md vendored Normal file

@ -0,0 +1,47 @@
# Bytes
A utility library for working with bytes.
[![Crates.io][crates-badge]][crates-url]
[![Build Status][ci-badge]][ci-url]
[crates-badge]: https://img.shields.io/crates/v/bytes.svg
[crates-url]: https://crates.io/crates/bytes
[ci-badge]: https://github.com/tokio-rs/bytes/workflows/CI/badge.svg
[ci-url]: https://github.com/tokio-rs/bytes/actions
[Documentation](https://docs.rs/bytes)
## Usage
To use `bytes`, first add this to your `Cargo.toml`:
```toml
[dependencies]
bytes = "0.5"
```
Next, add this to your crate:
```rust
use bytes::{Bytes, BytesMut, Buf, BufMut};
```
## Serde support
Serde support is optional and disabled by default. To enable use the feature `serde`.
```toml
[dependencies]
bytes = { version = "0.5", features = ["serde"] }
```
## License
This project is licensed under the [MIT license](LICENSE).
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in `bytes` by you, shall be licensed as MIT, without any additional
terms or conditions.

third_party/rust/bytes-0.5.6/benches/buf.rs vendored Normal file

@ -0,0 +1,187 @@
#![feature(test)]
#![warn(rust_2018_idioms)]
extern crate test;
use bytes::Buf;
use test::Bencher;
/// Dummy Buf implementation
struct TestBuf {
buf: &'static [u8],
readlens: &'static [usize],
init_pos: usize,
pos: usize,
readlen_pos: usize,
readlen: usize,
}
impl TestBuf {
fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf {
let mut buf = TestBuf {
buf,
readlens,
init_pos,
pos: 0,
readlen_pos: 0,
readlen: 0,
};
buf.reset();
buf
}
fn reset(&mut self) {
self.pos = self.init_pos;
self.readlen_pos = 0;
self.next_readlen();
}
/// Compute the length of the next read :
/// - use the next value specified in readlens (capped by remaining) if any
/// - else the remaining
fn next_readlen(&mut self) {
self.readlen = self.buf.len() - self.pos;
if let Some(readlen) = self.readlens.get(self.readlen_pos) {
self.readlen = std::cmp::min(self.readlen, *readlen);
self.readlen_pos += 1;
}
}
}
impl Buf for TestBuf {
fn remaining(&self) -> usize {
return self.buf.len() - self.pos;
}
fn advance(&mut self, cnt: usize) {
self.pos += cnt;
assert!(self.pos <= self.buf.len());
self.next_readlen();
}
fn bytes(&self) -> &[u8] {
if self.readlen == 0 {
Default::default()
} else {
&self.buf[self.pos..self.pos + self.readlen]
}
}
}
/// Dummy Buf implementation
/// version with methods forced to not be inlined (to simulate costly calls)
struct TestBufC {
inner: TestBuf,
}
impl TestBufC {
fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC {
TestBufC {
inner: TestBuf::new(buf, readlens, init_pos),
}
}
fn reset(&mut self) {
self.inner.reset()
}
}
impl Buf for TestBufC {
#[inline(never)]
fn remaining(&self) -> usize {
self.inner.remaining()
}
#[inline(never)]
fn advance(&mut self, cnt: usize) {
self.inner.advance(cnt)
}
#[inline(never)]
fn bytes(&self) -> &[u8] {
self.inner.bytes()
}
}
macro_rules! bench {
($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => (
#[bench]
fn $fname(b: &mut Bencher) {
let mut bufs = [
$testbuf::new(&[1u8; 8+0], $readlens, 0),
$testbuf::new(&[1u8; 8+1], $readlens, 1),
$testbuf::new(&[1u8; 8+2], $readlens, 2),
$testbuf::new(&[1u8; 8+3], $readlens, 3),
$testbuf::new(&[1u8; 8+4], $readlens, 4),
$testbuf::new(&[1u8; 8+5], $readlens, 5),
$testbuf::new(&[1u8; 8+6], $readlens, 6),
$testbuf::new(&[1u8; 8+7], $readlens, 7),
];
b.iter(|| {
for i in 0..8 {
bufs[i].reset();
let buf: &mut dyn Buf = &mut bufs[i]; // type erasure
test::black_box(buf.$method($($arg,)*));
}
})
}
);
($fname:ident, slice, $method:ident $(,$arg:expr)*) => (
#[bench]
fn $fname(b: &mut Bencher) {
// buf must be long enough for one read of 8 bytes starting at pos 7
let arr = [1u8; 8+7];
b.iter(|| {
for i in 0..8 {
let mut buf = &arr[i..];
let buf = &mut buf as &mut dyn Buf; // type erasure
test::black_box(buf.$method($($arg,)*));
}
})
}
);
($fname:ident, option) => (
#[bench]
fn $fname(b: &mut Bencher) {
let data = [1u8; 1];
b.iter(|| {
for _ in 0..8 {
let mut buf = Some(data);
let buf = &mut buf as &mut dyn Buf; // type erasure
test::black_box(buf.get_u8());
}
})
}
);
}
macro_rules! bench_group {
($method:ident $(,$arg:expr)*) => (
bench!(slice, slice, $method $(,$arg)*);
bench!(tbuf_1, testbuf TestBuf &[], $method $(,$arg)*);
bench!(tbuf_1_costly, testbuf TestBufC &[], $method $(,$arg)*);
bench!(tbuf_2, testbuf TestBuf &[1], $method $(,$arg)*);
bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*);
// bench!(tbuf_onebyone, testbuf TestBuf &[1,1,1,1,1,1,1,1], $method $(,$arg)*);
// bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*);
);
}
mod get_u8 {
use super::*;
bench_group!(get_u8);
bench!(option, option);
}
mod get_u16 {
use super::*;
bench_group!(get_u16);
}
mod get_u32 {
use super::*;
bench_group!(get_u32);
}
mod get_u64 {
use super::*;
bench_group!(get_u64);
}
mod get_f32 {
use super::*;
bench_group!(get_f32);
}
mod get_f64 {
use super::*;
bench_group!(get_f64);
}
mod get_uint24 {
use super::*;
bench_group!(get_uint, 3);
}

third_party/rust/bytes-0.5.6/benches/bytes.rs vendored Normal file

@ -0,0 +1,119 @@
#![feature(test)]
#![warn(rust_2018_idioms)]
extern crate test;
use bytes::Bytes;
use test::Bencher;
#[bench]
fn deref_unique(b: &mut Bencher) {
let buf = Bytes::from(vec![0; 1024]);
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_shared(b: &mut Bencher) {
let buf = Bytes::from(vec![0; 1024]);
let _b2 = buf.clone();
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_static(b: &mut Bencher) {
let buf = Bytes::from_static(b"hello world");
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn clone_static(b: &mut Bencher) {
let bytes =
Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn clone_shared(b: &mut Bencher) {
let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 0987654321".to_vec());
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn clone_arc_vec(b: &mut Bencher) {
use std::sync::Arc;
let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec());
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn from_long_slice(b: &mut Bencher) {
let data = [0u8; 128];
b.bytes = data.len() as u64;
b.iter(|| {
let buf = Bytes::copy_from_slice(&data[..]);
test::black_box(buf);
})
}
#[bench]
fn slice_empty(b: &mut Bencher) {
b.iter(|| {
let b = Bytes::from(vec![17; 1024]).clone();
for i in 0..1000 {
test::black_box(b.slice(i % 100..i % 100));
}
})
}
#[bench]
fn slice_short_from_arc(b: &mut Bencher) {
b.iter(|| {
// `clone` is to convert to ARC
let b = Bytes::from(vec![17; 1024]).clone();
for i in 0..1000 {
test::black_box(b.slice(1..2 + i % 10));
}
})
}
#[bench]
fn split_off_and_drop(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1024 {
let v = vec![10; 200];
let mut b = Bytes::from(v);
test::black_box(b.split_off(100));
test::black_box(b);
}
})
}

third_party/rust/bytes-0.5.6/benches/bytes_mut.rs vendored Normal file

@ -0,0 +1,266 @@
#![feature(test)]
#![warn(rust_2018_idioms)]
extern crate test;
use bytes::{BufMut, BytesMut};
use test::Bencher;
#[bench]
fn alloc_small(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1024 {
test::black_box(BytesMut::with_capacity(12));
}
})
}
#[bench]
fn alloc_mid(b: &mut Bencher) {
b.iter(|| {
test::black_box(BytesMut::with_capacity(128));
})
}
#[bench]
fn alloc_big(b: &mut Bencher) {
b.iter(|| {
test::black_box(BytesMut::with_capacity(4096));
})
}
#[bench]
fn deref_unique(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(4096);
buf.put(&[0u8; 1024][..]);
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_unique_unroll(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(4096);
buf.put(&[0u8; 1024][..]);
b.iter(|| {
for _ in 0..128 {
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_shared(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(4096);
buf.put(&[0u8; 1024][..]);
let _b2 = buf.split_off(1024);
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_two(b: &mut Bencher) {
let mut buf1 = BytesMut::with_capacity(8);
buf1.put(&[0u8; 8][..]);
let mut buf2 = BytesMut::with_capacity(4096);
buf2.put(&[0u8; 1024][..]);
b.iter(|| {
for _ in 0..512 {
test::black_box(&buf1[..]);
test::black_box(&buf2[..]);
}
})
}
#[bench]
fn clone_frozen(b: &mut Bencher) {
let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..])
.split()
.freeze();
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn alloc_write_split_to_mid(b: &mut Bencher) {
b.iter(|| {
let mut buf = BytesMut::with_capacity(128);
buf.put_slice(&[0u8; 64]);
test::black_box(buf.split_to(64));
})
}
#[bench]
fn drain_write_drain(b: &mut Bencher) {
let data = [0u8; 128];
b.iter(|| {
let mut buf = BytesMut::with_capacity(1024);
let mut parts = Vec::with_capacity(8);
for _ in 0..8 {
buf.put(&data[..]);
parts.push(buf.split_to(128));
}
test::black_box(parts);
})
}
#[bench]
fn fmt_write(b: &mut Bencher) {
use std::fmt::Write;
let mut buf = BytesMut::with_capacity(128);
let s = "foo bar baz quux lorem ipsum dolor et";
b.bytes = s.len() as u64;
b.iter(|| {
let _ = write!(buf, "{}", s);
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
})
}
#[bench]
fn bytes_mut_extend(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.extend(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
// BufMut for BytesMut vs Vec<u8>
#[bench]
fn put_slice_bytes_mut(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.put_slice(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_u8_bytes_mut(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(256);
let cnt = 128;
b.bytes = cnt as u64;
b.iter(|| {
for _ in 0..cnt {
buf.put_u8(b'x');
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_slice_vec(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.put_slice(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_u8_vec(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let cnt = 128;
b.bytes = cnt as u64;
b.iter(|| {
for _ in 0..cnt {
buf.put_u8(b'x');
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_slice_vec_extend(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.extend_from_slice(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_u8_vec_push(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let cnt = 128;
b.bytes = cnt as u64;
b.iter(|| {
for _ in 0..cnt {
buf.push(b'x');
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}

third_party/rust/bytes-0.5.6/ci/test-stable.sh vendored Normal file

@ -0,0 +1,27 @@
#!/bin/bash
set -ex
cmd="${1:-test}"
# Install cargo-hack for feature flag test
cargo install cargo-hack
# Run with each feature
# * --each-feature includes both default/no-default features
# * --optional-deps is needed for serde feature
cargo hack "${cmd}" --each-feature --optional-deps
# Run with all features
cargo "${cmd}" --all-features
cargo doc --no-deps --all-features
if [[ "${RUST_VERSION}" == "nightly"* ]]; then
# Check benchmarks
cargo check --benches
# Check minimal versions
cargo clean
cargo update -Zminimal-versions
cargo check --all-features
fi

third_party/rust/bytes-0.5.6/ci/tsan.sh vendored Normal file

@ -0,0 +1,13 @@
#!/bin/bash
set -ex
export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0"
# Run address sanitizer
RUSTFLAGS="-Z sanitizer=address" \
cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut
# Run thread sanitizer
RUSTFLAGS="-Z sanitizer=thread" \
cargo -Zbuild-std test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut

third_party/rust/bytes-0.5.6/src/buf/buf_impl.rs vendored Normal file

File diff suppressed because it is too large.

third_party/rust/bytes-0.5.6/src/buf/buf_mut.rs vendored Normal file

File diff suppressed because it is too large.

third_party/rust/bytes-0.5.6/src/buf/ext/chain.rs vendored Normal file

@ -0,0 +1,233 @@
use crate::buf::IntoIter;
use crate::{Buf, BufMut};
use core::mem::MaybeUninit;
#[cfg(feature = "std")]
use crate::buf::IoSliceMut;
#[cfg(feature = "std")]
use std::io::IoSlice;
/// A `Chain` sequences two buffers.
///
/// `Chain` is an adapter that links two underlying buffers and provides a
/// continuous view across both buffers. It is able to sequence either immutable
/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
///
/// This struct is generally created by calling [`Buf::chain`]. Please see that
/// function's documentation for more detail.
///
/// # Examples
///
/// ```
/// use bytes::{Bytes, Buf, buf::BufExt};
///
/// let mut buf = (&b"hello "[..])
/// .chain(&b"world"[..]);
///
/// let full: Bytes = buf.to_bytes();
/// assert_eq!(full[..], b"hello world"[..]);
/// ```
///
/// [`Buf::chain`]: trait.Buf.html#method.chain
/// [`Buf`]: trait.Buf.html
/// [`BufMut`]: trait.BufMut.html
#[derive(Debug)]
pub struct Chain<T, U> {
a: T,
b: U,
}
impl<T, U> Chain<T, U> {
/// Creates a new `Chain` sequencing the provided values.
pub fn new(a: T, b: U) -> Chain<T, U> {
Chain { a, b }
}
/// Gets a reference to the first underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufExt;
///
/// let buf = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// assert_eq!(buf.first_ref()[..], b"hello"[..]);
/// ```
pub fn first_ref(&self) -> &T {
&self.a
}
/// Gets a mutable reference to the first underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, buf::BufExt};
///
/// let mut buf = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// buf.first_mut().advance(1);
///
/// let full = buf.to_bytes();
/// assert_eq!(full, b"elloworld"[..]);
/// ```
pub fn first_mut(&mut self) -> &mut T {
&mut self.a
}
/// Gets a reference to the last underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufExt;
///
/// let buf = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// assert_eq!(buf.last_ref()[..], b"world"[..]);
/// ```
pub fn last_ref(&self) -> &U {
&self.b
}
/// Gets a mutable reference to the last underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, buf::BufExt};
///
/// let mut buf = (&b"hello "[..])
/// .chain(&b"world"[..]);
///
/// buf.last_mut().advance(1);
///
/// let full = buf.to_bytes();
/// assert_eq!(full, b"hello orld"[..]);
/// ```
pub fn last_mut(&mut self) -> &mut U {
&mut self.b
}
/// Consumes this `Chain`, returning the underlying values.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufExt;
///
/// let chain = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// let (first, last) = chain.into_inner();
/// assert_eq!(first[..], b"hello"[..]);
/// assert_eq!(last[..], b"world"[..]);
/// ```
pub fn into_inner(self) -> (T, U) {
(self.a, self.b)
}
}
impl<T, U> Buf for Chain<T, U>
where
T: Buf,
U: Buf,
{
fn remaining(&self) -> usize {
self.a.remaining() + self.b.remaining()
}
fn bytes(&self) -> &[u8] {
if self.a.has_remaining() {
self.a.bytes()
} else {
self.b.bytes()
}
}
fn advance(&mut self, mut cnt: usize) {
let a_rem = self.a.remaining();
if a_rem != 0 {
if a_rem >= cnt {
self.a.advance(cnt);
return;
}
// Consume what is left of a
self.a.advance(a_rem);
cnt -= a_rem;
}
self.b.advance(cnt);
}
#[cfg(feature = "std")]
fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
let mut n = self.a.bytes_vectored(dst);
n += self.b.bytes_vectored(&mut dst[n..]);
n
}
}
impl<T, U> BufMut for Chain<T, U>
where
T: BufMut,
U: BufMut,
{
fn remaining_mut(&self) -> usize {
self.a.remaining_mut() + self.b.remaining_mut()
}
fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
if self.a.has_remaining_mut() {
self.a.bytes_mut()
} else {
self.b.bytes_mut()
}
}
unsafe fn advance_mut(&mut self, mut cnt: usize) {
let a_rem = self.a.remaining_mut();
if a_rem != 0 {
if a_rem >= cnt {
self.a.advance_mut(cnt);
return;
}
// Consume what is left of a
self.a.advance_mut(a_rem);
cnt -= a_rem;
}
self.b.advance_mut(cnt);
}
#[cfg(feature = "std")]
fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize {
let mut n = self.a.bytes_vectored_mut(dst);
n += self.b.bytes_vectored_mut(&mut dst[n..]);
n
}
}
impl<T, U> IntoIterator for Chain<T, U>
where
T: Buf,
U: Buf,
{
type Item = u8;
type IntoIter = IntoIter<Chain<T, U>>;
fn into_iter(self) -> Self::IntoIter {
IntoIter::new(self)
}
}

third_party/rust/bytes-0.5.6/src/buf/ext/limit.rs vendored Normal file

@ -0,0 +1,74 @@
use crate::BufMut;
use core::{cmp, mem::MaybeUninit};
/// A `BufMut` adapter which limits the amount of bytes that can be written
/// to an underlying buffer.
#[derive(Debug)]
pub struct Limit<T> {
inner: T,
limit: usize,
}
pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
Limit { inner, limit }
}
impl<T> Limit<T> {
/// Consumes this `Limit`, returning the underlying value.
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the maximum number of bytes that can be written
///
/// # Note
///
/// If the inner `BufMut` has fewer bytes than indicated by this method then
/// that is the actual number of available bytes.
pub fn limit(&self) -> usize {
self.limit
}
/// Sets the maximum number of bytes that can be written.
///
/// # Note
///
/// If the inner `BufMut` has fewer bytes than `lim` then that is the actual
/// number of available bytes.
pub fn set_limit(&mut self, lim: usize) {
self.limit = lim
}
}
impl<T: BufMut> BufMut for Limit<T> {
fn remaining_mut(&self) -> usize {
cmp::min(self.inner.remaining_mut(), self.limit)
}
fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
let bytes = self.inner.bytes_mut();
let end = cmp::min(bytes.len(), self.limit);
&mut bytes[..end]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
assert!(cnt <= self.limit);
self.inner.advance_mut(cnt);
self.limit -= cnt;
}
}

third_party/rust/bytes-0.5.6/src/buf/ext/mod.rs vendored Normal file

@ -0,0 +1,186 @@
//! Extra utilities for `Buf` and `BufMut` types.
use super::{Buf, BufMut};
mod chain;
mod limit;
#[cfg(feature = "std")]
mod reader;
mod take;
#[cfg(feature = "std")]
mod writer;
pub use self::chain::Chain;
pub use self::limit::Limit;
pub use self::take::Take;
#[cfg(feature = "std")]
pub use self::{reader::Reader, writer::Writer};
/// Extra methods for implementations of `Buf`.
pub trait BufExt: Buf {
/// Creates an adaptor which will read at most `limit` bytes from `self`.
///
/// This function returns a new instance of `Buf` which will read at most
/// `limit` bytes.
///
/// # Examples
///
/// ```
/// use bytes::{BufMut, buf::BufExt};
///
/// let mut buf = b"hello world"[..].take(5);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(dst, b"hello");
///
/// let mut buf = buf.into_inner();
/// dst.clear();
/// dst.put(&mut buf);
/// assert_eq!(dst, b" world");
/// ```
fn take(self, limit: usize) -> Take<Self>
where
Self: Sized,
{
take::new(self, limit)
}
/// Creates an adaptor which will chain this buffer with another.
///
/// The returned `Buf` instance will first consume all bytes from `self`.
/// Afterwards the output is equivalent to the output of next.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, buf::BufExt};
///
/// let mut chain = b"hello "[..].chain(&b"world"[..]);
///
/// let full = chain.to_bytes();
/// assert_eq!(full.bytes(), b"hello world");
/// ```
fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where
Self: Sized,
{
Chain::new(self, next)
}
/// Creates an adaptor which implements the `Read` trait for `self`.
///
/// This function returns a new value which implements `Read` by adapting
/// the `Read` trait functions to the `Buf` trait functions. Given that
/// `Buf` operations are infallible, none of the `Read` functions will
/// return with `Err`.
///
/// # Examples
///
/// ```
/// use bytes::{Bytes, buf::BufExt};
/// use std::io::Read;
///
/// let buf = Bytes::from("hello world");
///
/// let mut reader = buf.reader();
/// let mut dst = [0; 1024];
///
/// let num = reader.read(&mut dst).unwrap();
///
/// assert_eq!(11, num);
/// assert_eq!(&dst[..11], &b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
fn reader(self) -> Reader<Self>
where
Self: Sized,
{
reader::new(self)
}
}
impl<B: Buf + ?Sized> BufExt for B {}
/// Extra methods for implementations of `BufMut`.
pub trait BufMutExt: BufMut {
/// Creates an adaptor which can write at most `limit` bytes to `self`.
///
/// # Examples
///
/// ```
/// use bytes::{BufMut, buf::BufMutExt};
///
/// let arr = &mut [0u8; 128][..];
/// assert_eq!(arr.remaining_mut(), 128);
///
/// let dst = arr.limit(10);
/// assert_eq!(dst.remaining_mut(), 10);
/// ```
fn limit(self, limit: usize) -> Limit<Self>
where
Self: Sized,
{
limit::new(self, limit)
}
/// Creates an adaptor which implements the `Write` trait for `self`.
///
/// This function returns a new value which implements `Write` by adapting
/// the `Write` trait functions to the `BufMut` trait functions. Given that
/// `BufMut` operations are infallible, none of the `Write` functions will
/// return with `Err`.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufMutExt;
/// use std::io::Write;
///
/// let mut buf = vec![].writer();
///
/// let num = buf.write(&b"hello world"[..]).unwrap();
/// assert_eq!(11, num);
///
/// let buf = buf.into_inner();
///
/// assert_eq!(*buf, b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
fn writer(self) -> Writer<Self>
where
Self: Sized,
{
writer::new(self)
}
/// Creates an adapter which will chain this buffer with another.
///
/// The returned `BufMut` instance will first write to all bytes from
/// `self`. Afterwards, it will write to `next`.
///
/// # Examples
///
/// ```
/// use bytes::{BufMut, buf::BufMutExt};
///
/// let mut a = [0u8; 5];
/// let mut b = [0u8; 6];
///
/// let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
///
/// chain.put_slice(b"hello world");
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b" world");
/// ```
fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where
Self: Sized,
{
Chain::new(self, next)
}
}
impl<B: BufMut + ?Sized> BufMutExt for B {}

third_party/rust/bytes-0.5.6/src/buf/ext/reader.rs vendored Normal file

@ -0,0 +1,81 @@
use crate::Buf;
use std::{cmp, io};
/// A `Buf` adapter which implements `io::Read` for the inner value.
///
/// This struct is generally created by calling `reader()` on `Buf`. See
/// documentation of [`reader()`](trait.Buf.html#method.reader) for more
/// details.
#[derive(Debug)]
pub struct Reader<B> {
buf: B,
}
pub fn new<B>(buf: B) -> Reader<B> {
Reader { buf }
}
impl<B: Buf> Reader<B> {
/// Gets a reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufExt;
///
/// let buf = b"hello world".reader();
///
/// assert_eq!(b"hello world", buf.get_ref());
/// ```
pub fn get_ref(&self) -> &B {
&self.buf
}
/// Gets a mutable reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
pub fn get_mut(&mut self) -> &mut B {
&mut self.buf
}
/// Consumes this `Reader`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, buf::BufExt};
/// use std::io;
///
/// let mut buf = b"hello world".reader();
/// let mut dst = vec![];
///
/// io::copy(&mut buf, &mut dst).unwrap();
///
/// let buf = buf.into_inner();
/// assert_eq!(0, buf.remaining());
/// ```
pub fn into_inner(self) -> B {
self.buf
}
}
impl<B: Buf + Sized> io::Read for Reader<B> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
let len = cmp::min(self.buf.remaining(), dst.len());
Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]);
Ok(len)
}
}
impl<B: Buf + Sized> io::BufRead for Reader<B> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.buf.bytes())
}
fn consume(&mut self, amt: usize) {
self.buf.advance(amt)
}
}

third_party/rust/bytes-0.5.6/src/buf/ext/take.rs vendored Normal file

@ -0,0 +1,147 @@
use crate::Buf;
use core::cmp;
/// A `Buf` adapter which limits the bytes read from an underlying buffer.
///
/// This struct is generally created by calling `take()` on `Buf`. See
/// documentation of [`take()`](trait.BufExt.html#method.take) for more details.
#[derive(Debug)]
pub struct Take<T> {
inner: T,
limit: usize,
}
pub fn new<T>(inner: T, limit: usize) -> Take<T> {
Take { inner, limit }
}
impl<T> Take<T> {
/// Consumes this `Take`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::{BufMut, BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"he"[..]);
///
/// let mut buf = buf.into_inner();
///
/// dst.clear();
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"llo world"[..]);
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, buf::BufExt};
///
/// let buf = b"hello world".take(2);
///
/// assert_eq!(11, buf.get_ref().remaining());
/// ```
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, BufMut, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
///
/// buf.get_mut().advance(2);
///
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"ll"[..]);
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the maximum number of bytes that can be read.
///
/// # Note
///
/// If the inner `Buf` has fewer bytes than indicated by this method then
/// that is the actual number of available bytes.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
///
/// assert_eq!(2, buf.limit());
/// assert_eq!(b'h', buf.get_u8());
/// assert_eq!(1, buf.limit());
/// ```
pub fn limit(&self) -> usize {
self.limit
}
/// Sets the maximum number of bytes that can be read.
///
/// # Note
///
/// If the inner `Buf` has fewer bytes than `lim` then that is the actual
/// number of available bytes.
///
/// # Examples
///
/// ```rust
/// use bytes::{BufMut, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"he"[..]);
///
/// dst.clear();
///
/// buf.set_limit(3);
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"llo"[..]);
/// ```
pub fn set_limit(&mut self, lim: usize) {
self.limit = lim
}
}
impl<T: Buf> Buf for Take<T> {
fn remaining(&self) -> usize {
cmp::min(self.inner.remaining(), self.limit)
}
fn bytes(&self) -> &[u8] {
let bytes = self.inner.bytes();
&bytes[..cmp::min(bytes.len(), self.limit)]
}
fn advance(&mut self, cnt: usize) {
assert!(cnt <= self.limit);
self.inner.advance(cnt);
self.limit -= cnt;
}
}

third_party/rust/bytes-0.5.6/src/buf/ext/writer.rs vendored Normal file

@ -0,0 +1,88 @@
use crate::BufMut;
use std::{cmp, io};
/// A `BufMut` adapter which implements `io::Write` for the inner value.
///
/// This struct is generally created by calling `writer()` on `BufMut`. See
/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more
/// details.
#[derive(Debug)]
pub struct Writer<B> {
buf: B,
}
pub fn new<B>(buf: B) -> Writer<B> {
Writer { buf }
}
impl<B: BufMut> Writer<B> {
/// Gets a reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufMutExt;
///
/// let buf = Vec::with_capacity(1024).writer();
///
/// assert_eq!(1024, buf.get_ref().capacity());
/// ```
pub fn get_ref(&self) -> &B {
&self.buf
}
/// Gets a mutable reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufMutExt;
///
/// let mut buf = vec![].writer();
///
/// buf.get_mut().reserve(1024);
///
/// assert_eq!(1024, buf.get_ref().capacity());
/// ```
pub fn get_mut(&mut self) -> &mut B {
&mut self.buf
}
/// Consumes this `Writer`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufMutExt;
/// use std::io;
///
/// let mut buf = vec![].writer();
/// let mut src = &b"hello world"[..];
///
/// io::copy(&mut src, &mut buf).unwrap();
///
/// let buf = buf.into_inner();
/// assert_eq!(*buf, b"hello world"[..]);
/// ```
pub fn into_inner(self) -> B {
self.buf
}
}
impl<B: BufMut + Sized> io::Write for Writer<B> {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
let n = cmp::min(self.buf.remaining_mut(), src.len());
self.buf.put(&src[0..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
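Illustrative sketch (not from the vendored sources): since `write()` above copies at most `remaining_mut()` bytes, a `Writer` over a fixed-size slice reports a short write instead of failing.

```rust
use bytes::buf::BufMutExt;
use std::io::Write;

fn main() {
    let mut storage = [0u8; 5];
    {
        // A &mut [u8] has remaining_mut() == its length, so larger writes are truncated.
        let mut writer = (&mut storage[..]).writer();
        let n = writer.write(b"hello world").unwrap();
        assert_eq!(n, 5); // only as many bytes as the slice can hold
    }
    assert_eq!(&storage, b"hello");
}
```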

133
third_party/rust/bytes-0.5.6/src/buf/iter.rs (vendored, new file)

@ -0,0 +1,133 @@
use crate::Buf;
/// Iterator over the bytes contained by the buffer.
///
/// This struct is created by the [`iter`] method on [`Buf`].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use bytes::Bytes;
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
/// assert_eq!(iter.next(), Some(b'b'));
/// assert_eq!(iter.next(), Some(b'c'));
/// assert_eq!(iter.next(), None);
/// ```
///
/// [`iter`]: trait.Buf.html#method.iter
/// [`Buf`]: trait.Buf.html
#[derive(Debug)]
pub struct IntoIter<T> {
inner: T,
}
impl<T> IntoIter<T> {
/// Creates an iterator over the bytes contained by the buffer.
///
/// # Examples
///
/// ```
/// use bytes::Bytes;
/// use bytes::buf::IntoIter;
///
/// let buf = Bytes::from_static(b"abc");
/// let mut iter = IntoIter::new(buf);
///
/// assert_eq!(iter.next(), Some(b'a'));
/// assert_eq!(iter.next(), Some(b'b'));
/// assert_eq!(iter.next(), Some(b'c'));
/// assert_eq!(iter.next(), None);
/// ```
pub fn new(inner: T) -> IntoIter<T> {
IntoIter { inner }
}
/// Consumes this `IntoIter`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, Bytes};
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
///
/// let buf = iter.into_inner();
/// assert_eq!(2, buf.remaining());
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, Bytes};
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
///
/// assert_eq!(2, iter.get_ref().remaining());
/// ```
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, BytesMut};
///
/// let buf = BytesMut::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
///
/// iter.get_mut().advance(1);
///
/// assert_eq!(iter.next(), Some(b'c'));
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
}
impl<T: Buf> Iterator for IntoIter<T> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
if !self.inner.has_remaining() {
return None;
}
let b = self.inner.bytes()[0];
self.inner.advance(1);
Some(b)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let rem = self.inner.remaining();
(rem, Some(rem))
}
}
impl<T: Buf> ExactSizeIterator for IntoIter<T> {}

30
third_party/rust/bytes-0.5.6/src/buf/mod.rs (vendored, new file)

@ -0,0 +1,30 @@
//! Utilities for working with buffers.
//!
//! A buffer is any structure that contains a sequence of bytes. The bytes may
//! or may not be stored in contiguous memory. This module contains traits used
//! to abstract over buffers as well as utilities for working with buffer types.
//!
//! # `Buf`, `BufMut`
//!
//! These are the two foundational traits for abstractly working with buffers.
//! They can be thought of as iterators for byte structures. They offer additional
//! performance over `Iterator` by providing an API optimized for byte slices.
//!
//! See [`Buf`] and [`BufMut`] for more details.
//!
//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
//! [`Buf`]: trait.Buf.html
//! [`BufMut`]: trait.BufMut.html
mod buf_impl;
mod buf_mut;
pub mod ext;
mod iter;
mod vec_deque;
pub use self::buf_impl::Buf;
pub use self::buf_mut::BufMut;
#[cfg(feature = "std")]
pub use self::buf_mut::IoSliceMut;
pub use self::ext::{BufExt, BufMutExt};
pub use self::iter::IntoIter;

22
third_party/rust/bytes-0.5.6/src/buf/vec_deque.rs (vendored, new file)

@ -0,0 +1,22 @@
use alloc::collections::VecDeque;
use super::Buf;
impl Buf for VecDeque<u8> {
fn remaining(&self) -> usize {
self.len()
}
fn bytes(&self) -> &[u8] {
let (s1, s2) = self.as_slices();
if s1.is_empty() {
s2
} else {
s1
}
}
fn advance(&mut self, cnt: usize) {
self.drain(..cnt);
}
}
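Worth noting from the impl above: `bytes()` only exposes the first contiguous slice of a possibly wrapped deque, so generic consumers loop over `bytes()`/`advance()` until the buffer is drained. A small sketch of that pattern (not part of the diff):

```rust
use bytes::Buf;
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<u8> = VecDeque::new();
    deque.extend(b"hello ");
    deque.advance(4);       // drop "hell"; the head moves forward
    deque.extend(b"world"); // the internal storage may now wrap around

    // Drain the deque the way a generic Buf consumer would.
    let mut out = Vec::new();
    while deque.has_remaining() {
        let chunk_len = {
            let chunk = deque.bytes(); // only the first contiguous slice
            out.extend_from_slice(chunk);
            chunk.len()
        };
        deque.advance(chunk_len);
    }
    assert_eq!(out, b"o world");
}
```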

1108
third_party/rust/bytes-0.5.6/src/bytes.rs (vendored, new file)

Diff not shown because of its large size.

1581
third_party/rust/bytes-0.5.6/src/bytes_mut.rs (vendored, new file)

Diff not shown because of its large size.

49
third_party/rust/bytes-0.5.6/src/fmt/debug.rs (vendored, new file)

@ -0,0 +1,49 @@
use core::fmt::{Debug, Formatter, Result};
use super::BytesRef;
use crate::{Bytes, BytesMut};
/// Alternative implementation of `std::fmt::Debug` for byte slice.
///
/// The standard `Debug` implementation for `[u8]` is a comma-separated
/// list of numbers. Since many byte strings are in fact ASCII strings
/// or contain a lot of ASCII (e.g. HTTP), it is convenient to print
/// strings as ASCII when possible.
impl Debug for BytesRef<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(f, "b\"")?;
for &b in self.0 {
// https://doc.rust-lang.org/reference/tokens.html#byte-escapes
if b == b'\n' {
write!(f, "\\n")?;
} else if b == b'\r' {
write!(f, "\\r")?;
} else if b == b'\t' {
write!(f, "\\t")?;
} else if b == b'\\' || b == b'"' {
write!(f, "\\{}", b as char)?;
} else if b == b'\0' {
write!(f, "\\0")?;
// ASCII printable
} else if b >= 0x20 && b < 0x7f {
write!(f, "{}", b as char)?;
} else {
write!(f, "\\x{:02x}", b)?;
}
}
write!(f, "\"")?;
Ok(())
}
}
impl Debug for Bytes {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Debug::fmt(&BytesRef(&self.as_ref()), f)
}
}
impl Debug for BytesMut {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Debug::fmt(&BytesRef(&self.as_ref()), f)
}
}
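A quick sketch of the resulting formatting (not part of the vendored file): printable ASCII is emitted verbatim, everything else is escaped as shown by the branches above.

```rust
use bytes::Bytes;

fn main() {
    // 'a' and 'b' are printable, '\n' and '\0' have short escapes, 0xff falls back to \xNN.
    let b = Bytes::from_static(b"ab\n\x00\xff");
    assert_eq!(format!("{:?}", b), "b\"ab\\n\\0\\xff\"");
}
```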

37
third_party/rust/bytes-0.5.6/src/fmt/hex.rs (vendored, new file)

@ -0,0 +1,37 @@
use core::fmt::{Formatter, LowerHex, Result, UpperHex};
use super::BytesRef;
use crate::{Bytes, BytesMut};
impl LowerHex for BytesRef<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
for &b in self.0 {
write!(f, "{:02x}", b)?;
}
Ok(())
}
}
impl UpperHex for BytesRef<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
for &b in self.0 {
write!(f, "{:02X}", b)?;
}
Ok(())
}
}
macro_rules! hex_impl {
($tr:ident, $ty:ty) => {
impl $tr for $ty {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
$tr::fmt(&BytesRef(self.as_ref()), f)
}
}
};
}
hex_impl!(LowerHex, Bytes);
hex_impl!(LowerHex, BytesMut);
hex_impl!(UpperHex, Bytes);
hex_impl!(UpperHex, BytesMut);
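For illustration only (not in the diff): the macro above gives `Bytes` and `BytesMut` the standard hex formatting traits, so `{:x}` and `{:X}` render every byte as two hex digits.

```rust
use bytes::Bytes;

fn main() {
    let b = Bytes::from_static(b"\x00\xabBC");
    assert_eq!(format!("{:x}", b), "00ab4243");
    assert_eq!(format!("{:X}", b), "00AB4243");
}
```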

5
third_party/rust/bytes-0.5.6/src/fmt/mod.rs (vendored, new file)

@ -0,0 +1,5 @@
mod debug;
mod hex;
/// `BytesRef` is not part of the public API of the bytes crate.
struct BytesRef<'a>(&'a [u8]);

117
third_party/rust/bytes-0.5.6/src/lib.rs (vendored, new file)

@ -0,0 +1,117 @@
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
))]
#![doc(html_root_url = "https://docs.rs/bytes/0.5.6")]
#![no_std]
//! Provides abstractions for working with bytes.
//!
//! The `bytes` crate provides an efficient byte buffer structure
//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer
//! implementations ([`Buf`], [`BufMut`]).
//!
//! [`Buf`]: trait.Buf.html
//! [`BufMut`]: trait.BufMut.html
//!
//! # `Bytes`
//!
//! `Bytes` is an efficient container for storing and operating on contiguous
//! slices of memory. It is intended for use primarily in networking code, but
//! could have applications elsewhere as well.
//!
//! `Bytes` values facilitate zero-copy network programming by allowing multiple
//! `Bytes` objects to point to the same underlying memory. This is managed by
//! using a reference count to track when the memory is no longer needed and can
//! be freed.
//!
//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]`
//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For
//! example:
//!
//! ```rust
//! use bytes::{BytesMut, BufMut};
//!
//! let mut buf = BytesMut::with_capacity(1024);
//! buf.put(&b"hello world"[..]);
//! buf.put_u16(1234);
//!
//! let a = buf.split();
//! assert_eq!(a, b"hello world\x04\xD2"[..]);
//!
//! buf.put(&b"goodbye world"[..]);
//!
//! let b = buf.split();
//! assert_eq!(b, b"goodbye world"[..]);
//!
//! assert_eq!(buf.capacity(), 998);
//! ```
//!
//! In the above example, only a single buffer of 1024 bytes is allocated. The handles
//! `a` and `b` will share the underlying buffer and maintain indices tracking
//! the view into the buffer represented by the handle.
//!
//! See the [struct docs] for more details.
//!
//! [struct docs]: struct.Bytes.html
//!
//! # `Buf`, `BufMut`
//!
//! These two traits provide read and write access to buffers. The underlying
//! storage may or may not be in contiguous memory. For example, `Bytes` is a
//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in
//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current
//! position in the underlying byte storage. When bytes are read or written, the
//! cursor is advanced.
//!
//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
//!
//! ## Relation with `Read` and `Write`
//!
//! At first glance, it may seem that `Buf` and `BufMut` overlap in
//! functionality with `std::io::Read` and `std::io::Write`. However, they
//! serve different purposes. A buffer is the value that is provided as an
//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then
//! perform a syscall, which has the potential of failing. Operations on `Buf`
//! and `BufMut` are infallible.
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
pub mod buf;
pub use crate::buf::{Buf, BufMut};
mod bytes;
mod bytes_mut;
mod fmt;
mod loom;
pub use crate::bytes::Bytes;
pub use crate::bytes_mut::BytesMut;
// Optional Serde support
#[cfg(feature = "serde")]
mod serde;
#[inline(never)]
#[cold]
fn abort() -> ! {
#[cfg(feature = "std")]
{
std::process::abort();
}
#[cfg(not(feature = "std"))]
{
struct Abort;
impl Drop for Abort {
fn drop(&mut self) {
panic!();
}
}
let _a = Abort;
panic!("abort");
}
}
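To make the module-level point about fallibility concrete, a small sketch (assuming plain in-memory slices on both sides; not part of the vendored file):

```rust
use bytes::Buf;
use std::io::Read;

fn main() {
    // Reading through `Buf` is infallible: the data is already in memory.
    let mut buf = &b"\x12\x34rest"[..];
    let value = buf.get_u16(); // no Result; panics only if too few bytes remain
    assert_eq!(value, 0x1234);

    // Reading through `io::Read` returns a Result, because the source may be
    // a file or socket whose underlying syscall can fail.
    let mut reader: &[u8] = b"\x12\x34rest";
    let mut first_two = [0u8; 2];
    reader.read_exact(&mut first_two).unwrap();
    assert_eq!(first_two, [0x12, 0x34]);
}
```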

30
third_party/rust/bytes-0.5.6/src/loom.rs (vendored, new file)

@ -0,0 +1,30 @@
#[cfg(not(all(test, loom)))]
pub(crate) mod sync {
pub(crate) mod atomic {
pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
pub(crate) trait AtomicMut<T> {
fn with_mut<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut *mut T) -> R;
}
impl<T> AtomicMut<T> for AtomicPtr<T> {
fn with_mut<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut *mut T) -> R,
{
f(self.get_mut())
}
}
}
}
#[cfg(all(test, loom))]
pub(crate) mod sync {
pub(crate) mod atomic {
pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
pub(crate) trait AtomicMut<T> {}
}
}
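The `AtomicMut` shim above exists so call sites can write `ptr.with_mut(...)` whether the real `core` atomics or loom's model-checked atomics are compiled in; with the `core` types it reduces to `get_mut()`. A rough sketch of that equivalence (illustrative only, using the plain `core` type):

```rust
use core::sync::atomic::AtomicPtr;

fn main() {
    let mut slot: AtomicPtr<u8> = AtomicPtr::new(core::ptr::null_mut());
    let replacement = Box::into_raw(Box::new(7u8));

    // Equivalent of `slot.with_mut(|p| *p = replacement)`: with &mut access,
    // the pointer can be read and replaced without any atomic ordering.
    *slot.get_mut() = replacement;

    let stored = *slot.get_mut();
    assert!(!stored.is_null());
    // Reclaim the allocation so the example does not leak.
    unsafe { drop(Box::from_raw(stored)) };
}
```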

89
third_party/rust/bytes-0.5.6/src/serde.rs (vendored, new file)

@ -0,0 +1,89 @@
use super::{Bytes, BytesMut};
use alloc::string::String;
use alloc::vec::Vec;
use core::{cmp, fmt};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
macro_rules! serde_impl {
($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => {
impl Serialize for $ty {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_bytes(&self)
}
}
struct $visitor_ty;
impl<'de> de::Visitor<'de> for $visitor_ty {
type Value = $ty;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("byte array")
}
#[inline]
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
where
V: de::SeqAccess<'de>,
{
let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
let mut values: Vec<u8> = Vec::with_capacity(len);
while let Some(value) = seq.next_element()? {
values.push(value);
}
Ok($ty::$from_vec(values))
}
#[inline]
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok($ty::$from_slice(v))
}
#[inline]
fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok($ty::$from_vec(v))
}
#[inline]
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok($ty::$from_slice(v.as_bytes()))
}
#[inline]
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
where
E: de::Error,
{
Ok($ty::$from_vec(v.into_bytes()))
}
}
impl<'de> Deserialize<'de> for $ty {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_byte_buf($visitor_ty)
}
}
};
}
serde_impl!(Bytes, BytesVisitor, copy_from_slice, from);
serde_impl!(BytesMut, BytesMutVisitor, from, from_vec);

103
third_party/rust/bytes-0.5.6/tests/test_buf.rs (vendored, new file)

@ -0,0 +1,103 @@
#![warn(rust_2018_idioms)]
use bytes::Buf;
#[cfg(feature = "std")]
use std::io::IoSlice;
#[test]
fn test_fresh_cursor_vec() {
let mut buf = &b"hello"[..];
assert_eq!(buf.remaining(), 5);
assert_eq!(buf.bytes(), b"hello");
buf.advance(2);
assert_eq!(buf.remaining(), 3);
assert_eq!(buf.bytes(), b"llo");
buf.advance(3);
assert_eq!(buf.remaining(), 0);
assert_eq!(buf.bytes(), b"");
}
#[test]
fn test_get_u8() {
let mut buf = &b"\x21zomg"[..];
assert_eq!(0x21, buf.get_u8());
}
#[test]
fn test_get_u16() {
let mut buf = &b"\x21\x54zomg"[..];
assert_eq!(0x2154, buf.get_u16());
let mut buf = &b"\x21\x54zomg"[..];
assert_eq!(0x5421, buf.get_u16_le());
}
#[test]
#[should_panic]
fn test_get_u16_buffer_underflow() {
let mut buf = &b"\x21"[..];
buf.get_u16();
}
#[cfg(feature = "std")]
#[test]
fn test_bufs_vec() {
let buf = &b"hello world"[..];
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let mut dst = [IoSlice::new(b1), IoSlice::new(b2)];
assert_eq!(1, buf.bytes_vectored(&mut dst[..]));
}
#[test]
fn test_vec_deque() {
use std::collections::VecDeque;
let mut buffer: VecDeque<u8> = VecDeque::new();
buffer.extend(b"hello world");
assert_eq!(11, buffer.remaining());
assert_eq!(b"hello world", buffer.bytes());
buffer.advance(6);
assert_eq!(b"world", buffer.bytes());
buffer.extend(b" piece");
let mut out = [0; 11];
buffer.copy_to_slice(&mut out);
assert_eq!(b"world piece", &out[..]);
}
#[test]
fn test_deref_buf_forwards() {
struct Special;
impl Buf for Special {
fn remaining(&self) -> usize {
unreachable!("remaining");
}
fn bytes(&self) -> &[u8] {
unreachable!("bytes");
}
fn advance(&mut self, _: usize) {
unreachable!("advance");
}
fn get_u8(&mut self) -> u8 {
// specialized!
b'x'
}
}
// these should all use the specialized method
assert_eq!(Special.get_u8(), b'x');
assert_eq!((&mut Special as &mut dyn Buf).get_u8(), b'x');
assert_eq!((Box::new(Special) as Box<dyn Buf>).get_u8(), b'x');
assert_eq!(Box::new(Special).get_u8(), b'x');
}

120
third_party/rust/bytes-0.5.6/tests/test_buf_mut.rs (vendored, new file)

@ -0,0 +1,120 @@
#![warn(rust_2018_idioms)]
#[cfg(feature = "std")]
use bytes::buf::IoSliceMut;
use bytes::{BufMut, BytesMut};
use core::fmt::Write;
use core::usize;
#[test]
fn test_vec_as_mut_buf() {
let mut buf = Vec::with_capacity(64);
assert_eq!(buf.remaining_mut(), usize::MAX);
assert!(buf.bytes_mut().len() >= 64);
buf.put(&b"zomg"[..]);
assert_eq!(&buf, b"zomg");
assert_eq!(buf.remaining_mut(), usize::MAX - 4);
assert_eq!(buf.capacity(), 64);
for _ in 0..16 {
buf.put(&b"zomg"[..]);
}
assert_eq!(buf.len(), 68);
}
#[test]
fn test_put_u8() {
let mut buf = Vec::with_capacity(8);
buf.put_u8(33);
assert_eq!(b"\x21", &buf[..]);
}
#[test]
fn test_put_u16() {
let mut buf = Vec::with_capacity(8);
buf.put_u16(8532);
assert_eq!(b"\x21\x54", &buf[..]);
buf.clear();
buf.put_u16_le(8532);
assert_eq!(b"\x54\x21", &buf[..]);
}
#[test]
#[should_panic(expected = "cannot advance")]
fn test_vec_advance_mut() {
// Verify fix for #354
let mut buf = Vec::with_capacity(8);
unsafe {
buf.advance_mut(12);
}
}
#[test]
fn test_clone() {
let mut buf = BytesMut::with_capacity(100);
buf.write_str("this is a test").unwrap();
let buf2 = buf.clone();
buf.write_str(" of our emergency broadcast system").unwrap();
assert!(buf != buf2);
}
#[cfg(feature = "std")]
#[test]
fn test_bufs_vec_mut() {
let b1: &mut [u8] = &mut [];
let b2: &mut [u8] = &mut [];
let mut dst = [IoSliceMut::from(b1), IoSliceMut::from(b2)];
// with no capacity
let mut buf = BytesMut::new();
assert_eq!(buf.capacity(), 0);
assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..]));
// with capacity
let mut buf = BytesMut::with_capacity(64);
assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..]));
}
#[test]
fn test_mut_slice() {
let mut v = vec![0, 0, 0, 0];
let mut s = &mut v[..];
s.put_u32(42);
}
#[test]
fn test_deref_bufmut_forwards() {
struct Special;
impl BufMut for Special {
fn remaining_mut(&self) -> usize {
unreachable!("remaining_mut");
}
fn bytes_mut(&mut self) -> &mut [std::mem::MaybeUninit<u8>] {
unreachable!("bytes_mut");
}
unsafe fn advance_mut(&mut self, _: usize) {
unreachable!("advance");
}
fn put_u8(&mut self, _: u8) {
// specialized!
}
}
// these should all use the specialized method
Special.put_u8(b'x');
(&mut Special as &mut dyn BufMut).put_u8(b'x');
(Box::new(Special) as Box<dyn BufMut>).put_u8(b'x');
Box::new(Special).put_u8(b'x');
}

962
third_party/rust/bytes-0.5.6/tests/test_bytes.rs (vendored, new file)

@ -0,0 +1,962 @@
#![warn(rust_2018_idioms)]
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::usize;
const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
const SHORT: &'static [u8] = b"hello world";
fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}
#[test]
fn test_bounds() {
is_sync::<Bytes>();
is_sync::<BytesMut>();
is_send::<Bytes>();
is_send::<BytesMut>();
}
#[test]
fn test_layout() {
use std::mem;
assert_eq!(
mem::size_of::<Bytes>(),
mem::size_of::<usize>() * 4,
"Bytes size should be 4 words",
);
assert_eq!(
mem::size_of::<BytesMut>(),
mem::size_of::<usize>() * 4,
"BytesMut should be 4 words",
);
assert_eq!(
mem::size_of::<Bytes>(),
mem::size_of::<Option<Bytes>>(),
"Bytes should be same size as Option<Bytes>",
);
assert_eq!(
mem::size_of::<BytesMut>(),
mem::size_of::<Option<BytesMut>>(),
"BytesMut should be same size as Option<BytesMut>",
);
}
#[test]
fn from_slice() {
let a = Bytes::from(&b"abcdefgh"[..]);
assert_eq!(a, b"abcdefgh"[..]);
assert_eq!(a, &b"abcdefgh"[..]);
assert_eq!(a, Vec::from(&b"abcdefgh"[..]));
assert_eq!(b"abcdefgh"[..], a);
assert_eq!(&b"abcdefgh"[..], a);
assert_eq!(Vec::from(&b"abcdefgh"[..]), a);
let a = BytesMut::from(&b"abcdefgh"[..]);
assert_eq!(a, b"abcdefgh"[..]);
assert_eq!(a, &b"abcdefgh"[..]);
assert_eq!(a, Vec::from(&b"abcdefgh"[..]));
assert_eq!(b"abcdefgh"[..], a);
assert_eq!(&b"abcdefgh"[..], a);
assert_eq!(Vec::from(&b"abcdefgh"[..]), a);
}
#[test]
fn fmt() {
let a = format!("{:?}", Bytes::from(&b"abcdefg"[..]));
let b = "b\"abcdefg\"";
assert_eq!(a, b);
let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..]));
assert_eq!(a, b);
}
#[test]
fn fmt_write() {
use std::fmt::Write;
use std::iter::FromIterator;
let s = String::from_iter((0..10).map(|_| "abcdefg"));
let mut a = BytesMut::with_capacity(64);
write!(a, "{}", &s[..64]).unwrap();
assert_eq!(a, s[..64].as_bytes());
let mut b = BytesMut::with_capacity(64);
write!(b, "{}", &s[..32]).unwrap();
write!(b, "{}", &s[32..64]).unwrap();
assert_eq!(b, s[..64].as_bytes());
let mut c = BytesMut::with_capacity(64);
write!(c, "{}", s).unwrap();
assert_eq!(c, s[..].as_bytes());
}
#[test]
fn len() {
let a = Bytes::from(&b"abcdefg"[..]);
assert_eq!(a.len(), 7);
let a = BytesMut::from(&b"abcdefg"[..]);
assert_eq!(a.len(), 7);
let a = Bytes::from(&b""[..]);
assert!(a.is_empty());
let a = BytesMut::from(&b""[..]);
assert!(a.is_empty());
}
#[test]
fn index() {
let a = Bytes::from(&b"hello world"[..]);
assert_eq!(a[0..5], *b"hello");
}
#[test]
fn slice() {
let a = Bytes::from(&b"hello world"[..]);
let b = a.slice(3..5);
assert_eq!(b, b"lo"[..]);
let b = a.slice(0..0);
assert_eq!(b, b""[..]);
let b = a.slice(3..3);
assert_eq!(b, b""[..]);
let b = a.slice(a.len()..a.len());
assert_eq!(b, b""[..]);
let b = a.slice(..5);
assert_eq!(b, b"hello"[..]);
let b = a.slice(3..);
assert_eq!(b, b"lo world"[..]);
}
#[test]
#[should_panic]
fn slice_oob_1() {
let a = Bytes::from(&b"hello world"[..]);
a.slice(5..44);
}
#[test]
#[should_panic]
fn slice_oob_2() {
let a = Bytes::from(&b"hello world"[..]);
a.slice(44..49);
}
#[test]
fn split_off() {
let mut hello = Bytes::from(&b"helloworld"[..]);
let world = hello.split_off(5);
assert_eq!(hello, &b"hello"[..]);
assert_eq!(world, &b"world"[..]);
let mut hello = BytesMut::from(&b"helloworld"[..]);
let world = hello.split_off(5);
assert_eq!(hello, &b"hello"[..]);
assert_eq!(world, &b"world"[..]);
}
#[test]
#[should_panic]
fn split_off_oob() {
let mut hello = Bytes::from(&b"helloworld"[..]);
let _ = hello.split_off(44);
}
#[test]
fn split_off_uninitialized() {
let mut bytes = BytesMut::with_capacity(1024);
let other = bytes.split_off(128);
assert_eq!(bytes.len(), 0);
assert_eq!(bytes.capacity(), 128);
assert_eq!(other.len(), 0);
assert_eq!(other.capacity(), 896);
}
#[test]
fn split_off_to_loop() {
let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
for i in 0..(s.len() + 1) {
{
let mut bytes = Bytes::from(&s[..]);
let off = bytes.split_off(i);
assert_eq!(i, bytes.len());
let mut sum = Vec::new();
sum.extend(bytes.iter());
sum.extend(off.iter());
assert_eq!(&s[..], &sum[..]);
}
{
let mut bytes = BytesMut::from(&s[..]);
let off = bytes.split_off(i);
assert_eq!(i, bytes.len());
let mut sum = Vec::new();
sum.extend(&bytes);
sum.extend(&off);
assert_eq!(&s[..], &sum[..]);
}
{
let mut bytes = Bytes::from(&s[..]);
let off = bytes.split_to(i);
assert_eq!(i, off.len());
let mut sum = Vec::new();
sum.extend(off.iter());
sum.extend(bytes.iter());
assert_eq!(&s[..], &sum[..]);
}
{
let mut bytes = BytesMut::from(&s[..]);
let off = bytes.split_to(i);
assert_eq!(i, off.len());
let mut sum = Vec::new();
sum.extend(&off);
sum.extend(&bytes);
assert_eq!(&s[..], &sum[..]);
}
}
}
#[test]
fn split_to_1() {
// Static
let mut a = Bytes::from_static(SHORT);
let b = a.split_to(4);
assert_eq!(SHORT[4..], a);
assert_eq!(SHORT[..4], b);
// Allocated
let mut a = Bytes::copy_from_slice(LONG);
let b = a.split_to(4);
assert_eq!(LONG[4..], a);
assert_eq!(LONG[..4], b);
let mut a = Bytes::copy_from_slice(LONG);
let b = a.split_to(30);
assert_eq!(LONG[30..], a);
assert_eq!(LONG[..30], b);
}
#[test]
fn split_to_2() {
let mut a = Bytes::from(LONG);
assert_eq!(LONG, a);
let b = a.split_to(1);
assert_eq!(LONG[1..], a);
drop(b);
}
#[test]
#[should_panic]
fn split_to_oob() {
let mut hello = Bytes::from(&b"helloworld"[..]);
let _ = hello.split_to(33);
}
#[test]
#[should_panic]
fn split_to_oob_mut() {
let mut hello = BytesMut::from(&b"helloworld"[..]);
let _ = hello.split_to(33);
}
#[test]
#[should_panic]
fn split_to_uninitialized() {
let mut bytes = BytesMut::with_capacity(1024);
let _other = bytes.split_to(128);
}
#[test]
fn split_off_to_at_gt_len() {
fn make_bytes() -> Bytes {
let mut bytes = BytesMut::with_capacity(100);
bytes.put_slice(&[10, 20, 30, 40]);
bytes.freeze()
}
use std::panic;
let _ = make_bytes().split_to(4);
let _ = make_bytes().split_off(4);
assert!(panic::catch_unwind(move || {
let _ = make_bytes().split_to(5);
})
.is_err());
assert!(panic::catch_unwind(move || {
let _ = make_bytes().split_off(5);
})
.is_err());
}
#[test]
fn truncate() {
let s = &b"helloworld"[..];
let mut hello = Bytes::from(s);
hello.truncate(15);
assert_eq!(hello, s);
hello.truncate(10);
assert_eq!(hello, s);
hello.truncate(5);
assert_eq!(hello, "hello");
}
#[test]
fn freeze_clone_shared() {
let s = &b"abcdefgh"[..];
let b = BytesMut::from(s).split().freeze();
assert_eq!(b, s);
let c = b.clone();
assert_eq!(c, s);
}
#[test]
fn freeze_clone_unique() {
let s = &b"abcdefgh"[..];
let b = BytesMut::from(s).freeze();
assert_eq!(b, s);
let c = b.clone();
assert_eq!(c, s);
}
#[test]
fn freeze_after_advance() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
b.advance(1);
assert_eq!(b, s[1..]);
let b = b.freeze();
// Verify fix for #352. Previously, freeze would ignore the start offset
// for BytesMuts in Vec mode.
assert_eq!(b, s[1..]);
}
#[test]
fn freeze_after_advance_arc() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
// Make b Arc
let _ = b.split_to(0);
b.advance(1);
assert_eq!(b, s[1..]);
let b = b.freeze();
assert_eq!(b, s[1..]);
}
#[test]
fn freeze_after_split_to() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
let _ = b.split_to(1);
assert_eq!(b, s[1..]);
let b = b.freeze();
assert_eq!(b, s[1..]);
}
#[test]
fn freeze_after_truncate() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
b.truncate(7);
assert_eq!(b, s[..7]);
let b = b.freeze();
assert_eq!(b, s[..7]);
}
#[test]
fn freeze_after_truncate_arc() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
// Make b Arc
let _ = b.split_to(0);
b.truncate(7);
assert_eq!(b, s[..7]);
let b = b.freeze();
assert_eq!(b, s[..7]);
}
#[test]
fn freeze_after_split_off() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
let _ = b.split_off(7);
assert_eq!(b, s[..7]);
let b = b.freeze();
assert_eq!(b, s[..7]);
}
#[test]
fn fns_defined_for_bytes_mut() {
let mut bytes = BytesMut::from(&b"hello world"[..]);
bytes.as_ptr();
bytes.as_mut_ptr();
// Iterator
let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
assert_eq!(&v[..], bytes);
}
#[test]
fn reserve_convert() {
// Vec -> Vec
let mut bytes = BytesMut::from(LONG);
bytes.reserve(64);
assert_eq!(bytes.capacity(), LONG.len() + 64);
// Arc -> Vec
let mut bytes = BytesMut::from(LONG);
let a = bytes.split_to(30);
bytes.reserve(128);
assert!(bytes.capacity() >= bytes.len() + 128);
drop(a);
}
#[test]
fn reserve_growth() {
let mut bytes = BytesMut::with_capacity(64);
bytes.put("hello world".as_bytes());
let _ = bytes.split();
bytes.reserve(65);
assert_eq!(bytes.capacity(), 128);
}
#[test]
fn reserve_allocates_at_least_original_capacity() {
let mut bytes = BytesMut::with_capacity(1024);
for i in 0..1020 {
bytes.put_u8(i as u8);
}
let _other = bytes.split();
bytes.reserve(16);
assert_eq!(bytes.capacity(), 1024);
}
#[test]
fn reserve_max_original_capacity_value() {
const SIZE: usize = 128 * 1024;
let mut bytes = BytesMut::with_capacity(SIZE);
for _ in 0..SIZE {
bytes.put_u8(0u8);
}
let _other = bytes.split();
bytes.reserve(16);
assert_eq!(bytes.capacity(), 64 * 1024);
}
#[test]
fn reserve_vec_recycling() {
let mut bytes = BytesMut::with_capacity(16);
assert_eq!(bytes.capacity(), 16);
let addr = bytes.as_ptr() as usize;
bytes.put("0123456789012345".as_bytes());
assert_eq!(bytes.as_ptr() as usize, addr);
bytes.advance(10);
assert_eq!(bytes.capacity(), 6);
bytes.reserve(8);
assert_eq!(bytes.capacity(), 16);
assert_eq!(bytes.as_ptr() as usize, addr);
}
#[test]
fn reserve_in_arc_unique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
let _ = bytes.split();
// now bytes is Arc and refcount == 1
assert_eq!(1000, bytes.capacity());
bytes.reserve(2001);
assert_eq!(2001, bytes.capacity());
}
#[test]
fn reserve_in_arc_unique_doubles() {
let mut bytes = BytesMut::with_capacity(1000);
let _ = bytes.split();
// now bytes is Arc and refcount == 1
assert_eq!(1000, bytes.capacity());
bytes.reserve(1001);
assert_eq!(2000, bytes.capacity());
}
#[test]
fn reserve_in_arc_nonunique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
let _copy = bytes.split();
// now bytes is Arc and refcount == 2
assert_eq!(1000, bytes.capacity());
bytes.reserve(2001);
assert_eq!(2001, bytes.capacity());
}
#[test]
fn extend_mut() {
let mut bytes = BytesMut::with_capacity(0);
bytes.extend(LONG);
assert_eq!(*bytes, LONG[..]);
}
#[test]
fn extend_from_slice_mut() {
for &i in &[3, 34] {
let mut bytes = BytesMut::new();
bytes.extend_from_slice(&LONG[..i]);
bytes.extend_from_slice(&LONG[i..]);
assert_eq!(LONG[..], *bytes);
}
}
#[test]
fn extend_mut_without_size_hint() {
let mut bytes = BytesMut::with_capacity(0);
let mut long_iter = LONG.iter();
// Use iter::from_fn since it doesn't know a size_hint
bytes.extend(std::iter::from_fn(|| long_iter.next()));
assert_eq!(*bytes, LONG[..]);
}
#[test]
fn from_static() {
let mut a = Bytes::from_static(b"ab");
let b = a.split_off(1);
assert_eq!(a, b"a"[..]);
assert_eq!(b, b"b"[..]);
}
#[test]
fn advance_static() {
let mut a = Bytes::from_static(b"hello world");
a.advance(6);
assert_eq!(a, &b"world"[..]);
}
#[test]
fn advance_vec() {
let mut a = Bytes::from(b"hello world boooo yah world zomg wat wat".to_vec());
a.advance(16);
assert_eq!(a, b"o yah world zomg wat wat"[..]);
a.advance(4);
assert_eq!(a, b"h world zomg wat wat"[..]);
a.advance(6);
assert_eq!(a, b"d zomg wat wat"[..]);
}
#[test]
fn advance_bytes_mut() {
let mut a = BytesMut::from("hello world boooo yah world zomg wat wat");
a.advance(16);
assert_eq!(a, b"o yah world zomg wat wat"[..]);
a.advance(4);
assert_eq!(a, b"h world zomg wat wat"[..]);
// Reserve some space.
a.reserve(1024);
assert_eq!(a, b"h world zomg wat wat"[..]);
a.advance(6);
assert_eq!(a, b"d zomg wat wat"[..]);
}
#[test]
#[should_panic]
fn advance_past_len() {
let mut a = BytesMut::from("hello world");
a.advance(20);
}
#[test]
// Only run these tests on little endian systems. CI uses qemu for testing
// big endian... and qemu doesn't really support threading all that well.
#[cfg(target_endian = "little")]
fn stress() {
// Tests promoting a buffer from a vec -> shared in a concurrent situation
use std::sync::{Arc, Barrier};
use std::thread;
const THREADS: usize = 8;
const ITERS: usize = 1_000;
for i in 0..ITERS {
let data = [i as u8; 256];
let buf = Arc::new(Bytes::copy_from_slice(&data[..]));
let barrier = Arc::new(Barrier::new(THREADS));
let mut joins = Vec::with_capacity(THREADS);
for _ in 0..THREADS {
let c = barrier.clone();
let buf = buf.clone();
joins.push(thread::spawn(move || {
c.wait();
let buf: Bytes = (*buf).clone();
drop(buf);
}));
}
for th in joins {
th.join().unwrap();
}
assert_eq!(*buf, data[..]);
}
}
#[test]
fn partial_eq_bytesmut() {
let bytes = Bytes::from(&b"The quick red fox"[..]);
let bytesmut = BytesMut::from(&b"The quick red fox"[..]);
assert!(bytes == bytesmut);
assert!(bytesmut == bytes);
let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]);
assert!(bytes2 != bytesmut);
assert!(bytesmut != bytes2);
}
/*
#[test]
fn bytes_unsplit_basic() {
let buf = Bytes::from(&b"aaabbbcccddd"[..]);
let splitted = buf.split_off(6);
assert_eq!(b"aaabbb", &buf[..]);
assert_eq!(b"cccddd", &splitted[..]);
buf.unsplit(splitted);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_unsplit_empty_other() {
let buf = Bytes::from(&b"aaabbbcccddd"[..]);
// empty other
let other = Bytes::new();
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_unsplit_empty_self() {
// empty self
let mut buf = Bytes::new();
let mut other = Bytes::with_capacity(64);
other.extend_from_slice(b"aaabbbcccddd");
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_unsplit_arc_different() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeee");
buf.split_off(8); //arc
let mut buf2 = Bytes::with_capacity(64);
buf2.extend_from_slice(b"ccccddddeeee");
buf2.split_off(8); //arc
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_unsplit_arc_non_contiguous() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf.unsplit(buf3);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_unsplit_two_split_offs() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf2.unsplit(buf3);
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_unsplit_overlapping_references() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"abcdefghijklmnopqrstuvwxyz");
let mut buf0010 = buf.slice(0..10);
let buf1020 = buf.slice(10..20);
let buf0515 = buf.slice(5..15);
buf0010.unsplit(buf1020);
assert_eq!(b"abcdefghijklmnopqrst", &buf0010[..]);
assert_eq!(b"fghijklmno", &buf0515[..]);
}
*/
#[test]
fn bytes_mut_unsplit_basic() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaabbbcccddd");
let splitted = buf.split_off(6);
assert_eq!(b"aaabbb", &buf[..]);
assert_eq!(b"cccddd", &splitted[..]);
buf.unsplit(splitted);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_empty_other() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaabbbcccddd");
// empty other
let other = BytesMut::new();
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_empty_self() {
// empty self
let mut buf = BytesMut::new();
let mut other = BytesMut::with_capacity(64);
other.extend_from_slice(b"aaabbbcccddd");
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_arc_different() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeee");
let _ = buf.split_off(8); //arc
let mut buf2 = BytesMut::with_capacity(64);
buf2.extend_from_slice(b"ccccddddeeee");
let _ = buf2.split_off(8); //arc
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_arc_non_contiguous() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf.unsplit(buf3);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_two_split_offs() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf2.unsplit(buf3);
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn from_iter_no_size_hint() {
use std::iter;
let mut expect = vec![];
let actual: Bytes = iter::repeat(b'x')
.scan(100, |cnt, item| {
if *cnt >= 1 {
*cnt -= 1;
expect.push(item);
Some(item)
} else {
None
}
})
.collect();
assert_eq!(&actual[..], &expect[..]);
}
fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
let slice = &(bytes.as_ref()[start..end]);
let sub = bytes.slice_ref(&slice);
assert_eq!(&sub[..], expected);
}
#[test]
fn slice_ref_works() {
let bytes = Bytes::from(&b"012345678"[..]);
test_slice_ref(&bytes, 0, 0, b"");
test_slice_ref(&bytes, 0, 3, b"012");
test_slice_ref(&bytes, 2, 6, b"2345");
test_slice_ref(&bytes, 7, 9, b"78");
test_slice_ref(&bytes, 9, 9, b"");
}
#[test]
fn slice_ref_empty() {
let bytes = Bytes::from(&b""[..]);
let slice = &(bytes.as_ref()[0..0]);
let sub = bytes.slice_ref(&slice);
assert_eq!(&sub[..], b"");
}
#[test]
fn slice_ref_empty_subslice() {
let bytes = Bytes::from(&b"abcde"[..]);
let subbytes = bytes.slice(0..0);
let slice = &subbytes[..];
// The `slice` object is derived from the original `bytes` object
// so `slice_ref` should work.
assert_eq!(Bytes::new(), bytes.slice_ref(slice));
}
#[test]
#[should_panic]
fn slice_ref_catches_not_a_subset() {
let bytes = Bytes::from(&b"012345678"[..]);
let slice = &b"012345"[0..4];
bytes.slice_ref(slice);
}
#[test]
fn slice_ref_not_an_empty_subset() {
let bytes = Bytes::from(&b"012345678"[..]);
let slice = &b""[0..0];
assert_eq!(Bytes::new(), bytes.slice_ref(slice));
}
#[test]
fn empty_slice_ref_not_an_empty_subset() {
let bytes = Bytes::new();
let slice = &b"some other slice"[0..0];
assert_eq!(Bytes::new(), bytes.slice_ref(slice));
}
#[test]
fn bytes_buf_mut_advance() {
let mut bytes = BytesMut::with_capacity(1024);
unsafe {
let ptr = bytes.bytes_mut().as_ptr();
assert_eq!(1024, bytes.bytes_mut().len());
bytes.advance_mut(10);
let next = bytes.bytes_mut().as_ptr();
assert_eq!(1024 - 10, bytes.bytes_mut().len());
assert_eq!(ptr.offset(10), next);
// advance to the end
bytes.advance_mut(1024 - 10);
// The buffer size is doubled
assert_eq!(1024, bytes.bytes_mut().len());
}
}
#[test]
fn bytes_buf_mut_reuse_when_fully_consumed() {
use bytes::{Buf, BytesMut};
let mut buf = BytesMut::new();
buf.reserve(8192);
buf.extend_from_slice(&[0u8; 100][..]);
let p = &buf[0] as *const u8;
buf.advance(100);
buf.reserve(8192);
buf.extend_from_slice(b" ");
assert_eq!(&buf[0] as *const u8, p);
}
#[test]
#[should_panic]
fn bytes_reserve_overflow() {
let mut bytes = BytesMut::with_capacity(1024);
bytes.put_slice(b"hello world");
bytes.reserve(usize::MAX);
}
#[test]
fn bytes_with_capacity_but_empty() {
// See https://github.com/tokio-rs/bytes/issues/340
let vec = Vec::with_capacity(1);
let _ = Bytes::from(vec);
}

67
third_party/rust/bytes-0.5.6/tests/test_bytes_odd_alloc.rs (vendored, new file)

@ -0,0 +1,67 @@
//! Test using `Bytes` with an allocator that hands out "odd" pointers for
//! vectors (pointers where the LSB is set).
use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr;
use bytes::Bytes;
#[global_allocator]
static ODD: Odd = Odd;
struct Odd;
unsafe impl GlobalAlloc for Odd {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() == 1 && layout.size() > 0 {
// Allocate slightly bigger so that we can offset the pointer by 1
let size = layout.size() + 1;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => return ptr::null_mut(),
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
let ptr = ptr.offset(1);
ptr
} else {
ptr
}
} else {
System.alloc(layout)
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.align() == 1 && layout.size() > 0 {
let size = layout.size() + 1;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => std::process::abort(),
};
System.dealloc(ptr.offset(-1), new_layout);
} else {
System.dealloc(ptr, layout);
}
}
}
#[test]
fn sanity_check_odd_allocator() {
let vec = vec![33u8; 1024];
let p = vec.as_ptr() as usize;
assert!(p & 0x1 == 0x1, "{:#b}", p);
}
#[test]
fn test_bytes_from_vec_drop() {
let vec = vec![33u8; 1024];
let _b = Bytes::from(vec);
}
#[test]
fn test_bytes_clone_drop() {
let vec = vec![33u8; 1024];
let b1 = Bytes::from(vec);
let _b2 = b1.clone();
}

79
third_party/rust/bytes-0.5.6/tests/test_bytes_vec_alloc.rs (vendored, new file)

@ -0,0 +1,79 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::{mem, ptr};
use bytes::{Buf, Bytes};
#[global_allocator]
static LEDGER: Ledger = Ledger;
struct Ledger;
const USIZE_SIZE: usize = mem::size_of::<usize>();
unsafe impl GlobalAlloc for Ledger {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() == 1 && layout.size() > 0 {
// Allocate extra space to stash a record of
// how much space there was.
let orig_size = layout.size();
let size = orig_size + USIZE_SIZE;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => return ptr::null_mut(),
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
(ptr as *mut usize).write(orig_size);
let ptr = ptr.offset(USIZE_SIZE as isize);
ptr
} else {
ptr
}
} else {
System.alloc(layout)
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.align() == 1 && layout.size() > 0 {
let off_ptr = (ptr as *mut usize).offset(-1);
let orig_size = off_ptr.read();
if orig_size != layout.size() {
panic!(
"bad dealloc: alloc size was {}, dealloc size is {}",
orig_size,
layout.size()
);
}
let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
Ok(layout) => layout,
Err(_err) => std::process::abort(),
};
System.dealloc(off_ptr as *mut u8, new_layout);
} else {
System.dealloc(ptr, layout);
}
}
}
#[test]
fn test_bytes_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
bytes.advance(1);
drop(bytes);
}
#[test]
fn test_bytes_truncate() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
bytes.truncate(2);
drop(bytes);
}
#[test]
fn test_bytes_truncate_and_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
bytes.truncate(2);
bytes.advance(1);
drop(bytes);
}

135
third_party/rust/bytes-0.5.6/tests/test_chain.rs (vendored, new file)

@ -0,0 +1,135 @@
#![warn(rust_2018_idioms)]
use bytes::buf::{BufExt, BufMutExt};
use bytes::{Buf, BufMut, Bytes};
#[cfg(feature = "std")]
use std::io::IoSlice;
#[test]
fn collect_two_bufs() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let res = a.chain(b).to_bytes();
assert_eq!(res, &b"helloworld"[..]);
}
#[test]
fn writing_chained() {
let mut a = [0u8; 64];
let mut b = [0u8; 64];
{
let mut buf = (&mut a[..]).chain_mut(&mut b[..]);
for i in 0u8..128 {
buf.put_u8(i);
}
}
for i in 0..64 {
let expect = i as u8;
assert_eq!(expect, a[i]);
assert_eq!(expect + 64, b[i]);
}
}
#[test]
fn iterating_two_bufs() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let res: Vec<u8> = a.chain(b).into_iter().collect();
assert_eq!(res, &b"helloworld"[..]);
}
#[cfg(feature = "std")]
#[test]
fn vectored_read() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let mut buf = a.chain(b);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(2, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"hello"[..]);
assert_eq!(iovecs[1][..], b"world"[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(2);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(2, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"llo"[..]);
assert_eq!(iovecs[1][..], b"world"[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(3);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(1, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"world"[..]);
assert_eq!(iovecs[1][..], b""[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(3);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(1, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"ld"[..]);
assert_eq!(iovecs[1][..], b""[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
}

35
third_party/rust/bytes-0.5.6/tests/test_debug.rs (vendored, new file)

@ -0,0 +1,35 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
#[test]
fn fmt() {
let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect();
let expected = "b\"\
\\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\
\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\
\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\
\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\
\x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\
`abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\
\\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\
\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\
\\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\
\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\
\\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\
\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\
\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\
\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\
\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\
\\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\
\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\
\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\
\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\
\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\
\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\"";
assert_eq!(expected, format!("{:?}", Bytes::from(vec)));
}

21
third_party/rust/bytes-0.5.6/tests/test_iter.rs (vendored, new file)

@ -0,0 +1,21 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
#[test]
fn iter_len() {
let buf = Bytes::from_static(b"hello world");
let iter = buf.iter();
assert_eq!(iter.size_hint(), (11, Some(11)));
assert_eq!(iter.len(), 11);
}
#[test]
fn empty_iter_len() {
let buf = Bytes::from_static(b"");
let iter = buf.iter();
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.len(), 0);
}

29
third_party/rust/bytes-0.5.6/tests/test_reader.rs (vendored, new file)

@ -0,0 +1,29 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "std")]
use std::io::{BufRead, Read};
use bytes::buf::BufExt;
#[test]
fn read() {
let buf1 = &b"hello "[..];
let buf2 = &b"world"[..];
let buf = BufExt::chain(buf1, buf2); // Disambiguate with Read::chain
let mut buffer = Vec::new();
buf.reader().read_to_end(&mut buffer).unwrap();
assert_eq!(b"hello world", &buffer[..]);
}
#[test]
fn buf_read() {
let buf1 = &b"hell"[..];
let buf2 = &b"o\nworld"[..];
let mut reader = BufExt::chain(buf1, buf2).reader();
let mut line = String::new();
reader.read_line(&mut line).unwrap();
assert_eq!("hello\n", &line);
line.clear();
reader.read_line(&mut line).unwrap();
assert_eq!("world", &line);
}

20
third_party/rust/bytes-0.5.6/tests/test_serde.rs (vendored, new file)

@ -0,0 +1,20 @@
#![cfg(feature = "serde")]
#![warn(rust_2018_idioms)]
use serde_test::{assert_tokens, Token};
#[test]
fn test_ser_de_empty() {
let b = bytes::Bytes::new();
assert_tokens(&b, &[Token::Bytes(b"")]);
let b = bytes::BytesMut::with_capacity(0);
assert_tokens(&b, &[Token::Bytes(b"")]);
}
#[test]
fn test_ser_de() {
let b = bytes::Bytes::from(&b"bytes"[..]);
assert_tokens(&b, &[Token::Bytes(b"bytes")]);
let b = bytes::BytesMut::from(&b"bytes"[..]);
assert_tokens(&b, &[Token::Bytes(b"bytes")]);
}

12
third_party/rust/bytes-0.5.6/tests/test_take.rs (vendored, new file)

@ -0,0 +1,12 @@
#![warn(rust_2018_idioms)]
use bytes::buf::{Buf, BufExt};
#[test]
fn long_take() {
// Tests that a take with a size greater than the buffer length will not
// overrun the buffer. Regression test for #138.
let buf = b"hello world".take(100);
assert_eq!(11, buf.remaining());
assert_eq!(b"hello world", buf.bytes());
}


@ -1 +1 @@
{"files":{"CHANGELOG.md":"f28b8766c2e83ce8631e17bdeaea680c3a4306cc3a9a464e75c4e144bf7b48d4","Cargo.toml":"ea7621bda57463a85c1302db729ea460b19a6a97a36d50ac058c45ef4d8ce7e7","LICENSE-APACHE":"2773e20df8f4c52a026b5b578c7f2457341f5aa3fb6612fd87e1d2e1bd8f48ad","LICENSE-MIT":"f28420c1906af38be726cd7842798f0194b27c41d6051c0d1e9ee348b8a4a0ea","README.md":"c40f9fb713d6e7d471a1725315cc45fccfb62c6690ac32095351a4722b2f0c84","build.rs":"42c12800b13ac5ad41021f04a1bdb94de9d782d853ff5f54d8734bef1491e9d1","scripts/test.sh":"a76191d56d96c32efcb6883e0983e86beb4c6842e6e5c5a8bfded4c8183ff6f6","src/builder.rs":"238884aebf7fa2e4ab940faee4a5a2f5d6add35d2214ac644c36ec0ec64d7829","src/delta.rs":"4232e3acf98a70b111329c92cd823ba0419d4a12c3894091a318ae823dfdb225","src/draft.rs":"4f39d7acf3d3e868c013bab9f97ac570b983eed94d64fd59dba39343b870e4e0","src/expiration.rs":"188be15a5dd20d3471daa483609377ab66d876eb77afad1fc44891c18f251efd","src/jar.rs":"e91ba170610cc5b73a5cce8de1f6d960b26303ee5dff49822728e86a5fc64218","src/lib.rs":"e5a4bd66cf864e529ce81b33b02fe05fa807ad9ec1b368a062a05ad131ffdbba","src/parse.rs":"b37bd10631edc373ac82d9c9e5968416b04b3b705630ea4d4f9b1d7db9e695f6","src/secure/key.rs":"a8154b55c5435acba18132b90ab47f6d8ef229eb81240180079a99ae7885d8ca","src/secure/macros.rs":"18377b3fffdb2a1ad754f98f65a69d40a31fb69185fed231a4808ed4319985e4","src/secure/mod.rs":"6b2cf8486244545d51ecc5bd880845ae3f8d681ed86592e93cc4706b34704dcc","src/secure/private.rs":"0bd1b986d41b23fbb9c38836aa957b3db1f534c41dc205085a36d38099ad2f36","src/secure/signed.rs":"2375c29ca816e093fbee1db5631630f0ea47d92b1d16db458b7be14f30765117"},"package":"94d4706de1b0fa5b132270cddffa8585166037822e260a944fe161acd137ca05"}
{"files":{"Cargo.toml":"2c65ccbf56c2640abee1927d35423ade48e20588630ac31e094d434595ee80d8","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"3fcac69759e004d729a868ed5ef248c84d86fb0f3ab5c7a93abb547b91a0eb4e","src/builder.rs":"4200963d44d1a59f1268965b77407ba977eb5a777875cb76ea927ddc829be3d8","src/delta.rs":"510fc3dbf0a70d635d0488c5a5a32a2ba8e1490ce05bee39d944ea8c02189bbc","src/draft.rs":"950b43b3f6e1c4c13b1e90220c71defe02713170807b41e5ffde9a1327688f48","src/jar.rs":"8cc6531203e2a9422bfe1b2a00aeb96beb57d4676fa147a66f28f2d7c3129b57","src/lib.rs":"6a267d63ad90998f4a463c726be6a93fc33979eb8a72bfb53cae9f5b7a13fae0","src/parse.rs":"549844993601f20f5de3f5d5f8bea0fce3fe4f09d72e343aff9e433948a4ec5c","src/secure/key.rs":"734f35ef4b0d6b63174befdcb970f0304ac63f0895871b7c2f267fefdd43b648","src/secure/macros.rs":"83d770e5c4eb7fbd3c3d86973b69042e9e2bb9fafb72a4456598e2ae78638d5f","src/secure/mod.rs":"5d7fecb62295827d474ed1ce6b7628fe93d4a09eb14babfde036d64e8e4a04f8","src/secure/private.rs":"81d782cd4fa4b1415795710ad9e2e77eca3f4326e20ef96675093db9a378da32","src/secure/signed.rs":"26c46c2d561ea14d1d8d79f85342a98b4bd749df776677dde91dd9b928e91fbe"},"package":"888604f00b3db336d2af898ec3c1d5d0ddf5e6d462220f2ededc33a87ac4bbd5"}

142
third_party/rust/cookie/CHANGELOG.md (vendored)

@ -1,142 +0,0 @@
# Version 0.16
## Version 0.16.0 (Dec 28, 2021)
### Breaking Changes
* The MSRV is now `1.53`, up from `1.41` in `0.15`.
* `time` has been updated to `0.3` and is reexported from the crate root.
### General Changes
* `rust-crypto` dependencies were updated to their latest versions.
# Version 0.15
## Version 0.15.1 (Jul 14, 2021)
### Changes and Fixes
* A panic that could result from non-char boundary indexing was fixed.
* Stale doc references to version `0.14` were updated.
## Version 0.15.0 (Feb 25, 2021)
### Breaking Changes
* `Cookie::force_remove()` takes `&Cookie` instead of `Cookie`.
* Child jar methods split into immutable and mutable versions
(`Cookie::{private{_mut}, signed{_mut}}`).
* `Cookie::encoded()` returns a new `Display` struct.
* Dates with year `<= 99` are handled like Chrome: range `0..=68` maps to
`2000..=2068`, `69..=99` to `1969..=1999`.
* `Cookie::{set_}expires()` operates on a new `Expiration` enum.
### New Features
* Added `Cookie::make_removal()` to manually create expired cookies.
* Added `Cookie::stripped()` display variant to print only the `name` and
`value` of a cookie.
* `Key` implements a constant-time `PartialEq`.
* Added `Key::master()` to retrieve the full 512-bit master key.
* Added `PrivateJar::decrypt()` to manually decrypt an encrypted `Cookie`.
* Added `SignedJar::verify()` to manually verify a signed `Cookie`.
* `Cookie::expires()` returns an `Option<Expiration>` to allow distinguishing
between unset and `None` expirations.
* Added `Cookie::expires_datetime()` to retrieve the expiration as an
`OffsetDateTime`.
* Added `Cookie::unset_expires()` to unset expirations.
### General Changes and Fixes
* MSRV is 1.41.
# Version 0.14
## Version 0.14.3 (Nov 5, 2020)
### Changes and Fixes
* `rust-crypto` dependencies were updated to their latest versions.
## Version 0.14.2 (Jul 22, 2020)
### Changes and Fixes
* Documentation now builds on the stable channel.
* `rust-crypto` dependencies were updated to their latest versions.
* Fixed 'interator' -> 'iterator' documentation typo.
## Version 0.14.1 (Jun 5, 2020)
### Changes and Fixes
* Updated `base64` dependency to 0.12.
* Updated minimum `time` dependency to correct version: 0.2.11.
* Added `readme` key to `Cargo.toml`, updated `license` field.
## Version 0.14.0 (May 29, 2020)
### Breaking Changes
* The `Key::from_master()` method was deprecated in favor of the more aptly
named `Key::derive_from()`.
* The deprecated `CookieJar::clear()` method was removed.
### New Features
* Added `Key::from()` to create a `Key` structure from a full-length key.
* Signed and private cookie jars can be individually enabled via the new
`signed` and `private` features, respectively.
* Key derivation via key expansion can be individually enabled via the new
`key-expansion` feature.
### General Changes and Fixes
* `ring` is no longer a dependency: `RustCrypto`-based cryptography is used in
lieu of `ring`. Prior to their inclusion here, the `hmac` and `hkdf` crates
were audited.
* Quotes, if present, are stripped from cookie values when parsing.
# Version 0.13
## Version 0.13.3 (Feb 3, 2020)
### Changes
* The `time` dependency was unpinned from `0.2.4`, allowing any `0.2.x`
version of `time` where `x >= 6`.
## Version 0.13.2 (Jan 28, 2020)
### Changes
* The `time` dependency was pinned to `0.2.4` due to upstream breaking changes
in `0.2.5`.
## Version 0.13.1 (Jan 23, 2020)
### New Features
* Added the `CookieJar::reset_delta()` method, which reverts all _delta_
changes to a `CookieJar`.
## Version 0.13.0 (Jan 21, 2020)
### Breaking Changes
* `time` was updated from 0.1 to 0.2.
* `ring` was updated from 0.14 to 0.16.
* `SameSite::None` now writes `SameSite=None` to correspond with updated
`SameSite` draft. `SameSite` can be unset by passing `None` to
`Cookie::set_same_site()`.
* `CookieBuilder` gained a lifetime: `CookieBuilder<'c>`.
### General Changes and Fixes
* Added a CHANGELOG.
* `expires`, `max_age`, `path`, and `domain` can be unset by passing `None` to
the respective `Cookie::set_{field}()` method.
* The "Expires" field is limited to a date-time of Dec 31, 9999, 23:59:59.
* The `%` character is now properly encoded and decoded.
* Constructor methods on `CookieBuilder` allow non-static lifetimes.

58
third_party/rust/cookie/Cargo.toml (vendored)

@ -11,60 +11,30 @@
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "cookie"
version = "0.16.0"
authors = ["Sergio Benitez <sb@sergio.bz>", "Alex Crichton <alex@alexcrichton.com>"]
build = "build.rs"
description = "HTTP cookie parsing and cookie jar management. Supports signed and private\n(encrypted, authenticated) jars.\n"
version = "0.12.0"
authors = ["Alex Crichton <alex@alexcrichton.com>", "Sergio Benitez <sb@sergio.bz>"]
description = "Crate for parsing HTTP cookie headers and managing a cookie jar. Supports signed\nand private (encrypted + signed) jars.\n"
documentation = "https://docs.rs/cookie"
readme = "README.md"
license = "MIT OR Apache-2.0"
repository = "https://github.com/SergioBenitez/cookie-rs"
license = "MIT/Apache-2.0"
repository = "https://github.com/alexcrichton/cookie-rs"
[package.metadata.docs.rs]
all-features = true
[dependencies.aes-gcm]
version = "0.9.0"
optional = true
[dependencies.base64]
version = "0.13"
optional = true
[dependencies.hkdf]
version = "0.12.0"
optional = true
[dependencies.hmac]
version = "0.12.0"
optional = true
[dependencies.percent-encoding]
version = "2.0"
optional = true
[dependencies.rand]
version = "0.8"
optional = true
[dependencies.sha2]
version = "0.10.0"
optional = true
[dependencies.subtle]
version = "2.3"
[dependencies.ring]
version = "0.14.0"
optional = true
[dependencies.time]
version = "0.3"
features = ["std", "parsing", "formatting", "macros"]
default-features = false
[build-dependencies.version_check]
version = "0.9"
version = "0.1"
[dependencies.url]
version = "1.0"
optional = true
[features]
key-expansion = ["sha2", "hkdf"]
percent-encode = ["percent-encoding"]
private = ["aes-gcm", "base64", "rand", "subtle"]
secure = ["private", "signed", "key-expansion"]
signed = ["hmac", "sha2", "base64", "rand", "subtle"]
percent-encode = ["url"]
secure = ["ring", "base64"]

3
third_party/rust/cookie/LICENSE-APACHE vendored

@ -186,8 +186,7 @@ APPENDIX: How to apply the Apache License to your work.
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017 Sergio Benitez
Copyright 2014 Alex Crichton
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

1
third_party/rust/cookie/LICENSE-MIT vendored

@ -1,4 +1,3 @@
Copyright (c) 2017 Sergio Benitez
Copyright (c) 2014 Alex Crichton
Permission is hereby granted, free of charge, to any

15
third_party/rust/cookie/README.md vendored

@ -1,10 +1,9 @@
# Cookie
# cookie-rs
[![CI Status](https://github.com/SergioBenitez/cookie-rs/workflows/CI/badge.svg)](https://github.com/SergioBenitez/cookie-rs/actions)
[![Build Status](https://travis-ci.com/SergioBenitez/cookie-rs.svg?branch=master)](https://travis-ci.com/SergioBenitez/cookie-rs)
[![Current Crates.io Version](https://img.shields.io/crates/v/cookie.svg)](https://crates.io/crates/cookie)
[![Documentation](https://docs.rs/cookie/badge.svg)](https://docs.rs/cookie)
A Rust library for parsing HTTP cookies and managing cookie jars.
A library for parsing HTTP cookies and managing cookie jars.
# Usage
@ -12,17 +11,11 @@ Add the following to your `Cargo.toml`:
```toml
[dependencies]
cookie = "0.16"
cookie = "0.12"
```
See the [documentation](http://docs.rs/cookie) for detailed usage information.
# MSRV
The minimum supported `rustc` version for cookie `0.16` is `1.53`.
The minimum supported `rustc` version for cookie `0.15` is `1.41`.
# License
This project is licensed under either of

5
third_party/rust/cookie/build.rs vendored

@ -1,5 +0,0 @@
fn main() {
if let Some(true) = version_check::is_feature_flaggable() {
println!("cargo:rustc-cfg=nightly");
}
}

19
third_party/rust/cookie/scripts/test.sh vendored

@ -1,19 +0,0 @@
#!/bin/bash
set -e
cargo build --verbose
cargo test --verbose --features percent-encode
cargo test --verbose --features private
cargo test --verbose --features signed
cargo test --verbose --features secure
cargo test --verbose --features 'private,key-expansion'
cargo test --verbose --features 'signed,key-expansion'
cargo test --verbose --features 'secure,percent-encode'
cargo test --verbose
cargo test --verbose --no-default-features
cargo test --verbose --all-features
rustdoc --test README.md -L target

66
third_party/rust/cookie/src/builder.rs vendored

@ -1,21 +1,25 @@
use std::borrow::Cow;
use crate::{Cookie, SameSite, Expiration};
use time::{Tm, Duration};
use ::{Cookie, SameSite};
/// Structure that follows the builder pattern for building `Cookie` structs.
///
/// To construct a cookie:
///
/// 1. Call [`Cookie::build`] to start building.
/// 1. Call [`Cookie::build`](struct.Cookie.html#method.build) to start building.
/// 2. Use any of the builder methods to set fields in the cookie.
/// 3. Call [`CookieBuilder::finish()`] to retrieve the built cookie.
/// 3. Call [finish](#method.finish) to retrieve the built cookie.
///
/// # Example
///
/// ```rust
/// # extern crate cookie;
/// extern crate time;
///
/// use cookie::Cookie;
/// use cookie::time::Duration;
/// use time::Duration;
///
/// # fn main() {
/// let cookie: Cookie = Cookie::build("name", "value")
@ -28,15 +32,16 @@ use crate::{Cookie, SameSite, Expiration};
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct CookieBuilder<'c> {
pub struct CookieBuilder {
/// The cookie being built.
cookie: Cookie<'c>,
cookie: Cookie<'static>,
}
impl<'c> CookieBuilder<'c> {
impl CookieBuilder {
/// Creates a new `CookieBuilder` instance from the given name and value.
///
/// This method is typically called indirectly via [`Cookie::build()`].
/// This method is typically called indirectly via
/// [Cookie::build](struct.Cookie.html#method.build).
///
/// # Example
///
@ -46,9 +51,9 @@ impl<'c> CookieBuilder<'c> {
/// let c = Cookie::build("foo", "bar").finish();
/// assert_eq!(c.name_value(), ("foo", "bar"));
/// ```
pub fn new<N, V>(name: N, value: V) -> Self
where N: Into<Cow<'c, str>>,
V: Into<Cow<'c, str>>
pub fn new<N, V>(name: N, value: V) -> CookieBuilder
where N: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>
{
CookieBuilder { cookie: Cookie::new(name, value) }
}
@ -59,25 +64,20 @@ impl<'c> CookieBuilder<'c> {
///
/// ```rust
/// # extern crate cookie;
/// use cookie::{Cookie, Expiration};
/// use cookie::time::OffsetDateTime;
/// extern crate time;
///
/// use cookie::Cookie;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .expires(OffsetDateTime::now_utc())
/// .expires(time::now())
/// .finish();
///
/// assert!(c.expires().is_some());
///
/// let c = Cookie::build("foo", "bar")
/// .expires(None)
/// .finish();
///
/// assert_eq!(c.expires(), Some(Expiration::Session));
/// # }
/// ```
#[inline]
pub fn expires<E: Into<Expiration>>(mut self, when: E) -> Self {
pub fn expires(mut self, when: Tm) -> CookieBuilder {
self.cookie.set_expires(when);
self
}
@ -88,8 +88,10 @@ impl<'c> CookieBuilder<'c> {
///
/// ```rust
/// # extern crate cookie;
/// extern crate time;
/// use time::Duration;
///
/// use cookie::Cookie;
/// use cookie::time::Duration;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
@ -100,7 +102,7 @@ impl<'c> CookieBuilder<'c> {
/// # }
/// ```
#[inline]
pub fn max_age(mut self, value: time::Duration) -> Self {
pub fn max_age(mut self, value: Duration) -> CookieBuilder {
self.cookie.set_max_age(value);
self
}
@ -118,7 +120,7 @@ impl<'c> CookieBuilder<'c> {
///
/// assert_eq!(c.domain(), Some("www.rust-lang.org"));
/// ```
pub fn domain<D: Into<Cow<'c, str>>>(mut self, value: D) -> Self {
pub fn domain<D: Into<Cow<'static, str>>>(mut self, value: D) -> CookieBuilder {
self.cookie.set_domain(value);
self
}
@ -136,7 +138,7 @@ impl<'c> CookieBuilder<'c> {
///
/// assert_eq!(c.path(), Some("/"));
/// ```
pub fn path<P: Into<Cow<'c, str>>>(mut self, path: P) -> Self {
pub fn path<P: Into<Cow<'static, str>>>(mut self, path: P) -> CookieBuilder {
self.cookie.set_path(path);
self
}
@ -155,7 +157,7 @@ impl<'c> CookieBuilder<'c> {
/// assert_eq!(c.secure(), Some(true));
/// ```
#[inline]
pub fn secure(mut self, value: bool) -> Self {
pub fn secure(mut self, value: bool) -> CookieBuilder {
self.cookie.set_secure(value);
self
}
@ -174,7 +176,7 @@ impl<'c> CookieBuilder<'c> {
/// assert_eq!(c.http_only(), Some(true));
/// ```
#[inline]
pub fn http_only(mut self, value: bool) -> Self {
pub fn http_only(mut self, value: bool) -> CookieBuilder {
self.cookie.set_http_only(value);
self
}
@ -193,7 +195,7 @@ impl<'c> CookieBuilder<'c> {
/// assert_eq!(c.same_site(), Some(SameSite::Strict));
/// ```
#[inline]
pub fn same_site(mut self, value: SameSite) -> Self {
pub fn same_site(mut self, value: SameSite) -> CookieBuilder {
self.cookie.set_same_site(value);
self
}
@ -205,8 +207,10 @@ impl<'c> CookieBuilder<'c> {
///
/// ```rust
/// # extern crate cookie;
/// extern crate time;
///
/// use cookie::Cookie;
/// use cookie::time::Duration;
/// use time::Duration;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
@ -218,7 +222,7 @@ impl<'c> CookieBuilder<'c> {
/// # }
/// ```
#[inline]
pub fn permanent(mut self) -> Self {
pub fn permanent(mut self) -> CookieBuilder {
self.cookie.make_permanent();
self
}
@ -240,7 +244,7 @@ impl<'c> CookieBuilder<'c> {
/// assert_eq!(c.path(), Some("/"));
/// ```
#[inline]
pub fn finish(self) -> Cookie<'c> {
pub fn finish(self) -> Cookie<'static> {
self.cookie
}
}

12
third_party/rust/cookie/src/delta.rs vendored

@ -2,7 +2,7 @@ use std::ops::{Deref, DerefMut};
use std::hash::{Hash, Hasher};
use std::borrow::Borrow;
use crate::Cookie;
use Cookie;
/// A `DeltaCookie` is a helper structure used in a cookie jar. It wraps a
/// `Cookie` so that it can be hashed and compared purely by name. It further
@ -19,14 +19,20 @@ impl DeltaCookie {
/// Create a new `DeltaCookie` that is being added to a jar.
#[inline]
pub fn added(cookie: Cookie<'static>) -> DeltaCookie {
DeltaCookie { cookie, removed: false, }
DeltaCookie {
cookie: cookie,
removed: false,
}
}
/// Create a new `DeltaCookie` that is being removed from a jar. The
/// `cookie` should be a "removal" cookie.
#[inline]
pub fn removed(cookie: Cookie<'static>) -> DeltaCookie {
DeltaCookie { cookie, removed: true, }
DeltaCookie {
cookie: cookie,
removed: true,
}
}
}

22
third_party/rust/cookie/src/draft.rs vendored

@ -10,28 +10,18 @@ use std::fmt;
/// attribute is "Strict", then the cookie is never sent in cross-site requests.
/// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site
/// requests with "safe" HTTP methods, i.e, `GET`, `HEAD`, `OPTIONS`, `TRACE`.
/// If the `SameSite` attribute is "None", the cookie is sent in all cross-site
/// requests if the "Secure" flag is also set, otherwise the cookie is ignored.
/// This library automatically sets the "Secure" flag on cookies when
/// `same_site` is set to `SameSite::None` as long as `secure` is not explicitly
/// set to `false`.
/// If the `SameSite` attribute is not present (made explicit via the
/// `SameSite::None` variant), then the cookie will be sent as normal.
///
/// If the `SameSite` attribute is not present (by not setting `SameSite`
/// initally or passing `None` to [`Cookie::set_same_site()`]), then the cookie
/// will be sent as normal.
///
/// **Note:** This cookie attribute is an [HTTP draft]! Its meaning and
/// definition are subject to change.
///
/// [`Cookie::set_same_site()`]: crate::Cookie::set_same_site()
/// [HTTP draft]: https://tools.ietf.org/html/draft-west-cookie-incrementalism-00
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
/// are subject to change.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SameSite {
/// The "Strict" `SameSite` attribute.
Strict,
/// The "Lax" `SameSite` attribute.
Lax,
/// The "None" `SameSite` attribute.
/// No `SameSite` attribute.
None
}
@ -102,7 +92,7 @@ impl fmt::Display for SameSite {
match *self {
SameSite::Strict => write!(f, "Strict"),
SameSite::Lax => write!(f, "Lax"),
SameSite::None => write!(f, "None"),
SameSite::None => Ok(()),
}
}
}

137
third_party/rust/cookie/src/expiration.rs vendored

@ -1,137 +0,0 @@
use time::OffsetDateTime;
/// A cookie's expiration: either session or a date-time.
///
/// An `Expiration` is constructible via `Expiration::from()` with an
/// `Option<OffsetDateTime>` or an `OffsetDateTime`:
///
/// * `None` -> `Expiration::Session`
/// * `Some(OffsetDateTime)` -> `Expiration::DateTime`
/// * `OffsetDateTime` -> `Expiration::DateTime`
///
/// ```rust
/// use cookie::Expiration;
/// use time::OffsetDateTime;
///
/// let expires = Expiration::from(None);
/// assert_eq!(expires, Expiration::Session);
///
/// let now = OffsetDateTime::now_utc();
/// let expires = Expiration::from(now);
/// assert_eq!(expires, Expiration::DateTime(now));
///
/// let expires = Expiration::from(Some(now));
/// assert_eq!(expires, Expiration::DateTime(now));
/// ```
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum Expiration {
/// Expiration for a "permanent" cookie at a specific date-time.
DateTime(OffsetDateTime),
/// Expiration for a "session" cookie. Browsers define the notion of a
/// "session" and will automatically expire session cookies when they deem
/// the "session" to be over. This is typically, but need not be, when the
/// browser is closed.
Session,
}
impl Expiration {
/// Returns `true` if `self` is an `Expiration::DateTime`.
///
/// # Example
///
/// ```rust
/// use cookie::Expiration;
/// use time::OffsetDateTime;
///
/// let expires = Expiration::from(None);
/// assert!(!expires.is_datetime());
///
/// let expires = Expiration::from(OffsetDateTime::now_utc());
/// assert!(expires.is_datetime());
/// ```
pub fn is_datetime(&self) -> bool {
match self {
Expiration::DateTime(_) => true,
Expiration::Session => false
}
}
/// Returns `true` if `self` is an `Expiration::Session`.
///
/// # Example
///
/// ```rust
/// use cookie::Expiration;
/// use time::OffsetDateTime;
///
/// let expires = Expiration::from(None);
/// assert!(expires.is_session());
///
/// let expires = Expiration::from(OffsetDateTime::now_utc());
/// assert!(!expires.is_session());
/// ```
pub fn is_session(&self) -> bool {
match self {
Expiration::DateTime(_) => false,
Expiration::Session => true
}
}
/// Returns the inner `OffsetDateTime` if `self` is a `DateTime`.
///
/// # Example
///
/// ```rust
/// use cookie::Expiration;
/// use time::OffsetDateTime;
///
/// let expires = Expiration::from(None);
/// assert!(expires.datetime().is_none());
///
/// let now = OffsetDateTime::now_utc();
/// let expires = Expiration::from(now);
/// assert_eq!(expires.datetime(), Some(now));
/// ```
pub fn datetime(self) -> Option<OffsetDateTime> {
match self {
Expiration::Session => None,
Expiration::DateTime(v) => Some(v)
}
}
/// Applies `f` to the inner `OffsetDateTime` if `self` is a `DateTime` and
/// returns the mapped `Expiration`.
///
/// # Example
///
/// ```rust
/// use cookie::Expiration;
/// use time::{OffsetDateTime, Duration};
///
/// let now = OffsetDateTime::now_utc();
/// let one_week = Duration::weeks(1);
///
/// let expires = Expiration::from(now);
/// assert_eq!(expires.map(|t| t + one_week).datetime(), Some(now + one_week));
///
/// let expires = Expiration::from(None);
/// assert_eq!(expires.map(|t| t + one_week).datetime(), None);
/// ```
pub fn map<F>(self, f: F) -> Self
where F: FnOnce(OffsetDateTime) -> OffsetDateTime
{
match self {
Expiration::Session => Expiration::Session,
Expiration::DateTime(v) => Expiration::DateTime(f(v)),
}
}
}
impl<T: Into<Option<OffsetDateTime>>> From<T> for Expiration {
fn from(option: T) -> Self {
match option.into() {
Some(value) => Expiration::DateTime(value),
None => Expiration::Session
}
}
}

185
third_party/rust/cookie/src/jar.rs vendored

@ -1,17 +1,18 @@
use std::collections::HashSet;
use std::mem::replace;
#[cfg(feature = "signed")] use crate::secure::SignedJar;
#[cfg(feature = "private")] use crate::secure::PrivateJar;
#[cfg(any(feature = "signed", feature = "private"))] use crate::secure::Key;
use time::{self, Duration};
use crate::delta::DeltaCookie;
use crate::Cookie;
#[cfg(feature = "secure")]
use secure::{PrivateJar, SignedJar, Key};
use delta::DeltaCookie;
use Cookie;
/// A collection of cookies that tracks its modifications.
///
/// A `CookieJar` provides storage for any number of cookies. Any changes made
/// to the jar are tracked; the changes can be retrieved via the
/// [delta](#method.delta) method which returns an iterator over the changes.
/// [delta](#method.delta) method which returns an iterator over the changes.
///
/// # Usage
///
@ -117,7 +118,7 @@ impl CookieJar {
self.delta_cookies
.get(name)
.or_else(|| self.original_cookies.get(name))
.and_then(|c| if c.removed { None } else { Some(&c.cookie) })
.and_then(|c| if !c.removed { Some(&c.cookie) } else { None })
}
/// Adds an "original" `cookie` to this jar. If an original cookie with the
@ -178,7 +179,7 @@ impl CookieJar {
///
/// A "removal" cookie is a cookie that has the same name as the original
/// cookie but has an empty value, a max-age of 0, and an expiration date
/// far in the past. See also [`Cookie::make_removal()`].
/// far in the past.
///
/// # Example
///
@ -186,8 +187,10 @@ impl CookieJar {
///
/// ```rust
/// # extern crate cookie;
/// extern crate time;
///
/// use cookie::{CookieJar, Cookie};
/// use cookie::time::Duration;
/// use time::Duration;
///
/// # fn main() {
/// let mut jar = CookieJar::new();
@ -206,8 +209,7 @@ impl CookieJar {
/// # }
/// ```
///
/// Removing a new cookie does not result in a _removal_ cookie unless
/// there's an original cookie with the same name:
/// Removing a new cookie does not result in a _removal_ cookie:
///
/// ```rust
/// use cookie::{CookieJar, Cookie};
@ -218,17 +220,12 @@ impl CookieJar {
///
/// jar.remove(Cookie::named("name"));
/// assert_eq!(jar.delta().count(), 0);
///
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add(Cookie::new("name", "value"));
/// assert_eq!(jar.delta().count(), 1);
///
/// jar.remove(Cookie::named("name"));
/// assert_eq!(jar.delta().count(), 1);
/// ```
pub fn remove(&mut self, mut cookie: Cookie<'static>) {
if self.original_cookies.contains(cookie.name()) {
cookie.make_removal();
cookie.set_value("");
cookie.set_max_age(Duration::seconds(0));
cookie.set_expires(time::now() - Duration::days(365));
self.delta_cookies.replace(DeltaCookie::removed(cookie));
} else {
self.delta_cookies.remove(cookie.name());
@ -246,8 +243,10 @@ impl CookieJar {
///
/// ```rust
/// # extern crate cookie;
/// extern crate time;
///
/// use cookie::{CookieJar, Cookie};
/// use cookie::time::Duration;
/// use time::Duration;
///
/// # fn main() {
/// let mut jar = CookieJar::new();
@ -259,56 +258,30 @@ impl CookieJar {
/// assert_eq!(jar.iter().count(), 2);
///
/// // Now force remove the original cookie.
/// jar.force_remove(&Cookie::named("name"));
/// jar.force_remove(Cookie::new("name", "value"));
/// assert_eq!(jar.delta().count(), 1);
/// assert_eq!(jar.iter().count(), 1);
///
/// // Now force remove the new cookie.
/// jar.force_remove(&Cookie::named("key"));
/// jar.force_remove(Cookie::new("key", "value"));
/// assert_eq!(jar.delta().count(), 0);
/// assert_eq!(jar.iter().count(), 0);
/// # }
/// ```
pub fn force_remove<'a>(&mut self, cookie: &Cookie<'a>) {
pub fn force_remove<'a>(&mut self, cookie: Cookie<'a>) {
self.original_cookies.remove(cookie.name());
self.delta_cookies.remove(cookie.name());
}
/// Removes all delta cookies, i.e. all cookies not added via
/// [`CookieJar::add_original()`], from this `CookieJar`. This undoes any
/// changes from [`CookieJar::add()`] and [`CookieJar::remove()`]
/// operations.
///
/// # Example
///
/// ```rust
/// use cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
///
/// // Only original cookies will remain after calling `reset_delta`.
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("language", "Rust"));
///
/// // These operations, represented by delta cookies, will be reset.
/// jar.add(Cookie::new("language", "C++"));
/// jar.remove(Cookie::named("name"));
///
/// // All is normal.
/// assert_eq!(jar.get("name"), None);
/// assert_eq!(jar.get("language").map(Cookie::value), Some("C++"));
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 2);
///
/// // Resetting undoes delta operations.
/// jar.reset_delta();
/// assert_eq!(jar.get("name").map(Cookie::value), Some("value"));
/// assert_eq!(jar.get("language").map(Cookie::value), Some("Rust"));
/// assert_eq!(jar.iter().count(), 2);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn reset_delta(&mut self) {
self.delta_cookies = HashSet::new();
/// Removes all cookies from this cookie jar.
#[deprecated(since = "0.7.0", note = "calling this method may not remove \
all cookies since the path and domain are not specified; use \
`remove` instead")]
pub fn clear(&mut self) {
self.delta_cookies.clear();
for delta in replace(&mut self.original_cookies, HashSet::new()) {
self.remove(delta.cookie);
}
}
/// Returns an iterator over cookies that represent the changes to this jar
@ -377,9 +350,14 @@ impl CookieJar {
}
}
/// Returns a read-only `PrivateJar` with `self` as its parent jar using the
/// key `key` to verify/decrypt cookies retrieved from the child jar. Any
/// retrievals from the child jar will be made from the parent jar.
/// Returns a `PrivateJar` with `self` as its parent jar using the key `key`
/// to sign/encrypt and verify/decrypt cookies added/retrieved from the
/// child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// This method is only available when the `secure` feature is enabled.
///
/// # Example
///
@ -391,7 +369,7 @@ impl CookieJar {
///
/// // Add a private (signed + encrypted) cookie.
/// let mut jar = CookieJar::new();
/// jar.private_mut(&key).add(Cookie::new("private", "text"));
/// jar.private(&key).add(Cookie::new("private", "text"));
///
/// // The cookie's contents are encrypted.
/// assert_ne!(jar.get("private").unwrap().value(), "text");
@ -405,43 +383,18 @@ impl CookieJar {
/// assert!(jar.private(&key).get("private").is_none());
/// assert!(jar.get("private").is_some());
/// ```
#[cfg(feature = "private")]
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "private")))]
pub fn private<'a>(&'a self, key: &Key) -> PrivateJar<&'a Self> {
#[cfg(feature = "secure")]
pub fn private(&mut self, key: &Key) -> PrivateJar {
PrivateJar::new(self, key)
}
/// Returns a read/write `PrivateJar` with `self` as its parent jar using
/// the key `key` to sign/encrypt and verify/decrypt cookies added/retrieved
/// from the child jar.
/// Returns a `SignedJar` with `self` as its parent jar using the key `key`
/// to sign/verify cookies added/retrieved from the child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// # Example
///
/// ```rust
/// use cookie::{Cookie, CookieJar, Key};
///
/// // Generate a secure key.
/// let key = Key::generate();
///
/// // Add a private (signed + encrypted) cookie.
/// let mut jar = CookieJar::new();
/// jar.private_mut(&key).add(Cookie::new("private", "text"));
///
/// // Remove a cookie using the child jar.
/// jar.private_mut(&key).remove(Cookie::named("private"));
/// ```
#[cfg(feature = "private")]
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "private")))]
pub fn private_mut<'a>(&'a mut self, key: &Key) -> PrivateJar<&'a mut Self> {
PrivateJar::new(self, key)
}
/// Returns a read-only `SignedJar` with `self` as its parent jar using the
/// key `key` to verify cookies retrieved from the child jar. Any retrievals
/// from the child jar will be made from the parent jar.
/// This method is only available when the `secure` feature is enabled.
///
/// # Example
///
@ -453,7 +406,7 @@ impl CookieJar {
///
/// // Add a signed cookie.
/// let mut jar = CookieJar::new();
/// jar.signed_mut(&key).add(Cookie::new("signed", "text"));
/// jar.signed(&key).add(Cookie::new("signed", "text"));
///
/// // The cookie's contents are signed but still in plaintext.
/// assert_ne!(jar.get("signed").unwrap().value(), "text");
@ -468,36 +421,8 @@ impl CookieJar {
/// assert!(jar.signed(&key).get("signed").is_none());
/// assert!(jar.get("signed").is_some());
/// ```
#[cfg(feature = "signed")]
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "signed")))]
pub fn signed<'a>(&'a self, key: &Key) -> SignedJar<&'a Self> {
SignedJar::new(self, key)
}
/// Returns a read/write `SignedJar` with `self` as its parent jar using the
/// key `key` to sign/verify cookies added/retrieved from the child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// # Example
///
/// ```rust
/// use cookie::{Cookie, CookieJar, Key};
///
/// // Generate a secure key.
/// let key = Key::generate();
///
/// // Add a signed cookie.
/// let mut jar = CookieJar::new();
/// jar.signed_mut(&key).add(Cookie::new("signed", "text"));
///
/// // Remove a cookie.
/// jar.signed_mut(&key).remove(Cookie::named("signed"));
/// ```
#[cfg(feature = "signed")]
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "signed")))]
pub fn signed_mut<'a>(&'a mut self, key: &Key) -> SignedJar<&'a mut Self> {
#[cfg(feature = "secure")]
pub fn signed(&mut self, key: &Key) -> SignedJar {
SignedJar::new(self, key)
}
}
@ -543,7 +468,7 @@ impl<'a> Iterator for Iter<'a> {
#[cfg(test)]
mod test {
use super::CookieJar;
use crate::Cookie;
use Cookie;
#[test]
#[allow(deprecated)]
@ -558,8 +483,7 @@ mod test {
assert!(c.get("test2").is_some());
c.add(Cookie::new("test3", ""));
c.remove(Cookie::named("test2"));
c.remove(Cookie::named("test3"));
c.clear();
assert!(c.get("test").is_none());
assert!(c.get("test2").is_none());
@ -576,9 +500,9 @@ mod test {
}
#[test]
#[cfg(all(feature = "signed", feature = "private"))]
#[cfg(feature = "secure")]
fn iter() {
let key = crate::Key::generate();
let key = ::Key::generate();
let mut c = CookieJar::new();
c.add_original(Cookie::new("original", "original"));
@ -588,8 +512,8 @@ mod test {
c.add(Cookie::new("test3", "test3"));
assert_eq!(c.iter().count(), 4);
c.signed_mut(&key).add(Cookie::new("signed", "signed"));
c.private_mut(&key).add(Cookie::new("encrypted", "encrypted"));
c.signed(&key).add(Cookie::new("signed", "signed"));
c.private(&key).add(Cookie::new("encrypted", "encrypted"));
assert_eq!(c.iter().count(), 6);
c.remove(Cookie::named("test"));
@ -607,6 +531,7 @@ mod test {
}
#[test]
#[cfg(feature = "secure")]
fn delta() {
use std::collections::HashMap;
use time::Duration;

673
third_party/rust/cookie/src/lib.rs vendored

Diff not shown because of the file's large size.

260
third_party/rust/cookie/src/parse.rs vendored

@ -1,29 +1,21 @@
use std::borrow::Cow;
use std::cmp;
use std::error::Error;
use std::convert::{From, TryFrom};
use std::str::Utf8Error;
use std::fmt;
use std::convert::From;
#[allow(unused_imports, deprecated)]
use std::ascii::AsciiExt;
#[cfg(feature = "percent-encode")]
use percent_encoding::percent_decode;
use time::{PrimitiveDateTime, Duration, OffsetDateTime};
use time::{parsing::Parsable, macros::format_description, format_description::FormatItem};
use url::percent_encoding::percent_decode;
use time::{self, Duration};
use crate::{Cookie, SameSite, CookieStr};
// The three formats spec'd in http://tools.ietf.org/html/rfc2616#section-3.3.1.
// Additional ones as encountered in the real world.
pub static FMT1: &[FormatItem<'_>] = format_description!("[weekday repr:short], [day] [month repr:short] [year padding:none] [hour]:[minute]:[second] GMT");
pub static FMT2: &[FormatItem<'_>] = format_description!("[weekday], [day]-[month repr:short]-[year repr:last_two] [hour]:[minute]:[second] GMT");
pub static FMT3: &[FormatItem<'_>] = format_description!("[weekday repr:short] [month repr:short] [day padding:space] [hour]:[minute]:[second] [year padding:none]");
pub static FMT4: &[FormatItem<'_>] = format_description!("[weekday repr:short], [day]-[month repr:short]-[year padding:none] [hour]:[minute]:[second] GMT");
use ::{Cookie, SameSite, CookieStr};
/// Enum corresponding to a parsing error.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
#[non_exhaustive]
pub enum ParseError {
/// The cookie did not contain a name/value pair.
MissingPair,
@ -31,6 +23,10 @@ pub enum ParseError {
EmptyName,
/// Decoding the cookie's name or value resulted in invalid UTF-8.
Utf8Error(Utf8Error),
/// It is discouraged to exhaustively match on this enum as its variants may
/// grow without a breaking-change bump in version numbers.
#[doc(hidden)]
__Nonexhasutive,
}
impl ParseError {
@ -42,6 +38,7 @@ impl ParseError {
ParseError::Utf8Error(_) => {
"decoding the cookie's name or value resulted in invalid UTF-8"
}
ParseError::__Nonexhasutive => unreachable!("__Nonexhasutive ParseError"),
}
}
}
@ -82,39 +79,18 @@ fn indexes_of(needle: &str, haystack: &str) -> Option<(usize, usize)> {
}
#[cfg(feature = "percent-encode")]
fn name_val_decoded(
name: &str,
val: &str
) -> Result<Option<(CookieStr<'static>, CookieStr<'static>)>, ParseError> {
fn name_val_decoded(name: &str, val: &str) -> Result<(CookieStr, CookieStr), ParseError> {
let decoded_name = percent_decode(name.as_bytes()).decode_utf8()?;
let decoded_value = percent_decode(val.as_bytes()).decode_utf8()?;
let name = CookieStr::Concrete(Cow::Owned(decoded_name.into_owned()));
let val = CookieStr::Concrete(Cow::Owned(decoded_value.into_owned()));
if let (&Cow::Borrowed(_), &Cow::Borrowed(_)) = (&decoded_name, &decoded_value) {
Ok(None)
} else {
let name = CookieStr::Concrete(Cow::Owned(decoded_name.into()));
let val = CookieStr::Concrete(Cow::Owned(decoded_value.into()));
Ok(Some((name, val)))
}
Ok((name, val))
}
#[cfg(not(feature = "percent-encode"))]
fn name_val_decoded(
_: &str,
_: &str
) -> Result<Option<(CookieStr<'static>, CookieStr<'static>)>, ParseError> {
unreachable!("This function should never be called with 'percent-encode' disabled!")
}
fn trim_quotes(s: &str) -> &str {
if s.len() < 2 {
return s;
}
match (s.chars().next(), s.chars().last()) {
(Some('"'), Some('"')) => &s[1..(s.len() - 1)],
_ => s
}
fn name_val_decoded(_: &str, _: &str) -> Result<(CookieStr, CookieStr), ParseError> {
unreachable!("This function should never be called when the feature is disabled!")
}
// This function does the real parsing but _does not_ set the `cookie_string` in
@ -123,14 +99,14 @@ fn trim_quotes(s: &str) -> &str {
// set in the outer `parse` function.
fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
let mut attributes = s.split(';');
let key_value = match attributes.next() {
Some(s) => s,
_ => panic!(),
};
// Determine the name = val.
let key_value = attributes.next().expect("first str::split().next() returns Some");
let (name, value) = match key_value.find('=') {
Some(i) => {
let (key, value) = (key_value[..i].trim(), key_value[(i + 1)..].trim());
(key, trim_quotes(value).trim())
},
Some(i) => (key_value[..i].trim(), key_value[(i + 1)..].trim()),
None => return Err(ParseError::MissingPair)
};
@ -138,29 +114,23 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
return Err(ParseError::EmptyName);
}
// If there is nothing to decode, or we're not decoding, use indexes.
let indexed_names = |s, name, value| {
// Create a cookie with all of the defaults. We'll fill things in while we
// iterate through the parameters below.
let (name, value) = if decode {
name_val_decoded(name, value)?
} else {
let name_indexes = indexes_of(name, s).expect("name sub");
let value_indexes = indexes_of(value, s).expect("value sub");
let name = CookieStr::Indexed(name_indexes.0, name_indexes.1);
let value = CookieStr::Indexed(value_indexes.0, value_indexes.1);
(name, value)
};
// Create a cookie with all of the defaults. We'll fill things in while we
// iterate through the parameters below.
let (name, value) = if decode {
match name_val_decoded(name, value)? {
Some((name, value)) => (name, value),
None => indexed_names(s, name, value)
}
} else {
indexed_names(s, name, value)
};
let mut cookie: Cookie<'c> = Cookie {
name, value,
let mut cookie = Cookie {
cookie_string: None,
name: name,
value: value,
expires: None,
max_age: None,
domain: None,
@ -179,26 +149,21 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
match (&*key.to_ascii_lowercase(), value) {
("secure", _) => cookie.secure = Some(true),
("httponly", _) => cookie.http_only = Some(true),
("max-age", Some(mut v)) => cookie.max_age = {
let is_negative = v.starts_with('-');
if is_negative {
v = &v[1..];
}
if !v.chars().all(|d| d.is_digit(10)) {
continue
}
// From RFC 6265 5.2.2: neg values indicate that the earliest
// expiration should be used, so set the max age to 0 seconds.
if is_negative {
Some(Duration::ZERO)
} else {
Some(v.parse::<i64>()
.map(Duration::seconds)
.unwrap_or_else(|_| Duration::seconds(i64::max_value())))
}
},
("max-age", Some(v)) => {
// See RFC 6265 Section 5.2.2, negative values indicate that the
// earliest possible expiration time should be used, so set the
// max age as 0 seconds.
cookie.max_age = match v.parse() {
Ok(val) if val <= 0 => Some(Duration::zero()),
Ok(val) => {
// Don't panic if the max age seconds is greater than what's supported by
// `Duration`.
let val = cmp::min(val, Duration::max_value().num_seconds());
Some(Duration::seconds(val))
}
Err(_) => continue,
};
}
("domain", Some(mut domain)) if !domain.is_empty() => {
if domain.starts_with('.') {
domain = &domain[1..];
@ -216,8 +181,6 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
cookie.same_site = Some(SameSite::Strict);
} else if v.eq_ignore_ascii_case("lax") {
cookie.same_site = Some(SameSite::Lax);
} else if v.eq_ignore_ascii_case("none") {
cookie.same_site = Some(SameSite::None);
} else {
// We do nothing here, for now. When/if the `SameSite`
// attribute becomes standard, the spec says that we should
@ -227,14 +190,16 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
}
}
("expires", Some(v)) => {
let tm = parse_date(v, &FMT1)
.or_else(|_| parse_date(v, &FMT2))
.or_else(|_| parse_date(v, &FMT3))
.or_else(|_| parse_date(v, &FMT4));
// .or_else(|_| parse_date(v, &FMT5));
// Try strptime with three date formats according to
// http://tools.ietf.org/html/rfc2616#section-3.3.1. Try
// additional ones as encountered in the real world.
let tm = time::strptime(v, "%a, %d %b %Y %H:%M:%S %Z")
.or_else(|_| time::strptime(v, "%A, %d-%b-%y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a, %d-%b-%Y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a %b %d %H:%M:%S %Y"));
if let Ok(time) = tm {
cookie.expires = Some(time.into())
cookie.expires = Some(time)
}
}
_ => {
@ -249,7 +214,7 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
Ok(cookie)
}
pub(crate) fn parse_cookie<'c, S>(cow: S, decode: bool) -> Result<Cookie<'c>, ParseError>
pub fn parse_cookie<'c, S>(cow: S, decode: bool) -> Result<Cookie<'c>, ParseError>
where S: Into<Cow<'c, str>>
{
let s = cow.into();
@ -258,27 +223,10 @@ pub(crate) fn parse_cookie<'c, S>(cow: S, decode: bool) -> Result<Cookie<'c>, ParseError>
Ok(cookie)
}
pub(crate) fn parse_date(s: &str, format: &impl Parsable) -> Result<OffsetDateTime, time::Error> {
// Parse. Handle "abbreviated" dates like Chromium. See cookie#162.
let mut date = format.parse(s.as_bytes())?;
if let Some(y) = date.year().or_else(|| date.year_last_two().map(|v| v as i32)) {
let offset = match y {
0..=68 => 2000,
69..=99 => 1900,
_ => 0,
};
date.set_year(y + offset);
}
Ok(PrimitiveDateTime::try_from(date)?.assume_utc())
}
#[cfg(test)]
mod tests {
use super::parse_date;
use crate::{Cookie, SameSite};
use time::Duration;
use ::{Cookie, SameSite};
use ::time::{strptime, Duration};
macro_rules! assert_eq_parse {
($string:expr, $expected:expr) => (
@ -323,15 +271,6 @@ mod tests {
assert_eq_parse!("foo=bar; SameSite=strict", expected);
assert_eq_parse!("foo=bar; SameSite=STrICT", expected);
assert_eq_parse!("foo=bar; SameSite=STRICT", expected);
let expected = Cookie::build("foo", "bar")
.same_site(SameSite::None)
.finish();
assert_eq_parse!("foo=bar; SameSite=None", expected);
assert_eq_parse!("foo=bar; SameSITE=none", expected);
assert_eq_parse!("foo=bar; SameSite=NOne", expected);
assert_eq_parse!("foo=bar; SameSite=nOne", expected);
}
#[test]
@ -344,28 +283,10 @@ mod tests {
let expected = Cookie::build("foo", "bar=baz").finish();
assert_eq_parse!("foo=bar=baz", expected);
let expected = Cookie::build("foo", "\"bar\"").finish();
assert_eq_parse!("foo=\"\"bar\"\"", expected);
let expected = Cookie::build("foo", "\"bar").finish();
assert_eq_parse!("foo= \"bar", expected);
assert_eq_parse!("foo=\"bar ", expected);
assert_eq_parse!("foo=\"\"bar\"", expected);
assert_eq_parse!("foo=\"\"bar \"", expected);
assert_eq_parse!("foo=\"\"bar \" ", expected);
let expected = Cookie::build("foo", "bar\"").finish();
assert_eq_parse!("foo=bar\"", expected);
assert_eq_parse!("foo=\"bar\"\"", expected);
assert_eq_parse!("foo=\" bar\"\"", expected);
assert_eq_parse!("foo=\" bar\" \" ", expected);
let mut expected = Cookie::build("foo", "bar").finish();
assert_eq_parse!("foo=bar", expected);
assert_eq_parse!("foo = bar", expected);
assert_eq_parse!("foo=\"bar\"", expected);
assert_eq_parse!(" foo=bar ", expected);
assert_eq_parse!(" foo=\"bar \" ", expected);
assert_eq_parse!(" foo=bar ;Domain=", expected);
assert_eq_parse!(" foo=bar ;Domain= ", expected);
assert_eq_parse!(" foo=bar ;Ignored", expected);
@ -395,7 +316,7 @@ mod tests {
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
expected.set_max_age(Duration::ZERO);
expected.set_max_age(Duration::zero());
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=0", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0 ", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=-1", expected);
@ -450,76 +371,18 @@ mod tests {
Domain=FOO.COM", unexpected);
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = parse_date(time_str, &super::FMT1).unwrap();
let expires = strptime(time_str, "%a, %d %b %Y %H:%M:%S %Z").unwrap();
expected.set_expires(expires);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT", expected);
unexpected.set_domain("foo.com");
let bad_expires = parse_date(time_str, &super::FMT1).unwrap();
let bad_expires = strptime(time_str, "%a, %d %b %Y %H:%S:%M %Z").unwrap();
expected.set_expires(bad_expires);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT", unexpected);
}
#[test]
fn parse_abbreviated_years() {
let cookie_str = "foo=bar; expires=Thu, 10-Sep-20 20:00:00 GMT";
let cookie = Cookie::parse(cookie_str).unwrap();
assert_eq!(cookie.expires_datetime().unwrap().year(), 2020);
let cookie_str = "foo=bar; expires=Thu, 10-Sep-68 20:00:00 GMT";
let cookie = Cookie::parse(cookie_str).unwrap();
assert_eq!(cookie.expires_datetime().unwrap().year(), 2068);
let cookie_str = "foo=bar; expires=Thu, 10-Sep-69 20:00:00 GMT";
let cookie = Cookie::parse(cookie_str).unwrap();
assert_eq!(cookie.expires_datetime().unwrap().year(), 1969);
let cookie_str = "foo=bar; expires=Thu, 10-Sep-99 20:00:00 GMT";
let cookie = Cookie::parse(cookie_str).unwrap();
assert_eq!(cookie.expires_datetime().unwrap().year(), 1999);
let cookie_str = "foo=bar; expires=Thu, 10-Sep-2069 20:00:00 GMT";
let cookie = Cookie::parse(cookie_str).unwrap();
assert_eq!(cookie.expires_datetime().unwrap().year(), 2069);
}
#[test]
fn parse_variant_date_fmts() {
let cookie_str = "foo=bar; expires=Sun, 06 Nov 1994 08:49:37 GMT";
Cookie::parse(cookie_str).unwrap().expires_datetime().unwrap();
let cookie_str = "foo=bar; expires=Sunday, 06-Nov-94 08:49:37 GMT";
Cookie::parse(cookie_str).unwrap().expires_datetime().unwrap();
let cookie_str = "foo=bar; expires=Sun Nov 6 08:49:37 1994";
Cookie::parse(cookie_str).unwrap().expires_datetime().unwrap();
}
#[test]
fn parse_very_large_max_ages() {
let mut expected = Cookie::build("foo", "bar")
.max_age(Duration::seconds(i64::max_value()))
.finish();
let string = format!("foo=bar; Max-Age={}", 1u128 << 100);
assert_eq_parse!(&string, expected);
expected.set_max_age(Duration::seconds(0));
assert_eq_parse!("foo=bar; Max-Age=-129", expected);
let string = format!("foo=bar; Max-Age=-{}", 1u128 << 100);
assert_eq_parse!(&string, expected);
let string = format!("foo=bar; Max-Age=-{}", i64::max_value());
assert_eq_parse!(&string, expected);
let string = format!("foo=bar; Max-Age={}", i64::max_value());
expected.set_max_age(Duration::seconds(i64::max_value()));
assert_eq_parse!(&string, expected);
}
#[test]
fn odd_characters() {
let expected = Cookie::new("foo", "b%2Fr");
@ -540,11 +403,10 @@ mod tests {
#[test]
fn do_not_panic_on_large_max_ages() {
let max_seconds = Duration::MAX.whole_seconds();
let max_seconds = Duration::max_value().num_seconds();
let expected = Cookie::build("foo", "bar")
.max_age(Duration::seconds(max_seconds))
.finish();
let too_many_seconds = (max_seconds as u64) + 1;
assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", too_many_seconds), expected);
assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", max_seconds + 1), expected);
}
}

164
third_party/rust/cookie/src/secure/key.rs vendored

@ -1,68 +1,31 @@
const SIGNING_KEY_LEN: usize = 32;
const ENCRYPTION_KEY_LEN: usize = 32;
const COMBINED_KEY_LENGTH: usize = SIGNING_KEY_LEN + ENCRYPTION_KEY_LEN;
use secure::ring::hkdf::expand;
use secure::ring::digest::{SHA256, Algorithm};
use secure::ring::hmac::SigningKey;
use secure::ring::rand::{SecureRandom, SystemRandom};
// Statically ensure the numbers above are in-sync.
#[cfg(feature = "signed")]
const_assert!(crate::secure::signed::KEY_LEN == SIGNING_KEY_LEN);
#[cfg(feature = "private")]
const_assert!(crate::secure::private::KEY_LEN == ENCRYPTION_KEY_LEN);
use secure::private::KEY_LEN as PRIVATE_KEY_LEN;
use secure::signed::KEY_LEN as SIGNED_KEY_LEN;
static HKDF_DIGEST: &'static Algorithm = &SHA256;
const KEYS_INFO: &'static str = "COOKIE;SIGNED:HMAC-SHA256;PRIVATE:AEAD-AES-256-GCM";
/// A cryptographic master key for use with `Signed` and/or `Private` jars.
///
/// This structure encapsulates secure, cryptographic keys for use with both
/// [`PrivateJar`](crate::PrivateJar) and [`SignedJar`](crate::SignedJar). A
/// single instance of a `Key` can be used for both a `PrivateJar` and a
/// `SignedJar` simultaneously with no notable security implications.
#[cfg_attr(all(nightly, doc), doc(cfg(any(feature = "private", feature = "signed"))))]
/// [PrivateJar](struct.PrivateJar.html) and [SignedJar](struct.SignedJar.html).
/// It can be derived from a single master key via
/// [from_master](#method.from_master) or generated from a secure random source
/// via [generate](#method.generate). A single instance of `Key` can be used for
/// both a `PrivateJar` and a `SignedJar`.
///
/// This type is only available when the `secure` feature is enabled.
#[derive(Clone)]
pub struct Key([u8; COMBINED_KEY_LENGTH /* SIGNING | ENCRYPTION */]);
impl PartialEq for Key {
fn eq(&self, other: &Self) -> bool {
use subtle::ConstantTimeEq;
self.0.ct_eq(&other.0).into()
}
pub struct Key {
signing_key: [u8; SIGNED_KEY_LEN],
encryption_key: [u8; PRIVATE_KEY_LEN]
}
impl Key {
// An empty key structure, to be filled.
const fn zero() -> Self {
Key([0; COMBINED_KEY_LENGTH])
}
/// Creates a new `Key` from a 512-bit cryptographically random string.
///
/// The supplied key must be at least 512-bits (64 bytes). For security, the
/// master key _must_ be cryptographically random.
///
/// # Panics
///
/// Panics if `key` is less than 64 bytes in length.
///
/// # Example
///
/// ```rust
/// use cookie::Key;
///
/// # /*
/// let key = { /* a cryptographically random key >= 64 bytes */ };
/// # */
/// # let key: &Vec<u8> = &(0..64).collect();
///
/// let key = Key::from(key);
/// ```
pub fn from(key: &[u8]) -> Key {
if key.len() < 64 {
panic!("bad key length: expected >= 64 bytes, found {}", key.len());
}
let mut output = Key::zero();
output.0.copy_from_slice(&key[..COMBINED_KEY_LENGTH]);
output
}
/// Derives new signing/encryption keys from a master key.
///
/// The master key must be at least 256-bits (32 bytes). For security, the
@ -83,21 +46,28 @@ impl Key {
/// # */
/// # let master_key: &Vec<u8> = &(0..32).collect();
///
/// let key = Key::derive_from(master_key);
/// let key = Key::from_master(master_key);
/// ```
#[cfg(feature = "key-expansion")]
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "key-expansion")))]
pub fn derive_from(master_key: &[u8]) -> Self {
if master_key.len() < 32 {
panic!("bad master key length: expected >= 32 bytes, found {}", master_key.len());
pub fn from_master(key: &[u8]) -> Key {
if key.len() < 32 {
panic!("bad master key length: expected at least 32 bytes, found {}", key.len());
}
// Expand the master key into two HKDF generated keys.
const KEYS_INFO: &[u8] = b"COOKIE;SIGNED:HMAC-SHA256;PRIVATE:AEAD-AES-256-GCM";
let mut both_keys = [0; COMBINED_KEY_LENGTH];
let hk = hkdf::Hkdf::<sha2::Sha256>::from_prk(master_key).expect("key length prechecked");
hk.expand(KEYS_INFO, &mut both_keys).expect("expand into keys");
Key::from(&both_keys)
// Expand the user's key into two.
let prk = SigningKey::new(HKDF_DIGEST, key);
let mut both_keys = [0; SIGNED_KEY_LEN + PRIVATE_KEY_LEN];
expand(&prk, KEYS_INFO.as_bytes(), &mut both_keys);
// Copy the keys into their respective arrays.
let mut signing_key = [0; SIGNED_KEY_LEN];
let mut encryption_key = [0; PRIVATE_KEY_LEN];
signing_key.copy_from_slice(&both_keys[..SIGNED_KEY_LEN]);
encryption_key.copy_from_slice(&both_keys[SIGNED_KEY_LEN..]);
Key {
signing_key: signing_key,
encryption_key: encryption_key
}
}
/// Generates signing/encryption keys from a secure, random source. Keys are
@ -106,7 +76,7 @@ impl Key {
/// # Panics
///
/// Panics if randomness cannot be retrieved from the operating system. See
/// [`Key::try_generate()`] for a non-panicking version.
/// [try_generate](#method.try_generate) for a non-panicking version.
///
/// # Example
///
@ -131,16 +101,18 @@ impl Key {
/// let key = Key::try_generate();
/// ```
pub fn try_generate() -> Option<Key> {
use crate::secure::rand::RngCore;
let mut sign_key = [0; SIGNED_KEY_LEN];
let mut enc_key = [0; PRIVATE_KEY_LEN];
let mut rng = crate::secure::rand::thread_rng();
let mut key = Key::zero();
rng.try_fill_bytes(&mut key.0).ok()?;
Some(key)
let rng = SystemRandom::new();
if rng.fill(&mut sign_key).is_err() || rng.fill(&mut enc_key).is_err() {
return None
}
Some(Key { signing_key: sign_key, encryption_key: enc_key })
}
/// Returns the raw bytes of a key suitable for signing cookies. Guaranteed
/// to be at least 32 bytes.
/// Returns the raw bytes of a key suitable for signing cookies.
///
/// # Example
///
@ -151,11 +123,10 @@ impl Key {
/// let signing_key = key.signing();
/// ```
pub fn signing(&self) -> &[u8] {
&self.0[..SIGNING_KEY_LEN]
&self.signing_key[..]
}
/// Returns the raw bytes of a key suitable for encrypting cookies.
/// Guaranteed to be at least 32 bytes.
///
/// # Example
///
@ -166,22 +137,7 @@ impl Key {
/// let encryption_key = key.encryption();
/// ```
pub fn encryption(&self) -> &[u8] {
&self.0[SIGNING_KEY_LEN..]
}
/// Returns the raw bytes of the master key. Guaranteed to be at least 64
/// bytes.
///
/// # Example
///
/// ```rust
/// use cookie::Key;
///
/// let key = Key::generate();
/// let master_key = key.master();
/// ```
pub fn master(&self) -> &[u8] {
&self.0
&self.encryption_key[..]
}
}
@ -190,30 +146,18 @@ mod test {
use super::Key;
#[test]
fn from_works() {
let key = Key::from(&(0..64).collect::<Vec<_>>());
let signing: Vec<u8> = (0..32).collect();
assert_eq!(key.signing(), &*signing);
let encryption: Vec<u8> = (32..64).collect();
assert_eq!(key.encryption(), &*encryption);
}
#[test]
#[cfg(feature = "key-expansion")]
fn deterministic_derive() {
fn deterministic_from_master() {
let master_key: Vec<u8> = (0..32).collect();
let key_a = Key::derive_from(&master_key);
let key_b = Key::derive_from(&master_key);
let key_a = Key::from_master(&master_key);
let key_b = Key::from_master(&master_key);
assert_eq!(key_a.signing(), key_b.signing());
assert_eq!(key_a.encryption(), key_b.encryption());
assert_ne!(key_a.encryption(), key_a.signing());
let master_key_2: Vec<u8> = (32..64).collect();
let key_2 = Key::derive_from(&master_key_2);
let key_2 = Key::from_master(&master_key_2);
assert_ne!(key_2.signing(), key_a.signing());
assert_ne!(key_2.encryption(), key_a.encryption());


@ -39,11 +39,3 @@ macro_rules! assert_secure_behaviour {
})
}
// This is courtesy of `static_assertions`. That library is Copyright (c) 2017
// Nikolai Vazquez. See https://github.com/nvzqz/static-assertions-rs for more.
macro_rules! const_assert {
($x:expr $(,)?) => {
#[allow(unknown_lints, clippy::eq_op)]
const _: [(); 0 - !{ const ASSERT: bool = $x; ASSERT } as usize] = [];
};
}

12
third_party/rust/cookie/src/secure/mod.rs vendored

@ -1,14 +1,12 @@
extern crate rand;
extern crate ring;
extern crate base64;
#[macro_use]
mod macros;
mod private;
mod signed;
mod key;
pub use self::private::*;
pub use self::signed::*;
pub use self::key::*;
#[cfg(feature = "private")] mod private;
#[cfg(feature = "private")] pub use self::private::*;
#[cfg(feature = "signed")] mod signed;
#[cfg(feature = "signed")] pub use self::signed::*;

229
third_party/rust/cookie/src/secure/private.rs vendored

@ -1,20 +1,15 @@
extern crate aes_gcm;
use secure::ring::aead::{seal_in_place, open_in_place, Aad, Algorithm, Nonce, AES_256_GCM};
use secure::ring::aead::{OpeningKey, SealingKey};
use secure::ring::rand::{SecureRandom, SystemRandom};
use secure::{base64, Key};
use std::convert::TryInto;
use std::borrow::{Borrow, BorrowMut};
use crate::secure::{base64, rand, Key};
use crate::{Cookie, CookieJar};
use self::aes_gcm::Aes256Gcm;
use self::aes_gcm::aead::{Aead, AeadInPlace, NewAead, generic_array::GenericArray, Payload};
use self::rand::RngCore;
use {Cookie, CookieJar};
// Keep these in sync, and keep the key len synced with the `private` docs as
// well as the `KEYS_INFO` const in secure::Key.
pub(crate) const NONCE_LEN: usize = 12;
pub(crate) const TAG_LEN: usize = 16;
pub(crate) const KEY_LEN: usize = 32;
static ALGO: &'static Algorithm = &AES_256_GCM;
const NONCE_LEN: usize = 12;
pub const KEY_LEN: usize = 32;
/// A child cookie jar that provides authenticated encryption for its cookies.
///
@ -23,49 +18,22 @@ pub(crate) const KEY_LEN: usize = 32;
/// `PrivateJar` are simultaneously assured confidentiality, integrity, and
/// authenticity. In other words, clients cannot discover nor tamper with the
/// contents of a cookie, nor can they fabricate cookie data.
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "private")))]
pub struct PrivateJar<J> {
parent: J,
///
/// This type is only available when the `secure` feature is enabled.
pub struct PrivateJar<'a> {
parent: &'a mut CookieJar,
key: [u8; KEY_LEN]
}
impl<J> PrivateJar<J> {
impl<'a> PrivateJar<'a> {
/// Creates a new child `PrivateJar` with parent `parent` and key `key`.
/// This method is typically called indirectly via the `signed` method of
/// `CookieJar`.
pub(crate) fn new(parent: J, key: &Key) -> PrivateJar<J> {
PrivateJar { parent, key: key.encryption().try_into().expect("enc key len") }
}
/// Encrypts the cookie's value with authenticated encryption providing
/// confidentiality, integrity, and authenticity.
fn encrypt_cookie(&self, cookie: &mut Cookie) {
// Create a vec to hold the [nonce | cookie value | tag].
let cookie_val = cookie.value().as_bytes();
let mut data = vec![0; NONCE_LEN + cookie_val.len() + TAG_LEN];
// Split data into three: nonce, input/output, tag. Copy input.
let (nonce, in_out) = data.split_at_mut(NONCE_LEN);
let (in_out, tag) = in_out.split_at_mut(cookie_val.len());
in_out.copy_from_slice(cookie_val);
// Fill nonce piece with random data.
let mut rng = self::rand::thread_rng();
rng.try_fill_bytes(nonce).expect("couldn't random fill nonce");
let nonce = GenericArray::clone_from_slice(nonce);
// Perform the actual sealing operation, using the cookie's name as
// associated data to prevent value swapping.
let aad = cookie.name().as_bytes();
let aead = Aes256Gcm::new(GenericArray::from_slice(&self.key));
let aad_tag = aead.encrypt_in_place_detached(&nonce, aad, in_out)
.expect("encryption failure!");
// Copy the tag into the tag piece.
tag.copy_from_slice(&aad_tag);
// Base64 encode [nonce | encrypted value | tag].
cookie.set_value(base64::encode(&data));
#[doc(hidden)]
pub fn new(parent: &'a mut CookieJar, key: &Key) -> PrivateJar<'a> {
let mut key_array = [0u8; KEY_LEN];
key_array.copy_from_slice(key.encryption());
PrivateJar { parent: parent, key: key_array }
}
/// Given a sealed value `str` and a key name `name`, where the nonce is
@ -73,56 +41,24 @@ impl<J> PrivateJar<J> {
/// verifies and decrypts the sealed value and returns it. If there's a
/// problem, returns an `Err` with a string describing the issue.
fn unseal(&self, name: &str, value: &str) -> Result<String, &'static str> {
let data = base64::decode(value).map_err(|_| "bad base64 value")?;
let mut data = base64::decode(value).map_err(|_| "bad base64 value")?;
if data.len() <= NONCE_LEN {
return Err("length of decoded data is <= NONCE_LEN");
}
let (nonce, cipher) = data.split_at(NONCE_LEN);
let payload = Payload { msg: cipher, aad: name.as_bytes() };
let ad = Aad::from(name.as_bytes());
let key = OpeningKey::new(ALGO, &self.key).expect("opening key");
let (nonce, sealed) = data.split_at_mut(NONCE_LEN);
let nonce = Nonce::try_assume_unique_for_key(nonce)
.expect("invalid length of `nonce`");
let unsealed = open_in_place(&key, nonce, ad, 0, sealed)
.map_err(|_| "invalid key/nonce/value: bad seal")?;
let aead = Aes256Gcm::new(GenericArray::from_slice(&self.key));
aead.decrypt(GenericArray::from_slice(nonce), payload)
.map_err(|_| "invalid key/nonce/value: bad seal")
.and_then(|s| String::from_utf8(s).map_err(|_| "bad unsealed utf8"))
::std::str::from_utf8(unsealed)
.map(|s| s.to_string())
.map_err(|_| "bad unsealed utf8")
}
/// Authenticates and decrypts `cookie`, returning the plaintext version if
/// decryption succeeds or `None` otherwise. Authentication and decryption
/// _always_ succeed if `cookie` was generated by a `PrivateJar` with the
/// same key as `self`.
///
/// # Example
///
/// ```rust
/// use cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// assert!(jar.private(&key).get("name").is_none());
///
/// jar.private_mut(&key).add(Cookie::new("name", "value"));
/// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
///
/// let plain = jar.get("name").cloned().unwrap();
/// assert_ne!(plain.value(), "value");
/// let decrypted = jar.private(&key).decrypt(plain).unwrap();
/// assert_eq!(decrypted.value(), "value");
///
/// let plain = Cookie::new("plaintext", "hello");
/// assert!(jar.private(&key).decrypt(plain).is_none());
/// ```
pub fn decrypt(&self, mut cookie: Cookie<'static>) -> Option<Cookie<'static>> {
if let Ok(value) = self.unseal(cookie.name(), cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
None
}
}
impl<J: Borrow<CookieJar>> PrivateJar<J> {
/// Returns a reference to the `Cookie` inside this jar with the name `name`
/// and authenticates and decrypts the cookie's value, returning a `Cookie`
/// with the decrypted value. If the cookie cannot be found, or the cookie
@ -134,20 +70,25 @@ impl<J: Borrow<CookieJar>> PrivateJar<J> {
/// use cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let jar = CookieJar::new();
/// assert!(jar.private(&key).get("name").is_none());
/// let mut jar = CookieJar::new();
/// let mut private_jar = jar.private(&key);
/// assert!(private_jar.get("name").is_none());
///
/// let mut jar = jar;
/// let mut private_jar = jar.private_mut(&key);
/// private_jar.add(Cookie::new("name", "value"));
/// assert_eq!(private_jar.get("name").unwrap().value(), "value");
/// ```
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
self.parent.borrow().get(name).and_then(|c| self.decrypt(c.clone()))
}
}
if let Some(cookie_ref) = self.parent.get(name) {
let mut cookie = cookie_ref.clone();
if let Ok(value) = self.unseal(name, cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
}
None
}
impl<J: BorrowMut<CookieJar>> PrivateJar<J> {
/// Adds `cookie` to the parent jar. The cookie's value is encrypted with
/// authenticated encryption assuring confidentiality, integrity, and
/// authenticity.
@ -159,22 +100,24 @@ impl<J: BorrowMut<CookieJar>> PrivateJar<J> {
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.private_mut(&key).add(Cookie::new("name", "value"));
/// jar.private(&key).add(Cookie::new("name", "value"));
///
/// assert_ne!(jar.get("name").unwrap().value(), "value");
/// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
/// ```
pub fn add(&mut self, mut cookie: Cookie<'static>) {
self.encrypt_cookie(&mut cookie);
self.parent.borrow_mut().add(cookie);
// Add the sealed cookie to the parent.
self.parent.add(cookie);
}
/// Adds an "original" `cookie` to parent jar. The cookie's value is
/// encrypted with authenticated encryption assuring confidentiality,
/// integrity, and authenticity. Adding an original cookie does not affect
/// the [`CookieJar::delta()`] computation. This method is intended to be
/// used to seed the cookie jar with cookies received from a client's HTTP
/// message.
/// the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
@ -186,14 +129,48 @@ impl<J: BorrowMut<CookieJar>> PrivateJar<J> {
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.private_mut(&key).add_original(Cookie::new("name", "value"));
/// jar.private(&key).add_original(Cookie::new("name", "value"));
///
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
self.encrypt_cookie(&mut cookie);
self.parent.borrow_mut().add_original(cookie);
// Add the sealed cookie to the parent.
self.parent.add_original(cookie);
}
/// Encrypts the cookie's value with
/// authenticated encryption assuring confidentiality, integrity, and authenticity.
fn encrypt_cookie(&self, cookie: &mut Cookie) {
let mut data;
let output_len = {
// Create the `SealingKey` structure.
let key = SealingKey::new(ALGO, &self.key).expect("sealing key creation");
// Create a vec to hold the [nonce | cookie value | overhead].
let overhead = ALGO.tag_len();
let cookie_val = cookie.value().as_bytes();
data = vec![0; NONCE_LEN + cookie_val.len() + overhead];
// Randomly generate the nonce, then copy the cookie value as input.
let (nonce, in_out) = data.split_at_mut(NONCE_LEN);
SystemRandom::new().fill(nonce).expect("couldn't random fill nonce");
in_out[..cookie_val.len()].copy_from_slice(cookie_val);
let nonce = Nonce::try_assume_unique_for_key(nonce)
.expect("invalid length of `nonce`");
// Use cookie's name as associated data to prevent value swapping.
let ad = Aad::from(cookie.name().as_bytes());
// Perform the actual sealing operation and get the output length.
seal_in_place(&key, nonce, ad, in_out, overhead).expect("in-place seal")
};
// Base64 encode the nonce and encrypted value.
let sealed_value = base64::encode(&data[..(NONCE_LEN + output_len)]);
cookie.set_value(sealed_value);
}
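The sealed value written by `encrypt_cookie` above is `base64( nonce || ciphertext || tag )`, with the cookie's name bound in as associated data. A minimal sketch of undoing the outer layers (not part of the crate; the helper name and the 12-byte `NONCE_LEN` are assumptions matching AES-256-GCM, and the real `unseal` additionally opens and authenticates the ciphertext):

```rust
// Sketch only: peel the base64 and nonce off a sealed cookie value.
// Assumed layout: base64( nonce || ciphertext || tag ), 96-bit nonce.
const NONCE_LEN: usize = 12;

fn split_sealed(sealed_b64: &str) -> Option<(Vec<u8>, Vec<u8>)> {
    let data = base64::decode(sealed_b64).ok()?;
    if data.len() <= NONCE_LEN {
        return None; // too short to hold a nonce plus ciphertext and tag
    }
    let (nonce, sealed) = data.split_at(NONCE_LEN);
    Some((nonce.to_vec(), sealed.to_vec()))
}
```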
/// Removes `cookie` from the parent jar.
@ -201,8 +178,8 @@ impl<J: BorrowMut<CookieJar>> PrivateJar<J> {
/// For correct removal, the passed in `cookie` must contain the same `path`
/// and `domain` as the cookie that was initially set.
///
/// This is identical to [`CookieJar::remove()`]. See the method's
/// documentation for more details.
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
/// details.
///
/// # Example
///
@ -211,7 +188,7 @@ impl<J: BorrowMut<CookieJar>> PrivateJar<J> {
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut private_jar = jar.private_mut(&key);
/// let mut private_jar = jar.private(&key);
///
/// private_jar.add(Cookie::new("name", "value"));
/// assert!(private_jar.get("name").is_some());
@ -220,45 +197,25 @@ impl<J: BorrowMut<CookieJar>> PrivateJar<J> {
/// assert!(private_jar.get("name").is_none());
/// ```
pub fn remove(&mut self, cookie: Cookie<'static>) {
self.parent.borrow_mut().remove(cookie);
self.parent.remove(cookie);
}
}
#[cfg(test)]
mod test {
use crate::{CookieJar, Cookie, Key};
use {CookieJar, Cookie, Key};
#[test]
fn simple() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_simple_behaviour!(jar, jar.private_mut(&key));
assert_simple_behaviour!(jar, jar.private(&key));
}
#[test]
fn secure() {
fn private() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_secure_behaviour!(jar, jar.private_mut(&key));
}
#[test]
fn roundtrip() {
// Secret is SHA-256 hash of 'Super secret!' passed through HKDF-SHA256.
let key = Key::from(&[89, 202, 200, 125, 230, 90, 197, 245, 166, 249,
34, 169, 135, 31, 20, 197, 94, 154, 254, 79, 60, 26, 8, 143, 254,
24, 116, 138, 92, 225, 159, 60, 157, 41, 135, 129, 31, 226, 196, 16,
198, 168, 134, 4, 42, 1, 196, 24, 57, 103, 241, 147, 201, 185, 233,
10, 180, 170, 187, 89, 252, 137, 110, 107]);
let mut jar = CookieJar::new();
jar.add(Cookie::new("encrypted_with_ring014",
"lObeZJorGVyeSWUA8khTO/8UCzFVBY9g0MGU6/J3NN1R5x11dn2JIA=="));
jar.add(Cookie::new("encrypted_with_ring016",
"SU1ujceILyMBg3fReqRmA9HUtAIoSPZceOM/CUpObROHEujXIjonkA=="));
let private = jar.private(&key);
assert_eq!(private.get("encrypted_with_ring014").unwrap().value(), "Tamper-proof");
assert_eq!(private.get("encrypted_with_ring016").unwrap().value(), "Tamper-proof");
assert_secure_behaviour!(jar, jar.private(&key));
}
}

191 third_party/rust/cookie/src/secure/signed.rs (vendored)

@ -1,106 +1,54 @@
use std::convert::TryInto;
use std::borrow::{Borrow, BorrowMut};
use secure::ring::digest::{SHA256, Algorithm};
use secure::ring::hmac::{SigningKey, sign, verify_with_own_key as verify};
use secure::{base64, Key};
use sha2::Sha256;
use hmac::{Hmac, Mac};
use crate::secure::{base64, Key};
use crate::{Cookie, CookieJar};
use {Cookie, CookieJar};
// Keep these in sync, and keep the key len synced with the `signed` docs as
// well as the `KEYS_INFO` const in secure::Key.
pub(crate) const BASE64_DIGEST_LEN: usize = 44;
pub(crate) const KEY_LEN: usize = 32;
static HMAC_DIGEST: &'static Algorithm = &SHA256;
const BASE64_DIGEST_LEN: usize = 44;
pub const KEY_LEN: usize = 32;
/// A child cookie jar that authenticates its cookies.
///
/// A _signed_ child jar signs all the cookies added to it and verifies cookies
/// retrieved from it. Any cookies stored in a `SignedJar` are provided
/// integrity and authenticity. In other words, clients cannot tamper with the
/// contents of a cookie nor can they fabricate cookie values, but the data is
/// visible in plaintext.
#[cfg_attr(all(nightly, doc), doc(cfg(feature = "signed")))]
pub struct SignedJar<J> {
parent: J,
key: [u8; KEY_LEN],
/// retrieved from it. Any cookies stored in a `SignedJar` are assured integrity
/// and authenticity. In other words, clients cannot tamper with the contents of
/// a cookie nor can they fabricate cookie values, but the data is visible in
/// plaintext.
///
/// This type is only available when the `secure` feature is enabled.
pub struct SignedJar<'a> {
parent: &'a mut CookieJar,
key: SigningKey
}
impl<J> SignedJar<J> {
impl<'a> SignedJar<'a> {
/// Creates a new child `SignedJar` with parent `parent` and key `key`. This
/// method is typically called indirectly via the `signed{_mut}` methods of
/// method is typically called indirectly via the `signed` method of
/// `CookieJar`.
pub(crate) fn new(parent: J, key: &Key) -> SignedJar<J> {
SignedJar { parent, key: key.signing().try_into().expect("sign key len") }
}
/// Signs the cookie's value providing integrity and authenticity.
fn sign_cookie(&self, cookie: &mut Cookie) {
// Compute HMAC-SHA256 of the cookie's value.
let mut mac = Hmac::<Sha256>::new_from_slice(&self.key).expect("good key");
mac.update(cookie.value().as_bytes());
// Cookie's new value is [MAC | original-value].
let mut new_value = base64::encode(&mac.finalize().into_bytes());
new_value.push_str(cookie.value());
cookie.set_value(new_value);
#[doc(hidden)]
pub fn new(parent: &'a mut CookieJar, key: &Key) -> SignedJar<'a> {
SignedJar { parent: parent, key: SigningKey::new(HMAC_DIGEST, key.signing()) }
}
/// Given a signed value `str` where the signature is prepended to `value`,
/// verifies the signed value and returns it. If there's a problem, returns
/// an `Err` with a string describing the issue.
fn _verify(&self, cookie_value: &str) -> Result<String, &'static str> {
if !cookie_value.is_char_boundary(BASE64_DIGEST_LEN) {
return Err("missing or invalid digest");
fn verify(&self, cookie_value: &str) -> Result<String, &'static str> {
if cookie_value.len() < BASE64_DIGEST_LEN {
return Err("length of value is <= BASE64_DIGEST_LEN");
}
// Split [MAC | original-value] into its two parts.
let (digest_str, value) = cookie_value.split_at(BASE64_DIGEST_LEN);
let digest = base64::decode(digest_str).map_err(|_| "bad base64 digest")?;
let sig = base64::decode(digest_str).map_err(|_| "bad base64 digest")?;
// Perform the verification.
let mut mac = Hmac::<Sha256>::new_from_slice(&self.key).expect("good key");
mac.update(value.as_bytes());
mac.verify_slice(&digest)
verify(&self.key, value.as_bytes(), &sig)
.map(|_| value.to_string())
.map_err(|_| "value did not verify")
}
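The `[MAC | original-value]` layout checked above can be exercised on its own with the `hmac`/`sha2` crates that appear in this diff. A small round-trip sketch (the key, helper name, and 44-character prefix width are stated assumptions; 44 base64 characters encode a 32-byte HMAC-SHA256 tag):

```rust
use hmac::{Hmac, Mac};
use sha2::Sha256;

// Sketch: sign a value as base64(HMAC(key, value)) ++ value, then verify it.
fn roundtrip(key: &[u8; 32], value: &str) -> bool {
    // Sign: prepend the base64-encoded tag to the plaintext value.
    let mut mac = Hmac::<Sha256>::new_from_slice(key).expect("good key");
    mac.update(value.as_bytes());
    let signed = format!("{}{}", base64::encode(mac.finalize().into_bytes()), value);

    // Verify: split off the 44-character digest and check it against the rest.
    let (digest_b64, rest) = signed.split_at(44);
    let digest = match base64::decode(digest_b64) {
        Ok(d) => d,
        Err(_) => return false,
    };
    let mut mac = Hmac::<Sha256>::new_from_slice(key).expect("good key");
    mac.update(rest.as_bytes());
    mac.verify_slice(&digest).is_ok() && rest == value
}
```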
/// Verifies the authenticity and integrity of `cookie`, returning the
/// plaintext version if verification succeeds or `None` otherwise.
/// Verification _always_ succeeds if `cookie` was generated by a
/// `SignedJar` with the same key as `self`.
///
/// # Example
///
/// ```rust
/// use cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// assert!(jar.signed(&key).get("name").is_none());
///
/// jar.signed_mut(&key).add(Cookie::new("name", "value"));
/// assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value");
///
/// let plain = jar.get("name").cloned().unwrap();
/// assert_ne!(plain.value(), "value");
/// let verified = jar.signed(&key).verify(plain).unwrap();
/// assert_eq!(verified.value(), "value");
///
/// let plain = Cookie::new("plaintext", "hello");
/// assert!(jar.signed(&key).verify(plain).is_none());
/// ```
pub fn verify(&self, mut cookie: Cookie<'static>) -> Option<Cookie<'static>> {
if let Ok(value) = self._verify(cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
None
}
}
impl<J: Borrow<CookieJar>> SignedJar<J> {
/// Returns a reference to the `Cookie` inside this jar with the name `name`
/// and verifies the authenticity and integrity of the cookie's value,
/// returning a `Cookie` with the authenticated value. If the cookie cannot
@ -112,20 +60,25 @@ impl<J: Borrow<CookieJar>> SignedJar<J> {
/// use cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let jar = CookieJar::new();
/// assert!(jar.signed(&key).get("name").is_none());
/// let mut jar = CookieJar::new();
/// let mut signed_jar = jar.signed(&key);
/// assert!(signed_jar.get("name").is_none());
///
/// let mut jar = jar;
/// let mut signed_jar = jar.signed_mut(&key);
/// signed_jar.add(Cookie::new("name", "value"));
/// assert_eq!(signed_jar.get("name").unwrap().value(), "value");
/// ```
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
self.parent.borrow().get(name).and_then(|c| self.verify(c.clone()))
}
}
if let Some(cookie_ref) = self.parent.get(name) {
let mut cookie = cookie_ref.clone();
if let Ok(value) = self.verify(cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
}
None
}
impl<J: BorrowMut<CookieJar>> SignedJar<J> {
/// Adds `cookie` to the parent jar. The cookie's value is signed assuring
/// integrity and authenticity.
///
@ -136,7 +89,7 @@ impl<J: BorrowMut<CookieJar>> SignedJar<J> {
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.signed_mut(&key).add(Cookie::new("name", "value"));
/// jar.signed(&key).add(Cookie::new("name", "value"));
///
/// assert_ne!(jar.get("name").unwrap().value(), "value");
/// assert!(jar.get("name").unwrap().value().contains("value"));
@ -144,14 +97,14 @@ impl<J: BorrowMut<CookieJar>> SignedJar<J> {
/// ```
pub fn add(&mut self, mut cookie: Cookie<'static>) {
self.sign_cookie(&mut cookie);
self.parent.borrow_mut().add(cookie);
self.parent.add(cookie);
}
/// Adds an "original" `cookie` to this jar. The cookie's value is signed
/// assuring integrity and authenticity. Adding an original cookie does not
/// affect the [`CookieJar::delta()`] computation. This method is intended
/// to be used to seed the cookie jar with cookies received from a client's
/// HTTP message.
/// affect the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
@ -163,14 +116,22 @@ impl<J: BorrowMut<CookieJar>> SignedJar<J> {
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.signed_mut(&key).add_original(Cookie::new("name", "value"));
/// jar.signed(&key).add_original(Cookie::new("name", "value"));
///
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
self.sign_cookie(&mut cookie);
self.parent.borrow_mut().add_original(cookie);
self.parent.add_original(cookie);
}
/// Signs the cookie's value assuring integrity and authenticity.
fn sign_cookie(&self, cookie: &mut Cookie) {
let digest = sign(&self.key, cookie.value().as_bytes());
let mut new_value = base64::encode(digest.as_ref());
new_value.push_str(cookie.value());
cookie.set_value(new_value);
}
/// Removes `cookie` from the parent jar.
@ -178,8 +139,8 @@ impl<J: BorrowMut<CookieJar>> SignedJar<J> {
/// For correct removal, the passed in `cookie` must contain the same `path`
/// and `domain` as the cookie that was initially set.
///
/// This is identical to [`CookieJar::remove()`]. See the method's
/// documentation for more details.
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
/// details.
///
/// # Example
///
@ -188,7 +149,7 @@ impl<J: BorrowMut<CookieJar>> SignedJar<J> {
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut signed_jar = jar.signed_mut(&key);
/// let mut signed_jar = jar.signed(&key);
///
/// signed_jar.add(Cookie::new("name", "value"));
/// assert!(signed_jar.get("name").is_some());
@ -197,55 +158,25 @@ impl<J: BorrowMut<CookieJar>> SignedJar<J> {
/// assert!(signed_jar.get("name").is_none());
/// ```
pub fn remove(&mut self, cookie: Cookie<'static>) {
self.parent.borrow_mut().remove(cookie);
self.parent.remove(cookie);
}
}
#[cfg(test)]
mod test {
use crate::{CookieJar, Cookie, Key};
use {CookieJar, Cookie, Key};
#[test]
fn simple() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_simple_behaviour!(jar, jar.signed_mut(&key));
assert_simple_behaviour!(jar, jar.signed(&key));
}
#[test]
fn private() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_secure_behaviour!(jar, jar.signed_mut(&key));
}
#[test]
fn roundtrip() {
// Secret is SHA-256 hash of 'Super secret!' passed through HKDF-SHA256.
let key = Key::from(&[89, 202, 200, 125, 230, 90, 197, 245, 166, 249,
34, 169, 135, 31, 20, 197, 94, 154, 254, 79, 60, 26, 8, 143, 254,
24, 116, 138, 92, 225, 159, 60, 157, 41, 135, 129, 31, 226, 196, 16,
198, 168, 134, 4, 42, 1, 196, 24, 57, 103, 241, 147, 201, 185, 233,
10, 180, 170, 187, 89, 252, 137, 110, 107]);
let mut jar = CookieJar::new();
jar.add(Cookie::new("signed_with_ring014",
"3tdHXEQ2kf6fxC7dWzBGmpSLMtJenXLKrZ9cHkSsl1w=Tamper-proof"));
jar.add(Cookie::new("signed_with_ring016",
"3tdHXEQ2kf6fxC7dWzBGmpSLMtJenXLKrZ9cHkSsl1w=Tamper-proof"));
let signed = jar.signed(&key);
assert_eq!(signed.get("signed_with_ring014").unwrap().value(), "Tamper-proof");
assert_eq!(signed.get("signed_with_ring016").unwrap().value(), "Tamper-proof");
}
#[test]
fn issue_178() {
let data = "x=yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy£";
let c = Cookie::parse(data).expect("failed to parse cookie");
let key = Key::from(&[0u8; 64]);
let mut jar = CookieJar::new();
let signed = jar.signed_mut(&key);
assert!(signed.verify(c).is_none());
assert_secure_behaviour!(jar, jar.signed(&key));
}
}


@ -1 +0,0 @@
{"files":{"Cargo.toml":"aadc4e4ba33e86861d8d1d8b848ac11a27b6f87340d082b47f762387464c61ed","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"20c7855c364d57ea4c97889a5e8d98470a9952dade37bd9248b9a54431670e5e","src/lib.rs":"5d30edec687843447c97e4ea87583983eb9fc06135ae718c8ecc0fa8cebef2df"},"package":"5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"}

25 third_party/rust/form_urlencoded/LICENSE-MIT (vendored)

@ -1,25 +0,0 @@
Copyright (c) 2013-2016 The rust-url developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

420 third_party/rust/form_urlencoded/src/lib.rs (vendored)

@ -1,420 +0,0 @@
// Copyright 2013-2016 The rust-url developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Parser and serializer for the [`application/x-www-form-urlencoded` syntax](
//! http://url.spec.whatwg.org/#application/x-www-form-urlencoded),
//! as used by HTML forms.
//!
//! Converts between a string (such as an URL's query string)
//! and a sequence of (name, value) pairs.
#[macro_use]
extern crate matches;
use percent_encoding::{percent_decode, percent_encode_byte};
use std::borrow::{Borrow, Cow};
use std::str;
/// Convert a byte string in the `application/x-www-form-urlencoded` syntax
/// into an iterator of (name, value) pairs.
///
/// Use `parse(input.as_bytes())` to parse a `&str` string.
///
/// The names and values are percent-decoded. For instance, `%23first=%25try%25` will be
/// converted to `[("#first", "%try%")]`.
#[inline]
pub fn parse(input: &[u8]) -> Parse<'_> {
Parse { input }
}
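From a downstream crate's point of view, the doc comment above can be checked directly (a usage sketch; it relies on `into_owned`, defined further below):

```rust
// Usage sketch for `parse`: names and values come back percent-decoded.
fn demo() {
    let pairs: Vec<(String, String)> =
        form_urlencoded::parse(b"%23first=%25try%25&lang=rust")
            .into_owned()
            .collect();
    assert_eq!(pairs[0], ("#first".to_string(), "%try%".to_string()));
    assert_eq!(pairs[1], ("lang".to_string(), "rust".to_string()));
}
```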
/// The return type of `parse()`.
#[derive(Copy, Clone)]
pub struct Parse<'a> {
input: &'a [u8],
}
impl<'a> Iterator for Parse<'a> {
type Item = (Cow<'a, str>, Cow<'a, str>);
fn next(&mut self) -> Option<Self::Item> {
loop {
if self.input.is_empty() {
return None;
}
let mut split2 = self.input.splitn(2, |&b| b == b'&');
let sequence = split2.next().unwrap();
self.input = split2.next().unwrap_or(&[][..]);
if sequence.is_empty() {
continue;
}
let mut split2 = sequence.splitn(2, |&b| b == b'=');
let name = split2.next().unwrap();
let value = split2.next().unwrap_or(&[][..]);
return Some((decode(name), decode(value)));
}
}
}
fn decode(input: &[u8]) -> Cow<'_, str> {
let replaced = replace_plus(input);
decode_utf8_lossy(match percent_decode(&replaced).into() {
Cow::Owned(vec) => Cow::Owned(vec),
Cow::Borrowed(_) => replaced,
})
}
/// Replace b'+' with b' '
fn replace_plus(input: &[u8]) -> Cow<'_, [u8]> {
match input.iter().position(|&b| b == b'+') {
None => Cow::Borrowed(input),
Some(first_position) => {
let mut replaced = input.to_owned();
replaced[first_position] = b' ';
for byte in &mut replaced[first_position + 1..] {
if *byte == b'+' {
*byte = b' ';
}
}
Cow::Owned(replaced)
}
}
}
impl<'a> Parse<'a> {
/// Return a new iterator that yields pairs of `String` instead of pairs of `Cow<str>`.
pub fn into_owned(self) -> ParseIntoOwned<'a> {
ParseIntoOwned { inner: self }
}
}
/// Like `Parse`, but yields pairs of `String` instead of pairs of `Cow<str>`.
pub struct ParseIntoOwned<'a> {
inner: Parse<'a>,
}
impl<'a> Iterator for ParseIntoOwned<'a> {
type Item = (String, String);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.next()
.map(|(k, v)| (k.into_owned(), v.into_owned()))
}
}
/// The [`application/x-www-form-urlencoded` byte serializer](
/// https://url.spec.whatwg.org/#concept-urlencoded-byte-serializer).
///
/// Return an iterator of `&str` slices.
pub fn byte_serialize(input: &[u8]) -> ByteSerialize<'_> {
ByteSerialize { bytes: input }
}
/// Return value of `byte_serialize()`.
#[derive(Debug)]
pub struct ByteSerialize<'a> {
bytes: &'a [u8],
}
fn byte_serialized_unchanged(byte: u8) -> bool {
matches!(byte, b'*' | b'-' | b'.' | b'0' ..= b'9' | b'A' ..= b'Z' | b'_' | b'a' ..= b'z')
}
impl<'a> Iterator for ByteSerialize<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
if let Some((&first, tail)) = self.bytes.split_first() {
if !byte_serialized_unchanged(first) {
self.bytes = tail;
return Some(if first == b' ' {
"+"
} else {
percent_encode_byte(first)
});
}
let position = tail.iter().position(|&b| !byte_serialized_unchanged(b));
let (unchanged_slice, remaining) = match position {
// 1 for first_byte + i unchanged in tail
Some(i) => self.bytes.split_at(1 + i),
None => (self.bytes, &[][..]),
};
self.bytes = remaining;
// This unsafe is appropriate because we have already checked these
// bytes in byte_serialized_unchanged, which checks for a subset
// of UTF-8. So we know these bytes are valid UTF-8, and doing
// another UTF-8 check would be wasteful.
Some(unsafe { str::from_utf8_unchecked(unchanged_slice) })
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
if self.bytes.is_empty() {
(0, Some(0))
} else {
(1, Some(self.bytes.len()))
}
}
}
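A quick illustration of the byte serializer (usage sketch; the input and expected output here are illustrative):

```rust
// Usage sketch: spaces become '+', other reserved bytes are percent-encoded.
fn demo() {
    let s: String = form_urlencoded::byte_serialize(b"hello world & rust").collect();
    assert_eq!(s, "hello+world+%26+rust");
}
```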
/// The [`application/x-www-form-urlencoded` serializer](
/// https://url.spec.whatwg.org/#concept-urlencoded-serializer).
pub struct Serializer<'a, T: Target> {
target: Option<T>,
start_position: usize,
encoding: EncodingOverride<'a>,
}
pub trait Target {
fn as_mut_string(&mut self) -> &mut String;
fn finish(self) -> Self::Finished;
type Finished;
}
impl Target for String {
fn as_mut_string(&mut self) -> &mut String {
self
}
fn finish(self) -> Self {
self
}
type Finished = Self;
}
impl<'a> Target for &'a mut String {
fn as_mut_string(&mut self) -> &mut String {
&mut **self
}
fn finish(self) -> Self {
self
}
type Finished = Self;
}
impl<'a, T: Target> Serializer<'a, T> {
/// Create a new `application/x-www-form-urlencoded` serializer for the given target.
///
/// If the target is non-empty,
/// its content is assumed to already be in `application/x-www-form-urlencoded` syntax.
pub fn new(target: T) -> Self {
Self::for_suffix(target, 0)
}
/// Create a new `application/x-www-form-urlencoded` serializer
/// for a suffix of the given target.
///
/// If that suffix is non-empty,
/// its content is assumed to already be in `application/x-www-form-urlencoded` syntax.
pub fn for_suffix(mut target: T, start_position: usize) -> Self {
if target.as_mut_string().len() < start_position {
panic!(
"invalid length {} for target of length {}",
start_position,
target.as_mut_string().len()
);
}
Serializer {
target: Some(target),
start_position,
encoding: None,
}
}
/// Remove any existing name/value pair.
///
/// Panics if called after `.finish()`.
pub fn clear(&mut self) -> &mut Self {
string(&mut self.target).truncate(self.start_position);
self
}
/// Set the character encoding to be used for names and values before percent-encoding.
pub fn encoding_override(&mut self, new: EncodingOverride<'a>) -> &mut Self {
self.encoding = new;
self
}
/// Serialize and append a name/value pair.
///
/// Panics if called after `.finish()`.
pub fn append_pair(&mut self, name: &str, value: &str) -> &mut Self {
append_pair(
string(&mut self.target),
self.start_position,
self.encoding,
name,
value,
);
self
}
/// Serialize and append a name of parameter without any value.
///
/// Panics if called after `.finish()`.
pub fn append_key_only(&mut self, name: &str) -> &mut Self {
append_key_only(
string(&mut self.target),
self.start_position,
self.encoding,
name,
);
self
}
/// Serialize and append a number of name/value pairs.
///
/// This simply calls `append_pair` repeatedly.
/// This can be more convenient, so the user doesn't need to introduce a block
/// to limit the scope of `Serializer`'s borrow of its string.
///
/// Panics if called after `.finish()`.
pub fn extend_pairs<I, K, V>(&mut self, iter: I) -> &mut Self
where
I: IntoIterator,
I::Item: Borrow<(K, V)>,
K: AsRef<str>,
V: AsRef<str>,
{
{
let string = string(&mut self.target);
for pair in iter {
let &(ref k, ref v) = pair.borrow();
append_pair(
string,
self.start_position,
self.encoding,
k.as_ref(),
v.as_ref(),
);
}
}
self
}
/// Serialize and append a number of names without values.
///
/// This simply calls `append_key_only` repeatedly.
/// This can be more convenient, so the user doesn't need to introduce a block
/// to limit the scope of `Serializer`'s borrow of its string.
///
/// Panics if called after `.finish()`.
pub fn extend_keys_only<I, K>(&mut self, iter: I) -> &mut Self
where
I: IntoIterator,
I::Item: Borrow<K>,
K: AsRef<str>,
{
{
let string = string(&mut self.target);
for key in iter {
let k = key.borrow().as_ref();
append_key_only(string, self.start_position, self.encoding, k);
}
}
self
}
/// If this serializer was constructed with a string, take and return that string.
///
/// ```rust
/// use form_urlencoded;
/// let encoded: String = form_urlencoded::Serializer::new(String::new())
/// .append_pair("foo", "bar & baz")
/// .append_pair("saison", "Été+hiver")
/// .finish();
/// assert_eq!(encoded, "foo=bar+%26+baz&saison=%C3%89t%C3%A9%2Bhiver");
/// ```
///
/// Panics if called more than once.
pub fn finish(&mut self) -> T::Finished {
self.target
.take()
.expect("url::form_urlencoded::Serializer double finish")
.finish()
}
}
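Putting the builder methods above together (usage sketch; the pairs and expected output are illustrative):

```rust
// Usage sketch: `extend_pairs` drives `append_pair` once per item.
fn demo() {
    let encoded = form_urlencoded::Serializer::new(String::new())
        .extend_pairs([("q", "rust lang"), ("page", "2")])
        .append_key_only("debug")
        .finish();
    assert_eq!(encoded, "q=rust+lang&page=2&debug");
}
```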
fn append_separator_if_needed(string: &mut String, start_position: usize) {
if string.len() > start_position {
string.push('&')
}
}
fn string<T: Target>(target: &mut Option<T>) -> &mut String {
target
.as_mut()
.expect("url::form_urlencoded::Serializer finished")
.as_mut_string()
}
fn append_pair(
string: &mut String,
start_position: usize,
encoding: EncodingOverride<'_>,
name: &str,
value: &str,
) {
append_separator_if_needed(string, start_position);
append_encoded(name, string, encoding);
string.push('=');
append_encoded(value, string, encoding);
}
fn append_key_only(
string: &mut String,
start_position: usize,
encoding: EncodingOverride,
name: &str,
) {
append_separator_if_needed(string, start_position);
append_encoded(name, string, encoding);
}
fn append_encoded(s: &str, string: &mut String, encoding: EncodingOverride<'_>) {
string.extend(byte_serialize(&encode(encoding, s)))
}
pub(crate) fn encode<'a>(encoding_override: EncodingOverride<'_>, input: &'a str) -> Cow<'a, [u8]> {
if let Some(o) = encoding_override {
return o(input);
}
input.as_bytes().into()
}
pub(crate) fn decode_utf8_lossy(input: Cow<'_, [u8]>) -> Cow<'_, str> {
// Note: This function is duplicated in `percent_encoding/lib.rs`.
match input {
Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
Cow::Owned(bytes) => {
match String::from_utf8_lossy(&bytes) {
Cow::Borrowed(utf8) => {
// If from_utf8_lossy returns a Cow::Borrowed, then we can
// be sure our original bytes were valid UTF-8. This is because
// if the bytes were invalid UTF-8 from_utf8_lossy would have
// to allocate a new owned string to back the Cow so it could
// replace invalid bytes with a placeholder.
// First we do a debug_assert to confirm our description above.
let raw_utf8: *const [u8];
raw_utf8 = utf8.as_bytes();
debug_assert!(raw_utf8 == &*bytes as *const [u8]);
// Given we know the original input bytes are valid UTF-8,
// and we have ownership of those bytes, we re-use them and
// return a Cow::Owned here.
Cow::Owned(unsafe { String::from_utf8_unchecked(bytes) })
}
Cow::Owned(s) => Cow::Owned(s),
}
}
}
}
pub type EncodingOverride<'a> = Option<&'a dyn Fn(&str) -> Cow<'_, [u8]>>;

2 third_party/rust/h2/.cargo-checksum.json (vendored)
File diff suppressed because one or more lines are too long.

87 third_party/rust/h2/CHANGELOG.md (vendored)

@ -1,90 +1,3 @@
# 0.3.13 (March 31, 2022)
* Update private internal `tokio-util` dependency.
# 0.3.12 (March 9, 2022)
* Avoid time operations that can panic (#599)
* Bump MSRV to Rust 1.49 (#606)
* Fix header decoding error when a header name is contained at a continuation
header boundary (#589)
* Remove I/O type names from handshake `tracing` spans (#608)
# 0.3.11 (January 26, 2022)
* Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596)
* Fix panic when receiving already reset push promise (#597)
# 0.3.10 (January 6, 2022)
* Add `Error::is_go_away()` and `Error::is_remote()` methods.
* Fix panic if receiving malformed PUSH_PROMISE with stream ID of 0.
# 0.3.9 (December 9, 2021)
* Fix hang related to new `max_send_buffer_size`.
# 0.3.8 (December 8, 2021)
* Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream.
* Add `max_send_buffer_size` options to client and server builders, and a default of ~400MB. This acts like a high-water mark for the `poll_capacity()` method.
* Fix panic if receiving malformed HEADERS with stream ID of 0.
# 0.3.7 (October 22, 2021)
* Fix panic if server sends a malformed frame on a stream client was about to open.
* Fix server to treat `:status` in a request as a stream error instead of connection error.
# 0.3.6 (September 30, 2021)
* Fix regression of `h2::Error` that were created via `From<h2::Reason>` not returning their reason code in `Error::reason()`.
# 0.3.5 (September 29, 2021)
* Fix sending of very large headers. Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly.
* Fix buffered data field to be a bigger integer size.
* Refactor error format to include what initiated the error (remote, local, or user), if it was a stream or connection-level error, and any received debug data.
# 0.3.4 (August 20, 2021)
* Fix panic when encoding header size update over a certain size.
* Fix `SendRequest` to wake up connection when dropped.
* Fix potential hang if `RecvStream` is placed in the request or response `extensions`.
* Stop calling `Instant::now` if zero reset streams are configured.
# 0.3.3 (April 29, 2021)
* Fix client being able to make `CONNECT` requests without a `:path`.
* Expose `RecvStream::poll_data`.
* Fix some docs.
# 0.3.2 (March 24, 2021)
* Fix incorrect handling of received 1xx responses on the client when the request body is still streaming.
# 0.3.1 (February 26, 2021)
* Add `Connection::max_concurrent_recv_streams()` getter.
* Add `Connection::max_concurrent_send_streams()` getter.
* Fix client to ignore receipt of 1xx headers frames.
* Fix incorrect calculation of pseudo header lengths when determining if a received header is too big.
* Reduce monomorphized code size of internal code.
# 0.3.0 (December 23, 2020)
* Update to Tokio v1 and Bytes v1.
* Disable `tracing`'s `log` feature. (It can still be enabled by a user in their own `Cargo.toml`.)
# 0.2.7 (October 22, 2020)
* Fix stream ref count when sending a push promise
* Fix receiving empty DATA frames in response to a HEAD request
* Fix handling of client disabling SERVER_PUSH
# 0.2.6 (July 13, 2020)
* Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.)
# 0.2.5 (May 6, 2020)
* Fix rare debug assert failure in store shutdown.

721 third_party/rust/h2/Cargo.lock (generated, vendored)
Diff not shown because of its large size.

90 third_party/rust/h2/Cargo.toml (vendored)

@ -3,45 +3,28 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
# to registry (e.g., crates.io) dependencies
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "h2"
version = "0.3.13"
authors = [
"Carl Lerche <me@carllerche.com>",
"Sean McArthur <sean@seanmonstar.com>",
]
exclude = [
"fixtures/**",
"ci/**",
]
description = "An HTTP/2 client and server"
documentation = "https://docs.rs/h2"
version = "0.2.5"
authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"]
exclude = ["fixtures/**", "ci/**"]
description = "An HTTP/2.0 client and server"
documentation = "https://docs.rs/h2/0.2.5/h2/"
readme = "README.md"
keywords = [
"http",
"async",
"non-blocking",
]
categories = [
"asynchronous",
"web-programming",
"network-programming",
]
keywords = ["http", "async", "non-blocking"]
categories = ["asynchronous", "web-programming", "network-programming"]
license = "MIT"
repository = "https://github.com/hyperium/h2"
[package.metadata.docs.rs]
features = ["stream"]
[dependencies.bytes]
version = "1"
version = "0.5.2"
[dependencies.fnv]
version = "1.0.5"
@ -62,38 +45,37 @@ default-features = false
version = "0.2"
[dependencies.indexmap]
version = "1.5.2"
features = ["std"]
version = "1.0"
[dependencies.log]
version = "0.4.1"
[dependencies.slab]
version = "0.4.2"
version = "0.4.0"
[dependencies.tokio]
version = "1"
version = "0.2"
features = ["io-util"]
[dependencies.tokio-util]
version = "0.7.1"
version = "0.3.1"
features = ["codec"]
[dependencies.tracing]
version = "0.1.21"
features = ["std"]
default-features = false
[dev-dependencies.env_logger]
version = "0.9"
version = "0.5.3"
default-features = false
[dev-dependencies.hex]
version = "0.4.3"
version = "0.2.0"
[dev-dependencies.quickcheck]
version = "1.0.3"
version = "0.4.1"
default-features = false
[dev-dependencies.rand]
version = "0.8.4"
version = "0.3.15"
[dev-dependencies.rustls]
version = "0.16"
[dev-dependencies.serde]
version = "1.0.0"
@ -102,22 +84,20 @@ version = "1.0.0"
version = "1.0.0"
[dev-dependencies.tokio]
version = "1"
features = [
"rt-multi-thread",
"macros",
"sync",
"net",
]
version = "0.2"
features = ["dns", "macros", "rt-core", "sync", "tcp"]
[dev-dependencies.tokio-rustls]
version = "0.23.2"
version = "0.12.0"
[dev-dependencies.walkdir]
version = "2.3.2"
version = "1.0.0"
[dev-dependencies.webpki]
version = "0.21"
[dev-dependencies.webpki-roots]
version = "0.22.2"
version = "0.17"
[features]
stream = []

17 third_party/rust/h2/README.md (vendored)

@ -1,6 +1,6 @@
# H2
A Tokio aware, HTTP/2 client & server implementation for Rust.
A Tokio aware, HTTP/2.0 client & server implementation for Rust.
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2)
@ -12,23 +12,24 @@ More information about this crate can be found in the [crate documentation][dox]
## Features
* Client and server HTTP/2 implementation.
* Implements the full HTTP/2 specification.
* Client and server HTTP/2.0 implementation.
* Implements the full HTTP/2.0 specification.
* Passes [h2spec](https://github.com/summerwind/h2spec).
* Focus on performance and correctness.
* Built on [Tokio](https://tokio.rs).
## Non goals
This crate is intended to only be an implementation of the HTTP/2
This crate is intended to only be an implementation of the HTTP/2.0
specification. It does not handle:
* Managing TCP connections
* HTTP 1.0 upgrade
* TLS
* Any feature not described by the HTTP/2 specification.
* Any feature not described by the HTTP/2.0 specification.
This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features.
The intent is that this crate will eventually be used by
[hyper](https://github.com/hyperium/hyper), which will provide all of these features.
## Usage
@ -36,7 +37,7 @@ To use `h2`, first add this to your `Cargo.toml`:
```toml
[dependencies]
h2 = "0.3"
h2 = "0.2"
```
Next, add this to your crate:
@ -55,7 +56,7 @@ fn main() {
**How does h2 compare to [solicit] or [rust-http2]?**
The h2 library has implemented more of the details of the HTTP/2 specification
The h2 library has implemented more of the details of the HTTP/2.0 specification
than any other Rust library. It also passes the [h2spec] set of tests. The h2
library is rapidly approaching "production ready" quality.

24 third_party/rust/h2/examples/akamai.rs (vendored)

@ -3,9 +3,9 @@ use http::{Method, Request};
use tokio::net::TcpStream;
use tokio_rustls::TlsConnector;
use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName};
use rustls::Session;
use webpki::DNSNameRef;
use std::convert::TryFrom;
use std::error::Error;
use std::net::ToSocketAddrs;
@ -16,19 +16,9 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
let _ = env_logger::try_init();
let tls_client_config = std::sync::Arc::new({
let mut root_store = RootCertStore::empty();
root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| {
OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject,
ta.spki,
ta.name_constraints,
)
}));
let mut c = tokio_rustls::rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
let mut c = rustls::ClientConfig::new();
c.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned());
c
});
@ -43,13 +33,13 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
println!("ADDR: {:?}", addr);
let tcp = TcpStream::connect(&addr).await?;
let dns_name = ServerName::try_from("http2.akamai.com").unwrap();
let dns_name = DNSNameRef::try_from_ascii_str("http2.akamai.com").unwrap();
let connector = TlsConnector::from(tls_client_config);
let res = connector.connect(dns_name, tcp).await;
let tls = res.unwrap();
{
let (_, session) = tls.get_ref();
let negotiated_protocol = session.alpn_protocol();
let negotiated_protocol = session.get_alpn_protocol();
assert_eq!(
Some(ALPN_H2.as_bytes()),
negotiated_protocol.as_ref().map(|x| &**x)

47 third_party/rust/h2/examples/server.rs (vendored)

@ -1,23 +1,21 @@
use std::error::Error;
use bytes::Bytes;
use h2::server::{self, SendResponse};
use h2::RecvStream;
use http::Request;
use h2::server;
use tokio::net::{TcpListener, TcpStream};
#[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
let _ = env_logger::try_init();
let listener = TcpListener::bind("127.0.0.1:5928").await?;
let mut listener = TcpListener::bind("127.0.0.1:5928").await?;
println!("listening on {:?}", listener.local_addr());
loop {
if let Ok((socket, _peer_addr)) = listener.accept().await {
tokio::spawn(async move {
if let Err(e) = serve(socket).await {
if let Err(e) = handle(socket).await {
println!(" -> err={:?}", e);
}
});
@ -25,41 +23,22 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
}
}
async fn serve(socket: TcpStream) -> Result<(), Box<dyn Error + Send + Sync>> {
async fn handle(socket: TcpStream) -> Result<(), Box<dyn Error + Send + Sync>> {
let mut connection = server::handshake(socket).await?;
println!("H2 connection bound");
while let Some(result) = connection.accept().await {
let (request, respond) = result?;
tokio::spawn(async move {
if let Err(e) = handle_request(request, respond).await {
println!("error while handling request: {}", e);
}
});
let (request, mut respond) = result?;
println!("GOT request: {:?}", request);
let response = http::Response::new(());
let mut send = respond.send_response(response, false)?;
println!(">>>> sending data");
send.send_data(Bytes::from_static(b"hello world"), true)?;
}
println!("~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~");
Ok(())
}
async fn handle_request(
mut request: Request<RecvStream>,
mut respond: SendResponse<Bytes>,
) -> Result<(), Box<dyn Error + Send + Sync>> {
println!("GOT request: {:?}", request);
let body = request.body_mut();
while let Some(data) = body.data().await {
let data = data?;
println!("<<<< recv {:?}", data);
let _ = body.flow_control().release_capacity(data.len());
}
let response = http::Response::new(());
let mut send = respond.send_response(response, false)?;
println!(">>>> send");
send.send_data(Bytes::from_static(b"hello "), false)?;
send.send_data(Bytes::from_static(b"world\n"), true)?;
println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~");
Ok(())
}

203 third_party/rust/h2/src/client.rs (vendored)

@ -1,18 +1,18 @@
//! Client implementation of the HTTP/2 protocol.
//! Client implementation of the HTTP/2.0 protocol.
//!
//! # Getting started
//!
//! Running an HTTP/2 client requires the caller to establish the underlying
//! Running an HTTP/2.0 client requires the caller to establish the underlying
//! connection as well as get the connection to a state that is ready to begin
//! the HTTP/2 handshake. See [here](../index.html#handshake) for more
//! the HTTP/2.0 handshake. See [here](../index.html#handshake) for more
//! details.
//!
//! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote
//! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades.
//!
//! Once a connection is obtained, it is passed to [`handshake`], which will
//! begin the [HTTP/2 handshake]. This returns a future that completes once
//! the handshake process is performed and HTTP/2 streams may be initialized.
//! begin the [HTTP/2.0 handshake]. This returns a future that completes once
//! the handshake process is performed and HTTP/2.0 streams may be initialized.
//!
//! [`handshake`] uses default configuration values. There are a number of
//! settings that can be changed by using [`Builder`] instead.
@ -26,16 +26,16 @@
//! # Making requests
//!
//! Requests are made using the [`SendRequest`] handle provided by the handshake
//! future. Once a request is submitted, an HTTP/2 stream is initialized and
//! future. Once a request is submitted, an HTTP/2.0 stream is initialized and
//! the request is sent to the server.
//!
//! A request body and request trailers are sent using [`SendRequest`] and the
//! server's response is returned once the [`ResponseFuture`] future completes.
//! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by
//! [`SendRequest::send_request`] and are tied to the HTTP/2 stream
//! [`SendRequest::send_request`] and are tied to the HTTP/2.0 stream
//! initialized by the sent request.
//!
//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2
//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2.0
//! stream can be created, i.e. as long as the current number of active streams
//! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the
//! caller will be notified once an existing stream closes, freeing capacity for
@ -131,14 +131,13 @@
//! [`SendRequest`]: struct.SendRequest.html
//! [`ResponseFuture`]: struct.ResponseFuture.html
//! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready
//! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
//! [`Builder`]: struct.Builder.html
//! [`Error`]: ../struct.Error.html
use crate::codec::{Codec, SendError, UserError};
use crate::ext::Protocol;
use crate::codec::{Codec, RecvError, SendError, UserError};
use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId};
use crate::proto::{self, Error};
use crate::proto;
use crate::{FlowControl, PingPong, RecvStream, SendStream};
use bytes::{Buf, Bytes};
@ -150,9 +149,8 @@ use std::task::{Context, Poll};
use std::time::Duration;
use std::usize;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tracing::Instrument;
/// Initializes new HTTP/2 streams on a connection by sending a request.
/// Initializes new HTTP/2.0 streams on a connection by sending a request.
///
/// This type does no work itself. Instead, it is a handle to the inner
/// connection state held by [`Connection`]. If the associated connection
@ -162,7 +160,7 @@ use tracing::Instrument;
/// / threads than their associated [`Connection`] instance. Internally, there
/// is a buffer used to stage requests before they get written to the
/// connection. There is no guarantee that requests get written to the
/// connection in FIFO order as HTTP/2 prioritization logic can play a role.
/// connection in FIFO order as HTTP/2.0 prioritization logic can play a role.
///
/// [`SendRequest`] implements [`Clone`], enabling the creation of many
/// instances that are backed by a single connection.
@ -185,10 +183,10 @@ pub struct ReadySendRequest<B: Buf> {
inner: Option<SendRequest<B>>,
}
/// Manages all state associated with an HTTP/2 client connection.
/// Manages all state associated with an HTTP/2.0 client connection.
///
/// A `Connection` is backed by an I/O resource (usually a TCP socket) and
/// implements the HTTP/2 client logic for that connection. It is responsible
/// implements the HTTP/2.0 client logic for that connection. It is responsible
/// for driving the internal state forward, performing the work requested of the
/// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`],
/// [`RecvStream`]).
@ -221,7 +219,7 @@ pub struct ReadySendRequest<B: Buf> {
/// // Submit the connection handle to an executor.
/// tokio::spawn(async { connection.await.expect("connection failed"); });
///
/// // Now, use `send_request` to initialize HTTP/2 streams.
/// // Now, use `send_request` to initialize HTTP/2.0 streams.
/// // ...
/// # Ok(())
/// # }
@ -275,7 +273,7 @@ pub struct PushPromises {
/// Methods can be chained in order to set the configuration values.
///
/// The client is constructed by calling [`handshake`] and passing the I/O
/// handle that will back the HTTP/2 server.
/// handle that will back the HTTP/2.0 server.
///
/// New instances of `Builder` are obtained via [`Builder::new`].
///
@ -295,7 +293,7 @@ pub struct PushPromises {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .initial_window_size(1_000_000)
@ -320,9 +318,6 @@ pub struct Builder {
/// Initial target window size for new connections.
initial_target_connection_window_size: Option<u32>,
/// Maximum amount of bytes to "buffer" for writing per stream.
max_send_buffer_size: usize,
/// Maximum number of locally reset streams to keep at a time.
reset_stream_max: usize,
@ -343,7 +338,7 @@ impl<B> SendRequest<B>
where
B: Buf + 'static,
{
/// Returns `Ready` when the connection can initialize a new HTTP/2
/// Returns `Ready` when the connection can initialize a new HTTP/2.0
/// stream.
///
/// This function must return `Ready` before `send_request` is called. When
@ -391,16 +386,16 @@ where
ReadySendRequest { inner: Some(self) }
}
/// Sends a HTTP/2 request to the server.
/// Sends a HTTP/2.0 request to the server.
///
/// `send_request` initializes a new HTTP/2 stream on the associated
/// `send_request` initializes a new HTTP/2.0 stream on the associated
/// connection, then sends the given request using this new stream. Only the
/// request head is sent.
///
/// On success, a [`ResponseFuture`] instance and [`SendStream`] instance
/// are returned. The [`ResponseFuture`] instance is used to get the
/// server's response and the [`SendStream`] instance is used to send a
/// request body or trailers to the server over the same HTTP/2 stream.
/// request body or trailers to the server over the same HTTP/2.0 stream.
///
/// To send a request body or trailers, set `end_of_stream` to `false`.
/// Then, use the returned [`SendStream`] instance to stream request body
@ -521,19 +516,6 @@ where
(response, stream)
})
}
/// Returns whether the [extended CONNECT protocol][1] is enabled or not.
///
/// This setting is configured by the server peer by sending the
/// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
    /// This method returns the currently acknowledged value received from the
/// remote.
///
/// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
/// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
pub fn is_extended_connect_protocol_enabled(&self) -> bool {
self.inner.is_extended_connect_protocol_enabled()
}
}
impl<B> fmt::Debug for SendRequest<B>
@ -618,7 +600,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .initial_window_size(1_000_000)
@ -631,7 +613,6 @@ impl Builder {
/// ```
pub fn new() -> Builder {
Builder {
max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE,
reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS),
reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX,
initial_target_connection_window_size: None,
@ -661,7 +642,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .initial_window_size(1_000_000)
@ -696,7 +677,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .initial_connection_window_size(1_000_000)
@ -711,7 +692,7 @@ impl Builder {
self
}
/// Indicates the size (in octets) of the largest HTTP/2 frame payload that the
/// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the
/// configured client is able to accept.
///
/// The sender may send data frames that are **smaller** than this value,
@ -730,7 +711,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .max_frame_size(1_000_000)
@ -770,7 +751,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .max_header_list_size(16 * 1024)
@ -805,7 +786,7 @@ impl Builder {
/// a protocol level error. Instead, the `h2` library will immediately reset
/// the stream.
///
/// See [Section 5.1.2] in the HTTP/2 spec for more details.
/// See [Section 5.1.2] in the HTTP/2.0 spec for more details.
///
/// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
///
@ -819,7 +800,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .max_concurrent_streams(1000)
@ -846,7 +827,7 @@ impl Builder {
/// Sending streams past the limit returned by the peer will be treated
/// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM.
///
/// See [Section 5.1.2] in the HTTP/2 spec for more details.
/// See [Section 5.1.2] in the HTTP/2.0 spec for more details.
///
/// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
///
@ -860,7 +841,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .initial_max_send_streams(1000)
@ -877,7 +858,7 @@ impl Builder {
/// Sets the maximum number of concurrent locally reset streams.
///
/// When a stream is explicitly reset, the HTTP/2 specification requires
/// When a stream is explicitly reset, the HTTP/2.0 specification requires
/// that any further frames received for that stream must be ignored for
/// "some time".
///
@ -905,7 +886,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .max_concurrent_reset_streams(1000)
@ -922,7 +903,7 @@ impl Builder {
/// Sets the duration to remember locally reset streams.
///
/// When a stream is explicitly reset, the HTTP/2 specification requires
/// When a stream is explicitly reset, the HTTP/2.0 specification requires
/// that any further frames received for that stream must be ignored for
/// "some time".
///
@ -951,7 +932,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .reset_stream_duration(Duration::from_secs(10))
@ -966,24 +947,6 @@ impl Builder {
self
}
/// Sets the maximum send buffer size per stream.
///
/// Once a stream has buffered up to (or over) the maximum, the stream's
/// flow control will not "poll" additional capacity. Once bytes for the
/// stream have been written to the connection, the send buffer capacity
/// will be freed up again.
///
/// The default is currently ~400MB, but may change.
///
/// # Panics
///
/// This function panics if `max` is larger than `u32::MAX`.
pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.max_send_buffer_size = max;
self
}
/// Enables or disables server push promises.
///
/// This value is included in the initial SETTINGS handshake. When set, the
@ -991,7 +954,7 @@ impl Builder {
/// false in the initial SETTINGS handshake guarantees that the remote server
/// will never send a push promise.
///
/// This setting can be changed during the life of a single HTTP/2
/// This setting can be changed during the life of a single HTTP/2.0
/// connection by sending another settings frame updating the value.
///
/// Default value: `true`.
@ -1007,7 +970,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .enable_push(false)
@ -1033,22 +996,22 @@ impl Builder {
self
}
/// Creates a new configured HTTP/2 client backed by `io`.
/// Creates a new configured HTTP/2.0 client backed by `io`.
///
/// It is expected that `io` already be in an appropriate state to commence
/// the [HTTP/2 handshake]. The handshake is completed once both the connection
/// the [HTTP/2.0 handshake]. The handshake is completed once both the connection
/// preface and the initial settings frame is sent by the client.
///
/// The handshake future does not wait for the initial settings frame from the
/// server.
///
/// Returns a future which resolves to the [`Connection`] / [`SendRequest`]
/// tuple once the HTTP/2 handshake has been completed.
/// tuple once the HTTP/2.0 handshake has been completed.
///
/// This function also allows the caller to configure the send payload data
/// type. See [Outbound data type] for more details.
///
/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
/// [`Connection`]: struct.Connection.html
/// [`SendRequest`]: struct.SendRequest.html
/// [Outbound data type]: ../index.html#outbound-data-type.
@ -1065,7 +1028,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .handshake(my_io);
@ -1085,7 +1048,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<&'static [u8]>, Connection<T, &'static [u8]>)), h2::Error>
/// # {
/// // `client_fut` is a future representing the completion of the HTTP/2
/// // `client_fut` is a future representing the completion of the HTTP/2.0
/// // handshake.
/// let client_fut = Builder::new()
/// .handshake::<_, &'static [u8]>(my_io);
@ -1112,19 +1075,19 @@ impl Default for Builder {
}
}
/// Creates a new configured HTTP/2 client with default configuration
/// Creates a new configured HTTP/2.0 client with default configuration
/// values backed by `io`.
///
/// It is expected that `io` already be in an appropriate state to commence
/// the [HTTP/2 handshake]. See [Handshake] for more details.
/// the [HTTP/2.0 handshake]. See [Handshake] for more details.
///
/// Returns a future which resolves to the [`Connection`] / [`SendRequest`]
/// tuple once the HTTP/2 handshake has been completed. The returned
/// tuple once the HTTP/2.0 handshake has been completed. The returned
/// [`Connection`] instance will be using default configuration values. Use
/// [`Builder`] to customize the configuration values used by a [`Connection`]
/// instance.
///
/// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
/// [Handshake]: ../index.html#handshake
/// [`Connection`]: struct.Connection.html
/// [`SendRequest`]: struct.SendRequest.html
@ -1139,7 +1102,7 @@ impl Default for Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) -> Result<(), h2::Error>
/// # {
/// let (send_request, connection) = client::handshake(my_io).await?;
/// // The HTTP/2 handshake has completed, now start polling
/// // The HTTP/2.0 handshake has completed, now start polling
/// // `connection` and use `send_request` to send requests to the
/// // server.
/// # Ok(())
@ -1152,28 +1115,11 @@ where
T: AsyncRead + AsyncWrite + Unpin,
{
let builder = Builder::new();
builder
.handshake(io)
.instrument(tracing::trace_span!("client_handshake"))
.await
builder.handshake(io).await
}
// ===== impl Connection =====
async fn bind_connection<T>(io: &mut T) -> Result<(), crate::Error>
where
T: AsyncRead + AsyncWrite + Unpin,
{
tracing::debug!("binding client connection");
let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
io.write_all(msg).await.map_err(crate::Error::from_io)?;
tracing::debug!("client connection bound");
Ok(())
}
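The magic string written by `bind_connection` is the fixed client connection preface from RFC 7540 section 3.5. A standalone sanity check of its shape (illustrative, not part of the crate):

#[test]
fn client_preface_layout() {
    const PREFACE: &[u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
    // 24 octets: a pseudo request line, a blank line, "SM", and another blank line.
    assert_eq!(PREFACE.len(), 24);
    assert!(PREFACE.starts_with(b"PRI * HTTP/2.0\r\n"));
}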
impl<T, B> Connection<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
@ -1183,7 +1129,12 @@ where
mut io: T,
builder: Builder,
) -> Result<(SendRequest<B>, Connection<T, B>), crate::Error> {
bind_connection(&mut io).await?;
log::debug!("binding client connection");
let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
io.write_all(msg).await.map_err(crate::Error::from_io)?;
log::debug!("client connection bound");
// Create the codec
let mut codec = Codec::new(io);
@ -1206,7 +1157,6 @@ where
proto::Config {
next_stream_id: builder.stream_id,
initial_max_send_streams: builder.initial_max_send_streams,
max_send_buffer_size: builder.max_send_buffer_size,
reset_stream_duration: builder.reset_stream_duration,
reset_stream_max: builder.reset_stream_max,
settings: builder.settings.clone(),
@ -1274,33 +1224,6 @@ where
pub fn ping_pong(&mut self) -> Option<PingPong> {
self.inner.take_user_pings().map(PingPong::new)
}
/// Returns the maximum number of concurrent streams that may be initiated
/// by this client.
///
/// This limit is configured by the server peer by sending the
/// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame.
    /// This method returns the currently acknowledged value received from the
/// remote.
///
/// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
pub fn max_concurrent_send_streams(&self) -> usize {
self.inner.max_send_streams()
}
/// Returns the maximum number of concurrent streams that may be initiated
/// by the server on this connection.
///
/// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS`
/// parameter][1] sent in a `SETTINGS` frame that has been
/// acknowledged by the remote peer. The value to be sent is configured by
/// the [`Builder::max_concurrent_streams`][2] method before handshaking
/// with the remote peer.
///
/// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
/// [2]: ../struct.Builder.html#method.max_concurrent_streams
pub fn max_concurrent_recv_streams(&self) -> usize {
self.inner.max_recv_streams()
}
}
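A sketch of how a client would read the concurrency limits exposed by the two accessors deleted above (newer h2 API; `my_io` is a placeholder transport):

use tokio::io::{AsyncRead, AsyncWrite};

async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) -> Result<(), h2::Error> {
    let (_send_request, connection) = h2::client::handshake(my_io).await?;
    // Both values reflect the most recently acknowledged SETTINGS exchange.
    println!("peer allows {} concurrent sends", connection.max_concurrent_send_streams());
    println!("we advertise {} concurrent recvs", connection.max_concurrent_recv_streams());
    Ok(())
}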
impl<T, B> Future for Connection<T, B>
@ -1452,7 +1375,6 @@ impl Peer {
pub fn convert_send_message(
id: StreamId,
request: Request<()>,
protocol: Option<Protocol>,
end_of_stream: bool,
) -> Result<Headers, SendError> {
use http::request::Parts;
@ -1472,7 +1394,7 @@ impl Peer {
// Build the set pseudo header set. All requests will include `method`
// and `path`.
let mut pseudo = Pseudo::request(method, uri, protocol);
let mut pseudo = Pseudo::request(method, uri);
if pseudo.scheme.is_none() {
        // If the scheme is not set, then there are two options.
@ -1492,7 +1414,7 @@ impl Peer {
return Err(UserError::MissingUriSchemeAndAuthority.into());
} else {
// This is acceptable as per the above comment. However,
// HTTP/2 requires that a scheme is set. Since we are
// HTTP/2.0 requires that a scheme is set. Since we are
// forwarding an HTTP 1.1 request, the scheme is set to
// "http".
pseudo.set_scheme(uri::Scheme::HTTP);
@ -1516,8 +1438,6 @@ impl Peer {
impl proto::Peer for Peer {
type Poll = Response<()>;
const NAME: &'static str = "Client";
fn r#dyn() -> proto::DynPeer {
proto::DynPeer::Client
}
@ -1530,7 +1450,7 @@ impl proto::Peer for Peer {
pseudo: Pseudo,
fields: HeaderMap,
stream_id: StreamId,
) -> Result<Self::Poll, Error> {
) -> Result<Self::Poll, RecvError> {
let mut b = Response::builder();
b = b.version(Version::HTTP_2);
@ -1544,7 +1464,10 @@ impl proto::Peer for Peer {
Err(_) => {
// TODO: Should there be more specialized handling for different
// kinds of errors
return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
return Err(RecvError::Stream {
id: stream_id,
reason: Reason::PROTOCOL_ERROR,
});
}
};

55
third_party/rust/h2/src/codec/error.rs (vendored)

@ -1,12 +1,26 @@
use crate::proto::Error;
use crate::frame::{Reason, StreamId};
use std::{error, fmt, io};
/// Errors that are received
#[derive(Debug)]
pub enum RecvError {
Connection(Reason),
Stream { id: StreamId, reason: Reason },
Io(io::Error),
}
/// Errors caused by sending a message
#[derive(Debug)]
pub enum SendError {
Connection(Error),
/// User error
User(UserError),
/// Connection error prevents sending.
Connection(Reason),
/// I/O error
Io(io::Error),
}
/// Errors caused by users of the library
@ -21,6 +35,9 @@ pub enum UserError {
/// The payload size is too big
PayloadTooBig,
/// A header size is too big
HeaderTooBig,
/// The application attempted to initiate too many streams to remote.
Rejected,
@ -46,9 +63,28 @@ pub enum UserError {
/// Tries to update local SETTINGS while ACK has not been received.
SendSettingsWhilePending,
}
/// Tries to send push promise to peer who has disabled server push
PeerDisabledServerPush,
// ===== impl RecvError =====
impl From<io::Error> for RecvError {
fn from(src: io::Error) -> Self {
RecvError::Io(src)
}
}
impl error::Error for RecvError {}
impl fmt::Display for RecvError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::RecvError::*;
match *self {
Connection(ref reason) => reason.fmt(fmt),
Stream { ref reason, .. } => reason.fmt(fmt),
Io(ref e) => e.fmt(fmt),
}
}
}
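Within the crate, callers of the codec typically branch on whether a `RecvError` tears down the whole connection or only a single stream; a minimal sketch of that handling (crate-internal types restored by this backout, not public API):

use crate::codec::RecvError;

// Sketch only: map each restored error variant to the action it implies.
fn classify(err: &RecvError) -> &'static str {
    match err {
        RecvError::Connection(_reason) => "connection error: emit GOAWAY and shut down",
        RecvError::Stream { .. } => "stream error: send RST_STREAM for that stream only",
        RecvError::Io(_e) => "transport failure: surface the io::Error",
    }
}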
// ===== impl SendError =====
@ -57,16 +93,19 @@ impl error::Error for SendError {}
impl fmt::Display for SendError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::SendError::*;
match *self {
Self::Connection(ref e) => e.fmt(fmt),
Self::User(ref e) => e.fmt(fmt),
User(ref e) => e.fmt(fmt),
Connection(ref reason) => reason.fmt(fmt),
Io(ref e) => e.fmt(fmt),
}
}
}
impl From<io::Error> for SendError {
fn from(src: io::Error) -> Self {
Self::Connection(src.into())
SendError::Io(src)
}
}
@ -88,6 +127,7 @@ impl fmt::Display for UserError {
InactiveStreamId => "inactive stream",
UnexpectedFrameType => "unexpected frame type",
PayloadTooBig => "payload too big",
HeaderTooBig => "header too big",
Rejected => "rejected",
ReleaseCapacityTooBig => "release capacity too big",
OverflowedStreamId => "stream ID overflowed",
@ -96,7 +136,6 @@ impl fmt::Display for UserError {
PollResetAfterSendResponse => "poll_reset after send_response is illegal",
SendPingWhilePending => "send_ping before received previous pong",
SendSettingsWhilePending => "sending SETTINGS before received previous ACK",
PeerDisabledServerPush => "sending PUSH_PROMISE to peer who disabled server push",
})
}
}

501
third_party/rust/h2/src/codec/framed_read.rs (vendored)

@ -1,8 +1,8 @@
use crate::codec::RecvError;
use crate::frame::{self, Frame, Kind, Reason};
use crate::frame::{
DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE,
};
use crate::proto::Error;
use crate::hpack;
@ -59,6 +59,247 @@ impl<T> FramedRead<T> {
}
}
fn decode_frame(&mut self, mut bytes: BytesMut) -> Result<Option<Frame>, RecvError> {
use self::RecvError::*;
log::trace!("decoding frame from {}B", bytes.len());
// Parse the head
let head = frame::Head::parse(&bytes);
if self.partial.is_some() && head.kind() != Kind::Continuation {
proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind());
return Err(Connection(Reason::PROTOCOL_ERROR));
}
let kind = head.kind();
log::trace!(" -> kind={:?}", kind);
macro_rules! header_block {
($frame:ident, $head:ident, $bytes:ident) => ({
// Drop the frame header
// TODO: Change to drain: carllerche/bytes#130
let _ = $bytes.split_to(frame::HEADER_LEN);
// Parse the header frame w/o parsing the payload
let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) {
Ok(res) => res,
Err(frame::Error::InvalidDependencyId) => {
proto_err!(stream: "invalid HEADERS dependency ID");
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
return Err(Stream {
id: $head.stream_id(),
reason: Reason::PROTOCOL_ERROR,
});
},
Err(e) => {
proto_err!(conn: "failed to load frame; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
};
let is_end_headers = frame.is_end_headers();
// Load the HPACK encoded headers
match frame.load_hpack(&mut payload, self.max_header_list_size, &mut self.hpack) {
Ok(_) => {},
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {},
Err(frame::Error::MalformedMessage) => {
let id = $head.stream_id();
proto_err!(stream: "malformed header block; stream={:?}", id);
return Err(Stream {
id,
reason: Reason::PROTOCOL_ERROR,
});
},
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
frame.into()
} else {
log::trace!("loaded partial header block");
// Defer returning the frame
self.partial = Some(Partial {
frame: Continuable::$frame(frame),
buf: payload,
});
return Ok(None);
}
});
}
let frame = match kind {
Kind::Settings => {
let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Ping => {
let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load PING frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::WindowUpdate => {
let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Data => {
let _ = bytes.split_to(frame::HEADER_LEN);
let res = frame::Data::load(head, bytes.freeze());
// TODO: Should this always be connection level? Probably not...
res.map_err(|e| {
proto_err!(conn: "failed to load DATA frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Headers => header_block!(Headers, head, bytes),
Kind::Reset => {
let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load RESET frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::GoAway => {
let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::PushPromise => header_block!(PushPromise, head, bytes),
Kind::Priority => {
if head.stream_id() == 0 {
// Invalid stream identifier
proto_err!(conn: "invalid stream ID 0");
return Err(Connection(Reason::PROTOCOL_ERROR));
}
match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) {
Ok(frame) => frame.into(),
Err(frame::Error::InvalidDependencyId) => {
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
let id = head.stream_id();
proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id);
return Err(Stream {
id,
reason: Reason::PROTOCOL_ERROR,
});
}
Err(e) => {
proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
}
}
Kind::Continuation => {
let is_end_headers = (head.flag() & 0x4) == 0x4;
let mut partial = match self.partial.take() {
Some(partial) => partial,
None => {
proto_err!(conn: "received unexpected CONTINUATION frame");
return Err(Connection(Reason::PROTOCOL_ERROR));
}
};
// The stream identifiers must match
if partial.frame.stream_id() != head.stream_id() {
proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID");
return Err(Connection(Reason::PROTOCOL_ERROR));
}
// Extend the buf
if partial.buf.is_empty() {
partial.buf = bytes.split_off(frame::HEADER_LEN);
} else {
if partial.frame.is_over_size() {
// If there was left over bytes previously, they may be
// needed to continue decoding, even though we will
// be ignoring this frame. This is done to keep the HPACK
// decoder state up-to-date.
//
// Still, we need to be careful, because if a malicious
// attacker were to try to send a gigantic string, such
// that it fits over multiple header blocks, we could
// grow memory uncontrollably again, and that'd be a shame.
//
// Instead, we use a simple heuristic to determine if
// we should continue to ignore decoding, or to tell
// the attacker to go away.
if partial.buf.len() + bytes.len() > self.max_header_list_size {
proto_err!(conn: "CONTINUATION frame header block size over ignorable limit");
return Err(Connection(Reason::COMPRESSION_ERROR));
}
}
partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]);
}
match partial.frame.load_hpack(
&mut partial.buf,
self.max_header_list_size,
&mut self.hpack,
) {
Ok(_) => {}
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_)))
if !is_end_headers => {}
Err(frame::Error::MalformedMessage) => {
let id = head.stream_id();
proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id);
return Err(Stream {
id,
reason: Reason::PROTOCOL_ERROR,
});
}
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
partial.frame.into()
} else {
self.partial = Some(partial);
return Ok(None);
}
}
Kind::Unknown => {
// Unknown frames are ignored
return Ok(None);
}
};
Ok(Some(frame))
}
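`decode_frame` starts from the fixed 9-octet frame header parsed by `frame::Head::parse`. A standalone sketch of that layout, per RFC 7540 section 4.1 (not the crate's actual parser):

// 24-bit length, 8-bit type, 8-bit flags, then 1 reserved bit plus a 31-bit stream id.
fn parse_head(h: &[u8; 9]) -> (u32, u8, u8, u32) {
    let len = u32::from(h[0]) << 16 | u32::from(h[1]) << 8 | u32::from(h[2]);
    let kind = h[3];
    let flags = h[4];
    let stream_id = u32::from_be_bytes([h[5], h[6], h[7], h[8]]) & 0x7FFF_FFFF;
    (len, kind, flags, stream_id)
}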
pub fn get_ref(&self) -> &T {
self.inner.get_ref()
}
@ -90,279 +331,35 @@ impl<T> FramedRead<T> {
}
}
/// Decodes a frame.
///
/// This method is intentionally de-generified and outlined because it is very large.
fn decode_frame(
hpack: &mut hpack::Decoder,
max_header_list_size: usize,
partial_inout: &mut Option<Partial>,
mut bytes: BytesMut,
) -> Result<Option<Frame>, Error> {
let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len());
let _e = span.enter();
tracing::trace!("decoding frame from {}B", bytes.len());
// Parse the head
let head = frame::Head::parse(&bytes);
if partial_inout.is_some() && head.kind() != Kind::Continuation {
proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind());
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
let kind = head.kind();
tracing::trace!(frame.kind = ?kind);
macro_rules! header_block {
($frame:ident, $head:ident, $bytes:ident) => ({
// Drop the frame header
// TODO: Change to drain: carllerche/bytes#130
let _ = $bytes.split_to(frame::HEADER_LEN);
// Parse the header frame w/o parsing the payload
let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) {
Ok(res) => res,
Err(frame::Error::InvalidDependencyId) => {
proto_err!(stream: "invalid HEADERS dependency ID");
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR));
},
Err(e) => {
proto_err!(conn: "failed to load frame; err={:?}", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
};
let is_end_headers = frame.is_end_headers();
// Load the HPACK encoded headers
match frame.load_hpack(&mut payload, max_header_list_size, hpack) {
Ok(_) => {},
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {},
Err(frame::Error::MalformedMessage) => {
let id = $head.stream_id();
proto_err!(stream: "malformed header block; stream={:?}", id);
return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
},
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
frame.into()
} else {
tracing::trace!("loaded partial header block");
// Defer returning the frame
*partial_inout = Some(Partial {
frame: Continuable::$frame(frame),
buf: payload,
});
return Ok(None);
}
});
}
let frame = match kind {
Kind::Settings => {
let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Ping => {
let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load PING frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::WindowUpdate => {
let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Data => {
let _ = bytes.split_to(frame::HEADER_LEN);
let res = frame::Data::load(head, bytes.freeze());
// TODO: Should this always be connection level? Probably not...
res.map_err(|e| {
proto_err!(conn: "failed to load DATA frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Headers => header_block!(Headers, head, bytes),
Kind::Reset => {
let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load RESET frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::GoAway => {
let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::PushPromise => header_block!(PushPromise, head, bytes),
Kind::Priority => {
if head.stream_id() == 0 {
// Invalid stream identifier
proto_err!(conn: "invalid stream ID 0");
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) {
Ok(frame) => frame.into(),
Err(frame::Error::InvalidDependencyId) => {
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
let id = head.stream_id();
proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id);
return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
}
Err(e) => {
proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
}
}
Kind::Continuation => {
let is_end_headers = (head.flag() & 0x4) == 0x4;
let mut partial = match partial_inout.take() {
Some(partial) => partial,
None => {
proto_err!(conn: "received unexpected CONTINUATION frame");
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
};
// The stream identifiers must match
if partial.frame.stream_id() != head.stream_id() {
proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID");
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
// Extend the buf
if partial.buf.is_empty() {
partial.buf = bytes.split_off(frame::HEADER_LEN);
} else {
if partial.frame.is_over_size() {
// If there was left over bytes previously, they may be
// needed to continue decoding, even though we will
// be ignoring this frame. This is done to keep the HPACK
// decoder state up-to-date.
//
// Still, we need to be careful, because if a malicious
// attacker were to try to send a gigantic string, such
// that it fits over multiple header blocks, we could
// grow memory uncontrollably again, and that'd be a shame.
//
// Instead, we use a simple heuristic to determine if
// we should continue to ignore decoding, or to tell
// the attacker to go away.
if partial.buf.len() + bytes.len() > max_header_list_size {
proto_err!(conn: "CONTINUATION frame header block size over ignorable limit");
return Err(Error::library_go_away(Reason::COMPRESSION_ERROR).into());
}
}
partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]);
}
match partial
.frame
.load_hpack(&mut partial.buf, max_header_list_size, hpack)
{
Ok(_) => {}
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}
Err(frame::Error::MalformedMessage) => {
let id = head.stream_id();
proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id);
return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
}
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
partial.frame.into()
} else {
*partial_inout = Some(partial);
return Ok(None);
}
}
Kind::Unknown => {
// Unknown frames are ignored
return Ok(None);
}
};
Ok(Some(frame))
}
impl<T> Stream for FramedRead<T>
where
T: AsyncRead + Unpin,
{
type Item = Result<Frame, Error>;
type Item = Result<Frame, RecvError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let span = tracing::trace_span!("FramedRead::poll_next");
let _e = span.enter();
loop {
tracing::trace!("poll");
log::trace!("poll");
let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
Some(Ok(bytes)) => bytes,
Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))),
None => return Poll::Ready(None),
};
tracing::trace!(read.bytes = bytes.len());
let Self {
ref mut hpack,
max_header_list_size,
ref mut partial,
..
} = *self;
if let Some(frame) = decode_frame(hpack, max_header_list_size, partial, bytes)? {
tracing::debug!(?frame, "received");
log::trace!("poll; bytes={}B", bytes.len());
if let Some(frame) = self.decode_frame(bytes)? {
log::debug!("received; frame={:?}", frame);
return Poll::Ready(Some(Ok(frame)));
}
}
}
}
fn map_err(err: io::Error) -> Error {
fn map_err(err: io::Error) -> RecvError {
if let io::ErrorKind::InvalidData = err.kind() {
if let Some(custom) = err.get_ref() {
if custom.is::<LengthDelimitedCodecError>() {
return Error::library_go_away(Reason::FRAME_SIZE_ERROR);
return RecvError::Connection(Reason::FRAME_SIZE_ERROR);
}
}
}

261
third_party/rust/h2/src/codec/framed_write.rs (vendored)

@ -3,12 +3,15 @@ use crate::codec::UserError::*;
use crate::frame::{self, Frame, FrameSize};
use crate::hpack;
use bytes::{Buf, BufMut, BytesMut};
use bytes::{
buf::{BufExt, BufMutExt},
Buf, BufMut, BytesMut,
};
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::io::{AsyncRead, AsyncWrite};
use std::io::{self, Cursor, IoSlice};
use std::io::{self, Cursor};
// A macro to get around a method needing to borrow &mut self
macro_rules! limited_write_buf {
@ -23,11 +26,6 @@ pub struct FramedWrite<T, B> {
/// Upstream `AsyncWrite`
inner: T,
encoder: Encoder<B>,
}
#[derive(Debug)]
struct Encoder<B> {
/// HPACK encoder
hpack: hpack::Encoder,
@ -44,9 +42,6 @@ struct Encoder<B> {
/// Max frame size, this is specified by the peer
max_frame_size: FrameSize,
/// Whether or not the wrapped `AsyncWrite` supports vectored IO.
is_write_vectored: bool,
}
#[derive(Debug)]
@ -55,7 +50,7 @@ enum Next<B> {
Continuation(frame::Continuation),
}
/// Initialize the connection with this amount of write buffer.
/// Initialze the connection with this amount of write buffer.
///
/// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS
/// frame that big.
@ -76,17 +71,13 @@ where
B: Buf,
{
pub fn new(inner: T) -> FramedWrite<T, B> {
let is_write_vectored = inner.is_write_vectored();
FramedWrite {
inner,
encoder: Encoder {
hpack: hpack::Encoder::default(),
buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)),
next: None,
last_data_frame: None,
max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE,
is_write_vectored,
},
hpack: hpack::Encoder::default(),
buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)),
next: None,
last_data_frame: None,
max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE,
}
}
@ -95,11 +86,11 @@ where
/// Calling this function may result in the current contents of the buffer
/// to be flushed to `T`.
pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
if !self.encoder.has_capacity() {
if !self.has_capacity() {
// Try flushing
ready!(self.flush(cx))?;
if !self.encoder.has_capacity() {
if !self.has_capacity() {
return Poll::Pending;
}
}
@ -112,124 +103,10 @@ where
/// `poll_ready` must be called first to ensure that a frame may be
/// accepted.
pub fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
self.encoder.buffer(item)
}
/// Flush buffered data to the wire
pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
let span = tracing::trace_span!("FramedWrite::flush");
let _e = span.enter();
loop {
while !self.encoder.is_empty() {
match self.encoder.next {
Some(Next::Data(ref mut frame)) => {
tracing::trace!(queued_data_frame = true);
let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut());
ready!(write(
&mut self.inner,
self.encoder.is_write_vectored,
&mut buf,
cx,
))?
}
_ => {
tracing::trace!(queued_data_frame = false);
ready!(write(
&mut self.inner,
self.encoder.is_write_vectored,
&mut self.encoder.buf,
cx,
))?
}
}
}
match self.encoder.unset_frame() {
ControlFlow::Continue => (),
ControlFlow::Break => break,
}
}
tracing::trace!("flushing buffer");
// Flush the upstream
ready!(Pin::new(&mut self.inner).poll_flush(cx))?;
Poll::Ready(Ok(()))
}
/// Close the codec
pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
ready!(self.flush(cx))?;
Pin::new(&mut self.inner).poll_shutdown(cx)
}
}
fn write<T, B>(
writer: &mut T,
is_write_vectored: bool,
buf: &mut B,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>>
where
T: AsyncWrite + Unpin,
B: Buf,
{
// TODO(eliza): when tokio-util 0.5.1 is released, this
// could just use `poll_write_buf`...
const MAX_IOVS: usize = 64;
let n = if is_write_vectored {
let mut bufs = [IoSlice::new(&[]); MAX_IOVS];
let cnt = buf.chunks_vectored(&mut bufs);
ready!(Pin::new(writer).poll_write_vectored(cx, &bufs[..cnt]))?
} else {
ready!(Pin::new(writer).poll_write(cx, buf.chunk()))?
};
buf.advance(n);
Ok(()).into()
}
#[must_use]
enum ControlFlow {
Continue,
Break,
}
impl<B> Encoder<B>
where
B: Buf,
{
fn unset_frame(&mut self) -> ControlFlow {
// Clear internal buffer
self.buf.set_position(0);
self.buf.get_mut().clear();
// The data frame has been written, so unset it
match self.next.take() {
Some(Next::Data(frame)) => {
self.last_data_frame = Some(frame);
debug_assert!(self.is_empty());
ControlFlow::Break
}
Some(Next::Continuation(frame)) => {
// Buffer the continuation frame, then try to write again
let mut buf = limited_write_buf!(self);
if let Some(continuation) = frame.encode(&mut buf) {
self.next = Some(Next::Continuation(continuation));
}
ControlFlow::Continue
}
None => ControlFlow::Break,
}
}
fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
// Ensure that we have enough capacity to accept the write.
assert!(self.has_capacity());
let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item);
let _e = span.enter();
tracing::debug!(frame = ?item, "send");
log::debug!("send; frame={:?}", item);
match item {
Frame::Data(mut v) => {
@ -273,37 +150,103 @@ where
}
Frame::Settings(v) => {
v.encode(self.buf.get_mut());
tracing::trace!(rem = self.buf.remaining(), "encoded settings");
log::trace!("encoded settings; rem={:?}", self.buf.remaining());
}
Frame::GoAway(v) => {
v.encode(self.buf.get_mut());
tracing::trace!(rem = self.buf.remaining(), "encoded go_away");
log::trace!("encoded go_away; rem={:?}", self.buf.remaining());
}
Frame::Ping(v) => {
v.encode(self.buf.get_mut());
tracing::trace!(rem = self.buf.remaining(), "encoded ping");
log::trace!("encoded ping; rem={:?}", self.buf.remaining());
}
Frame::WindowUpdate(v) => {
v.encode(self.buf.get_mut());
tracing::trace!(rem = self.buf.remaining(), "encoded window_update");
log::trace!("encoded window_update; rem={:?}", self.buf.remaining());
}
Frame::Priority(_) => {
/*
v.encode(self.buf.get_mut());
tracing::trace!("encoded priority; rem={:?}", self.buf.remaining());
log::trace!("encoded priority; rem={:?}", self.buf.remaining());
*/
unimplemented!();
}
Frame::Reset(v) => {
v.encode(self.buf.get_mut());
tracing::trace!(rem = self.buf.remaining(), "encoded reset");
log::trace!("encoded reset; rem={:?}", self.buf.remaining());
}
}
Ok(())
}
/// Flush buffered data to the wire
pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
log::trace!("flush");
loop {
while !self.is_empty() {
match self.next {
Some(Next::Data(ref mut frame)) => {
log::trace!(" -> queued data frame");
let mut buf = (&mut self.buf).chain(frame.payload_mut());
ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?;
}
_ => {
log::trace!(" -> not a queued data frame");
ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?;
}
}
}
// Clear internal buffer
self.buf.set_position(0);
self.buf.get_mut().clear();
// The data frame has been written, so unset it
match self.next.take() {
Some(Next::Data(frame)) => {
self.last_data_frame = Some(frame);
debug_assert!(self.is_empty());
break;
}
Some(Next::Continuation(frame)) => {
// Buffer the continuation frame, then try to write again
let mut buf = limited_write_buf!(self);
if let Some(continuation) = frame.encode(&mut self.hpack, &mut buf) {
// We previously had a CONTINUATION, and after encoding
// it, we got *another* one? Let's just double check
// that at least some progress is being made...
if self.buf.get_ref().len() == frame::HEADER_LEN {
// If *only* the CONTINUATION frame header was
// written, and *no* header fields, we're stuck
// in a loop...
panic!("CONTINUATION frame write loop; header value too big to encode");
}
self.next = Some(Next::Continuation(continuation));
}
}
None => {
break;
}
}
}
log::trace!("flushing buffer");
// Flush the upstream
ready!(Pin::new(&mut self.inner).poll_flush(cx))?;
Poll::Ready(Ok(()))
}
/// Close the codec
pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
ready!(self.flush(cx))?;
Pin::new(&mut self.inner).poll_shutdown(cx)
}
fn has_capacity(&self) -> bool {
self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY
}
@ -316,32 +259,26 @@ where
}
}
impl<B> Encoder<B> {
fn max_frame_size(&self) -> usize {
self.max_frame_size as usize
}
}
impl<T, B> FramedWrite<T, B> {
/// Returns the max frame size that can be sent
pub fn max_frame_size(&self) -> usize {
self.encoder.max_frame_size()
self.max_frame_size as usize
}
/// Set the peer's max frame size.
pub fn set_max_frame_size(&mut self, val: usize) {
assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize);
self.encoder.max_frame_size = val as FrameSize;
self.max_frame_size = val as FrameSize;
}
/// Set the peer's header table size.
pub fn set_header_table_size(&mut self, val: usize) {
self.encoder.hpack.update_max_size(val);
self.hpack.update_max_size(val);
}
/// Retrieve the last data frame that has been sent
pub fn take_last_data_frame(&mut self) -> Option<frame::Data<B>> {
self.encoder.last_data_frame.take()
self.last_data_frame.take()
}
pub fn get_mut(&mut self) -> &mut T {
@ -350,13 +287,25 @@ impl<T, B> FramedWrite<T, B> {
}
impl<T: AsyncRead + Unpin, B> AsyncRead for FramedWrite<T, B> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf,
) -> Poll<io::Result<()>> {
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_read(cx, buf)
}
fn poll_read_buf<Buf: BufMut>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut Buf,
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_read_buf(cx, buf)
}
}
// We never project the Pin to `B`.

5
third_party/rust/h2/src/codec/mod.rs (vendored)

@ -2,13 +2,12 @@ mod error;
mod framed_read;
mod framed_write;
pub use self::error::{SendError, UserError};
pub use self::error::{RecvError, SendError, UserError};
use self::framed_read::FramedRead;
use self::framed_write::FramedWrite;
use crate::frame::{self, Data, Frame};
use crate::proto::Error;
use bytes::Buf;
use futures_core::Stream;
@ -156,7 +155,7 @@ impl<T, B> Stream for Codec<T, B>
where
T: AsyncRead + Unpin,
{
type Item = Result<Frame, Error>;
type Item = Result<Frame, RecvError>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Pin::new(&mut self.inner).poll_next(cx)

101
third_party/rust/h2/src/error.rs (vendored)

@ -1,13 +1,11 @@
use crate::codec::{SendError, UserError};
use crate::frame::StreamId;
use crate::proto::{self, Initiator};
use crate::proto;
use bytes::Bytes;
use std::{error, fmt, io};
pub use crate::frame::Reason;
/// Represents HTTP/2 operation errors.
/// Represents HTTP/2.0 operation errors.
///
/// `Error` covers error cases raised by protocol errors caused by the
/// peer, I/O (transport) errors, and errors caused by the user of the library.
@ -24,14 +22,11 @@ pub struct Error {
#[derive(Debug)]
enum Kind {
/// A RST_STREAM frame was received or sent.
Reset(StreamId, Reason, Initiator),
/// A GO_AWAY frame was received or sent.
GoAway(Bytes, Reason, Initiator),
/// The user created an error from a bare Reason.
Reason(Reason),
/// An error caused by an action taken by the remote peer.
///
/// This is either an error received by the peer or caused by an invalid
/// action taken by the peer (i.e. a protocol error).
Proto(Reason),
/// An error resulting from an invalid action taken by the user of this
/// library.
@ -50,14 +45,12 @@ impl Error {
/// action taken by the peer (i.e. a protocol error).
pub fn reason(&self) -> Option<Reason> {
match self.kind {
Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) | Kind::Reason(reason) => {
Some(reason)
}
Kind::Proto(reason) => Some(reason),
_ => None,
}
}
/// Returns true if the error is an io::Error
/// Returns the true if the error is an io::Error
pub fn is_io(&self) -> bool {
match self.kind {
Kind::Io(_) => true,
@ -86,21 +79,6 @@ impl Error {
kind: Kind::Io(err),
}
}
/// Returns true if the error is from a `GOAWAY`.
pub fn is_go_away(&self) -> bool {
matches!(self.kind, Kind::GoAway(..))
}
/// Returns true if the error was received in a frame from the remote.
///
/// Such as from a received `RST_STREAM` or `GOAWAY` frame.
pub fn is_remote(&self) -> bool {
matches!(
self.kind,
Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote)
)
}
}
impl From<proto::Error> for Error {
@ -109,13 +87,8 @@ impl From<proto::Error> for Error {
Error {
kind: match src {
Reset(stream_id, reason, initiator) => Kind::Reset(stream_id, reason, initiator),
GoAway(debug_data, reason, initiator) => {
Kind::GoAway(debug_data, reason, initiator)
}
Io(kind, inner) => {
Kind::Io(inner.map_or_else(|| kind.into(), |inner| io::Error::new(kind, inner)))
}
Proto(reason) => Kind::Proto(reason),
Io(e) => Kind::Io(e),
},
}
}
@ -124,7 +97,7 @@ impl From<proto::Error> for Error {
impl From<Reason> for Error {
fn from(src: Reason) -> Error {
Error {
kind: Kind::Reason(src),
kind: Kind::Proto(src),
}
}
}
@ -133,7 +106,8 @@ impl From<SendError> for Error {
fn from(src: SendError) -> Error {
match src {
SendError::User(e) => e.into(),
SendError::Connection(e) => e.into(),
SendError::Connection(reason) => reason.into(),
SendError::Io(e) => Error::from_io(e),
}
}
}
@ -148,51 +122,14 @@ impl From<UserError> for Error {
impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let debug_data = match self.kind {
Kind::Reset(_, reason, Initiator::User) => {
return write!(fmt, "stream error sent by user: {}", reason)
}
Kind::Reset(_, reason, Initiator::Library) => {
return write!(fmt, "stream error detected: {}", reason)
}
Kind::Reset(_, reason, Initiator::Remote) => {
return write!(fmt, "stream error received: {}", reason)
}
Kind::GoAway(ref debug_data, reason, Initiator::User) => {
write!(fmt, "connection error sent by user: {}", reason)?;
debug_data
}
Kind::GoAway(ref debug_data, reason, Initiator::Library) => {
write!(fmt, "connection error detected: {}", reason)?;
debug_data
}
Kind::GoAway(ref debug_data, reason, Initiator::Remote) => {
write!(fmt, "connection error received: {}", reason)?;
debug_data
}
Kind::Reason(reason) => return write!(fmt, "protocol error: {}", reason),
Kind::User(ref e) => return write!(fmt, "user error: {}", e),
Kind::Io(ref e) => return e.fmt(fmt),
};
use self::Kind::*;
if !debug_data.is_empty() {
write!(fmt, " ({:?})", debug_data)?;
match self.kind {
Proto(ref reason) => write!(fmt, "protocol error: {}", reason),
User(ref e) => write!(fmt, "user error: {}", e),
Io(ref e) => fmt::Display::fmt(e, fmt),
}
Ok(())
}
}
impl error::Error for Error {}
#[cfg(test)]
mod tests {
use super::Error;
use crate::Reason;
#[test]
fn error_from_reason() {
let err = Error::from(Reason::HTTP_1_1_REQUIRED);
assert_eq!(err.reason(), Some(Reason::HTTP_1_1_REQUIRED));
}
}

55
third_party/rust/h2/src/ext.rs (vendored)

@ -1,55 +0,0 @@
//! Extensions specific to the HTTP/2 protocol.
use crate::hpack::BytesStr;
use bytes::Bytes;
use std::fmt;
/// Represents the `:protocol` pseudo-header used by
/// the [Extended CONNECT Protocol].
///
/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
#[derive(Clone, Eq, PartialEq)]
pub struct Protocol {
value: BytesStr,
}
impl Protocol {
/// Converts a static string to a protocol name.
pub const fn from_static(value: &'static str) -> Self {
Self {
value: BytesStr::from_static(value),
}
}
/// Returns a str representation of the header.
pub fn as_str(&self) -> &str {
self.value.as_str()
}
pub(crate) fn try_from(bytes: Bytes) -> Result<Self, std::str::Utf8Error> {
Ok(Self {
value: BytesStr::try_from(bytes)?,
})
}
}
impl<'a> From<&'a str> for Protocol {
fn from(value: &'a str) -> Self {
Self {
value: BytesStr::from(value),
}
}
}
impl AsRef<[u8]> for Protocol {
fn as_ref(&self) -> &[u8] {
self.value.as_ref()
}
}
impl fmt::Debug for Protocol {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.value.fmt(f)
}
}
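For reference, a sketch of how the removed `ext::Protocol` type is used by callers of the newer h2 when driving extended CONNECT (RFC 8441); the "websocket" token is the example value from that RFC:

use h2::ext::Protocol;

fn doc() {
    let proto = Protocol::from_static("websocket");
    assert_eq!(proto.as_str(), "websocket");
}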

4
third_party/rust/h2/src/frame/data.rs (vendored)

@ -36,7 +36,7 @@ impl<T> Data<T> {
}
}
/// Returns the stream identifier that this frame is associated with.
/// Returns the stream identifer that this frame is associated with.
///
/// This cannot be a zero stream identifier.
pub fn stream_id(&self) -> StreamId {
@ -63,7 +63,7 @@ impl<T> Data<T> {
}
}
/// Returns whether the `PADDED` flag is set on this frame.
/// Returns whther the `PADDED` flag is set on this frame.
#[cfg(feature = "unstable")]
pub fn is_padded(&self) -> bool {
self.flags.is_padded()

5
third_party/rust/h2/src/frame/go_away.rs поставляемый
Просмотреть файл

@ -29,7 +29,8 @@ impl GoAway {
self.error_code
}
pub fn debug_data(&self) -> &Bytes {
#[cfg(feature = "unstable")]
pub fn debug_data(&self) -> &[u8] {
&self.debug_data
}
@ -50,7 +51,7 @@ impl GoAway {
}
pub fn encode<B: BufMut>(&self, dst: &mut B) {
tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code);
log::trace!("encoding GO_AWAY; code={:?}", self.error_code);
let head = Head::new(Kind::GoAway, 0, StreamId::zero());
head.encode(8, dst);
dst.put_u32(self.last_stream_id.into());

2
third_party/rust/h2/src/frame/head.rs (vendored)

@ -36,7 +36,7 @@ impl Head {
}
}
/// Parse an HTTP/2 frame header
/// Parse an HTTP/2.0 frame header
pub fn parse(header: &[u8]) -> Head {
let (stream_id, _) = StreamId::parse(&header[5..]);

285
third_party/rust/h2/src/frame/headers.rs (vendored)

@ -1,17 +1,21 @@
use super::{util, StreamDependency, StreamId};
use crate::ext::Protocol;
use crate::frame::{Error, Frame, Head, Kind};
use crate::hpack::{self, BytesStr};
use http::header::{self, HeaderName, HeaderValue};
use http::{uri, HeaderMap, Method, Request, StatusCode, Uri};
use bytes::{BufMut, Bytes, BytesMut};
use bytes::{Bytes, BytesMut};
use std::fmt;
use std::io::Cursor;
type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>;
type EncodeBuf<'a> = bytes::buf::ext::Limit<&'a mut BytesMut>;
// Minimum MAX_FRAME_SIZE is 16kb, so save some arbitrary space for frame
// head and other header bits.
const MAX_HEADER_LENGTH: usize = 1024 * 16 - 100;
/// Header frame
///
/// This could be either a request or a response.
@ -67,7 +71,6 @@ pub struct Pseudo {
pub scheme: Option<BytesStr>,
pub authority: Option<BytesStr>,
pub path: Option<BytesStr>,
pub protocol: Option<Protocol>,
// Response
pub status: Option<StatusCode>,
@ -97,7 +100,11 @@ struct HeaderBlock {
#[derive(Debug)]
struct EncodingHeaderBlock {
hpack: Bytes,
/// Argument to pass to the HPACK encoder to resume encoding
hpack: Option<hpack::EncodeState>,
/// remaining headers to encode
headers: Iter,
}
const END_STREAM: u8 = 0x1;
@ -146,11 +153,7 @@ impl Headers {
let flags = HeadersFlag(head.flag());
let mut pad = 0;
tracing::trace!("loading headers; flags={:?}", flags);
if head.stream_id().is_zero() {
return Err(Error::InvalidStreamId);
}
log::trace!("loading headers; flags={:?}", flags);
// Read the padding length
if flags.is_padded() {
@ -238,6 +241,10 @@ impl Headers {
self.header_block.is_over_size
}
pub(crate) fn has_too_big_field(&self) -> bool {
self.header_block.has_too_big_field()
}
pub fn into_parts(self) -> (Pseudo, HeaderMap) {
(self.header_block.pseudo, self.header_block.fields)
}
@ -247,11 +254,6 @@ impl Headers {
&mut self.header_block.pseudo
}
/// Whether it has status 1xx
pub(crate) fn is_informational(&self) -> bool {
self.header_block.pseudo.is_informational()
}
pub fn fields(&self) -> &HeaderMap {
&self.header_block.fields
}
@ -272,8 +274,8 @@ impl Headers {
let head = self.head();
self.header_block
.into_encoding(encoder)
.encode(&head, dst, |_| {})
.into_encoding()
.encode(&head, encoder, dst, |_| {})
}
fn head(&self) -> Head {
@ -294,10 +296,6 @@ impl fmt::Debug for Headers {
.field("stream_id", &self.stream_id)
.field("flags", &self.flags);
if let Some(ref protocol) = self.header_block.pseudo.protocol {
builder.field("protocol", protocol);
}
if let Some(ref dep) = self.stream_dep {
builder.field("stream_dep", dep);
}
@ -400,10 +398,6 @@ impl PushPromise {
let flags = PushPromiseFlag(head.flag());
let mut pad = 0;
if head.stream_id().is_zero() {
return Err(Error::InvalidStreamId);
}
// Read the padding length
if flags.is_padded() {
if src.is_empty() {
@ -481,6 +475,8 @@ impl PushPromise {
encoder: &mut hpack::Encoder,
dst: &mut EncodeBuf<'_>,
) -> Option<Continuation> {
use bytes::BufMut;
// At this point, the `is_end_headers` flag should always be set
debug_assert!(self.flags.is_end_headers());
@ -488,8 +484,8 @@ impl PushPromise {
let promised_id = self.promised_id;
self.header_block
.into_encoding(encoder)
.encode(&head, dst, |dst| {
.into_encoding()
.encode(&head, encoder, dst, |dst| {
dst.put_u32(promised_id.into());
})
}
@ -528,39 +524,38 @@ impl Continuation {
Head::new(Kind::Continuation, END_HEADERS, self.stream_id)
}
pub fn encode(self, dst: &mut EncodeBuf<'_>) -> Option<Continuation> {
pub fn encode(
self,
encoder: &mut hpack::Encoder,
dst: &mut EncodeBuf<'_>,
) -> Option<Continuation> {
// Get the CONTINUATION frame head
let head = self.head();
self.header_block.encode(&head, dst, |_| {})
self.header_block.encode(&head, encoder, dst, |_| {})
}
}
// ===== impl Pseudo =====
impl Pseudo {
pub fn request(method: Method, uri: Uri, protocol: Option<Protocol>) -> Self {
pub fn request(method: Method, uri: Uri) -> Self {
let parts = uri::Parts::from(uri);
let mut path = parts
.path_and_query
.map(|v| BytesStr::from(v.as_str()))
.unwrap_or(BytesStr::from_static(""));
.map(|v| Bytes::copy_from_slice(v.as_str().as_bytes()))
.unwrap_or_else(Bytes::new);
match method {
Method::OPTIONS | Method::CONNECT => {}
_ if path.is_empty() => {
path = BytesStr::from_static("/");
}
_ => {}
if path.is_empty() && method != Method::OPTIONS {
path = Bytes::from_static(b"/");
}
let mut pseudo = Pseudo {
method: Some(method),
scheme: None,
authority: None,
path: Some(path).filter(|p| !p.is_empty()),
protocol,
path: Some(unsafe { BytesStr::from_utf8_unchecked(path) }),
status: None,
};
@ -574,7 +569,9 @@ impl Pseudo {
// If the URI includes an authority component, add it to the pseudo
// headers
if let Some(authority) = parts.authority {
pseudo.set_authority(BytesStr::from(authority.as_str()));
pseudo.set_authority(unsafe {
BytesStr::from_utf8_unchecked(Bytes::copy_from_slice(authority.as_str().as_bytes()))
});
}
pseudo
@ -586,45 +583,34 @@ impl Pseudo {
scheme: None,
authority: None,
path: None,
protocol: None,
status: Some(status),
}
}
#[cfg(feature = "unstable")]
pub fn set_status(&mut self, value: StatusCode) {
self.status = Some(value);
}
pub fn set_scheme(&mut self, scheme: uri::Scheme) {
let bytes_str = match scheme.as_str() {
"http" => BytesStr::from_static("http"),
"https" => BytesStr::from_static("https"),
s => BytesStr::from(s),
let bytes = match scheme.as_str() {
"http" => Bytes::from_static(b"http"),
"https" => Bytes::from_static(b"https"),
s => Bytes::copy_from_slice(s.as_bytes()),
};
self.scheme = Some(bytes_str);
}
#[cfg(feature = "unstable")]
pub fn set_protocol(&mut self, protocol: Protocol) {
self.protocol = Some(protocol);
self.scheme = Some(unsafe { BytesStr::from_utf8_unchecked(bytes) });
}
pub fn set_authority(&mut self, authority: BytesStr) {
self.authority = Some(authority);
}
/// Whether it has status 1xx
pub(crate) fn is_informational(&self) -> bool {
self.status
.map_or(false, |status| status.is_informational())
}
}
// ===== impl EncodingHeaderBlock =====
impl EncodingHeaderBlock {
fn encode<F>(mut self, head: &Head, dst: &mut EncodeBuf<'_>, f: F) -> Option<Continuation>
fn encode<F>(
mut self,
head: &Head,
encoder: &mut hpack::Encoder,
dst: &mut EncodeBuf<'_>,
f: F,
) -> Option<Continuation>
where
F: FnOnce(&mut EncodeBuf<'_>),
{
@ -640,17 +626,15 @@ impl EncodingHeaderBlock {
f(dst);
// Now, encode the header payload
let continuation = if self.hpack.len() > dst.remaining_mut() {
dst.put_slice(&self.hpack.split_to(dst.remaining_mut()));
Some(Continuation {
let continuation = match encoder.encode(self.hpack, &mut self.headers, dst) {
hpack::Encode::Full => None,
hpack::Encode::Partial(state) => Some(Continuation {
stream_id: head.stream_id(),
header_block: self,
})
} else {
dst.put_slice(&self.hpack);
None
header_block: EncodingHeaderBlock {
hpack: Some(state),
headers: self.headers,
},
}),
};
// Compute the header block length
@ -698,10 +682,6 @@ impl Iterator for Iter {
return Some(Path(path));
}
if let Some(protocol) = pseudo.protocol.take() {
return Some(Protocol(protocol));
}
if let Some(status) = pseudo.status.take() {
return Some(Status(status));
}
@ -837,19 +817,19 @@ impl HeaderBlock {
macro_rules! set_pseudo {
($field:ident, $val:expr) => {{
if reg {
tracing::trace!("load_hpack; header malformed -- pseudo not at head of block");
log::trace!("load_hpack; header malformed -- pseudo not at head of block");
malformed = true;
} else if self.pseudo.$field.is_some() {
tracing::trace!("load_hpack; header malformed -- repeated pseudo");
log::trace!("load_hpack; header malformed -- repeated pseudo");
malformed = true;
} else {
let __val = $val;
headers_size +=
decoded_header_size(stringify!($field).len() + 1, __val.as_str().len());
decoded_header_size(stringify!($ident).len() + 1, __val.as_str().len());
if headers_size < max_header_list_size {
self.pseudo.$field = Some(__val);
} else if !self.is_over_size {
tracing::trace!("load_hpack; header list size over max");
log::trace!("load_hpack; header list size over max");
self.is_over_size = true;
}
}
@ -876,13 +856,10 @@ impl HeaderBlock {
|| name == "keep-alive"
|| name == "proxy-connection"
{
tracing::trace!("load_hpack; connection level header");
log::trace!("load_hpack; connection level header");
malformed = true;
} else if name == header::TE && value != "trailers" {
tracing::trace!(
"load_hpack; TE header not set to trailers; val={:?}",
value
);
log::trace!("load_hpack; TE header not set to trailers; val={:?}", value);
malformed = true;
} else {
reg = true;
@ -891,7 +868,7 @@ impl HeaderBlock {
if headers_size < max_header_list_size {
self.fields.append(name, value);
} else if !self.is_over_size {
tracing::trace!("load_hpack; header list size over max");
log::trace!("load_hpack; header list size over max");
self.is_over_size = true;
}
}
@ -900,35 +877,30 @@ impl HeaderBlock {
Method(v) => set_pseudo!(method, v),
Scheme(v) => set_pseudo!(scheme, v),
Path(v) => set_pseudo!(path, v),
Protocol(v) => set_pseudo!(protocol, v),
Status(v) => set_pseudo!(status, v),
}
});
if let Err(e) = res {
tracing::trace!("hpack decoding error; err={:?}", e);
log::trace!("hpack decoding error; err={:?}", e);
return Err(e.into());
}
if malformed {
tracing::trace!("malformed message");
log::trace!("malformed message");
return Err(Error::MalformedMessage);
}
Ok(())
}
fn into_encoding(self, encoder: &mut hpack::Encoder) -> EncodingHeaderBlock {
let mut hpack = BytesMut::new();
let headers = Iter {
pseudo: Some(self.pseudo),
fields: self.fields.into_iter(),
};
encoder.encode(headers, &mut hpack);
fn into_encoding(self) -> EncodingHeaderBlock {
EncodingHeaderBlock {
hpack: hpack.freeze(),
hpack: None,
headers: Iter {
pseudo: Some(self.pseudo),
fields: self.fields.into_iter(),
},
}
}
@ -961,79 +933,48 @@ impl HeaderBlock {
.map(|(name, value)| decoded_header_size(name.as_str().len(), value.len()))
.sum::<usize>()
}
/// Iterate over all pseudos and headers to see if any individual pair
/// would be too large to encode.
pub(crate) fn has_too_big_field(&self) -> bool {
macro_rules! pseudo_size {
($name:ident) => {{
self.pseudo
.$name
.as_ref()
.map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len()))
.unwrap_or(0)
}};
}
if pseudo_size!(method) > MAX_HEADER_LENGTH {
return true;
}
if pseudo_size!(scheme) > MAX_HEADER_LENGTH {
return true;
}
if pseudo_size!(authority) > MAX_HEADER_LENGTH {
return true;
}
if pseudo_size!(path) > MAX_HEADER_LENGTH {
return true;
}
        // skip :status, it's never going to be too big
for (name, value) in &self.fields {
if decoded_header_size(name.as_str().len(), value.len()) > MAX_HEADER_LENGTH {
return true;
}
}
false
}
}
fn decoded_header_size(name: usize, value: usize) -> usize {
name + value + 32
}
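The `+ 32` above is the per-entry overhead that RFC 7541 section 4.1 adds when counting headers against SETTINGS_MAX_HEADER_LIST_SIZE. A quick worked example (illustrative header only):

#[test]
fn header_size_includes_overhead() {
    // name "content-length" = 14 octets, value "1024" = 4 octets, overhead = 32.
    assert_eq!(decoded_header_size("content-length".len(), "1024".len()), 50);
}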
#[cfg(test)]
mod test {
use std::iter::FromIterator;
use http::HeaderValue;
use super::*;
use crate::frame;
use crate::hpack::{huffman, Encoder};
#[test]
fn test_nameless_header_at_resume() {
let mut encoder = Encoder::default();
let mut dst = BytesMut::new();
let headers = Headers::new(
StreamId::ZERO,
Default::default(),
HeaderMap::from_iter(vec![
(
HeaderName::from_static("hello"),
HeaderValue::from_static("world"),
),
(
HeaderName::from_static("hello"),
HeaderValue::from_static("zomg"),
),
(
HeaderName::from_static("hello"),
HeaderValue::from_static("sup"),
),
]),
);
let continuation = headers
.encode(&mut encoder, &mut (&mut dst).limit(frame::HEADER_LEN + 8))
.unwrap();
assert_eq!(17, dst.len());
assert_eq!([0, 0, 8, 1, 0, 0, 0, 0, 0], &dst[0..9]);
assert_eq!(&[0x40, 0x80 | 4], &dst[9..11]);
assert_eq!("hello", huff_decode(&dst[11..15]));
assert_eq!(0x80 | 4, dst[15]);
let mut world = dst[16..17].to_owned();
dst.clear();
assert!(continuation
.encode(&mut (&mut dst).limit(frame::HEADER_LEN + 16))
.is_none());
world.extend_from_slice(&dst[9..12]);
assert_eq!("world", huff_decode(&world));
assert_eq!(24, dst.len());
assert_eq!([0, 0, 15, 9, 4, 0, 0, 0, 0], &dst[0..9]);
// // Next is not indexed
assert_eq!(&[15, 47, 0x80 | 3], &dst[12..15]);
assert_eq!("zomg", huff_decode(&dst[15..18]));
assert_eq!(&[15, 47, 0x80 | 3], &dst[18..21]);
assert_eq!("sup", huff_decode(&dst[21..]));
}
fn huff_decode(src: &[u8]) -> BytesMut {
let mut buf = BytesMut::new();
huffman::decode(src, &mut buf).unwrap()
}
}

1
third_party/rust/h2/src/frame/mod.rs (vendored)

@ -15,6 +15,7 @@ use std::fmt;
/// let buf: [u8; 4] = [0, 0, 0, 1];
/// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32));
/// ```
#[macro_escape]
macro_rules! unpack_octets_4 {
// TODO: Get rid of this macro
($buf:expr, $offset:expr, $tip:ty) => {

2
third_party/rust/h2/src/frame/ping.rs поставляемый
Просмотреть файл

@ -85,7 +85,7 @@ impl Ping {
pub fn encode<B: BufMut>(&self, dst: &mut B) {
let sz = self.payload.len();
tracing::trace!("encoding PING; ack={} len={}", self.ack, sz);
log::trace!("encoding PING; ack={} len={}", self.ack, sz);
let flags = if self.ack { ACK_FLAG } else { 0 };
let head = Head::new(Kind::Ping, flags, StreamId::zero());

2
third_party/rust/h2/src/frame/reason.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
use std::fmt;
/// HTTP/2 error codes.
/// HTTP/2.0 error codes.
///
/// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the
/// reasons for the stream or connection error. For example,

4
third_party/rust/h2/src/frame/reset.rs (vendored)

@ -2,7 +2,7 @@ use crate::frame::{self, Error, Head, Kind, Reason, StreamId};
use bytes::BufMut;
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[derive(Debug, Eq, PartialEq)]
pub struct Reset {
stream_id: StreamId,
error_code: Reason,
@ -38,7 +38,7 @@ impl Reset {
}
pub fn encode<B: BufMut>(&self, dst: &mut B) {
tracing::trace!(
log::trace!(
"encoding RESET; id={:?} code={:?}",
self.stream_id,
self.error_code

37
third_party/rust/h2/src/frame/settings.rs (vendored)

@ -13,7 +13,6 @@ pub struct Settings {
initial_window_size: Option<u32>,
max_frame_size: Option<u32>,
max_header_list_size: Option<u32>,
enable_connect_protocol: Option<u32>,
}
/// An enum that lists all valid settings that can be sent in a SETTINGS
@ -28,7 +27,6 @@ pub enum Setting {
InitialWindowSize(u32),
MaxFrameSize(u32),
MaxHeaderListSize(u32),
EnableConnectProtocol(u32),
}
#[derive(Copy, Clone, Eq, PartialEq, Default)]
@ -101,22 +99,14 @@ impl Settings {
self.max_header_list_size = size;
}
pub fn is_push_enabled(&self) -> Option<bool> {
self.enable_push.map(|val| val != 0)
pub fn is_push_enabled(&self) -> bool {
self.enable_push.unwrap_or(1) != 0
}
pub fn set_enable_push(&mut self, enable: bool) {
self.enable_push = Some(enable as u32);
}
pub fn is_extended_connect_protocol_enabled(&self) -> Option<bool> {
self.enable_connect_protocol.map(|val| val != 0)
}
pub fn set_enable_connect_protocol(&mut self, val: Option<u32>) {
self.enable_connect_protocol = val;
}
pub fn header_table_size(&self) -> Option<u32> {
self.header_table_size
}
@ -151,7 +141,7 @@ impl Settings {
// Ensure the payload length is correct, each setting is 6 bytes long.
if payload.len() % 6 != 0 {
tracing::debug!("invalid settings payload length; len={:?}", payload.len());
log::debug!("invalid settings payload length; len={:?}", payload.len());
return Err(Error::InvalidPayloadAckSettings);
}
@ -191,14 +181,6 @@ impl Settings {
Some(MaxHeaderListSize(val)) => {
settings.max_header_list_size = Some(val);
}
Some(EnableConnectProtocol(val)) => match val {
0 | 1 => {
settings.enable_connect_protocol = Some(val);
}
_ => {
return Err(Error::InvalidSettingValue);
}
},
None => {}
}
}
@ -217,13 +199,13 @@ impl Settings {
let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero());
let payload_len = self.payload_len();
tracing::trace!("encoding SETTINGS; len={}", payload_len);
log::trace!("encoding SETTINGS; len={}", payload_len);
head.encode(payload_len, dst);
// Encode the settings
self.for_each(|setting| {
tracing::trace!("encoding setting; val={:?}", setting);
log::trace!("encoding setting; val={:?}", setting);
setting.encode(dst)
});
}
@ -254,10 +236,6 @@ impl Settings {
if let Some(v) = self.max_header_list_size {
f(MaxHeaderListSize(v));
}
if let Some(v) = self.enable_connect_protocol {
f(EnableConnectProtocol(v));
}
}
}
@ -291,9 +269,6 @@ impl fmt::Debug for Settings {
Setting::MaxHeaderListSize(v) => {
builder.field("max_header_list_size", &v);
}
Setting::EnableConnectProtocol(v) => {
builder.field("enable_connect_protocol", &v);
}
});
builder.finish()
@ -316,7 +291,6 @@ impl Setting {
4 => Some(InitialWindowSize(val)),
5 => Some(MaxFrameSize(val)),
6 => Some(MaxHeaderListSize(val)),
8 => Some(EnableConnectProtocol(val)),
_ => None,
}
}
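Tying the settings hunks together: as the payload-length check above notes, each SETTINGS entry occupies 6 bytes, a 16-bit identifier (the mapping shown in `from_id`, where identifier 8, the extended-CONNECT setting, is what this backout drops) followed by a 32-bit value. A minimal sketch of that encoding (illustrative helper, not the crate's code):

    // Each SETTINGS entry is 6 bytes on the wire: a big-endian u16 identifier
    // followed by a big-endian u32 value (hence the `% 6` payload-length check).
    fn encode_setting(id: u16, value: u32, dst: &mut Vec<u8>) {
        dst.extend_from_slice(&id.to_be_bytes());
        dst.extend_from_slice(&value.to_be_bytes());
    }

    // e.g. SETTINGS_INITIAL_WINDOW_SIZE (identifier 4) set to 65_535:
    //     encode_setting(4, 65_535, &mut payload);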
@ -348,7 +322,6 @@ impl Setting {
InitialWindowSize(v) => (4, v),
MaxFrameSize(v) => (5, v),
MaxHeaderListSize(v) => (6, v),
EnableConnectProtocol(v) => (8, v),
};
dst.put_u16(kind);


@ -48,7 +48,7 @@ impl WindowUpdate {
}
pub fn encode<B: BufMut>(&self, dst: &mut B) {
tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id);
log::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id);
let head = Head::new(Kind::WindowUpdate, 0, self.stream_id);
head.encode(4, dst);
dst.put_u32(self.size_increment);

28
third_party/rust/h2/src/fuzz_bridge.rs vendored

@ -1,28 +0,0 @@
#[cfg(fuzzing)]
pub mod fuzz_logic {
use crate::hpack;
use bytes::BytesMut;
use http::header::HeaderName;
use std::io::Cursor;
pub fn fuzz_hpack(data_: &[u8]) {
let mut decoder_ = hpack::Decoder::new(0);
let mut buf = BytesMut::new();
buf.extend(data_);
let _dec_res = decoder_.decode(&mut Cursor::new(&mut buf), |_h| {});
if let Ok(s) = std::str::from_utf8(data_) {
if let Ok(h) = http::Method::from_bytes(s.as_bytes()) {
let m_ = hpack::Header::Method(h);
let mut encoder = hpack::Encoder::new(0, 0);
let _res = encode(&mut encoder, vec![m_]);
}
}
}
fn encode(e: &mut hpack::Encoder, hdrs: Vec<hpack::Header<Option<HeaderName>>>) -> BytesMut {
let mut dst = BytesMut::with_capacity(1024);
e.encode(&mut hdrs.into_iter(), &mut dst);
dst
}
}

150
third_party/rust/h2/src/hpack/decoder.rs vendored

@ -142,12 +142,6 @@ struct Table {
max_size: usize,
}
struct StringMarker {
offset: usize,
len: usize,
string: Option<Bytes>,
}
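The `StringMarker` being removed here let the newer decoder parse a string tentatively, recording only its offset and length, and consume the buffer later once both strings of a literal were known to be complete; the restored code below goes back to decoding strings eagerly. A generic sketch of that save-and-restore pattern over a `Cursor` (illustrative only, not the crate's implementation):

    use std::io::Cursor;

    // Generic "peek then commit" pattern: remember the cursor position, attempt
    // a fallible parse, and rewind so the buffer is untouched if the parse fails.
    fn try_parse<T>(
        buf: &mut Cursor<&[u8]>,
        parse: impl Fn(&mut Cursor<&[u8]>) -> Option<T>,
    ) -> Option<T> {
        let start = buf.position();
        match parse(buf) {
            Some(v) => Some(v),
            None => {
                buf.set_position(start); // nothing is consumed on failure
                None
            }
        }
    }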
// ===== impl Decoder =====
impl Decoder {
@ -189,10 +183,7 @@ impl Decoder {
self.last_max_update = size;
}
let span = tracing::trace_span!("hpack::decode");
let _e = span.enter();
tracing::trace!("decode");
log::trace!("decode");
while let Some(ty) = peek_u8(src) {
// At this point we are always at the beginning of the next block
@ -200,14 +191,14 @@ impl Decoder {
// determined from the first byte.
match Representation::load(ty)? {
Indexed => {
tracing::trace!(rem = src.remaining(), kind = %"Indexed");
log::trace!(" Indexed; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_indexed(src)?;
consume(src);
f(entry);
}
LiteralWithIndexing => {
tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing");
log::trace!(" LiteralWithIndexing; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_literal(src, true)?;
@ -218,14 +209,14 @@ impl Decoder {
f(entry);
}
LiteralWithoutIndexing => {
tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing");
log::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_literal(src, false)?;
consume(src);
f(entry);
}
LiteralNeverIndexed => {
tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed");
log::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining());
can_resize = false;
let entry = self.decode_literal(src, false)?;
consume(src);
@ -235,7 +226,7 @@ impl Decoder {
f(entry);
}
SizeUpdate => {
tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate");
log::trace!(" SizeUpdate; rem={:?}", src.remaining());
if !can_resize {
return Err(DecoderError::InvalidMaxDynamicSize);
}
@ -257,10 +248,10 @@ impl Decoder {
return Err(DecoderError::InvalidMaxDynamicSize);
}
tracing::debug!(
from = self.table.size(),
to = new_size,
"Decoder changed max table size"
log::debug!(
"Decoder changed max table size from {} to {}",
self.table.size(),
new_size
);
self.table.set_max_size(new_size);
@ -285,13 +276,10 @@ impl Decoder {
// First, read the header name
if table_idx == 0 {
let old_pos = buf.position();
let name_marker = self.try_decode_string(buf)?;
let value_marker = self.try_decode_string(buf)?;
buf.set_position(old_pos);
// Read the name as a literal
let name = name_marker.consume(buf);
let value = value_marker.consume(buf);
let name = self.decode_string(buf)?;
let value = self.decode_string(buf)?;
Header::new(name, value)
} else {
let e = self.table.get(table_idx)?;
@ -301,11 +289,7 @@ impl Decoder {
}
}
fn try_decode_string(
&mut self,
buf: &mut Cursor<&mut BytesMut>,
) -> Result<StringMarker, DecoderError> {
let old_pos = buf.position();
fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<Bytes, DecoderError> {
const HUFF_FLAG: u8 = 0b1000_0000;
// The first bit in the first byte contains the huffman encoded flag.
@ -318,38 +302,25 @@ impl Decoder {
let len = decode_int(buf, 7)?;
if len > buf.remaining() {
tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",);
log::trace!(
"decode_string underflow; len={}; remaining={}",
len,
buf.remaining()
);
return Err(DecoderError::NeedMore(NeedMore::StringUnderflow));
}
let offset = (buf.position() - old_pos) as usize;
if huff {
let ret = {
let raw = &buf.chunk()[..len];
huffman::decode(raw, &mut self.buffer).map(|buf| StringMarker {
offset,
len,
string: Some(BytesMut::freeze(buf)),
})
let raw = &buf.bytes()[..len];
huffman::decode(raw, &mut self.buffer).map(BytesMut::freeze)
};
buf.advance(len);
ret
} else {
buf.advance(len);
Ok(StringMarker {
offset,
len,
string: None,
})
return ret;
}
}
fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<Bytes, DecoderError> {
let old_pos = buf.position();
let marker = self.try_decode_string(buf)?;
buf.set_position(old_pos);
Ok(marker.consume(buf))
Ok(take(buf, len))
}
}
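The `decode_string` logic in this hunk reads the Huffman flag from the top bit and then the string length as a 7-bit prefix integer. For reference, a standalone sketch of HPACK prefix-integer decoding per RFC 7541 §5.1 (not the crate's `decode_int`):

    // Minimal sketch of HPACK prefix-integer decoding (RFC 7541 §5.1). String
    // lengths use a 7-bit prefix; values that do not fit continue in 7-bit
    // groups, with the high bit of each extra byte acting as a continuation flag.
    // Returns (value, bytes consumed), or None if the input is truncated.
    fn decode_prefix_int(buf: &[u8], prefix_bits: u32) -> Option<(usize, usize)> {
        let max_prefix = (1usize << prefix_bits) - 1;
        let first = *buf.first()?;
        let mut value = (first & max_prefix as u8) as usize;
        let mut consumed = 1;
        if value < max_prefix {
            return Some((value, consumed));
        }
        let mut shift = 0u32;
        loop {
            let byte = *buf.get(consumed)?;
            consumed += 1;
            value += ((byte & 0x7f) as usize) << shift;
            shift += 7;
            if byte & 0x80 == 0 {
                return Some((value, consumed));
            }
        }
    }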
@ -449,7 +420,7 @@ fn decode_int<B: Buf>(buf: &mut B, prefix_size: u8) -> Result<usize, DecoderErro
fn peek_u8<B: Buf>(buf: &mut B) -> Option<u8> {
if buf.has_remaining() {
Some(buf.chunk()[0])
Some(buf.bytes()[0])
} else {
None
}
@ -463,19 +434,6 @@ fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes {
head.freeze()
}
impl StringMarker {
fn consume(self, buf: &mut Cursor<&mut BytesMut>) -> Bytes {
buf.advance(self.offset);
match self.string {
Some(string) => {
buf.advance(self.len);
string
}
None => take(buf, self.len),
}
}
}
fn consume(buf: &mut Cursor<&mut BytesMut>) {
// remove bytes from the internal BytesMut when they have been successfully
// decoded. This is a more permanent cursor position, which will be
@ -620,13 +578,13 @@ pub fn get_static(idx: usize) -> Header {
use http::header::HeaderValue;
match idx {
1 => Header::Authority(BytesStr::from_static("")),
1 => Header::Authority(from_static("")),
2 => Header::Method(Method::GET),
3 => Header::Method(Method::POST),
4 => Header::Path(BytesStr::from_static("/")),
5 => Header::Path(BytesStr::from_static("/index.html")),
6 => Header::Scheme(BytesStr::from_static("http")),
7 => Header::Scheme(BytesStr::from_static("https")),
4 => Header::Path(from_static("/")),
5 => Header::Path(from_static("/index.html")),
6 => Header::Scheme(from_static("http")),
7 => Header::Scheme(from_static("https")),
8 => Header::Status(StatusCode::OK),
9 => Header::Status(StatusCode::NO_CONTENT),
10 => Header::Status(StatusCode::PARTIAL_CONTENT),
@ -826,6 +784,10 @@ pub fn get_static(idx: usize) -> Header {
}
}
fn from_static(s: &'static str) -> BytesStr {
unsafe { BytesStr::from_utf8_unchecked(Bytes::from_static(s.as_bytes())) }
}
#[cfg(test)]
mod test {
use super::*;
@ -890,51 +852,7 @@ mod test {
fn huff_encode(src: &[u8]) -> BytesMut {
let mut buf = BytesMut::new();
huffman::encode(src, &mut buf);
huffman::encode(src, &mut buf).unwrap();
buf
}
#[test]
fn test_decode_continuation_header_with_non_huff_encoded_name() {
let mut de = Decoder::new(0);
let value = huff_encode(b"bar");
let mut buf = BytesMut::new();
// header name is non_huff encoded
buf.extend(&[0b01000000, 0x00 | 3]);
buf.extend(b"foo");
// header value is partial
buf.extend(&[0x80 | 3]);
buf.extend(&value[0..1]);
let mut res = vec![];
let e = de
.decode(&mut Cursor::new(&mut buf), |h| {
res.push(h);
})
.unwrap_err();
// decode error because the header value is partial
assert_eq!(e, DecoderError::NeedMore(NeedMore::StringUnderflow));
// extend buf with the remaining header value
buf.extend(&value[1..]);
let _ = de
.decode(&mut Cursor::new(&mut buf), |h| {
res.push(h);
})
.unwrap();
assert_eq!(res.len(), 1);
assert_eq!(de.table.size(), 0);
match res[0] {
Header::Field {
ref name,
ref value,
} => {
assert_eq!(name, "foo");
assert_eq!(value, "bar");
}
_ => panic!(),
}
}
}

261
third_party/rust/h2/src/hpack/encoder.rs vendored

@ -1,15 +1,34 @@
use super::table::{Index, Table};
use super::{huffman, Header};
use bytes::{BufMut, BytesMut};
use bytes::{buf::ext::Limit, BufMut, BytesMut};
use http::header::{HeaderName, HeaderValue};
type DstBuf<'a> = Limit<&'a mut BytesMut>;
#[derive(Debug)]
pub struct Encoder {
table: Table,
size_update: Option<SizeUpdate>,
}
#[derive(Debug)]
pub enum Encode {
Full,
Partial(EncodeState),
}
#[derive(Debug)]
pub struct EncodeState {
index: Index,
value: Option<HeaderValue>,
}
#[derive(Debug, PartialEq, Eq)]
pub enum EncoderError {
BufferOverflow,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum SizeUpdate {
One(usize),
@ -58,24 +77,56 @@ impl Encoder {
}
/// Encode a set of headers into the provided buffer
pub fn encode<I>(&mut self, headers: I, dst: &mut BytesMut)
pub fn encode<I>(
&mut self,
resume: Option<EncodeState>,
headers: &mut I,
dst: &mut DstBuf<'_>,
) -> Encode
where
I: IntoIterator<Item = Header<Option<HeaderName>>>,
I: Iterator<Item = Header<Option<HeaderName>>>,
{
let span = tracing::trace_span!("hpack::encode");
let _e = span.enter();
let pos = position(dst);
self.encode_size_updates(dst);
if let Err(e) = self.encode_size_updates(dst) {
if e == EncoderError::BufferOverflow {
rewind(dst, pos);
}
unreachable!("encode_size_updates errored");
}
let mut last_index = None;
if let Some(resume) = resume {
let pos = position(dst);
let res = match resume.value {
Some(ref value) => self.encode_header_without_name(&resume.index, value, dst),
None => self.encode_header(&resume.index, dst),
};
if res.is_err() {
rewind(dst, pos);
return Encode::Partial(resume);
}
last_index = Some(resume.index);
}
for header in headers {
let pos = position(dst);
match header.reify() {
// The header has an associated name. In which case, try to
// index it in the table.
Ok(header) => {
let index = self.table.index(header);
self.encode_header(&index, dst);
let res = self.encode_header(&index, dst);
if res.is_err() {
rewind(dst, pos);
return Encode::Partial(EncodeState { index, value: None });
}
last_index = Some(index);
}
@ -84,61 +135,77 @@ impl Encoder {
// which case, we skip table lookup and just use the same index
// as the previous entry.
Err(value) => {
self.encode_header_without_name(
let res = self.encode_header_without_name(
last_index.as_ref().unwrap_or_else(|| {
panic!("encoding header without name, but no previous index to use for name");
}),
&value,
dst,
);
if res.is_err() {
rewind(dst, pos);
return Encode::Partial(EncodeState {
index: last_index.unwrap(), // checked just above
value: Some(value),
});
}
}
}
};
}
Encode::Full
}
fn encode_size_updates(&mut self, dst: &mut BytesMut) {
fn encode_size_updates(&mut self, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> {
match self.size_update.take() {
Some(SizeUpdate::One(val)) => {
self.table.resize(val);
encode_size_update(val, dst);
encode_size_update(val, dst)?;
}
Some(SizeUpdate::Two(min, max)) => {
self.table.resize(min);
self.table.resize(max);
encode_size_update(min, dst);
encode_size_update(max, dst);
encode_size_update(min, dst)?;
encode_size_update(max, dst)?;
}
None => {}
}
Ok(())
}
fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) {
fn encode_header(&mut self, index: &Index, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> {
match *index {
Index::Indexed(idx, _) => {
encode_int(idx, 7, 0x80, dst);
encode_int(idx, 7, 0x80, dst)?;
}
Index::Name(idx, _) => {
let header = self.table.resolve(&index);
encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst);
encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst)?;
}
Index::Inserted(_) => {
let header = self.table.resolve(&index);
assert!(!header.is_sensitive());
if !dst.has_remaining_mut() {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8(0b0100_0000);
encode_str(header.name().as_slice(), dst);
encode_str(header.value_slice(), dst);
encode_str(header.name().as_slice(), dst)?;
encode_str(header.value_slice(), dst)?;
}
Index::InsertedValue(idx, _) => {
let header = self.table.resolve(&index);
assert!(!header.is_sensitive());
encode_int(idx, 6, 0b0100_0000, dst);
encode_str(header.value_slice(), dst);
encode_int(idx, 6, 0b0100_0000, dst)?;
encode_str(header.value_slice(), dst)?;
}
Index::NotIndexed(_) => {
let header = self.table.resolve(&index);
@ -148,17 +215,19 @@ impl Encoder {
header.value_slice(),
header.is_sensitive(),
dst,
);
)?;
}
}
Ok(())
}
fn encode_header_without_name(
&mut self,
last: &Index,
value: &HeaderValue,
dst: &mut BytesMut,
) {
dst: &mut DstBuf<'_>,
) -> Result<(), EncoderError> {
match *last {
Index::Indexed(..)
| Index::Name(..)
@ -166,7 +235,7 @@ impl Encoder {
| Index::InsertedValue(..) => {
let idx = self.table.resolve_idx(last);
encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst);
encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst)?;
}
Index::NotIndexed(_) => {
let last = self.table.resolve(last);
@ -176,9 +245,11 @@ impl Encoder {
value.as_ref(),
value.is_sensitive(),
dst,
);
)?;
}
}
Ok(())
}
}
@ -188,32 +259,52 @@ impl Default for Encoder {
}
}
fn encode_size_update(val: usize, dst: &mut BytesMut) {
fn encode_size_update<B: BufMut>(val: usize, dst: &mut B) -> Result<(), EncoderError> {
encode_int(val, 5, 0b0010_0000, dst)
}
fn encode_not_indexed(name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut) {
fn encode_not_indexed(
name: usize,
value: &[u8],
sensitive: bool,
dst: &mut DstBuf<'_>,
) -> Result<(), EncoderError> {
if sensitive {
encode_int(name, 4, 0b10000, dst);
encode_int(name, 4, 0b10000, dst)?;
} else {
encode_int(name, 4, 0, dst);
encode_int(name, 4, 0, dst)?;
}
encode_str(value, dst);
encode_str(value, dst)?;
Ok(())
}
fn encode_not_indexed2(name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut) {
fn encode_not_indexed2(
name: &[u8],
value: &[u8],
sensitive: bool,
dst: &mut DstBuf<'_>,
) -> Result<(), EncoderError> {
if !dst.has_remaining_mut() {
return Err(EncoderError::BufferOverflow);
}
if sensitive {
dst.put_u8(0b10000);
} else {
dst.put_u8(0);
}
encode_str(name, dst);
encode_str(value, dst);
encode_str(name, dst)?;
encode_str(value, dst)?;
Ok(())
}
fn encode_str(val: &[u8], dst: &mut BytesMut) {
fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> {
if !dst.has_remaining_mut() {
return Err(EncoderError::BufferOverflow);
}
if !val.is_empty() {
let idx = position(dst);
@ -221,43 +312,50 @@ fn encode_str(val: &[u8], dst: &mut BytesMut) {
dst.put_u8(0);
// Encode with huffman
huffman::encode(val, dst);
huffman::encode(val, dst)?;
let huff_len = position(dst) - (idx + 1);
if encode_int_one_byte(huff_len, 7) {
// Write the string head
dst[idx] = 0x80 | huff_len as u8;
dst.get_mut()[idx] = 0x80 | huff_len as u8;
} else {
// Write the head to a placeholder
// Write the head to a placeholer
const PLACEHOLDER_LEN: usize = 8;
let mut buf = [0u8; PLACEHOLDER_LEN];
let head_len = {
let mut head_dst = &mut buf[..];
encode_int(huff_len, 7, 0x80, &mut head_dst);
encode_int(huff_len, 7, 0x80, &mut head_dst)?;
PLACEHOLDER_LEN - head_dst.remaining_mut()
};
if dst.remaining_mut() < head_len {
return Err(EncoderError::BufferOverflow);
}
// This is just done to reserve space in the destination
dst.put_slice(&buf[1..head_len]);
let written = dst.get_mut();
// Shift the header forward
for i in 0..huff_len {
let src_i = idx + 1 + (huff_len - (i + 1));
let dst_i = idx + head_len + (huff_len - (i + 1));
dst[dst_i] = dst[src_i];
written[dst_i] = written[src_i];
}
// Copy in the head
for i in 0..head_len {
dst[idx + i] = buf[i];
written[idx + i] = buf[i];
}
}
} else {
// Write an empty string
dst.put_u8(0);
}
Ok(())
}
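What `encode_str` is doing in this hunk, in isolation: an HPACK string literal is a length prefix (Huffman flag plus a 7-bit prefix integer) followed by the data, but the Huffman-encoded length is only known after encoding, so the code writes a one-byte placeholder, encodes, and then either patches that byte or shifts the encoded bytes forward to make room for a multi-byte prefix. A compact sketch of the same idea over a plain `Vec` (illustrative; `body` stands in for the Huffman output):

    // Write a Huffman-flagged HPACK string literal whose length prefix is only
    // known after the body is produced: reserve one byte, encode, then patch or
    // rebuild with a longer prefix (RFC 7541 §5.2).
    fn write_with_length_prefix(body: &[u8], dst: &mut Vec<u8>) {
        let head = dst.len();
        dst.push(0);                  // placeholder for the length byte
        dst.extend_from_slice(body);  // stand-in for huffman::encode
        let len = dst.len() - head - 1;
        if len < 127 {
            // Fits the 7-bit prefix: patch the placeholder in place.
            dst[head] = 0x80 | len as u8;
        } else {
            // Needs a multi-byte prefix integer: rebuild the tail behind it.
            let tail = dst.split_off(head + 1);
            dst.truncate(head);
            dst.push(0x80 | 0x7f);
            let mut rest = len - 127;
            while rest >= 128 {
                dst.push(0x80 | (rest & 0x7f) as u8);
                rest >>= 7;
            }
            dst.push(rest as u8);
            dst.extend_from_slice(&tail);
        }
    }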
/// Encode an integer into the given destination buffer
@ -266,25 +364,47 @@ fn encode_int<B: BufMut>(
prefix_bits: usize, // The number of bits in the prefix
first_byte: u8, // The base upon which to start encoding the int
dst: &mut B,
) {
) -> Result<(), EncoderError> {
let mut rem = dst.remaining_mut();
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
if encode_int_one_byte(value, prefix_bits) {
dst.put_u8(first_byte | value as u8);
return;
return Ok(());
}
let low = (1 << prefix_bits) - 1;
value -= low;
if value > 0x0fff_ffff {
panic!("value out of range");
}
dst.put_u8(first_byte | low as u8);
rem -= 1;
while value >= 128 {
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8(0b1000_0000 | value as u8);
rem -= 1;
value >>= 7;
}
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8(value as u8);
Ok(())
}
/// Returns true if the int can be fully encoded in the first byte.
@ -292,14 +412,19 @@ fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool {
value < (1 << prefix_bits) - 1
}
fn position(buf: &BytesMut) -> usize {
buf.len()
fn position(buf: &DstBuf<'_>) -> usize {
buf.get_ref().len()
}
fn rewind(buf: &mut DstBuf<'_>, pos: usize) {
buf.get_mut().truncate(pos);
}
#[cfg(test)]
mod test {
use super::*;
use crate::hpack::Header;
use bytes::buf::BufMutExt;
use http::*;
#[test]
@ -677,15 +802,49 @@ mod test {
}
#[test]
fn test_large_size_update() {
fn test_nameless_header_at_resume() {
let mut encoder = Encoder::default();
let max_len = 15;
let mut dst = BytesMut::with_capacity(64);
encoder.update_max_size(1912930560);
assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update);
let mut input = vec![
Header::Field {
name: Some("hello".parse().unwrap()),
value: HeaderValue::from_bytes(b"world").unwrap(),
},
Header::Field {
name: None,
value: HeaderValue::from_bytes(b"zomg").unwrap(),
},
Header::Field {
name: None,
value: HeaderValue::from_bytes(b"sup").unwrap(),
},
]
.into_iter();
let mut dst = BytesMut::with_capacity(6);
encoder.encode_size_updates(&mut dst);
assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]);
let resume = match encoder.encode(None, &mut input, &mut (&mut dst).limit(max_len)) {
Encode::Partial(r) => r,
_ => panic!("encode should be partial"),
};
assert_eq!(&[0x40, 0x80 | 4], &dst[0..2]);
assert_eq!("hello", huff_decode(&dst[2..6]));
assert_eq!(0x80 | 4, dst[6]);
assert_eq!("world", huff_decode(&dst[7..11]));
dst.clear();
match encoder.encode(Some(resume), &mut input, &mut (&mut dst).limit(max_len)) {
Encode::Full => {}
unexpected => panic!("resume returned unexpected: {:?}", unexpected),
}
// Next is not indexed
assert_eq!(&[15, 47, 0x80 | 3], &dst[0..3]);
assert_eq!("zomg", huff_decode(&dst[3..6]));
assert_eq!(&[15, 47, 0x80 | 3], &dst[6..9]);
assert_eq!("sup", huff_decode(&dst[9..]));
}
#[test]
@ -696,7 +855,7 @@ mod test {
fn encode(e: &mut Encoder, hdrs: Vec<Header<Option<HeaderName>>>) -> BytesMut {
let mut dst = BytesMut::with_capacity(1024);
e.encode(&mut hdrs.into_iter(), &mut dst);
e.encode(None, &mut hdrs.into_iter(), &mut (&mut dst).limit(1024));
dst
}

Some files were not shown because too many files changed in this diff.