Bug 1710421 - [webdriver] Update warp and hyper dependencies. r=mjf,webdriver-reviewers

At the same time, update mdns_service to socket2 0.4 to avoid a duplicate.

Differential Revision: https://phabricator.services.mozilla.com/D147479
This commit is contained in:
Mike Hommey 2022-06-01 06:44:07 +00:00
Родитель 2edc1ca2ed
Коммит 2bfc571ce5
1119 изменённых файлов: 64078 добавлений и 93118 удалений

205
Cargo.lock сгенерированный
Просмотреть файл

@ -554,12 +554,6 @@ dependencies = [
"iovec", "iovec",
] ]
[[package]]
name = "bytes"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"
[[package]] [[package]]
name = "bytes" name = "bytes"
version = "1.1.0" version = "1.1.0"
@ -1741,6 +1735,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b"
[[package]]
name = "form_urlencoded"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"
dependencies = [
"matches",
"percent-encoding",
]
[[package]] [[package]]
name = "freetype" name = "freetype"
version = "0.7.0" version = "0.7.0"
@ -1866,7 +1870,7 @@ dependencies = [
"futures-sink", "futures-sink",
"futures-task", "futures-task",
"memchr", "memchr",
"pin-project-lite 0.2.9", "pin-project-lite",
"pin-utils", "pin-utils",
"slab", "slab",
] ]
@ -2302,21 +2306,21 @@ dependencies = [
[[package]] [[package]]
name = "h2" name = "h2"
version = "0.2.5" version = "0.3.13"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "79b7246d7e4b979c03fa093da39cfb3617a96bbeee6310af63991668d7e843ff" checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57"
dependencies = [ dependencies = [
"bytes 0.5.6", "bytes 1.1.0",
"fnv", "fnv",
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"futures-util", "futures-util",
"http", "http",
"indexmap", "indexmap",
"log",
"slab", "slab",
"tokio 0.2.25", "tokio 1.17.0",
"tokio-util", "tokio-util 0.7.2",
"tracing",
] ]
[[package]] [[package]]
@ -2386,23 +2390,24 @@ checksum = "dfa686283ad6dd069f105e5ab091b04c62850d3e4cf5d67debad1933f55023df"
[[package]] [[package]]
name = "http" name = "http"
version = "0.2.5" version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1323096b05d41827dadeaee54c9981958c0f94e670bc94ed80037d1a7b8b186b" checksum = "ff8670570af52249509a86f5e3e18a08c60b177071826898fde8997cf5f6bfbb"
dependencies = [ dependencies = [
"bytes 1.1.0", "bytes 1.1.0",
"fnv", "fnv",
"itoa 0.4.999", "itoa 1.0.2",
] ]
[[package]] [[package]]
name = "http-body" name = "http-body"
version = "0.3.1" version = "0.4.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1"
dependencies = [ dependencies = [
"bytes 0.5.6", "bytes 1.1.0",
"http", "http",
"pin-project-lite",
] ]
[[package]] [[package]]
@ -2452,11 +2457,11 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]] [[package]]
name = "hyper" name = "hyper"
version = "0.13.6" version = "0.14.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6e7655b9594024ad0ee439f3b5a7299369dc2a3f459b47c696f9ff676f9aa1f" checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2"
dependencies = [ dependencies = [
"bytes 0.5.6", "bytes 1.1.0",
"futures-channel", "futures-channel",
"futures-core", "futures-core",
"futures-util", "futures-util",
@ -2464,13 +2469,13 @@ dependencies = [
"http", "http",
"http-body", "http-body",
"httparse", "httparse",
"itoa 0.4.999", "httpdate",
"log", "itoa 1.0.2",
"pin-project", "pin-project-lite",
"socket2", "socket2",
"time 0.1.43", "tokio 1.17.0",
"tokio 0.2.25",
"tower-service", "tower-service",
"tracing",
"want", "want",
] ]
@ -2778,7 +2783,7 @@ dependencies = [
"fluent-fallback", "fluent-fallback",
"fluent-testing", "fluent-testing",
"futures 0.3.21", "futures 0.3.21",
"pin-project-lite 0.2.9", "pin-project-lite",
"replace_with", "replace_with",
"rustc-hash", "rustc-hash",
"serial_test", "serial_test",
@ -3937,30 +3942,24 @@ dependencies = [
[[package]] [[package]]
name = "pin-project" name = "pin-project"
version = "0.4.29" version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9615c18d31137579e9ff063499264ddc1278e7b1982757ebc111028c4d1dc909" checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e"
dependencies = [ dependencies = [
"pin-project-internal", "pin-project-internal",
] ]
[[package]] [[package]]
name = "pin-project-internal" name = "pin-project-internal"
version = "0.4.29" version = "1.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "044964427019eed9d49d9d5bbce6047ef18f37100ea400912a9fa4a3523ab12a" checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn",
] ]
[[package]]
name = "pin-project-lite"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777"
[[package]] [[package]]
name = "pin-project-lite" name = "pin-project-lite"
version = "0.2.9" version = "0.2.9"
@ -4602,14 +4601,14 @@ dependencies = [
[[package]] [[package]]
name = "serde_urlencoded" name = "serde_urlencoded"
version = "0.6.1" version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ec5d77e2d4c73717816afac02670d5c4f534ea95ed430442cad02e7a6e32c97" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd"
dependencies = [ dependencies = [
"dtoa", "form_urlencoded",
"itoa 0.4.999", "itoa 1.0.2",
"ryu",
"serde", "serde",
"url",
] ]
[[package]] [[package]]
@ -4757,11 +4756,10 @@ dependencies = [
[[package]] [[package]]
name = "socket2" name = "socket2"
version = "0.3.19" version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0"
dependencies = [ dependencies = [
"cfg-if 1.0.0",
"libc", "libc",
"winapi", "winapi",
] ]
@ -5153,23 +5151,6 @@ dependencies = [
"tokio-uds", "tokio-uds",
] ]
[[package]]
name = "tokio"
version = "0.2.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6703a273949a90131b290be1fe7b039d0fc884aa1935860dfcbe056f28cd8092"
dependencies = [
"bytes 0.5.6",
"fnv",
"futures-core",
"iovec",
"lazy_static",
"memchr",
"mio 0.6.23",
"pin-project-lite 0.1.12",
"slab",
]
[[package]] [[package]]
name = "tokio" name = "tokio"
version = "1.17.0" version = "1.17.0"
@ -5177,10 +5158,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee"
dependencies = [ dependencies = [
"bytes 1.1.0", "bytes 1.1.0",
"libc",
"memchr", "memchr",
"mio 0.8.0",
"num_cpus", "num_cpus",
"pin-project-lite 0.2.9", "pin-project-lite",
"socket2",
"tokio-macros", "tokio-macros",
"winapi",
] ]
[[package]] [[package]]
@ -5261,6 +5246,17 @@ dependencies = [
"tokio-io", "tokio-io",
] ]
[[package]]
name = "tokio-stream"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio 1.17.0",
]
[[package]] [[package]]
name = "tokio-tcp" name = "tokio-tcp"
version = "0.1.4" version = "0.1.4"
@ -5339,16 +5335,23 @@ dependencies = [
[[package]] [[package]]
name = "tokio-util" name = "tokio-util"
version = "0.3.1" version = "0.6.999"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499"
dependencies = [ dependencies = [
"bytes 0.5.6", "tokio-util 0.7.2",
]
[[package]]
name = "tokio-util"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f988a1a1adc2fb21f9c12aa96441da33a1728193ae0b95d2be22dbd17fcb4e5c"
dependencies = [
"bytes 1.1.0",
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"log", "pin-project-lite",
"pin-project-lite 0.1.12", "tokio 1.17.0",
"tokio 0.2.25", "tracing",
] ]
[[package]] [[package]]
@ -5372,6 +5375,39 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6"
[[package]]
name = "tracing"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d0ecdcb44a79f0fe9844f0c4f33a342cbcbb5117de8001e6ba0dc2351327d09"
dependencies = [
"cfg-if 1.0.0",
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
]
[[package]]
name = "tracing-attributes"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tracing-core"
version = "0.1.26"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f54c8ca710e81886d498c2fd3331b56c93aa248d49de2222ad2742247c60072f"
dependencies = [
"lazy_static",
]
[[package]] [[package]]
name = "tracy-rs" name = "tracy-rs"
version = "0.1.2" version = "0.1.2"
@ -5537,12 +5573,6 @@ dependencies = [
"serde", "serde",
] ]
[[package]]
name = "urlencoding"
version = "1.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a1f0175e03a0973cf4afd476bef05c26e228520400eb1fd473ad417b1c00ffb"
[[package]] [[package]]
name = "uuid" name = "uuid"
version = "0.8.1" version = "0.8.1"
@ -5608,26 +5638,30 @@ dependencies = [
[[package]] [[package]]
name = "warp" name = "warp"
version = "0.2.3" version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e95175b7a927258ecbb816bdada3cc469cb68593e7940b96a60f4af366a9970" checksum = "3cef4e1e9114a4b7f1ac799f16ce71c14de5778500c5450ec6b7b920c55b587e"
dependencies = [ dependencies = [
"bytes 0.5.6", "bytes 1.1.0",
"futures 0.3.21", "futures-channel",
"futures-util",
"headers", "headers",
"http", "http",
"hyper", "hyper",
"log", "log",
"mime", "mime",
"mime_guess", "mime_guess",
"percent-encoding",
"pin-project", "pin-project",
"scoped-tls", "scoped-tls",
"serde", "serde",
"serde_json", "serde_json",
"serde_urlencoded", "serde_urlencoded",
"tokio 0.2.25", "tokio 1.17.0",
"tokio-stream",
"tokio-util 0.6.999",
"tower-service", "tower-service",
"urlencoding", "tracing",
] ]
[[package]] [[package]]
@ -5696,7 +5730,7 @@ name = "webdriver"
version = "0.45.0" version = "0.45.0"
dependencies = [ dependencies = [
"base64 0.12.3", "base64 0.12.3",
"bytes 0.5.6", "bytes 1.1.0",
"cookie", "cookie",
"http", "http",
"log", "log",
@ -5704,7 +5738,8 @@ dependencies = [
"serde_derive", "serde_derive",
"serde_json", "serde_json",
"time 0.3.9", "time 0.3.9",
"tokio 0.2.25", "tokio 1.17.0",
"tokio-stream",
"unicode-segmentation", "unicode-segmentation",
"url", "url",
"warp", "warp",

Просмотреть файл

@ -117,6 +117,9 @@ rand = { path = "build/rust/rand" }
# Patch hashbrown 0.9 to 0.11 # Patch hashbrown 0.9 to 0.11
hashbrown = { path = "build/rust/hashbrown" } hashbrown = { path = "build/rust/hashbrown" }
# Patch tokio-util 0.6 to 0.7
tokio-util = { path = "build/rust/tokio-util" }
# Patch autocfg to hide rustc output. Workaround for https://github.com/cuviper/autocfg/issues/30 # Patch autocfg to hide rustc output. Workaround for https://github.com/cuviper/autocfg/issues/30
autocfg = { path = "third_party/rust/autocfg" } autocfg = { path = "third_party/rust/autocfg" }

Просмотреть файл

@ -0,0 +1,23 @@
[package]
name = "tokio-util"
version = "0.6.999"
edition = "2018"
license = "MPL-2.0"
[lib]
path = "lib.rs"
[dependencies]
tokio-util = "0.7"
[features]
__docs_rs = ["tokio-util/__docs_rs"]
codec = ["tokio-util/codec"]
compat = ["tokio-util/compat"]
default = ["tokio-util/default"]
full = ["tokio-util/full"]
io = ["tokio-util/io"]
io-util = ["tokio-util/io-util"]
net = ["tokio-util/net"]
rt = ["tokio-util/rt"]
time = ["tokio-util/time"]

Просмотреть файл

@ -0,0 +1,5 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
pub use tokio_util::*;

Просмотреть файл

@ -9,5 +9,5 @@ byteorder = "1.3.1"
dns-parser = "0.8.0" dns-parser = "0.8.0"
gecko-profiler = { path = "../../../../../tools/profiler/rust-api" } gecko-profiler = { path = "../../../../../tools/profiler/rust-api" }
log = "0.4" log = "0.4"
socket2 = { version = "0.3.9", features = ["reuseport"] } socket2 = { version = "0.4", features = ["all"] }
uuid = { version = "0.8", features = ["v4"] } uuid = { version = "0.8", features = ["v4"] }

Просмотреть файл

@ -412,7 +412,7 @@ impl MDNSService {
let mdns_addr = std::net::Ipv4Addr::new(224, 0, 0, 251); let mdns_addr = std::net::Ipv4Addr::new(224, 0, 0, 251);
let port = 5353; let port = 5353;
let socket = Socket::new(Domain::ipv4(), Type::dgram(), None)?; let socket = Socket::new(Domain::IPV4, Type::DGRAM, None)?;
socket.set_reuse_address(true)?; socket.set_reuse_address(true)?;
#[cfg(not(target_os = "windows"))] #[cfg(not(target_os = "windows"))]
@ -422,7 +422,7 @@ impl MDNSService {
port, port,
))))?; ))))?;
let socket = socket.into_udp_socket(); let socket = std::net::UdpSocket::from(socket);
socket.set_multicast_loop_v4(true)?; socket.set_multicast_loop_v4(true)?;
socket.set_read_timeout(Some(time::Duration::from_millis(1)))?; socket.set_read_timeout(Some(time::Duration::from_millis(1)))?;
socket.set_write_timeout(Some(time::Duration::from_millis(1)))?; socket.set_write_timeout(Some(time::Duration::from_millis(1)))?;
@ -658,7 +658,7 @@ mod tests {
fn listen_until(addr: &std::net::Ipv4Addr, stop: u64) -> thread::JoinHandle<Vec<String>> { fn listen_until(addr: &std::net::Ipv4Addr, stop: u64) -> thread::JoinHandle<Vec<String>> {
let port = 5353; let port = 5353;
let socket = Socket::new(Domain::ipv4(), Type::dgram(), None).unwrap(); let socket = Socket::new(Domain::IPV4, Type::DGRAM, None).unwrap();
socket.set_reuse_address(true).unwrap(); socket.set_reuse_address(true).unwrap();
#[cfg(not(target_os = "windows"))] #[cfg(not(target_os = "windows"))]
@ -670,7 +670,7 @@ mod tests {
)))) ))))
.unwrap(); .unwrap();
let socket = socket.into_udp_socket(); let socket = std::net::UdpSocket::from(socket);
socket.set_multicast_loop_v4(true).unwrap(); socket.set_multicast_loop_v4(true).unwrap();
socket socket
.set_read_timeout(Some(time::Duration::from_millis(10))) .set_read_timeout(Some(time::Duration::from_millis(10)))

Просмотреть файл

@ -74,7 +74,7 @@ PACKAGES_WE_ALWAYS_WANT_AN_OVERRIDE_OF = [
# add a comment as to why. # add a comment as to why.
TOLERATED_DUPES = { TOLERATED_DUPES = {
"base64": 2, "base64": 2,
"bytes": 3, "bytes": 2,
"crossbeam-deque": 2, "crossbeam-deque": 2,
"crossbeam-epoch": 2, "crossbeam-epoch": 2,
"crossbeam-utils": 3, "crossbeam-utils": 3,
@ -82,12 +82,11 @@ TOLERATED_DUPES = {
"libloading": 2, "libloading": 2,
"memoffset": 2, "memoffset": 2,
"mio": 2, "mio": 2,
"pin-project-lite": 2,
# Transition from time 0.1 to 0.3 underway, but chrono is stuck on 0.1 # Transition from time 0.1 to 0.3 underway, but chrono is stuck on 0.1
# and hasn't been updated in 1.5 years (a hypothetical update is # and hasn't been updated in 1.5 years (a hypothetical update is
# expected to remove the dependency on time altogether). # expected to remove the dependency on time altogether).
"time": 2, "time": 2,
"tokio": 3, "tokio": 2,
} }

Просмотреть файл

@ -13,7 +13,7 @@ edition = "2018"
base64 = "0.12" base64 = "0.12"
chrono = "0.4.6" chrono = "0.4.6"
clap = { version = "3.1", default-features = false, features = ["cargo", "std", "suggestions", "wrap_help"] } clap = { version = "3.1", default-features = false, features = ["cargo", "std", "suggestions", "wrap_help"] }
hyper = "0.13" hyper = "0.14"
lazy_static = "1.0" lazy_static = "1.0"
log = { version = "0.4", features = ["std"] } log = { version = "0.4", features = ["std"] }
marionette = { path = "./marionette", version="0.2.0" } marionette = { path = "./marionette", version="0.2.0" }

Просмотреть файл

@ -12,11 +12,11 @@ edition = "2018"
[features] [features]
default = ["server"] default = ["server"]
server = ["tokio", "warp"] server = ["tokio", "tokio-stream", "warp"]
[dependencies] [dependencies]
base64 = "0.12" base64 = "0.12"
bytes = "0.5" bytes = "1.0"
cookie = { version = "0.16", default-features = false } cookie = { version = "0.16", default-features = false }
http = "0.2" http = "0.2"
log = "0.4" log = "0.4"
@ -24,7 +24,8 @@ serde = "1.0"
serde_json = "1.0" serde_json = "1.0"
serde_derive = "1.0" serde_derive = "1.0"
time = "0.3" time = "0.3"
tokio = { version = "0.2", features = ["rt-core"], optional = true} tokio = { version = "1.0", features = ["rt", "net"], optional = true}
tokio-stream = { version = "0.1", features = ["net"], optional = true}
unicode-segmentation = "1.2" unicode-segmentation = "1.2"
url = "2.0" url = "2.0"
warp = { version = "0.2", default-features = false, optional = true } warp = { version = "0.3", default-features = false, optional = true }

Просмотреть файл

@ -17,6 +17,7 @@ use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::thread; use std::thread;
use tokio::net::TcpListener; use tokio::net::TcpListener;
use tokio_stream::wrappers::TcpListenerStream;
use url::{Host, Url}; use url::{Host, Url};
use warp::{self, Buf, Filter, Rejection}; use warp::{self, Buf, Filter, Rejection};
@ -218,14 +219,11 @@ where
let builder = thread::Builder::new().name("webdriver server".to_string()); let builder = thread::Builder::new().name("webdriver server".to_string());
let handle = builder.spawn(move || { let handle = builder.spawn(move || {
let mut rt = tokio::runtime::Builder::new() let rt = tokio::runtime::Builder::new_current_thread()
.basic_scheduler()
.enable_io() .enable_io()
.build() .build()
.unwrap(); .unwrap();
let mut listener = rt let listener = TcpListener::from_std(listener).unwrap();
.handle()
.enter(|| TcpListener::from_std(listener).unwrap());
let wroutes = build_warp_routes( let wroutes = build_warp_routes(
address, address,
allow_hosts, allow_hosts,
@ -233,7 +231,7 @@ where
&extension_routes, &extension_routes,
msg_send.clone(), msg_send.clone(),
); );
let fut = warp::serve(wroutes).run_incoming(listener.incoming()); let fut = warp::serve(wroutes).run_incoming(TcpListenerStream::new(listener));
rt.block_on(fut); rt.block_on(fut);
})?; })?;
@ -498,7 +496,7 @@ fn build_route<U: 'static + WebDriverExtensionRoute + Send + Sync>(
Some(_) | None => {} Some(_) | None => {}
} }
} }
let body = String::from_utf8(body.bytes().to_vec()); let body = String::from_utf8(body.chunk().to_vec());
if body.is_err() { if body.is_err() {
let err = WebDriverError::new( let err = WebDriverError::new(
ErrorStatus::UnknownError, ErrorStatus::UnknownError,

Просмотреть файл

@ -1 +0,0 @@
{"files":{"CHANGELOG.md":"7c1c6fe9fa6aa8a155d4a04dab5d4e3abadb349121886b2f24252db0e45fba51","Cargo.toml":"bb5072cd9bad83919ed35f49f3a7f88b608a0150d6ccdcbb4bf17dfb3c64ef3f","LICENSE":"45f522cacecb1023856e46df79ca625dfc550c94910078bd8aec6e02880b3d42","README.md":"2c2f6f1a240ad375f9dbd8e7f023510b645d98e327ea0a42ba339c94fd9baaa9","benches/buf.rs":"b0f4f1130081680f6f99d1efd49a75bd1d97d9a30117b7ad9525c96b7c8968e6","benches/bytes.rs":"dc5289a9ce82be35e71ed5853ab33aa108a30460e481135f6058fe4d2f7dc15e","benches/bytes_mut.rs":"1326fe6224b26826228e02b4133151e756f38152c2d9cfe66adf83af76c3ec98","ci/test-stable.sh":"6e010f1a95b72fea7bebdd217fda78427f3eb07b1e753f79507c71d982b2d38a","ci/tsan.sh":"466b86b19225dd26c756cf2252cb1973f87a145642c99364b462ed7ceb55c7dd","src/buf/buf_impl.rs":"fe1bc64bb9aef5b57d83901268f89bf148490e71bebc340c7ecc40ff95bcfb70","src/buf/buf_mut.rs":"d226189d9db76c9023537dcca0687aa5dd25851a9052d19154de8ee9b25bdee3","src/buf/ext/chain.rs":"337f58e1a8da5b4768e55921ff394f4ba3a0c6d476448fd5bceab6f3c1db1b3e","src/buf/ext/limit.rs":"a705d7cf38f9a11a904d6ee5e7afea83e9abdf8f454bb8e16b407b0e055dc11a","src/buf/ext/mod.rs":"ba2fa392c61b7429530c71797114e3f09d9b6b750b6f77f57fde964d2b218bc4","src/buf/ext/reader.rs":"ee4733fa2c2d893c6df8151c2333a46171619e8a45ec9bae863edc8deb438ac5","src/buf/ext/take.rs":"e92be765539b8b0c1cb67a01b691319cccd35fc098f2bb59ced3bbbe41ee0257","src/buf/ext/writer.rs":"3c52df6e73d09935d37bed9a05689c1966952f980b85b40aaab05081ec7ef6d8","src/buf/iter.rs":"a0de69367fa61d0d1c6c2ff4b4d337de9c5f4213d0c86e083226cf409666d860","src/buf/mod.rs":"4f8e3b4c4b69b7d004306d458ad835801e53659b38ca08312d7217d82da4c64f","src/buf/vec_deque.rs":"5a4063961d10380c1ab3681f8b3f6201112766d9f57a63e2861dc9f2b134668d","src/bytes.rs":"8c3aa5fe425604206ffc1b85a8bff5a9be38917786453450955984523f829cec","src/bytes_mut.rs":"e276f74da841ab65ca681cb09820de98aa2e9837dd975ed564b1a9be40440cf3","src/fmt/debug.rs":"19ebe7e5516e40ab712995f3ec2e0ba78ddfa905cce117e6d01e8eb330f3970a","src/fmt/hex.rs"
:"13755ec6f1b79923e1f1a05c51b179a38c03c40bb8ed2db0210e8901812e61e7","src/fmt/mod.rs":"176da4e359da99b8e5cf16e480cb7b978f574876827f1b9bb9c08da4d74ac0f5","src/lib.rs":"9b96e2a011a782ceb82428e6b71fd212a46bc186bd152102018c7b6428a0d441","src/loom.rs":"5dc97a5afce14875a66e44cbf0afa67e084c8b6b8c560bc14e7a70ef73aee96e","src/serde.rs":"3ecd7e828cd4c2b7db93c807cb1548fad209e674df493edf7cda69a7b04d405d","tests/test_buf.rs":"3ca99c58f470e7c4beb18e5dc69250ce541dd8ac96b88fb1162640510a735ada","tests/test_buf_mut.rs":"56636e439cb07af2fabdfb60a08995829680c9730a8ebe5c6ad2f54dbf208e32","tests/test_bytes.rs":"3ec0a82ce98fea633ed7d635caca21cd8035d0c9ea4287d1cc0199e167a4a3c1","tests/test_bytes_odd_alloc.rs":"87d51d4ab6ad98193b140ea8158f6631eba985a204c2ea94d34b3bb157791a16","tests/test_bytes_vec_alloc.rs":"2b686b6ab44f924e69d8270a4f256eb3626a3b4db8c1919b74bc422c10124899","tests/test_chain.rs":"71772fbc0bab72a697bd85c6c1be0eddfe7d7dc4f4737a0cd53be4ad191d076b","tests/test_debug.rs":"13299107172809e8cbbd823964ac9450cd0d6b6de79f2e6a2e0f44b9225a0593","tests/test_iter.rs":"c1f46823df26a90139645fd8728a03138edd95b2849dfec830452a80ddd9726d","tests/test_reader.rs":"9c94e164aa7de4c10966f8084ad04d06f4e9c66e156d017d194a1dac3dfc6619","tests/test_serde.rs":"2691f891796ba259de0ecf926de05c514f4912cc5fcd3e6a1591efbcd23ed4d0","tests/test_take.rs":"975aa2e216b6a3c939b31e41ecfbb3a90938096413a14a2ae986c842d2250180"},"package":"0e4cec68f03f32e44924783795810fa50a7035d8c8ebe78580ad7e6c703fba38"}

166
third_party/rust/bytes-0.5.6/CHANGELOG.md поставляемый
Просмотреть файл

@ -1,166 +0,0 @@
# 0.5.6 (July 13, 2020)
- Improve `BytesMut` to reuse buffer when fully `advance`d.
- Mark `BytesMut::{as_mut, set_len}` with `#[inline]`.
- Relax synchronization when cloning in shared vtable of `Bytes`.
- Move `loom` to `dev-dependencies`.
# 0.5.5 (June 18, 2020)
### Added
- Allow using the `serde` feature in `no_std` environments (#385).
### Fix
- Fix `BufMut::advance_mut` to panic if advanced past the capacity (#354).
- Fix `BytesMut::freeze` ignoring amount previously `advance`d (#352).
# 0.5.4 (January 23, 2020)
### Added
- Make `Bytes::new` a `const fn`.
- Add `From<BytesMut>` for `Bytes`.
### Fix
- Fix reversed arguments in `PartialOrd` for `Bytes`.
- Fix `Bytes::truncate` losing original capacity when repr is an unshared `Vec`.
- Fix `Bytes::from(Vec)` when allocator gave `Vec` a pointer with LSB set.
- Fix panic in `Bytes::slice_ref` if argument is an empty slice.
# 0.5.3 (December 12, 2019)
### Added
- `must_use` attributes to `split`, `split_off`, and `split_to` methods (#337).
### Fix
- Potential freeing of a null pointer in `Bytes` when constructed with an empty `Vec<u8>` (#341, #342).
- Calling `Bytes::truncate` with a size large than the length will no longer clear the `Bytes` (#333).
# 0.5.2 (November 27, 2019)
### Added
- `Limit` methods `into_inner`, `get_ref`, `get_mut`, `limit`, and `set_limit` (#325).
# 0.5.1 (November 25, 2019)
### Fix
- Growth documentation for `BytesMut` (#321)
# 0.5.0 (November 25, 2019)
### Fix
- Potential overflow in `copy_to_slice`
### Changed
- Increased minimum supported Rust version to 1.39.
- `Bytes` is now a "trait object", allowing for custom allocation strategies (#298)
- `BytesMut` implicitly grows internal storage. `remaining_mut()` returns
`usize::MAX` (#316).
- `BufMut::bytes_mut` returns `&mut [MaybeUninit<u8>]` to reflect the unknown
initialization state (#305).
- `Buf` / `BufMut` implementations for `&[u8]` and `&mut [u8]`
respectively (#261).
- Move `Buf` / `BufMut` "extra" functions to an extension trait (#306).
- `BufMutExt::limit` (#309).
- `Bytes::slice` takes a `RangeBounds` argument (#265).
- `Bytes::from_static` is now a `const fn` (#311).
- A multitude of smaller performance optimizations.
### Added
- `no_std` support (#281).
- `get_*`, `put_*`, `get_*_le`, and `put_*le` accessors for handling byte order.
- `BorrowMut` implementation for `BytesMut` (#185).
### Removed
- `IntoBuf` (#288).
- `Buf` implementation for `&str` (#301).
- `byteorder` dependency (#280).
- `iovec` dependency, use `std::IoSlice` instead (#263).
- optional `either` dependency (#315).
- optional `i128` feature -- now available on stable. (#276).
# 0.4.12 (March 6, 2019)
### Added
- Implement `FromIterator<&'a u8>` for `BytesMut`/`Bytes` (#244).
- Implement `Buf` for `VecDeque` (#249).
# 0.4.11 (November 17, 2018)
* Use raw pointers for potentially racy loads (#233).
* Implement `BufRead` for `buf::Reader` (#232).
* Documentation tweaks (#234).
# 0.4.10 (September 4, 2018)
* impl `Buf` and `BufMut` for `Either` (#225).
* Add `Bytes::slice_ref` (#208).
# 0.4.9 (July 12, 2018)
* Add 128 bit number support behind a feature flag (#209).
* Implement `IntoBuf` for `&mut [u8]`
# 0.4.8 (May 25, 2018)
* Fix panic in `BytesMut` `FromIterator` implementation.
* Bytes: Recycle space when reserving space in vec mode (#197).
* Bytes: Add resize fn (#203).
# 0.4.7 (April 27, 2018)
* Make `Buf` and `BufMut` usable as trait objects (#186).
* impl BorrowMut for BytesMut (#185).
* Improve accessor performance (#195).
# 0.4.6 (January 8, 2018)
* Implement FromIterator for Bytes/BytesMut (#148).
* Add `advance` fn to Bytes/BytesMut (#166).
* Add `unsplit` fn to `BytesMut` (#162, #173).
* Improvements to Bytes split fns (#92).
# 0.4.5 (August 12, 2017)
* Fix range bug in `Take::bytes`
* Misc performance improvements
* Add extra `PartialEq` implementations.
* Add `Bytes::with_capacity`
* Implement `AsMut[u8]` for `BytesMut`
# 0.4.4 (May 26, 2017)
* Add serde support behind feature flag
* Add `extend_from_slice` on `Bytes` and `BytesMut`
* Add `truncate` and `clear` on `Bytes`
* Misc additional std trait implementations
* Misc performance improvements
# 0.4.3 (April 30, 2017)
* Fix Vec::advance_mut bug
* Bump minimum Rust version to 1.15
* Misc performance tweaks
# 0.4.2 (April 5, 2017)
* Misc performance tweaks
* Improved `Debug` implementation for `Bytes`
* Avoid some incorrect assert panics
# 0.4.1 (March 15, 2017)
* Expose `buf` module and have most types available from there vs. root.
* Implement `IntoBuf` for `T: Buf`.
* Add `FromBuf` and `Buf::collect`.
* Add iterator adapter for `Buf`.
* Add scatter/gather support to `Buf` and `BufMut`.
* Add `Buf::chain`.
* Reduce allocations on repeated calls to `BytesMut::reserve`.
* Implement `Debug` for more types.
* Remove `Source` in favor of `IntoBuf`.
* Implement `Extend` for `BytesMut`.
# 0.4.0 (February 24, 2017)
* Initial release

37
third_party/rust/bytes-0.5.6/Cargo.toml поставляемый
Просмотреть файл

@ -1,37 +0,0 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies
#
# If you believe there's an error in this file please file an
# issue against the rust-lang/cargo repository. If you're
# editing this file be aware that the upstream Cargo.toml
# will likely look very different (and much more reasonable)
[package]
edition = "2018"
name = "bytes"
version = "0.5.6"
authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"]
description = "Types and traits for working with bytes"
documentation = "https://docs.rs/bytes"
readme = "README.md"
keywords = ["buffers", "zero-copy", "io"]
categories = ["network-programming", "data-structures"]
license = "MIT"
repository = "https://github.com/tokio-rs/bytes"
[dependencies.serde]
version = "1.0.60"
features = ["alloc"]
optional = true
default-features = false
[dev-dependencies.serde_test]
version = "1.0"
[features]
default = ["std"]
std = []
[target."cfg(loom)".dev-dependencies.loom]
version = "0.3"

47
third_party/rust/bytes-0.5.6/README.md поставляемый
Просмотреть файл

@ -1,47 +0,0 @@
# Bytes
A utility library for working with bytes.
[![Crates.io][crates-badge]][crates-url]
[![Build Status][ci-badge]][ci-url]
[crates-badge]: https://img.shields.io/crates/v/bytes.svg
[crates-url]: https://crates.io/crates/bytes
[ci-badge]: https://github.com/tokio-rs/bytes/workflows/CI/badge.svg
[ci-url]: https://github.com/tokio-rs/bytes/actions
[Documentation](https://docs.rs/bytes)
## Usage
To use `bytes`, first add this to your `Cargo.toml`:
```toml
[dependencies]
bytes = "0.5"
```
Next, add this to your crate:
```rust
use bytes::{Bytes, BytesMut, Buf, BufMut};
```
## Serde support
Serde support is optional and disabled by default. To enable use the feature `serde`.
```toml
[dependencies]
bytes = { version = "0.5", features = ["serde"] }
```
## License
This project is licensed under the [MIT license](LICENSE).
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in `bytes` by you, shall be licensed as MIT, without any additional
terms or conditions.

187
third_party/rust/bytes-0.5.6/benches/buf.rs поставляемый
Просмотреть файл

@ -1,187 +0,0 @@
#![feature(test)]
#![warn(rust_2018_idioms)]
extern crate test;
use bytes::Buf;
use test::Bencher;
/// Dummy Buf implementation
struct TestBuf {
buf: &'static [u8],
readlens: &'static [usize],
init_pos: usize,
pos: usize,
readlen_pos: usize,
readlen: usize,
}
impl TestBuf {
fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBuf {
let mut buf = TestBuf {
buf,
readlens,
init_pos,
pos: 0,
readlen_pos: 0,
readlen: 0,
};
buf.reset();
buf
}
fn reset(&mut self) {
self.pos = self.init_pos;
self.readlen_pos = 0;
self.next_readlen();
}
/// Compute the length of the next read :
/// - use the next value specified in readlens (capped by remaining) if any
/// - else the remaining
fn next_readlen(&mut self) {
self.readlen = self.buf.len() - self.pos;
if let Some(readlen) = self.readlens.get(self.readlen_pos) {
self.readlen = std::cmp::min(self.readlen, *readlen);
self.readlen_pos += 1;
}
}
}
impl Buf for TestBuf {
fn remaining(&self) -> usize {
return self.buf.len() - self.pos;
}
fn advance(&mut self, cnt: usize) {
self.pos += cnt;
assert!(self.pos <= self.buf.len());
self.next_readlen();
}
fn bytes(&self) -> &[u8] {
if self.readlen == 0 {
Default::default()
} else {
&self.buf[self.pos..self.pos + self.readlen]
}
}
}
/// Dummy Buf implementation
/// version with methods forced to not be inlined (to simulate costly calls)
struct TestBufC {
inner: TestBuf,
}
impl TestBufC {
fn new(buf: &'static [u8], readlens: &'static [usize], init_pos: usize) -> TestBufC {
TestBufC {
inner: TestBuf::new(buf, readlens, init_pos),
}
}
fn reset(&mut self) {
self.inner.reset()
}
}
impl Buf for TestBufC {
#[inline(never)]
fn remaining(&self) -> usize {
self.inner.remaining()
}
#[inline(never)]
fn advance(&mut self, cnt: usize) {
self.inner.advance(cnt)
}
#[inline(never)]
fn bytes(&self) -> &[u8] {
self.inner.bytes()
}
}
macro_rules! bench {
($fname:ident, testbuf $testbuf:ident $readlens:expr, $method:ident $(,$arg:expr)*) => (
#[bench]
fn $fname(b: &mut Bencher) {
let mut bufs = [
$testbuf::new(&[1u8; 8+0], $readlens, 0),
$testbuf::new(&[1u8; 8+1], $readlens, 1),
$testbuf::new(&[1u8; 8+2], $readlens, 2),
$testbuf::new(&[1u8; 8+3], $readlens, 3),
$testbuf::new(&[1u8; 8+4], $readlens, 4),
$testbuf::new(&[1u8; 8+5], $readlens, 5),
$testbuf::new(&[1u8; 8+6], $readlens, 6),
$testbuf::new(&[1u8; 8+7], $readlens, 7),
];
b.iter(|| {
for i in 0..8 {
bufs[i].reset();
let buf: &mut dyn Buf = &mut bufs[i]; // type erasure
test::black_box(buf.$method($($arg,)*));
}
})
}
);
($fname:ident, slice, $method:ident $(,$arg:expr)*) => (
#[bench]
fn $fname(b: &mut Bencher) {
// buf must be long enough for one read of 8 bytes starting at pos 7
let arr = [1u8; 8+7];
b.iter(|| {
for i in 0..8 {
let mut buf = &arr[i..];
let buf = &mut buf as &mut dyn Buf; // type erasure
test::black_box(buf.$method($($arg,)*));
}
})
}
);
($fname:ident, option) => (
#[bench]
fn $fname(b: &mut Bencher) {
let data = [1u8; 1];
b.iter(|| {
for _ in 0..8 {
let mut buf = Some(data);
let buf = &mut buf as &mut dyn Buf; // type erasure
test::black_box(buf.get_u8());
}
})
}
);
}
macro_rules! bench_group {
($method:ident $(,$arg:expr)*) => (
bench!(slice, slice, $method $(,$arg)*);
bench!(tbuf_1, testbuf TestBuf &[], $method $(,$arg)*);
bench!(tbuf_1_costly, testbuf TestBufC &[], $method $(,$arg)*);
bench!(tbuf_2, testbuf TestBuf &[1], $method $(,$arg)*);
bench!(tbuf_2_costly, testbuf TestBufC &[1], $method $(,$arg)*);
// bench!(tbuf_onebyone, testbuf TestBuf &[1,1,1,1,1,1,1,1], $method $(,$arg)*);
// bench!(tbuf_onebyone_costly, testbuf TestBufC &[1,1,1,1,1,1,1,1], $method $(,$arg)*);
);
}
mod get_u8 {
use super::*;
bench_group!(get_u8);
bench!(option, option);
}
mod get_u16 {
use super::*;
bench_group!(get_u16);
}
mod get_u32 {
use super::*;
bench_group!(get_u32);
}
mod get_u64 {
use super::*;
bench_group!(get_u64);
}
mod get_f32 {
use super::*;
bench_group!(get_f32);
}
mod get_f64 {
use super::*;
bench_group!(get_f64);
}
mod get_uint24 {
use super::*;
bench_group!(get_uint, 3);
}

119
third_party/rust/bytes-0.5.6/benches/bytes.rs поставляемый
Просмотреть файл

@ -1,119 +0,0 @@
#![feature(test)]
#![warn(rust_2018_idioms)]
extern crate test;
use bytes::Bytes;
use test::Bencher;
#[bench]
fn deref_unique(b: &mut Bencher) {
let buf = Bytes::from(vec![0; 1024]);
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_shared(b: &mut Bencher) {
let buf = Bytes::from(vec![0; 1024]);
let _b2 = buf.clone();
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_static(b: &mut Bencher) {
let buf = Bytes::from_static(b"hello world");
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn clone_static(b: &mut Bencher) {
let bytes =
Bytes::from_static("hello world 1234567890 and have a good byte 0987654321".as_bytes());
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn clone_shared(b: &mut Bencher) {
let bytes = Bytes::from(b"hello world 1234567890 and have a good byte 0987654321".to_vec());
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn clone_arc_vec(b: &mut Bencher) {
use std::sync::Arc;
let bytes = Arc::new(b"hello world 1234567890 and have a good byte 0987654321".to_vec());
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn from_long_slice(b: &mut Bencher) {
let data = [0u8; 128];
b.bytes = data.len() as u64;
b.iter(|| {
let buf = Bytes::copy_from_slice(&data[..]);
test::black_box(buf);
})
}
#[bench]
fn slice_empty(b: &mut Bencher) {
b.iter(|| {
let b = Bytes::from(vec![17; 1024]).clone();
for i in 0..1000 {
test::black_box(b.slice(i % 100..i % 100));
}
})
}
#[bench]
fn slice_short_from_arc(b: &mut Bencher) {
b.iter(|| {
// `clone` is to convert to ARC
let b = Bytes::from(vec![17; 1024]).clone();
for i in 0..1000 {
test::black_box(b.slice(1..2 + i % 10));
}
})
}
#[bench]
fn split_off_and_drop(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1024 {
let v = vec![10; 200];
let mut b = Bytes::from(v);
test::black_box(b.split_off(100));
test::black_box(b);
}
})
}

Просмотреть файл

@ -1,266 +0,0 @@
#![feature(test)]
#![warn(rust_2018_idioms)]
extern crate test;
use bytes::{BufMut, BytesMut};
use test::Bencher;
#[bench]
fn alloc_small(b: &mut Bencher) {
b.iter(|| {
for _ in 0..1024 {
test::black_box(BytesMut::with_capacity(12));
}
})
}
#[bench]
fn alloc_mid(b: &mut Bencher) {
b.iter(|| {
test::black_box(BytesMut::with_capacity(128));
})
}
#[bench]
fn alloc_big(b: &mut Bencher) {
b.iter(|| {
test::black_box(BytesMut::with_capacity(4096));
})
}
#[bench]
fn deref_unique(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(4096);
buf.put(&[0u8; 1024][..]);
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_unique_unroll(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(4096);
buf.put(&[0u8; 1024][..]);
b.iter(|| {
for _ in 0..128 {
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_shared(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(4096);
buf.put(&[0u8; 1024][..]);
let _b2 = buf.split_off(1024);
b.iter(|| {
for _ in 0..1024 {
test::black_box(&buf[..]);
}
})
}
#[bench]
fn deref_two(b: &mut Bencher) {
let mut buf1 = BytesMut::with_capacity(8);
buf1.put(&[0u8; 8][..]);
let mut buf2 = BytesMut::with_capacity(4096);
buf2.put(&[0u8; 1024][..]);
b.iter(|| {
for _ in 0..512 {
test::black_box(&buf1[..]);
test::black_box(&buf2[..]);
}
})
}
#[bench]
fn clone_frozen(b: &mut Bencher) {
let bytes = BytesMut::from(&b"hello world 1234567890 and have a good byte 0987654321"[..])
.split()
.freeze();
b.iter(|| {
for _ in 0..1024 {
test::black_box(&bytes.clone());
}
})
}
#[bench]
fn alloc_write_split_to_mid(b: &mut Bencher) {
b.iter(|| {
let mut buf = BytesMut::with_capacity(128);
buf.put_slice(&[0u8; 64]);
test::black_box(buf.split_to(64));
})
}
#[bench]
fn drain_write_drain(b: &mut Bencher) {
let data = [0u8; 128];
b.iter(|| {
let mut buf = BytesMut::with_capacity(1024);
let mut parts = Vec::with_capacity(8);
for _ in 0..8 {
buf.put(&data[..]);
parts.push(buf.split_to(128));
}
test::black_box(parts);
})
}
#[bench]
fn fmt_write(b: &mut Bencher) {
use std::fmt::Write;
let mut buf = BytesMut::with_capacity(128);
let s = "foo bar baz quux lorem ipsum dolor et";
b.bytes = s.len() as u64;
b.iter(|| {
let _ = write!(buf, "{}", s);
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
})
}
#[bench]
fn bytes_mut_extend(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.extend(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
// BufMut for BytesMut vs Vec<u8>
#[bench]
fn put_slice_bytes_mut(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.put_slice(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_u8_bytes_mut(b: &mut Bencher) {
let mut buf = BytesMut::with_capacity(256);
let cnt = 128;
b.bytes = cnt as u64;
b.iter(|| {
for _ in 0..cnt {
buf.put_u8(b'x');
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_slice_vec(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.put_slice(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_u8_vec(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let cnt = 128;
b.bytes = cnt as u64;
b.iter(|| {
for _ in 0..cnt {
buf.put_u8(b'x');
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_slice_vec_extend(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let data = [33u8; 32];
b.bytes = data.len() as u64 * 4;
b.iter(|| {
for _ in 0..4 {
buf.extend_from_slice(&data);
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}
#[bench]
fn put_u8_vec_push(b: &mut Bencher) {
let mut buf = Vec::<u8>::with_capacity(256);
let cnt = 128;
b.bytes = cnt as u64;
b.iter(|| {
for _ in 0..cnt {
buf.push(b'x');
}
test::black_box(&buf);
unsafe {
buf.set_len(0);
}
});
}

Просмотреть файл

@ -1,27 +0,0 @@
#!/bin/bash
set -ex
cmd="${1:-test}"
# Install cargo-hack for feature flag test
cargo install cargo-hack
# Run with each feature
# * --each-feature includes both default/no-default features
# * --optional-deps is needed for serde feature
cargo hack "${cmd}" --each-feature --optional-deps
# Run with all features
cargo "${cmd}" --all-features
cargo doc --no-deps --all-features
if [[ "${RUST_VERSION}" == "nightly"* ]]; then
# Check benchmarks
cargo check --benches
# Check minimal versions
cargo clean
cargo update -Zminimal-versions
cargo check --all-features
fi

13
third_party/rust/bytes-0.5.6/ci/tsan.sh поставляемый
Просмотреть файл

@ -1,13 +0,0 @@
#!/bin/bash
set -ex
export ASAN_OPTIONS="detect_odr_violation=0 detect_leaks=0"
# Run address sanitizer
RUSTFLAGS="-Z sanitizer=address" \
cargo test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut
# Run thread sanitizer
RUSTFLAGS="-Z sanitizer=thread" \
cargo -Zbuild-std test --target x86_64-unknown-linux-gnu --test test_bytes --test test_buf --test test_buf_mut

1007
third_party/rust/bytes-0.5.6/src/buf/buf_impl.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

1100
third_party/rust/bytes-0.5.6/src/buf/buf_mut.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

Просмотреть файл

@ -1,233 +0,0 @@
use crate::buf::IntoIter;
use crate::{Buf, BufMut};
use core::mem::MaybeUninit;
#[cfg(feature = "std")]
use crate::buf::IoSliceMut;
#[cfg(feature = "std")]
use std::io::IoSlice;
/// A `Chain` sequences two buffers.
///
/// `Chain` is an adapter that links two underlying buffers and provides a
/// continuous view across both buffers. It is able to sequence either immutable
/// buffers ([`Buf`] values) or mutable buffers ([`BufMut`] values).
///
/// This struct is generally created by calling [`Buf::chain`]. Please see that
/// function's documentation for more detail.
///
/// # Examples
///
/// ```
/// use bytes::{Bytes, Buf, buf::BufExt};
///
/// let mut buf = (&b"hello "[..])
/// .chain(&b"world"[..]);
///
/// let full: Bytes = buf.to_bytes();
/// assert_eq!(full[..], b"hello world"[..]);
/// ```
///
/// [`Buf::chain`]: trait.Buf.html#method.chain
/// [`Buf`]: trait.Buf.html
/// [`BufMut`]: trait.BufMut.html
#[derive(Debug)]
pub struct Chain<T, U> {
a: T,
b: U,
}
impl<T, U> Chain<T, U> {
/// Creates a new `Chain` sequencing the provided values.
pub fn new(a: T, b: U) -> Chain<T, U> {
Chain { a, b }
}
/// Gets a reference to the first underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufExt;
///
/// let buf = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// assert_eq!(buf.first_ref()[..], b"hello"[..]);
/// ```
pub fn first_ref(&self) -> &T {
&self.a
}
/// Gets a mutable reference to the first underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, buf::BufExt};
///
/// let mut buf = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// buf.first_mut().advance(1);
///
/// let full = buf.to_bytes();
/// assert_eq!(full, b"elloworld"[..]);
/// ```
pub fn first_mut(&mut self) -> &mut T {
&mut self.a
}
/// Gets a reference to the last underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufExt;
///
/// let buf = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// assert_eq!(buf.last_ref()[..], b"world"[..]);
/// ```
pub fn last_ref(&self) -> &U {
&self.b
}
/// Gets a mutable reference to the last underlying `Buf`.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, buf::BufExt};
///
/// let mut buf = (&b"hello "[..])
/// .chain(&b"world"[..]);
///
/// buf.last_mut().advance(1);
///
/// let full = buf.to_bytes();
/// assert_eq!(full, b"hello orld"[..]);
/// ```
pub fn last_mut(&mut self) -> &mut U {
&mut self.b
}
/// Consumes this `Chain`, returning the underlying values.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufExt;
///
/// let chain = (&b"hello"[..])
/// .chain(&b"world"[..]);
///
/// let (first, last) = chain.into_inner();
/// assert_eq!(first[..], b"hello"[..]);
/// assert_eq!(last[..], b"world"[..]);
/// ```
pub fn into_inner(self) -> (T, U) {
(self.a, self.b)
}
}
impl<T, U> Buf for Chain<T, U>
where
T: Buf,
U: Buf,
{
fn remaining(&self) -> usize {
self.a.remaining() + self.b.remaining()
}
fn bytes(&self) -> &[u8] {
if self.a.has_remaining() {
self.a.bytes()
} else {
self.b.bytes()
}
}
fn advance(&mut self, mut cnt: usize) {
let a_rem = self.a.remaining();
if a_rem != 0 {
if a_rem >= cnt {
self.a.advance(cnt);
return;
}
// Consume what is left of a
self.a.advance(a_rem);
cnt -= a_rem;
}
self.b.advance(cnt);
}
#[cfg(feature = "std")]
fn bytes_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
let mut n = self.a.bytes_vectored(dst);
n += self.b.bytes_vectored(&mut dst[n..]);
n
}
}
impl<T, U> BufMut for Chain<T, U>
where
T: BufMut,
U: BufMut,
{
fn remaining_mut(&self) -> usize {
self.a.remaining_mut() + self.b.remaining_mut()
}
fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
if self.a.has_remaining_mut() {
self.a.bytes_mut()
} else {
self.b.bytes_mut()
}
}
unsafe fn advance_mut(&mut self, mut cnt: usize) {
let a_rem = self.a.remaining_mut();
if a_rem != 0 {
if a_rem >= cnt {
self.a.advance_mut(cnt);
return;
}
// Consume what is left of a
self.a.advance_mut(a_rem);
cnt -= a_rem;
}
self.b.advance_mut(cnt);
}
#[cfg(feature = "std")]
fn bytes_vectored_mut<'a>(&'a mut self, dst: &mut [IoSliceMut<'a>]) -> usize {
let mut n = self.a.bytes_vectored_mut(dst);
n += self.b.bytes_vectored_mut(&mut dst[n..]);
n
}
}
impl<T, U> IntoIterator for Chain<T, U>
where
T: Buf,
U: Buf,
{
type Item = u8;
type IntoIter = IntoIter<Chain<T, U>>;
fn into_iter(self) -> Self::IntoIter {
IntoIter::new(self)
}
}

Просмотреть файл

@ -1,74 +0,0 @@
use crate::BufMut;
use core::{cmp, mem::MaybeUninit};
/// A `BufMut` adapter which limits the amount of bytes that can be written
/// to an underlying buffer.
#[derive(Debug)]
pub struct Limit<T> {
inner: T,
limit: usize,
}
pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
Limit { inner, limit }
}
impl<T> Limit<T> {
/// Consumes this `Limit`, returning the underlying value.
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the maximum number of bytes that can be written
///
/// # Note
///
/// If the inner `BufMut` has fewer bytes than indicated by this method then
/// that is the actual number of available bytes.
pub fn limit(&self) -> usize {
self.limit
}
/// Sets the maximum number of bytes that can be written.
///
/// # Note
///
/// If the inner `BufMut` has fewer bytes than `lim` then that is the actual
/// number of available bytes.
pub fn set_limit(&mut self, lim: usize) {
self.limit = lim
}
}
impl<T: BufMut> BufMut for Limit<T> {
fn remaining_mut(&self) -> usize {
cmp::min(self.inner.remaining_mut(), self.limit)
}
fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
let bytes = self.inner.bytes_mut();
let end = cmp::min(bytes.len(), self.limit);
&mut bytes[..end]
}
unsafe fn advance_mut(&mut self, cnt: usize) {
assert!(cnt <= self.limit);
self.inner.advance_mut(cnt);
self.limit -= cnt;
}
}

Просмотреть файл

@ -1,186 +0,0 @@
//! Extra utilities for `Buf` and `BufMut` types.
use super::{Buf, BufMut};
mod chain;
mod limit;
#[cfg(feature = "std")]
mod reader;
mod take;
#[cfg(feature = "std")]
mod writer;
pub use self::chain::Chain;
pub use self::limit::Limit;
pub use self::take::Take;
#[cfg(feature = "std")]
pub use self::{reader::Reader, writer::Writer};
/// Extra methods for implementations of `Buf`.
pub trait BufExt: Buf {
/// Creates an adaptor which will read at most `limit` bytes from `self`.
///
/// This function returns a new instance of `Buf` which will read at most
/// `limit` bytes.
///
/// # Examples
///
/// ```
/// use bytes::{BufMut, buf::BufExt};
///
/// let mut buf = b"hello world"[..].take(5);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(dst, b"hello");
///
/// let mut buf = buf.into_inner();
/// dst.clear();
/// dst.put(&mut buf);
/// assert_eq!(dst, b" world");
/// ```
fn take(self, limit: usize) -> Take<Self>
where
Self: Sized,
{
take::new(self, limit)
}
/// Creates an adaptor which will chain this buffer with another.
///
/// The returned `Buf` instance will first consume all bytes from `self`.
/// Afterwards the output is equivalent to the output of next.
///
/// # Examples
///
/// ```
/// use bytes::{Buf, buf::BufExt};
///
/// let mut chain = b"hello "[..].chain(&b"world"[..]);
///
/// let full = chain.to_bytes();
/// assert_eq!(full.bytes(), b"hello world");
/// ```
fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
where
Self: Sized,
{
Chain::new(self, next)
}
/// Creates an adaptor which implements the `Read` trait for `self`.
///
/// This function returns a new value which implements `Read` by adapting
/// the `Read` trait functions to the `Buf` trait functions. Given that
/// `Buf` operations are infallible, none of the `Read` functions will
/// return with `Err`.
///
/// # Examples
///
/// ```
/// use bytes::{Bytes, buf::BufExt};
/// use std::io::Read;
///
/// let buf = Bytes::from("hello world");
///
/// let mut reader = buf.reader();
/// let mut dst = [0; 1024];
///
/// let num = reader.read(&mut dst).unwrap();
///
/// assert_eq!(11, num);
/// assert_eq!(&dst[..11], &b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
fn reader(self) -> Reader<Self>
where
Self: Sized,
{
reader::new(self)
}
}
impl<B: Buf + ?Sized> BufExt for B {}
/// Extra methods for implementations of `BufMut`.
pub trait BufMutExt: BufMut {
/// Creates an adaptor which can write at most `limit` bytes to `self`.
///
/// # Examples
///
/// ```
/// use bytes::{BufMut, buf::BufMutExt};
///
/// let arr = &mut [0u8; 128][..];
/// assert_eq!(arr.remaining_mut(), 128);
///
/// let dst = arr.limit(10);
/// assert_eq!(dst.remaining_mut(), 10);
/// ```
fn limit(self, limit: usize) -> Limit<Self>
where
Self: Sized,
{
limit::new(self, limit)
}
/// Creates an adaptor which implements the `Write` trait for `self`.
///
/// This function returns a new value which implements `Write` by adapting
/// the `Write` trait functions to the `BufMut` trait functions. Given that
/// `BufMut` operations are infallible, none of the `Write` functions will
/// return with `Err`.
///
/// # Examples
///
/// ```
/// use bytes::buf::BufMutExt;
/// use std::io::Write;
///
/// let mut buf = vec![].writer();
///
/// let num = buf.write(&b"hello world"[..]).unwrap();
/// assert_eq!(11, num);
///
/// let buf = buf.into_inner();
///
/// assert_eq!(*buf, b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
fn writer(self) -> Writer<Self>
where
Self: Sized,
{
writer::new(self)
}
/// Creates an adapter which will chain this buffer with another.
///
/// The returned `BufMut` instance will first write to all bytes from
/// `self`. Afterwards, it will write to `next`.
///
/// # Examples
///
/// ```
/// use bytes::{BufMut, buf::BufMutExt};
///
/// let mut a = [0u8; 5];
/// let mut b = [0u8; 6];
///
/// let mut chain = (&mut a[..]).chain_mut(&mut b[..]);
///
/// chain.put_slice(b"hello world");
///
/// assert_eq!(&a[..], b"hello");
/// assert_eq!(&b[..], b" world");
/// ```
fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
where
Self: Sized,
{
Chain::new(self, next)
}
}
impl<B: BufMut + ?Sized> BufMutExt for B {}

Просмотреть файл

@ -1,81 +0,0 @@
use crate::Buf;
use std::{cmp, io};
/// A `Buf` adapter which implements `io::Read` for the inner value.
///
/// This struct is generally created by calling `reader()` on `Buf`. See
/// documentation of [`reader()`](trait.Buf.html#method.reader) for more
/// details.
#[derive(Debug)]
pub struct Reader<B> {
buf: B,
}
pub fn new<B>(buf: B) -> Reader<B> {
Reader { buf }
}
impl<B: Buf> Reader<B> {
/// Gets a reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufExt;
///
/// let buf = b"hello world".reader();
///
/// assert_eq!(b"hello world", buf.get_ref());
/// ```
pub fn get_ref(&self) -> &B {
&self.buf
}
/// Gets a mutable reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
pub fn get_mut(&mut self) -> &mut B {
&mut self.buf
}
/// Consumes this `Reader`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, buf::BufExt};
/// use std::io;
///
/// let mut buf = b"hello world".reader();
/// let mut dst = vec![];
///
/// io::copy(&mut buf, &mut dst).unwrap();
///
/// let buf = buf.into_inner();
/// assert_eq!(0, buf.remaining());
/// ```
pub fn into_inner(self) -> B {
self.buf
}
}
impl<B: Buf + Sized> io::Read for Reader<B> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
let len = cmp::min(self.buf.remaining(), dst.len());
Buf::copy_to_slice(&mut self.buf, &mut dst[0..len]);
Ok(len)
}
}
impl<B: Buf + Sized> io::BufRead for Reader<B> {
fn fill_buf(&mut self) -> io::Result<&[u8]> {
Ok(self.buf.bytes())
}
fn consume(&mut self, amt: usize) {
self.buf.advance(amt)
}
}

Просмотреть файл

@ -1,147 +0,0 @@
use crate::Buf;
use core::cmp;
/// A `Buf` adapter which limits the bytes read from an underlying buffer.
///
/// This struct is generally created by calling `take()` on `Buf`. See
/// documentation of [`take()`](trait.BufExt.html#method.take) for more details.
#[derive(Debug)]
pub struct Take<T> {
inner: T,
limit: usize,
}
pub fn new<T>(inner: T, limit: usize) -> Take<T> {
Take { inner, limit }
}
impl<T> Take<T> {
/// Consumes this `Take`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::{BufMut, BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"he"[..]);
///
/// let mut buf = buf.into_inner();
///
/// dst.clear();
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"llo world"[..]);
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, buf::BufExt};
///
/// let buf = b"hello world".take(2);
///
/// assert_eq!(11, buf.get_ref().remaining());
/// ```
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, BufMut, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
///
/// buf.get_mut().advance(2);
///
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"ll"[..]);
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Returns the maximum number of bytes that can be read.
///
/// # Note
///
/// If the inner `Buf` has fewer bytes than indicated by this method then
/// that is the actual number of available bytes.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
///
/// assert_eq!(2, buf.limit());
/// assert_eq!(b'h', buf.get_u8());
/// assert_eq!(1, buf.limit());
/// ```
pub fn limit(&self) -> usize {
self.limit
}
/// Sets the maximum number of bytes that can be read.
///
/// # Note
///
/// If the inner `Buf` has fewer bytes than `lim` then that is the actual
/// number of available bytes.
///
/// # Examples
///
/// ```rust
/// use bytes::{BufMut, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
///
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"he"[..]);
///
/// dst.clear();
///
/// buf.set_limit(3);
/// dst.put(&mut buf);
/// assert_eq!(*dst, b"llo"[..]);
/// ```
pub fn set_limit(&mut self, lim: usize) {
self.limit = lim
}
}
impl<T: Buf> Buf for Take<T> {
fn remaining(&self) -> usize {
cmp::min(self.inner.remaining(), self.limit)
}
fn bytes(&self) -> &[u8] {
let bytes = self.inner.bytes();
&bytes[..cmp::min(bytes.len(), self.limit)]
}
fn advance(&mut self, cnt: usize) {
assert!(cnt <= self.limit);
self.inner.advance(cnt);
self.limit -= cnt;
}
}

Просмотреть файл

@ -1,88 +0,0 @@
use crate::BufMut;
use std::{cmp, io};
/// A `BufMut` adapter which implements `io::Write` for the inner value.
///
/// This struct is generally created by calling `writer()` on `BufMut`. See
/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more
/// details.
#[derive(Debug)]
pub struct Writer<B> {
buf: B,
}
pub fn new<B>(buf: B) -> Writer<B> {
Writer { buf }
}
impl<B: BufMut> Writer<B> {
/// Gets a reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufMutExt;
///
/// let buf = Vec::with_capacity(1024).writer();
///
/// assert_eq!(1024, buf.get_ref().capacity());
/// ```
pub fn get_ref(&self) -> &B {
&self.buf
}
/// Gets a mutable reference to the underlying `BufMut`.
///
/// It is inadvisable to directly write to the underlying `BufMut`.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufMutExt;
///
/// let mut buf = vec![].writer();
///
/// buf.get_mut().reserve(1024);
///
/// assert_eq!(1024, buf.get_ref().capacity());
/// ```
pub fn get_mut(&mut self) -> &mut B {
&mut self.buf
}
/// Consumes this `Writer`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::buf::BufMutExt;
/// use std::io;
///
/// let mut buf = vec![].writer();
/// let mut src = &b"hello world"[..];
///
/// io::copy(&mut src, &mut buf).unwrap();
///
/// let buf = buf.into_inner();
/// assert_eq!(*buf, b"hello world"[..]);
/// ```
pub fn into_inner(self) -> B {
self.buf
}
}
impl<B: BufMut + Sized> io::Write for Writer<B> {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
let n = cmp::min(self.buf.remaining_mut(), src.len());
self.buf.put(&src[0..n]);
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}

133
third_party/rust/bytes-0.5.6/src/buf/iter.rs поставляемый
Просмотреть файл

@ -1,133 +0,0 @@
use crate::Buf;
/// Iterator over the bytes contained by the buffer.
///
/// This struct is created by the [`iter`] method on [`Buf`].
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use bytes::Bytes;
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
/// assert_eq!(iter.next(), Some(b'b'));
/// assert_eq!(iter.next(), Some(b'c'));
/// assert_eq!(iter.next(), None);
/// ```
///
/// [`iter`]: trait.Buf.html#method.iter
/// [`Buf`]: trait.Buf.html
#[derive(Debug)]
pub struct IntoIter<T> {
inner: T,
}
impl<T> IntoIter<T> {
/// Creates an iterator over the bytes contained by the buffer.
///
/// # Examples
///
/// ```
/// use bytes::Bytes;
/// use bytes::buf::IntoIter;
///
/// let buf = Bytes::from_static(b"abc");
/// let mut iter = IntoIter::new(buf);
///
/// assert_eq!(iter.next(), Some(b'a'));
/// assert_eq!(iter.next(), Some(b'b'));
/// assert_eq!(iter.next(), Some(b'c'));
/// assert_eq!(iter.next(), None);
/// ```
pub fn new(inner: T) -> IntoIter<T> {
IntoIter { inner }
}
/// Consumes this `IntoIter`, returning the underlying value.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, Bytes};
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
///
/// let buf = iter.into_inner();
/// assert_eq!(2, buf.remaining());
/// ```
pub fn into_inner(self) -> T {
self.inner
}
/// Gets a reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, Bytes};
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
///
/// assert_eq!(2, iter.get_ref().remaining());
/// ```
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Gets a mutable reference to the underlying `Buf`.
///
/// It is inadvisable to directly read from the underlying `Buf`.
///
/// # Examples
///
/// ```rust
/// use bytes::{Buf, BytesMut};
///
/// let buf = BytesMut::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
///
/// assert_eq!(iter.next(), Some(b'a'));
///
/// iter.get_mut().advance(1);
///
/// assert_eq!(iter.next(), Some(b'c'));
/// ```
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
}
impl<T: Buf> Iterator for IntoIter<T> {
type Item = u8;
fn next(&mut self) -> Option<u8> {
if !self.inner.has_remaining() {
return None;
}
let b = self.inner.bytes()[0];
self.inner.advance(1);
Some(b)
}
fn size_hint(&self) -> (usize, Option<usize>) {
let rem = self.inner.remaining();
(rem, Some(rem))
}
}
impl<T: Buf> ExactSizeIterator for IntoIter<T> {}

30
third_party/rust/bytes-0.5.6/src/buf/mod.rs поставляемый
Просмотреть файл

@ -1,30 +0,0 @@
//! Utilities for working with buffers.
//!
//! A buffer is any structure that contains a sequence of bytes. The bytes may
//! or may not be stored in contiguous memory. This module contains traits used
//! to abstract over buffers as well as utilities for working with buffer types.
//!
//! # `Buf`, `BufMut`
//!
//! These are the two foundational traits for abstractly working with buffers.
//! They can be thought as iterators for byte structures. They offer additional
//! performance over `Iterator` by providing an API optimized for byte slices.
//!
//! See [`Buf`] and [`BufMut`] for more details.
//!
//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
//! [`Buf`]: trait.Buf.html
//! [`BufMut`]: trait.BufMut.html
mod buf_impl;
mod buf_mut;
pub mod ext;
mod iter;
mod vec_deque;
pub use self::buf_impl::Buf;
pub use self::buf_mut::BufMut;
#[cfg(feature = "std")]
pub use self::buf_mut::IoSliceMut;
pub use self::ext::{BufExt, BufMutExt};
pub use self::iter::IntoIter;

Просмотреть файл

@ -1,22 +0,0 @@
use alloc::collections::VecDeque;
use super::Buf;
impl Buf for VecDeque<u8> {
    /// Every byte still stored in the deque is readable.
    fn remaining(&self) -> usize {
        self.len()
    }

    fn bytes(&self) -> &[u8] {
        // A ring buffer's contents may be split into two slices. The `Buf`
        // contract only requires returning a prefix of the remaining bytes,
        // so hand back the front slice unless it happens to be empty.
        let (front, back) = self.as_slices();
        if front.is_empty() {
            back
        } else {
            front
        }
    }

    fn advance(&mut self, cnt: usize) {
        // Dropping the first `cnt` elements consumes them from the front.
        self.drain(..cnt);
    }
}

1108
third_party/rust/bytes-0.5.6/src/bytes.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

1581
third_party/rust/bytes-0.5.6/src/bytes_mut.rs поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

49
third_party/rust/bytes-0.5.6/src/fmt/debug.rs поставляемый
Просмотреть файл

@ -1,49 +0,0 @@
use core::fmt::{Debug, Formatter, Result};
use super::BytesRef;
use crate::{Bytes, BytesMut};
/// Alternative implementation of `std::fmt::Debug` for byte slice.
///
/// Standard `Debug` implementation for `[u8]` is comma separated
/// list of numbers. Since large amount of byte strings are in fact
/// ASCII strings or contain a lot of ASCII strings (e. g. HTTP),
/// it is convenient to print strings as ASCII when possible.
impl Debug for BytesRef<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        f.write_str("b\"")?;
        for &byte in self.0 {
            // https://doc.rust-lang.org/reference/tokens.html#byte-escapes
            match byte {
                b'\n' => f.write_str("\\n")?,
                b'\r' => f.write_str("\\r")?,
                b'\t' => f.write_str("\\t")?,
                b'\\' | b'"' => write!(f, "\\{}", byte as char)?,
                b'\0' => f.write_str("\\0")?,
                // Printable ASCII (0x20..0x7f) is emitted verbatim.
                0x20..=0x7e => write!(f, "{}", byte as char)?,
                // Everything else falls back to a hex escape.
                _ => write!(f, "\\x{:02x}", byte)?,
            }
        }
        f.write_str("\"")?;
        Ok(())
    }
}
// `Bytes` and `BytesMut` delegate their `Debug` output to the escaping
// `BytesRef` implementation, so both render as ASCII-style byte strings.
impl Debug for Bytes {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        Debug::fmt(&BytesRef(&self.as_ref()), f)
    }
}
impl Debug for BytesMut {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        Debug::fmt(&BytesRef(&self.as_ref()), f)
    }
}

37
third_party/rust/bytes-0.5.6/src/fmt/hex.rs поставляемый
Просмотреть файл

@ -1,37 +0,0 @@
use core::fmt::{Formatter, LowerHex, Result, UpperHex};
use super::BytesRef;
use crate::{Bytes, BytesMut};
// Lowercase hex formatting for the internal byte-slice wrapper: two hex
// digits per byte, no separators.
impl LowerHex for BytesRef<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        for &b in self.0 {
            write!(f, "{:02x}", b)?;
        }
        Ok(())
    }
}
// Uppercase variant of the same per-byte formatting.
impl UpperHex for BytesRef<'_> {
    fn fmt(&self, f: &mut Formatter<'_>) -> Result {
        for &b in self.0 {
            write!(f, "{:02X}", b)?;
        }
        Ok(())
    }
}
// Forwards a hex trait impl for a byte container to the `BytesRef` impls
// by wrapping the container's underlying slice.
macro_rules! hex_impl {
    ($tr:ident, $ty:ty) => {
        impl $tr for $ty {
            fn fmt(&self, f: &mut Formatter<'_>) -> Result {
                $tr::fmt(&BytesRef(self.as_ref()), f)
            }
        }
    };
}
hex_impl!(LowerHex, Bytes);
hex_impl!(LowerHex, BytesMut);
hex_impl!(UpperHex, Bytes);
hex_impl!(UpperHex, BytesMut);

5
third_party/rust/bytes-0.5.6/src/fmt/mod.rs поставляемый
Просмотреть файл

@ -1,5 +0,0 @@
mod debug;
mod hex;
/// `BytesRef` is not a part of public API of bytes crate.
///
/// Private newtype over a byte slice: the single place where the custom
/// `Debug` and hex formatting logic lives. `Bytes` and `BytesMut` forward
/// their formatting impls to it.
struct BytesRef<'a>(&'a [u8]);

117
third_party/rust/bytes-0.5.6/src/lib.rs поставляемый
Просмотреть файл

@ -1,117 +0,0 @@
#![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)]
#![doc(test(
no_crate_inject,
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
))]
#![doc(html_root_url = "https://docs.rs/bytes/0.5.6")]
#![no_std]
//! Provides abstractions for working with bytes.
//!
//! The `bytes` crate provides an efficient byte buffer structure
//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer
//! implementations ([`Buf`], [`BufMut`]).
//!
//! [`Buf`]: trait.Buf.html
//! [`BufMut`]: trait.BufMut.html
//!
//! # `Bytes`
//!
//! `Bytes` is an efficient container for storing and operating on contiguous
//! slices of memory. It is intended for use primarily in networking code, but
//! could have applications elsewhere as well.
//!
//! `Bytes` values facilitate zero-copy network programming by allowing multiple
//! `Bytes` objects to point to the same underlying memory. This is managed by
//! using a reference count to track when the memory is no longer needed and can
//! be freed.
//!
//! A `Bytes` handle can be created directly from an existing byte store (such as `&[u8]`
//! or `Vec<u8>`), but usually a `BytesMut` is used first and written to. For
//! example:
//!
//! ```rust
//! use bytes::{BytesMut, BufMut};
//!
//! let mut buf = BytesMut::with_capacity(1024);
//! buf.put(&b"hello world"[..]);
//! buf.put_u16(1234);
//!
//! let a = buf.split();
//! assert_eq!(a, b"hello world\x04\xD2"[..]);
//!
//! buf.put(&b"goodbye world"[..]);
//!
//! let b = buf.split();
//! assert_eq!(b, b"goodbye world"[..]);
//!
//! assert_eq!(buf.capacity(), 998);
//! ```
//!
//! In the above example, only a single buffer of 1024 is allocated. The handles
//! `a` and `b` will share the underlying buffer and maintain indices tracking
//! the view into the buffer represented by the handle.
//!
//! See the [struct docs] for more details.
//!
//! [struct docs]: struct.Bytes.html
//!
//! # `Buf`, `BufMut`
//!
//! These two traits provide read and write access to buffers. The underlying
//! storage may or may not be in contiguous memory. For example, `Bytes` is a
//! buffer that guarantees contiguous memory, but a [rope] stores the bytes in
//! disjoint chunks. `Buf` and `BufMut` maintain cursors tracking the current
//! position in the underlying byte storage. When bytes are read or written, the
//! cursor is advanced.
//!
//! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure)
//!
//! ## Relation with `Read` and `Write`
//!
//! At first glance, it may seem that `Buf` and `BufMut` overlap in
//! functionality with `std::io::Read` and `std::io::Write`. However, they
//! serve different purposes. A buffer is the value that is provided as an
//! argument to `Read::read` and `Write::write`. `Read` and `Write` may then
//! perform a syscall, which has the potential of failing. Operations on `Buf`
//! and `BufMut` are infallible.
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
pub mod buf;
pub use crate::buf::{Buf, BufMut};
mod bytes;
mod bytes_mut;
mod fmt;
mod loom;
pub use crate::bytes::Bytes;
pub use crate::bytes_mut::BytesMut;
// Optional Serde support
#[cfg(feature = "serde")]
mod serde;
/// Aborts the process without ever returning.
///
/// With the `std` feature this is a plain `process::abort()`. In `no_std`
/// builds there is no abort intrinsic available, so the same effect is
/// achieved by panicking while a panic is already unwinding (a "double
/// panic"), which the runtime turns into an abort.
#[inline(never)]
#[cold]
fn abort() -> ! {
    #[cfg(feature = "std")]
    {
        std::process::abort();
    }
    #[cfg(not(feature = "std"))]
    {
        // Panicking from this guard's destructor while the `panic!` below is
        // unwinding forces an abort.
        struct DoublePanic;
        impl Drop for DoublePanic {
            fn drop(&mut self) {
                panic!();
            }
        }
        let _guard = DoublePanic;
        panic!("abort");
    }
}

30
third_party/rust/bytes-0.5.6/src/loom.rs поставляемый
Просмотреть файл

@ -1,30 +0,0 @@
// Normal builds: alias the real `core` atomics and add an `AtomicMut`
// helper that gives closure-based access to an `AtomicPtr` the caller
// holds exclusively (`&mut`).
#[cfg(not(all(test, loom)))]
pub(crate) mod sync {
    pub(crate) mod atomic {
        pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
        pub(crate) trait AtomicMut<T> {
            fn with_mut<F, R>(&mut self, f: F) -> R
            where
                F: FnOnce(&mut *mut T) -> R;
        }
        impl<T> AtomicMut<T> for AtomicPtr<T> {
            fn with_mut<F, R>(&mut self, f: F) -> R
            where
                F: FnOnce(&mut *mut T) -> R,
            {
                // `&mut self` guarantees exclusive access, so the plain
                // (non-atomic) view via `get_mut` is sound here.
                f(self.get_mut())
            }
        }
    }
}
// loom model-checking builds: substitute loom's instrumented atomics.
// NOTE(review): the trait is intentionally left without methods here —
// presumably loom's own `AtomicPtr::with_mut` is used instead; confirm
// against loom's API.
#[cfg(all(test, loom))]
pub(crate) mod sync {
    pub(crate) mod atomic {
        pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
        pub(crate) trait AtomicMut<T> {}
    }
}

89
third_party/rust/bytes-0.5.6/src/serde.rs поставляемый
Просмотреть файл

@ -1,89 +0,0 @@
use super::{Bytes, BytesMut};
use alloc::string::String;
use alloc::vec::Vec;
use core::{cmp, fmt};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
// Generates matching `Serialize` + `Deserialize` impls (plus the visitor
// type the deserializer needs) for a byte-container type.
//
// `$from_slice` / `$from_vec` name the constructors used to build `$ty`
// from borrowed and owned bytes respectively.
macro_rules! serde_impl {
    ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => {
        impl Serialize for $ty {
            #[inline]
            fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
            where
                S: Serializer,
            {
                // Serialized as an opaque byte string.
                serializer.serialize_bytes(&self)
            }
        }
        struct $visitor_ty;
        impl<'de> de::Visitor<'de> for $visitor_ty {
            type Value = $ty;
            fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
                formatter.write_str("byte array")
            }
            #[inline]
            fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
            where
                V: de::SeqAccess<'de>,
            {
                // Preallocation is capped at 4096 so the input's size hint
                // cannot force an arbitrarily large allocation up front.
                let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
                let mut values: Vec<u8> = Vec::with_capacity(len);
                while let Some(value) = seq.next_element()? {
                    values.push(value);
                }
                Ok($ty::$from_vec(values))
            }
            #[inline]
            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                Ok($ty::$from_slice(v))
            }
            #[inline]
            fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                // Owned input goes through the owned-bytes constructor.
                Ok($ty::$from_vec(v))
            }
            #[inline]
            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                // Strings deserialize as their UTF-8 bytes.
                Ok($ty::$from_slice(v.as_bytes()))
            }
            #[inline]
            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
            where
                E: de::Error,
            {
                Ok($ty::$from_vec(v.into_bytes()))
            }
        }
        impl<'de> Deserialize<'de> for $ty {
            #[inline]
            fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
            where
                D: Deserializer<'de>,
            {
                deserializer.deserialize_byte_buf($visitor_ty)
            }
        }
    };
}
serde_impl!(Bytes, BytesVisitor, copy_from_slice, from);
serde_impl!(BytesMut, BytesMutVisitor, from, from_vec);

103
third_party/rust/bytes-0.5.6/tests/test_buf.rs поставляемый
Просмотреть файл

@ -1,103 +0,0 @@
#![warn(rust_2018_idioms)]
use bytes::Buf;
#[cfg(feature = "std")]
use std::io::IoSlice;
// `&[u8]` implements `Buf`: advancing shrinks the slice from the front.
#[test]
fn test_fresh_cursor_vec() {
    let mut buf = &b"hello"[..];
    assert_eq!(buf.remaining(), 5);
    assert_eq!(buf.bytes(), b"hello");
    buf.advance(2);
    assert_eq!(buf.remaining(), 3);
    assert_eq!(buf.bytes(), b"llo");
    buf.advance(3);
    assert_eq!(buf.remaining(), 0);
    assert_eq!(buf.bytes(), b"");
}
#[test]
fn test_get_u8() {
    let mut buf = &b"\x21zomg"[..];
    assert_eq!(0x21, buf.get_u8());
}
// `get_u16` reads big-endian by default; the `_le` variant little-endian.
#[test]
fn test_get_u16() {
    let mut buf = &b"\x21\x54zomg"[..];
    assert_eq!(0x2154, buf.get_u16());
    let mut buf = &b"\x21\x54zomg"[..];
    assert_eq!(0x5421, buf.get_u16_le());
}
// Reading past the remaining bytes must panic, not return garbage.
#[test]
#[should_panic]
fn test_get_u16_buffer_underflow() {
    let mut buf = &b"\x21"[..];
    buf.get_u16();
}
// A contiguous slice fills exactly one of the provided IoSlices.
#[cfg(feature = "std")]
#[test]
fn test_bufs_vec() {
    let buf = &b"hello world"[..];
    let b1: &[u8] = &mut [];
    let b2: &[u8] = &mut [];
    let mut dst = [IoSlice::new(b1), IoSlice::new(b2)];
    assert_eq!(1, buf.bytes_vectored(&mut dst[..]));
}
// Exercises the `Buf` impl for `VecDeque<u8>`, including a read after the
// deque has been partially consumed and refilled (which may wrap the ring
// buffer internally).
#[test]
fn test_vec_deque() {
    use std::collections::VecDeque;
    let mut buffer: VecDeque<u8> = VecDeque::new();
    buffer.extend(b"hello world");
    assert_eq!(11, buffer.remaining());
    assert_eq!(b"hello world", buffer.bytes());
    buffer.advance(6);
    assert_eq!(b"world", buffer.bytes());
    buffer.extend(b" piece");
    let mut out = [0; 11];
    buffer.copy_to_slice(&mut out);
    assert_eq!(b"world piece", &out[..]);
}
// Default-method overrides must still be reached through `&mut dyn Buf`
// and `Box<dyn Buf>`; deref forwarding must not bypass them.
#[test]
fn test_deref_buf_forwards() {
    struct Special;
    impl Buf for Special {
        fn remaining(&self) -> usize {
            unreachable!("remaining");
        }
        fn bytes(&self) -> &[u8] {
            unreachable!("bytes");
        }
        fn advance(&mut self, _: usize) {
            unreachable!("advance");
        }
        fn get_u8(&mut self) -> u8 {
            // specialized!
            b'x'
        }
    }
    // these should all use the specialized method
    assert_eq!(Special.get_u8(), b'x');
    assert_eq!((&mut Special as &mut dyn Buf).get_u8(), b'x');
    assert_eq!((Box::new(Special) as Box<dyn Buf>).get_u8(), b'x');
    assert_eq!(Box::new(Special).get_u8(), b'x');
}

Просмотреть файл

@ -1,120 +0,0 @@
#![warn(rust_2018_idioms)]
#[cfg(feature = "std")]
use bytes::buf::IoSliceMut;
use bytes::{BufMut, BytesMut};
use core::fmt::Write;
use core::usize;
// A `Vec<u8>` can always grow, so it reports `usize::MAX` writable bytes.
#[test]
fn test_vec_as_mut_buf() {
    let mut buf = Vec::with_capacity(64);
    assert_eq!(buf.remaining_mut(), usize::MAX);
    assert!(buf.bytes_mut().len() >= 64);
    buf.put(&b"zomg"[..]);
    assert_eq!(&buf, b"zomg");
    assert_eq!(buf.remaining_mut(), usize::MAX - 4);
    assert_eq!(buf.capacity(), 64);
    for _ in 0..16 {
        buf.put(&b"zomg"[..]);
    }
    assert_eq!(buf.len(), 68);
}
#[test]
fn test_put_u8() {
    let mut buf = Vec::with_capacity(8);
    buf.put_u8(33);
    assert_eq!(b"\x21", &buf[..]);
}
// `put_u16` writes big-endian by default; the `_le` variant little-endian.
#[test]
fn test_put_u16() {
    let mut buf = Vec::with_capacity(8);
    buf.put_u16(8532);
    assert_eq!(b"\x21\x54", &buf[..]);
    buf.clear();
    buf.put_u16_le(8532);
    assert_eq!(b"\x54\x21", &buf[..]);
}
// Advancing a Vec-backed writer past its length must panic.
#[test]
#[should_panic(expected = "cannot advance")]
fn test_vec_advance_mut() {
    // Verify fix for #354
    let mut buf = Vec::with_capacity(8);
    unsafe {
        buf.advance_mut(12);
    }
}
// A clone is an independent copy: later writes to the original must not
// show up in the clone.
#[test]
fn test_clone() {
    let mut buf = BytesMut::with_capacity(100);
    buf.write_str("this is a test").unwrap();
    let buf2 = buf.clone();
    buf.write_str(" of our emergency broadcast system").unwrap();
    assert!(buf != buf2);
}
// A `BytesMut` always exposes exactly one writable IoSlice, with or
// without preallocated capacity.
#[cfg(feature = "std")]
#[test]
fn test_bufs_vec_mut() {
    let b1: &mut [u8] = &mut [];
    let b2: &mut [u8] = &mut [];
    let mut dst = [IoSliceMut::from(b1), IoSliceMut::from(b2)];
    // with no capacity
    let mut buf = BytesMut::new();
    assert_eq!(buf.capacity(), 0);
    assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..]));
    // with capacity
    let mut buf = BytesMut::with_capacity(64);
    assert_eq!(1, buf.bytes_vectored_mut(&mut dst[..]));
}
// `&mut [u8]` implements `BufMut`, writing in place without growing.
#[test]
fn test_mut_slice() {
    let mut v = vec![0, 0, 0, 0];
    let mut s = &mut v[..];
    s.put_u32(42);
}
// Default-method overrides must still be reached through `&mut dyn BufMut`
// and `Box<dyn BufMut>`; deref forwarding must not bypass them.
#[test]
fn test_deref_bufmut_forwards() {
    struct Special;
    impl BufMut for Special {
        fn remaining_mut(&self) -> usize {
            unreachable!("remaining_mut");
        }
        fn bytes_mut(&mut self) -> &mut [std::mem::MaybeUninit<u8>] {
            unreachable!("bytes_mut");
        }
        unsafe fn advance_mut(&mut self, _: usize) {
            unreachable!("advance");
        }
        fn put_u8(&mut self, _: u8) {
            // specialized!
        }
    }
    // these should all use the specialized method
    Special.put_u8(b'x');
    (&mut Special as &mut dyn BufMut).put_u8(b'x');
    (Box::new(Special) as Box<dyn BufMut>).put_u8(b'x');
    Box::new(Special).put_u8(b'x');
}

Просмотреть файл

@ -1,962 +0,0 @@
#![warn(rust_2018_idioms)]
use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::usize;
const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
const SHORT: &'static [u8] = b"hello world";
fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}
#[test]
fn test_bounds() {
is_sync::<Bytes>();
is_sync::<BytesMut>();
is_send::<Bytes>();
is_send::<BytesMut>();
}
#[test]
fn test_layout() {
use std::mem;
assert_eq!(
mem::size_of::<Bytes>(),
mem::size_of::<usize>() * 4,
"Bytes size should be 4 words",
);
assert_eq!(
mem::size_of::<BytesMut>(),
mem::size_of::<usize>() * 4,
"BytesMut should be 4 words",
);
assert_eq!(
mem::size_of::<Bytes>(),
mem::size_of::<Option<Bytes>>(),
"Bytes should be same size as Option<Bytes>",
);
assert_eq!(
mem::size_of::<BytesMut>(),
mem::size_of::<Option<BytesMut>>(),
"BytesMut should be same size as Option<BytesMut>",
);
}
#[test]
fn from_slice() {
let a = Bytes::from(&b"abcdefgh"[..]);
assert_eq!(a, b"abcdefgh"[..]);
assert_eq!(a, &b"abcdefgh"[..]);
assert_eq!(a, Vec::from(&b"abcdefgh"[..]));
assert_eq!(b"abcdefgh"[..], a);
assert_eq!(&b"abcdefgh"[..], a);
assert_eq!(Vec::from(&b"abcdefgh"[..]), a);
let a = BytesMut::from(&b"abcdefgh"[..]);
assert_eq!(a, b"abcdefgh"[..]);
assert_eq!(a, &b"abcdefgh"[..]);
assert_eq!(a, Vec::from(&b"abcdefgh"[..]));
assert_eq!(b"abcdefgh"[..], a);
assert_eq!(&b"abcdefgh"[..], a);
assert_eq!(Vec::from(&b"abcdefgh"[..]), a);
}
#[test]
fn fmt() {
let a = format!("{:?}", Bytes::from(&b"abcdefg"[..]));
let b = "b\"abcdefg\"";
assert_eq!(a, b);
let a = format!("{:?}", BytesMut::from(&b"abcdefg"[..]));
assert_eq!(a, b);
}
#[test]
fn fmt_write() {
use std::fmt::Write;
use std::iter::FromIterator;
let s = String::from_iter((0..10).map(|_| "abcdefg"));
let mut a = BytesMut::with_capacity(64);
write!(a, "{}", &s[..64]).unwrap();
assert_eq!(a, s[..64].as_bytes());
let mut b = BytesMut::with_capacity(64);
write!(b, "{}", &s[..32]).unwrap();
write!(b, "{}", &s[32..64]).unwrap();
assert_eq!(b, s[..64].as_bytes());
let mut c = BytesMut::with_capacity(64);
write!(c, "{}", s).unwrap();
assert_eq!(c, s[..].as_bytes());
}
#[test]
fn len() {
let a = Bytes::from(&b"abcdefg"[..]);
assert_eq!(a.len(), 7);
let a = BytesMut::from(&b"abcdefg"[..]);
assert_eq!(a.len(), 7);
let a = Bytes::from(&b""[..]);
assert!(a.is_empty());
let a = BytesMut::from(&b""[..]);
assert!(a.is_empty());
}
#[test]
fn index() {
let a = Bytes::from(&b"hello world"[..]);
assert_eq!(a[0..5], *b"hello");
}
#[test]
fn slice() {
let a = Bytes::from(&b"hello world"[..]);
let b = a.slice(3..5);
assert_eq!(b, b"lo"[..]);
let b = a.slice(0..0);
assert_eq!(b, b""[..]);
let b = a.slice(3..3);
assert_eq!(b, b""[..]);
let b = a.slice(a.len()..a.len());
assert_eq!(b, b""[..]);
let b = a.slice(..5);
assert_eq!(b, b"hello"[..]);
let b = a.slice(3..);
assert_eq!(b, b"lo world"[..]);
}
#[test]
#[should_panic]
fn slice_oob_1() {
let a = Bytes::from(&b"hello world"[..]);
a.slice(5..44);
}
#[test]
#[should_panic]
fn slice_oob_2() {
let a = Bytes::from(&b"hello world"[..]);
a.slice(44..49);
}
#[test]
fn split_off() {
let mut hello = Bytes::from(&b"helloworld"[..]);
let world = hello.split_off(5);
assert_eq!(hello, &b"hello"[..]);
assert_eq!(world, &b"world"[..]);
let mut hello = BytesMut::from(&b"helloworld"[..]);
let world = hello.split_off(5);
assert_eq!(hello, &b"hello"[..]);
assert_eq!(world, &b"world"[..]);
}
#[test]
#[should_panic]
fn split_off_oob() {
let mut hello = Bytes::from(&b"helloworld"[..]);
let _ = hello.split_off(44);
}
#[test]
fn split_off_uninitialized() {
let mut bytes = BytesMut::with_capacity(1024);
let other = bytes.split_off(128);
assert_eq!(bytes.len(), 0);
assert_eq!(bytes.capacity(), 128);
assert_eq!(other.len(), 0);
assert_eq!(other.capacity(), 896);
}
#[test]
fn split_off_to_loop() {
let s = b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
for i in 0..(s.len() + 1) {
{
let mut bytes = Bytes::from(&s[..]);
let off = bytes.split_off(i);
assert_eq!(i, bytes.len());
let mut sum = Vec::new();
sum.extend(bytes.iter());
sum.extend(off.iter());
assert_eq!(&s[..], &sum[..]);
}
{
let mut bytes = BytesMut::from(&s[..]);
let off = bytes.split_off(i);
assert_eq!(i, bytes.len());
let mut sum = Vec::new();
sum.extend(&bytes);
sum.extend(&off);
assert_eq!(&s[..], &sum[..]);
}
{
let mut bytes = Bytes::from(&s[..]);
let off = bytes.split_to(i);
assert_eq!(i, off.len());
let mut sum = Vec::new();
sum.extend(off.iter());
sum.extend(bytes.iter());
assert_eq!(&s[..], &sum[..]);
}
{
let mut bytes = BytesMut::from(&s[..]);
let off = bytes.split_to(i);
assert_eq!(i, off.len());
let mut sum = Vec::new();
sum.extend(&off);
sum.extend(&bytes);
assert_eq!(&s[..], &sum[..]);
}
}
}
#[test]
fn split_to_1() {
// Static
let mut a = Bytes::from_static(SHORT);
let b = a.split_to(4);
assert_eq!(SHORT[4..], a);
assert_eq!(SHORT[..4], b);
// Allocated
let mut a = Bytes::copy_from_slice(LONG);
let b = a.split_to(4);
assert_eq!(LONG[4..], a);
assert_eq!(LONG[..4], b);
let mut a = Bytes::copy_from_slice(LONG);
let b = a.split_to(30);
assert_eq!(LONG[30..], a);
assert_eq!(LONG[..30], b);
}
#[test]
fn split_to_2() {
let mut a = Bytes::from(LONG);
assert_eq!(LONG, a);
let b = a.split_to(1);
assert_eq!(LONG[1..], a);
drop(b);
}
#[test]
#[should_panic]
fn split_to_oob() {
let mut hello = Bytes::from(&b"helloworld"[..]);
let _ = hello.split_to(33);
}
#[test]
#[should_panic]
fn split_to_oob_mut() {
let mut hello = BytesMut::from(&b"helloworld"[..]);
let _ = hello.split_to(33);
}
#[test]
#[should_panic]
fn split_to_uninitialized() {
let mut bytes = BytesMut::with_capacity(1024);
let _other = bytes.split_to(128);
}
#[test]
fn split_off_to_at_gt_len() {
fn make_bytes() -> Bytes {
let mut bytes = BytesMut::with_capacity(100);
bytes.put_slice(&[10, 20, 30, 40]);
bytes.freeze()
}
use std::panic;
let _ = make_bytes().split_to(4);
let _ = make_bytes().split_off(4);
assert!(panic::catch_unwind(move || {
let _ = make_bytes().split_to(5);
})
.is_err());
assert!(panic::catch_unwind(move || {
let _ = make_bytes().split_off(5);
})
.is_err());
}
#[test]
fn truncate() {
let s = &b"helloworld"[..];
let mut hello = Bytes::from(s);
hello.truncate(15);
assert_eq!(hello, s);
hello.truncate(10);
assert_eq!(hello, s);
hello.truncate(5);
assert_eq!(hello, "hello");
}
#[test]
fn freeze_clone_shared() {
let s = &b"abcdefgh"[..];
let b = BytesMut::from(s).split().freeze();
assert_eq!(b, s);
let c = b.clone();
assert_eq!(c, s);
}
#[test]
fn freeze_clone_unique() {
let s = &b"abcdefgh"[..];
let b = BytesMut::from(s).freeze();
assert_eq!(b, s);
let c = b.clone();
assert_eq!(c, s);
}
#[test]
fn freeze_after_advance() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
b.advance(1);
assert_eq!(b, s[1..]);
let b = b.freeze();
// Verify fix for #352. Previously, freeze would ignore the start offset
// for BytesMuts in Vec mode.
assert_eq!(b, s[1..]);
}
#[test]
fn freeze_after_advance_arc() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
// Make b Arc
let _ = b.split_to(0);
b.advance(1);
assert_eq!(b, s[1..]);
let b = b.freeze();
assert_eq!(b, s[1..]);
}
#[test]
fn freeze_after_split_to() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
let _ = b.split_to(1);
assert_eq!(b, s[1..]);
let b = b.freeze();
assert_eq!(b, s[1..]);
}
#[test]
fn freeze_after_truncate() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
b.truncate(7);
assert_eq!(b, s[..7]);
let b = b.freeze();
assert_eq!(b, s[..7]);
}
#[test]
fn freeze_after_truncate_arc() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
// Make b Arc
let _ = b.split_to(0);
b.truncate(7);
assert_eq!(b, s[..7]);
let b = b.freeze();
assert_eq!(b, s[..7]);
}
#[test]
fn freeze_after_split_off() {
let s = &b"abcdefgh"[..];
let mut b = BytesMut::from(s);
let _ = b.split_off(7);
assert_eq!(b, s[..7]);
let b = b.freeze();
assert_eq!(b, s[..7]);
}
#[test]
fn fns_defined_for_bytes_mut() {
let mut bytes = BytesMut::from(&b"hello world"[..]);
bytes.as_ptr();
bytes.as_mut_ptr();
// Iterator
let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
assert_eq!(&v[..], bytes);
}
#[test]
fn reserve_convert() {
// Vec -> Vec
let mut bytes = BytesMut::from(LONG);
bytes.reserve(64);
assert_eq!(bytes.capacity(), LONG.len() + 64);
// Arc -> Vec
let mut bytes = BytesMut::from(LONG);
let a = bytes.split_to(30);
bytes.reserve(128);
assert!(bytes.capacity() >= bytes.len() + 128);
drop(a);
}
#[test]
fn reserve_growth() {
let mut bytes = BytesMut::with_capacity(64);
bytes.put("hello world".as_bytes());
let _ = bytes.split();
bytes.reserve(65);
assert_eq!(bytes.capacity(), 128);
}
#[test]
fn reserve_allocates_at_least_original_capacity() {
let mut bytes = BytesMut::with_capacity(1024);
for i in 0..1020 {
bytes.put_u8(i as u8);
}
let _other = bytes.split();
bytes.reserve(16);
assert_eq!(bytes.capacity(), 1024);
}
#[test]
fn reserve_max_original_capacity_value() {
const SIZE: usize = 128 * 1024;
let mut bytes = BytesMut::with_capacity(SIZE);
for _ in 0..SIZE {
bytes.put_u8(0u8);
}
let _other = bytes.split();
bytes.reserve(16);
assert_eq!(bytes.capacity(), 64 * 1024);
}
#[test]
fn reserve_vec_recycling() {
let mut bytes = BytesMut::with_capacity(16);
assert_eq!(bytes.capacity(), 16);
let addr = bytes.as_ptr() as usize;
bytes.put("0123456789012345".as_bytes());
assert_eq!(bytes.as_ptr() as usize, addr);
bytes.advance(10);
assert_eq!(bytes.capacity(), 6);
bytes.reserve(8);
assert_eq!(bytes.capacity(), 16);
assert_eq!(bytes.as_ptr() as usize, addr);
}
#[test]
fn reserve_in_arc_unique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
let _ = bytes.split();
// now bytes is Arc and refcount == 1
assert_eq!(1000, bytes.capacity());
bytes.reserve(2001);
assert_eq!(2001, bytes.capacity());
}
#[test]
fn reserve_in_arc_unique_doubles() {
let mut bytes = BytesMut::with_capacity(1000);
let _ = bytes.split();
// now bytes is Arc and refcount == 1
assert_eq!(1000, bytes.capacity());
bytes.reserve(1001);
assert_eq!(2000, bytes.capacity());
}
#[test]
fn reserve_in_arc_nonunique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
let _copy = bytes.split();
// now bytes is Arc and refcount == 2
assert_eq!(1000, bytes.capacity());
bytes.reserve(2001);
assert_eq!(2001, bytes.capacity());
}
#[test]
fn extend_mut() {
let mut bytes = BytesMut::with_capacity(0);
bytes.extend(LONG);
assert_eq!(*bytes, LONG[..]);
}
#[test]
fn extend_from_slice_mut() {
for &i in &[3, 34] {
let mut bytes = BytesMut::new();
bytes.extend_from_slice(&LONG[..i]);
bytes.extend_from_slice(&LONG[i..]);
assert_eq!(LONG[..], *bytes);
}
}
#[test]
fn extend_mut_without_size_hint() {
let mut bytes = BytesMut::with_capacity(0);
let mut long_iter = LONG.iter();
// Use iter::from_fn since it doesn't know a size_hint
bytes.extend(std::iter::from_fn(|| long_iter.next()));
assert_eq!(*bytes, LONG[..]);
}
#[test]
fn from_static() {
let mut a = Bytes::from_static(b"ab");
let b = a.split_off(1);
assert_eq!(a, b"a"[..]);
assert_eq!(b, b"b"[..]);
}
#[test]
fn advance_static() {
let mut a = Bytes::from_static(b"hello world");
a.advance(6);
assert_eq!(a, &b"world"[..]);
}
#[test]
fn advance_vec() {
let mut a = Bytes::from(b"hello world boooo yah world zomg wat wat".to_vec());
a.advance(16);
assert_eq!(a, b"o yah world zomg wat wat"[..]);
a.advance(4);
assert_eq!(a, b"h world zomg wat wat"[..]);
a.advance(6);
assert_eq!(a, b"d zomg wat wat"[..]);
}
#[test]
fn advance_bytes_mut() {
let mut a = BytesMut::from("hello world boooo yah world zomg wat wat");
a.advance(16);
assert_eq!(a, b"o yah world zomg wat wat"[..]);
a.advance(4);
assert_eq!(a, b"h world zomg wat wat"[..]);
// Reserve some space.
a.reserve(1024);
assert_eq!(a, b"h world zomg wat wat"[..]);
a.advance(6);
assert_eq!(a, b"d zomg wat wat"[..]);
}
#[test]
#[should_panic]
fn advance_past_len() {
let mut a = BytesMut::from("hello world");
a.advance(20);
}
#[test]
// Only run these tests on little endian systems. CI uses qemu for testing
// little endian... and qemu doesn't really support threading all that well.
#[cfg(target_endian = "little")]
fn stress() {
// Tests promoting a buffer from a vec -> shared in a concurrent situation
use std::sync::{Arc, Barrier};
use std::thread;
const THREADS: usize = 8;
const ITERS: usize = 1_000;
for i in 0..ITERS {
let data = [i as u8; 256];
let buf = Arc::new(Bytes::copy_from_slice(&data[..]));
let barrier = Arc::new(Barrier::new(THREADS));
let mut joins = Vec::with_capacity(THREADS);
for _ in 0..THREADS {
let c = barrier.clone();
let buf = buf.clone();
joins.push(thread::spawn(move || {
c.wait();
let buf: Bytes = (*buf).clone();
drop(buf);
}));
}
for th in joins {
th.join().unwrap();
}
assert_eq!(*buf, data[..]);
}
}
#[test]
fn partial_eq_bytesmut() {
let bytes = Bytes::from(&b"The quick red fox"[..]);
let bytesmut = BytesMut::from(&b"The quick red fox"[..]);
assert!(bytes == bytesmut);
assert!(bytesmut == bytes);
let bytes2 = Bytes::from(&b"Jumped over the lazy brown dog"[..]);
assert!(bytes2 != bytesmut);
assert!(bytesmut != bytes2);
}
/*
#[test]
fn bytes_unsplit_basic() {
let buf = Bytes::from(&b"aaabbbcccddd"[..]);
let splitted = buf.split_off(6);
assert_eq!(b"aaabbb", &buf[..]);
assert_eq!(b"cccddd", &splitted[..]);
buf.unsplit(splitted);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_unsplit_empty_other() {
let buf = Bytes::from(&b"aaabbbcccddd"[..]);
// empty other
let other = Bytes::new();
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_unsplit_empty_self() {
// empty self
let mut buf = Bytes::new();
let mut other = Bytes::with_capacity(64);
other.extend_from_slice(b"aaabbbcccddd");
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_unsplit_arc_different() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeee");
buf.split_off(8); //arc
let mut buf2 = Bytes::with_capacity(64);
buf2.extend_from_slice(b"ccccddddeeee");
buf2.split_off(8); //arc
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_unsplit_arc_non_contiguous() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf.unsplit(buf3);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_unsplit_two_split_offs() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf2.unsplit(buf3);
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_unsplit_overlapping_references() {
let mut buf = Bytes::with_capacity(64);
buf.extend_from_slice(b"abcdefghijklmnopqrstuvwxyz");
let mut buf0010 = buf.slice(0..10);
let buf1020 = buf.slice(10..20);
let buf0515 = buf.slice(5..15);
buf0010.unsplit(buf1020);
assert_eq!(b"abcdefghijklmnopqrst", &buf0010[..]);
assert_eq!(b"fghijklmno", &buf0515[..]);
}
*/
#[test]
fn bytes_mut_unsplit_basic() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaabbbcccddd");
let splitted = buf.split_off(6);
assert_eq!(b"aaabbb", &buf[..]);
assert_eq!(b"cccddd", &splitted[..]);
buf.unsplit(splitted);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_empty_other() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaabbbcccddd");
// empty other
let other = BytesMut::new();
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_empty_self() {
// empty self
let mut buf = BytesMut::new();
let mut other = BytesMut::with_capacity(64);
other.extend_from_slice(b"aaabbbcccddd");
buf.unsplit(other);
assert_eq!(b"aaabbbcccddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_arc_different() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeee");
let _ = buf.split_off(8); //arc
let mut buf2 = BytesMut::with_capacity(64);
buf2.extend_from_slice(b"ccccddddeeee");
let _ = buf2.split_off(8); //arc
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_arc_non_contiguous() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbeeeeccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf.unsplit(buf3);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn bytes_mut_unsplit_two_split_offs() {
let mut buf = BytesMut::with_capacity(64);
buf.extend_from_slice(b"aaaabbbbccccdddd");
let mut buf2 = buf.split_off(8); //arc
let buf3 = buf2.split_off(4); //arc
buf2.unsplit(buf3);
buf.unsplit(buf2);
assert_eq!(b"aaaabbbbccccdddd", &buf[..]);
}
#[test]
fn from_iter_no_size_hint() {
use std::iter;
let mut expect = vec![];
let actual: Bytes = iter::repeat(b'x')
.scan(100, |cnt, item| {
if *cnt >= 1 {
*cnt -= 1;
expect.push(item);
Some(item)
} else {
None
}
})
.collect();
assert_eq!(&actual[..], &expect[..]);
}
fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
let slice = &(bytes.as_ref()[start..end]);
let sub = bytes.slice_ref(&slice);
assert_eq!(&sub[..], expected);
}
#[test]
fn slice_ref_works() {
let bytes = Bytes::from(&b"012345678"[..]);
test_slice_ref(&bytes, 0, 0, b"");
test_slice_ref(&bytes, 0, 3, b"012");
test_slice_ref(&bytes, 2, 6, b"2345");
test_slice_ref(&bytes, 7, 9, b"78");
test_slice_ref(&bytes, 9, 9, b"");
}
#[test]
fn slice_ref_empty() {
let bytes = Bytes::from(&b""[..]);
let slice = &(bytes.as_ref()[0..0]);
let sub = bytes.slice_ref(&slice);
assert_eq!(&sub[..], b"");
}
#[test]
fn slice_ref_empty_subslice() {
let bytes = Bytes::from(&b"abcde"[..]);
let subbytes = bytes.slice(0..0);
let slice = &subbytes[..];
// The `slice` object is derived from the original `bytes` object
// so `slice_ref` should work.
assert_eq!(Bytes::new(), bytes.slice_ref(slice));
}
#[test]
#[should_panic]
fn slice_ref_catches_not_a_subset() {
let bytes = Bytes::from(&b"012345678"[..]);
let slice = &b"012345"[0..4];
bytes.slice_ref(slice);
}
#[test]
fn slice_ref_not_an_empty_subset() {
let bytes = Bytes::from(&b"012345678"[..]);
let slice = &b""[0..0];
assert_eq!(Bytes::new(), bytes.slice_ref(slice));
}
#[test]
fn empty_slice_ref_not_an_empty_subset() {
let bytes = Bytes::new();
let slice = &b"some other slice"[0..0];
assert_eq!(Bytes::new(), bytes.slice_ref(slice));
}
#[test]
fn bytes_buf_mut_advance() {
let mut bytes = BytesMut::with_capacity(1024);
unsafe {
let ptr = bytes.bytes_mut().as_ptr();
assert_eq!(1024, bytes.bytes_mut().len());
bytes.advance_mut(10);
let next = bytes.bytes_mut().as_ptr();
assert_eq!(1024 - 10, bytes.bytes_mut().len());
assert_eq!(ptr.offset(10), next);
// advance to the end
bytes.advance_mut(1024 - 10);
// The buffer size is doubled
assert_eq!(1024, bytes.bytes_mut().len());
}
}
#[test]
fn bytes_buf_mut_reuse_when_fully_consumed() {
use bytes::{Buf, BytesMut};
let mut buf = BytesMut::new();
buf.reserve(8192);
buf.extend_from_slice(&[0u8; 100][..]);
let p = &buf[0] as *const u8;
buf.advance(100);
buf.reserve(8192);
buf.extend_from_slice(b" ");
assert_eq!(&buf[0] as *const u8, p);
}
#[test]
#[should_panic]
fn bytes_reserve_overflow() {
let mut bytes = BytesMut::with_capacity(1024);
bytes.put_slice(b"hello world");
bytes.reserve(usize::MAX);
}
#[test]
fn bytes_with_capacity_but_empty() {
// See https://github.com/tokio-rs/bytes/issues/340
let vec = Vec::with_capacity(1);
let _ = Bytes::from(vec);
}

Просмотреть файл

@ -1,67 +0,0 @@
//! Test using `Bytes` with an allocator that hands out "odd" pointers for
//! vectors (pointers where the LSB is set).
use std::alloc::{GlobalAlloc, Layout, System};
use std::ptr;
use bytes::Bytes;
#[global_allocator]
static ODD: Odd = Odd;
struct Odd;
unsafe impl GlobalAlloc for Odd {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() == 1 && layout.size() > 0 {
// Allocate slightly bigger so that we can offset the pointer by 1
let size = layout.size() + 1;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => return ptr::null_mut(),
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
let ptr = ptr.offset(1);
ptr
} else {
ptr
}
} else {
System.alloc(layout)
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.align() == 1 && layout.size() > 0 {
let size = layout.size() + 1;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => std::process::abort(),
};
System.dealloc(ptr.offset(-1), new_layout);
} else {
System.dealloc(ptr, layout);
}
}
}
#[test]
fn sanity_check_odd_allocator() {
let vec = vec![33u8; 1024];
let p = vec.as_ptr() as usize;
assert!(p & 0x1 == 0x1, "{:#b}", p);
}
#[test]
fn test_bytes_from_vec_drop() {
let vec = vec![33u8; 1024];
let _b = Bytes::from(vec);
}
#[test]
fn test_bytes_clone_drop() {
let vec = vec![33u8; 1024];
let b1 = Bytes::from(vec);
let _b2 = b1.clone();
}

Просмотреть файл

@ -1,79 +0,0 @@
use std::alloc::{GlobalAlloc, Layout, System};
use std::{mem, ptr};
use bytes::{Buf, Bytes};
#[global_allocator]
static LEDGER: Ledger = Ledger;
struct Ledger;
const USIZE_SIZE: usize = mem::size_of::<usize>();
unsafe impl GlobalAlloc for Ledger {
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
if layout.align() == 1 && layout.size() > 0 {
// Allocate extra space to stash a record of
// how much space there was.
let orig_size = layout.size();
let size = orig_size + USIZE_SIZE;
let new_layout = match Layout::from_size_align(size, 1) {
Ok(layout) => layout,
Err(_err) => return ptr::null_mut(),
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
(ptr as *mut usize).write(orig_size);
let ptr = ptr.offset(USIZE_SIZE as isize);
ptr
} else {
ptr
}
} else {
System.alloc(layout)
}
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if layout.align() == 1 && layout.size() > 0 {
let off_ptr = (ptr as *mut usize).offset(-1);
let orig_size = off_ptr.read();
if orig_size != layout.size() {
panic!(
"bad dealloc: alloc size was {}, dealloc size is {}",
orig_size,
layout.size()
);
}
let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
Ok(layout) => layout,
Err(_err) => std::process::abort(),
};
System.dealloc(off_ptr as *mut u8, new_layout);
} else {
System.dealloc(ptr, layout);
}
}
}
#[test]
fn test_bytes_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
bytes.advance(1);
drop(bytes);
}
#[test]
fn test_bytes_truncate() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
bytes.truncate(2);
drop(bytes);
}
#[test]
fn test_bytes_truncate_and_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
bytes.truncate(2);
bytes.advance(1);
drop(bytes);
}

Просмотреть файл

@ -1,135 +0,0 @@
#![warn(rust_2018_idioms)]
use bytes::buf::{BufExt, BufMutExt};
use bytes::{Buf, BufMut, Bytes};
#[cfg(feature = "std")]
use std::io::IoSlice;
#[test]
fn collect_two_bufs() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let res = a.chain(b).to_bytes();
assert_eq!(res, &b"helloworld"[..]);
}
#[test]
fn writing_chained() {
let mut a = [0u8; 64];
let mut b = [0u8; 64];
{
let mut buf = (&mut a[..]).chain_mut(&mut b[..]);
for i in 0u8..128 {
buf.put_u8(i);
}
}
for i in 0..64 {
let expect = i as u8;
assert_eq!(expect, a[i]);
assert_eq!(expect + 64, b[i]);
}
}
#[test]
fn iterating_two_bufs() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let res: Vec<u8> = a.chain(b).into_iter().collect();
assert_eq!(res, &b"helloworld"[..]);
}
#[cfg(feature = "std")]
#[test]
fn vectored_read() {
let a = Bytes::from(&b"hello"[..]);
let b = Bytes::from(&b"world"[..]);
let mut buf = a.chain(b);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(2, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"hello"[..]);
assert_eq!(iovecs[1][..], b"world"[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(2);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(2, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"llo"[..]);
assert_eq!(iovecs[1][..], b"world"[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(3);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(1, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"world"[..]);
assert_eq!(iovecs[1][..], b""[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
buf.advance(3);
{
let b1: &[u8] = &mut [];
let b2: &[u8] = &mut [];
let b3: &[u8] = &mut [];
let b4: &[u8] = &mut [];
let mut iovecs = [
IoSlice::new(b1),
IoSlice::new(b2),
IoSlice::new(b3),
IoSlice::new(b4),
];
assert_eq!(1, buf.bytes_vectored(&mut iovecs));
assert_eq!(iovecs[0][..], b"ld"[..]);
assert_eq!(iovecs[1][..], b""[..]);
assert_eq!(iovecs[2][..], b""[..]);
assert_eq!(iovecs[3][..], b""[..]);
}
}

Просмотреть файл

@ -1,35 +0,0 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
#[test]
fn fmt() {
let vec: Vec<_> = (0..0x100).map(|b| b as u8).collect();
let expected = "b\"\
\\0\\x01\\x02\\x03\\x04\\x05\\x06\\x07\
\\x08\\t\\n\\x0b\\x0c\\r\\x0e\\x0f\
\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\
\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f\
\x20!\\\"#$%&'()*+,-./0123456789:;<=>?\
@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_\
`abcdefghijklmnopqrstuvwxyz{|}~\\x7f\
\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\
\\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\
\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\
\\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\
\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\
\\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\
\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\
\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\
\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\
\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\
\\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\
\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\
\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\
\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\
\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\
\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff\"";
assert_eq!(expected, format!("{:?}", Bytes::from(vec)));
}

Просмотреть файл

@ -1,21 +0,0 @@
#![warn(rust_2018_idioms)]
use bytes::Bytes;
#[test]
fn iter_len() {
let buf = Bytes::from_static(b"hello world");
let iter = buf.iter();
assert_eq!(iter.size_hint(), (11, Some(11)));
assert_eq!(iter.len(), 11);
}
#[test]
fn empty_iter_len() {
let buf = Bytes::from_static(b"");
let iter = buf.iter();
assert_eq!(iter.size_hint(), (0, Some(0)));
assert_eq!(iter.len(), 0);
}

Просмотреть файл

@ -1,29 +0,0 @@
#![warn(rust_2018_idioms)]
#![cfg(feature = "std")]
use std::io::{BufRead, Read};
use bytes::buf::BufExt;
#[test]
fn read() {
let buf1 = &b"hello "[..];
let buf2 = &b"world"[..];
let buf = BufExt::chain(buf1, buf2); // Disambiguate with Read::chain
let mut buffer = Vec::new();
buf.reader().read_to_end(&mut buffer).unwrap();
assert_eq!(b"hello world", &buffer[..]);
}
#[test]
fn buf_read() {
let buf1 = &b"hell"[..];
let buf2 = &b"o\nworld"[..];
let mut reader = BufExt::chain(buf1, buf2).reader();
let mut line = String::new();
reader.read_line(&mut line).unwrap();
assert_eq!("hello\n", &line);
line.clear();
reader.read_line(&mut line).unwrap();
assert_eq!("world", &line);
}

Просмотреть файл

@ -1,20 +0,0 @@
#![cfg(feature = "serde")]
#![warn(rust_2018_idioms)]
use serde_test::{assert_tokens, Token};
#[test]
fn test_ser_de_empty() {
let b = bytes::Bytes::new();
assert_tokens(&b, &[Token::Bytes(b"")]);
let b = bytes::BytesMut::with_capacity(0);
assert_tokens(&b, &[Token::Bytes(b"")]);
}
#[test]
fn test_ser_de() {
let b = bytes::Bytes::from(&b"bytes"[..]);
assert_tokens(&b, &[Token::Bytes(b"bytes")]);
let b = bytes::BytesMut::from(&b"bytes"[..]);
assert_tokens(&b, &[Token::Bytes(b"bytes")]);
}

Просмотреть файл

@ -1,12 +0,0 @@
#![warn(rust_2018_idioms)]
use bytes::buf::{Buf, BufExt};
#[test]
fn long_take() {
// Tests that get a take with a size greater than the buffer length will not
// overrun the buffer. Regression test for #138.
let buf = b"hello world".take(100);
assert_eq!(11, buf.remaining());
assert_eq!(b"hello world", buf.bytes());
}

1
third_party/rust/form_urlencoded/.cargo-checksum.json поставляемый Normal file
Просмотреть файл

@ -0,0 +1 @@
{"files":{"Cargo.toml":"aadc4e4ba33e86861d8d1d8b848ac11a27b6f87340d082b47f762387464c61ed","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"20c7855c364d57ea4c97889a5e8d98470a9952dade37bd9248b9a54431670e5e","src/lib.rs":"5d30edec687843447c97e4ea87583983eb9fc06135ae718c8ecc0fa8cebef2df"},"package":"5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191"}

Просмотреть файл

@ -12,15 +12,17 @@
[package] [package]
edition = "2018" edition = "2018"
name = "urlencoding" name = "form_urlencoded"
version = "1.3.3" version = "1.0.1"
authors = ["Kornel <kornel@geekhood.net>", "Bertram Truong <b@bertramtruong.com>"] authors = ["The rust-url developers"]
description = "A Rust library for doing URL percentage encoding." description = "Parser and serializer for the application/x-www-form-urlencoded syntax, as used by HTML forms."
homepage = "https://lib.rs/urlencoding" license = "MIT/Apache-2.0"
readme = "README.md" repository = "https://github.com/servo/rust-url"
keywords = ["url", "percent", "escape", "urlencode", "urldecode"]
categories = ["encoding", "web-programming"] [lib]
license = "MIT" test = false
repository = "https://github.com/kornelski/rust_urlencoding" [dependencies.matches]
[package.metadata.docs.rs] version = "0.1"
targets = ["x86_64-unknown-linux-gnu"]
[dependencies.percent-encoding]
version = "2.1.0"

201
third_party/rust/form_urlencoded/LICENSE-APACHE поставляемый Normal file
Просмотреть файл

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Просмотреть файл

@ -1,3 +1,5 @@
Copyright (c) 2013-2016 The rust-url developers
Permission is hereby granted, free of charge, to any Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the documentation files (the "Software"), to deal in the

420
third_party/rust/form_urlencoded/src/lib.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,420 @@
// Copyright 2013-2016 The rust-url developers.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Parser and serializer for the [`application/x-www-form-urlencoded` syntax](
//! http://url.spec.whatwg.org/#application/x-www-form-urlencoded),
//! as used by HTML forms.
//!
//! Converts between a string (such as an URLs query string)
//! and a sequence of (name, value) pairs.
#[macro_use]
extern crate matches;
use percent_encoding::{percent_decode, percent_encode_byte};
use std::borrow::{Borrow, Cow};
use std::str;
/// Convert a byte string in the `application/x-www-form-urlencoded` syntax
/// into a iterator of (name, value) pairs.
///
/// Use `parse(input.as_bytes())` to parse a `&str` string.
///
/// The names and values are percent-decoded. For instance, `%23first=%25try%25` will be
/// converted to `[("#first", "%try%")]`.
#[inline]
pub fn parse(input: &[u8]) -> Parse<'_> {
Parse { input }
}
/// The return type of `parse()`.
#[derive(Copy, Clone)]
pub struct Parse<'a> {
input: &'a [u8],
}
impl<'a> Iterator for Parse<'a> {
type Item = (Cow<'a, str>, Cow<'a, str>);
fn next(&mut self) -> Option<Self::Item> {
loop {
if self.input.is_empty() {
return None;
}
let mut split2 = self.input.splitn(2, |&b| b == b'&');
let sequence = split2.next().unwrap();
self.input = split2.next().unwrap_or(&[][..]);
if sequence.is_empty() {
continue;
}
let mut split2 = sequence.splitn(2, |&b| b == b'=');
let name = split2.next().unwrap();
let value = split2.next().unwrap_or(&[][..]);
return Some((decode(name), decode(value)));
}
}
}
fn decode(input: &[u8]) -> Cow<'_, str> {
let replaced = replace_plus(input);
decode_utf8_lossy(match percent_decode(&replaced).into() {
Cow::Owned(vec) => Cow::Owned(vec),
Cow::Borrowed(_) => replaced,
})
}
/// Replace b'+' with b' '
fn replace_plus(input: &[u8]) -> Cow<'_, [u8]> {
match input.iter().position(|&b| b == b'+') {
None => Cow::Borrowed(input),
Some(first_position) => {
let mut replaced = input.to_owned();
replaced[first_position] = b' ';
for byte in &mut replaced[first_position + 1..] {
if *byte == b'+' {
*byte = b' ';
}
}
Cow::Owned(replaced)
}
}
}
impl<'a> Parse<'a> {
/// Return a new iterator that yields pairs of `String` instead of pairs of `Cow<str>`.
pub fn into_owned(self) -> ParseIntoOwned<'a> {
ParseIntoOwned { inner: self }
}
}
/// Like `Parse`, but yields pairs of `String` instead of pairs of `Cow<str>`.
pub struct ParseIntoOwned<'a> {
inner: Parse<'a>,
}
impl<'a> Iterator for ParseIntoOwned<'a> {
type Item = (String, String);
fn next(&mut self) -> Option<Self::Item> {
self.inner
.next()
.map(|(k, v)| (k.into_owned(), v.into_owned()))
}
}
/// The [`application/x-www-form-urlencoded` byte serializer](
/// https://url.spec.whatwg.org/#concept-urlencoded-byte-serializer).
///
/// Return an iterator of `&str` slices.
pub fn byte_serialize(input: &[u8]) -> ByteSerialize<'_> {
ByteSerialize { bytes: input }
}
/// Return value of `byte_serialize()`.
#[derive(Debug)]
pub struct ByteSerialize<'a> {
bytes: &'a [u8],
}
fn byte_serialized_unchanged(byte: u8) -> bool {
matches!(byte, b'*' | b'-' | b'.' | b'0' ..= b'9' | b'A' ..= b'Z' | b'_' | b'a' ..= b'z')
}
impl<'a> Iterator for ByteSerialize<'a> {
type Item = &'a str;
fn next(&mut self) -> Option<&'a str> {
if let Some((&first, tail)) = self.bytes.split_first() {
if !byte_serialized_unchanged(first) {
self.bytes = tail;
return Some(if first == b' ' {
"+"
} else {
percent_encode_byte(first)
});
}
let position = tail.iter().position(|&b| !byte_serialized_unchanged(b));
let (unchanged_slice, remaining) = match position {
// 1 for first_byte + i unchanged in tail
Some(i) => self.bytes.split_at(1 + i),
None => (self.bytes, &[][..]),
};
self.bytes = remaining;
// This unsafe is appropriate because we have already checked these
// bytes in byte_serialized_unchanged, which checks for a subset
// of UTF-8. So we know these bytes are valid UTF-8, and doing
// another UTF-8 check would be wasteful.
Some(unsafe { str::from_utf8_unchecked(unchanged_slice) })
} else {
None
}
}
fn size_hint(&self) -> (usize, Option<usize>) {
if self.bytes.is_empty() {
(0, Some(0))
} else {
(1, Some(self.bytes.len()))
}
}
}
/// The [`application/x-www-form-urlencoded` serializer](
/// https://url.spec.whatwg.org/#concept-urlencoded-serializer).
pub struct Serializer<'a, T: Target> {
target: Option<T>,
start_position: usize,
encoding: EncodingOverride<'a>,
}
pub trait Target {
fn as_mut_string(&mut self) -> &mut String;
fn finish(self) -> Self::Finished;
type Finished;
}
impl Target for String {
fn as_mut_string(&mut self) -> &mut String {
self
}
fn finish(self) -> Self {
self
}
type Finished = Self;
}
impl<'a> Target for &'a mut String {
fn as_mut_string(&mut self) -> &mut String {
&mut **self
}
fn finish(self) -> Self {
self
}
type Finished = Self;
}
impl<'a, T: Target> Serializer<'a, T> {
/// Create a new `application/x-www-form-urlencoded` serializer for the given target.
///
/// If the target is non-empty,
/// its content is assumed to already be in `application/x-www-form-urlencoded` syntax.
pub fn new(target: T) -> Self {
Self::for_suffix(target, 0)
}
/// Create a new `application/x-www-form-urlencoded` serializer
/// for a suffix of the given target.
///
/// If that suffix is non-empty,
/// its content is assumed to already be in `application/x-www-form-urlencoded` syntax.
pub fn for_suffix(mut target: T, start_position: usize) -> Self {
if target.as_mut_string().len() < start_position {
panic!(
"invalid length {} for target of length {}",
start_position,
target.as_mut_string().len()
);
}
Serializer {
target: Some(target),
start_position,
encoding: None,
}
}
/// Remove any existing name/value pair.
///
/// Panics if called after `.finish()`.
pub fn clear(&mut self) -> &mut Self {
string(&mut self.target).truncate(self.start_position);
self
}
/// Set the character encoding to be used for names and values before percent-encoding.
pub fn encoding_override(&mut self, new: EncodingOverride<'a>) -> &mut Self {
self.encoding = new;
self
}
/// Serialize and append a name/value pair.
///
/// Panics if called after `.finish()`.
pub fn append_pair(&mut self, name: &str, value: &str) -> &mut Self {
append_pair(
string(&mut self.target),
self.start_position,
self.encoding,
name,
value,
);
self
}
/// Serialize and append a name of parameter without any value.
///
/// Panics if called after `.finish()`.
pub fn append_key_only(&mut self, name: &str) -> &mut Self {
append_key_only(
string(&mut self.target),
self.start_position,
self.encoding,
name,
);
self
}
/// Serialize and append a number of name/value pairs.
///
/// This simply calls `append_pair` repeatedly.
/// This can be more convenient, so the user doesnt need to introduce a block
/// to limit the scope of `Serializer`s borrow of its string.
///
/// Panics if called after `.finish()`.
pub fn extend_pairs<I, K, V>(&mut self, iter: I) -> &mut Self
where
I: IntoIterator,
I::Item: Borrow<(K, V)>,
K: AsRef<str>,
V: AsRef<str>,
{
{
let string = string(&mut self.target);
for pair in iter {
let &(ref k, ref v) = pair.borrow();
append_pair(
string,
self.start_position,
self.encoding,
k.as_ref(),
v.as_ref(),
);
}
}
self
}
/// Serialize and append a number of names without values.
///
/// This simply calls `append_key_only` repeatedly.
/// This can be more convenient, so the user doesnt need to introduce a block
/// to limit the scope of `Serializer`s borrow of its string.
///
/// Panics if called after `.finish()`.
pub fn extend_keys_only<I, K>(&mut self, iter: I) -> &mut Self
where
I: IntoIterator,
I::Item: Borrow<K>,
K: AsRef<str>,
{
{
let string = string(&mut self.target);
for key in iter {
let k = key.borrow().as_ref();
append_key_only(string, self.start_position, self.encoding, k);
}
}
self
}
/// If this serializer was constructed with a string, take and return that string.
///
/// ```rust
/// use form_urlencoded;
/// let encoded: String = form_urlencoded::Serializer::new(String::new())
/// .append_pair("foo", "bar & baz")
/// .append_pair("saison", "Été+hiver")
/// .finish();
/// assert_eq!(encoded, "foo=bar+%26+baz&saison=%C3%89t%C3%A9%2Bhiver");
/// ```
///
/// Panics if called more than once.
pub fn finish(&mut self) -> T::Finished {
self.target
.take()
.expect("url::form_urlencoded::Serializer double finish")
.finish()
}
}
fn append_separator_if_needed(string: &mut String, start_position: usize) {
if string.len() > start_position {
string.push('&')
}
}
fn string<T: Target>(target: &mut Option<T>) -> &mut String {
target
.as_mut()
.expect("url::form_urlencoded::Serializer finished")
.as_mut_string()
}
fn append_pair(
string: &mut String,
start_position: usize,
encoding: EncodingOverride<'_>,
name: &str,
value: &str,
) {
append_separator_if_needed(string, start_position);
append_encoded(name, string, encoding);
string.push('=');
append_encoded(value, string, encoding);
}
fn append_key_only(
string: &mut String,
start_position: usize,
encoding: EncodingOverride,
name: &str,
) {
append_separator_if_needed(string, start_position);
append_encoded(name, string, encoding);
}
fn append_encoded(s: &str, string: &mut String, encoding: EncodingOverride<'_>) {
string.extend(byte_serialize(&encode(encoding, s)))
}
pub(crate) fn encode<'a>(encoding_override: EncodingOverride<'_>, input: &'a str) -> Cow<'a, [u8]> {
if let Some(o) = encoding_override {
return o(input);
}
input.as_bytes().into()
}
/// Decodes `input` as UTF-8, replacing invalid sequences the way
/// `String::from_utf8_lossy` does, while reusing the input allocation
/// instead of copying whenever the bytes are already valid UTF-8.
pub(crate) fn decode_utf8_lossy(input: Cow<'_, [u8]>) -> Cow<'_, str> {
    // Note: This function is duplicated in `percent_encoding/lib.rs`.
    match input {
        // Borrowed bytes: `from_utf8_lossy` already returns `Cow::Borrowed`
        // when the bytes are valid, so it can be used directly.
        Cow::Borrowed(bytes) => String::from_utf8_lossy(bytes),
        Cow::Owned(bytes) => {
            match String::from_utf8_lossy(&bytes) {
                Cow::Borrowed(utf8) => {
                    // If from_utf8_lossy returns a Cow::Borrowed, then we can
                    // be sure our original bytes were valid UTF-8. This is because
                    // if the bytes were invalid UTF-8 from_utf8_lossy would have
                    // to allocate a new owned string to back the Cow so it could
                    // replace invalid bytes with a placeholder.
                    // First we do a debug_assert to confirm our description above.
                    let raw_utf8: *const [u8];
                    raw_utf8 = utf8.as_bytes();
                    debug_assert!(raw_utf8 == &*bytes as *const [u8]);
                    // Given we know the original input bytes are valid UTF-8,
                    // and we have ownership of those bytes, we re-use them and
                    // return a Cow::Owned here.
                    // SAFETY: validity of `bytes` as UTF-8 was just established
                    // by `from_utf8_lossy` returning `Cow::Borrowed` above.
                    Cow::Owned(unsafe { String::from_utf8_unchecked(bytes) })
                }
                // Invalid UTF-8: `from_utf8_lossy` already allocated a
                // repaired owned string; hand it back unchanged.
                Cow::Owned(s) => Cow::Owned(s),
            }
        }
    }
}
/// Optional encoding hook: when `Some`, the callback converts a string to
/// bytes in a caller-chosen encoding; when `None`, the input's UTF-8 bytes
/// are used as-is (see `encode`).
pub type EncodingOverride<'a> = Option<&'a dyn Fn(&str) -> Cow<'_, [u8]>>;

2
third_party/rust/h2/.cargo-checksum.json поставляемый

Различия файлов скрыты, потому что одна или несколько строк слишком длинны

87
third_party/rust/h2/CHANGELOG.md поставляемый
Просмотреть файл

@ -1,3 +1,90 @@
# 0.3.13 (March 31, 2022)
* Update private internal `tokio-util` dependency.
# 0.3.12 (March 9, 2022)
* Avoid time operations that can panic (#599)
* Bump MSRV to Rust 1.49 (#606)
* Fix header decoding error when a header name is contained at a continuation
header boundary (#589)
* Remove I/O type names from handshake `tracing` spans (#608)
# 0.3.11 (January 26, 2022)
* Make `SendStream::poll_capacity` never return `Ok(Some(0))` (#596)
* Fix panic when receiving already reset push promise (#597)
# 0.3.10 (January 6, 2022)
* Add `Error::is_go_away()` and `Error::is_remote()` methods.
* Fix panic if receiving malformed PUSH_PROMISE with stream ID of 0.
# 0.3.9 (December 9, 2021)
* Fix hang related to new `max_send_buffer_size`.
# 0.3.8 (December 8, 2021)
* Add "extended CONNECT support". Adds `h2::ext::Protocol`, which is used for request and response extensions to connect new protocols over an HTTP/2 stream.
* Add `max_send_buffer_size` options to client and server builders, and a default of ~400MB. This acts like a high-water mark for the `poll_capacity()` method.
* Fix panic if receiving malformed HEADERS with stream ID of 0.
# 0.3.7 (October 22, 2021)
* Fix panic if server sends a malformed frame on a stream client was about to open.
* Fix server to treat `:status` in a request as a stream error instead of connection error.
# 0.3.6 (September 30, 2021)
* Fix regression of `h2::Error` that were created via `From<h2::Reason>` not returning their reason code in `Error::reason()`.
# 0.3.5 (September 29, 2021)
* Fix sending of very large headers. Previously when a single header was too big to fit in a single `HEADERS` frame, an error was returned. Now it is broken up and sent correctly.
* Fix buffered data field to be a bigger integer size.
* Refactor error format to include what initiated the error (remote, local, or user), if it was a stream or connection-level error, and any received debug data.
# 0.3.4 (August 20, 2021)
* Fix panic when encoding header size update over a certain size.
* Fix `SendRequest` to wake up connection when dropped.
* Fix potential hang if `RecvStream` is placed in the request or response `extensions`.
* Stop calling `Instant::now` if zero reset streams are configured.
# 0.3.3 (April 29, 2021)
* Fix client being able to make `CONNECT` requests without a `:path`.
* Expose `RecvStream::poll_data`.
* Fix some docs.
# 0.3.2 (March 24, 2021)
* Fix incorrect handling of received 1xx responses on the client when the request body is still streaming.
# 0.3.1 (February 26, 2021)
* Add `Connection::max_concurrent_recv_streams()` getter.
* Add `Connection::max_concurrent_send_streams()` getter.
* Fix client to ignore receipt of 1xx headers frames.
* Fix incorrect calculation of pseudo header lengths when determining if a received header is too big.
* Reduce monomorphized code size of internal code.
# 0.3.0 (December 23, 2020)
* Update to Tokio v1 and Bytes v1.
* Disable `tracing`'s `log` feature. (It can still be enabled by a user in their own `Cargo.toml`.)
# 0.2.7 (October 22, 2020)
* Fix stream ref count when sending a push promise
* Fix receiving empty DATA frames in response to a HEAD request
* Fix handling of client disabling SERVER_PUSH
# 0.2.6 (July 13, 2020)
* Integrate `tracing` directly where `log` was used. (For 0.2.x, `log`s are still emitted by default.)
# 0.2.5 (May 6, 2020) # 0.2.5 (May 6, 2020)
* Fix rare debug assert failure in store shutdown. * Fix rare debug assert failure in store shutdown.

711
third_party/rust/h2/Cargo.lock сгенерированный поставляемый

Разница между файлами не показана из-за своего большого размера Загрузить разницу

90
third_party/rust/h2/Cargo.toml поставляемый
Просмотреть файл

@ -3,28 +3,45 @@
# When uploading crates to the registry Cargo will automatically # When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility # "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies # with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies # to registry (e.g., crates.io) dependencies.
# #
# If you believe there's an error in this file please file an # If you are reading this file be aware that the original Cargo.toml
# issue against the rust-lang/cargo repository. If you're # will likely look very different (and much more reasonable).
# editing this file be aware that the upstream Cargo.toml # See Cargo.toml.orig for the original contents.
# will likely look very different (and much more reasonable)
[package] [package]
edition = "2018" edition = "2018"
name = "h2" name = "h2"
version = "0.2.5" version = "0.3.13"
authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"] authors = [
exclude = ["fixtures/**", "ci/**"] "Carl Lerche <me@carllerche.com>",
description = "An HTTP/2.0 client and server" "Sean McArthur <sean@seanmonstar.com>",
documentation = "https://docs.rs/h2/0.2.5/h2/" ]
exclude = [
"fixtures/**",
"ci/**",
]
description = "An HTTP/2 client and server"
documentation = "https://docs.rs/h2"
readme = "README.md" readme = "README.md"
keywords = ["http", "async", "non-blocking"] keywords = [
categories = ["asynchronous", "web-programming", "network-programming"] "http",
"async",
"non-blocking",
]
categories = [
"asynchronous",
"web-programming",
"network-programming",
]
license = "MIT" license = "MIT"
repository = "https://github.com/hyperium/h2" repository = "https://github.com/hyperium/h2"
[package.metadata.docs.rs]
features = ["stream"]
[dependencies.bytes] [dependencies.bytes]
version = "0.5.2" version = "1"
[dependencies.fnv] [dependencies.fnv]
version = "1.0.5" version = "1.0.5"
@ -45,37 +62,38 @@ default-features = false
version = "0.2" version = "0.2"
[dependencies.indexmap] [dependencies.indexmap]
version = "1.0" version = "1.5.2"
features = ["std"]
[dependencies.log]
version = "0.4.1"
[dependencies.slab] [dependencies.slab]
version = "0.4.0" version = "0.4.2"
[dependencies.tokio] [dependencies.tokio]
version = "0.2" version = "1"
features = ["io-util"] features = ["io-util"]
[dependencies.tokio-util] [dependencies.tokio-util]
version = "0.3.1" version = "0.7.1"
features = ["codec"] features = ["codec"]
[dependencies.tracing]
version = "0.1.21"
features = ["std"]
default-features = false
[dev-dependencies.env_logger] [dev-dependencies.env_logger]
version = "0.5.3" version = "0.9"
default-features = false default-features = false
[dev-dependencies.hex] [dev-dependencies.hex]
version = "0.2.0" version = "0.4.3"
[dev-dependencies.quickcheck] [dev-dependencies.quickcheck]
version = "0.4.1" version = "1.0.3"
default-features = false default-features = false
[dev-dependencies.rand] [dev-dependencies.rand]
version = "0.3.15" version = "0.8.4"
[dev-dependencies.rustls]
version = "0.16"
[dev-dependencies.serde] [dev-dependencies.serde]
version = "1.0.0" version = "1.0.0"
@ -84,20 +102,22 @@ version = "1.0.0"
version = "1.0.0" version = "1.0.0"
[dev-dependencies.tokio] [dev-dependencies.tokio]
version = "0.2" version = "1"
features = ["dns", "macros", "rt-core", "sync", "tcp"] features = [
"rt-multi-thread",
"macros",
"sync",
"net",
]
[dev-dependencies.tokio-rustls] [dev-dependencies.tokio-rustls]
version = "0.12.0" version = "0.23.2"
[dev-dependencies.walkdir] [dev-dependencies.walkdir]
version = "1.0.0" version = "2.3.2"
[dev-dependencies.webpki]
version = "0.21"
[dev-dependencies.webpki-roots] [dev-dependencies.webpki-roots]
version = "0.17" version = "0.22.2"
[features] [features]
stream = [] stream = []

17
third_party/rust/h2/README.md поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
# H2 # H2
A Tokio aware, HTTP/2.0 client & server implementation for Rust. A Tokio aware, HTTP/2 client & server implementation for Rust.
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT) [![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](https://opensource.org/licenses/MIT)
[![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2) [![Crates.io](https://img.shields.io/crates/v/h2.svg)](https://crates.io/crates/h2)
@ -12,24 +12,23 @@ More information about this crate can be found in the [crate documentation][dox]
## Features ## Features
* Client and server HTTP/2.0 implementation. * Client and server HTTP/2 implementation.
* Implements the full HTTP/2.0 specification. * Implements the full HTTP/2 specification.
* Passes [h2spec](https://github.com/summerwind/h2spec). * Passes [h2spec](https://github.com/summerwind/h2spec).
* Focus on performance and correctness. * Focus on performance and correctness.
* Built on [Tokio](https://tokio.rs). * Built on [Tokio](https://tokio.rs).
## Non goals ## Non goals
This crate is intended to only be an implementation of the HTTP/2.0 This crate is intended to only be an implementation of the HTTP/2
specification. It does not handle: specification. It does not handle:
* Managing TCP connections * Managing TCP connections
* HTTP 1.0 upgrade * HTTP 1.0 upgrade
* TLS * TLS
* Any feature not described by the HTTP/2.0 specification. * Any feature not described by the HTTP/2 specification.
The intent is that this crate will eventually be used by This crate is now used by [hyper](https://github.com/hyperium/hyper), which will provide all of these features.
[hyper](https://github.com/hyperium/hyper), which will provide all of these features.
## Usage ## Usage
@ -37,7 +36,7 @@ To use `h2`, first add this to your `Cargo.toml`:
```toml ```toml
[dependencies] [dependencies]
h2 = "0.2" h2 = "0.3"
``` ```
Next, add this to your crate: Next, add this to your crate:
@ -56,7 +55,7 @@ fn main() {
**How does h2 compare to [solicit] or [rust-http2]?** **How does h2 compare to [solicit] or [rust-http2]?**
The h2 library has implemented more of the details of the HTTP/2.0 specification The h2 library has implemented more of the details of the HTTP/2 specification
than any other Rust library. It also passes the [h2spec] set of tests. The h2 than any other Rust library. It also passes the [h2spec] set of tests. The h2
library is rapidly approaching "production ready" quality. library is rapidly approaching "production ready" quality.

24
third_party/rust/h2/examples/akamai.rs поставляемый
Просмотреть файл

@ -3,9 +3,9 @@ use http::{Method, Request};
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio_rustls::TlsConnector; use tokio_rustls::TlsConnector;
use rustls::Session; use tokio_rustls::rustls::{OwnedTrustAnchor, RootCertStore, ServerName};
use webpki::DNSNameRef;
use std::convert::TryFrom;
use std::error::Error; use std::error::Error;
use std::net::ToSocketAddrs; use std::net::ToSocketAddrs;
@ -16,9 +16,19 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
let _ = env_logger::try_init(); let _ = env_logger::try_init();
let tls_client_config = std::sync::Arc::new({ let tls_client_config = std::sync::Arc::new({
let mut c = rustls::ClientConfig::new(); let mut root_store = RootCertStore::empty();
c.root_store root_store.add_server_trust_anchors(webpki_roots::TLS_SERVER_ROOTS.0.iter().map(|ta| {
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS); OwnedTrustAnchor::from_subject_spki_name_constraints(
ta.subject,
ta.spki,
ta.name_constraints,
)
}));
let mut c = tokio_rustls::rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(root_store)
.with_no_client_auth();
c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned()); c.alpn_protocols.push(ALPN_H2.as_bytes().to_owned());
c c
}); });
@ -33,13 +43,13 @@ pub async fn main() -> Result<(), Box<dyn Error>> {
println!("ADDR: {:?}", addr); println!("ADDR: {:?}", addr);
let tcp = TcpStream::connect(&addr).await?; let tcp = TcpStream::connect(&addr).await?;
let dns_name = DNSNameRef::try_from_ascii_str("http2.akamai.com").unwrap(); let dns_name = ServerName::try_from("http2.akamai.com").unwrap();
let connector = TlsConnector::from(tls_client_config); let connector = TlsConnector::from(tls_client_config);
let res = connector.connect(dns_name, tcp).await; let res = connector.connect(dns_name, tcp).await;
let tls = res.unwrap(); let tls = res.unwrap();
{ {
let (_, session) = tls.get_ref(); let (_, session) = tls.get_ref();
let negotiated_protocol = session.get_alpn_protocol(); let negotiated_protocol = session.alpn_protocol();
assert_eq!( assert_eq!(
Some(ALPN_H2.as_bytes()), Some(ALPN_H2.as_bytes()),
negotiated_protocol.as_ref().map(|x| &**x) negotiated_protocol.as_ref().map(|x| &**x)

47
third_party/rust/h2/examples/server.rs поставляемый
Просмотреть файл

@ -1,21 +1,23 @@
use std::error::Error; use std::error::Error;
use bytes::Bytes; use bytes::Bytes;
use h2::server; use h2::server::{self, SendResponse};
use h2::RecvStream;
use http::Request;
use tokio::net::{TcpListener, TcpStream}; use tokio::net::{TcpListener, TcpStream};
#[tokio::main] #[tokio::main]
async fn main() -> Result<(), Box<dyn Error + Send + Sync>> { async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
let _ = env_logger::try_init(); let _ = env_logger::try_init();
let mut listener = TcpListener::bind("127.0.0.1:5928").await?; let listener = TcpListener::bind("127.0.0.1:5928").await?;
println!("listening on {:?}", listener.local_addr()); println!("listening on {:?}", listener.local_addr());
loop { loop {
if let Ok((socket, _peer_addr)) = listener.accept().await { if let Ok((socket, _peer_addr)) = listener.accept().await {
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = handle(socket).await { if let Err(e) = serve(socket).await {
println!(" -> err={:?}", e); println!(" -> err={:?}", e);
} }
}); });
@ -23,22 +25,41 @@ async fn main() -> Result<(), Box<dyn Error + Send + Sync>> {
} }
} }
async fn handle(socket: TcpStream) -> Result<(), Box<dyn Error + Send + Sync>> { async fn serve(socket: TcpStream) -> Result<(), Box<dyn Error + Send + Sync>> {
let mut connection = server::handshake(socket).await?; let mut connection = server::handshake(socket).await?;
println!("H2 connection bound"); println!("H2 connection bound");
while let Some(result) = connection.accept().await { while let Some(result) = connection.accept().await {
let (request, mut respond) = result?; let (request, respond) = result?;
println!("GOT request: {:?}", request); tokio::spawn(async move {
let response = http::Response::new(()); if let Err(e) = handle_request(request, respond).await {
println!("error while handling request: {}", e);
let mut send = respond.send_response(response, false)?; }
});
println!(">>>> sending data");
send.send_data(Bytes::from_static(b"hello world"), true)?;
} }
println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~"); println!("~~~~~~~~~~~ H2 connection CLOSE !!!!!! ~~~~~~~~~~~");
Ok(())
}
async fn handle_request(
mut request: Request<RecvStream>,
mut respond: SendResponse<Bytes>,
) -> Result<(), Box<dyn Error + Send + Sync>> {
println!("GOT request: {:?}", request);
let body = request.body_mut();
while let Some(data) = body.data().await {
let data = data?;
println!("<<<< recv {:?}", data);
let _ = body.flow_control().release_capacity(data.len());
}
let response = http::Response::new(());
let mut send = respond.send_response(response, false)?;
println!(">>>> send");
send.send_data(Bytes::from_static(b"hello "), false)?;
send.send_data(Bytes::from_static(b"world\n"), true)?;
Ok(()) Ok(())
} }

203
third_party/rust/h2/src/client.rs поставляемый
Просмотреть файл

@ -1,18 +1,18 @@
//! Client implementation of the HTTP/2.0 protocol. //! Client implementation of the HTTP/2 protocol.
//! //!
//! # Getting started //! # Getting started
//! //!
//! Running an HTTP/2.0 client requires the caller to establish the underlying //! Running an HTTP/2 client requires the caller to establish the underlying
//! connection as well as get the connection to a state that is ready to begin //! connection as well as get the connection to a state that is ready to begin
//! the HTTP/2.0 handshake. See [here](../index.html#handshake) for more //! the HTTP/2 handshake. See [here](../index.html#handshake) for more
//! details. //! details.
//! //!
//! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote //! This could be as basic as using Tokio's [`TcpStream`] to connect to a remote
//! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades. //! host, but usually it means using either ALPN or HTTP/1.1 protocol upgrades.
//! //!
//! Once a connection is obtained, it is passed to [`handshake`], which will //! Once a connection is obtained, it is passed to [`handshake`], which will
//! begin the [HTTP/2.0 handshake]. This returns a future that completes once //! begin the [HTTP/2 handshake]. This returns a future that completes once
//! the handshake process is performed and HTTP/2.0 streams may be initialized. //! the handshake process is performed and HTTP/2 streams may be initialized.
//! //!
//! [`handshake`] uses default configuration values. There are a number of //! [`handshake`] uses default configuration values. There are a number of
//! settings that can be changed by using [`Builder`] instead. //! settings that can be changed by using [`Builder`] instead.
@ -26,16 +26,16 @@
//! # Making requests //! # Making requests
//! //!
//! Requests are made using the [`SendRequest`] handle provided by the handshake //! Requests are made using the [`SendRequest`] handle provided by the handshake
//! future. Once a request is submitted, an HTTP/2.0 stream is initialized and //! future. Once a request is submitted, an HTTP/2 stream is initialized and
//! the request is sent to the server. //! the request is sent to the server.
//! //!
//! A request body and request trailers are sent using [`SendRequest`] and the //! A request body and request trailers are sent using [`SendRequest`] and the
//! server's response is returned once the [`ResponseFuture`] future completes. //! server's response is returned once the [`ResponseFuture`] future completes.
//! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by //! Both the [`SendStream`] and [`ResponseFuture`] instances are returned by
//! [`SendRequest::send_request`] and are tied to the HTTP/2.0 stream //! [`SendRequest::send_request`] and are tied to the HTTP/2 stream
//! initialized by the sent request. //! initialized by the sent request.
//! //!
//! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2.0 //! The [`SendRequest::poll_ready`] function returns `Ready` when a new HTTP/2
//! stream can be created, i.e. as long as the current number of active streams //! stream can be created, i.e. as long as the current number of active streams
//! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the //! is below [`MAX_CONCURRENT_STREAMS`]. If a new stream cannot be created, the
//! caller will be notified once an existing stream closes, freeing capacity for //! caller will be notified once an existing stream closes, freeing capacity for
@ -131,13 +131,14 @@
//! [`SendRequest`]: struct.SendRequest.html //! [`SendRequest`]: struct.SendRequest.html
//! [`ResponseFuture`]: struct.ResponseFuture.html //! [`ResponseFuture`]: struct.ResponseFuture.html
//! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready //! [`SendRequest::poll_ready`]: struct.SendRequest.html#method.poll_ready
//! [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader //! [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
//! [`Builder`]: struct.Builder.html //! [`Builder`]: struct.Builder.html
//! [`Error`]: ../struct.Error.html //! [`Error`]: ../struct.Error.html
use crate::codec::{Codec, RecvError, SendError, UserError}; use crate::codec::{Codec, SendError, UserError};
use crate::ext::Protocol;
use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId}; use crate::frame::{Headers, Pseudo, Reason, Settings, StreamId};
use crate::proto; use crate::proto::{self, Error};
use crate::{FlowControl, PingPong, RecvStream, SendStream}; use crate::{FlowControl, PingPong, RecvStream, SendStream};
use bytes::{Buf, Bytes}; use bytes::{Buf, Bytes};
@ -149,8 +150,9 @@ use std::task::{Context, Poll};
use std::time::Duration; use std::time::Duration;
use std::usize; use std::usize;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tracing::Instrument;
/// Initializes new HTTP/2.0 streams on a connection by sending a request. /// Initializes new HTTP/2 streams on a connection by sending a request.
/// ///
/// This type does no work itself. Instead, it is a handle to the inner /// This type does no work itself. Instead, it is a handle to the inner
/// connection state held by [`Connection`]. If the associated connection /// connection state held by [`Connection`]. If the associated connection
@ -160,7 +162,7 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
/// / threads than their associated [`Connection`] instance. Internally, there /// / threads than their associated [`Connection`] instance. Internally, there
/// is a buffer used to stage requests before they get written to the /// is a buffer used to stage requests before they get written to the
/// connection. There is no guarantee that requests get written to the /// connection. There is no guarantee that requests get written to the
/// connection in FIFO order as HTTP/2.0 prioritization logic can play a role. /// connection in FIFO order as HTTP/2 prioritization logic can play a role.
/// ///
/// [`SendRequest`] implements [`Clone`], enabling the creation of many /// [`SendRequest`] implements [`Clone`], enabling the creation of many
/// instances that are backed by a single connection. /// instances that are backed by a single connection.
@ -183,10 +185,10 @@ pub struct ReadySendRequest<B: Buf> {
inner: Option<SendRequest<B>>, inner: Option<SendRequest<B>>,
} }
/// Manages all state associated with an HTTP/2.0 client connection. /// Manages all state associated with an HTTP/2 client connection.
/// ///
/// A `Connection` is backed by an I/O resource (usually a TCP socket) and /// A `Connection` is backed by an I/O resource (usually a TCP socket) and
/// implements the HTTP/2.0 client logic for that connection. It is responsible /// implements the HTTP/2 client logic for that connection. It is responsible
/// for driving the internal state forward, performing the work requested of the /// for driving the internal state forward, performing the work requested of the
/// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`], /// associated handles ([`SendRequest`], [`ResponseFuture`], [`SendStream`],
/// [`RecvStream`]). /// [`RecvStream`]).
@ -219,7 +221,7 @@ pub struct ReadySendRequest<B: Buf> {
/// // Submit the connection handle to an executor. /// // Submit the connection handle to an executor.
/// tokio::spawn(async { connection.await.expect("connection failed"); }); /// tokio::spawn(async { connection.await.expect("connection failed"); });
/// ///
/// // Now, use `send_request` to initialize HTTP/2.0 streams. /// // Now, use `send_request` to initialize HTTP/2 streams.
/// // ... /// // ...
/// # Ok(()) /// # Ok(())
/// # } /// # }
@ -273,7 +275,7 @@ pub struct PushPromises {
/// Methods can be chained in order to set the configuration values. /// Methods can be chained in order to set the configuration values.
/// ///
/// The client is constructed by calling [`handshake`] and passing the I/O /// The client is constructed by calling [`handshake`] and passing the I/O
/// handle that will back the HTTP/2.0 server. /// handle that will back the HTTP/2 server.
/// ///
/// New instances of `Builder` are obtained via [`Builder::new`]. /// New instances of `Builder` are obtained via [`Builder::new`].
/// ///
@ -293,7 +295,7 @@ pub struct PushPromises {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .initial_window_size(1_000_000) /// .initial_window_size(1_000_000)
@ -318,6 +320,9 @@ pub struct Builder {
/// Initial target window size for new connections. /// Initial target window size for new connections.
initial_target_connection_window_size: Option<u32>, initial_target_connection_window_size: Option<u32>,
/// Maximum amount of bytes to "buffer" for writing per stream.
max_send_buffer_size: usize,
/// Maximum number of locally reset streams to keep at a time. /// Maximum number of locally reset streams to keep at a time.
reset_stream_max: usize, reset_stream_max: usize,
@ -338,7 +343,7 @@ impl<B> SendRequest<B>
where where
B: Buf + 'static, B: Buf + 'static,
{ {
/// Returns `Ready` when the connection can initialize a new HTTP/2.0 /// Returns `Ready` when the connection can initialize a new HTTP/2
/// stream. /// stream.
/// ///
/// This function must return `Ready` before `send_request` is called. When /// This function must return `Ready` before `send_request` is called. When
@ -386,16 +391,16 @@ where
ReadySendRequest { inner: Some(self) } ReadySendRequest { inner: Some(self) }
} }
/// Sends a HTTP/2.0 request to the server. /// Sends a HTTP/2 request to the server.
/// ///
/// `send_request` initializes a new HTTP/2.0 stream on the associated /// `send_request` initializes a new HTTP/2 stream on the associated
/// connection, then sends the given request using this new stream. Only the /// connection, then sends the given request using this new stream. Only the
/// request head is sent. /// request head is sent.
/// ///
/// On success, a [`ResponseFuture`] instance and [`SendStream`] instance /// On success, a [`ResponseFuture`] instance and [`SendStream`] instance
/// are returned. The [`ResponseFuture`] instance is used to get the /// are returned. The [`ResponseFuture`] instance is used to get the
/// server's response and the [`SendStream`] instance is used to send a /// server's response and the [`SendStream`] instance is used to send a
/// request body or trailers to the server over the same HTTP/2.0 stream. /// request body or trailers to the server over the same HTTP/2 stream.
/// ///
/// To send a request body or trailers, set `end_of_stream` to `false`. /// To send a request body or trailers, set `end_of_stream` to `false`.
/// Then, use the returned [`SendStream`] instance to stream request body /// Then, use the returned [`SendStream`] instance to stream request body
@ -516,6 +521,19 @@ where
(response, stream) (response, stream)
}) })
} }
/// Returns whether the [extended CONNECT protocol][1] is enabled or not.
///
/// This setting is configured by the server peer by sending the
/// [`SETTINGS_ENABLE_CONNECT_PROTOCOL` parameter][2] in a `SETTINGS` frame.
    /// This method returns the currently acknowledged value received from the
    /// remote.
///
/// [1]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
/// [2]: https://datatracker.ietf.org/doc/html/rfc8441#section-3
pub fn is_extended_connect_protocol_enabled(&self) -> bool {
self.inner.is_extended_connect_protocol_enabled()
}
} }
impl<B> fmt::Debug for SendRequest<B> impl<B> fmt::Debug for SendRequest<B>
@ -600,7 +618,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .initial_window_size(1_000_000) /// .initial_window_size(1_000_000)
@ -613,6 +631,7 @@ impl Builder {
/// ``` /// ```
pub fn new() -> Builder { pub fn new() -> Builder {
Builder { Builder {
max_send_buffer_size: proto::DEFAULT_MAX_SEND_BUFFER_SIZE,
reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS), reset_stream_duration: Duration::from_secs(proto::DEFAULT_RESET_STREAM_SECS),
reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX, reset_stream_max: proto::DEFAULT_RESET_STREAM_MAX,
initial_target_connection_window_size: None, initial_target_connection_window_size: None,
@ -642,7 +661,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .initial_window_size(1_000_000) /// .initial_window_size(1_000_000)
@ -677,7 +696,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .initial_connection_window_size(1_000_000) /// .initial_connection_window_size(1_000_000)
@ -692,7 +711,7 @@ impl Builder {
self self
} }
/// Indicates the size (in octets) of the largest HTTP/2.0 frame payload that the /// Indicates the size (in octets) of the largest HTTP/2 frame payload that the
/// configured client is able to accept. /// configured client is able to accept.
/// ///
/// The sender may send data frames that are **smaller** than this value, /// The sender may send data frames that are **smaller** than this value,
@ -711,7 +730,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .max_frame_size(1_000_000) /// .max_frame_size(1_000_000)
@ -751,7 +770,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .max_header_list_size(16 * 1024) /// .max_header_list_size(16 * 1024)
@ -786,7 +805,7 @@ impl Builder {
/// a protocol level error. Instead, the `h2` library will immediately reset /// a protocol level error. Instead, the `h2` library will immediately reset
/// the stream. /// the stream.
/// ///
/// See [Section 5.1.2] in the HTTP/2.0 spec for more details. /// See [Section 5.1.2] in the HTTP/2 spec for more details.
/// ///
/// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
/// ///
@ -800,7 +819,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .max_concurrent_streams(1000) /// .max_concurrent_streams(1000)
@ -827,7 +846,7 @@ impl Builder {
/// Sending streams past the limit returned by the peer will be treated /// Sending streams past the limit returned by the peer will be treated
/// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM. /// as a stream error of type PROTOCOL_ERROR or REFUSED_STREAM.
/// ///
/// See [Section 5.1.2] in the HTTP/2.0 spec for more details. /// See [Section 5.1.2] in the HTTP/2 spec for more details.
/// ///
/// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2 /// [Section 5.1.2]: https://http2.github.io/http2-spec/#rfc.section.5.1.2
/// ///
@ -841,7 +860,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .initial_max_send_streams(1000) /// .initial_max_send_streams(1000)
@ -858,7 +877,7 @@ impl Builder {
/// Sets the maximum number of concurrent locally reset streams. /// Sets the maximum number of concurrent locally reset streams.
/// ///
/// When a stream is explicitly reset, the HTTP/2.0 specification requires /// When a stream is explicitly reset, the HTTP/2 specification requires
/// that any further frames received for that stream must be ignored for /// that any further frames received for that stream must be ignored for
/// "some time". /// "some time".
/// ///
@ -886,7 +905,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .max_concurrent_reset_streams(1000) /// .max_concurrent_reset_streams(1000)
@ -903,7 +922,7 @@ impl Builder {
/// Sets the duration to remember locally reset streams. /// Sets the duration to remember locally reset streams.
/// ///
/// When a stream is explicitly reset, the HTTP/2.0 specification requires /// When a stream is explicitly reset, the HTTP/2 specification requires
/// that any further frames received for that stream must be ignored for /// that any further frames received for that stream must be ignored for
/// "some time". /// "some time".
/// ///
@ -932,7 +951,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .reset_stream_duration(Duration::from_secs(10)) /// .reset_stream_duration(Duration::from_secs(10))
@ -947,6 +966,24 @@ impl Builder {
self self
} }
/// Sets the maximum send buffer size per stream.
///
/// Once a stream has buffered up to (or over) the maximum, the stream's
/// flow control will not "poll" additional capacity. Once bytes for the
/// stream have been written to the connection, the send buffer capacity
/// will be freed up again.
///
/// The default is currently ~400MB, but may change.
///
/// # Panics
///
/// This function panics if `max` is larger than `u32::MAX`.
pub fn max_send_buffer_size(&mut self, max: usize) -> &mut Self {
assert!(max <= std::u32::MAX as usize);
self.max_send_buffer_size = max;
self
}
/// Enables or disables server push promises. /// Enables or disables server push promises.
/// ///
/// This value is included in the initial SETTINGS handshake. When set, the /// This value is included in the initial SETTINGS handshake. When set, the
@ -954,7 +991,7 @@ impl Builder {
/// false in the initial SETTINGS handshake guarantees that the remote server /// false in the initial SETTINGS handshake guarantees that the remote server
/// will never send a push promise. /// will never send a push promise.
/// ///
/// This setting can be changed during the life of a single HTTP/2.0 /// This setting can be changed during the life of a single HTTP/2
/// connection by sending another settings frame updating the value. /// connection by sending another settings frame updating the value.
/// ///
/// Default value: `true`. /// Default value: `true`.
@ -970,7 +1007,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// # -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .enable_push(false) /// .enable_push(false)
@ -996,22 +1033,22 @@ impl Builder {
self self
} }
/// Creates a new configured HTTP/2.0 client backed by `io`. /// Creates a new configured HTTP/2 client backed by `io`.
/// ///
/// It is expected that `io` already be in an appropriate state to commence /// It is expected that `io` already be in an appropriate state to commence
/// the [HTTP/2.0 handshake]. The handshake is completed once both the connection /// the [HTTP/2 handshake]. The handshake is completed once both the connection
/// preface and the initial settings frame is sent by the client. /// preface and the initial settings frame is sent by the client.
/// ///
/// The handshake future does not wait for the initial settings frame from the /// The handshake future does not wait for the initial settings frame from the
/// server. /// server.
/// ///
/// Returns a future which resolves to the [`Connection`] / [`SendRequest`] /// Returns a future which resolves to the [`Connection`] / [`SendRequest`]
/// tuple once the HTTP/2.0 handshake has been completed. /// tuple once the HTTP/2 handshake has been completed.
/// ///
/// This function also allows the caller to configure the send payload data /// This function also allows the caller to configure the send payload data
/// type. See [Outbound data type] for more details. /// type. See [Outbound data type] for more details.
/// ///
/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
/// [`Connection`]: struct.Connection.html /// [`Connection`]: struct.Connection.html
/// [`SendRequest`]: struct.SendRequest.html /// [`SendRequest`]: struct.SendRequest.html
/// [Outbound data type]: ../index.html#outbound-data-type. /// [Outbound data type]: ../index.html#outbound-data-type.
@ -1028,7 +1065,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error> /// -> Result<((SendRequest<Bytes>, Connection<T, Bytes>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .handshake(my_io); /// .handshake(my_io);
@ -1048,7 +1085,7 @@ impl Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T)
/// # -> Result<((SendRequest<&'static [u8]>, Connection<T, &'static [u8]>)), h2::Error> /// # -> Result<((SendRequest<&'static [u8]>, Connection<T, &'static [u8]>)), h2::Error>
/// # { /// # {
/// // `client_fut` is a future representing the completion of the HTTP/2.0 /// // `client_fut` is a future representing the completion of the HTTP/2
/// // handshake. /// // handshake.
/// let client_fut = Builder::new() /// let client_fut = Builder::new()
/// .handshake::<_, &'static [u8]>(my_io); /// .handshake::<_, &'static [u8]>(my_io);
@ -1075,19 +1112,19 @@ impl Default for Builder {
} }
} }
/// Creates a new configured HTTP/2.0 client with default configuration /// Creates a new configured HTTP/2 client with default configuration
/// values backed by `io`. /// values backed by `io`.
/// ///
/// It is expected that `io` already be in an appropriate state to commence /// It is expected that `io` already be in an appropriate state to commence
/// the [HTTP/2.0 handshake]. See [Handshake] for more details. /// the [HTTP/2 handshake]. See [Handshake] for more details.
/// ///
/// Returns a future which resolves to the [`Connection`] / [`SendRequest`] /// Returns a future which resolves to the [`Connection`] / [`SendRequest`]
/// tuple once the HTTP/2.0 handshake has been completed. The returned /// tuple once the HTTP/2 handshake has been completed. The returned
/// [`Connection`] instance will be using default configuration values. Use /// [`Connection`] instance will be using default configuration values. Use
/// [`Builder`] to customize the configuration values used by a [`Connection`] /// [`Builder`] to customize the configuration values used by a [`Connection`]
/// instance. /// instance.
/// ///
/// [HTTP/2.0 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader /// [HTTP/2 handshake]: http://httpwg.org/specs/rfc7540.html#ConnectionHeader
/// [Handshake]: ../index.html#handshake /// [Handshake]: ../index.html#handshake
/// [`Connection`]: struct.Connection.html /// [`Connection`]: struct.Connection.html
/// [`SendRequest`]: struct.SendRequest.html /// [`SendRequest`]: struct.SendRequest.html
@ -1102,7 +1139,7 @@ impl Default for Builder {
/// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) -> Result<(), h2::Error> /// # async fn doc<T: AsyncRead + AsyncWrite + Unpin>(my_io: T) -> Result<(), h2::Error>
/// # { /// # {
/// let (send_request, connection) = client::handshake(my_io).await?; /// let (send_request, connection) = client::handshake(my_io).await?;
/// // The HTTP/2.0 handshake has completed, now start polling /// // The HTTP/2 handshake has completed, now start polling
/// // `connection` and use `send_request` to send requests to the /// // `connection` and use `send_request` to send requests to the
/// // server. /// // server.
/// # Ok(()) /// # Ok(())
@ -1115,11 +1152,28 @@ where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
{ {
let builder = Builder::new(); let builder = Builder::new();
builder.handshake(io).await builder
.handshake(io)
.instrument(tracing::trace_span!("client_handshake"))
.await
} }
// ===== impl Connection ===== // ===== impl Connection =====
async fn bind_connection<T>(io: &mut T) -> Result<(), crate::Error>
where
T: AsyncRead + AsyncWrite + Unpin,
{
tracing::debug!("binding client connection");
let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
io.write_all(msg).await.map_err(crate::Error::from_io)?;
tracing::debug!("client connection bound");
Ok(())
}
impl<T, B> Connection<T, B> impl<T, B> Connection<T, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
@ -1129,12 +1183,7 @@ where
mut io: T, mut io: T,
builder: Builder, builder: Builder,
) -> Result<(SendRequest<B>, Connection<T, B>), crate::Error> { ) -> Result<(SendRequest<B>, Connection<T, B>), crate::Error> {
log::debug!("binding client connection"); bind_connection(&mut io).await?;
let msg: &'static [u8] = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n";
io.write_all(msg).await.map_err(crate::Error::from_io)?;
log::debug!("client connection bound");
// Create the codec // Create the codec
let mut codec = Codec::new(io); let mut codec = Codec::new(io);
@ -1157,6 +1206,7 @@ where
proto::Config { proto::Config {
next_stream_id: builder.stream_id, next_stream_id: builder.stream_id,
initial_max_send_streams: builder.initial_max_send_streams, initial_max_send_streams: builder.initial_max_send_streams,
max_send_buffer_size: builder.max_send_buffer_size,
reset_stream_duration: builder.reset_stream_duration, reset_stream_duration: builder.reset_stream_duration,
reset_stream_max: builder.reset_stream_max, reset_stream_max: builder.reset_stream_max,
settings: builder.settings.clone(), settings: builder.settings.clone(),
@ -1224,6 +1274,33 @@ where
pub fn ping_pong(&mut self) -> Option<PingPong> { pub fn ping_pong(&mut self) -> Option<PingPong> {
self.inner.take_user_pings().map(PingPong::new) self.inner.take_user_pings().map(PingPong::new)
} }
/// Returns the maximum number of concurrent streams that may be initiated
/// by this client.
///
/// This limit is configured by the server peer by sending the
/// [`SETTINGS_MAX_CONCURRENT_STREAMS` parameter][1] in a `SETTINGS` frame.
/// This method returns the currently acknowledged value recieved from the
/// remote.
///
/// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
pub fn max_concurrent_send_streams(&self) -> usize {
self.inner.max_send_streams()
}
/// Returns the maximum number of concurrent streams that may be initiated
/// by the server on this connection.
///
/// This returns the value of the [`SETTINGS_MAX_CONCURRENT_STREAMS`
/// parameter][1] sent in a `SETTINGS` frame that has been
/// acknowledged by the remote peer. The value to be sent is configured by
/// the [`Builder::max_concurrent_streams`][2] method before handshaking
/// with the remote peer.
///
/// [1]: https://tools.ietf.org/html/rfc7540#section-5.1.2
/// [2]: ../struct.Builder.html#method.max_concurrent_streams
pub fn max_concurrent_recv_streams(&self) -> usize {
self.inner.max_recv_streams()
}
} }
impl<T, B> Future for Connection<T, B> impl<T, B> Future for Connection<T, B>
@ -1375,6 +1452,7 @@ impl Peer {
pub fn convert_send_message( pub fn convert_send_message(
id: StreamId, id: StreamId,
request: Request<()>, request: Request<()>,
protocol: Option<Protocol>,
end_of_stream: bool, end_of_stream: bool,
) -> Result<Headers, SendError> { ) -> Result<Headers, SendError> {
use http::request::Parts; use http::request::Parts;
@ -1394,7 +1472,7 @@ impl Peer {
// Build the set pseudo header set. All requests will include `method` // Build the set pseudo header set. All requests will include `method`
// and `path`. // and `path`.
let mut pseudo = Pseudo::request(method, uri); let mut pseudo = Pseudo::request(method, uri, protocol);
if pseudo.scheme.is_none() { if pseudo.scheme.is_none() {
// If the scheme is not set, then there are a two options. // If the scheme is not set, then there are a two options.
@ -1414,7 +1492,7 @@ impl Peer {
return Err(UserError::MissingUriSchemeAndAuthority.into()); return Err(UserError::MissingUriSchemeAndAuthority.into());
} else { } else {
// This is acceptable as per the above comment. However, // This is acceptable as per the above comment. However,
// HTTP/2.0 requires that a scheme is set. Since we are // HTTP/2 requires that a scheme is set. Since we are
// forwarding an HTTP 1.1 request, the scheme is set to // forwarding an HTTP 1.1 request, the scheme is set to
// "http". // "http".
pseudo.set_scheme(uri::Scheme::HTTP); pseudo.set_scheme(uri::Scheme::HTTP);
@ -1438,6 +1516,8 @@ impl Peer {
impl proto::Peer for Peer { impl proto::Peer for Peer {
type Poll = Response<()>; type Poll = Response<()>;
const NAME: &'static str = "Client";
fn r#dyn() -> proto::DynPeer { fn r#dyn() -> proto::DynPeer {
proto::DynPeer::Client proto::DynPeer::Client
} }
@ -1450,7 +1530,7 @@ impl proto::Peer for Peer {
pseudo: Pseudo, pseudo: Pseudo,
fields: HeaderMap, fields: HeaderMap,
stream_id: StreamId, stream_id: StreamId,
) -> Result<Self::Poll, RecvError> { ) -> Result<Self::Poll, Error> {
let mut b = Response::builder(); let mut b = Response::builder();
b = b.version(Version::HTTP_2); b = b.version(Version::HTTP_2);
@ -1464,10 +1544,7 @@ impl proto::Peer for Peer {
Err(_) => { Err(_) => {
// TODO: Should there be more specialized handling for different // TODO: Should there be more specialized handling for different
// kinds of errors // kinds of errors
return Err(RecvError::Stream { return Err(Error::library_reset(stream_id, Reason::PROTOCOL_ERROR));
id: stream_id,
reason: Reason::PROTOCOL_ERROR,
});
} }
}; };

55
third_party/rust/h2/src/codec/error.rs поставляемый
Просмотреть файл

@ -1,26 +1,12 @@
use crate::frame::{Reason, StreamId}; use crate::proto::Error;
use std::{error, fmt, io}; use std::{error, fmt, io};
/// Errors that are received
#[derive(Debug)]
pub enum RecvError {
Connection(Reason),
Stream { id: StreamId, reason: Reason },
Io(io::Error),
}
/// Errors caused by sending a message /// Errors caused by sending a message
#[derive(Debug)] #[derive(Debug)]
pub enum SendError { pub enum SendError {
/// User error Connection(Error),
User(UserError), User(UserError),
/// Connection error prevents sending.
Connection(Reason),
/// I/O error
Io(io::Error),
} }
/// Errors caused by users of the library /// Errors caused by users of the library
@ -35,9 +21,6 @@ pub enum UserError {
/// The payload size is too big /// The payload size is too big
PayloadTooBig, PayloadTooBig,
/// A header size is too big
HeaderTooBig,
/// The application attempted to initiate too many streams to remote. /// The application attempted to initiate too many streams to remote.
Rejected, Rejected,
@ -63,28 +46,9 @@ pub enum UserError {
/// Tries to update local SETTINGS while ACK has not been received. /// Tries to update local SETTINGS while ACK has not been received.
SendSettingsWhilePending, SendSettingsWhilePending,
}
// ===== impl RecvError ===== /// Tries to send push promise to peer who has disabled server push
PeerDisabledServerPush,
impl From<io::Error> for RecvError {
fn from(src: io::Error) -> Self {
RecvError::Io(src)
}
}
impl error::Error for RecvError {}
impl fmt::Display for RecvError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::RecvError::*;
match *self {
Connection(ref reason) => reason.fmt(fmt),
Stream { ref reason, .. } => reason.fmt(fmt),
Io(ref e) => e.fmt(fmt),
}
}
} }
// ===== impl SendError ===== // ===== impl SendError =====
@ -93,19 +57,16 @@ impl error::Error for SendError {}
impl fmt::Display for SendError { impl fmt::Display for SendError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::SendError::*;
match *self { match *self {
User(ref e) => e.fmt(fmt), Self::Connection(ref e) => e.fmt(fmt),
Connection(ref reason) => reason.fmt(fmt), Self::User(ref e) => e.fmt(fmt),
Io(ref e) => e.fmt(fmt),
} }
} }
} }
impl From<io::Error> for SendError { impl From<io::Error> for SendError {
fn from(src: io::Error) -> Self { fn from(src: io::Error) -> Self {
SendError::Io(src) Self::Connection(src.into())
} }
} }
@ -127,7 +88,6 @@ impl fmt::Display for UserError {
InactiveStreamId => "inactive stream", InactiveStreamId => "inactive stream",
UnexpectedFrameType => "unexpected frame type", UnexpectedFrameType => "unexpected frame type",
PayloadTooBig => "payload too big", PayloadTooBig => "payload too big",
HeaderTooBig => "header too big",
Rejected => "rejected", Rejected => "rejected",
ReleaseCapacityTooBig => "release capacity too big", ReleaseCapacityTooBig => "release capacity too big",
OverflowedStreamId => "stream ID overflowed", OverflowedStreamId => "stream ID overflowed",
@ -136,6 +96,7 @@ impl fmt::Display for UserError {
PollResetAfterSendResponse => "poll_reset after send_response is illegal", PollResetAfterSendResponse => "poll_reset after send_response is illegal",
SendPingWhilePending => "send_ping before received previous pong", SendPingWhilePending => "send_ping before received previous pong",
SendSettingsWhilePending => "sending SETTINGS before received previous ACK", SendSettingsWhilePending => "sending SETTINGS before received previous ACK",
PeerDisabledServerPush => "sending PUSH_PROMISE to peer who disabled server push",
}) })
} }
} }

501
third_party/rust/h2/src/codec/framed_read.rs поставляемый
Просмотреть файл

@ -1,8 +1,8 @@
use crate::codec::RecvError;
use crate::frame::{self, Frame, Kind, Reason}; use crate::frame::{self, Frame, Kind, Reason};
use crate::frame::{ use crate::frame::{
DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE, DEFAULT_MAX_FRAME_SIZE, DEFAULT_SETTINGS_HEADER_TABLE_SIZE, MAX_MAX_FRAME_SIZE,
}; };
use crate::proto::Error;
use crate::hpack; use crate::hpack;
@ -59,247 +59,6 @@ impl<T> FramedRead<T> {
} }
} }
fn decode_frame(&mut self, mut bytes: BytesMut) -> Result<Option<Frame>, RecvError> {
use self::RecvError::*;
log::trace!("decoding frame from {}B", bytes.len());
// Parse the head
let head = frame::Head::parse(&bytes);
if self.partial.is_some() && head.kind() != Kind::Continuation {
proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind());
return Err(Connection(Reason::PROTOCOL_ERROR));
}
let kind = head.kind();
log::trace!(" -> kind={:?}", kind);
macro_rules! header_block {
($frame:ident, $head:ident, $bytes:ident) => ({
// Drop the frame header
// TODO: Change to drain: carllerche/bytes#130
let _ = $bytes.split_to(frame::HEADER_LEN);
// Parse the header frame w/o parsing the payload
let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) {
Ok(res) => res,
Err(frame::Error::InvalidDependencyId) => {
proto_err!(stream: "invalid HEADERS dependency ID");
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
return Err(Stream {
id: $head.stream_id(),
reason: Reason::PROTOCOL_ERROR,
});
},
Err(e) => {
proto_err!(conn: "failed to load frame; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
};
let is_end_headers = frame.is_end_headers();
// Load the HPACK encoded headers
match frame.load_hpack(&mut payload, self.max_header_list_size, &mut self.hpack) {
Ok(_) => {},
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {},
Err(frame::Error::MalformedMessage) => {
let id = $head.stream_id();
proto_err!(stream: "malformed header block; stream={:?}", id);
return Err(Stream {
id,
reason: Reason::PROTOCOL_ERROR,
});
},
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
frame.into()
} else {
log::trace!("loaded partial header block");
// Defer returning the frame
self.partial = Some(Partial {
frame: Continuable::$frame(frame),
buf: payload,
});
return Ok(None);
}
});
}
let frame = match kind {
Kind::Settings => {
let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Ping => {
let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load PING frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::WindowUpdate => {
let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Data => {
let _ = bytes.split_to(frame::HEADER_LEN);
let res = frame::Data::load(head, bytes.freeze());
// TODO: Should this always be connection level? Probably not...
res.map_err(|e| {
proto_err!(conn: "failed to load DATA frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Headers => header_block!(Headers, head, bytes),
Kind::Reset => {
let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load RESET frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::GoAway => {
let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e);
Connection(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::PushPromise => header_block!(PushPromise, head, bytes),
Kind::Priority => {
if head.stream_id() == 0 {
// Invalid stream identifier
proto_err!(conn: "invalid stream ID 0");
return Err(Connection(Reason::PROTOCOL_ERROR));
}
match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) {
Ok(frame) => frame.into(),
Err(frame::Error::InvalidDependencyId) => {
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
let id = head.stream_id();
proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id);
return Err(Stream {
id,
reason: Reason::PROTOCOL_ERROR,
});
}
Err(e) => {
proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
}
}
Kind::Continuation => {
let is_end_headers = (head.flag() & 0x4) == 0x4;
let mut partial = match self.partial.take() {
Some(partial) => partial,
None => {
proto_err!(conn: "received unexpected CONTINUATION frame");
return Err(Connection(Reason::PROTOCOL_ERROR));
}
};
// The stream identifiers must match
if partial.frame.stream_id() != head.stream_id() {
proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID");
return Err(Connection(Reason::PROTOCOL_ERROR));
}
// Extend the buf
if partial.buf.is_empty() {
partial.buf = bytes.split_off(frame::HEADER_LEN);
} else {
if partial.frame.is_over_size() {
// If there was left over bytes previously, they may be
// needed to continue decoding, even though we will
// be ignoring this frame. This is done to keep the HPACK
// decoder state up-to-date.
//
// Still, we need to be careful, because if a malicious
// attacker were to try to send a gigantic string, such
// that it fits over multiple header blocks, we could
// grow memory uncontrollably again, and that'd be a shame.
//
// Instead, we use a simple heuristic to determine if
// we should continue to ignore decoding, or to tell
// the attacker to go away.
if partial.buf.len() + bytes.len() > self.max_header_list_size {
proto_err!(conn: "CONTINUATION frame header block size over ignorable limit");
return Err(Connection(Reason::COMPRESSION_ERROR));
}
}
partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]);
}
match partial.frame.load_hpack(
&mut partial.buf,
self.max_header_list_size,
&mut self.hpack,
) {
Ok(_) => {}
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_)))
if !is_end_headers => {}
Err(frame::Error::MalformedMessage) => {
let id = head.stream_id();
proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id);
return Err(Stream {
id,
reason: Reason::PROTOCOL_ERROR,
});
}
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Connection(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
partial.frame.into()
} else {
self.partial = Some(partial);
return Ok(None);
}
}
Kind::Unknown => {
// Unknown frames are ignored
return Ok(None);
}
};
Ok(Some(frame))
}
pub fn get_ref(&self) -> &T { pub fn get_ref(&self) -> &T {
self.inner.get_ref() self.inner.get_ref()
} }
@ -331,35 +90,279 @@ impl<T> FramedRead<T> {
} }
} }
/// Decodes a frame.
///
/// This method is intentionally de-generified and outlined because it is very large.
fn decode_frame(
hpack: &mut hpack::Decoder,
max_header_list_size: usize,
partial_inout: &mut Option<Partial>,
mut bytes: BytesMut,
) -> Result<Option<Frame>, Error> {
let span = tracing::trace_span!("FramedRead::decode_frame", offset = bytes.len());
let _e = span.enter();
tracing::trace!("decoding frame from {}B", bytes.len());
// Parse the head
let head = frame::Head::parse(&bytes);
if partial_inout.is_some() && head.kind() != Kind::Continuation {
proto_err!(conn: "expected CONTINUATION, got {:?}", head.kind());
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
let kind = head.kind();
tracing::trace!(frame.kind = ?kind);
macro_rules! header_block {
($frame:ident, $head:ident, $bytes:ident) => ({
// Drop the frame header
// TODO: Change to drain: carllerche/bytes#130
let _ = $bytes.split_to(frame::HEADER_LEN);
// Parse the header frame w/o parsing the payload
let (mut frame, mut payload) = match frame::$frame::load($head, $bytes) {
Ok(res) => res,
Err(frame::Error::InvalidDependencyId) => {
proto_err!(stream: "invalid HEADERS dependency ID");
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
return Err(Error::library_reset($head.stream_id(), Reason::PROTOCOL_ERROR));
},
Err(e) => {
proto_err!(conn: "failed to load frame; err={:?}", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
};
let is_end_headers = frame.is_end_headers();
// Load the HPACK encoded headers
match frame.load_hpack(&mut payload, max_header_list_size, hpack) {
Ok(_) => {},
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {},
Err(frame::Error::MalformedMessage) => {
let id = $head.stream_id();
proto_err!(stream: "malformed header block; stream={:?}", id);
return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
},
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
frame.into()
} else {
tracing::trace!("loaded partial header block");
// Defer returning the frame
*partial_inout = Some(Partial {
frame: Continuable::$frame(frame),
buf: payload,
});
return Ok(None);
}
});
}
let frame = match kind {
Kind::Settings => {
let res = frame::Settings::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load SETTINGS frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Ping => {
let res = frame::Ping::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load PING frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::WindowUpdate => {
let res = frame::WindowUpdate::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load WINDOW_UPDATE frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Data => {
let _ = bytes.split_to(frame::HEADER_LEN);
let res = frame::Data::load(head, bytes.freeze());
// TODO: Should this always be connection level? Probably not...
res.map_err(|e| {
proto_err!(conn: "failed to load DATA frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::Headers => header_block!(Headers, head, bytes),
Kind::Reset => {
let res = frame::Reset::load(head, &bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load RESET frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::GoAway => {
let res = frame::GoAway::load(&bytes[frame::HEADER_LEN..]);
res.map_err(|e| {
proto_err!(conn: "failed to load GO_AWAY frame; err={:?}", e);
Error::library_go_away(Reason::PROTOCOL_ERROR)
})?
.into()
}
Kind::PushPromise => header_block!(PushPromise, head, bytes),
Kind::Priority => {
if head.stream_id() == 0 {
// Invalid stream identifier
proto_err!(conn: "invalid stream ID 0");
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
match frame::Priority::load(head, &bytes[frame::HEADER_LEN..]) {
Ok(frame) => frame.into(),
Err(frame::Error::InvalidDependencyId) => {
// A stream cannot depend on itself. An endpoint MUST
// treat this as a stream error (Section 5.4.2) of type
// `PROTOCOL_ERROR`.
let id = head.stream_id();
proto_err!(stream: "PRIORITY invalid dependency ID; stream={:?}", id);
return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
}
Err(e) => {
proto_err!(conn: "failed to load PRIORITY frame; err={:?};", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
}
}
Kind::Continuation => {
let is_end_headers = (head.flag() & 0x4) == 0x4;
let mut partial = match partial_inout.take() {
Some(partial) => partial,
None => {
proto_err!(conn: "received unexpected CONTINUATION frame");
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
};
// The stream identifiers must match
if partial.frame.stream_id() != head.stream_id() {
proto_err!(conn: "CONTINUATION frame stream ID does not match previous frame stream ID");
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR).into());
}
// Extend the buf
if partial.buf.is_empty() {
partial.buf = bytes.split_off(frame::HEADER_LEN);
} else {
if partial.frame.is_over_size() {
// If there was left over bytes previously, they may be
// needed to continue decoding, even though we will
// be ignoring this frame. This is done to keep the HPACK
// decoder state up-to-date.
//
// Still, we need to be careful, because if a malicious
// attacker were to try to send a gigantic string, such
// that it fits over multiple header blocks, we could
// grow memory uncontrollably again, and that'd be a shame.
//
// Instead, we use a simple heuristic to determine if
// we should continue to ignore decoding, or to tell
// the attacker to go away.
if partial.buf.len() + bytes.len() > max_header_list_size {
proto_err!(conn: "CONTINUATION frame header block size over ignorable limit");
return Err(Error::library_go_away(Reason::COMPRESSION_ERROR).into());
}
}
partial.buf.extend_from_slice(&bytes[frame::HEADER_LEN..]);
}
match partial
.frame
.load_hpack(&mut partial.buf, max_header_list_size, hpack)
{
Ok(_) => {}
Err(frame::Error::Hpack(hpack::DecoderError::NeedMore(_))) if !is_end_headers => {}
Err(frame::Error::MalformedMessage) => {
let id = head.stream_id();
proto_err!(stream: "malformed CONTINUATION frame; stream={:?}", id);
return Err(Error::library_reset(id, Reason::PROTOCOL_ERROR));
}
Err(e) => {
proto_err!(conn: "failed HPACK decoding; err={:?}", e);
return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
}
}
if is_end_headers {
partial.frame.into()
} else {
*partial_inout = Some(partial);
return Ok(None);
}
}
Kind::Unknown => {
// Unknown frames are ignored
return Ok(None);
}
};
Ok(Some(frame))
}
impl<T> Stream for FramedRead<T> impl<T> Stream for FramedRead<T>
where where
T: AsyncRead + Unpin, T: AsyncRead + Unpin,
{ {
type Item = Result<Frame, RecvError>; type Item = Result<Frame, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let span = tracing::trace_span!("FramedRead::poll_next");
let _e = span.enter();
loop { loop {
log::trace!("poll"); tracing::trace!("poll");
let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) { let bytes = match ready!(Pin::new(&mut self.inner).poll_next(cx)) {
Some(Ok(bytes)) => bytes, Some(Ok(bytes)) => bytes,
Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))), Some(Err(e)) => return Poll::Ready(Some(Err(map_err(e)))),
None => return Poll::Ready(None), None => return Poll::Ready(None),
}; };
log::trace!("poll; bytes={}B", bytes.len()); tracing::trace!(read.bytes = bytes.len());
if let Some(frame) = self.decode_frame(bytes)? { let Self {
log::debug!("received; frame={:?}", frame); ref mut hpack,
max_header_list_size,
ref mut partial,
..
} = *self;
if let Some(frame) = decode_frame(hpack, max_header_list_size, partial, bytes)? {
tracing::debug!(?frame, "received");
return Poll::Ready(Some(Ok(frame))); return Poll::Ready(Some(Ok(frame)));
} }
} }
} }
} }
fn map_err(err: io::Error) -> RecvError { fn map_err(err: io::Error) -> Error {
if let io::ErrorKind::InvalidData = err.kind() { if let io::ErrorKind::InvalidData = err.kind() {
if let Some(custom) = err.get_ref() { if let Some(custom) = err.get_ref() {
if custom.is::<LengthDelimitedCodecError>() { if custom.is::<LengthDelimitedCodecError>() {
return RecvError::Connection(Reason::FRAME_SIZE_ERROR); return Error::library_go_away(Reason::FRAME_SIZE_ERROR);
} }
} }
} }

261
third_party/rust/h2/src/codec/framed_write.rs поставляемый
Просмотреть файл

@ -3,15 +3,12 @@ use crate::codec::UserError::*;
use crate::frame::{self, Frame, FrameSize}; use crate::frame::{self, Frame, FrameSize};
use crate::hpack; use crate::hpack;
use bytes::{ use bytes::{Buf, BufMut, BytesMut};
buf::{BufExt, BufMutExt},
Buf, BufMut, BytesMut,
};
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use std::io::{self, Cursor}; use std::io::{self, Cursor, IoSlice};
// A macro to get around a method needing to borrow &mut self // A macro to get around a method needing to borrow &mut self
macro_rules! limited_write_buf { macro_rules! limited_write_buf {
@ -26,6 +23,11 @@ pub struct FramedWrite<T, B> {
/// Upstream `AsyncWrite` /// Upstream `AsyncWrite`
inner: T, inner: T,
encoder: Encoder<B>,
}
#[derive(Debug)]
struct Encoder<B> {
/// HPACK encoder /// HPACK encoder
hpack: hpack::Encoder, hpack: hpack::Encoder,
@ -42,6 +44,9 @@ pub struct FramedWrite<T, B> {
/// Max frame size, this is specified by the peer /// Max frame size, this is specified by the peer
max_frame_size: FrameSize, max_frame_size: FrameSize,
/// Whether or not the wrapped `AsyncWrite` supports vectored IO.
is_write_vectored: bool,
} }
#[derive(Debug)] #[derive(Debug)]
@ -50,7 +55,7 @@ enum Next<B> {
Continuation(frame::Continuation), Continuation(frame::Continuation),
} }
/// Initialze the connection with this amount of write buffer. /// Initialize the connection with this amount of write buffer.
/// ///
/// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS /// The minimum MAX_FRAME_SIZE is 16kb, so always be able to send a HEADERS
/// frame that big. /// frame that big.
@ -71,13 +76,17 @@ where
B: Buf, B: Buf,
{ {
pub fn new(inner: T) -> FramedWrite<T, B> { pub fn new(inner: T) -> FramedWrite<T, B> {
let is_write_vectored = inner.is_write_vectored();
FramedWrite { FramedWrite {
inner, inner,
hpack: hpack::Encoder::default(), encoder: Encoder {
buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)), hpack: hpack::Encoder::default(),
next: None, buf: Cursor::new(BytesMut::with_capacity(DEFAULT_BUFFER_CAPACITY)),
last_data_frame: None, next: None,
max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE, last_data_frame: None,
max_frame_size: frame::DEFAULT_MAX_FRAME_SIZE,
is_write_vectored,
},
} }
} }
@ -86,11 +95,11 @@ where
/// Calling this function may result in the current contents of the buffer /// Calling this function may result in the current contents of the buffer
/// to be flushed to `T`. /// to be flushed to `T`.
pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> { pub fn poll_ready(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
if !self.has_capacity() { if !self.encoder.has_capacity() {
// Try flushing // Try flushing
ready!(self.flush(cx))?; ready!(self.flush(cx))?;
if !self.has_capacity() { if !self.encoder.has_capacity() {
return Poll::Pending; return Poll::Pending;
} }
} }
@ -103,10 +112,124 @@ where
/// `poll_ready` must be called first to ensure that a frame may be /// `poll_ready` must be called first to ensure that a frame may be
/// accepted. /// accepted.
pub fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> { pub fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
self.encoder.buffer(item)
}
/// Flush buffered data to the wire
pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
let span = tracing::trace_span!("FramedWrite::flush");
let _e = span.enter();
loop {
while !self.encoder.is_empty() {
match self.encoder.next {
Some(Next::Data(ref mut frame)) => {
tracing::trace!(queued_data_frame = true);
let mut buf = (&mut self.encoder.buf).chain(frame.payload_mut());
ready!(write(
&mut self.inner,
self.encoder.is_write_vectored,
&mut buf,
cx,
))?
}
_ => {
tracing::trace!(queued_data_frame = false);
ready!(write(
&mut self.inner,
self.encoder.is_write_vectored,
&mut self.encoder.buf,
cx,
))?
}
}
}
match self.encoder.unset_frame() {
ControlFlow::Continue => (),
ControlFlow::Break => break,
}
}
tracing::trace!("flushing buffer");
// Flush the upstream
ready!(Pin::new(&mut self.inner).poll_flush(cx))?;
Poll::Ready(Ok(()))
}
/// Close the codec
pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
ready!(self.flush(cx))?;
Pin::new(&mut self.inner).poll_shutdown(cx)
}
}
fn write<T, B>(
writer: &mut T,
is_write_vectored: bool,
buf: &mut B,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>>
where
T: AsyncWrite + Unpin,
B: Buf,
{
// TODO(eliza): when tokio-util 0.5.1 is released, this
// could just use `poll_write_buf`...
const MAX_IOVS: usize = 64;
let n = if is_write_vectored {
let mut bufs = [IoSlice::new(&[]); MAX_IOVS];
let cnt = buf.chunks_vectored(&mut bufs);
ready!(Pin::new(writer).poll_write_vectored(cx, &bufs[..cnt]))?
} else {
ready!(Pin::new(writer).poll_write(cx, buf.chunk()))?
};
buf.advance(n);
Ok(()).into()
}
#[must_use]
enum ControlFlow {
Continue,
Break,
}
impl<B> Encoder<B>
where
B: Buf,
{
fn unset_frame(&mut self) -> ControlFlow {
// Clear internal buffer
self.buf.set_position(0);
self.buf.get_mut().clear();
// The data frame has been written, so unset it
match self.next.take() {
Some(Next::Data(frame)) => {
self.last_data_frame = Some(frame);
debug_assert!(self.is_empty());
ControlFlow::Break
}
Some(Next::Continuation(frame)) => {
// Buffer the continuation frame, then try to write again
let mut buf = limited_write_buf!(self);
if let Some(continuation) = frame.encode(&mut buf) {
self.next = Some(Next::Continuation(continuation));
}
ControlFlow::Continue
}
None => ControlFlow::Break,
}
}
fn buffer(&mut self, item: Frame<B>) -> Result<(), UserError> {
// Ensure that we have enough capacity to accept the write. // Ensure that we have enough capacity to accept the write.
assert!(self.has_capacity()); assert!(self.has_capacity());
let span = tracing::trace_span!("FramedWrite::buffer", frame = ?item);
let _e = span.enter();
log::debug!("send; frame={:?}", item); tracing::debug!(frame = ?item, "send");
match item { match item {
Frame::Data(mut v) => { Frame::Data(mut v) => {
@ -150,103 +273,37 @@ where
} }
Frame::Settings(v) => { Frame::Settings(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded settings; rem={:?}", self.buf.remaining()); tracing::trace!(rem = self.buf.remaining(), "encoded settings");
} }
Frame::GoAway(v) => { Frame::GoAway(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded go_away; rem={:?}", self.buf.remaining()); tracing::trace!(rem = self.buf.remaining(), "encoded go_away");
} }
Frame::Ping(v) => { Frame::Ping(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded ping; rem={:?}", self.buf.remaining()); tracing::trace!(rem = self.buf.remaining(), "encoded ping");
} }
Frame::WindowUpdate(v) => { Frame::WindowUpdate(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded window_update; rem={:?}", self.buf.remaining()); tracing::trace!(rem = self.buf.remaining(), "encoded window_update");
} }
Frame::Priority(_) => { Frame::Priority(_) => {
/* /*
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded priority; rem={:?}", self.buf.remaining()); tracing::trace!("encoded priority; rem={:?}", self.buf.remaining());
*/ */
unimplemented!(); unimplemented!();
} }
Frame::Reset(v) => { Frame::Reset(v) => {
v.encode(self.buf.get_mut()); v.encode(self.buf.get_mut());
log::trace!("encoded reset; rem={:?}", self.buf.remaining()); tracing::trace!(rem = self.buf.remaining(), "encoded reset");
} }
} }
Ok(()) Ok(())
} }
/// Flush buffered data to the wire
pub fn flush(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
log::trace!("flush");
loop {
while !self.is_empty() {
match self.next {
Some(Next::Data(ref mut frame)) => {
log::trace!(" -> queued data frame");
let mut buf = (&mut self.buf).chain(frame.payload_mut());
ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut buf))?;
}
_ => {
log::trace!(" -> not a queued data frame");
ready!(Pin::new(&mut self.inner).poll_write_buf(cx, &mut self.buf))?;
}
}
}
// Clear internal buffer
self.buf.set_position(0);
self.buf.get_mut().clear();
// The data frame has been written, so unset it
match self.next.take() {
Some(Next::Data(frame)) => {
self.last_data_frame = Some(frame);
debug_assert!(self.is_empty());
break;
}
Some(Next::Continuation(frame)) => {
// Buffer the continuation frame, then try to write again
let mut buf = limited_write_buf!(self);
if let Some(continuation) = frame.encode(&mut self.hpack, &mut buf) {
// We previously had a CONTINUATION, and after encoding
// it, we got *another* one? Let's just double check
// that at least some progress is being made...
if self.buf.get_ref().len() == frame::HEADER_LEN {
// If *only* the CONTINUATION frame header was
// written, and *no* header fields, we're stuck
// in a loop...
panic!("CONTINUATION frame write loop; header value too big to encode");
}
self.next = Some(Next::Continuation(continuation));
}
}
None => {
break;
}
}
}
log::trace!("flushing buffer");
// Flush the upstream
ready!(Pin::new(&mut self.inner).poll_flush(cx))?;
Poll::Ready(Ok(()))
}
/// Close the codec
pub fn shutdown(&mut self, cx: &mut Context) -> Poll<io::Result<()>> {
ready!(self.flush(cx))?;
Pin::new(&mut self.inner).poll_shutdown(cx)
}
fn has_capacity(&self) -> bool { fn has_capacity(&self) -> bool {
self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY self.next.is_none() && self.buf.get_ref().remaining_mut() >= MIN_BUFFER_CAPACITY
} }
@ -259,26 +316,32 @@ where
} }
} }
impl<B> Encoder<B> {
fn max_frame_size(&self) -> usize {
self.max_frame_size as usize
}
}
impl<T, B> FramedWrite<T, B> { impl<T, B> FramedWrite<T, B> {
/// Returns the max frame size that can be sent /// Returns the max frame size that can be sent
pub fn max_frame_size(&self) -> usize { pub fn max_frame_size(&self) -> usize {
self.max_frame_size as usize self.encoder.max_frame_size()
} }
/// Set the peer's max frame size. /// Set the peer's max frame size.
pub fn set_max_frame_size(&mut self, val: usize) { pub fn set_max_frame_size(&mut self, val: usize) {
assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize); assert!(val <= frame::MAX_MAX_FRAME_SIZE as usize);
self.max_frame_size = val as FrameSize; self.encoder.max_frame_size = val as FrameSize;
} }
/// Set the peer's header table size. /// Set the peer's header table size.
pub fn set_header_table_size(&mut self, val: usize) { pub fn set_header_table_size(&mut self, val: usize) {
self.hpack.update_max_size(val); self.encoder.hpack.update_max_size(val);
} }
/// Retrieve the last data frame that has been sent /// Retrieve the last data frame that has been sent
pub fn take_last_data_frame(&mut self) -> Option<frame::Data<B>> { pub fn take_last_data_frame(&mut self) -> Option<frame::Data<B>> {
self.last_data_frame.take() self.encoder.last_data_frame.take()
} }
pub fn get_mut(&mut self) -> &mut T { pub fn get_mut(&mut self) -> &mut T {
@ -287,25 +350,13 @@ impl<T, B> FramedWrite<T, B> {
} }
impl<T: AsyncRead + Unpin, B> AsyncRead for FramedWrite<T, B> { impl<T: AsyncRead + Unpin, B> AsyncRead for FramedWrite<T, B> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [std::mem::MaybeUninit<u8>]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
fn poll_read( fn poll_read(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
buf: &mut [u8], buf: &mut ReadBuf,
) -> Poll<io::Result<usize>> { ) -> Poll<io::Result<()>> {
Pin::new(&mut self.inner).poll_read(cx, buf) Pin::new(&mut self.inner).poll_read(cx, buf)
} }
fn poll_read_buf<Buf: BufMut>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut Buf,
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.inner).poll_read_buf(cx, buf)
}
} }
// We never project the Pin to `B`. // We never project the Pin to `B`.

5
third_party/rust/h2/src/codec/mod.rs поставляемый
Просмотреть файл

@ -2,12 +2,13 @@ mod error;
mod framed_read; mod framed_read;
mod framed_write; mod framed_write;
pub use self::error::{RecvError, SendError, UserError}; pub use self::error::{SendError, UserError};
use self::framed_read::FramedRead; use self::framed_read::FramedRead;
use self::framed_write::FramedWrite; use self::framed_write::FramedWrite;
use crate::frame::{self, Data, Frame}; use crate::frame::{self, Data, Frame};
use crate::proto::Error;
use bytes::Buf; use bytes::Buf;
use futures_core::Stream; use futures_core::Stream;
@ -155,7 +156,7 @@ impl<T, B> Stream for Codec<T, B>
where where
T: AsyncRead + Unpin, T: AsyncRead + Unpin,
{ {
type Item = Result<Frame, RecvError>; type Item = Result<Frame, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> { fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
Pin::new(&mut self.inner).poll_next(cx) Pin::new(&mut self.inner).poll_next(cx)

101
third_party/rust/h2/src/error.rs поставляемый
Просмотреть файл

@ -1,11 +1,13 @@
use crate::codec::{SendError, UserError}; use crate::codec::{SendError, UserError};
use crate::proto; use crate::frame::StreamId;
use crate::proto::{self, Initiator};
use bytes::Bytes;
use std::{error, fmt, io}; use std::{error, fmt, io};
pub use crate::frame::Reason; pub use crate::frame::Reason;
/// Represents HTTP/2.0 operation errors. /// Represents HTTP/2 operation errors.
/// ///
/// `Error` covers error cases raised by protocol errors caused by the /// `Error` covers error cases raised by protocol errors caused by the
/// peer, I/O (transport) errors, and errors caused by the user of the library. /// peer, I/O (transport) errors, and errors caused by the user of the library.
@ -22,11 +24,14 @@ pub struct Error {
#[derive(Debug)] #[derive(Debug)]
enum Kind { enum Kind {
/// An error caused by an action taken by the remote peer. /// A RST_STREAM frame was received or sent.
/// Reset(StreamId, Reason, Initiator),
/// This is either an error received by the peer or caused by an invalid
/// action taken by the peer (i.e. a protocol error). /// A GO_AWAY frame was received or sent.
Proto(Reason), GoAway(Bytes, Reason, Initiator),
/// The user created an error from a bare Reason.
Reason(Reason),
/// An error resulting from an invalid action taken by the user of this /// An error resulting from an invalid action taken by the user of this
/// library. /// library.
@ -45,12 +50,14 @@ impl Error {
/// action taken by the peer (i.e. a protocol error). /// action taken by the peer (i.e. a protocol error).
pub fn reason(&self) -> Option<Reason> { pub fn reason(&self) -> Option<Reason> {
match self.kind { match self.kind {
Kind::Proto(reason) => Some(reason), Kind::Reset(_, reason, _) | Kind::GoAway(_, reason, _) | Kind::Reason(reason) => {
Some(reason)
}
_ => None, _ => None,
} }
} }
/// Returns the true if the error is an io::Error /// Returns true if the error is an io::Error
pub fn is_io(&self) -> bool { pub fn is_io(&self) -> bool {
match self.kind { match self.kind {
Kind::Io(_) => true, Kind::Io(_) => true,
@ -79,6 +86,21 @@ impl Error {
kind: Kind::Io(err), kind: Kind::Io(err),
} }
} }
/// Returns true if the error is from a `GOAWAY`.
pub fn is_go_away(&self) -> bool {
matches!(self.kind, Kind::GoAway(..))
}
/// Returns true if the error was received in a frame from the remote.
///
/// Such as from a received `RST_STREAM` or `GOAWAY` frame.
pub fn is_remote(&self) -> bool {
matches!(
self.kind,
Kind::GoAway(_, _, Initiator::Remote) | Kind::Reset(_, _, Initiator::Remote)
)
}
} }
impl From<proto::Error> for Error { impl From<proto::Error> for Error {
@ -87,8 +109,13 @@ impl From<proto::Error> for Error {
Error { Error {
kind: match src { kind: match src {
Proto(reason) => Kind::Proto(reason), Reset(stream_id, reason, initiator) => Kind::Reset(stream_id, reason, initiator),
Io(e) => Kind::Io(e), GoAway(debug_data, reason, initiator) => {
Kind::GoAway(debug_data, reason, initiator)
}
Io(kind, inner) => {
Kind::Io(inner.map_or_else(|| kind.into(), |inner| io::Error::new(kind, inner)))
}
}, },
} }
} }
@ -97,7 +124,7 @@ impl From<proto::Error> for Error {
impl From<Reason> for Error { impl From<Reason> for Error {
fn from(src: Reason) -> Error { fn from(src: Reason) -> Error {
Error { Error {
kind: Kind::Proto(src), kind: Kind::Reason(src),
} }
} }
} }
@ -106,8 +133,7 @@ impl From<SendError> for Error {
fn from(src: SendError) -> Error { fn from(src: SendError) -> Error {
match src { match src {
SendError::User(e) => e.into(), SendError::User(e) => e.into(),
SendError::Connection(reason) => reason.into(), SendError::Connection(e) => e.into(),
SendError::Io(e) => Error::from_io(e),
} }
} }
} }
@ -122,14 +148,51 @@ impl From<UserError> for Error {
impl fmt::Display for Error { impl fmt::Display for Error {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
use self::Kind::*; let debug_data = match self.kind {
Kind::Reset(_, reason, Initiator::User) => {
return write!(fmt, "stream error sent by user: {}", reason)
}
Kind::Reset(_, reason, Initiator::Library) => {
return write!(fmt, "stream error detected: {}", reason)
}
Kind::Reset(_, reason, Initiator::Remote) => {
return write!(fmt, "stream error received: {}", reason)
}
Kind::GoAway(ref debug_data, reason, Initiator::User) => {
write!(fmt, "connection error sent by user: {}", reason)?;
debug_data
}
Kind::GoAway(ref debug_data, reason, Initiator::Library) => {
write!(fmt, "connection error detected: {}", reason)?;
debug_data
}
Kind::GoAway(ref debug_data, reason, Initiator::Remote) => {
write!(fmt, "connection error received: {}", reason)?;
debug_data
}
Kind::Reason(reason) => return write!(fmt, "protocol error: {}", reason),
Kind::User(ref e) => return write!(fmt, "user error: {}", e),
Kind::Io(ref e) => return e.fmt(fmt),
};
match self.kind { if !debug_data.is_empty() {
Proto(ref reason) => write!(fmt, "protocol error: {}", reason), write!(fmt, " ({:?})", debug_data)?;
User(ref e) => write!(fmt, "user error: {}", e),
Io(ref e) => fmt::Display::fmt(e, fmt),
} }
Ok(())
} }
} }
impl error::Error for Error {} impl error::Error for Error {}
#[cfg(test)]
mod tests {
use super::Error;
use crate::Reason;
#[test]
fn error_from_reason() {
let err = Error::from(Reason::HTTP_1_1_REQUIRED);
assert_eq!(err.reason(), Some(Reason::HTTP_1_1_REQUIRED));
}
}

55
third_party/rust/h2/src/ext.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,55 @@
//! Extensions specific to the HTTP/2 protocol.
use crate::hpack::BytesStr;
use bytes::Bytes;
use std::fmt;
/// Represents the `:protocol` pseudo-header used by
/// the [Extended CONNECT Protocol].
///
/// [Extended CONNECT Protocol]: https://datatracker.ietf.org/doc/html/rfc8441#section-4
#[derive(Clone, Eq, PartialEq)]
pub struct Protocol {
value: BytesStr,
}
impl Protocol {
/// Converts a static string to a protocol name.
pub const fn from_static(value: &'static str) -> Self {
Self {
value: BytesStr::from_static(value),
}
}
/// Returns a str representation of the header.
pub fn as_str(&self) -> &str {
self.value.as_str()
}
pub(crate) fn try_from(bytes: Bytes) -> Result<Self, std::str::Utf8Error> {
Ok(Self {
value: BytesStr::try_from(bytes)?,
})
}
}
impl<'a> From<&'a str> for Protocol {
fn from(value: &'a str) -> Self {
Self {
value: BytesStr::from(value),
}
}
}
impl AsRef<[u8]> for Protocol {
fn as_ref(&self) -> &[u8] {
self.value.as_ref()
}
}
impl fmt::Debug for Protocol {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.value.fmt(f)
}
}

4
third_party/rust/h2/src/frame/data.rs поставляемый
Просмотреть файл

@ -36,7 +36,7 @@ impl<T> Data<T> {
} }
} }
/// Returns the stream identifer that this frame is associated with. /// Returns the stream identifier that this frame is associated with.
/// ///
/// This cannot be a zero stream identifier. /// This cannot be a zero stream identifier.
pub fn stream_id(&self) -> StreamId { pub fn stream_id(&self) -> StreamId {
@ -63,7 +63,7 @@ impl<T> Data<T> {
} }
} }
/// Returns whther the `PADDED` flag is set on this frame. /// Returns whether the `PADDED` flag is set on this frame.
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]
pub fn is_padded(&self) -> bool { pub fn is_padded(&self) -> bool {
self.flags.is_padded() self.flags.is_padded()

5
third_party/rust/h2/src/frame/go_away.rs поставляемый
Просмотреть файл

@ -29,8 +29,7 @@ impl GoAway {
self.error_code self.error_code
} }
#[cfg(feature = "unstable")] pub fn debug_data(&self) -> &Bytes {
pub fn debug_data(&self) -> &[u8] {
&self.debug_data &self.debug_data
} }
@ -51,7 +50,7 @@ impl GoAway {
} }
pub fn encode<B: BufMut>(&self, dst: &mut B) { pub fn encode<B: BufMut>(&self, dst: &mut B) {
log::trace!("encoding GO_AWAY; code={:?}", self.error_code); tracing::trace!("encoding GO_AWAY; code={:?}", self.error_code);
let head = Head::new(Kind::GoAway, 0, StreamId::zero()); let head = Head::new(Kind::GoAway, 0, StreamId::zero());
head.encode(8, dst); head.encode(8, dst);
dst.put_u32(self.last_stream_id.into()); dst.put_u32(self.last_stream_id.into());

2
third_party/rust/h2/src/frame/head.rs поставляемый
Просмотреть файл

@ -36,7 +36,7 @@ impl Head {
} }
} }
/// Parse an HTTP/2.0 frame header /// Parse an HTTP/2 frame header
pub fn parse(header: &[u8]) -> Head { pub fn parse(header: &[u8]) -> Head {
let (stream_id, _) = StreamId::parse(&header[5..]); let (stream_id, _) = StreamId::parse(&header[5..]);

285
third_party/rust/h2/src/frame/headers.rs поставляемый
Просмотреть файл

@ -1,21 +1,17 @@
use super::{util, StreamDependency, StreamId}; use super::{util, StreamDependency, StreamId};
use crate::ext::Protocol;
use crate::frame::{Error, Frame, Head, Kind}; use crate::frame::{Error, Frame, Head, Kind};
use crate::hpack::{self, BytesStr}; use crate::hpack::{self, BytesStr};
use http::header::{self, HeaderName, HeaderValue}; use http::header::{self, HeaderName, HeaderValue};
use http::{uri, HeaderMap, Method, Request, StatusCode, Uri}; use http::{uri, HeaderMap, Method, Request, StatusCode, Uri};
use bytes::{Bytes, BytesMut}; use bytes::{BufMut, Bytes, BytesMut};
use std::fmt; use std::fmt;
use std::io::Cursor; use std::io::Cursor;
type EncodeBuf<'a> = bytes::buf::ext::Limit<&'a mut BytesMut>; type EncodeBuf<'a> = bytes::buf::Limit<&'a mut BytesMut>;
// Minimum MAX_FRAME_SIZE is 16kb, so save some arbitrary space for frame
// head and other header bits.
const MAX_HEADER_LENGTH: usize = 1024 * 16 - 100;
/// Header frame /// Header frame
/// ///
/// This could be either a request or a response. /// This could be either a request or a response.
@ -71,6 +67,7 @@ pub struct Pseudo {
pub scheme: Option<BytesStr>, pub scheme: Option<BytesStr>,
pub authority: Option<BytesStr>, pub authority: Option<BytesStr>,
pub path: Option<BytesStr>, pub path: Option<BytesStr>,
pub protocol: Option<Protocol>,
// Response // Response
pub status: Option<StatusCode>, pub status: Option<StatusCode>,
@ -100,11 +97,7 @@ struct HeaderBlock {
#[derive(Debug)] #[derive(Debug)]
struct EncodingHeaderBlock { struct EncodingHeaderBlock {
/// Argument to pass to the HPACK encoder to resume encoding hpack: Bytes,
hpack: Option<hpack::EncodeState>,
/// remaining headers to encode
headers: Iter,
} }
const END_STREAM: u8 = 0x1; const END_STREAM: u8 = 0x1;
@ -153,7 +146,11 @@ impl Headers {
let flags = HeadersFlag(head.flag()); let flags = HeadersFlag(head.flag());
let mut pad = 0; let mut pad = 0;
log::trace!("loading headers; flags={:?}", flags); tracing::trace!("loading headers; flags={:?}", flags);
if head.stream_id().is_zero() {
return Err(Error::InvalidStreamId);
}
// Read the padding length // Read the padding length
if flags.is_padded() { if flags.is_padded() {
@ -241,10 +238,6 @@ impl Headers {
self.header_block.is_over_size self.header_block.is_over_size
} }
pub(crate) fn has_too_big_field(&self) -> bool {
self.header_block.has_too_big_field()
}
pub fn into_parts(self) -> (Pseudo, HeaderMap) { pub fn into_parts(self) -> (Pseudo, HeaderMap) {
(self.header_block.pseudo, self.header_block.fields) (self.header_block.pseudo, self.header_block.fields)
} }
@ -254,6 +247,11 @@ impl Headers {
&mut self.header_block.pseudo &mut self.header_block.pseudo
} }
/// Whether it has status 1xx
pub(crate) fn is_informational(&self) -> bool {
self.header_block.pseudo.is_informational()
}
pub fn fields(&self) -> &HeaderMap { pub fn fields(&self) -> &HeaderMap {
&self.header_block.fields &self.header_block.fields
} }
@ -274,8 +272,8 @@ impl Headers {
let head = self.head(); let head = self.head();
self.header_block self.header_block
.into_encoding() .into_encoding(encoder)
.encode(&head, encoder, dst, |_| {}) .encode(&head, dst, |_| {})
} }
fn head(&self) -> Head { fn head(&self) -> Head {
@ -296,6 +294,10 @@ impl fmt::Debug for Headers {
.field("stream_id", &self.stream_id) .field("stream_id", &self.stream_id)
.field("flags", &self.flags); .field("flags", &self.flags);
if let Some(ref protocol) = self.header_block.pseudo.protocol {
builder.field("protocol", protocol);
}
if let Some(ref dep) = self.stream_dep { if let Some(ref dep) = self.stream_dep {
builder.field("stream_dep", dep); builder.field("stream_dep", dep);
} }
@ -398,6 +400,10 @@ impl PushPromise {
let flags = PushPromiseFlag(head.flag()); let flags = PushPromiseFlag(head.flag());
let mut pad = 0; let mut pad = 0;
if head.stream_id().is_zero() {
return Err(Error::InvalidStreamId);
}
// Read the padding length // Read the padding length
if flags.is_padded() { if flags.is_padded() {
if src.is_empty() { if src.is_empty() {
@ -475,8 +481,6 @@ impl PushPromise {
encoder: &mut hpack::Encoder, encoder: &mut hpack::Encoder,
dst: &mut EncodeBuf<'_>, dst: &mut EncodeBuf<'_>,
) -> Option<Continuation> { ) -> Option<Continuation> {
use bytes::BufMut;
// At this point, the `is_end_headers` flag should always be set // At this point, the `is_end_headers` flag should always be set
debug_assert!(self.flags.is_end_headers()); debug_assert!(self.flags.is_end_headers());
@ -484,8 +488,8 @@ impl PushPromise {
let promised_id = self.promised_id; let promised_id = self.promised_id;
self.header_block self.header_block
.into_encoding() .into_encoding(encoder)
.encode(&head, encoder, dst, |dst| { .encode(&head, dst, |dst| {
dst.put_u32(promised_id.into()); dst.put_u32(promised_id.into());
}) })
} }
@ -524,38 +528,39 @@ impl Continuation {
Head::new(Kind::Continuation, END_HEADERS, self.stream_id) Head::new(Kind::Continuation, END_HEADERS, self.stream_id)
} }
pub fn encode( pub fn encode(self, dst: &mut EncodeBuf<'_>) -> Option<Continuation> {
self,
encoder: &mut hpack::Encoder,
dst: &mut EncodeBuf<'_>,
) -> Option<Continuation> {
// Get the CONTINUATION frame head // Get the CONTINUATION frame head
let head = self.head(); let head = self.head();
self.header_block.encode(&head, encoder, dst, |_| {}) self.header_block.encode(&head, dst, |_| {})
} }
} }
// ===== impl Pseudo ===== // ===== impl Pseudo =====
impl Pseudo { impl Pseudo {
pub fn request(method: Method, uri: Uri) -> Self { pub fn request(method: Method, uri: Uri, protocol: Option<Protocol>) -> Self {
let parts = uri::Parts::from(uri); let parts = uri::Parts::from(uri);
let mut path = parts let mut path = parts
.path_and_query .path_and_query
.map(|v| Bytes::copy_from_slice(v.as_str().as_bytes())) .map(|v| BytesStr::from(v.as_str()))
.unwrap_or_else(Bytes::new); .unwrap_or(BytesStr::from_static(""));
if path.is_empty() && method != Method::OPTIONS { match method {
path = Bytes::from_static(b"/"); Method::OPTIONS | Method::CONNECT => {}
_ if path.is_empty() => {
path = BytesStr::from_static("/");
}
_ => {}
} }
let mut pseudo = Pseudo { let mut pseudo = Pseudo {
method: Some(method), method: Some(method),
scheme: None, scheme: None,
authority: None, authority: None,
path: Some(unsafe { BytesStr::from_utf8_unchecked(path) }), path: Some(path).filter(|p| !p.is_empty()),
protocol,
status: None, status: None,
}; };
@ -569,9 +574,7 @@ impl Pseudo {
// If the URI includes an authority component, add it to the pseudo // If the URI includes an authority component, add it to the pseudo
// headers // headers
if let Some(authority) = parts.authority { if let Some(authority) = parts.authority {
pseudo.set_authority(unsafe { pseudo.set_authority(BytesStr::from(authority.as_str()));
BytesStr::from_utf8_unchecked(Bytes::copy_from_slice(authority.as_str().as_bytes()))
});
} }
pseudo pseudo
@ -583,34 +586,45 @@ impl Pseudo {
scheme: None, scheme: None,
authority: None, authority: None,
path: None, path: None,
protocol: None,
status: Some(status), status: Some(status),
} }
} }
#[cfg(feature = "unstable")]
pub fn set_status(&mut self, value: StatusCode) {
self.status = Some(value);
}
pub fn set_scheme(&mut self, scheme: uri::Scheme) { pub fn set_scheme(&mut self, scheme: uri::Scheme) {
let bytes = match scheme.as_str() { let bytes_str = match scheme.as_str() {
"http" => Bytes::from_static(b"http"), "http" => BytesStr::from_static("http"),
"https" => Bytes::from_static(b"https"), "https" => BytesStr::from_static("https"),
s => Bytes::copy_from_slice(s.as_bytes()), s => BytesStr::from(s),
}; };
self.scheme = Some(unsafe { BytesStr::from_utf8_unchecked(bytes) }); self.scheme = Some(bytes_str);
}
#[cfg(feature = "unstable")]
pub fn set_protocol(&mut self, protocol: Protocol) {
self.protocol = Some(protocol);
} }
pub fn set_authority(&mut self, authority: BytesStr) { pub fn set_authority(&mut self, authority: BytesStr) {
self.authority = Some(authority); self.authority = Some(authority);
} }
/// Whether it has status 1xx
pub(crate) fn is_informational(&self) -> bool {
self.status
.map_or(false, |status| status.is_informational())
}
} }
// ===== impl EncodingHeaderBlock ===== // ===== impl EncodingHeaderBlock =====
impl EncodingHeaderBlock { impl EncodingHeaderBlock {
fn encode<F>( fn encode<F>(mut self, head: &Head, dst: &mut EncodeBuf<'_>, f: F) -> Option<Continuation>
mut self,
head: &Head,
encoder: &mut hpack::Encoder,
dst: &mut EncodeBuf<'_>,
f: F,
) -> Option<Continuation>
where where
F: FnOnce(&mut EncodeBuf<'_>), F: FnOnce(&mut EncodeBuf<'_>),
{ {
@ -626,15 +640,17 @@ impl EncodingHeaderBlock {
f(dst); f(dst);
// Now, encode the header payload // Now, encode the header payload
let continuation = match encoder.encode(self.hpack, &mut self.headers, dst) { let continuation = if self.hpack.len() > dst.remaining_mut() {
hpack::Encode::Full => None, dst.put_slice(&self.hpack.split_to(dst.remaining_mut()));
hpack::Encode::Partial(state) => Some(Continuation {
Some(Continuation {
stream_id: head.stream_id(), stream_id: head.stream_id(),
header_block: EncodingHeaderBlock { header_block: self,
hpack: Some(state), })
headers: self.headers, } else {
}, dst.put_slice(&self.hpack);
}),
None
}; };
// Compute the header block length // Compute the header block length
@ -682,6 +698,10 @@ impl Iterator for Iter {
return Some(Path(path)); return Some(Path(path));
} }
if let Some(protocol) = pseudo.protocol.take() {
return Some(Protocol(protocol));
}
if let Some(status) = pseudo.status.take() { if let Some(status) = pseudo.status.take() {
return Some(Status(status)); return Some(Status(status));
} }
@ -817,19 +837,19 @@ impl HeaderBlock {
macro_rules! set_pseudo { macro_rules! set_pseudo {
($field:ident, $val:expr) => {{ ($field:ident, $val:expr) => {{
if reg { if reg {
log::trace!("load_hpack; header malformed -- pseudo not at head of block"); tracing::trace!("load_hpack; header malformed -- pseudo not at head of block");
malformed = true; malformed = true;
} else if self.pseudo.$field.is_some() { } else if self.pseudo.$field.is_some() {
log::trace!("load_hpack; header malformed -- repeated pseudo"); tracing::trace!("load_hpack; header malformed -- repeated pseudo");
malformed = true; malformed = true;
} else { } else {
let __val = $val; let __val = $val;
headers_size += headers_size +=
decoded_header_size(stringify!($ident).len() + 1, __val.as_str().len()); decoded_header_size(stringify!($field).len() + 1, __val.as_str().len());
if headers_size < max_header_list_size { if headers_size < max_header_list_size {
self.pseudo.$field = Some(__val); self.pseudo.$field = Some(__val);
} else if !self.is_over_size { } else if !self.is_over_size {
log::trace!("load_hpack; header list size over max"); tracing::trace!("load_hpack; header list size over max");
self.is_over_size = true; self.is_over_size = true;
} }
} }
@ -856,10 +876,13 @@ impl HeaderBlock {
|| name == "keep-alive" || name == "keep-alive"
|| name == "proxy-connection" || name == "proxy-connection"
{ {
log::trace!("load_hpack; connection level header"); tracing::trace!("load_hpack; connection level header");
malformed = true; malformed = true;
} else if name == header::TE && value != "trailers" { } else if name == header::TE && value != "trailers" {
log::trace!("load_hpack; TE header not set to trailers; val={:?}", value); tracing::trace!(
"load_hpack; TE header not set to trailers; val={:?}",
value
);
malformed = true; malformed = true;
} else { } else {
reg = true; reg = true;
@ -868,7 +891,7 @@ impl HeaderBlock {
if headers_size < max_header_list_size { if headers_size < max_header_list_size {
self.fields.append(name, value); self.fields.append(name, value);
} else if !self.is_over_size { } else if !self.is_over_size {
log::trace!("load_hpack; header list size over max"); tracing::trace!("load_hpack; header list size over max");
self.is_over_size = true; self.is_over_size = true;
} }
} }
@ -877,30 +900,35 @@ impl HeaderBlock {
Method(v) => set_pseudo!(method, v), Method(v) => set_pseudo!(method, v),
Scheme(v) => set_pseudo!(scheme, v), Scheme(v) => set_pseudo!(scheme, v),
Path(v) => set_pseudo!(path, v), Path(v) => set_pseudo!(path, v),
Protocol(v) => set_pseudo!(protocol, v),
Status(v) => set_pseudo!(status, v), Status(v) => set_pseudo!(status, v),
} }
}); });
if let Err(e) = res { if let Err(e) = res {
log::trace!("hpack decoding error; err={:?}", e); tracing::trace!("hpack decoding error; err={:?}", e);
return Err(e.into()); return Err(e.into());
} }
if malformed { if malformed {
log::trace!("malformed message"); tracing::trace!("malformed message");
return Err(Error::MalformedMessage); return Err(Error::MalformedMessage);
} }
Ok(()) Ok(())
} }
fn into_encoding(self) -> EncodingHeaderBlock { fn into_encoding(self, encoder: &mut hpack::Encoder) -> EncodingHeaderBlock {
let mut hpack = BytesMut::new();
let headers = Iter {
pseudo: Some(self.pseudo),
fields: self.fields.into_iter(),
};
encoder.encode(headers, &mut hpack);
EncodingHeaderBlock { EncodingHeaderBlock {
hpack: None, hpack: hpack.freeze(),
headers: Iter {
pseudo: Some(self.pseudo),
fields: self.fields.into_iter(),
},
} }
} }
@ -933,48 +961,79 @@ impl HeaderBlock {
.map(|(name, value)| decoded_header_size(name.as_str().len(), value.len())) .map(|(name, value)| decoded_header_size(name.as_str().len(), value.len()))
.sum::<usize>() .sum::<usize>()
} }
/// Iterate over all pseudos and headers to see if any individual pair
/// would be too large to encode.
pub(crate) fn has_too_big_field(&self) -> bool {
macro_rules! pseudo_size {
($name:ident) => {{
self.pseudo
.$name
.as_ref()
.map(|m| decoded_header_size(stringify!($name).len() + 1, m.as_str().len()))
.unwrap_or(0)
}};
}
if pseudo_size!(method) > MAX_HEADER_LENGTH {
return true;
}
if pseudo_size!(scheme) > MAX_HEADER_LENGTH {
return true;
}
if pseudo_size!(authority) > MAX_HEADER_LENGTH {
return true;
}
if pseudo_size!(path) > MAX_HEADER_LENGTH {
return true;
}
// skip :status, its never going to be too big
for (name, value) in &self.fields {
if decoded_header_size(name.as_str().len(), value.len()) > MAX_HEADER_LENGTH {
return true;
}
}
false
}
} }
fn decoded_header_size(name: usize, value: usize) -> usize { fn decoded_header_size(name: usize, value: usize) -> usize {
name + value + 32 name + value + 32
} }
#[cfg(test)]
mod test {
use std::iter::FromIterator;
use http::HeaderValue;
use super::*;
use crate::frame;
use crate::hpack::{huffman, Encoder};
#[test]
fn test_nameless_header_at_resume() {
let mut encoder = Encoder::default();
let mut dst = BytesMut::new();
let headers = Headers::new(
StreamId::ZERO,
Default::default(),
HeaderMap::from_iter(vec![
(
HeaderName::from_static("hello"),
HeaderValue::from_static("world"),
),
(
HeaderName::from_static("hello"),
HeaderValue::from_static("zomg"),
),
(
HeaderName::from_static("hello"),
HeaderValue::from_static("sup"),
),
]),
);
let continuation = headers
.encode(&mut encoder, &mut (&mut dst).limit(frame::HEADER_LEN + 8))
.unwrap();
assert_eq!(17, dst.len());
assert_eq!([0, 0, 8, 1, 0, 0, 0, 0, 0], &dst[0..9]);
assert_eq!(&[0x40, 0x80 | 4], &dst[9..11]);
assert_eq!("hello", huff_decode(&dst[11..15]));
assert_eq!(0x80 | 4, dst[15]);
let mut world = dst[16..17].to_owned();
dst.clear();
assert!(continuation
.encode(&mut (&mut dst).limit(frame::HEADER_LEN + 16))
.is_none());
world.extend_from_slice(&dst[9..12]);
assert_eq!("world", huff_decode(&world));
assert_eq!(24, dst.len());
assert_eq!([0, 0, 15, 9, 4, 0, 0, 0, 0], &dst[0..9]);
// // Next is not indexed
assert_eq!(&[15, 47, 0x80 | 3], &dst[12..15]);
assert_eq!("zomg", huff_decode(&dst[15..18]));
assert_eq!(&[15, 47, 0x80 | 3], &dst[18..21]);
assert_eq!("sup", huff_decode(&dst[21..]));
}
fn huff_decode(src: &[u8]) -> BytesMut {
let mut buf = BytesMut::new();
huffman::decode(src, &mut buf).unwrap()
}
}

1
third_party/rust/h2/src/frame/mod.rs поставляемый
Просмотреть файл

@ -15,7 +15,6 @@ use std::fmt;
/// let buf: [u8; 4] = [0, 0, 0, 1]; /// let buf: [u8; 4] = [0, 0, 0, 1];
/// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32)); /// assert_eq!(1u32, unpack_octets_4!(buf, 0, u32));
/// ``` /// ```
#[macro_escape]
macro_rules! unpack_octets_4 { macro_rules! unpack_octets_4 {
// TODO: Get rid of this macro // TODO: Get rid of this macro
($buf:expr, $offset:expr, $tip:ty) => { ($buf:expr, $offset:expr, $tip:ty) => {

2
third_party/rust/h2/src/frame/ping.rs поставляемый
Просмотреть файл

@ -85,7 +85,7 @@ impl Ping {
pub fn encode<B: BufMut>(&self, dst: &mut B) { pub fn encode<B: BufMut>(&self, dst: &mut B) {
let sz = self.payload.len(); let sz = self.payload.len();
log::trace!("encoding PING; ack={} len={}", self.ack, sz); tracing::trace!("encoding PING; ack={} len={}", self.ack, sz);
let flags = if self.ack { ACK_FLAG } else { 0 }; let flags = if self.ack { ACK_FLAG } else { 0 };
let head = Head::new(Kind::Ping, flags, StreamId::zero()); let head = Head::new(Kind::Ping, flags, StreamId::zero());

2
third_party/rust/h2/src/frame/reason.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
use std::fmt; use std::fmt;
/// HTTP/2.0 error codes. /// HTTP/2 error codes.
/// ///
/// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the /// Error codes are used in `RST_STREAM` and `GOAWAY` frames to convey the
/// reasons for the stream or connection error. For example, /// reasons for the stream or connection error. For example,

4
third_party/rust/h2/src/frame/reset.rs поставляемый
Просмотреть файл

@ -2,7 +2,7 @@ use crate::frame::{self, Error, Head, Kind, Reason, StreamId};
use bytes::BufMut; use bytes::BufMut;
#[derive(Debug, Eq, PartialEq)] #[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub struct Reset { pub struct Reset {
stream_id: StreamId, stream_id: StreamId,
error_code: Reason, error_code: Reason,
@ -38,7 +38,7 @@ impl Reset {
} }
pub fn encode<B: BufMut>(&self, dst: &mut B) { pub fn encode<B: BufMut>(&self, dst: &mut B) {
log::trace!( tracing::trace!(
"encoding RESET; id={:?} code={:?}", "encoding RESET; id={:?} code={:?}",
self.stream_id, self.stream_id,
self.error_code self.error_code

37
third_party/rust/h2/src/frame/settings.rs поставляемый
Просмотреть файл

@ -13,6 +13,7 @@ pub struct Settings {
initial_window_size: Option<u32>, initial_window_size: Option<u32>,
max_frame_size: Option<u32>, max_frame_size: Option<u32>,
max_header_list_size: Option<u32>, max_header_list_size: Option<u32>,
enable_connect_protocol: Option<u32>,
} }
/// An enum that lists all valid settings that can be sent in a SETTINGS /// An enum that lists all valid settings that can be sent in a SETTINGS
@ -27,6 +28,7 @@ pub enum Setting {
InitialWindowSize(u32), InitialWindowSize(u32),
MaxFrameSize(u32), MaxFrameSize(u32),
MaxHeaderListSize(u32), MaxHeaderListSize(u32),
EnableConnectProtocol(u32),
} }
#[derive(Copy, Clone, Eq, PartialEq, Default)] #[derive(Copy, Clone, Eq, PartialEq, Default)]
@ -99,14 +101,22 @@ impl Settings {
self.max_header_list_size = size; self.max_header_list_size = size;
} }
pub fn is_push_enabled(&self) -> bool { pub fn is_push_enabled(&self) -> Option<bool> {
self.enable_push.unwrap_or(1) != 0 self.enable_push.map(|val| val != 0)
} }
pub fn set_enable_push(&mut self, enable: bool) { pub fn set_enable_push(&mut self, enable: bool) {
self.enable_push = Some(enable as u32); self.enable_push = Some(enable as u32);
} }
pub fn is_extended_connect_protocol_enabled(&self) -> Option<bool> {
self.enable_connect_protocol.map(|val| val != 0)
}
pub fn set_enable_connect_protocol(&mut self, val: Option<u32>) {
self.enable_connect_protocol = val;
}
pub fn header_table_size(&self) -> Option<u32> { pub fn header_table_size(&self) -> Option<u32> {
self.header_table_size self.header_table_size
} }
@ -141,7 +151,7 @@ impl Settings {
// Ensure the payload length is correct, each setting is 6 bytes long. // Ensure the payload length is correct, each setting is 6 bytes long.
if payload.len() % 6 != 0 { if payload.len() % 6 != 0 {
log::debug!("invalid settings payload length; len={:?}", payload.len()); tracing::debug!("invalid settings payload length; len={:?}", payload.len());
return Err(Error::InvalidPayloadAckSettings); return Err(Error::InvalidPayloadAckSettings);
} }
@ -181,6 +191,14 @@ impl Settings {
Some(MaxHeaderListSize(val)) => { Some(MaxHeaderListSize(val)) => {
settings.max_header_list_size = Some(val); settings.max_header_list_size = Some(val);
} }
Some(EnableConnectProtocol(val)) => match val {
0 | 1 => {
settings.enable_connect_protocol = Some(val);
}
_ => {
return Err(Error::InvalidSettingValue);
}
},
None => {} None => {}
} }
} }
@ -199,13 +217,13 @@ impl Settings {
let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero()); let head = Head::new(Kind::Settings, self.flags.into(), StreamId::zero());
let payload_len = self.payload_len(); let payload_len = self.payload_len();
log::trace!("encoding SETTINGS; len={}", payload_len); tracing::trace!("encoding SETTINGS; len={}", payload_len);
head.encode(payload_len, dst); head.encode(payload_len, dst);
// Encode the settings // Encode the settings
self.for_each(|setting| { self.for_each(|setting| {
log::trace!("encoding setting; val={:?}", setting); tracing::trace!("encoding setting; val={:?}", setting);
setting.encode(dst) setting.encode(dst)
}); });
} }
@ -236,6 +254,10 @@ impl Settings {
if let Some(v) = self.max_header_list_size { if let Some(v) = self.max_header_list_size {
f(MaxHeaderListSize(v)); f(MaxHeaderListSize(v));
} }
if let Some(v) = self.enable_connect_protocol {
f(EnableConnectProtocol(v));
}
} }
} }
@ -269,6 +291,9 @@ impl fmt::Debug for Settings {
Setting::MaxHeaderListSize(v) => { Setting::MaxHeaderListSize(v) => {
builder.field("max_header_list_size", &v); builder.field("max_header_list_size", &v);
} }
Setting::EnableConnectProtocol(v) => {
builder.field("enable_connect_protocol", &v);
}
}); });
builder.finish() builder.finish()
@ -291,6 +316,7 @@ impl Setting {
4 => Some(InitialWindowSize(val)), 4 => Some(InitialWindowSize(val)),
5 => Some(MaxFrameSize(val)), 5 => Some(MaxFrameSize(val)),
6 => Some(MaxHeaderListSize(val)), 6 => Some(MaxHeaderListSize(val)),
8 => Some(EnableConnectProtocol(val)),
_ => None, _ => None,
} }
} }
@ -322,6 +348,7 @@ impl Setting {
InitialWindowSize(v) => (4, v), InitialWindowSize(v) => (4, v),
MaxFrameSize(v) => (5, v), MaxFrameSize(v) => (5, v),
MaxHeaderListSize(v) => (6, v), MaxHeaderListSize(v) => (6, v),
EnableConnectProtocol(v) => (8, v),
}; };
dst.put_u16(kind); dst.put_u16(kind);

Просмотреть файл

@ -48,7 +48,7 @@ impl WindowUpdate {
} }
pub fn encode<B: BufMut>(&self, dst: &mut B) { pub fn encode<B: BufMut>(&self, dst: &mut B) {
log::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id); tracing::trace!("encoding WINDOW_UPDATE; id={:?}", self.stream_id);
let head = Head::new(Kind::WindowUpdate, 0, self.stream_id); let head = Head::new(Kind::WindowUpdate, 0, self.stream_id);
head.encode(4, dst); head.encode(4, dst);
dst.put_u32(self.size_increment); dst.put_u32(self.size_increment);

28
third_party/rust/h2/src/fuzz_bridge.rs поставляемый Normal file
Просмотреть файл

@ -0,0 +1,28 @@
#[cfg(fuzzing)]
pub mod fuzz_logic {
use crate::hpack;
use bytes::BytesMut;
use http::header::HeaderName;
use std::io::Cursor;
pub fn fuzz_hpack(data_: &[u8]) {
let mut decoder_ = hpack::Decoder::new(0);
let mut buf = BytesMut::new();
buf.extend(data_);
let _dec_res = decoder_.decode(&mut Cursor::new(&mut buf), |_h| {});
if let Ok(s) = std::str::from_utf8(data_) {
if let Ok(h) = http::Method::from_bytes(s.as_bytes()) {
let m_ = hpack::Header::Method(h);
let mut encoder = hpack::Encoder::new(0, 0);
let _res = encode(&mut encoder, vec![m_]);
}
}
}
fn encode(e: &mut hpack::Encoder, hdrs: Vec<hpack::Header<Option<HeaderName>>>) -> BytesMut {
let mut dst = BytesMut::with_capacity(1024);
e.encode(&mut hdrs.into_iter(), &mut dst);
dst
}
}

150
third_party/rust/h2/src/hpack/decoder.rs поставляемый
Просмотреть файл

@ -142,6 +142,12 @@ struct Table {
max_size: usize, max_size: usize,
} }
struct StringMarker {
offset: usize,
len: usize,
string: Option<Bytes>,
}
// ===== impl Decoder ===== // ===== impl Decoder =====
impl Decoder { impl Decoder {
@ -183,7 +189,10 @@ impl Decoder {
self.last_max_update = size; self.last_max_update = size;
} }
log::trace!("decode"); let span = tracing::trace_span!("hpack::decode");
let _e = span.enter();
tracing::trace!("decode");
while let Some(ty) = peek_u8(src) { while let Some(ty) = peek_u8(src) {
// At this point we are always at the beginning of the next block // At this point we are always at the beginning of the next block
@ -191,14 +200,14 @@ impl Decoder {
// determined from the first byte. // determined from the first byte.
match Representation::load(ty)? { match Representation::load(ty)? {
Indexed => { Indexed => {
log::trace!(" Indexed; rem={:?}", src.remaining()); tracing::trace!(rem = src.remaining(), kind = %"Indexed");
can_resize = false; can_resize = false;
let entry = self.decode_indexed(src)?; let entry = self.decode_indexed(src)?;
consume(src); consume(src);
f(entry); f(entry);
} }
LiteralWithIndexing => { LiteralWithIndexing => {
log::trace!(" LiteralWithIndexing; rem={:?}", src.remaining()); tracing::trace!(rem = src.remaining(), kind = %"LiteralWithIndexing");
can_resize = false; can_resize = false;
let entry = self.decode_literal(src, true)?; let entry = self.decode_literal(src, true)?;
@ -209,14 +218,14 @@ impl Decoder {
f(entry); f(entry);
} }
LiteralWithoutIndexing => { LiteralWithoutIndexing => {
log::trace!(" LiteralWithoutIndexing; rem={:?}", src.remaining()); tracing::trace!(rem = src.remaining(), kind = %"LiteralWithoutIndexing");
can_resize = false; can_resize = false;
let entry = self.decode_literal(src, false)?; let entry = self.decode_literal(src, false)?;
consume(src); consume(src);
f(entry); f(entry);
} }
LiteralNeverIndexed => { LiteralNeverIndexed => {
log::trace!(" LiteralNeverIndexed; rem={:?}", src.remaining()); tracing::trace!(rem = src.remaining(), kind = %"LiteralNeverIndexed");
can_resize = false; can_resize = false;
let entry = self.decode_literal(src, false)?; let entry = self.decode_literal(src, false)?;
consume(src); consume(src);
@ -226,7 +235,7 @@ impl Decoder {
f(entry); f(entry);
} }
SizeUpdate => { SizeUpdate => {
log::trace!(" SizeUpdate; rem={:?}", src.remaining()); tracing::trace!(rem = src.remaining(), kind = %"SizeUpdate");
if !can_resize { if !can_resize {
return Err(DecoderError::InvalidMaxDynamicSize); return Err(DecoderError::InvalidMaxDynamicSize);
} }
@ -248,10 +257,10 @@ impl Decoder {
return Err(DecoderError::InvalidMaxDynamicSize); return Err(DecoderError::InvalidMaxDynamicSize);
} }
log::debug!( tracing::debug!(
"Decoder changed max table size from {} to {}", from = self.table.size(),
self.table.size(), to = new_size,
new_size "Decoder changed max table size"
); );
self.table.set_max_size(new_size); self.table.set_max_size(new_size);
@ -276,10 +285,13 @@ impl Decoder {
// First, read the header name // First, read the header name
if table_idx == 0 { if table_idx == 0 {
let old_pos = buf.position();
let name_marker = self.try_decode_string(buf)?;
let value_marker = self.try_decode_string(buf)?;
buf.set_position(old_pos);
// Read the name as a literal // Read the name as a literal
let name = self.decode_string(buf)?; let name = name_marker.consume(buf);
let value = self.decode_string(buf)?; let value = value_marker.consume(buf);
Header::new(name, value) Header::new(name, value)
} else { } else {
let e = self.table.get(table_idx)?; let e = self.table.get(table_idx)?;
@ -289,7 +301,11 @@ impl Decoder {
} }
} }
fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<Bytes, DecoderError> { fn try_decode_string(
&mut self,
buf: &mut Cursor<&mut BytesMut>,
) -> Result<StringMarker, DecoderError> {
let old_pos = buf.position();
const HUFF_FLAG: u8 = 0b1000_0000; const HUFF_FLAG: u8 = 0b1000_0000;
// The first bit in the first byte contains the huffman encoded flag. // The first bit in the first byte contains the huffman encoded flag.
@ -302,25 +318,38 @@ impl Decoder {
let len = decode_int(buf, 7)?; let len = decode_int(buf, 7)?;
if len > buf.remaining() { if len > buf.remaining() {
log::trace!( tracing::trace!(len, remaining = buf.remaining(), "decode_string underflow",);
"decode_string underflow; len={}; remaining={}",
len,
buf.remaining()
);
return Err(DecoderError::NeedMore(NeedMore::StringUnderflow)); return Err(DecoderError::NeedMore(NeedMore::StringUnderflow));
} }
let offset = (buf.position() - old_pos) as usize;
if huff { if huff {
let ret = { let ret = {
let raw = &buf.bytes()[..len]; let raw = &buf.chunk()[..len];
huffman::decode(raw, &mut self.buffer).map(BytesMut::freeze) huffman::decode(raw, &mut self.buffer).map(|buf| StringMarker {
offset,
len,
string: Some(BytesMut::freeze(buf)),
})
}; };
buf.advance(len); buf.advance(len);
return ret; ret
} else {
buf.advance(len);
Ok(StringMarker {
offset,
len,
string: None,
})
} }
}
Ok(take(buf, len)) fn decode_string(&mut self, buf: &mut Cursor<&mut BytesMut>) -> Result<Bytes, DecoderError> {
let old_pos = buf.position();
let marker = self.try_decode_string(buf)?;
buf.set_position(old_pos);
Ok(marker.consume(buf))
} }
} }
@ -420,7 +449,7 @@ fn decode_int<B: Buf>(buf: &mut B, prefix_size: u8) -> Result<usize, DecoderErro
fn peek_u8<B: Buf>(buf: &mut B) -> Option<u8> { fn peek_u8<B: Buf>(buf: &mut B) -> Option<u8> {
if buf.has_remaining() { if buf.has_remaining() {
Some(buf.bytes()[0]) Some(buf.chunk()[0])
} else { } else {
None None
} }
@ -434,6 +463,19 @@ fn take(buf: &mut Cursor<&mut BytesMut>, n: usize) -> Bytes {
head.freeze() head.freeze()
} }
impl StringMarker {
fn consume(self, buf: &mut Cursor<&mut BytesMut>) -> Bytes {
buf.advance(self.offset);
match self.string {
Some(string) => {
buf.advance(self.len);
string
}
None => take(buf, self.len),
}
}
}
fn consume(buf: &mut Cursor<&mut BytesMut>) { fn consume(buf: &mut Cursor<&mut BytesMut>) {
// remove bytes from the internal BytesMut when they have been successfully // remove bytes from the internal BytesMut when they have been successfully
// decoded. This is a more permanent cursor position, which will be // decoded. This is a more permanent cursor position, which will be
@ -578,13 +620,13 @@ pub fn get_static(idx: usize) -> Header {
use http::header::HeaderValue; use http::header::HeaderValue;
match idx { match idx {
1 => Header::Authority(from_static("")), 1 => Header::Authority(BytesStr::from_static("")),
2 => Header::Method(Method::GET), 2 => Header::Method(Method::GET),
3 => Header::Method(Method::POST), 3 => Header::Method(Method::POST),
4 => Header::Path(from_static("/")), 4 => Header::Path(BytesStr::from_static("/")),
5 => Header::Path(from_static("/index.html")), 5 => Header::Path(BytesStr::from_static("/index.html")),
6 => Header::Scheme(from_static("http")), 6 => Header::Scheme(BytesStr::from_static("http")),
7 => Header::Scheme(from_static("https")), 7 => Header::Scheme(BytesStr::from_static("https")),
8 => Header::Status(StatusCode::OK), 8 => Header::Status(StatusCode::OK),
9 => Header::Status(StatusCode::NO_CONTENT), 9 => Header::Status(StatusCode::NO_CONTENT),
10 => Header::Status(StatusCode::PARTIAL_CONTENT), 10 => Header::Status(StatusCode::PARTIAL_CONTENT),
@ -784,10 +826,6 @@ pub fn get_static(idx: usize) -> Header {
} }
} }
fn from_static(s: &'static str) -> BytesStr {
unsafe { BytesStr::from_utf8_unchecked(Bytes::from_static(s.as_bytes())) }
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
@ -852,7 +890,51 @@ mod test {
fn huff_encode(src: &[u8]) -> BytesMut { fn huff_encode(src: &[u8]) -> BytesMut {
let mut buf = BytesMut::new(); let mut buf = BytesMut::new();
huffman::encode(src, &mut buf).unwrap(); huffman::encode(src, &mut buf);
buf buf
} }
#[test]
fn test_decode_continuation_header_with_non_huff_encoded_name() {
let mut de = Decoder::new(0);
let value = huff_encode(b"bar");
let mut buf = BytesMut::new();
// header name is non_huff encoded
buf.extend(&[0b01000000, 0x00 | 3]);
buf.extend(b"foo");
// header value is partial
buf.extend(&[0x80 | 3]);
buf.extend(&value[0..1]);
let mut res = vec![];
let e = de
.decode(&mut Cursor::new(&mut buf), |h| {
res.push(h);
})
.unwrap_err();
// decode error because the header value is partial
assert_eq!(e, DecoderError::NeedMore(NeedMore::StringUnderflow));
// extend buf with the remaining header value
buf.extend(&value[1..]);
let _ = de
.decode(&mut Cursor::new(&mut buf), |h| {
res.push(h);
})
.unwrap();
assert_eq!(res.len(), 1);
assert_eq!(de.table.size(), 0);
match res[0] {
Header::Field {
ref name,
ref value,
} => {
assert_eq!(name, "foo");
assert_eq!(value, "bar");
}
_ => panic!(),
}
}
} }

261
third_party/rust/h2/src/hpack/encoder.rs поставляемый
Просмотреть файл

@ -1,34 +1,15 @@
use super::table::{Index, Table}; use super::table::{Index, Table};
use super::{huffman, Header}; use super::{huffman, Header};
use bytes::{buf::ext::Limit, BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use http::header::{HeaderName, HeaderValue}; use http::header::{HeaderName, HeaderValue};
type DstBuf<'a> = Limit<&'a mut BytesMut>;
#[derive(Debug)] #[derive(Debug)]
pub struct Encoder { pub struct Encoder {
table: Table, table: Table,
size_update: Option<SizeUpdate>, size_update: Option<SizeUpdate>,
} }
#[derive(Debug)]
pub enum Encode {
Full,
Partial(EncodeState),
}
#[derive(Debug)]
pub struct EncodeState {
index: Index,
value: Option<HeaderValue>,
}
#[derive(Debug, PartialEq, Eq)]
pub enum EncoderError {
BufferOverflow,
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)] #[derive(Debug, Copy, Clone, Eq, PartialEq)]
enum SizeUpdate { enum SizeUpdate {
One(usize), One(usize),
@ -77,56 +58,24 @@ impl Encoder {
} }
/// Encode a set of headers into the provide buffer /// Encode a set of headers into the provide buffer
pub fn encode<I>( pub fn encode<I>(&mut self, headers: I, dst: &mut BytesMut)
&mut self,
resume: Option<EncodeState>,
headers: &mut I,
dst: &mut DstBuf<'_>,
) -> Encode
where where
I: Iterator<Item = Header<Option<HeaderName>>>, I: IntoIterator<Item = Header<Option<HeaderName>>>,
{ {
let pos = position(dst); let span = tracing::trace_span!("hpack::encode");
let _e = span.enter();
if let Err(e) = self.encode_size_updates(dst) { self.encode_size_updates(dst);
if e == EncoderError::BufferOverflow {
rewind(dst, pos);
}
unreachable!("encode_size_updates errored");
}
let mut last_index = None; let mut last_index = None;
if let Some(resume) = resume {
let pos = position(dst);
let res = match resume.value {
Some(ref value) => self.encode_header_without_name(&resume.index, value, dst),
None => self.encode_header(&resume.index, dst),
};
if res.is_err() {
rewind(dst, pos);
return Encode::Partial(resume);
}
last_index = Some(resume.index);
}
for header in headers { for header in headers {
let pos = position(dst);
match header.reify() { match header.reify() {
// The header has an associated name. In which case, try to // The header has an associated name. In which case, try to
// index it in the table. // index it in the table.
Ok(header) => { Ok(header) => {
let index = self.table.index(header); let index = self.table.index(header);
let res = self.encode_header(&index, dst); self.encode_header(&index, dst);
if res.is_err() {
rewind(dst, pos);
return Encode::Partial(EncodeState { index, value: None });
}
last_index = Some(index); last_index = Some(index);
} }
@ -135,77 +84,61 @@ impl Encoder {
// which case, we skip table lookup and just use the same index // which case, we skip table lookup and just use the same index
// as the previous entry. // as the previous entry.
Err(value) => { Err(value) => {
let res = self.encode_header_without_name( self.encode_header_without_name(
last_index.as_ref().unwrap_or_else(|| { last_index.as_ref().unwrap_or_else(|| {
panic!("encoding header without name, but no previous index to use for name"); panic!("encoding header without name, but no previous index to use for name");
}), }),
&value, &value,
dst, dst,
); );
if res.is_err() {
rewind(dst, pos);
return Encode::Partial(EncodeState {
index: last_index.unwrap(), // checked just above
value: Some(value),
});
}
} }
}; }
} }
Encode::Full
} }
fn encode_size_updates(&mut self, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { fn encode_size_updates(&mut self, dst: &mut BytesMut) {
match self.size_update.take() { match self.size_update.take() {
Some(SizeUpdate::One(val)) => { Some(SizeUpdate::One(val)) => {
self.table.resize(val); self.table.resize(val);
encode_size_update(val, dst)?; encode_size_update(val, dst);
} }
Some(SizeUpdate::Two(min, max)) => { Some(SizeUpdate::Two(min, max)) => {
self.table.resize(min); self.table.resize(min);
self.table.resize(max); self.table.resize(max);
encode_size_update(min, dst)?; encode_size_update(min, dst);
encode_size_update(max, dst)?; encode_size_update(max, dst);
} }
None => {} None => {}
} }
Ok(())
} }
fn encode_header(&mut self, index: &Index, dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { fn encode_header(&mut self, index: &Index, dst: &mut BytesMut) {
match *index { match *index {
Index::Indexed(idx, _) => { Index::Indexed(idx, _) => {
encode_int(idx, 7, 0x80, dst)?; encode_int(idx, 7, 0x80, dst);
} }
Index::Name(idx, _) => { Index::Name(idx, _) => {
let header = self.table.resolve(&index); let header = self.table.resolve(&index);
encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst)?; encode_not_indexed(idx, header.value_slice(), header.is_sensitive(), dst);
} }
Index::Inserted(_) => { Index::Inserted(_) => {
let header = self.table.resolve(&index); let header = self.table.resolve(&index);
assert!(!header.is_sensitive()); assert!(!header.is_sensitive());
if !dst.has_remaining_mut() {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8(0b0100_0000); dst.put_u8(0b0100_0000);
encode_str(header.name().as_slice(), dst)?; encode_str(header.name().as_slice(), dst);
encode_str(header.value_slice(), dst)?; encode_str(header.value_slice(), dst);
} }
Index::InsertedValue(idx, _) => { Index::InsertedValue(idx, _) => {
let header = self.table.resolve(&index); let header = self.table.resolve(&index);
assert!(!header.is_sensitive()); assert!(!header.is_sensitive());
encode_int(idx, 6, 0b0100_0000, dst)?; encode_int(idx, 6, 0b0100_0000, dst);
encode_str(header.value_slice(), dst)?; encode_str(header.value_slice(), dst);
} }
Index::NotIndexed(_) => { Index::NotIndexed(_) => {
let header = self.table.resolve(&index); let header = self.table.resolve(&index);
@ -215,19 +148,17 @@ impl Encoder {
header.value_slice(), header.value_slice(),
header.is_sensitive(), header.is_sensitive(),
dst, dst,
)?; );
} }
} }
Ok(())
} }
fn encode_header_without_name( fn encode_header_without_name(
&mut self, &mut self,
last: &Index, last: &Index,
value: &HeaderValue, value: &HeaderValue,
dst: &mut DstBuf<'_>, dst: &mut BytesMut,
) -> Result<(), EncoderError> { ) {
match *last { match *last {
Index::Indexed(..) Index::Indexed(..)
| Index::Name(..) | Index::Name(..)
@ -235,7 +166,7 @@ impl Encoder {
| Index::InsertedValue(..) => { | Index::InsertedValue(..) => {
let idx = self.table.resolve_idx(last); let idx = self.table.resolve_idx(last);
encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst)?; encode_not_indexed(idx, value.as_ref(), value.is_sensitive(), dst);
} }
Index::NotIndexed(_) => { Index::NotIndexed(_) => {
let last = self.table.resolve(last); let last = self.table.resolve(last);
@ -245,11 +176,9 @@ impl Encoder {
value.as_ref(), value.as_ref(),
value.is_sensitive(), value.is_sensitive(),
dst, dst,
)?; );
} }
} }
Ok(())
} }
} }
@ -259,52 +188,32 @@ impl Default for Encoder {
} }
} }
fn encode_size_update<B: BufMut>(val: usize, dst: &mut B) -> Result<(), EncoderError> { fn encode_size_update(val: usize, dst: &mut BytesMut) {
encode_int(val, 5, 0b0010_0000, dst) encode_int(val, 5, 0b0010_0000, dst)
} }
fn encode_not_indexed( fn encode_not_indexed(name: usize, value: &[u8], sensitive: bool, dst: &mut BytesMut) {
name: usize,
value: &[u8],
sensitive: bool,
dst: &mut DstBuf<'_>,
) -> Result<(), EncoderError> {
if sensitive { if sensitive {
encode_int(name, 4, 0b10000, dst)?; encode_int(name, 4, 0b10000, dst);
} else { } else {
encode_int(name, 4, 0, dst)?; encode_int(name, 4, 0, dst);
} }
encode_str(value, dst)?; encode_str(value, dst);
Ok(())
} }
fn encode_not_indexed2( fn encode_not_indexed2(name: &[u8], value: &[u8], sensitive: bool, dst: &mut BytesMut) {
name: &[u8],
value: &[u8],
sensitive: bool,
dst: &mut DstBuf<'_>,
) -> Result<(), EncoderError> {
if !dst.has_remaining_mut() {
return Err(EncoderError::BufferOverflow);
}
if sensitive { if sensitive {
dst.put_u8(0b10000); dst.put_u8(0b10000);
} else { } else {
dst.put_u8(0); dst.put_u8(0);
} }
encode_str(name, dst)?; encode_str(name, dst);
encode_str(value, dst)?; encode_str(value, dst);
Ok(())
} }
fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> { fn encode_str(val: &[u8], dst: &mut BytesMut) {
if !dst.has_remaining_mut() {
return Err(EncoderError::BufferOverflow);
}
if !val.is_empty() { if !val.is_empty() {
let idx = position(dst); let idx = position(dst);
@ -312,50 +221,43 @@ fn encode_str(val: &[u8], dst: &mut DstBuf<'_>) -> Result<(), EncoderError> {
dst.put_u8(0); dst.put_u8(0);
// Encode with huffman // Encode with huffman
huffman::encode(val, dst)?; huffman::encode(val, dst);
let huff_len = position(dst) - (idx + 1); let huff_len = position(dst) - (idx + 1);
if encode_int_one_byte(huff_len, 7) { if encode_int_one_byte(huff_len, 7) {
// Write the string head // Write the string head
dst.get_mut()[idx] = 0x80 | huff_len as u8; dst[idx] = 0x80 | huff_len as u8;
} else { } else {
// Write the head to a placeholer // Write the head to a placeholder
const PLACEHOLDER_LEN: usize = 8; const PLACEHOLDER_LEN: usize = 8;
let mut buf = [0u8; PLACEHOLDER_LEN]; let mut buf = [0u8; PLACEHOLDER_LEN];
let head_len = { let head_len = {
let mut head_dst = &mut buf[..]; let mut head_dst = &mut buf[..];
encode_int(huff_len, 7, 0x80, &mut head_dst)?; encode_int(huff_len, 7, 0x80, &mut head_dst);
PLACEHOLDER_LEN - head_dst.remaining_mut() PLACEHOLDER_LEN - head_dst.remaining_mut()
}; };
if dst.remaining_mut() < head_len {
return Err(EncoderError::BufferOverflow);
}
// This is just done to reserve space in the destination // This is just done to reserve space in the destination
dst.put_slice(&buf[1..head_len]); dst.put_slice(&buf[1..head_len]);
let written = dst.get_mut();
// Shift the header forward // Shift the header forward
for i in 0..huff_len { for i in 0..huff_len {
let src_i = idx + 1 + (huff_len - (i + 1)); let src_i = idx + 1 + (huff_len - (i + 1));
let dst_i = idx + head_len + (huff_len - (i + 1)); let dst_i = idx + head_len + (huff_len - (i + 1));
written[dst_i] = written[src_i]; dst[dst_i] = dst[src_i];
} }
// Copy in the head // Copy in the head
for i in 0..head_len { for i in 0..head_len {
written[idx + i] = buf[i]; dst[idx + i] = buf[i];
} }
} }
} else { } else {
// Write an empty string // Write an empty string
dst.put_u8(0); dst.put_u8(0);
} }
Ok(())
} }
/// Encode an integer into the given destination buffer /// Encode an integer into the given destination buffer
@ -364,47 +266,25 @@ fn encode_int<B: BufMut>(
prefix_bits: usize, // The number of bits in the prefix prefix_bits: usize, // The number of bits in the prefix
first_byte: u8, // The base upon which to start encoding the int first_byte: u8, // The base upon which to start encoding the int
dst: &mut B, dst: &mut B,
) -> Result<(), EncoderError> { ) {
let mut rem = dst.remaining_mut();
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
if encode_int_one_byte(value, prefix_bits) { if encode_int_one_byte(value, prefix_bits) {
dst.put_u8(first_byte | value as u8); dst.put_u8(first_byte | value as u8);
return Ok(()); return;
} }
let low = (1 << prefix_bits) - 1; let low = (1 << prefix_bits) - 1;
value -= low; value -= low;
if value > 0x0fff_ffff {
panic!("value out of range");
}
dst.put_u8(first_byte | low as u8); dst.put_u8(first_byte | low as u8);
rem -= 1;
while value >= 128 { while value >= 128 {
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8(0b1000_0000 | value as u8); dst.put_u8(0b1000_0000 | value as u8);
rem -= 1;
value >>= 7; value >>= 7;
} }
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8(value as u8); dst.put_u8(value as u8);
Ok(())
} }
/// Returns true if the in the int can be fully encoded in the first byte. /// Returns true if the in the int can be fully encoded in the first byte.
@ -412,19 +292,14 @@ fn encode_int_one_byte(value: usize, prefix_bits: usize) -> bool {
value < (1 << prefix_bits) - 1 value < (1 << prefix_bits) - 1
} }
fn position(buf: &DstBuf<'_>) -> usize { fn position(buf: &BytesMut) -> usize {
buf.get_ref().len() buf.len()
}
fn rewind(buf: &mut DstBuf<'_>, pos: usize) {
buf.get_mut().truncate(pos);
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use crate::hpack::Header; use crate::hpack::Header;
use bytes::buf::BufMutExt;
use http::*; use http::*;
#[test] #[test]
@ -802,49 +677,15 @@ mod test {
} }
#[test] #[test]
fn test_nameless_header_at_resume() { fn test_large_size_update() {
let mut encoder = Encoder::default(); let mut encoder = Encoder::default();
let max_len = 15;
let mut dst = BytesMut::with_capacity(64);
let mut input = vec![ encoder.update_max_size(1912930560);
Header::Field { assert_eq!(Some(SizeUpdate::One(1912930560)), encoder.size_update);
name: Some("hello".parse().unwrap()),
value: HeaderValue::from_bytes(b"world").unwrap(),
},
Header::Field {
name: None,
value: HeaderValue::from_bytes(b"zomg").unwrap(),
},
Header::Field {
name: None,
value: HeaderValue::from_bytes(b"sup").unwrap(),
},
]
.into_iter();
let resume = match encoder.encode(None, &mut input, &mut (&mut dst).limit(max_len)) { let mut dst = BytesMut::with_capacity(6);
Encode::Partial(r) => r, encoder.encode_size_updates(&mut dst);
_ => panic!("encode should be partial"), assert_eq!([63, 225, 129, 148, 144, 7], &dst[..]);
};
assert_eq!(&[0x40, 0x80 | 4], &dst[0..2]);
assert_eq!("hello", huff_decode(&dst[2..6]));
assert_eq!(0x80 | 4, dst[6]);
assert_eq!("world", huff_decode(&dst[7..11]));
dst.clear();
match encoder.encode(Some(resume), &mut input, &mut (&mut dst).limit(max_len)) {
Encode::Full => {}
unexpected => panic!("resume returned unexpected: {:?}", unexpected),
}
// Next is not indexed
assert_eq!(&[15, 47, 0x80 | 3], &dst[0..3]);
assert_eq!("zomg", huff_decode(&dst[3..6]));
assert_eq!(&[15, 47, 0x80 | 3], &dst[6..9]);
assert_eq!("sup", huff_decode(&dst[9..]));
} }
#[test] #[test]
@ -855,7 +696,7 @@ mod test {
fn encode(e: &mut Encoder, hdrs: Vec<Header<Option<HeaderName>>>) -> BytesMut { fn encode(e: &mut Encoder, hdrs: Vec<Header<Option<HeaderName>>>) -> BytesMut {
let mut dst = BytesMut::with_capacity(1024); let mut dst = BytesMut::with_capacity(1024);
e.encode(None, &mut hdrs.into_iter(), &mut (&mut dst).limit(1024)); e.encode(&mut hdrs.into_iter(), &mut dst);
dst dst
} }

28
third_party/rust/h2/src/hpack/header.rs поставляемый
Просмотреть файл

@ -1,11 +1,12 @@
use super::{DecoderError, NeedMore}; use super::{DecoderError, NeedMore};
use crate::ext::Protocol;
use bytes::Bytes; use bytes::Bytes;
use http::header::{HeaderName, HeaderValue}; use http::header::{HeaderName, HeaderValue};
use http::{Method, StatusCode}; use http::{Method, StatusCode};
use std::fmt; use std::fmt;
/// HTTP/2.0 Header /// HTTP/2 Header
#[derive(Debug, Clone, Eq, PartialEq)] #[derive(Debug, Clone, Eq, PartialEq)]
pub enum Header<T = HeaderName> { pub enum Header<T = HeaderName> {
Field { name: T, value: HeaderValue }, Field { name: T, value: HeaderValue },
@ -14,6 +15,7 @@ pub enum Header<T = HeaderName> {
Method(Method), Method(Method),
Scheme(BytesStr), Scheme(BytesStr),
Path(BytesStr), Path(BytesStr),
Protocol(Protocol),
Status(StatusCode), Status(StatusCode),
} }
@ -25,6 +27,7 @@ pub enum Name<'a> {
Method, Method,
Scheme, Scheme,
Path, Path,
Protocol,
Status, Status,
} }
@ -51,6 +54,7 @@ impl Header<Option<HeaderName>> {
Method(v) => Method(v), Method(v) => Method(v),
Scheme(v) => Scheme(v), Scheme(v) => Scheme(v),
Path(v) => Path(v), Path(v) => Path(v),
Protocol(v) => Protocol(v),
Status(v) => Status(v), Status(v) => Status(v),
}) })
} }
@ -79,6 +83,10 @@ impl Header {
let value = BytesStr::try_from(value)?; let value = BytesStr::try_from(value)?;
Ok(Header::Path(value)) Ok(Header::Path(value))
} }
b"protocol" => {
let value = Protocol::try_from(value)?;
Ok(Header::Protocol(value))
}
b"status" => { b"status" => {
let status = StatusCode::from_bytes(&value)?; let status = StatusCode::from_bytes(&value)?;
Ok(Header::Status(status)) Ok(Header::Status(status))
@ -104,6 +112,7 @@ impl Header {
Header::Method(ref v) => 32 + 7 + v.as_ref().len(), Header::Method(ref v) => 32 + 7 + v.as_ref().len(),
Header::Scheme(ref v) => 32 + 7 + v.len(), Header::Scheme(ref v) => 32 + 7 + v.len(),
Header::Path(ref v) => 32 + 5 + v.len(), Header::Path(ref v) => 32 + 5 + v.len(),
Header::Protocol(ref v) => 32 + 9 + v.as_str().len(),
Header::Status(_) => 32 + 7 + 3, Header::Status(_) => 32 + 7 + 3,
} }
} }
@ -116,6 +125,7 @@ impl Header {
Header::Method(..) => Name::Method, Header::Method(..) => Name::Method,
Header::Scheme(..) => Name::Scheme, Header::Scheme(..) => Name::Scheme,
Header::Path(..) => Name::Path, Header::Path(..) => Name::Path,
Header::Protocol(..) => Name::Protocol,
Header::Status(..) => Name::Status, Header::Status(..) => Name::Status,
} }
} }
@ -127,6 +137,7 @@ impl Header {
Header::Method(ref v) => v.as_ref().as_ref(), Header::Method(ref v) => v.as_ref().as_ref(),
Header::Scheme(ref v) => v.as_ref(), Header::Scheme(ref v) => v.as_ref(),
Header::Path(ref v) => v.as_ref(), Header::Path(ref v) => v.as_ref(),
Header::Protocol(ref v) => v.as_ref(),
Header::Status(ref v) => v.as_str().as_ref(), Header::Status(ref v) => v.as_str().as_ref(),
} }
} }
@ -156,6 +167,10 @@ impl Header {
Header::Path(ref b) => a == b, Header::Path(ref b) => a == b,
_ => false, _ => false,
}, },
Header::Protocol(ref a) => match *other {
Header::Protocol(ref b) => a == b,
_ => false,
},
Header::Status(ref a) => match *other { Header::Status(ref a) => match *other {
Header::Status(ref b) => a == b, Header::Status(ref b) => a == b,
_ => false, _ => false,
@ -205,6 +220,7 @@ impl From<Header> for Header<Option<HeaderName>> {
Header::Method(v) => Header::Method(v), Header::Method(v) => Header::Method(v),
Header::Scheme(v) => Header::Scheme(v), Header::Scheme(v) => Header::Scheme(v),
Header::Path(v) => Header::Path(v), Header::Path(v) => Header::Path(v),
Header::Protocol(v) => Header::Protocol(v),
Header::Status(v) => Header::Status(v), Header::Status(v) => Header::Status(v),
} }
} }
@ -221,6 +237,7 @@ impl<'a> Name<'a> {
Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)), Name::Method => Ok(Header::Method(Method::from_bytes(&*value)?)),
Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)), Name::Scheme => Ok(Header::Scheme(BytesStr::try_from(value)?)),
Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)), Name::Path => Ok(Header::Path(BytesStr::try_from(value)?)),
Name::Protocol => Ok(Header::Protocol(Protocol::try_from(value)?)),
Name::Status => { Name::Status => {
match StatusCode::from_bytes(&value) { match StatusCode::from_bytes(&value) {
Ok(status) => Ok(Header::Status(status)), Ok(status) => Ok(Header::Status(status)),
@ -238,6 +255,7 @@ impl<'a> Name<'a> {
Name::Method => b":method", Name::Method => b":method",
Name::Scheme => b":scheme", Name::Scheme => b":scheme",
Name::Path => b":path", Name::Path => b":path",
Name::Protocol => b":protocol",
Name::Status => b":status", Name::Status => b":status",
} }
} }
@ -246,8 +264,12 @@ impl<'a> Name<'a> {
// ===== impl BytesStr ===== // ===== impl BytesStr =====
impl BytesStr { impl BytesStr {
pub(crate) unsafe fn from_utf8_unchecked(bytes: Bytes) -> Self { pub(crate) const fn from_static(value: &'static str) -> Self {
BytesStr(bytes) BytesStr(Bytes::from_static(value.as_bytes()))
}
pub(crate) fn from(value: &str) -> Self {
BytesStr(Bytes::copy_from_slice(value.as_bytes()))
} }
#[doc(hidden)] #[doc(hidden)]

33
third_party/rust/h2/src/hpack/huffman/mod.rs поставляемый
Просмотреть файл

@ -1,7 +1,7 @@
mod table; mod table;
use self::table::{DECODE_TABLE, ENCODE_TABLE}; use self::table::{DECODE_TABLE, ENCODE_TABLE};
use crate::hpack::{DecoderError, EncoderError}; use crate::hpack::DecoderError;
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
@ -40,11 +40,9 @@ pub fn decode(src: &[u8], buf: &mut BytesMut) -> Result<BytesMut, DecoderError>
Ok(buf.split()) Ok(buf.split())
} }
// TODO: return error when there is not enough room to encode the value pub fn encode(src: &[u8], dst: &mut BytesMut) {
pub fn encode<B: BufMut>(src: &[u8], dst: &mut B) -> Result<(), EncoderError> {
let mut bits: u64 = 0; let mut bits: u64 = 0;
let mut bits_left = 40; let mut bits_left = 40;
let mut rem = dst.remaining_mut();
for &b in src { for &b in src {
let (nbits, code) = ENCODE_TABLE[b as usize]; let (nbits, code) = ENCODE_TABLE[b as usize];
@ -53,29 +51,18 @@ pub fn encode<B: BufMut>(src: &[u8], dst: &mut B) -> Result<(), EncoderError> {
bits_left -= nbits; bits_left -= nbits;
while bits_left <= 32 { while bits_left <= 32 {
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
dst.put_u8((bits >> 32) as u8); dst.put_u8((bits >> 32) as u8);
bits <<= 8; bits <<= 8;
bits_left += 8; bits_left += 8;
rem -= 1;
} }
} }
if bits_left != 40 { if bits_left != 40 {
if rem == 0 {
return Err(EncoderError::BufferOverflow);
}
// This writes the EOS token // This writes the EOS token
bits |= (1 << bits_left) - 1; bits |= (1 << bits_left) - 1;
dst.put_u8((bits >> 32) as u8); dst.put_u8((bits >> 32) as u8);
} }
Ok(())
} }
impl Decoder { impl Decoder {
@ -144,17 +131,17 @@ mod test {
#[test] #[test]
fn encode_single_byte() { fn encode_single_byte() {
let mut dst = Vec::with_capacity(1); let mut dst = BytesMut::with_capacity(1);
encode(b"o", &mut dst).unwrap(); encode(b"o", &mut dst);
assert_eq!(&dst[..], &[0b00111111]); assert_eq!(&dst[..], &[0b00111111]);
dst.clear(); dst.clear();
encode(b"0", &mut dst).unwrap(); encode(b"0", &mut dst);
assert_eq!(&dst[..], &[0x0 + 7]); assert_eq!(&dst[..], &[0x0 + 7]);
dst.clear(); dst.clear();
encode(b"A", &mut dst).unwrap(); encode(b"A", &mut dst);
assert_eq!(&dst[..], &[(0x21 << 2) + 3]); assert_eq!(&dst[..], &[(0x21 << 2) + 3]);
} }
@ -185,9 +172,9 @@ mod test {
]; ];
for s in DATA { for s in DATA {
let mut dst = Vec::with_capacity(s.len()); let mut dst = BytesMut::with_capacity(s.len());
encode(s.as_bytes(), &mut dst).unwrap(); encode(s.as_bytes(), &mut dst);
let decoded = decode(&dst).unwrap(); let decoded = decode(&dst).unwrap();
@ -201,9 +188,9 @@ mod test {
&[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"]; &[b"\0", b"\0\0\0", b"\0\x01\x02\x03\x04\x05", b"\xFF\xF8"];
for s in DATA { for s in DATA {
let mut dst = Vec::with_capacity(s.len()); let mut dst = BytesMut::with_capacity(s.len());
encode(s, &mut dst).unwrap(); encode(s, &mut dst);
let decoded = decode(&dst).unwrap(); let decoded = decode(&dst).unwrap();

4
third_party/rust/h2/src/hpack/mod.rs поставляемый
Просмотреть файл

@ -1,12 +1,12 @@
mod decoder; mod decoder;
mod encoder; mod encoder;
pub(crate) mod header; pub(crate) mod header;
mod huffman; pub(crate) mod huffman;
mod table; mod table;
#[cfg(test)] #[cfg(test)]
mod test; mod test;
pub use self::decoder::{Decoder, DecoderError, NeedMore}; pub use self::decoder::{Decoder, DecoderError, NeedMore};
pub use self::encoder::{Encode, EncodeState, Encoder, EncoderError}; pub use self::encoder::Encoder;
pub use self::header::{BytesStr, Header}; pub use self::header::{BytesStr, Header};

3
third_party/rust/h2/src/hpack/table.rs поставляемый
Просмотреть файл

@ -597,7 +597,7 @@ impl Table {
} }
assert!(dist <= their_dist, assert!(dist <= their_dist,
"could not find entry; actual={}; desired={};" + "could not find entry; actual={}; desired={}" +
"probe={}, dist={}; their_dist={}; index={}; msg={}", "probe={}, dist={}; their_dist={}; index={}; msg={}",
actual, desired, probe, dist, their_dist, actual, desired, probe, dist, their_dist,
index.wrapping_sub(self.inserted), msg); index.wrapping_sub(self.inserted), msg);
@ -751,6 +751,7 @@ fn index_static(header: &Header) -> Option<(usize, bool)> {
"/index.html" => Some((5, true)), "/index.html" => Some((5, true)),
_ => Some((4, false)), _ => Some((4, false)),
}, },
Header::Protocol(..) => None,
Header::Status(ref v) => match u16::from(*v) { Header::Status(ref v) => match u16::from(*v) {
200 => Some((8, true)), 200 => Some((8, true)),
204 => Some((9, true)), 204 => Some((9, true)),

10
third_party/rust/h2/src/hpack/test/fixture.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
use crate::hpack::{Decoder, Encoder, Header}; use crate::hpack::{Decoder, Encoder, Header};
use bytes::{buf::BufMutExt, BytesMut}; use bytes::BytesMut;
use hex::FromHex; use hex::FromHex;
use serde_json::Value; use serde_json::Value;
@ -107,11 +107,7 @@ fn test_story(story: Value) {
}) })
.collect(); .collect();
encoder.encode( encoder.encode(&mut input.clone().into_iter(), &mut buf);
None,
&mut input.clone().into_iter(),
&mut (&mut buf).limit(limit),
);
decoder decoder
.decode(&mut Cursor::new(&mut buf), |e| { .decode(&mut Cursor::new(&mut buf), |e| {
@ -138,6 +134,7 @@ fn key_str(e: &Header) -> &str {
Header::Method(..) => ":method", Header::Method(..) => ":method",
Header::Scheme(..) => ":scheme", Header::Scheme(..) => ":scheme",
Header::Path(..) => ":path", Header::Path(..) => ":path",
Header::Protocol(..) => ":protocol",
Header::Status(..) => ":status", Header::Status(..) => ":status",
} }
} }
@ -149,6 +146,7 @@ fn value_str(e: &Header) -> &str {
Header::Method(ref m) => m.as_str(), Header::Method(ref m) => m.as_str(),
Header::Scheme(ref v) => &**v, Header::Scheme(ref v) => &**v,
Header::Path(ref v) => &**v, Header::Path(ref v) => &**v,
Header::Protocol(ref v) => v.as_str(),
Header::Status(ref v) => v.as_str(), Header::Status(ref v) => v.as_str(),
} }
} }

266
third_party/rust/h2/src/hpack/test/fuzz.rs поставляемый
Просмотреть файл

@ -1,14 +1,15 @@
use crate::hpack::{Decoder, Encode, Encoder, Header}; use crate::hpack::{Decoder, Encoder, Header};
use http::header::{HeaderName, HeaderValue}; use http::header::{HeaderName, HeaderValue};
use bytes::{buf::BufMutExt, Bytes, BytesMut}; use bytes::BytesMut;
use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult}; use quickcheck::{Arbitrary, Gen, QuickCheck, TestResult};
use rand::{Rng, SeedableRng, StdRng}; use rand::distributions::Slice;
use rand::rngs::StdRng;
use rand::{thread_rng, Rng, SeedableRng};
use std::io::Cursor; use std::io::Cursor;
const MIN_CHUNK: usize = 16;
const MAX_CHUNK: usize = 2 * 1024; const MAX_CHUNK: usize = 2 * 1024;
#[test] #[test]
@ -36,17 +37,8 @@ fn hpack_fuzz_seeded() {
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct FuzzHpack { struct FuzzHpack {
// The magic seed that makes the test case reproducible
seed: [usize; 4],
// The set of headers to encode / decode // The set of headers to encode / decode
frames: Vec<HeaderFrame>, frames: Vec<HeaderFrame>,
// The list of chunk sizes to do it in
chunks: Vec<usize>,
// Number of times reduced
reduced: usize,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@ -56,9 +48,9 @@ struct HeaderFrame {
} }
impl FuzzHpack { impl FuzzHpack {
fn new(seed: [usize; 4]) -> FuzzHpack { fn new(seed: [u8; 32]) -> FuzzHpack {
// Seed the RNG // Seed the RNG
let mut rng = StdRng::from_seed(&seed); let mut rng = StdRng::from_seed(seed);
// Generates a bunch of source headers // Generates a bunch of source headers
let mut source: Vec<Header<Option<HeaderName>>> = vec![]; let mut source: Vec<Header<Option<HeaderName>>> = vec![];
@ -68,12 +60,12 @@ impl FuzzHpack {
} }
// Actual test run headers // Actual test run headers
let num: usize = rng.gen_range(40, 500); let num: usize = rng.gen_range(40..500);
let mut frames: Vec<HeaderFrame> = vec![]; let mut frames: Vec<HeaderFrame> = vec![];
let mut added = 0; let mut added = 0;
let skew: i32 = rng.gen_range(1, 5); let skew: i32 = rng.gen_range(1..5);
// Rough number of headers to add // Rough number of headers to add
while added < num { while added < num {
@ -82,24 +74,24 @@ impl FuzzHpack {
headers: vec![], headers: vec![],
}; };
match rng.gen_range(0, 20) { match rng.gen_range(0..20) {
0 => { 0 => {
// Two resizes // Two resizes
let high = rng.gen_range(128, MAX_CHUNK * 2); let high = rng.gen_range(128..MAX_CHUNK * 2);
let low = rng.gen_range(0, high); let low = rng.gen_range(0..high);
frame.resizes.extend(&[low, high]); frame.resizes.extend(&[low, high]);
} }
1..=3 => { 1..=3 => {
frame.resizes.push(rng.gen_range(128, MAX_CHUNK * 2)); frame.resizes.push(rng.gen_range(128..MAX_CHUNK * 2));
} }
_ => {} _ => {}
} }
let mut is_name_required = true; let mut is_name_required = true;
for _ in 0..rng.gen_range(1, (num - added) + 1) { for _ in 0..rng.gen_range(1..(num - added) + 1) {
let x: f64 = rng.gen_range(0.0, 1.0); let x: f64 = rng.gen_range(0.0..1.0);
let x = x.powi(skew); let x = x.powi(skew);
let i = (x * source.len() as f64) as usize; let i = (x * source.len() as f64) as usize;
@ -128,23 +120,10 @@ impl FuzzHpack {
frames.push(frame); frames.push(frame);
} }
// Now, generate the buffer sizes used to encode FuzzHpack { frames }
let mut chunks = vec![];
for _ in 0..rng.gen_range(0, 100) {
chunks.push(rng.gen_range(MIN_CHUNK, MAX_CHUNK));
}
FuzzHpack {
seed: seed,
frames: frames,
chunks: chunks,
reduced: 0,
}
} }
fn run(self) { fn run(self) {
let mut chunks = self.chunks;
let frames = self.frames; let frames = self.frames;
let mut expect = vec![]; let mut expect = vec![];
@ -173,11 +152,7 @@ impl FuzzHpack {
} }
} }
let mut input = frame.headers.into_iter(); let mut buf = BytesMut::new();
let mut index = None;
let mut max_chunk = chunks.pop().unwrap_or(MAX_CHUNK);
let mut buf = BytesMut::with_capacity(max_chunk);
if let Some(max) = frame.resizes.iter().max() { if let Some(max) = frame.resizes.iter().max() {
decoder.queue_size_update(*max); decoder.queue_size_update(*max);
@ -188,25 +163,7 @@ impl FuzzHpack {
encoder.update_max_size(*resize); encoder.update_max_size(*resize);
} }
loop { encoder.encode(frame.headers, &mut buf);
match encoder.encode(index.take(), &mut input, &mut (&mut buf).limit(max_chunk)) {
Encode::Full => break,
Encode::Partial(i) => {
index = Some(i);
// Decode the chunk!
decoder
.decode(&mut Cursor::new(&mut buf), |h| {
let e = expect.remove(0);
assert_eq!(h, e);
})
.expect("partial decode");
max_chunk = chunks.pop().unwrap_or(MAX_CHUNK);
buf = BytesMut::with_capacity(max_chunk);
}
}
}
// Decode the chunk! // Decode the chunk!
decoder decoder
@ -222,31 +179,31 @@ impl FuzzHpack {
} }
impl Arbitrary for FuzzHpack { impl Arbitrary for FuzzHpack {
fn arbitrary<G: Gen>(g: &mut G) -> Self { fn arbitrary(_: &mut Gen) -> Self {
FuzzHpack::new(quickcheck::Rng::gen(g)) FuzzHpack::new(thread_rng().gen())
} }
} }
fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> { fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> {
use http::{Method, StatusCode}; use http::{Method, StatusCode};
if g.gen_weighted_bool(10) { if g.gen_ratio(1, 10) {
match g.next_u32() % 5 { match g.gen_range(0u32..5) {
0 => { 0 => {
let value = gen_string(g, 4, 20); let value = gen_string(g, 4, 20);
Header::Authority(to_shared(value)) Header::Authority(to_shared(value))
} }
1 => { 1 => {
let method = match g.next_u32() % 6 { let method = match g.gen_range(0u32..6) {
0 => Method::GET, 0 => Method::GET,
1 => Method::POST, 1 => Method::POST,
2 => Method::PUT, 2 => Method::PUT,
3 => Method::PATCH, 3 => Method::PATCH,
4 => Method::DELETE, 4 => Method::DELETE,
5 => { 5 => {
let n: usize = g.gen_range(3, 7); let n: usize = g.gen_range(3..7);
let bytes: Vec<u8> = (0..n) let bytes: Vec<u8> = (0..n)
.map(|_| g.choose(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap().clone()) .map(|_| *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVWXYZ").unwrap()))
.collect(); .collect();
Method::from_bytes(&bytes).unwrap() Method::from_bytes(&bytes).unwrap()
@ -257,7 +214,7 @@ fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> {
Header::Method(method) Header::Method(method)
} }
2 => { 2 => {
let value = match g.next_u32() % 2 { let value = match g.gen_range(0u32..2) {
0 => "http", 0 => "http",
1 => "https", 1 => "https",
_ => unreachable!(), _ => unreachable!(),
@ -266,7 +223,7 @@ fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> {
Header::Scheme(to_shared(value.to_string())) Header::Scheme(to_shared(value.to_string()))
} }
3 => { 3 => {
let value = match g.next_u32() % 100 { let value = match g.gen_range(0u32..100) {
0 => "/".to_string(), 0 => "/".to_string(),
1 => "/index.html".to_string(), 1 => "/index.html".to_string(),
_ => gen_string(g, 2, 20), _ => gen_string(g, 2, 20),
@ -282,14 +239,14 @@ fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> {
_ => unreachable!(), _ => unreachable!(),
} }
} else { } else {
let name = if g.gen_weighted_bool(10) { let name = if g.gen_ratio(1, 10) {
None None
} else { } else {
Some(gen_header_name(g)) Some(gen_header_name(g))
}; };
let mut value = gen_header_value(g); let mut value = gen_header_value(g);
if g.gen_weighted_bool(30) { if g.gen_ratio(1, 30) {
value.set_sensitive(true); value.set_sensitive(true);
} }
@ -300,84 +257,86 @@ fn gen_header(g: &mut StdRng) -> Header<Option<HeaderName>> {
fn gen_header_name(g: &mut StdRng) -> HeaderName { fn gen_header_name(g: &mut StdRng) -> HeaderName {
use http::header; use http::header;
if g.gen_weighted_bool(2) { if g.gen_ratio(1, 2) {
g.choose(&[ g.sample(
header::ACCEPT, Slice::new(&[
header::ACCEPT_CHARSET, header::ACCEPT,
header::ACCEPT_ENCODING, header::ACCEPT_CHARSET,
header::ACCEPT_LANGUAGE, header::ACCEPT_ENCODING,
header::ACCEPT_RANGES, header::ACCEPT_LANGUAGE,
header::ACCESS_CONTROL_ALLOW_CREDENTIALS, header::ACCEPT_RANGES,
header::ACCESS_CONTROL_ALLOW_HEADERS, header::ACCESS_CONTROL_ALLOW_CREDENTIALS,
header::ACCESS_CONTROL_ALLOW_METHODS, header::ACCESS_CONTROL_ALLOW_HEADERS,
header::ACCESS_CONTROL_ALLOW_ORIGIN, header::ACCESS_CONTROL_ALLOW_METHODS,
header::ACCESS_CONTROL_EXPOSE_HEADERS, header::ACCESS_CONTROL_ALLOW_ORIGIN,
header::ACCESS_CONTROL_MAX_AGE, header::ACCESS_CONTROL_EXPOSE_HEADERS,
header::ACCESS_CONTROL_REQUEST_HEADERS, header::ACCESS_CONTROL_MAX_AGE,
header::ACCESS_CONTROL_REQUEST_METHOD, header::ACCESS_CONTROL_REQUEST_HEADERS,
header::AGE, header::ACCESS_CONTROL_REQUEST_METHOD,
header::ALLOW, header::AGE,
header::ALT_SVC, header::ALLOW,
header::AUTHORIZATION, header::ALT_SVC,
header::CACHE_CONTROL, header::AUTHORIZATION,
header::CONNECTION, header::CACHE_CONTROL,
header::CONTENT_DISPOSITION, header::CONNECTION,
header::CONTENT_ENCODING, header::CONTENT_DISPOSITION,
header::CONTENT_LANGUAGE, header::CONTENT_ENCODING,
header::CONTENT_LENGTH, header::CONTENT_LANGUAGE,
header::CONTENT_LOCATION, header::CONTENT_LENGTH,
header::CONTENT_RANGE, header::CONTENT_LOCATION,
header::CONTENT_SECURITY_POLICY, header::CONTENT_RANGE,
header::CONTENT_SECURITY_POLICY_REPORT_ONLY, header::CONTENT_SECURITY_POLICY,
header::CONTENT_TYPE, header::CONTENT_SECURITY_POLICY_REPORT_ONLY,
header::COOKIE, header::CONTENT_TYPE,
header::DNT, header::COOKIE,
header::DATE, header::DNT,
header::ETAG, header::DATE,
header::EXPECT, header::ETAG,
header::EXPIRES, header::EXPECT,
header::FORWARDED, header::EXPIRES,
header::FROM, header::FORWARDED,
header::HOST, header::FROM,
header::IF_MATCH, header::HOST,
header::IF_MODIFIED_SINCE, header::IF_MATCH,
header::IF_NONE_MATCH, header::IF_MODIFIED_SINCE,
header::IF_RANGE, header::IF_NONE_MATCH,
header::IF_UNMODIFIED_SINCE, header::IF_RANGE,
header::LAST_MODIFIED, header::IF_UNMODIFIED_SINCE,
header::LINK, header::LAST_MODIFIED,
header::LOCATION, header::LINK,
header::MAX_FORWARDS, header::LOCATION,
header::ORIGIN, header::MAX_FORWARDS,
header::PRAGMA, header::ORIGIN,
header::PROXY_AUTHENTICATE, header::PRAGMA,
header::PROXY_AUTHORIZATION, header::PROXY_AUTHENTICATE,
header::PUBLIC_KEY_PINS, header::PROXY_AUTHORIZATION,
header::PUBLIC_KEY_PINS_REPORT_ONLY, header::PUBLIC_KEY_PINS,
header::RANGE, header::PUBLIC_KEY_PINS_REPORT_ONLY,
header::REFERER, header::RANGE,
header::REFERRER_POLICY, header::REFERER,
header::REFRESH, header::REFERRER_POLICY,
header::RETRY_AFTER, header::REFRESH,
header::SERVER, header::RETRY_AFTER,
header::SET_COOKIE, header::SERVER,
header::STRICT_TRANSPORT_SECURITY, header::SET_COOKIE,
header::TE, header::STRICT_TRANSPORT_SECURITY,
header::TRAILER, header::TE,
header::TRANSFER_ENCODING, header::TRAILER,
header::USER_AGENT, header::TRANSFER_ENCODING,
header::UPGRADE, header::USER_AGENT,
header::UPGRADE_INSECURE_REQUESTS, header::UPGRADE,
header::VARY, header::UPGRADE_INSECURE_REQUESTS,
header::VIA, header::VARY,
header::WARNING, header::VIA,
header::WWW_AUTHENTICATE, header::WARNING,
header::X_CONTENT_TYPE_OPTIONS, header::WWW_AUTHENTICATE,
header::X_DNS_PREFETCH_CONTROL, header::X_CONTENT_TYPE_OPTIONS,
header::X_FRAME_OPTIONS, header::X_DNS_PREFETCH_CONTROL,
header::X_XSS_PROTECTION, header::X_FRAME_OPTIONS,
]) header::X_XSS_PROTECTION,
.unwrap() ])
.unwrap(),
)
.clone() .clone()
} else { } else {
let value = gen_string(g, 1, 25); let value = gen_string(g, 1, 25);
@ -394,9 +353,7 @@ fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String {
let bytes: Vec<_> = (min..max) let bytes: Vec<_> = (min..max)
.map(|_| { .map(|_| {
// Chars to pick from // Chars to pick from
g.choose(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----") *g.sample(Slice::new(b"ABCDEFGHIJKLMNOPQRSTUVabcdefghilpqrstuvwxyz----").unwrap())
.unwrap()
.clone()
}) })
.collect(); .collect();
@ -404,6 +361,5 @@ fn gen_string(g: &mut StdRng, min: usize, max: usize) -> String {
} }
fn to_shared(src: String) -> crate::hpack::BytesStr { fn to_shared(src: String) -> crate::hpack::BytesStr {
let b: Bytes = src.into(); crate::hpack::BytesStr::from(src.as_str())
unsafe { crate::hpack::BytesStr::from_utf8_unchecked(b) }
} }

41
third_party/rust/h2/src/lib.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
//! An asynchronous, HTTP/2.0 server and client implementation. //! An asynchronous, HTTP/2 server and client implementation.
//! //!
//! This library implements the [HTTP/2.0] specification. The implementation is //! This library implements the [HTTP/2] specification. The implementation is
//! asynchronous, using [futures] as the basis for the API. The implementation //! asynchronous, using [futures] as the basis for the API. The implementation
//! is also decoupled from TCP or TLS details. The user must handle ALPN and //! is also decoupled from TCP or TLS details. The user must handle ALPN and
//! HTTP/1.1 upgrades themselves. //! HTTP/1.1 upgrades themselves.
@ -11,7 +11,7 @@
//! //!
//! ```toml //! ```toml
//! [dependencies] //! [dependencies]
//! h2 = "0.2" //! h2 = "0.3"
//! ``` //! ```
//! //!
//! # Layout //! # Layout
@ -24,19 +24,19 @@
//! # Handshake //! # Handshake
//! //!
//! Both the client and the server require a connection to already be in a state //! Both the client and the server require a connection to already be in a state
//! ready to start the HTTP/2.0 handshake. This library does not provide //! ready to start the HTTP/2 handshake. This library does not provide
//! facilities to do this. //! facilities to do this.
//! //!
//! There are three ways to reach an appropriate state to start the HTTP/2.0 //! There are three ways to reach an appropriate state to start the HTTP/2
//! handshake. //! handshake.
//! //!
//! * Opening an HTTP/1.1 connection and performing an [upgrade]. //! * Opening an HTTP/1.1 connection and performing an [upgrade].
//! * Opening a connection with TLS and use ALPN to negotiate the protocol. //! * Opening a connection with TLS and use ALPN to negotiate the protocol.
//! * Open a connection with prior knowledge, i.e. both the client and the //! * Open a connection with prior knowledge, i.e. both the client and the
//! server assume that the connection is immediately ready to start the //! server assume that the connection is immediately ready to start the
//! HTTP/2.0 handshake once opened. //! HTTP/2 handshake once opened.
//! //!
//! Once the connection is ready to start the HTTP/2.0 handshake, it can be //! Once the connection is ready to start the HTTP/2 handshake, it can be
//! passed to [`server::handshake`] or [`client::handshake`]. At this point, the //! passed to [`server::handshake`] or [`client::handshake`]. At this point, the
//! library will start the handshake process, which consists of: //! library will start the handshake process, which consists of:
//! //!
@ -48,10 +48,10 @@
//! //!
//! # Flow control //! # Flow control
//! //!
//! [Flow control] is a fundamental feature of HTTP/2.0. The `h2` library //! [Flow control] is a fundamental feature of HTTP/2. The `h2` library
//! exposes flow control to the user. //! exposes flow control to the user.
//! //!
//! An HTTP/2.0 client or server may not send unlimited data to the peer. When a //! An HTTP/2 client or server may not send unlimited data to the peer. When a
//! stream is initiated, both the client and the server are provided with an //! stream is initiated, both the client and the server are provided with an
//! initial window size for that stream. A window size is the number of bytes //! initial window size for that stream. A window size is the number of bytes
//! the endpoint can send to the peer. At any point in time, the peer may //! the endpoint can send to the peer. At any point in time, the peer may
@ -66,7 +66,7 @@
//! Managing flow control for outbound data is done through [`SendStream`]. See //! Managing flow control for outbound data is done through [`SendStream`]. See
//! the struct level documentation for those two types for more details. //! the struct level documentation for those two types for more details.
//! //!
//! [HTTP/2.0]: https://http2.github.io/ //! [HTTP/2]: https://http2.github.io/
//! [futures]: https://docs.rs/futures/ //! [futures]: https://docs.rs/futures/
//! [`client`]: client/index.html //! [`client`]: client/index.html
//! [`server`]: server/index.html //! [`server`]: server/index.html
@ -78,16 +78,16 @@
//! [`server::handshake`]: server/fn.handshake.html //! [`server::handshake`]: server/fn.handshake.html
//! [`client::handshake`]: client/fn.handshake.html //! [`client::handshake`]: client/fn.handshake.html
#![doc(html_root_url = "https://docs.rs/h2/0.2.5")] #![doc(html_root_url = "https://docs.rs/h2/0.3.13")]
#![deny(missing_debug_implementations, missing_docs)] #![deny(missing_debug_implementations, missing_docs)]
#![cfg_attr(test, deny(warnings))] #![cfg_attr(test, deny(warnings))]
macro_rules! proto_err { macro_rules! proto_err {
(conn: $($msg:tt)+) => { (conn: $($msg:tt)+) => {
log::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) tracing::debug!("connection error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
}; };
(stream: $($msg:tt)+) => { (stream: $($msg:tt)+) => {
log::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+)) tracing::debug!("stream error PROTOCOL_ERROR -- {};", format_args!($($msg)+))
}; };
} }
@ -104,8 +104,14 @@ macro_rules! ready {
mod codec; mod codec;
mod error; mod error;
mod hpack; mod hpack;
#[cfg(not(feature = "unstable"))]
mod proto; mod proto;
#[cfg(feature = "unstable")]
#[allow(missing_docs)]
pub mod proto;
#[cfg(not(feature = "unstable"))] #[cfg(not(feature = "unstable"))]
mod frame; mod frame;
@ -114,19 +120,24 @@ mod frame;
pub mod frame; pub mod frame;
pub mod client; pub mod client;
pub mod ext;
pub mod server; pub mod server;
mod share; mod share;
#[cfg(fuzzing)]
#[cfg_attr(feature = "unstable", allow(missing_docs))]
pub mod fuzz_bridge;
pub use crate::error::{Error, Reason}; pub use crate::error::{Error, Reason};
pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId}; pub use crate::share::{FlowControl, Ping, PingPong, Pong, RecvStream, SendStream, StreamId};
#[cfg(feature = "unstable")] #[cfg(feature = "unstable")]
pub use codec::{Codec, RecvError, SendError, UserError}; pub use codec::{Codec, SendError, UserError};
use std::task::Poll; use std::task::Poll;
// TODO: Get rid of this trait once https://github.com/rust-lang/rust/pull/63512 // TODO: Get rid of this trait once https://github.com/rust-lang/rust/pull/63512
// is stablized. // is stabilized.
trait PollExt<T, E> { trait PollExt<T, E> {
/// Changes the success value of this `Poll` with the closure provided. /// Changes the success value of this `Poll` with the closure provided.
fn map_ok_<U, F>(self, f: F) -> Poll<Option<Result<U, E>>> fn map_ok_<U, F>(self, f: F) -> Poll<Option<Result<U, E>>>

549
third_party/rust/h2/src/proto/connection.rs поставляемый
Просмотреть файл

@ -1,6 +1,6 @@
use crate::codec::{RecvError, UserError}; use crate::codec::UserError;
use crate::frame::{Reason, StreamId}; use crate::frame::{Reason, StreamId};
use crate::{client, frame, proto, server}; use crate::{client, frame, server};
use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE; use crate::frame::DEFAULT_INITIAL_WINDOW_SIZE;
use crate::proto::*; use crate::proto::*;
@ -17,6 +17,19 @@ use tokio::io::{AsyncRead, AsyncWrite};
/// An H2 connection /// An H2 connection
#[derive(Debug)] #[derive(Debug)]
pub(crate) struct Connection<T, P, B: Buf = Bytes> pub(crate) struct Connection<T, P, B: Buf = Bytes>
where
P: Peer,
{
/// Read / write frame values
codec: Codec<T, Prioritized<B>>,
inner: ConnectionInner<P, B>,
}
// Extracted part of `Connection` which does not depend on `T`. Reduces the amount of duplicated
// method instantiations.
#[derive(Debug)]
struct ConnectionInner<P, B: Buf = Bytes>
where where
P: Peer, P: Peer,
{ {
@ -27,10 +40,7 @@ where
/// ///
/// This exists separately from State in order to support /// This exists separately from State in order to support
/// graceful shutdown. /// graceful shutdown.
error: Option<Reason>, error: Option<frame::GoAway>,
/// Read / write frame values
codec: Codec<T, Prioritized<B>>,
/// Pending GOAWAY frames to write. /// Pending GOAWAY frames to write.
go_away: GoAway, go_away: GoAway,
@ -44,14 +54,30 @@ where
/// Stream state handler /// Stream state handler
streams: Streams<B, P>, streams: Streams<B, P>,
/// A `tracing` span tracking the lifetime of the connection.
span: tracing::Span,
/// Client or server /// Client or server
_phantom: PhantomData<P>, _phantom: PhantomData<P>,
} }
struct DynConnection<'a, B: Buf = Bytes> {
state: &'a mut State,
go_away: &'a mut GoAway,
streams: DynStreams<'a, B>,
error: &'a mut Option<frame::GoAway>,
ping_pong: &'a mut PingPong,
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub(crate) struct Config { pub(crate) struct Config {
pub next_stream_id: StreamId, pub next_stream_id: StreamId,
pub initial_max_send_streams: usize, pub initial_max_send_streams: usize,
pub max_send_buffer_size: usize,
pub reset_stream_duration: Duration, pub reset_stream_duration: Duration,
pub reset_stream_max: usize, pub reset_stream_max: usize,
pub settings: frame::Settings, pub settings: frame::Settings,
@ -63,10 +89,10 @@ enum State {
Open, Open,
/// The codec must be flushed /// The codec must be flushed
Closing(Reason), Closing(Reason, Initiator),
/// In a closed state /// In a closed state
Closed(Reason), Closed(Reason, Initiator),
} }
impl<T, P, B> Connection<T, P, B> impl<T, P, B> Connection<T, P, B>
@ -76,58 +102,92 @@ where
B: Buf, B: Buf,
{ {
pub fn new(codec: Codec<T, Prioritized<B>>, config: Config) -> Connection<T, P, B> { pub fn new(codec: Codec<T, Prioritized<B>>, config: Config) -> Connection<T, P, B> {
let streams = Streams::new(streams::Config { fn streams_config(config: &Config) -> streams::Config {
local_init_window_sz: config streams::Config {
.settings local_init_window_sz: config
.initial_window_size() .settings
.unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE), .initial_window_size()
initial_max_send_streams: config.initial_max_send_streams, .unwrap_or(DEFAULT_INITIAL_WINDOW_SIZE),
local_next_stream_id: config.next_stream_id, initial_max_send_streams: config.initial_max_send_streams,
local_push_enabled: config.settings.is_push_enabled(), local_max_buffer_size: config.max_send_buffer_size,
local_reset_duration: config.reset_stream_duration, local_next_stream_id: config.next_stream_id,
local_reset_max: config.reset_stream_max, local_push_enabled: config.settings.is_push_enabled().unwrap_or(true),
remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE, extended_connect_protocol_enabled: config
remote_max_initiated: config .settings
.settings .is_extended_connect_protocol_enabled()
.max_concurrent_streams() .unwrap_or(false),
.map(|max| max as usize), local_reset_duration: config.reset_stream_duration,
}); local_reset_max: config.reset_stream_max,
remote_init_window_sz: DEFAULT_INITIAL_WINDOW_SIZE,
remote_max_initiated: config
.settings
.max_concurrent_streams()
.map(|max| max as usize),
}
}
let streams = Streams::new(streams_config(&config));
Connection { Connection {
state: State::Open,
error: None,
codec, codec,
go_away: GoAway::new(), inner: ConnectionInner {
ping_pong: PingPong::new(), state: State::Open,
settings: Settings::new(config.settings), error: None,
streams, go_away: GoAway::new(),
_phantom: PhantomData, ping_pong: PingPong::new(),
settings: Settings::new(config.settings),
streams,
span: tracing::debug_span!("Connection", peer = %P::NAME),
_phantom: PhantomData,
},
} }
} }
/// connection flow control /// connection flow control
pub(crate) fn set_target_window_size(&mut self, size: WindowSize) { pub(crate) fn set_target_window_size(&mut self, size: WindowSize) {
self.streams.set_target_connection_window_size(size); self.inner.streams.set_target_connection_window_size(size);
} }
/// Send a new SETTINGS frame with an updated initial window size. /// Send a new SETTINGS frame with an updated initial window size.
pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> { pub(crate) fn set_initial_window_size(&mut self, size: WindowSize) -> Result<(), UserError> {
let mut settings = frame::Settings::default(); let mut settings = frame::Settings::default();
settings.set_initial_window_size(Some(size)); settings.set_initial_window_size(Some(size));
self.settings.send_settings(settings) self.inner.settings.send_settings(settings)
}
/// Send a new SETTINGS frame with extended CONNECT protocol enabled.
pub(crate) fn set_enable_connect_protocol(&mut self) -> Result<(), UserError> {
let mut settings = frame::Settings::default();
settings.set_enable_connect_protocol(Some(1));
self.inner.settings.send_settings(settings)
}
/// Returns the maximum number of concurrent streams that may be initiated
/// by this peer.
pub(crate) fn max_send_streams(&self) -> usize {
self.inner.streams.max_send_streams()
}
/// Returns the maximum number of concurrent streams that may be initiated
/// by the remote peer.
pub(crate) fn max_recv_streams(&self) -> usize {
self.inner.streams.max_recv_streams()
} }
/// Returns `Ready` when the connection is ready to receive a frame. /// Returns `Ready` when the connection is ready to receive a frame.
/// ///
/// Returns `RecvError` as this may raise errors that are caused by delayed /// Returns `Error` as this may raise errors that are caused by delayed
/// processing of received frames. /// processing of received frames.
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> { fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
let _e = self.inner.span.enter();
let span = tracing::trace_span!("poll_ready");
let _e = span.enter();
// The order of these calls don't really matter too much // The order of these calls don't really matter too much
ready!(self.ping_pong.send_pending_pong(cx, &mut self.codec))?; ready!(self.inner.ping_pong.send_pending_pong(cx, &mut self.codec))?;
ready!(self.ping_pong.send_pending_ping(cx, &mut self.codec))?; ready!(self.inner.ping_pong.send_pending_ping(cx, &mut self.codec))?;
ready!(self ready!(self
.inner
.settings .settings
.poll_send(cx, &mut self.codec, &mut self.streams))?; .poll_send(cx, &mut self.codec, &mut self.inner.streams))?;
ready!(self.streams.send_pending_refusal(cx, &mut self.codec))?; ready!(self.inner.streams.send_pending_refusal(cx, &mut self.codec))?;
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} }
@ -137,50 +197,31 @@ where
/// This will return `Some(reason)` if the connection should be closed /// This will return `Some(reason)` if the connection should be closed
/// afterwards. If this is a graceful shutdown, this returns `None`. /// afterwards. If this is a graceful shutdown, this returns `None`.
fn poll_go_away(&mut self, cx: &mut Context) -> Poll<Option<io::Result<Reason>>> { fn poll_go_away(&mut self, cx: &mut Context) -> Poll<Option<io::Result<Reason>>> {
self.go_away.send_pending_go_away(cx, &mut self.codec) self.inner.go_away.send_pending_go_away(cx, &mut self.codec)
}
fn go_away(&mut self, id: StreamId, e: Reason) {
let frame = frame::GoAway::new(id, e);
self.streams.send_go_away(id);
self.go_away.go_away(frame);
}
fn go_away_now(&mut self, e: Reason) {
let last_processed_id = self.streams.last_processed_id();
let frame = frame::GoAway::new(last_processed_id, e);
self.go_away.go_away_now(frame);
} }
pub fn go_away_from_user(&mut self, e: Reason) { pub fn go_away_from_user(&mut self, e: Reason) {
let last_processed_id = self.streams.last_processed_id(); self.inner.as_dyn().go_away_from_user(e)
let frame = frame::GoAway::new(last_processed_id, e);
self.go_away.go_away_from_user(frame);
// Notify all streams of reason we're abruptly closing.
self.streams.recv_err(&proto::Error::Proto(e));
} }
fn take_error(&mut self, ours: Reason) -> Poll<Result<(), proto::Error>> { fn take_error(&mut self, ours: Reason, initiator: Initiator) -> Result<(), Error> {
let reason = if let Some(theirs) = self.error.take() { let (debug_data, theirs) = self
match (ours, theirs) { .inner
// If either side reported an error, return that .error
// to the user. .take()
(Reason::NO_ERROR, err) | (err, Reason::NO_ERROR) => err, .as_ref()
// If both sides reported an error, give their .map_or((Bytes::new(), Reason::NO_ERROR), |frame| {
// error back to th user. We assume our error (frame.debug_data().clone(), frame.reason())
// was a consequence of their error, and less });
// important.
(_, theirs) => theirs,
}
} else {
ours
};
if reason == Reason::NO_ERROR { match (ours, theirs) {
Poll::Ready(Ok(())) (Reason::NO_ERROR, Reason::NO_ERROR) => return Ok(()),
} else { (ours, Reason::NO_ERROR) => Err(Error::GoAway(Bytes::new(), ours, initiator)),
Poll::Ready(Err(proto::Error::Proto(reason))) // If both sides reported an error, give their
// error back to th user. We assume our error
// was a consequence of their error, and less
// important.
(_, theirs) => Err(Error::remote_go_away(debug_data, theirs)),
} }
} }
@ -189,102 +230,71 @@ where
pub fn maybe_close_connection_if_no_streams(&mut self) { pub fn maybe_close_connection_if_no_streams(&mut self) {
// If we poll() and realize that there are no streams or references // If we poll() and realize that there are no streams or references
// then we can close the connection by transitioning to GOAWAY // then we can close the connection by transitioning to GOAWAY
if !self.streams.has_streams_or_other_references() { if !self.inner.streams.has_streams_or_other_references() {
self.go_away_now(Reason::NO_ERROR); self.inner.as_dyn().go_away_now(Reason::NO_ERROR);
} }
} }
pub(crate) fn take_user_pings(&mut self) -> Option<UserPings> { pub(crate) fn take_user_pings(&mut self) -> Option<UserPings> {
self.ping_pong.take_user_pings() self.inner.ping_pong.take_user_pings()
} }
/// Advances the internal state of the connection. /// Advances the internal state of the connection.
pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), proto::Error>> { pub fn poll(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
use crate::codec::RecvError::*; // XXX(eliza): cloning the span is unfortunately necessary here in
// order to placate the borrow checker — `self` is mutably borrowed by
// `poll2`, which means that we can't borrow `self.span` to enter it.
// The clone is just an atomic ref bump.
let span = self.inner.span.clone();
let _e = span.enter();
let span = tracing::trace_span!("poll");
let _e = span.enter();
loop { loop {
tracing::trace!(connection.state = ?self.inner.state);
// TODO: probably clean up this glob of code // TODO: probably clean up this glob of code
match self.state { match self.inner.state {
// When open, continue to poll a frame // When open, continue to poll a frame
State::Open => { State::Open => {
match self.poll2(cx) { let result = match self.poll2(cx) {
// The connection has shutdown normally Poll::Ready(result) => result,
Poll::Ready(Ok(())) => self.state = State::Closing(Reason::NO_ERROR),
// The connection is not ready to make progress // The connection is not ready to make progress
Poll::Pending => { Poll::Pending => {
// Ensure all window updates have been sent. // Ensure all window updates have been sent.
// //
// This will also handle flushing `self.codec` // This will also handle flushing `self.codec`
ready!(self.streams.poll_complete(cx, &mut self.codec))?; ready!(self.inner.streams.poll_complete(cx, &mut self.codec))?;
if (self.error.is_some() || self.go_away.should_close_on_idle()) if (self.inner.error.is_some()
&& !self.streams.has_streams() || self.inner.go_away.should_close_on_idle())
&& !self.inner.streams.has_streams()
{ {
self.go_away_now(Reason::NO_ERROR); self.inner.as_dyn().go_away_now(Reason::NO_ERROR);
continue; continue;
} }
return Poll::Pending; return Poll::Pending;
} }
// Attempting to read a frame resulted in a connection level };
// error. This is handled by setting a GOAWAY frame followed by
// terminating the connection.
Poll::Ready(Err(Connection(e))) => {
log::debug!("Connection::poll; connection error={:?}", e);
// We may have already sent a GOAWAY for this error, self.inner.as_dyn().handle_poll2_result(result)?
// if so, don't send another, just flush and close up.
if let Some(reason) = self.go_away.going_away_reason() {
if reason == e {
log::trace!(" -> already going away");
self.state = State::Closing(e);
continue;
}
}
// Reset all active streams
self.streams.recv_err(&e.into());
self.go_away_now(e);
}
// Attempting to read a frame resulted in a stream level error.
// This is handled by resetting the frame then trying to read
// another frame.
Poll::Ready(Err(Stream { id, reason })) => {
log::trace!("stream error; id={:?}; reason={:?}", id, reason);
self.streams.send_reset(id, reason);
}
// Attempting to read a frame resulted in an I/O error. All
// active streams must be reset.
//
// TODO: Are I/O errors recoverable?
Poll::Ready(Err(Io(e))) => {
log::debug!("Connection::poll; IO error={:?}", e);
let e = e.into();
// Reset all active streams
self.streams.recv_err(&e);
// Return the error
return Poll::Ready(Err(e));
}
}
} }
State::Closing(reason) => { State::Closing(reason, initiator) => {
log::trace!("connection closing after flush"); tracing::trace!("connection closing after flush");
// Flush/shutdown the codec // Flush/shutdown the codec
ready!(self.codec.shutdown(cx))?; ready!(self.codec.shutdown(cx))?;
// Transition the state to error // Transition the state to error
self.state = State::Closed(reason); self.inner.state = State::Closed(reason, initiator);
}
State::Closed(reason, initiator) => {
return Poll::Ready(self.take_error(reason, initiator));
} }
State::Closed(reason) => return self.take_error(reason),
} }
} }
} }
fn poll2(&mut self, cx: &mut Context) -> Poll<Result<(), RecvError>> { fn poll2(&mut self, cx: &mut Context) -> Poll<Result<(), Error>> {
use crate::frame::Frame::*;
// This happens outside of the loop to prevent needing to do a clock // This happens outside of the loop to prevent needing to do a clock
// check and then comparison of the queue possibly multiple times a // check and then comparison of the queue possibly multiple times a
// second (and thus, the clock wouldn't have changed enough to matter). // second (and thus, the clock wouldn't have changed enough to matter).
@ -297,13 +307,13 @@ where
// - poll_go_away may buffer a graceful shutdown GOAWAY frame // - poll_go_away may buffer a graceful shutdown GOAWAY frame
// - If it has, we've also added a PING to be sent in poll_ready // - If it has, we've also added a PING to be sent in poll_ready
if let Some(reason) = ready!(self.poll_go_away(cx)?) { if let Some(reason) = ready!(self.poll_go_away(cx)?) {
if self.go_away.should_close_now() { if self.inner.go_away.should_close_now() {
if self.go_away.is_user_initiated() { if self.inner.go_away.is_user_initiated() {
// A user initiated abrupt shutdown shouldn't return // A user initiated abrupt shutdown shouldn't return
// the same error back to the user. // the same error back to the user.
return Poll::Ready(Ok(())); return Poll::Ready(Ok(()));
} else { } else {
return Poll::Ready(Err(RecvError::Connection(reason))); return Poll::Ready(Err(Error::library_go_away(reason)));
} }
} }
// Only NO_ERROR should be waiting for idle // Only NO_ERROR should be waiting for idle
@ -315,61 +325,20 @@ where
} }
ready!(self.poll_ready(cx))?; ready!(self.poll_ready(cx))?;
match ready!(Pin::new(&mut self.codec).poll_next(cx)?) { match self
Some(Headers(frame)) => { .inner
log::trace!("recv HEADERS; frame={:?}", frame); .as_dyn()
self.streams.recv_headers(frame)?; .recv_frame(ready!(Pin::new(&mut self.codec).poll_next(cx)?))?
{
ReceivedFrame::Settings(frame) => {
self.inner.settings.recv_settings(
frame,
&mut self.codec,
&mut self.inner.streams,
)?;
} }
Some(Data(frame)) => { ReceivedFrame::Continue => (),
log::trace!("recv DATA; frame={:?}", frame); ReceivedFrame::Done => {
self.streams.recv_data(frame)?;
}
Some(Reset(frame)) => {
log::trace!("recv RST_STREAM; frame={:?}", frame);
self.streams.recv_reset(frame)?;
}
Some(PushPromise(frame)) => {
log::trace!("recv PUSH_PROMISE; frame={:?}", frame);
self.streams.recv_push_promise(frame)?;
}
Some(Settings(frame)) => {
log::trace!("recv SETTINGS; frame={:?}", frame);
self.settings
.recv_settings(frame, &mut self.codec, &mut self.streams)?;
}
Some(GoAway(frame)) => {
log::trace!("recv GOAWAY; frame={:?}", frame);
// This should prevent starting new streams,
// but should allow continuing to process current streams
// until they are all EOS. Once they are, State should
// transition to GoAway.
self.streams.recv_go_away(&frame)?;
self.error = Some(frame.reason());
}
Some(Ping(frame)) => {
log::trace!("recv PING; frame={:?}", frame);
let status = self.ping_pong.recv_ping(frame);
if status.is_shutdown() {
assert!(
self.go_away.is_going_away(),
"received unexpected shutdown ping"
);
let last_processed_id = self.streams.last_processed_id();
self.go_away(last_processed_id, Reason::NO_ERROR);
}
}
Some(WindowUpdate(frame)) => {
log::trace!("recv WINDOW_UPDATE; frame={:?}", frame);
self.streams.recv_window_update(frame)?;
}
Some(Priority(frame)) => {
log::trace!("recv PRIORITY; frame={:?}", frame);
// TODO: handle
}
None => {
log::trace!("codec closed");
self.streams.recv_eof(false).expect("mutex poisoned");
return Poll::Ready(Ok(())); return Poll::Ready(Ok(()));
} }
} }
@ -377,17 +346,193 @@ where
} }
fn clear_expired_reset_streams(&mut self) { fn clear_expired_reset_streams(&mut self) {
self.streams.clear_expired_reset_streams(); self.inner.streams.clear_expired_reset_streams();
} }
} }
impl<P, B> ConnectionInner<P, B>
where
P: Peer,
B: Buf,
{
fn as_dyn(&mut self) -> DynConnection<'_, B> {
let ConnectionInner {
state,
go_away,
streams,
error,
ping_pong,
..
} = self;
let streams = streams.as_dyn();
DynConnection {
state,
go_away,
streams,
error,
ping_pong,
}
}
}
impl<B> DynConnection<'_, B>
where
B: Buf,
{
fn go_away(&mut self, id: StreamId, e: Reason) {
let frame = frame::GoAway::new(id, e);
self.streams.send_go_away(id);
self.go_away.go_away(frame);
}
fn go_away_now(&mut self, e: Reason) {
let last_processed_id = self.streams.last_processed_id();
let frame = frame::GoAway::new(last_processed_id, e);
self.go_away.go_away_now(frame);
}
fn go_away_from_user(&mut self, e: Reason) {
let last_processed_id = self.streams.last_processed_id();
let frame = frame::GoAway::new(last_processed_id, e);
self.go_away.go_away_from_user(frame);
// Notify all streams of reason we're abruptly closing.
self.streams.handle_error(Error::user_go_away(e));
}
fn handle_poll2_result(&mut self, result: Result<(), Error>) -> Result<(), Error> {
match result {
// The connection has shutdown normally
Ok(()) => {
*self.state = State::Closing(Reason::NO_ERROR, Initiator::Library);
Ok(())
}
// Attempting to read a frame resulted in a connection level
// error. This is handled by setting a GOAWAY frame followed by
// terminating the connection.
Err(Error::GoAway(debug_data, reason, initiator)) => {
let e = Error::GoAway(debug_data, reason, initiator);
tracing::debug!(error = ?e, "Connection::poll; connection error");
// We may have already sent a GOAWAY for this error,
// if so, don't send another, just flush and close up.
if self
.go_away
.going_away()
.map_or(false, |frame| frame.reason() == reason)
{
tracing::trace!(" -> already going away");
*self.state = State::Closing(reason, initiator);
return Ok(());
}
// Reset all active streams
self.streams.handle_error(e);
self.go_away_now(reason);
Ok(())
}
// Attempting to read a frame resulted in a stream level error.
// This is handled by resetting the frame then trying to read
// another frame.
Err(Error::Reset(id, reason, initiator)) => {
debug_assert_eq!(initiator, Initiator::Library);
tracing::trace!(?id, ?reason, "stream error");
self.streams.send_reset(id, reason);
Ok(())
}
// Attempting to read a frame resulted in an I/O error. All
// active streams must be reset.
//
// TODO: Are I/O errors recoverable?
Err(Error::Io(e, inner)) => {
tracing::debug!(error = ?e, "Connection::poll; IO error");
let e = Error::Io(e, inner);
// Reset all active streams
self.streams.handle_error(e.clone());
// Return the error
Err(e)
}
}
}
fn recv_frame(&mut self, frame: Option<Frame>) -> Result<ReceivedFrame, Error> {
use crate::frame::Frame::*;
match frame {
Some(Headers(frame)) => {
tracing::trace!(?frame, "recv HEADERS");
self.streams.recv_headers(frame)?;
}
Some(Data(frame)) => {
tracing::trace!(?frame, "recv DATA");
self.streams.recv_data(frame)?;
}
Some(Reset(frame)) => {
tracing::trace!(?frame, "recv RST_STREAM");
self.streams.recv_reset(frame)?;
}
Some(PushPromise(frame)) => {
tracing::trace!(?frame, "recv PUSH_PROMISE");
self.streams.recv_push_promise(frame)?;
}
Some(Settings(frame)) => {
tracing::trace!(?frame, "recv SETTINGS");
return Ok(ReceivedFrame::Settings(frame));
}
Some(GoAway(frame)) => {
tracing::trace!(?frame, "recv GOAWAY");
// This should prevent starting new streams,
// but should allow continuing to process current streams
// until they are all EOS. Once they are, State should
// transition to GoAway.
self.streams.recv_go_away(&frame)?;
*self.error = Some(frame);
}
Some(Ping(frame)) => {
tracing::trace!(?frame, "recv PING");
let status = self.ping_pong.recv_ping(frame);
if status.is_shutdown() {
assert!(
self.go_away.is_going_away(),
"received unexpected shutdown ping"
);
let last_processed_id = self.streams.last_processed_id();
self.go_away(last_processed_id, Reason::NO_ERROR);
}
}
Some(WindowUpdate(frame)) => {
tracing::trace!(?frame, "recv WINDOW_UPDATE");
self.streams.recv_window_update(frame)?;
}
Some(Priority(frame)) => {
tracing::trace!(?frame, "recv PRIORITY");
// TODO: handle
}
None => {
tracing::trace!("codec closed");
self.streams.recv_eof(false).expect("mutex poisoned");
return Ok(ReceivedFrame::Done);
}
}
Ok(ReceivedFrame::Continue)
}
}
enum ReceivedFrame {
Settings(frame::Settings),
Continue,
Done,
}
impl<T, B> Connection<T, client::Peer, B> impl<T, B> Connection<T, client::Peer, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite,
B: Buf, B: Buf,
{ {
pub(crate) fn streams(&self) -> &Streams<B, client::Peer> { pub(crate) fn streams(&self) -> &Streams<B, client::Peer> {
&self.streams &self.inner.streams
} }
} }
@ -397,12 +542,12 @@ where
B: Buf, B: Buf,
{ {
pub fn next_incoming(&mut self) -> Option<StreamRef<B>> { pub fn next_incoming(&mut self) -> Option<StreamRef<B>> {
self.streams.next_incoming() self.inner.streams.next_incoming()
} }
// Graceful shutdown only makes sense for server peers. // Graceful shutdown only makes sense for server peers.
pub fn go_away_gracefully(&mut self) { pub fn go_away_gracefully(&mut self) {
if self.go_away.is_going_away() { if self.inner.go_away.is_going_away() {
// No reason to start a new one. // No reason to start a new one.
return; return;
} }
@ -418,11 +563,11 @@ where
// > send another GOAWAY frame with an updated last stream identifier. // > send another GOAWAY frame with an updated last stream identifier.
// > This ensures that a connection can be cleanly shut down without // > This ensures that a connection can be cleanly shut down without
// > losing requests. // > losing requests.
self.go_away(StreamId::MAX, Reason::NO_ERROR); self.inner.as_dyn().go_away(StreamId::MAX, Reason::NO_ERROR);
// We take the advice of waiting 1 RTT literally, and wait // We take the advice of waiting 1 RTT literally, and wait
// for a pong before proceeding. // for a pong before proceeding.
self.ping_pong.ping_shutdown(); self.inner.ping_pong.ping_shutdown();
} }
} }
@ -433,6 +578,6 @@ where
{ {
fn drop(&mut self) { fn drop(&mut self) {
// Ignore errors as this indicates that the mutex is poisoned. // Ignore errors as this indicates that the mutex is poisoned.
let _ = self.streams.recv_eof(true); let _ = self.inner.streams.recv_eof(true);
} }
} }

92
third_party/rust/h2/src/proto/error.rs поставляемый
Просмотреть файл

@ -1,53 +1,87 @@
use crate::codec::{RecvError, SendError}; use crate::codec::SendError;
use crate::frame::Reason; use crate::frame::{Reason, StreamId};
use bytes::Bytes;
use std::fmt;
use std::io; use std::io;
/// Either an H2 reason or an I/O error /// Either an H2 reason or an I/O error
#[derive(Debug)] #[derive(Clone, Debug)]
pub enum Error { pub enum Error {
Proto(Reason), Reset(StreamId, Reason, Initiator),
Io(io::Error), GoAway(Bytes, Reason, Initiator),
Io(io::ErrorKind, Option<String>),
}
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum Initiator {
User,
Library,
Remote,
} }
impl Error { impl Error {
/// Clone the error for internal purposes. pub(crate) fn is_local(&self) -> bool {
///
/// `io::Error` is not `Clone`, so we only copy the `ErrorKind`.
pub(super) fn shallow_clone(&self) -> Error {
match *self { match *self {
Error::Proto(reason) => Error::Proto(reason), Self::Reset(_, _, initiator) | Self::GoAway(_, _, initiator) => initiator.is_local(),
Error::Io(ref io) => Error::Io(io::Error::from(io.kind())), Self::Io(..) => true,
}
}
pub(crate) fn user_go_away(reason: Reason) -> Self {
Self::GoAway(Bytes::new(), reason, Initiator::User)
}
pub(crate) fn library_reset(stream_id: StreamId, reason: Reason) -> Self {
Self::Reset(stream_id, reason, Initiator::Library)
}
pub(crate) fn library_go_away(reason: Reason) -> Self {
Self::GoAway(Bytes::new(), reason, Initiator::Library)
}
pub(crate) fn remote_reset(stream_id: StreamId, reason: Reason) -> Self {
Self::Reset(stream_id, reason, Initiator::Remote)
}
pub(crate) fn remote_go_away(debug_data: Bytes, reason: Reason) -> Self {
Self::GoAway(debug_data, reason, Initiator::Remote)
}
}
impl Initiator {
fn is_local(&self) -> bool {
match *self {
Self::User | Self::Library => true,
Self::Remote => false,
} }
} }
} }
impl From<Reason> for Error { impl fmt::Display for Error {
fn from(src: Reason) -> Self { fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
Error::Proto(src) match *self {
Self::Reset(_, reason, _) | Self::GoAway(_, reason, _) => reason.fmt(fmt),
Self::Io(_, Some(ref inner)) => inner.fmt(fmt),
Self::Io(kind, None) => io::Error::from(kind).fmt(fmt),
}
}
}
impl From<io::ErrorKind> for Error {
fn from(src: io::ErrorKind) -> Self {
Error::Io(src.into(), None)
} }
} }
impl From<io::Error> for Error { impl From<io::Error> for Error {
fn from(src: io::Error) -> Self { fn from(src: io::Error) -> Self {
Error::Io(src) Error::Io(src.kind(), src.get_ref().map(|inner| inner.to_string()))
}
}
impl From<Error> for RecvError {
fn from(src: Error) -> RecvError {
match src {
Error::Proto(reason) => RecvError::Connection(reason),
Error::Io(e) => RecvError::Io(e),
}
} }
} }
impl From<Error> for SendError { impl From<Error> for SendError {
fn from(src: Error) -> SendError { fn from(src: Error) -> Self {
match src { Self::Connection(src)
Error::Proto(reason) => SendError::Connection(reason),
Error::Io(e) => SendError::Io(e),
}
} }
} }

16
third_party/rust/h2/src/proto/go_away.rs поставляемый
Просмотреть файл

@ -31,7 +31,7 @@ pub(super) struct GoAway {
/// well, and we wouldn't want to save that here to accidentally dump in logs, /// well, and we wouldn't want to save that here to accidentally dump in logs,
/// or waste struct space.) /// or waste struct space.)
#[derive(Debug)] #[derive(Debug)]
struct GoingAway { pub(crate) struct GoingAway {
/// Stores the highest stream ID of a GOAWAY that has been sent. /// Stores the highest stream ID of a GOAWAY that has been sent.
/// ///
/// It's illegal to send a subsequent GOAWAY with a higher ID. /// It's illegal to send a subsequent GOAWAY with a higher ID.
@ -98,9 +98,9 @@ impl GoAway {
self.is_user_initiated self.is_user_initiated
} }
/// Return the last Reason we've sent. /// Returns the going away info, if any.
pub fn going_away_reason(&self) -> Option<Reason> { pub fn going_away(&self) -> Option<&GoingAway> {
self.going_away.as_ref().map(|g| g.reason) self.going_away.as_ref()
} }
/// Returns if the connection should close now, or wait until idle. /// Returns if the connection should close now, or wait until idle.
@ -141,7 +141,7 @@ impl GoAway {
return Poll::Ready(Some(Ok(reason))); return Poll::Ready(Some(Ok(reason)));
} else if self.should_close_now() { } else if self.should_close_now() {
return match self.going_away_reason() { return match self.going_away().map(|going_away| going_away.reason) {
Some(reason) => Poll::Ready(Some(Ok(reason))), Some(reason) => Poll::Ready(Some(Ok(reason))),
None => Poll::Ready(None), None => Poll::Ready(None),
}; };
@ -150,3 +150,9 @@ impl GoAway {
Poll::Ready(None) Poll::Ready(None)
} }
} }
impl GoingAway {
pub(crate) fn reason(&self) -> Reason {
self.reason
}
}

5
third_party/rust/h2/src/proto/mod.rs поставляемый
Просмотреть файл

@ -7,10 +7,10 @@ mod settings;
mod streams; mod streams;
pub(crate) use self::connection::{Config, Connection}; pub(crate) use self::connection::{Config, Connection};
pub(crate) use self::error::Error; pub use self::error::{Error, Initiator};
pub(crate) use self::peer::{Dyn as DynPeer, Peer}; pub(crate) use self::peer::{Dyn as DynPeer, Peer};
pub(crate) use self::ping_pong::UserPings; pub(crate) use self::ping_pong::UserPings;
pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams}; pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams};
pub(crate) use self::streams::{Open, PollReset, Prioritized}; pub(crate) use self::streams::{Open, PollReset, Prioritized};
use crate::codec::Codec; use crate::codec::Codec;
@ -33,3 +33,4 @@ pub type WindowSize = u32;
pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1; pub const MAX_WINDOW_SIZE: WindowSize = (1 << 31) - 1;
pub const DEFAULT_RESET_STREAM_MAX: usize = 10; pub const DEFAULT_RESET_STREAM_MAX: usize = 10;
pub const DEFAULT_RESET_STREAM_SECS: u64 = 30; pub const DEFAULT_RESET_STREAM_SECS: u64 = 30;
pub const DEFAULT_MAX_SEND_BUFFER_SIZE: usize = 1024 * 400;

14
third_party/rust/h2/src/proto/peer.rs поставляемый
Просмотреть файл

@ -1,7 +1,6 @@
use crate::codec::RecvError;
use crate::error::Reason; use crate::error::Reason;
use crate::frame::{Pseudo, StreamId}; use crate::frame::{Pseudo, StreamId};
use crate::proto::Open; use crate::proto::{Error, Open};
use http::{HeaderMap, Request, Response}; use http::{HeaderMap, Request, Response};
@ -11,6 +10,7 @@ use std::fmt;
pub(crate) trait Peer { pub(crate) trait Peer {
/// Message type polled from the transport /// Message type polled from the transport
type Poll: fmt::Debug; type Poll: fmt::Debug;
const NAME: &'static str;
fn r#dyn() -> Dyn; fn r#dyn() -> Dyn;
@ -20,7 +20,7 @@ pub(crate) trait Peer {
pseudo: Pseudo, pseudo: Pseudo,
fields: HeaderMap, fields: HeaderMap,
stream_id: StreamId, stream_id: StreamId,
) -> Result<Self::Poll, RecvError>; ) -> Result<Self::Poll, Error>;
fn is_local_init(id: StreamId) -> bool { fn is_local_init(id: StreamId) -> bool {
assert!(!id.is_zero()); assert!(!id.is_zero());
@ -60,7 +60,7 @@ impl Dyn {
pseudo: Pseudo, pseudo: Pseudo,
fields: HeaderMap, fields: HeaderMap,
stream_id: StreamId, stream_id: StreamId,
) -> Result<PollMessage, RecvError> { ) -> Result<PollMessage, Error> {
if self.is_server() { if self.is_server() {
crate::server::Peer::convert_poll_message(pseudo, fields, stream_id) crate::server::Peer::convert_poll_message(pseudo, fields, stream_id)
.map(PollMessage::Server) .map(PollMessage::Server)
@ -71,12 +71,12 @@ impl Dyn {
} }
/// Returns true if the remote peer can initiate a stream with the given ID. /// Returns true if the remote peer can initiate a stream with the given ID.
pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), RecvError> { pub fn ensure_can_open(&self, id: StreamId, mode: Open) -> Result<(), Error> {
if self.is_server() { if self.is_server() {
// Ensure that the ID is a valid client initiated ID // Ensure that the ID is a valid client initiated ID
if mode.is_push_promise() || !id.is_client_initiated() { if mode.is_push_promise() || !id.is_client_initiated() {
proto_err!(conn: "cannot open stream {:?} - not client initiated", id); proto_err!(conn: "cannot open stream {:?} - not client initiated", id);
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
} }
Ok(()) Ok(())
@ -84,7 +84,7 @@ impl Dyn {
// Ensure that the ID is a valid server initiated ID // Ensure that the ID is a valid server initiated ID
if !mode.is_push_promise() || !id.is_server_initiated() { if !mode.is_push_promise() || !id.is_server_initiated() {
proto_err!(conn: "cannot open stream {:?} - not server initiated", id); proto_err!(conn: "cannot open stream {:?} - not server initiated", id);
return Err(RecvError::Connection(Reason::PROTOCOL_ERROR)); return Err(Error::library_go_away(Reason::PROTOCOL_ERROR));
} }
Ok(()) Ok(())

51
third_party/rust/h2/src/proto/ping_pong.rs поставляемый
Просмотреть файл

@ -107,7 +107,7 @@ impl PingPong {
&Ping::SHUTDOWN, &Ping::SHUTDOWN,
"pending_ping should be for shutdown", "pending_ping should be for shutdown",
); );
log::trace!("recv PING SHUTDOWN ack"); tracing::trace!("recv PING SHUTDOWN ack");
return ReceivedPing::Shutdown; return ReceivedPing::Shutdown;
} }
@ -117,7 +117,7 @@ impl PingPong {
if let Some(ref users) = self.user_pings { if let Some(ref users) = self.user_pings {
if ping.payload() == &Ping::USER && users.receive_pong() { if ping.payload() == &Ping::USER && users.receive_pong() {
log::trace!("recv PING USER ack"); tracing::trace!("recv PING USER ack");
return ReceivedPing::Unknown; return ReceivedPing::Unknown;
} }
} }
@ -125,7 +125,7 @@ impl PingPong {
// else we were acked a ping we didn't send? // else we were acked a ping we didn't send?
// The spec doesn't require us to do anything about this, // The spec doesn't require us to do anything about this,
// so for resiliency, just ignore it for now. // so for resiliency, just ignore it for now.
log::warn!("recv PING ack that we never sent: {:?}", ping); tracing::warn!("recv PING ack that we never sent: {:?}", ping);
ReceivedPing::Unknown ReceivedPing::Unknown
} else { } else {
// Save the ping's payload to be sent as an acknowledgement. // Save the ping's payload to be sent as an acknowledgement.
@ -211,11 +211,16 @@ impl ReceivedPing {
impl UserPings { impl UserPings {
pub(crate) fn send_ping(&self) -> Result<(), Option<proto::Error>> { pub(crate) fn send_ping(&self) -> Result<(), Option<proto::Error>> {
let prev = self.0.state.compare_and_swap( let prev = self
USER_STATE_EMPTY, // current .0
USER_STATE_PENDING_PING, // new .state
Ordering::AcqRel, .compare_exchange(
); USER_STATE_EMPTY, // current
USER_STATE_PENDING_PING, // new
Ordering::AcqRel,
Ordering::Acquire,
)
.unwrap_or_else(|v| v);
match prev { match prev {
USER_STATE_EMPTY => { USER_STATE_EMPTY => {
@ -234,11 +239,16 @@ impl UserPings {
// Must register before checking state, in case state were to change // Must register before checking state, in case state were to change
// before we could register, and then the ping would just be lost. // before we could register, and then the ping would just be lost.
self.0.pong_task.register(cx.waker()); self.0.pong_task.register(cx.waker());
let prev = self.0.state.compare_and_swap( let prev = self
USER_STATE_RECEIVED_PONG, // current .0
USER_STATE_EMPTY, // new .state
Ordering::AcqRel, .compare_exchange(
); USER_STATE_RECEIVED_PONG, // current
USER_STATE_EMPTY, // new
Ordering::AcqRel,
Ordering::Acquire,
)
.unwrap_or_else(|v| v);
match prev { match prev {
USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())), USER_STATE_RECEIVED_PONG => Poll::Ready(Ok(())),
@ -252,11 +262,16 @@ impl UserPings {
impl UserPingsRx { impl UserPingsRx {
fn receive_pong(&self) -> bool { fn receive_pong(&self) -> bool {
let prev = self.0.state.compare_and_swap( let prev = self
USER_STATE_PENDING_PONG, // current .0
USER_STATE_RECEIVED_PONG, // new .state
Ordering::AcqRel, .compare_exchange(
); USER_STATE_PENDING_PONG, // current
USER_STATE_RECEIVED_PONG, // new
Ordering::AcqRel,
Ordering::Acquire,
)
.unwrap_or_else(|v| v);
if prev == USER_STATE_PENDING_PONG { if prev == USER_STATE_PENDING_PONG {
self.0.pong_task.wake(); self.0.pong_task.wake();

20
third_party/rust/h2/src/proto/settings.rs поставляемый
Просмотреть файл

@ -1,4 +1,4 @@
use crate::codec::{RecvError, UserError}; use crate::codec::UserError;
use crate::error::Reason; use crate::error::Reason;
use crate::frame; use crate::frame;
use crate::proto::*; use crate::proto::*;
@ -40,7 +40,7 @@ impl Settings {
frame: frame::Settings, frame: frame::Settings,
codec: &mut Codec<T, B>, codec: &mut Codec<T, B>,
streams: &mut Streams<C, P>, streams: &mut Streams<C, P>,
) -> Result<(), RecvError> ) -> Result<(), Error>
where where
T: AsyncWrite + Unpin, T: AsyncWrite + Unpin,
B: Buf, B: Buf,
@ -50,7 +50,7 @@ impl Settings {
if frame.is_ack() { if frame.is_ack() {
match &self.local { match &self.local {
Local::WaitingAck(local) => { Local::WaitingAck(local) => {
log::debug!("received settings ACK; applying {:?}", local); tracing::debug!("received settings ACK; applying {:?}", local);
if let Some(max) = local.max_frame_size() { if let Some(max) = local.max_frame_size() {
codec.set_max_recv_frame_size(max as usize); codec.set_max_recv_frame_size(max as usize);
@ -68,7 +68,7 @@ impl Settings {
// We haven't sent any SETTINGS frames to be ACKed, so // We haven't sent any SETTINGS frames to be ACKed, so
// this is very bizarre! Remote is either buggy or malicious. // this is very bizarre! Remote is either buggy or malicious.
proto_err!(conn: "received unexpected settings ack"); proto_err!(conn: "received unexpected settings ack");
Err(RecvError::Connection(Reason::PROTOCOL_ERROR)) Err(Error::library_go_away(Reason::PROTOCOL_ERROR))
} }
} }
} else { } else {
@ -85,7 +85,7 @@ impl Settings {
match &self.local { match &self.local {
Local::ToSend(..) | Local::WaitingAck(..) => Err(UserError::SendSettingsWhilePending), Local::ToSend(..) | Local::WaitingAck(..) => Err(UserError::SendSettingsWhilePending),
Local::Synced => { Local::Synced => {
log::trace!("queue to send local settings: {:?}", frame); tracing::trace!("queue to send local settings: {:?}", frame);
self.local = Local::ToSend(frame); self.local = Local::ToSend(frame);
Ok(()) Ok(())
} }
@ -97,7 +97,7 @@ impl Settings {
cx: &mut Context, cx: &mut Context,
dst: &mut Codec<T, B>, dst: &mut Codec<T, B>,
streams: &mut Streams<C, P>, streams: &mut Streams<C, P>,
) -> Poll<Result<(), RecvError>> ) -> Poll<Result<(), Error>>
where where
T: AsyncWrite + Unpin, T: AsyncWrite + Unpin,
B: Buf, B: Buf,
@ -115,7 +115,9 @@ impl Settings {
// Buffer the settings frame // Buffer the settings frame
dst.buffer(frame.into()).expect("invalid settings frame"); dst.buffer(frame.into()).expect("invalid settings frame");
log::trace!("ACK sent; applying settings"); tracing::trace!("ACK sent; applying settings");
streams.apply_remote_settings(settings)?;
if let Some(val) = settings.header_table_size() { if let Some(val) = settings.header_table_size() {
dst.set_send_header_table_size(val as usize); dst.set_send_header_table_size(val as usize);
@ -124,8 +126,6 @@ impl Settings {
if let Some(val) = settings.max_frame_size() { if let Some(val) = settings.max_frame_size() {
dst.set_max_send_frame_size(val as usize); dst.set_max_send_frame_size(val as usize);
} }
streams.apply_remote_settings(settings)?;
} }
self.remote = None; self.remote = None;
@ -139,7 +139,7 @@ impl Settings {
// Buffer the settings frame // Buffer the settings frame
dst.buffer(settings.clone().into()) dst.buffer(settings.clone().into())
.expect("invalid settings frame"); .expect("invalid settings frame");
log::trace!("local settings sent; waiting for ack: {:?}", settings); tracing::trace!("local settings sent; waiting for ack: {:?}", settings);
self.local = Local::WaitingAck(settings.clone()); self.local = Local::WaitingAck(settings.clone());
} }

Просмотреть файл

@ -92,13 +92,4 @@ impl Deque {
None => None, None => None,
} }
} }
/*
pub fn peek_front<'a, T>(&self, buf: &'a Buffer<T>) -> Option<&'a T> {
match self.indices {
Some(idxs) => Some(&buf.slab[idxs.head].value),
None => None,
}
}
*/
} }

Просмотреть файл

@ -133,7 +133,7 @@ impl Counts {
// TODO: move this to macro? // TODO: move this to macro?
pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) { pub fn transition_after(&mut self, mut stream: store::Ptr, is_reset_counted: bool) {
log::trace!( tracing::trace!(
"transition_after; stream={:?}; state={:?}; is_closed={:?}; \ "transition_after; stream={:?}; state={:?}; is_closed={:?}; \
pending_send_empty={:?}; buffered_send_data={}; \ pending_send_empty={:?}; buffered_send_data={}; \
num_recv={}; num_send={}", num_recv={}; num_send={}",
@ -155,7 +155,7 @@ impl Counts {
} }
if stream.is_counted { if stream.is_counted {
log::trace!("dec_num_streams; stream={:?}", stream.id); tracing::trace!("dec_num_streams; stream={:?}", stream.id);
// Decrement the number of active streams. // Decrement the number of active streams.
self.dec_num_streams(&mut stream); self.dec_num_streams(&mut stream);
} }
@ -167,6 +167,18 @@ impl Counts {
} }
} }
/// Returns the maximum number of streams that can be initiated by this
/// peer.
pub(crate) fn max_send_streams(&self) -> usize {
self.max_send_streams
}
/// Returns the maximum number of streams that can be initiated by the
/// remote peer.
pub(crate) fn max_recv_streams(&self) -> usize {
self.max_recv_streams
}
fn dec_num_streams(&mut self, stream: &mut store::Ptr) { fn dec_num_streams(&mut self, stream: &mut store::Ptr) {
assert!(stream.is_counted); assert!(stream.is_counted);

Просмотреть файл

@ -120,7 +120,7 @@ impl FlowControl {
return Err(Reason::FLOW_CONTROL_ERROR); return Err(Reason::FLOW_CONTROL_ERROR);
} }
log::trace!( tracing::trace!(
"inc_window; sz={}; old={}; new={}", "inc_window; sz={}; old={}; new={}",
sz, sz,
self.window_size, self.window_size,
@ -136,7 +136,7 @@ impl FlowControl {
/// This is called after receiving a SETTINGS frame with a lower /// This is called after receiving a SETTINGS frame with a lower
/// INITIAL_WINDOW_SIZE value. /// INITIAL_WINDOW_SIZE value.
pub fn dec_send_window(&mut self, sz: WindowSize) { pub fn dec_send_window(&mut self, sz: WindowSize) {
log::trace!( tracing::trace!(
"dec_window; sz={}; window={}, available={}", "dec_window; sz={}; window={}, available={}",
sz, sz,
self.window_size, self.window_size,
@ -151,7 +151,7 @@ impl FlowControl {
/// This is called after receiving a SETTINGS ACK frame with a lower /// This is called after receiving a SETTINGS ACK frame with a lower
/// INITIAL_WINDOW_SIZE value. /// INITIAL_WINDOW_SIZE value.
pub fn dec_recv_window(&mut self, sz: WindowSize) { pub fn dec_recv_window(&mut self, sz: WindowSize) {
log::trace!( tracing::trace!(
"dec_recv_window; sz={}; window={}, available={}", "dec_recv_window; sz={}; window={}, available={}",
sz, sz,
self.window_size, self.window_size,
@ -165,7 +165,7 @@ impl FlowControl {
/// Decrements the window reflecting data has actually been sent. The caller /// Decrements the window reflecting data has actually been sent. The caller
/// must ensure that the window has capacity. /// must ensure that the window has capacity.
pub fn send_data(&mut self, sz: WindowSize) { pub fn send_data(&mut self, sz: WindowSize) {
log::trace!( tracing::trace!(
"send_data; sz={}; window={}; available={}", "send_data; sz={}; window={}; available={}",
sz, sz,
self.window_size, self.window_size,
@ -173,7 +173,7 @@ impl FlowControl {
); );
// Ensure that the argument is correct // Ensure that the argument is correct
assert!(sz <= self.window_size); assert!(self.window_size >= sz as usize);
// Update values // Update values
self.window_size -= sz; self.window_size -= sz;
@ -206,38 +206,22 @@ impl Window {
} }
} }
impl PartialEq<WindowSize> for Window { impl PartialEq<usize> for Window {
fn eq(&self, other: &WindowSize) -> bool { fn eq(&self, other: &usize) -> bool {
if self.0 < 0 { if self.0 < 0 {
false false
} else { } else {
(self.0 as WindowSize).eq(other) (self.0 as usize).eq(other)
} }
} }
} }
impl PartialEq<Window> for WindowSize { impl PartialOrd<usize> for Window {
fn eq(&self, other: &Window) -> bool { fn partial_cmp(&self, other: &usize) -> Option<::std::cmp::Ordering> {
other.eq(self)
}
}
impl PartialOrd<WindowSize> for Window {
fn partial_cmp(&self, other: &WindowSize) -> Option<::std::cmp::Ordering> {
if self.0 < 0 { if self.0 < 0 {
Some(::std::cmp::Ordering::Less) Some(::std::cmp::Ordering::Less)
} else { } else {
(self.0 as WindowSize).partial_cmp(other) (self.0 as usize).partial_cmp(other)
}
}
}
impl PartialOrd<Window> for WindowSize {
fn partial_cmp(&self, other: &Window) -> Option<::std::cmp::Ordering> {
if other.0 < 0 {
Some(::std::cmp::Ordering::Greater)
} else {
self.partial_cmp(&(other.0 as WindowSize))
} }
} }
} }

Просмотреть файл

@ -12,7 +12,7 @@ mod streams;
pub(crate) use self::prioritize::Prioritized; pub(crate) use self::prioritize::Prioritized;
pub(crate) use self::recv::Open; pub(crate) use self::recv::Open;
pub(crate) use self::send::PollReset; pub(crate) use self::send::PollReset;
pub(crate) use self::streams::{OpaqueStreamRef, StreamRef, Streams}; pub(crate) use self::streams::{DynStreams, OpaqueStreamRef, StreamRef, Streams};
use self::buffer::Buffer; use self::buffer::Buffer;
use self::counts::Counts; use self::counts::Counts;
@ -41,12 +41,18 @@ pub struct Config {
/// MAX_CONCURRENT_STREAMS specified in the frame. /// MAX_CONCURRENT_STREAMS specified in the frame.
pub initial_max_send_streams: usize, pub initial_max_send_streams: usize,
/// Max amount of DATA bytes to buffer per stream.
pub local_max_buffer_size: usize,
/// The stream ID to start the next local stream with /// The stream ID to start the next local stream with
pub local_next_stream_id: StreamId, pub local_next_stream_id: StreamId,
/// If the local peer is willing to receive push promises /// If the local peer is willing to receive push promises
pub local_push_enabled: bool, pub local_push_enabled: bool,
/// If extended connect protocol is enabled.
pub extended_connect_protocol_enabled: bool,
/// How long a locally reset stream should ignore frames /// How long a locally reset stream should ignore frames
pub local_reset_duration: Duration, pub local_reset_duration: Duration,

Просмотреть файл

@ -6,7 +6,7 @@ use crate::frame::{Reason, StreamId};
use crate::codec::UserError; use crate::codec::UserError;
use crate::codec::UserError::*; use crate::codec::UserError::*;
use bytes::buf::ext::{BufExt, Take}; use bytes::buf::{Buf, Take};
use std::io; use std::io;
use std::task::{Context, Poll, Waker}; use std::task::{Context, Poll, Waker};
use std::{cmp, fmt, mem}; use std::{cmp, fmt, mem};
@ -18,7 +18,7 @@ use std::{cmp, fmt, mem};
/// This is because "idle" stream IDs – those which have been initiated but /// This is because "idle" stream IDs – those which have been initiated but
/// have yet to receive frames – will be implicitly closed on receipt of a /// have yet to receive frames – will be implicitly closed on receipt of a
/// frame on a higher stream ID. If these queues was not ordered by stream /// frame on a higher stream ID. If these queues was not ordered by stream
/// IDs, some mechanism would be necessary to ensure that the lowest-numberedh] /// IDs, some mechanism would be necessary to ensure that the lowest-numbered]
/// idle stream is opened first. /// idle stream is opened first.
#[derive(Debug)] #[derive(Debug)]
pub(super) struct Prioritize { pub(super) struct Prioritize {
@ -51,6 +51,9 @@ pub(super) struct Prioritize {
/// What `DATA` frame is currently being sent in the codec. /// What `DATA` frame is currently being sent in the codec.
in_flight_data_frame: InFlightData, in_flight_data_frame: InFlightData,
/// The maximum amount of bytes a stream should buffer.
max_buffer_size: usize,
} }
#[derive(Debug, Eq, PartialEq)] #[derive(Debug, Eq, PartialEq)]
@ -84,7 +87,7 @@ impl Prioritize {
flow.assign_capacity(config.remote_init_window_sz); flow.assign_capacity(config.remote_init_window_sz);
log::trace!("Prioritize::new; flow={:?}", flow); tracing::trace!("Prioritize::new; flow={:?}", flow);
Prioritize { Prioritize {
pending_send: store::Queue::new(), pending_send: store::Queue::new(),
@ -93,9 +96,14 @@ impl Prioritize {
flow, flow,
last_opened_id: StreamId::ZERO, last_opened_id: StreamId::ZERO,
in_flight_data_frame: InFlightData::Nothing, in_flight_data_frame: InFlightData::Nothing,
max_buffer_size: config.local_max_buffer_size,
} }
} }
pub(crate) fn max_buffer_size(&self) -> usize {
self.max_buffer_size
}
/// Queue a frame to be sent to the remote /// Queue a frame to be sent to the remote
pub fn queue_frame<B>( pub fn queue_frame<B>(
&mut self, &mut self,
@ -104,6 +112,8 @@ impl Prioritize {
stream: &mut store::Ptr, stream: &mut store::Ptr,
task: &mut Option<Waker>, task: &mut Option<Waker>,
) { ) {
let span = tracing::trace_span!("Prioritize::queue_frame", ?stream.id);
let _e = span.enter();
// Queue the frame in the buffer // Queue the frame in the buffer
stream.pending_send.push_back(buffer, frame); stream.pending_send.push_back(buffer, frame);
self.schedule_send(stream, task); self.schedule_send(stream, task);
@ -112,7 +122,7 @@ impl Prioritize {
pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) { pub fn schedule_send(&mut self, stream: &mut store::Ptr, task: &mut Option<Waker>) {
// If the stream is waiting to be opened, nothing more to do. // If the stream is waiting to be opened, nothing more to do.
if stream.is_send_ready() { if stream.is_send_ready() {
log::trace!("schedule_send; {:?}", stream.id); tracing::trace!(?stream.id, "schedule_send");
// Queue the stream // Queue the stream
self.pending_send.push(stream); self.pending_send.push(stream);
@ -156,20 +166,19 @@ impl Prioritize {
} }
// Update the buffered data counter // Update the buffered data counter
stream.buffered_send_data += sz; stream.buffered_send_data += sz as usize;
log::trace!( let span =
"send_data; sz={}; buffered={}; requested={}", tracing::trace_span!("send_data", sz, requested = stream.requested_send_capacity);
sz, let _e = span.enter();
stream.buffered_send_data, tracing::trace!(buffered = stream.buffered_send_data);
stream.requested_send_capacity
);
// Implicitly request more send capacity if not enough has been // Implicitly request more send capacity if not enough has been
// requested yet. // requested yet.
if stream.requested_send_capacity < stream.buffered_send_data { if (stream.requested_send_capacity as usize) < stream.buffered_send_data {
// Update the target requested capacity // Update the target requested capacity
stream.requested_send_capacity = stream.buffered_send_data; stream.requested_send_capacity =
cmp::min(stream.buffered_send_data, WindowSize::MAX as usize) as WindowSize;
self.try_assign_capacity(stream); self.try_assign_capacity(stream);
} }
@ -179,10 +188,9 @@ impl Prioritize {
self.reserve_capacity(0, stream, counts); self.reserve_capacity(0, stream, counts);
} }
log::trace!( tracing::trace!(
"send_data (2); available={}; buffered={}", available = %stream.send_flow.available(),
stream.send_flow.available(), buffered = stream.buffered_send_data,
stream.buffered_send_data
); );
// The `stream.buffered_send_data == 0` check is here so that, if a zero // The `stream.buffered_send_data == 0` check is here so that, if a zero
@ -214,31 +222,32 @@ impl Prioritize {
stream: &mut store::Ptr, stream: &mut store::Ptr,
counts: &mut Counts, counts: &mut Counts,
) { ) {
log::trace!( let span = tracing::trace_span!(
"reserve_capacity; stream={:?}; requested={:?}; effective={:?}; curr={:?}", "reserve_capacity",
stream.id, ?stream.id,
capacity, requested = capacity,
capacity + stream.buffered_send_data, effective = (capacity as usize) + stream.buffered_send_data,
stream.requested_send_capacity curr = stream.requested_send_capacity
); );
let _e = span.enter();
// Actual capacity is `capacity` + the current amount of buffered data. // Actual capacity is `capacity` + the current amount of buffered data.
// If it were less, then we could never send out the buffered data. // If it were less, then we could never send out the buffered data.
let capacity = capacity + stream.buffered_send_data; let capacity = (capacity as usize) + stream.buffered_send_data;
if capacity == stream.requested_send_capacity { if capacity == stream.requested_send_capacity as usize {
// Nothing to do // Nothing to do
} else if capacity < stream.requested_send_capacity { } else if capacity < stream.requested_send_capacity as usize {
// Update the target requested capacity // Update the target requested capacity
stream.requested_send_capacity = capacity; stream.requested_send_capacity = capacity as WindowSize;
// Currently available capacity assigned to the stream // Currently available capacity assigned to the stream
let available = stream.send_flow.available().as_size(); let available = stream.send_flow.available().as_size();
// If the stream has more assigned capacity than requested, reclaim // If the stream has more assigned capacity than requested, reclaim
// some for the connection // some for the connection
if available > capacity { if available as usize > capacity {
let diff = available - capacity; let diff = available - capacity as WindowSize;
stream.send_flow.claim_capacity(diff); stream.send_flow.claim_capacity(diff);
@ -252,7 +261,8 @@ impl Prioritize {
} }
// Update the target requested capacity // Update the target requested capacity
stream.requested_send_capacity = capacity; stream.requested_send_capacity =
cmp::min(capacity, WindowSize::MAX as usize) as WindowSize;
// Try to assign additional capacity to the stream. If none is // Try to assign additional capacity to the stream. If none is
// currently available, the stream will be queued to receive some // currently available, the stream will be queued to receive some
@ -266,13 +276,14 @@ impl Prioritize {
inc: WindowSize, inc: WindowSize,
stream: &mut store::Ptr, stream: &mut store::Ptr,
) -> Result<(), Reason> { ) -> Result<(), Reason> {
log::trace!( let span = tracing::trace_span!(
"recv_stream_window_update; stream={:?}; state={:?}; inc={}; flow={:?}", "recv_stream_window_update",
stream.id, ?stream.id,
stream.state, ?stream.state,
inc, inc,
stream.send_flow flow = ?stream.send_flow
); );
let _e = span.enter();
if stream.state.is_send_closed() && stream.buffered_send_data == 0 { if stream.state.is_send_closed() && stream.buffered_send_data == 0 {
// We can't send any data, so don't bother doing anything else. // We can't send any data, so don't bother doing anything else.
@ -315,8 +326,8 @@ impl Prioritize {
/// it to the connection /// it to the connection
pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) { pub fn reclaim_reserved_capacity(&mut self, stream: &mut store::Ptr, counts: &mut Counts) {
// only reclaim requested capacity that isn't already buffered // only reclaim requested capacity that isn't already buffered
if stream.requested_send_capacity > stream.buffered_send_data { if stream.requested_send_capacity as usize > stream.buffered_send_data {
let reserved = stream.requested_send_capacity - stream.buffered_send_data; let reserved = stream.requested_send_capacity - stream.buffered_send_data as WindowSize;
stream.send_flow.claim_capacity(reserved); stream.send_flow.claim_capacity(reserved);
self.assign_connection_capacity(reserved, stream, counts); self.assign_connection_capacity(reserved, stream, counts);
@ -324,9 +335,11 @@ impl Prioritize {
} }
pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) { pub fn clear_pending_capacity(&mut self, store: &mut Store, counts: &mut Counts) {
let span = tracing::trace_span!("clear_pending_capacity");
let _e = span.enter();
while let Some(stream) = self.pending_capacity.pop(store) { while let Some(stream) = self.pending_capacity.pop(store) {
counts.transition(stream, |_, stream| { counts.transition(stream, |_, stream| {
log::trace!("clear_pending_capacity; stream={:?}", stream.id); tracing::trace!(?stream.id, "clear_pending_capacity");
}) })
} }
} }
@ -339,7 +352,8 @@ impl Prioritize {
) where ) where
R: Resolve, R: Resolve,
{ {
log::trace!("assign_connection_capacity; inc={}", inc); let span = tracing::trace_span!("assign_connection_capacity", inc);
let _e = span.enter();
self.flow.assign_capacity(inc); self.flow.assign_capacity(inc);
@ -373,7 +387,7 @@ impl Prioritize {
// Total requested should never go below actual assigned // Total requested should never go below actual assigned
// (Note: the window size can go lower than assigned) // (Note: the window size can go lower than assigned)
debug_assert!(total_requested >= stream.send_flow.available()); debug_assert!(stream.send_flow.available() <= total_requested as usize);
// The amount of additional capacity that the stream requests. // The amount of additional capacity that the stream requests.
// Don't assign more than the window has available! // Don't assign more than the window has available!
@ -382,15 +396,14 @@ impl Prioritize {
// Can't assign more than what is available // Can't assign more than what is available
stream.send_flow.window_size() - stream.send_flow.available().as_size(), stream.send_flow.window_size() - stream.send_flow.available().as_size(),
); );
let span = tracing::trace_span!("try_assign_capacity", ?stream.id);
log::trace!( let _e = span.enter();
"try_assign_capacity; stream={:?}, requested={}; additional={}; buffered={}; window={}; conn={}", tracing::trace!(
stream.id, requested = total_requested,
total_requested,
additional, additional,
stream.buffered_send_data, buffered = stream.buffered_send_data,
stream.send_flow.window_size(), window = stream.send_flow.window_size(),
self.flow.available() conn = %self.flow.available()
); );
if additional == 0 { if additional == 0 {
@ -416,24 +429,23 @@ impl Prioritize {
// TODO: Should prioritization factor into this? // TODO: Should prioritization factor into this?
let assign = cmp::min(conn_available, additional); let assign = cmp::min(conn_available, additional);
log::trace!(" assigning; stream={:?}, capacity={}", stream.id, assign,); tracing::trace!(capacity = assign, "assigning");
// Assign the capacity to the stream // Assign the capacity to the stream
stream.assign_capacity(assign); stream.assign_capacity(assign, self.max_buffer_size);
// Claim the capacity from the connection // Claim the capacity from the connection
self.flow.claim_capacity(assign); self.flow.claim_capacity(assign);
} }
log::trace!( tracing::trace!(
"try_assign_capacity(2); available={}; requested={}; buffered={}; has_unavailable={:?}", available = %stream.send_flow.available(),
stream.send_flow.available(), requested = stream.requested_send_capacity,
stream.requested_send_capacity, buffered = stream.buffered_send_data,
stream.buffered_send_data, has_unavailable = %stream.send_flow.has_unavailable()
stream.send_flow.has_unavailable()
); );
if stream.send_flow.available() < stream.requested_send_capacity if stream.send_flow.available() < stream.requested_send_capacity as usize
&& stream.send_flow.has_unavailable() && stream.send_flow.has_unavailable()
{ {
// The stream requires additional capacity and the stream's // The stream requires additional capacity and the stream's
@ -485,14 +497,14 @@ impl Prioritize {
// The max frame length // The max frame length
let max_frame_len = dst.max_send_frame_size(); let max_frame_len = dst.max_send_frame_size();
log::trace!("poll_complete"); tracing::trace!("poll_complete");
loop { loop {
self.schedule_pending_open(store, counts); self.schedule_pending_open(store, counts);
match self.pop_frame(buffer, store, max_frame_len, counts) { match self.pop_frame(buffer, store, max_frame_len, counts) {
Some(frame) => { Some(frame) => {
log::trace!("writing frame={:?}", frame); tracing::trace!(?frame, "writing");
debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing); debug_assert_eq!(self.in_flight_data_frame, InFlightData::Nothing);
if let Frame::Data(ref frame) = frame { if let Frame::Data(ref frame) = frame {
@ -538,47 +550,62 @@ impl Prioritize {
where where
B: Buf, B: Buf,
{ {
log::trace!("try reclaim frame"); let span = tracing::trace_span!("try_reclaim_frame");
let _e = span.enter();
// First check if there are any data chunks to take back // First check if there are any data chunks to take back
if let Some(frame) = dst.take_last_data_frame() { if let Some(frame) = dst.take_last_data_frame() {
log::trace!( self.reclaim_frame_inner(buffer, store, frame)
" -> reclaimed; frame={:?}; sz={}", } else {
frame, false
frame.payload().inner.get_ref().remaining() }
); }
let mut eos = false; fn reclaim_frame_inner<B>(
let key = frame.payload().stream; &mut self,
buffer: &mut Buffer<Frame<B>>,
store: &mut Store,
frame: frame::Data<Prioritized<B>>,
) -> bool
where
B: Buf,
{
tracing::trace!(
?frame,
sz = frame.payload().inner.get_ref().remaining(),
"reclaimed"
);
match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) { let mut eos = false;
InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"), let key = frame.payload().stream;
InFlightData::Drop => {
log::trace!("not reclaiming frame for cancelled stream"); match mem::replace(&mut self.in_flight_data_frame, InFlightData::Nothing) {
return false; InFlightData::Nothing => panic!("wasn't expecting a frame to reclaim"),
} InFlightData::Drop => {
InFlightData::DataFrame(k) => { tracing::trace!("not reclaiming frame for cancelled stream");
debug_assert_eq!(k, key); return false;
} }
InFlightData::DataFrame(k) => {
debug_assert_eq!(k, key);
}
}
let mut frame = frame.map(|prioritized| {
// TODO: Ensure fully written
eos = prioritized.end_of_stream;
prioritized.inner.into_inner()
});
if frame.payload().has_remaining() {
let mut stream = store.resolve(key);
if eos {
frame.set_end_stream(true);
} }
let mut frame = frame.map(|prioritized| { self.push_back_frame(frame.into(), buffer, &mut stream);
// TODO: Ensure fully written
eos = prioritized.end_of_stream;
prioritized.inner.into_inner()
});
if frame.payload().has_remaining() { return true;
let mut stream = store.resolve(key);
if eos {
frame.set_end_stream(true);
}
self.push_back_frame(frame.into(), buffer, &mut stream);
return true;
}
} }
false false
@ -603,11 +630,12 @@ impl Prioritize {
} }
pub fn clear_queue<B>(&mut self, buffer: &mut Buffer<Frame<B>>, stream: &mut store::Ptr) { pub fn clear_queue<B>(&mut self, buffer: &mut Buffer<Frame<B>>, stream: &mut store::Ptr) {
log::trace!("clear_queue; stream={:?}", stream.id); let span = tracing::trace_span!("clear_queue", ?stream.id);
let _e = span.enter();
// TODO: make this more efficient? // TODO: make this more efficient?
while let Some(frame) = stream.pending_send.pop_front(buffer) { while let Some(frame) = stream.pending_send.pop_front(buffer) {
log::trace!("dropping; frame={:?}", frame); tracing::trace!(?frame, "dropping");
} }
stream.buffered_send_data = 0; stream.buffered_send_data = 0;
@ -644,16 +672,14 @@ impl Prioritize {
where where
B: Buf, B: Buf,
{ {
log::trace!("pop_frame"); let span = tracing::trace_span!("pop_frame");
let _e = span.enter();
loop { loop {
match self.pending_send.pop(store) { match self.pending_send.pop(store) {
Some(mut stream) => { Some(mut stream) => {
log::trace!( let span = tracing::trace_span!("popped", ?stream.id, ?stream.state);
"pop_frame; stream={:?}; stream.state={:?}", let _e = span.enter();
stream.id,
stream.state
);
// It's possible that this stream, besides having data to send, // It's possible that this stream, besides having data to send,
// is also queued to send a reset, and thus is already in the queue // is also queued to send a reset, and thus is already in the queue
@ -662,11 +688,7 @@ impl Prioritize {
// To be safe, we just always ask the stream. // To be safe, we just always ask the stream.
let is_pending_reset = stream.is_pending_reset_expiration(); let is_pending_reset = stream.is_pending_reset_expiration();
log::trace!( tracing::trace!(is_pending_reset);
" --> stream={:?}; is_pending_reset={:?};",
stream.id,
is_pending_reset
);
let frame = match stream.pending_send.pop_front(buffer) { let frame = match stream.pending_send.pop_front(buffer) {
Some(Frame::Data(mut frame)) => { Some(Frame::Data(mut frame)) => {
@ -675,25 +697,20 @@ impl Prioritize {
let stream_capacity = stream.send_flow.available(); let stream_capacity = stream.send_flow.available();
let sz = frame.payload().remaining(); let sz = frame.payload().remaining();
log::trace!( tracing::trace!(
" --> data frame; stream={:?}; sz={}; eos={:?}; window={}; \
available={}; requested={}; buffered={};",
frame.stream_id(),
sz, sz,
frame.is_end_stream(), eos = frame.is_end_stream(),
stream_capacity, window = %stream_capacity,
stream.send_flow.available(), available = %stream.send_flow.available(),
stream.requested_send_capacity, requested = stream.requested_send_capacity,
stream.buffered_send_data, buffered = stream.buffered_send_data,
"data frame"
); );
// Zero length data frames always have capacity to // Zero length data frames always have capacity to
// be sent. // be sent.
if sz > 0 && stream_capacity == 0 { if sz > 0 && stream_capacity == 0 {
log::trace!( tracing::trace!("stream capacity is 0");
" --> stream capacity is 0; requested={}",
stream.requested_send_capacity
);
// Ensure that the stream is waiting for // Ensure that the stream is waiting for
// connection level capacity // connection level capacity
@ -721,34 +738,43 @@ impl Prioritize {
// capacity at this point. // capacity at this point.
debug_assert!(len <= self.flow.window_size()); debug_assert!(len <= self.flow.window_size());
log::trace!(" --> sending data frame; len={}", len); tracing::trace!(len, "sending data frame");
// Update the flow control // Update the flow control
log::trace!(" -- updating stream flow --"); tracing::trace_span!("updating stream flow").in_scope(|| {
stream.send_flow.send_data(len); stream.send_flow.send_data(len);
// Decrement the stream's buffered data counter // Decrement the stream's buffered data counter
debug_assert!(stream.buffered_send_data >= len); debug_assert!(stream.buffered_send_data >= len as usize);
stream.buffered_send_data -= len; stream.buffered_send_data -= len as usize;
stream.requested_send_capacity -= len; stream.requested_send_capacity -= len;
// Assign the capacity back to the connection that // If the capacity was limited because of the
// was just consumed from the stream in the previous // max_send_buffer_size, then consider waking
// line. // the send task again...
self.flow.assign_capacity(len); stream.notify_if_can_buffer_more(self.max_buffer_size);
log::trace!(" -- updating connection flow --"); // Assign the capacity back to the connection that
self.flow.send_data(len); // was just consumed from the stream in the previous
// line.
self.flow.assign_capacity(len);
});
// Wrap the frame's data payload to ensure that the let (eos, len) = tracing::trace_span!("updating connection flow")
// correct amount of data gets written. .in_scope(|| {
self.flow.send_data(len);
let eos = frame.is_end_stream(); // Wrap the frame's data payload to ensure that the
let len = len as usize; // correct amount of data gets written.
if frame.payload().remaining() > len { let eos = frame.is_end_stream();
frame.set_end_stream(false); let len = len as usize;
}
if frame.payload().remaining() > len {
frame.set_end_stream(false);
}
(eos, len)
});
Frame::Data(frame.map(|buf| Prioritized { Frame::Data(frame.map(|buf| Prioritized {
inner: buf.take(len), inner: buf.take(len),
@ -780,7 +806,10 @@ impl Prioritize {
}), }),
None => { None => {
if let Some(reason) = stream.state.get_scheduled_reset() { if let Some(reason) = stream.state.get_scheduled_reset() {
stream.state.set_reset(reason); let stream_id = stream.id;
stream
.state
.set_reset(stream_id, reason, Initiator::Library);
let frame = frame::Reset::new(stream.id, reason); let frame = frame::Reset::new(stream.id, reason);
Frame::Reset(frame) Frame::Reset(frame)
@ -789,7 +818,7 @@ impl Prioritize {
// had data buffered to be sent, but all the frames are cleared // had data buffered to be sent, but all the frames are cleared
// in clear_queue(). Instead of doing O(N) traversal through queue // in clear_queue(). Instead of doing O(N) traversal through queue
// to remove, lets just ignore the stream here. // to remove, lets just ignore the stream here.
log::trace!("removing dangling stream from pending_send"); tracing::trace!("removing dangling stream from pending_send");
// Since this should only happen as a consequence of `clear_queue`, // Since this should only happen as a consequence of `clear_queue`,
// we must be in a closed state of some kind. // we must be in a closed state of some kind.
debug_assert!(stream.state.is_closed()); debug_assert!(stream.state.is_closed());
@ -799,7 +828,7 @@ impl Prioritize {
} }
}; };
log::trace!("pop_frame; frame={:?}", frame); tracing::trace!("pop_frame; frame={:?}", frame);
if cfg!(debug_assertions) && stream.state.is_idle() { if cfg!(debug_assertions) && stream.state.is_idle() {
debug_assert!(stream.id > self.last_opened_id); debug_assert!(stream.id > self.last_opened_id);
@ -824,11 +853,11 @@ impl Prioritize {
} }
fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) { fn schedule_pending_open(&mut self, store: &mut Store, counts: &mut Counts) {
log::trace!("schedule_pending_open"); tracing::trace!("schedule_pending_open");
// check for any pending open streams // check for any pending open streams
while counts.can_inc_num_send_streams() { while counts.can_inc_num_send_streams() {
if let Some(mut stream) = self.pending_open.pop(store) { if let Some(mut stream) = self.pending_open.pop(store) {
log::trace!("schedule_pending_open; stream={:?}", stream.id); tracing::trace!("schedule_pending_open; stream={:?}", stream.id);
counts.inc_num_send_streams(&mut stream); counts.inc_num_send_streams(&mut stream);
self.pending_send.push(&mut stream); self.pending_send.push(&mut stream);
@ -850,8 +879,12 @@ where
self.inner.remaining() self.inner.remaining()
} }
fn bytes(&self) -> &[u8] { fn chunk(&self) -> &[u8] {
self.inner.bytes() self.inner.chunk()
}
fn chunks_vectored<'a>(&'a self, dst: &mut [std::io::IoSlice<'a>]) -> usize {
self.inner.chunks_vectored(dst)
} }
fn advance(&mut self, cnt: usize) { fn advance(&mut self, cnt: usize) {

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше