Bug 1895319 - neqo v0.7.8, r=necko-reviewers,valentin

Differential Revision: https://phabricator.services.mozilla.com/D210496
Kershaw Chang 2024-05-21 09:34:47 +00:00
Parent 76f79470e9
Commit 0dcbccfde8
53 changed files with 953 additions and 948 deletions


@ -90,9 +90,9 @@ git = "https://github.com/mozilla/mp4parse-rust"
rev = "a138e40ec1c603615873e524b5b22e11c0ec4820"
replace-with = "vendored-sources"
[source."git+https://github.com/mozilla/neqo?tag=v0.7.7"]
[source."git+https://github.com/mozilla/neqo?tag=v0.7.8"]
git = "https://github.com/mozilla/neqo"
tag = "v0.7.7"
tag = "v0.7.8"
replace-with = "vendored-sources"
[source."git+https://github.com/seanmonstar/warp?rev=9d081461ae1167eb321585ce424f4fef6cf0092b"]

20
Cargo.lock (generated)

@ -4018,8 +4018,8 @@ dependencies = [
[[package]]
name = "neqo-common"
version = "0.7.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.7#343df5cc0d02e0b0953de4a0a390ae8980d89081"
version = "0.7.8"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.8#a71e43dacf8fae41e5aa30cf95b2e826f63a7466"
dependencies = [
"enum-map",
"env_logger",
@ -4031,8 +4031,8 @@ dependencies = [
[[package]]
name = "neqo-crypto"
version = "0.7.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.7#343df5cc0d02e0b0953de4a0a390ae8980d89081"
version = "0.7.8"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.8#a71e43dacf8fae41e5aa30cf95b2e826f63a7466"
dependencies = [
"bindgen 0.69.4",
"log",
@ -4046,8 +4046,8 @@ dependencies = [
[[package]]
name = "neqo-http3"
version = "0.7.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.7#343df5cc0d02e0b0953de4a0a390ae8980d89081"
version = "0.7.8"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.8#a71e43dacf8fae41e5aa30cf95b2e826f63a7466"
dependencies = [
"enumset",
"log",
@ -4063,8 +4063,8 @@ dependencies = [
[[package]]
name = "neqo-qpack"
version = "0.7.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.7#343df5cc0d02e0b0953de4a0a390ae8980d89081"
version = "0.7.8"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.8#a71e43dacf8fae41e5aa30cf95b2e826f63a7466"
dependencies = [
"log",
"neqo-common",
@ -4076,8 +4076,8 @@ dependencies = [
[[package]]
name = "neqo-transport"
version = "0.7.7"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.7#343df5cc0d02e0b0953de4a0a390ae8980d89081"
version = "0.7.8"
source = "git+https://github.com/mozilla/neqo?tag=v0.7.8#a71e43dacf8fae41e5aa30cf95b2e826f63a7466"
dependencies = [
"enum-map",
"indexmap 2.2.6",


@ -9,10 +9,10 @@ license = "MPL-2.0"
name = "neqo_glue"
[dependencies]
neqo-http3 = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }
xpcom = { path = "../../../xpcom/rust/xpcom" }
@ -27,7 +27,7 @@ uuid = { version = "1.0", features = ["v4"] }
winapi = {version = "0.3", features = ["ws2def"] }
[dependencies.neqo-crypto]
tag = "v0.7.7"
tag = "v0.7.8"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]


@ -6,10 +6,10 @@ edition = "2018"
license = "MPL-2.0"
[dependencies]
neqo-transport = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.7", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.7.8", git = "https://github.com/mozilla/neqo" }
mio = "0.6.17"
mio-extras = "2.0.5"
log = "0.4.0"
@ -21,7 +21,7 @@ tokio = { version = "1", features = ["rt-multi-thread"] }
mozilla-central-workspace-hack = { version = "0.1", features = ["http3server"], optional = true }
[dependencies.neqo-crypto]
tag = "v0.7.7"
tag = "v0.7.8"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]


@ -1 +1 @@
{"files":{"Cargo.toml":"3459350f69e0272710e4d5d861f9646225f9c187698e721f01d38f2d9b1b3394","benches/timer.rs":"52d35abe1e06b92e913f43d95295b4eded0f19809677a7a63857fe92dad2c6fa","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"fd239f75d374db6ff744211344c82bcd19ecf753e07410e1fe37732bbb81dfe9","src/datagram.rs":"19882ecf3d3b03d6e87a1b8f871429f07f5b0db6d891c9362c91306a0cee33c7","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/fuzz.rs":"1ca74a34bdc97fedecf8a63c4a13cc487d1b2212398fb76f67792c822002138d","src/header.rs":"467b947f78bfe354d8bb51e8df0c2be69e75a45e2be688d81f0d268aa77c89ef","src/hrtime.rs":"112dc758e65301b8a7a508b125d3d61063180d432bffaec566a050d4f907ab18","src/incrdecoder.rs":"577c32b9ace51f2daaf940be6d0c391c4f55cd42ef6848c68c1ffc970d8c57b5","src/lib.rs":"bc585a11daf56f9680fc5a652c5ca79e00eb0f3fa34a45ecd1b6c60200b95cf1","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"9b081f32bf158fd340300693acc97fe0554b617ae664eba86e4d3572e2b1e16e","src/timer.rs":"f6da86baf3b5d91c1230d5296ef886fb7233cdefa8c8e2b4197fcf82425a54fa","src/tos.rs":"087cd9b12a2510f05605e755d85c07179817c22670fe1b5d6db987357f77b38e","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}
{"files":{"Cargo.toml":"490c66f6904d559ed4be60b5fea594b565da346eda98e884b54809f741bcc08d","build.rs":"306b2f909a25ae38daf5404a4e128d2a94e8975b70870864c2a71cafec9717c7","src/codec.rs":"fd239f75d374db6ff744211344c82bcd19ecf753e07410e1fe37732bbb81dfe9","src/datagram.rs":"56b4c1001c6bfb9394e4b3f968e2e17f55db774510ea576b91e127a6b32300d0","src/event.rs":"106ca6c4afb107fa49a1bc72f5eb4ae95f4baa1ba19736aa38c8ba973774c160","src/fuzz.rs":"1ca74a34bdc97fedecf8a63c4a13cc487d1b2212398fb76f67792c822002138d","src/header.rs":"467b947f78bfe354d8bb51e8df0c2be69e75a45e2be688d81f0d268aa77c89ef","src/hrtime.rs":"112dc758e65301b8a7a508b125d3d61063180d432bffaec566a050d4f907ab18","src/incrdecoder.rs":"577c32b9ace51f2daaf940be6d0c391c4f55cd42ef6848c68c1ffc970d8c57b5","src/lib.rs":"a8efb2d2c098575ad28069d9da1be450784adf1fecb9e4a9cf30928e70092e49","src/log.rs":"6ed99e15707c4256ae793011ed2f4b33aa81fed70205aaf5f8d3cd11ad451cf0","src/qlog.rs":"9b081f32bf158fd340300693acc97fe0554b617ae664eba86e4d3572e2b1e16e","src/tos.rs":"087cd9b12a2510f05605e755d85c07179817c22670fe1b5d6db987357f77b38e","tests/log.rs":"a11e21fb570258ca93bb40e3923817d381e1e605accbc3aed1df5a0a9918b41d"},"package":null}

7
third_party/rust/neqo-common/Cargo.toml (vendored)

@ -13,9 +13,8 @@
edition = "2021"
rust-version = "1.76.0"
name = "neqo-common"
version = "0.7.7"
version = "0.7.8"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
repository = "https://github.com/mozilla/neqo/"
@ -23,10 +22,6 @@ repository = "https://github.com/mozilla/neqo/"
[lib]
bench = false
[[bench]]
name = "timer"
harness = false
[dependencies.enum-map]
version = "2.7"
default-features = false

39
third_party/rust/neqo-common/benches/timer.rs (vendored)

@ -1,39 +0,0 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::time::{Duration, Instant};
use criterion::{criterion_group, criterion_main, Criterion};
use neqo_common::timer::Timer;
use test_fixture::now;
fn benchmark_timer(c: &mut Criterion) {
c.bench_function("drain a timer quickly", |b| {
b.iter_batched_ref(
make_timer,
|(_now, timer)| {
while let Some(t) = timer.next_time() {
assert!(timer.take_next(t).is_some());
}
},
criterion::BatchSize::SmallInput,
);
});
}
fn make_timer() -> (Instant, Timer<()>) {
const TIMES: &[u64] = &[1, 2, 3, 5, 8, 13, 21, 34];
let now = now();
let mut timer = Timer::new(now, Duration::from_millis(777), 100);
for &t in TIMES {
timer.add(now + Duration::from_secs(t), ());
}
(now, timer)
}
criterion_group!(benches, benchmark_timer);
criterion_main!(benches);


@ -81,12 +81,6 @@ impl std::fmt::Debug for Datagram {
}
}
impl From<Datagram> for Vec<u8> {
fn from(datagram: Datagram) -> Self {
datagram.d
}
}
#[cfg(test)]
use test_fixture::datagram;

1
third_party/rust/neqo-common/src/lib.rs (vendored)

@ -16,7 +16,6 @@ pub mod hrtime;
mod incrdecoder;
pub mod log;
pub mod qlog;
pub mod timer;
pub mod tos;
use std::fmt::Write;

420
third_party/rust/neqo-common/src/timer.rs (vendored)

@ -1,420 +0,0 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{
collections::VecDeque,
mem,
time::{Duration, Instant},
};
/// Internal structure for a timer item.
struct TimerItem<T> {
time: Instant,
item: T,
}
impl<T> TimerItem<T> {
fn time(ti: &Self) -> Instant {
ti.time
}
}
/// A timer queue.
/// This uses a classic timer wheel arrangement, with some characteristics that might be considered
/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut
/// points). Time is relative, the wheel has an origin time and it is unable to represent times that
/// are more than `granularity * capacity` past that time.
pub struct Timer<T> {
items: Vec<VecDeque<TimerItem<T>>>,
now: Instant,
granularity: Duration,
cursor: usize,
}
impl<T> Timer<T> {
/// Construct a new wheel at the given granularity, starting at the given time.
///
/// # Panics
///
/// When `capacity` is too large to fit in `u32` or `granularity` is zero.
pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self {
assert!(u32::try_from(capacity).is_ok());
assert!(granularity.as_nanos() > 0);
let mut items = Vec::with_capacity(capacity);
items.resize_with(capacity, Default::default);
Self {
items,
now,
granularity,
cursor: 0,
}
}
/// Return a reference to the time of the next entry.
#[must_use]
pub fn next_time(&self) -> Option<Instant> {
let idx = self.bucket(0);
for i in idx..self.items.len() {
if let Some(t) = self.items[i].front() {
return Some(t.time);
}
}
for i in 0..idx {
if let Some(t) = self.items[i].front() {
return Some(t.time);
}
}
None
}
/// Get the full span of time that this can cover.
/// Two timers cannot be more than this far apart.
/// In practice, this value is less by one amount of the timer granularity.
#[inline]
#[allow(clippy::cast_possible_truncation)] // guarded by assertion
#[must_use]
pub fn span(&self) -> Duration {
self.granularity * (self.items.len() as u32)
}
/// For the given `time`, get the number of whole buckets in the future that is.
#[inline]
#[allow(clippy::cast_possible_truncation)] // guarded by assertion
fn delta(&self, time: Instant) -> usize {
// This really should use Duration::div_duration_f??(), but it can't yet.
((time - self.now).as_nanos() / self.granularity.as_nanos()) as usize
}
#[inline]
fn time_bucket(&self, time: Instant) -> usize {
self.bucket(self.delta(time))
}
#[inline]
fn bucket(&self, delta: usize) -> usize {
debug_assert!(delta < self.items.len());
(self.cursor + delta) % self.items.len()
}
/// Slide forward in time by `n * self.granularity`.
#[allow(clippy::cast_possible_truncation, clippy::reversed_empty_ranges)]
// cast_possible_truncation is ok because we have an assertion guard.
// reversed_empty_ranges is to avoid different types on the if/else.
fn tick(&mut self, n: usize) {
let new = self.bucket(n);
let iter = if new < self.cursor {
(self.cursor..self.items.len()).chain(0..new)
} else {
(self.cursor..new).chain(0..0)
};
for i in iter {
assert!(self.items[i].is_empty());
}
self.now += self.granularity * (n as u32);
self.cursor = new;
}
/// Asserts if the time given is in the past or too far in the future.
///
/// # Panics
///
/// When `time` is in the past relative to previous calls.
pub fn add(&mut self, time: Instant, item: T) {
assert!(time >= self.now);
// Skip forward quickly if there is too large a gap.
let short_span = self.span() - self.granularity;
if time >= (self.now + self.span() + short_span) {
// Assert that there aren't any items.
for i in &self.items {
debug_assert!(i.is_empty());
}
self.now = time.checked_sub(short_span).unwrap();
self.cursor = 0;
}
// Adjust time forward the minimum amount necessary.
let mut d = self.delta(time);
if d >= self.items.len() {
self.tick(1 + d - self.items.len());
d = self.items.len() - 1;
}
let bucket = self.bucket(d);
let ins = match self.items[bucket].binary_search_by_key(&time, TimerItem::time) {
Ok(j) | Err(j) => j,
};
self.items[bucket].insert(ins, TimerItem { time, item });
}
/// Given knowledge of the time an item was added, remove it.
/// This requires use of a predicate that identifies matching items.
///
/// # Panics
/// Impossible, I think.
pub fn remove<F>(&mut self, time: Instant, mut selector: F) -> Option<T>
where
F: FnMut(&T) -> bool,
{
if time < self.now {
return None;
}
if time > self.now + self.span() {
return None;
}
let bucket = self.time_bucket(time);
let Ok(start_index) = self.items[bucket].binary_search_by_key(&time, TimerItem::time)
else {
return None;
};
// start_index is just one of potentially many items with the same time.
// Search backwards for a match, ...
for i in (0..=start_index).rev() {
if self.items[bucket][i].time != time {
break;
}
if selector(&self.items[bucket][i].item) {
return Some(self.items[bucket].remove(i).unwrap().item);
}
}
// ... then forwards.
for i in (start_index + 1)..self.items[bucket].len() {
if self.items[bucket][i].time != time {
break;
}
if selector(&self.items[bucket][i].item) {
return Some(self.items[bucket].remove(i).unwrap().item);
}
}
None
}
/// Take the next item, unless there are no items with
/// a timeout in the past relative to `until`.
pub fn take_next(&mut self, until: Instant) -> Option<T> {
fn maybe_take<T>(v: &mut VecDeque<TimerItem<T>>, until: Instant) -> Option<T> {
if !v.is_empty() && v[0].time <= until {
Some(v.pop_front().unwrap().item)
} else {
None
}
}
let idx = self.bucket(0);
for i in idx..self.items.len() {
let res = maybe_take(&mut self.items[i], until);
if res.is_some() {
return res;
}
}
for i in 0..idx {
let res = maybe_take(&mut self.items[i], until);
if res.is_some() {
return res;
}
}
None
}
/// Create an iterator that takes all items until the given time.
/// Note: Items might be removed even if the iterator is not fully exhausted.
pub fn take_until(&mut self, until: Instant) -> impl Iterator<Item = T> {
let get_item = move |x: TimerItem<T>| x.item;
if until >= self.now + self.span() {
// Drain everything, so a clean sweep.
let mut empty_items = Vec::with_capacity(self.items.len());
empty_items.resize_with(self.items.len(), VecDeque::default);
let mut items = mem::replace(&mut self.items, empty_items);
self.now = until;
self.cursor = 0;
let tail = items.split_off(self.cursor);
return tail.into_iter().chain(items).flatten().map(get_item);
}
// Only returning a partial span, so do it bucket at a time.
let delta = self.delta(until);
let mut buckets = Vec::with_capacity(delta + 1);
// First, the whole buckets.
for i in 0..delta {
let idx = self.bucket(i);
buckets.push(mem::take(&mut self.items[idx]));
}
self.tick(delta);
// Now we need to split the last bucket, because there might be
// some items with `item.time > until`.
let bucket = &mut self.items[self.cursor];
let last_idx = match bucket.binary_search_by_key(&until, TimerItem::time) {
Ok(mut m) => {
// If there are multiple values, the search will hit any of them.
// Make sure to get them all.
while m < bucket.len() && bucket[m].time == until {
m += 1;
}
m
}
Err(ins) => ins,
};
let tail = bucket.split_off(last_idx);
buckets.push(mem::replace(bucket, tail));
// This tomfoolery with the empty vector ensures that
// the returned type here matches the one above precisely
// without having to invoke the `either` crate.
buckets.into_iter().chain(vec![]).flatten().map(get_item)
}
}
#[cfg(test)]
mod test {
use std::sync::OnceLock;
use super::{Duration, Instant, Timer};
fn now() -> Instant {
static NOW: OnceLock<Instant> = OnceLock::new();
*NOW.get_or_init(Instant::now)
}
const GRANULARITY: Duration = Duration::from_millis(10);
const CAPACITY: usize = 10;
#[test]
fn create() {
let t: Timer<()> = Timer::new(now(), GRANULARITY, CAPACITY);
assert_eq!(t.span(), Duration::from_millis(100));
assert_eq!(None, t.next_time());
}
#[test]
fn immediate_entry() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
t.add(now(), 12);
assert_eq!(now(), t.next_time().expect("should have an entry"));
let values: Vec<_> = t.take_until(now()).collect();
assert_eq!(vec![12], values);
}
#[test]
fn same_time() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let v1 = 12;
let v2 = 13;
t.add(now(), v1);
t.add(now(), v2);
assert_eq!(now(), t.next_time().expect("should have an entry"));
let values: Vec<_> = t.take_until(now()).collect();
assert!(values.contains(&v1));
assert!(values.contains(&v2));
}
#[test]
fn add() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let near_future = now() + Duration::from_millis(17);
let v = 9;
t.add(near_future, v);
assert_eq!(near_future, t.next_time().expect("should return a value"));
assert_eq!(
t.take_until(near_future.checked_sub(Duration::from_millis(1)).unwrap())
.count(),
0
);
assert!(t
.take_until(near_future + Duration::from_millis(1))
.any(|x| x == v));
}
#[test]
fn add_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let future = now() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
assert_eq!(future, t.next_time().expect("should return a value"));
assert!(t.take_until(future).any(|x| x == v));
}
#[test]
fn add_far_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let far_future = now() + Duration::from_millis(892);
let v = 9;
t.add(far_future, v);
assert_eq!(far_future, t.next_time().expect("should return a value"));
assert!(t.take_until(far_future).any(|x| x == v));
}
const TIMES: &[Duration] = &[
Duration::from_millis(40),
Duration::from_millis(91),
Duration::from_millis(6),
Duration::from_millis(3),
Duration::from_millis(22),
Duration::from_millis(40),
];
fn with_times() -> Timer<usize> {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
for (i, time) in TIMES.iter().enumerate() {
t.add(now() + *time, i);
}
assert_eq!(
now() + *TIMES.iter().min().unwrap(),
t.next_time().expect("should have a time")
);
t
}
#[test]
#[allow(clippy::needless_collect)] // false positive
fn multiple_values() {
let mut t = with_times();
let values: Vec<_> = t.take_until(now() + *TIMES.iter().max().unwrap()).collect();
for i in 0..TIMES.len() {
assert!(values.contains(&i));
}
}
#[test]
#[allow(clippy::needless_collect)] // false positive
fn take_far_future() {
let mut t = with_times();
let values: Vec<_> = t.take_until(now() + Duration::from_secs(100)).collect();
for i in 0..TIMES.len() {
assert!(values.contains(&i));
}
}
#[test]
fn remove_each() {
let mut t = with_times();
for (i, time) in TIMES.iter().enumerate() {
assert_eq!(Some(i), t.remove(now() + *time, |&x| x == i));
}
assert_eq!(None, t.next_time());
}
#[test]
fn remove_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let future = now() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
assert_eq!(Some(v), t.remove(future, |candidate| *candidate == v));
}
#[test]
fn remove_too_far_future() {
let mut t = Timer::new(now(), GRANULARITY, CAPACITY);
let future = now() + Duration::from_millis(117);
let too_far_future = now() + t.span() + Duration::from_millis(117);
let v = 9;
t.add(future, v);
assert_eq!(None, t.remove(too_far_future, |candidate| *candidate == v));
}
}

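The module deleted above implemented a coarse timer wheel: a deadline is mapped to a bucket by counting whole granularity steps from the wheel's origin and offsetting the current cursor modulo the capacity, which is why the wheel cannot represent times more than granularity * capacity past its origin. A minimal, self-contained sketch of that bucket arithmetic, with free-standing names chosen here for illustration (in the deleted code this logic lives in the private delta and bucket methods of Timer<T>):

use std::time::{Duration, Instant};

// Map `time` to a bucket index. Assumes `origin <= time` and that `time`
// falls within the span the wheel can represent.
fn bucket_index(
    origin: Instant,       // the wheel's `now`
    cursor: usize,         // index of the bucket holding `origin`
    capacity: usize,       // number of buckets in the wheel
    granularity: Duration, // width of one bucket
    time: Instant,
) -> usize {
    // Whole granularity steps between the origin and the deadline.
    let delta = ((time - origin).as_nanos() / granularity.as_nanos()) as usize;
    assert!(delta < capacity, "beyond the span the wheel can represent");
    (cursor + delta) % capacity
}

fn main() {
    let origin = Instant::now();
    // 25 ms with 10 ms buckets is two whole steps past the cursor: (3 + 2) % 10 = 5.
    let idx = bucket_index(
        origin,
        3,
        10,
        Duration::from_millis(10),
        origin + Duration::from_millis(25),
    );
    assert_eq!(idx, 5);
}

This is also why Timer::add above first slides the wheel forward (tick) or resets the origin before inserting a deadline that lies beyond the representable span.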

@ -1 +1 @@
{"files":{"Cargo.toml":"c3bbe27a12dc333744b81b97fec121b1e016524ab22bd4cf553a43b7e7f4cd68","bindings/bindings.toml":"29ec7a8ef3d5f1e4a632003e2d36c270e1caf12fd3fcf108a22d1893b90a41a6","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"cbf6a7d912314784c8c124cf7319c910a786d0e263f466843edd3f43826f036c","min_version.txt":"7e98f86c69cddb4f65cf96a6de1f4297e3ce224a4c4628609e29042b6c4dcfb9","src/aead.rs":"fc42bc20b84d2e5ccfd56271ae2d2db082e55586ea2926470c102da177f22296","src/aead_null.rs":"3a553f21126c9ca0116c2be81e5a777011b33c159fd88c4f384614bbdb06bb2e","src/agent.rs":"b12004faee4a136c10e8168848d397443b5927e9497edb62c72e6db3eb1c10a0","src/agentio.rs":"415f70b95312d3ee6d74ba6f28094246101ab6d535aa9df880c38d8bb5a9279e","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8942cb3ce25a61f92b6ffc30fb286052ed6f56eeda3be12fd46ea76ceba6c1cf","src/constants.rs":"f5c779db128a8b0607841ca18c376971017eb327e102e5e6959a7d8effe4b3a6","src/ech.rs":"9d322fcc01c0886f1dfe9bb6273cb9f88a746452ac9a802761b1816a05930c1f","src/err.rs":"ae979f334604aba89640c4491262641910033f0bd790d58671f649f5039b291c","src/exp.rs":"cec59d61fc95914f9703d2fb6490a8507af993c9db710dde894f2f8fd38123c7","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"ef32f20e30a9bd7f094199536d19c87c4231b7fbbe4a9c54c70e84ca9c6575be","src/hp.rs":"644f1bed67f1c6189a67c8d02ab3358aaa7f63af4b913dd7395becbc01a84291","src/lib.rs":"6b2d0eb2c55f6351d673d3a3e5fc5adac8d1030c67dae9af4c79552de0f57455","src/min_version.rs":"89b7ef6f9d2301db4f689f4d963b58375d577f705b92003a804048441e00cfd1","src/p11.rs":"704c5f164c4f195c8051c5bf1e69a912c34b613a8cf6bed5f577dc5674eea34e","src/prio.rs":"e5e169296c0ac69919c59fb6c1f8bd6bf079452eaa13d75da0edd41d435d3f6f","src/replay.rs":"96b7af8eff9e14313e79303092018b12e8834f780c96b8e247c497fdc680c696","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"4ffaa66f25df47dadf042063bff5953effa7bf2f4920cafe827757d6a659cb58","src/selfencrypt.rs":"b7cc1c896c7661c37461fc3a8bcbfdf2589433b907fa5f968ae4f6907704b441","src/ssl.rs":"c83baa5518b81dd06f2e4072ea3c2d666ccdeb8b1ff6e3746eea9f1af47023a6","src/time.rs":"c71a01ff8aa2c0e97fb16ad620df4ed6b7cc1819ff93f46634e2f1c9551627ec","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"824735f88e487a3748200844e9481e81a72163ad74d82faa9aa16594d9b9bb25","tests/ext.rs":"1b047d23d9b224ad06eb65d8f3a7b351e263774e404c79bbcbe8f43790e29c18","tests/handshake.rs":"e892a2839b31414be16e96cdf3b1a65978716094700c1a4989229f7edbf578
a0","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"b24fec53771c169be788772532d2617a5349196cf87d6444dc74214f7c73e92c","tests/init.rs":"616313cb38eac44b8c71a1d23a52a7d7b4c7c07d4c20dc9ea6600c3317f92613","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}
{"files":{"Cargo.toml":"b60660030f363cac327ddfafc4259a210e4c91ee7a39aec90fef354dbdde7250","bindings/bindings.toml":"29ec7a8ef3d5f1e4a632003e2d36c270e1caf12fd3fcf108a22d1893b90a41a6","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"6c3e94359395cce5cb29bc0063ff930ffcd7edd50c040cb459acce6c80aa4ef4","min_version.txt":"7e98f86c69cddb4f65cf96a6de1f4297e3ce224a4c4628609e29042b6c4dcfb9","src/aead.rs":"fc42bc20b84d2e5ccfd56271ae2d2db082e55586ea2926470c102da177f22296","src/aead_null.rs":"3a553f21126c9ca0116c2be81e5a777011b33c159fd88c4f384614bbdb06bb2e","src/agent.rs":"0ef7b488480d12c01a122050e82809bc784443ef6277d75fce21d706fbf5eaaf","src/agentio.rs":"415f70b95312d3ee6d74ba6f28094246101ab6d535aa9df880c38d8bb5a9279e","src/auth.rs":"ced1a18f691894984244088020ea25dc1ee678603317f0c7dfc8b8842fa750b4","src/cert.rs":"8942cb3ce25a61f92b6ffc30fb286052ed6f56eeda3be12fd46ea76ceba6c1cf","src/constants.rs":"f5c779db128a8b0607841ca18c376971017eb327e102e5e6959a7d8effe4b3a6","src/ech.rs":"9d322fcc01c0886f1dfe9bb6273cb9f88a746452ac9a802761b1816a05930c1f","src/err.rs":"ae979f334604aba89640c4491262641910033f0bd790d58671f649f5039b291c","src/exp.rs":"cec59d61fc95914f9703d2fb6490a8507af993c9db710dde894f2f8fd38123c7","src/ext.rs":"cbf7d9f5ecabf4b8c9efd6c334637ab1596ec5266d38ab8d2d6ceae305283deb","src/hkdf.rs":"ef32f20e30a9bd7f094199536d19c87c4231b7fbbe4a9c54c70e84ca9c6575be","src/hp.rs":"644f1bed67f1c6189a67c8d02ab3358aaa7f63af4b913dd7395becbc01a84291","src/lib.rs":"6b2d0eb2c55f6351d673d3a3e5fc5adac8d1030c67dae9af4c79552de0f57455","src/min_version.rs":"89b7ef6f9d2301db4f689f4d963b58375d577f705b92003a804048441e00cfd1","src/p11.rs":"704c5f164c4f195c8051c5bf1e69a912c34b613a8cf6bed5f577dc5674eea34e","src/prio.rs":"e5e169296c0ac69919c59fb6c1f8bd6bf079452eaa13d75da0edd41d435d3f6f","src/replay.rs":"96b7af8eff9e14313e79303092018b12e8834f780c96b8e247c497fdc680c696","src/result.rs":"0587cbb6aace71a7f9765ef7c01dcd9f73a49dcc6331e1d8fe4de2aef6ca65b6","src/secrets.rs":"4ffaa66f25df47dadf042063bff5953effa7bf2f4920cafe827757d6a659cb58","src/selfencrypt.rs":"b7cc1c896c7661c37461fc3a8bcbfdf2589433b907fa5f968ae4f6907704b441","src/ssl.rs":"c83baa5518b81dd06f2e4072ea3c2d666ccdeb8b1ff6e3746eea9f1af47023a6","src/time.rs":"c71a01ff8aa2c0e97fb16ad620df4ed6b7cc1819ff93f46634e2f1c9551627ec","tests/aead.rs":"e36ae77802df1ea6d17cfd1bd2178a3706089577d6fd1554ca86e748b8b235b9","tests/agent.rs":"824735f88e487a3748200844e9481e81a72163ad74d82faa9aa16594d9b9bb25","tests/ext.rs":"1b047d23d9b224ad06eb65d8f3a7b351e263774e404c79bbcbe8f43790e29c18","tests/handshake.rs":"e892a2839b31414be16e96cdf3b1a65978716094700c1a4989229f7edbf578
a0","tests/hkdf.rs":"1d2098dc8398395864baf13e4886cfd1da6d36118727c3b264f457ee3da6b048","tests/hp.rs":"b24fec53771c169be788772532d2617a5349196cf87d6444dc74214f7c73e92c","tests/init.rs":"616313cb38eac44b8c71a1d23a52a7d7b4c7c07d4c20dc9ea6600c3317f92613","tests/selfencrypt.rs":"8d10840b41629bf449a6b3a551377315e8a05ca26c6b041548748196652c5909"},"package":null}

3
third_party/rust/neqo-crypto/Cargo.toml (vendored)

@ -13,9 +13,8 @@
edition = "2021"
rust-version = "1.76.0"
name = "neqo-crypto"
version = "0.7.7"
version = "0.7.8"
authors = ["The Neqo Authors <necko@mozilla.com>"]
build = "build.rs"
homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"
repository = "https://github.com/mozilla/neqo/"

1
third_party/rust/neqo-crypto/build.rs (vendored)

@ -421,6 +421,7 @@ fn setup_for_gecko() -> Vec<String> {
}
fn main() {
println!("cargo:rustc-check-cfg=cfg(nss_nodb)");
let flags = if cfg!(feature = "gecko") {
setup_for_gecko()
} else if let Ok(nss_dir) = env::var("NSS_DIR") {

7
third_party/rust/neqo-crypto/src/agent.rs (vendored)

@ -875,14 +875,13 @@ impl Client {
arg: *mut c_void,
) -> ssl::SECStatus {
let mut info: MaybeUninit<ssl::SSLResumptionTokenInfo> = MaybeUninit::uninit();
if ssl::SSL_GetResumptionTokenInfo(
let info_res = &ssl::SSL_GetResumptionTokenInfo(
token,
len,
info.as_mut_ptr(),
c_uint::try_from(mem::size_of::<ssl::SSLResumptionTokenInfo>()).unwrap(),
)
.is_err()
{
);
if info_res.is_err() {
// Ignore the token.
return ssl::SECSuccess;
}


@ -1 +1 @@
{"files":{"Cargo.toml":"08e52a9eb3d14c9b985d5ab9e88f2bfec51bc9f13b6d62d84e143aece1f55654","src/buffered_send_stream.rs":"0e8ad3f443a33e213d55ba26a1ff266692c9087a1627c9b3d10c5025dee550ac","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"224a8ea6ef632930a7788a1cabf47ce69ad41bd4bc8dcf3053fbd998fdb38e82","src/connection.rs":"f5f49dd72170f7e42c180b738ff8bddae92fd0c71acc5a08736e298bf53483e7","src/connection_client.rs":"77cf08711b89e03c7cea47bf3cf02c76397485877121e42c321206cf7bef4ddc","src/connection_server.rs":"de5a6cb42b8c4dc08fb1f626f681b45cd22435892b11e6053b61a5401490db94","src/control_stream_local.rs":"ae52e3286f1686ca1265e7de841392addd42616db02799bb967a59feb6039cb5","src/control_stream_remote.rs":"59eb4041e366d92f9f294e8446755caa5e91fd943bba7b79b726698ba13be248","src/features/extended_connect/mod.rs":"3b02f6b18627f3855465a81b1d9b285e6f13839e75a8a6db648ed9082908d7f0","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"4c85a90afb753ce588e3fdeb773669bc49c013aebc28912340359eb01b74fd70","src/features/extended_connect/tests/webtransport/mod.rs":"a30ea715f5271a826a739278b18e145964dedbce7026eed45f1b7d0355c407d5","src/features/extended_connect/tests/webtransport/negotiation.rs":"a8940b99e21a6b29fef2fc32f3dce05d12de8b09079dfdc0502c13e4582dbdb6","src/features/extended_connect/tests/webtransport/sessions.rs":"de3d836f666c2bec31e70b33bdc2669572cabbe17df2225db7282613a224a364","src/features/extended_connect/tests/webtransport/streams.rs":"8b3c34cac1b2171252a4bb53d420ac2098549a20309c327bf56e2e9ba9e33538","src/features/extended_connect/webtransport_session.rs":"239d92c06fbc5f6226078bb411a803f57b555dea0077349d49d7f57671cf2eab","src/features/extended_connect/webtransport_streams.rs":"5d7507aaf6a819d266fbea9b7a415c8324329df0f6936d9045b73e17a5b844ee","src/features/mod.rs":"925aae4427ad82e4d019354802b223d53db5e5585d4a940f5417a24a9503d7ee","src/frames/hframe.rs":"56c36ac597504f28c73cf2370acd82104f8c7a7b9ffc0f6d222378abc524482d","src/frames/mod.rs":"7d0a46ca147336d14781edb8dbee8b03c2e4bcd6646f5473a9d93d31fe73fecb","src/frames/reader.rs":"e07ee9de74bc499c10afcda592fefd9a7eef3381c045aa14f6596d67313546ca","src/frames/tests/hframe.rs":"01ec74eb3eb25d95042aa0263f9267f89535e6b7b8c1161fab4ba9ee5352d4a7","src/frames/tests/mod.rs":"0610609b316767a6a022837d32ee0452e37ea296fde37e51bec87e7c77e923a3","src/frames/tests/reader.rs":"2bfadc7afbc41bff9f5f930b31550259a8a92484d35f6c5d8dd8fd9acfb88f5b","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"1d9d0256ace2ba7262343ed035df795f21a4d45065792d3fd45b3391b6916b2f","src/headers_checks.rs":"be0f0109298dcc3a40350b7c0950076ddfe20617d195b305e3ffc8582557ab18","src/lib.rs":"4f908a021222bcc79b9d569bc3759a493379a20b47dfa228fddf51600bf6e446","src/priority.rs":"f3b77c208962e44a4e2d13138c6998b703d40e7bcf8f73ea84d8ef5b556e0aee","src/push_controller.rs":"13bccf2834ae19109504cf695a5948c3b2d03fd101bc032a92bb77a033423854","src/qlog.rs":"2debd75c7ea103c95ff79e44412f1408c3e496e324976100c55d5a833912b6c3","src/qpack_decoder_receiver.rs":"c927dfc3e58c71d282210ba79280f6f03e789733bc3bedc247e68bab516b9e9e","src/qpack_encoder_receiver.rs":"d0ac03cc111b6e1c555a8654d3234116f2b135b5b040edac23cefe2d640beba9","src/recv_message.rs":"7ac8d4057ba53874e4edfc62cd25ad5d3f0b10aaac5bf6e156103c3bc44e18cc","src/request_target.rs":"6041a69a0a74969ec08bc164509c055e9bad99
f53bbeb16c0aa17d108dd68b8c","src/send_message.rs":"bc1bb096e56560088db961ab0f7a4e08acd3d3977f483ffcbdcfeec7ed8d855a","src/server.rs":"24822b9b164271862777cf5afcc74edbecaa4ce648978b0a6559e1490e3cea55","src/server_connection_events.rs":"12d353ca6301467f6d475dde3b789951a5716c89ddd7dbf1383efef8082361f3","src/server_events.rs":"1cda8d6c413fad0fa67fcfd7cb78e795bf7ef7f0e09b5720992646a82d51ce16","src/settings.rs":"476b154b5eea4c8d69a4a790fee3e527cef4d375df1cfb5eed04ec56406fe15a","src/stream_type_reader.rs":"7a7226b7911d69f7e00ec4987c2a32a5e8a33463203398cbee1e6645d2691478","tests/httpconn.rs":"ee2f29c6104f5379bee2606f160005683f00ae85f2c43216e7ffaa89ff633466","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"b5435045b16429d9e626ea94a8f10e2937e1a5a878af0035763a4f5ec09bf53c","tests/webtransport.rs":"25794305017ff58e57dc3c3b9b078e5bfc1814ea82a521b7b7156228e613c092"},"package":null}
{"files":{"Cargo.toml":"d47fc19b1c133e6edceba68b8926573d1699698daa2df07d8f9f1a17b026abda","src/buffered_send_stream.rs":"0e8ad3f443a33e213d55ba26a1ff266692c9087a1627c9b3d10c5025dee550ac","src/client_events.rs":"77fedca72ce54956eaba3fb7103085d196a631b764662584ea2629224c5c234e","src/conn_params.rs":"224a8ea6ef632930a7788a1cabf47ce69ad41bd4bc8dcf3053fbd998fdb38e82","src/connection.rs":"f5f49dd72170f7e42c180b738ff8bddae92fd0c71acc5a08736e298bf53483e7","src/connection_client.rs":"0e578d714c04d1f21319b72a6d3be0e3f3440b8234d2e1bd2b5889dd2b51d88a","src/connection_server.rs":"de5a6cb42b8c4dc08fb1f626f681b45cd22435892b11e6053b61a5401490db94","src/control_stream_local.rs":"ae52e3286f1686ca1265e7de841392addd42616db02799bb967a59feb6039cb5","src/control_stream_remote.rs":"59eb4041e366d92f9f294e8446755caa5e91fd943bba7b79b726698ba13be248","src/features/extended_connect/mod.rs":"3b02f6b18627f3855465a81b1d9b285e6f13839e75a8a6db648ed9082908d7f0","src/features/extended_connect/tests/mod.rs":"fd6aee37243713e80fc526552f21f0222338cec9890409b6575a2a637b17ec1f","src/features/extended_connect/tests/webtransport/datagrams.rs":"4c85a90afb753ce588e3fdeb773669bc49c013aebc28912340359eb01b74fd70","src/features/extended_connect/tests/webtransport/mod.rs":"cdb8de335293249525c73b52b49b44752425eaf0183182b4102d89494a5e5a52","src/features/extended_connect/tests/webtransport/negotiation.rs":"a8940b99e21a6b29fef2fc32f3dce05d12de8b09079dfdc0502c13e4582dbdb6","src/features/extended_connect/tests/webtransport/sessions.rs":"de3d836f666c2bec31e70b33bdc2669572cabbe17df2225db7282613a224a364","src/features/extended_connect/tests/webtransport/streams.rs":"8b3c34cac1b2171252a4bb53d420ac2098549a20309c327bf56e2e9ba9e33538","src/features/extended_connect/webtransport_session.rs":"239d92c06fbc5f6226078bb411a803f57b555dea0077349d49d7f57671cf2eab","src/features/extended_connect/webtransport_streams.rs":"5d7507aaf6a819d266fbea9b7a415c8324329df0f6936d9045b73e17a5b844ee","src/features/mod.rs":"925aae4427ad82e4d019354802b223d53db5e5585d4a940f5417a24a9503d7ee","src/frames/hframe.rs":"56c36ac597504f28c73cf2370acd82104f8c7a7b9ffc0f6d222378abc524482d","src/frames/mod.rs":"7d0a46ca147336d14781edb8dbee8b03c2e4bcd6646f5473a9d93d31fe73fecb","src/frames/reader.rs":"e07ee9de74bc499c10afcda592fefd9a7eef3381c045aa14f6596d67313546ca","src/frames/tests/hframe.rs":"01ec74eb3eb25d95042aa0263f9267f89535e6b7b8c1161fab4ba9ee5352d4a7","src/frames/tests/mod.rs":"0610609b316767a6a022837d32ee0452e37ea296fde37e51bec87e7c77e923a3","src/frames/tests/reader.rs":"2bfadc7afbc41bff9f5f930b31550259a8a92484d35f6c5d8dd8fd9acfb88f5b","src/frames/tests/wtframe.rs":"589ebe1e62ce4da63b37b7d22cde7ba572ddbf29336fdcdbbcd0a745f79dacd8","src/frames/wtframe.rs":"1d9d0256ace2ba7262343ed035df795f21a4d45065792d3fd45b3391b6916b2f","src/headers_checks.rs":"be0f0109298dcc3a40350b7c0950076ddfe20617d195b305e3ffc8582557ab18","src/lib.rs":"4f908a021222bcc79b9d569bc3759a493379a20b47dfa228fddf51600bf6e446","src/priority.rs":"f3b77c208962e44a4e2d13138c6998b703d40e7bcf8f73ea84d8ef5b556e0aee","src/push_controller.rs":"13bccf2834ae19109504cf695a5948c3b2d03fd101bc032a92bb77a033423854","src/qlog.rs":"2debd75c7ea103c95ff79e44412f1408c3e496e324976100c55d5a833912b6c3","src/qpack_decoder_receiver.rs":"c927dfc3e58c71d282210ba79280f6f03e789733bc3bedc247e68bab516b9e9e","src/qpack_encoder_receiver.rs":"d0ac03cc111b6e1c555a8654d3234116f2b135b5b040edac23cefe2d640beba9","src/recv_message.rs":"7ac8d4057ba53874e4edfc62cd25ad5d3f0b10aaac5bf6e156103c3bc44e18cc","src/request_target.rs":"6041a69a0a74969ec08bc164509c055e9bad99
f53bbeb16c0aa17d108dd68b8c","src/send_message.rs":"a16496e00a8ee9a69f20d97cf75c01230039eb8261b6b917d82238a42cb99bbe","src/server.rs":"ad477c0792a1e75d982215f4ad35a2dcb6dc82fc817093026670b675dd15dd21","src/server_connection_events.rs":"12d353ca6301467f6d475dde3b789951a5716c89ddd7dbf1383efef8082361f3","src/server_events.rs":"d7c845355a74ddee42047dc65fe394ef3a7625702beac3be75830002f4940f21","src/settings.rs":"476b154b5eea4c8d69a4a790fee3e527cef4d375df1cfb5eed04ec56406fe15a","src/stream_type_reader.rs":"7a7226b7911d69f7e00ec4987c2a32a5e8a33463203398cbee1e6645d2691478","tests/httpconn.rs":"a7137c37fe1003c9a3ba057b10cad4d8de1441100a24bad128345cfed69fb5b7","tests/priority.rs":"364754507873298612ad12e8d1d106d26d993712142d0be4cbf056da5338854c","tests/send_message.rs":"b5435045b16429d9e626ea94a8f10e2937e1a5a878af0035763a4f5ec09bf53c","tests/webtransport.rs":"25794305017ff58e57dc3c3b9b078e5bfc1814ea82a521b7b7156228e613c092"},"package":null}

2
third_party/rust/neqo-http3/Cargo.toml (vendored)

@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.76.0"
name = "neqo-http3"
version = "0.7.7"
version = "0.7.8"
authors = ["The Neqo Authors <necko@mozilla.com>"]
homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"


@ -1292,7 +1292,7 @@ mod tests {
use neqo_qpack::{encoder::QPackEncoder, QpackSettings};
use neqo_transport::{
CloseReason, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType,
Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE,
Version, MIN_INITIAL_PACKET_SIZE, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE,
};
use test_fixture::{
anti_replay, default_server_h3, fixture_init, new_server, now,
@ -7157,8 +7157,9 @@ mod tests {
#[test]
fn priority_update_during_full_buffer() {
// set a lower MAX_DATA on the server side to restrict the data the client can send
let (mut client, mut server) =
connect_with_connection_parameters(ConnectionParameters::default().max_data(1200));
let (mut client, mut server) = connect_with_connection_parameters(
ConnectionParameters::default().max_data(MIN_INITIAL_PACKET_SIZE.try_into().unwrap()),
);
let request_stream_id = make_request_and_exchange_pkts(&mut client, &mut server, false);
let data_writable = |e| matches!(e, Http3ClientEvent::DataWritable { .. });


@ -12,7 +12,7 @@ use std::{cell::RefCell, rc::Rc, time::Duration};
use neqo_common::event::Provider;
use neqo_crypto::AuthenticationStatus;
use neqo_transport::{ConnectionParameters, StreamId, StreamType};
use neqo_transport::{ConnectionParameters, StreamId, StreamType, MIN_INITIAL_PACKET_SIZE};
use test_fixture::{
anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ADDR, DEFAULT_ALPN_H3,
DEFAULT_KEYS, DEFAULT_SERVER_NAME,
@ -25,7 +25,7 @@ use crate::{
WebTransportServerEvent, WebTransportSessionAcceptAction,
};
const DATAGRAM_SIZE: u64 = 1200;
const DATAGRAM_SIZE: u64 = MIN_INITIAL_PACKET_SIZE as u64;
pub fn wt_default_parameters() -> Http3Parameters {
Http3Parameters::default()


@ -4,7 +4,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use std::{cell::RefCell, cmp::min, fmt::Debug, rc::Rc};
use std::{cell::RefCell, cmp::min, fmt::Debug, num::NonZeroUsize, rc::Rc};
use neqo_common::{qdebug, qtrace, Encoder, Header, MessageType};
use neqo_qpack::encoder::QPackEncoder;
@ -17,6 +17,7 @@ use crate::{
SendStream, SendStreamEvents, Stream,
};
const MIN_DATA_FRAME_SIZE: usize = 3; // Minimal DATA frame size: 2 (header) + 1 (payload)
const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2
const MAX_DATA_HEADER_SIZE_2_LIMIT: usize = MAX_DATA_HEADER_SIZE_2 + 3; // 63 + 3 (size of the next buffer data frame header)
const MAX_DATA_HEADER_SIZE_3: usize = (1 << 14) - 1; // Maximal amount of data with DATA frame header size 3
@ -177,7 +178,14 @@ impl SendStream for SendMessage {
let available = conn
.stream_avail_send_space(self.stream_id())
.map_err(|e| Error::map_stream_send_errors(&e.into()))?;
if available <= 2 {
if available < MIN_DATA_FRAME_SIZE {
// Setting this once, instead of every time the available send space
// is exhausted, would suffice. That said, the function call should be
// cheap, thus not worth optimizing.
conn.stream_set_writable_event_low_watermark(
self.stream_id(),
NonZeroUsize::new(MIN_DATA_FRAME_SIZE).unwrap(),
)?;
return Ok(0);
}
let to_send = if available <= MAX_DATA_HEADER_SIZE_2_LIMIT {

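The hunk above is the consumer side of a new transport knob: when the available send space cannot hold even a minimal DATA frame (2-byte header plus 1 byte of payload), SendMessage raises the stream's writable-event low watermark so the next DataWritable event only fires once enough flow-control credit is back. A hedged sketch of the same pattern written directly against neqo_transport::Connection, using only the two calls visible in this diff (stream_avail_send_space and stream_set_writable_event_low_watermark); the helper name and the exact return type are illustrative, not part of the crate:

use std::num::NonZeroUsize;

use neqo_transport::{Connection, Error, StreamId};

// Smallest useful HTTP/3 DATA frame: 2-byte frame header + 1 byte of payload.
const MIN_DATA_FRAME_SIZE: usize = 3;

// Illustrative helper: report the usable send space, and when a minimal DATA
// frame does not fit, defer the next writable event until it would.
fn usable_send_space(conn: &mut Connection, stream_id: StreamId) -> Result<usize, Error> {
    let available = conn.stream_avail_send_space(stream_id)?;
    if available < MIN_DATA_FRAME_SIZE {
        // Suppress SendStreamWritable until at least MIN_DATA_FRAME_SIZE bytes
        // of credit are available again.
        conn.stream_set_writable_event_low_watermark(
            stream_id,
            NonZeroUsize::new(MIN_DATA_FRAME_SIZE).unwrap(),
        )?;
        return Ok(0);
    }
    Ok(available)
}

The new test_data_writable_events_low_watermark test in third_party/rust/neqo-http3/tests/httpconn.rs further down exercises this behaviour end to end.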
6
third_party/rust/neqo-http3/src/server.rs (vendored)

@ -1271,11 +1271,11 @@ mod tests {
while let Some(event) = hconn.next_event() {
match event {
Http3ServerEvent::Headers { stream, .. } => {
assert!(!requests.contains_key(&stream));
requests.insert(stream, 0);
assert!(!requests.contains_key(&stream.stream_id()));
requests.insert(stream.stream_id(), 0);
}
Http3ServerEvent::Data { stream, .. } => {
assert!(requests.contains_key(&stream));
assert!(requests.contains_key(&stream.stream_id()));
}
Http3ServerEvent::DataWritable { .. }
| Http3ServerEvent::StreamReset { .. }


@ -84,6 +84,19 @@ impl StreamHandler {
.send_data(self.stream_id(), buf, &mut self.conn.borrow_mut())
}
/// Bytes sendable on stream at the QUIC layer.
///
/// Note that this does not yet account for HTTP3 frame headers.
///
/// # Errors
///
/// It may return `InvalidStreamId` if a stream does not exist anymore.
pub fn available(&mut self) -> Res<usize> {
let stream_id = self.stream_id();
let n = self.conn.borrow_mut().stream_avail_send_space(stream_id)?;
Ok(n)
}
/// Close sending side.
///
/// # Errors

91
third_party/rust/neqo-http3/tests/httpconn.rs (vendored)

@ -246,6 +246,83 @@ fn test_103_response() {
process_client_events(&mut hconn_c);
}
/// Test [`neqo_http3::SendMessage::send_data`] to set
/// [`neqo_transport::SendStream::set_writable_event_low_watermark`].
#[allow(clippy::cast_possible_truncation)]
#[test]
fn test_data_writable_events_low_watermark() -> Result<(), Box<dyn std::error::Error>> {
const STREAM_LIMIT: u64 = 5000;
const DATA_FRAME_HEADER_SIZE: usize = 3;
// Create a client and a server.
let mut hconn_c = http3_client_with_params(Http3Parameters::default().connection_parameters(
ConnectionParameters::default().max_stream_data(StreamType::BiDi, false, STREAM_LIMIT),
));
let mut hconn_s = default_http3_server();
mem::drop(connect_peers(&mut hconn_c, &mut hconn_s));
// Client sends GET to server.
let stream_id = hconn_c.fetch(
now(),
"GET",
&("https", "something.com", "/"),
&[],
Priority::default(),
)?;
hconn_c.stream_close_send(stream_id)?;
exchange_packets(&mut hconn_c, &mut hconn_s, None);
// Server receives GET and responds with headers.
let mut request = receive_request(&mut hconn_s).unwrap();
request.send_headers(&[Header::new(":status", "200")])?;
// Sending these headers clears the server's send stream buffer and thus
// emits a DataWritable event.
exchange_packets(&mut hconn_c, &mut hconn_s, None);
let data_writable = |e| {
matches!(
e,
Http3ServerEvent::DataWritable {
stream
} if stream.stream_id() == stream_id
)
};
assert!(hconn_s.events().any(data_writable));
// Have server fill entire send buffer minus 1 byte.
let all_but_one = request.available()? - DATA_FRAME_HEADER_SIZE - 1;
let buf = vec![1; all_but_one];
let sent = request.send_data(&buf)?;
assert_eq!(sent, all_but_one);
assert_eq!(request.available()?, 1);
// Sending the buffered data clears the send stream buffer and thus emits a
// DataWritable event.
exchange_packets(&mut hconn_c, &mut hconn_s, None);
assert!(hconn_s.events().any(data_writable));
// Sending more fails, given that each data frame needs to be preceded by a
// header, i.e. needs more than 1 byte of send space to send 1 byte payload.
assert_eq!(request.available()?, 1);
assert_eq!(request.send_data(&buf)?, 0);
// Have the client read all the pending data.
let mut recv_buf = vec![0_u8; all_but_one];
let (recvd, _) = hconn_c.read_data(now(), stream_id, &mut recv_buf)?;
assert_eq!(sent, recvd);
exchange_packets(&mut hconn_c, &mut hconn_s, None);
// Expect the server's available send space to be back to the stream limit.
assert_eq!(request.available()?, STREAM_LIMIT as usize);
// Expect the server to emit a DataWritable event, even though it always had
// at least 1 byte available to send, i.e. it never exhausted the entire
// available send space.
assert!(hconn_s.events().any(data_writable));
Ok(())
}
#[test]
fn test_data_writable_events() {
const STREAM_LIMIT: u64 = 5000;
@ -445,14 +522,12 @@ fn fetch_noresponse_will_idletimeout() {
let mut done = false;
while !done {
while let Some(event) = hconn_c.next_event() {
if let Http3ClientEvent::StateChange(state) = event {
match state {
Http3State::Closing(error_code) | Http3State::Closed(error_code) => {
assert_eq!(error_code, CloseReason::Transport(Error::IdleTimeout));
done = true;
}
_ => {}
}
if let Http3ClientEvent::StateChange(
Http3State::Closing(error_code) | Http3State::Closed(error_code),
) = event
{
assert_eq!(error_code, CloseReason::Transport(Error::IdleTimeout));
done = true;
}
}


@ -1 +1 @@
{"files":{"Cargo.toml":"e0a99f1f9f0580d7166549bb90c603fbfc88babd675a84191fe6fb5124104d01","src/decoder.rs":"0675444129e074e9d5d56f0d45d2eaed614c85e22cfe9f2d28cdee912c15b420","src/decoder_instructions.rs":"d991d70e51f079bc5b30d3982fd0176edfa9bb7ba14c17a20ec3eea878c56206","src/encoder.rs":"84649cbee81e050f55d7ea691ac871e072741abd8bbf96303eb2e98aa8ee0aea","src/encoder_instructions.rs":"86e3abbd9cf94332041326ac6cf806ed64623e3fd38dbc0385b1f63c37e73fd9","src/header_block.rs":"3925476df69b90d950594faadc5cb24c374d46de8c75a374a235f0d27323a7d8","src/huffman.rs":"71ec740426eee0abb6205104e504f5b97f525a76c4a5f5827b78034d28ce1876","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"fd673630b5ed64197851c9a9758685096d3c0aa04f4994290733a38057004ee6","src/prefix.rs":"fb4a9acbcf6fd3178f4474404cd3d3b131abca934f69fe14a9d744bc7e636dc5","src/qlog.rs":"e320007ea8309546b26f9c0019ab8722da80dbd38fa976233fd8ae19a0af637c","src/qpack_send_buf.rs":"755af90fe077b1bcca34a1a2a1bdce5ce601ea490b2ca3f1313e0107d13e67e2","src/reader.rs":"1581261741a0922b147a6975cc8b1a3503846f6dbfdb771d254760c298996982","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"ddf055a228bed575d640d9a06e19e1e9fd98a48e393a7d326f8254438fb94889"},"package":null}
{"files":{"Cargo.toml":"16d6744a4e1e31fe689e9a74b37a5cda826d88aece3253700ef50dbb6ac18527","src/decoder.rs":"0675444129e074e9d5d56f0d45d2eaed614c85e22cfe9f2d28cdee912c15b420","src/decoder_instructions.rs":"d991d70e51f079bc5b30d3982fd0176edfa9bb7ba14c17a20ec3eea878c56206","src/encoder.rs":"84649cbee81e050f55d7ea691ac871e072741abd8bbf96303eb2e98aa8ee0aea","src/encoder_instructions.rs":"86e3abbd9cf94332041326ac6cf806ed64623e3fd38dbc0385b1f63c37e73fd9","src/header_block.rs":"3925476df69b90d950594faadc5cb24c374d46de8c75a374a235f0d27323a7d8","src/huffman.rs":"71ec740426eee0abb6205104e504f5b97f525a76c4a5f5827b78034d28ce1876","src/huffman_decode_helper.rs":"9ce470e318b3664f58aa109bed483ab15bfd9e0b17d261ea2b609668a42a9d80","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"fd673630b5ed64197851c9a9758685096d3c0aa04f4994290733a38057004ee6","src/prefix.rs":"fb4a9acbcf6fd3178f4474404cd3d3b131abca934f69fe14a9d744bc7e636dc5","src/qlog.rs":"e320007ea8309546b26f9c0019ab8722da80dbd38fa976233fd8ae19a0af637c","src/qpack_send_buf.rs":"755af90fe077b1bcca34a1a2a1bdce5ce601ea490b2ca3f1313e0107d13e67e2","src/reader.rs":"1581261741a0922b147a6975cc8b1a3503846f6dbfdb771d254760c298996982","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/stats.rs":"624dfa3b40858c304097bb0ce5b1be1bb4d7916b1abfc222f1aa705907009730","src/table.rs":"ddf055a228bed575d640d9a06e19e1e9fd98a48e393a7d326f8254438fb94889"},"package":null}

2
third_party/rust/neqo-qpack/Cargo.toml (vendored)

@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.76.0"
name = "neqo-qpack"
version = "0.7.7"
version = "0.7.8"
authors = ["The Neqo Authors <necko@mozilla.com>"]
homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"

File diff suppressed because one or more lines are too long

2
third_party/rust/neqo-transport/Cargo.toml (vendored)

@ -13,7 +13,7 @@
edition = "2021"
rust-version = "1.76.0"
name = "neqo-transport"
version = "0.7.7"
version = "0.7.8"
authors = ["The Neqo Authors <necko@mozilla.com>"]
homepage = "https://github.com/mozilla/neqo/"
license = "MIT OR Apache-2.0"

9
third_party/rust/neqo-transport/build.rs (vendored, new file)

@ -0,0 +1,9 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
fn main() {
println!("cargo:rustc-check-cfg=cfg(fuzzing)");
}


@ -17,9 +17,9 @@ use crate::{
cc::MAX_DATAGRAM_SIZE,
packet::PacketNumber,
qlog::{self, QlogMetric},
recovery::SentPacket,
rtt::RttEstimate,
sender::PACING_BURST_SIZE,
tracking::SentPacket,
};
#[rustfmt::skip] // to keep `::` and thus prevent conflict with `crate::qlog`
use ::qlog::events::{quic::CongestionStateUpdated, EventData};
@ -167,8 +167,8 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
qdebug!(
"packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}",
self,
pkt.pn,
pkt.size,
pkt.pn(),
pkt.len(),
i32::from(!pkt.cc_outstanding()),
i32::from(pkt.lost()),
rtt_est,
@ -176,12 +176,12 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
if !pkt.cc_outstanding() {
continue;
}
if pkt.pn < self.first_app_limited {
if pkt.pn() < self.first_app_limited {
is_app_limited = false;
}
// BIF is set to 0 on a path change, but in case that was because of a simple rebinding
// event, we may still get ACKs for packets sent before the rebinding.
self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.size);
self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.len());
if !self.after_recovery_start(pkt) {
// Do not increase congestion window for packets sent before
@ -194,7 +194,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
qlog::metrics_updated(&mut self.qlog, &[QlogMetric::InRecovery(false)]);
}
new_acked += pkt.size;
new_acked += pkt.len();
}
if is_app_limited {
@ -269,12 +269,12 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
qdebug!(
"packet_lost this={:p}, pn={}, ps={}",
self,
pkt.pn,
pkt.size
pkt.pn(),
pkt.len()
);
// BIF is set to 0 on a path change, but in case that was because of a simple rebinding
// event, we may still declare packets lost that were sent before the rebinding.
self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.size);
self.bytes_in_flight = self.bytes_in_flight.saturating_sub(pkt.len());
}
qlog::metrics_updated(
&mut self.qlog,
@ -308,13 +308,13 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
fn discard(&mut self, pkt: &SentPacket) {
if pkt.cc_outstanding() {
assert!(self.bytes_in_flight >= pkt.size);
self.bytes_in_flight -= pkt.size;
assert!(self.bytes_in_flight >= pkt.len());
self.bytes_in_flight -= pkt.len();
qlog::metrics_updated(
&mut self.qlog,
&[QlogMetric::BytesInFlight(self.bytes_in_flight)],
);
qtrace!([self], "Ignore pkt with size {}", pkt.size);
qtrace!([self], "Ignore pkt with size {}", pkt.len());
}
}
@ -329,7 +329,7 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
fn on_packet_sent(&mut self, pkt: &SentPacket) {
// Record the recovery time and exit any transient state.
if self.state.transient() {
self.recovery_start = Some(pkt.pn);
self.recovery_start = Some(pkt.pn());
self.state.update();
}
@ -341,15 +341,15 @@ impl<T: WindowAdjustment> CongestionControl for ClassicCongestionControl<T> {
// window. Assume that all in-flight packets up to this one are NOT app-limited.
// However, subsequent packets might be app-limited. Set `first_app_limited` to the
// next packet number.
self.first_app_limited = pkt.pn + 1;
self.first_app_limited = pkt.pn() + 1;
}
self.bytes_in_flight += pkt.size;
self.bytes_in_flight += pkt.len();
qdebug!(
"packet_sent this={:p}, pn={}, ps={}",
self,
pkt.pn,
pkt.size
pkt.pn(),
pkt.len()
);
qlog::metrics_updated(
&mut self.qlog,
@ -448,20 +448,20 @@ impl<T: WindowAdjustment> ClassicCongestionControl<T> {
let cutoff = max(first_rtt_sample_time, prev_largest_acked_sent);
for p in lost_packets
.iter()
.skip_while(|p| Some(p.time_sent) < cutoff)
.skip_while(|p| Some(p.time_sent()) < cutoff)
{
if p.pn != last_pn + 1 {
if p.pn() != last_pn + 1 {
// Not a contiguous range of lost packets, start over.
start = None;
}
last_pn = p.pn;
last_pn = p.pn();
if !p.cc_in_flight() {
// Not interesting, keep looking.
continue;
}
if let Some(t) = start {
let elapsed = p
.time_sent
.time_sent()
.checked_duration_since(t)
.expect("time is monotonic");
if elapsed > pc_period {
@ -476,7 +476,7 @@ impl<T: WindowAdjustment> ClassicCongestionControl<T> {
return true;
}
} else {
start = Some(p.time_sent);
start = Some(p.time_sent());
}
}
false
@ -490,7 +490,7 @@ impl<T: WindowAdjustment> ClassicCongestionControl<T> {
// state and update the variable `self.recovery_start`. Before the
// first recovery, all packets were sent after the recovery event,
// allowing to reduce the cwnd on congestion events.
!self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn >= pn)
!self.state.transient() && self.recovery_start.map_or(true, |pn| packet.pn() >= pn)
}
/// Handle a congestion event.
@ -560,8 +560,8 @@ mod tests {
CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE,
},
packet::{PacketNumber, PacketType},
recovery::SentPacket,
rtt::RttEstimate,
tracking::SentPacket,
};
const PTO: Duration = Duration::from_millis(100);
@ -923,13 +923,13 @@ mod tests {
fn persistent_congestion_ack_eliciting() {
let mut lost = make_lost(&[1, PERSISTENT_CONG_THRESH + 2]);
lost[0] = SentPacket::new(
lost[0].pt,
lost[0].pn,
lost[0].ecn_mark,
lost[0].time_sent,
lost[0].packet_type(),
lost[0].pn(),
lost[0].ecn_mark(),
lost[0].time_sent(),
false,
Vec::new(),
lost[0].size,
lost[0].len(),
);
assert!(!persistent_congestion_by_pto(
ClassicCongestionControl::new(NewReno::default()),


@ -14,7 +14,7 @@ use std::{
use neqo_common::qlog::NeqoQlog;
use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error};
use crate::{path::PATH_MTU_V6, recovery::SentPacket, rtt::RttEstimate, Error};
mod classic_cc;
mod cubic;


@ -25,8 +25,8 @@ use crate::{
CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64,
},
packet::PacketType,
recovery::SentPacket,
rtt::RttEstimate,
tracking::SentPacket,
};
const RTT: Duration = Duration::from_millis(100);


@ -17,8 +17,8 @@ use crate::{
MAX_DATAGRAM_SIZE,
},
packet::PacketType,
recovery::SentPacket,
rtt::RttEstimate,
tracking::SentPacket,
};
const PTO: Duration = Duration::from_millis(100);
@ -133,14 +133,14 @@ fn issue_876() {
// and ack it. cwnd increases slightly
cc.on_packets_acked(&sent_packets[6..], &RTT_ESTIMATE, time_now);
assert_eq!(cc.acked_bytes(), sent_packets[6].size);
assert_eq!(cc.acked_bytes(), sent_packets[6].len());
cwnd_is_halved(&cc);
assert_eq!(cc.bytes_in_flight(), 5 * MAX_DATAGRAM_SIZE - 2);
// Packet from before is lost. Should not hurt cwnd.
cc.on_packets_lost(Some(time_now), None, PTO, &sent_packets[1..2]);
assert!(!cc.recovery_packet());
assert_eq!(cc.acked_bytes(), sent_packets[6].size);
assert_eq!(cc.acked_bytes(), sent_packets[6].len());
cwnd_is_halved(&cc);
assert_eq!(cc.bytes_in_flight(), 4 * MAX_DATAGRAM_SIZE);
}


@ -12,6 +12,7 @@ use std::{
fmt::{self, Debug},
iter, mem,
net::{IpAddr, SocketAddr},
num::NonZeroUsize,
ops::RangeInclusive,
rc::{Rc, Weak},
time::{Duration, Instant},
@ -45,7 +46,7 @@ use crate::{
path::{Path, PathRef, Paths},
qlog,
quic_datagrams::{DatagramTracking, QuicDatagrams},
recovery::{LossRecovery, RecoveryToken, SendProfile},
recovery::{LossRecovery, RecoveryToken, SendProfile, SentPacket},
recv_stream::RecvStreamStats,
rtt::{RttEstimate, GRANULARITY},
send_stream::SendStream,
@ -56,7 +57,7 @@ use crate::{
self, TransportParameter, TransportParameterId, TransportParameters,
TransportParametersHandler,
},
tracking::{AckTracker, PacketNumberSpace, RecvdPackets, SentPacket},
tracking::{AckTracker, PacketNumberSpace, RecvdPackets},
version::{Version, WireVersion},
AppError, CloseReason, Error, Res, StreamId,
};
@ -2369,7 +2370,7 @@ impl Connection {
packets.len(),
mtu
);
initial.size += mtu - packets.len();
initial.track_padding(mtu - packets.len());
// These zeros aren't padding frames, they are an invalid all-zero coalesced
// packet, which is why we don't increase `frame_tx.padding` count here.
packets.resize(mtu, 0);
@ -2893,7 +2894,7 @@ impl Connection {
/// to retransmit the frame as needed.
fn handle_lost_packets(&mut self, lost_packets: &[SentPacket]) {
for lost in lost_packets {
for token in &lost.tokens {
for token in lost.tokens() {
qdebug!([self], "Lost: {:?}", token);
match token {
RecoveryToken::Ack(_) => {}
@ -2929,13 +2930,13 @@ impl Connection {
fn handle_ack<R>(
&mut self,
space: PacketNumberSpace,
largest_acknowledged: u64,
largest_acknowledged: PacketNumber,
ack_ranges: R,
ack_ecn: Option<EcnCount>,
ack_delay: u64,
now: Instant,
) where
R: IntoIterator<Item = RangeInclusive<u64>> + Debug,
R: IntoIterator<Item = RangeInclusive<PacketNumber>> + Debug,
R::IntoIter: ExactSizeIterator,
{
qdebug!([self], "Rx ACK space={}, ranges={:?}", space, ack_ranges);
@ -2953,7 +2954,7 @@ impl Connection {
now,
);
for acked in acked_packets {
for token in &acked.tokens {
for token in acked.tokens() {
match token {
RecoveryToken::Stream(stream_token) => self.streams.acked(stream_token),
RecoveryToken::Ack(at) => self.acks.acked(at),
@ -3184,6 +3185,34 @@ impl Connection {
Ok(self.streams.get_send_stream(stream_id)?.avail())
}
/// Set the low watermark for the [`ConnectionEvent::SendStreamWritable`] event.
///
/// A stream emits a [`crate::ConnectionEvent::SendStreamWritable`] event
/// when:
/// - the available sendable bytes increase to or above the watermark,
/// - and were previously below the watermark.
///
/// The default value is `1`. In other words,
/// [`crate::ConnectionEvent::SendStreamWritable`] is emitted whenever the
/// available sendable bytes were previously at `0` and have now increased
/// to `1` or more.
///
/// Use this when your protocol needs at least `watermark` amount of available
/// sendable bytes to make progress.
///
/// # Errors
/// When the stream ID is invalid.
pub fn stream_set_writable_event_low_watermark(
&mut self,
stream_id: StreamId,
watermark: NonZeroUsize,
) -> Res<()> {
self.streams
.get_send_stream_mut(stream_id)?
.set_writable_event_low_watermark(watermark);
Ok(())
}
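A minimal usage sketch of this new API (illustration only, not part of the diff): it assumes a connected `Connection` named `client`, created the same way as in the tests elsewhere in this change.
use std::num::NonZeroUsize;
// `client` is a connected neqo_transport::Connection (hypothetical setup).
let stream_id = client.stream_create(StreamType::BiDi).unwrap();
// Only emit SendStreamWritable once at least 4096 sendable bytes are available.
client
    .stream_set_writable_event_low_watermark(stream_id, NonZeroUsize::new(4096).unwrap())
    .unwrap();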
/// Close the stream. Enqueued data will be sent.
/// # Errors
/// When the stream ID is invalid.


@ -19,14 +19,14 @@ use crate::{
packet::PacketBuilder,
quic_datagrams::MAX_QUIC_DATAGRAM,
send_stream::{RetransmissionPriority, TransmissionPriority},
CloseReason, Connection, ConnectionParameters, Error, StreamType,
CloseReason, Connection, ConnectionParameters, Error, StreamType, MIN_INITIAL_PACKET_SIZE,
};
const DATAGRAM_LEN_MTU: u64 = 1310;
const DATA_MTU: &[u8] = &[1; 1310];
const DATA_BIGGER_THAN_MTU: &[u8] = &[0; 2620];
const DATAGRAM_LEN_SMALLER_THAN_MTU: u64 = 1200;
const DATA_SMALLER_THAN_MTU: &[u8] = &[0; 1200];
const DATAGRAM_LEN_SMALLER_THAN_MTU: u64 = MIN_INITIAL_PACKET_SIZE as u64;
const DATA_SMALLER_THAN_MTU: &[u8] = &[0; MIN_INITIAL_PACKET_SIZE];
const DATA_SMALLER_THAN_MTU_2: &[u8] = &[0; 600];
const OUTGOING_QUEUE: usize = 2;
@ -259,7 +259,9 @@ fn datagram_after_stream_data() {
// Create a stream with normal priority and send some data.
let stream_id = client.stream_create(StreamType::BiDi).unwrap();
client.stream_send(stream_id, &[6; 1200]).unwrap();
client
.stream_send(stream_id, &[6; MIN_INITIAL_PACKET_SIZE])
.unwrap();
assert!(
matches!(send_packet_and_get_server_event(&mut client, &mut server), ConnectionEvent::RecvStreamReadable { stream_id: s } if s == stream_id)
@ -289,7 +291,9 @@ fn datagram_before_stream_data() {
RetransmissionPriority::default(),
)
.unwrap();
client.stream_send(stream_id, &[6; 1200]).unwrap();
client
.stream_send(stream_id, &[6; MIN_INITIAL_PACKET_SIZE])
.unwrap();
// Write a datagram.
let dgram_sent = client.stats().frame_tx.datagram;
@ -440,7 +444,7 @@ fn send_datagram(sender: &mut Connection, receiver: &mut Connection, data: &[u8]
#[test]
fn multiple_datagram_events() {
const DATA_SIZE: usize = 1200;
const DATA_SIZE: usize = MIN_INITIAL_PACKET_SIZE;
const MAX_QUEUE: usize = 3;
const FIRST_DATAGRAM: &[u8] = &[0; DATA_SIZE];
const SECOND_DATAGRAM: &[u8] = &[1; DATA_SIZE];
@ -486,7 +490,7 @@ fn multiple_datagram_events() {
#[test]
fn too_many_datagram_events() {
const DATA_SIZE: usize = 1200;
const DATA_SIZE: usize = MIN_INITIAL_PACKET_SIZE;
const MAX_QUEUE: usize = 2;
const FIRST_DATAGRAM: &[u8] = &[0; DATA_SIZE];
const SECOND_DATAGRAM: &[u8] = &[1; DATA_SIZE];


@ -17,7 +17,7 @@ use super::{
use crate::{
packet::PACKET_BIT_LONG,
tparams::{self, TransportParameter},
ConnectionParameters, Error, Version,
ConnectionParameters, Error, Version, MIN_INITIAL_PACKET_SIZE,
};
// The expected PTO duration after the first Initial is sent.
@ -30,7 +30,7 @@ fn unknown_version() {
mem::drop(client.process(None, now()).dgram());
let mut unknown_version_packet = vec![0x80, 0x1a, 0x1a, 0x1a, 0x1a];
unknown_version_packet.resize(1200, 0x0);
unknown_version_packet.resize(MIN_INITIAL_PACKET_SIZE, 0x0);
mem::drop(client.process(Some(&datagram(unknown_version_packet)), now()));
assert_eq!(1, client.stats().dropped_rx);
}
@ -40,7 +40,7 @@ fn server_receive_unknown_first_packet() {
let mut server = default_server();
let mut unknown_version_packet = vec![0x80, 0x1a, 0x1a, 0x1a, 0x1a];
unknown_version_packet.resize(1200, 0x0);
unknown_version_packet.resize(MIN_INITIAL_PACKET_SIZE, 0x0);
assert_eq!(
server.process(Some(&datagram(unknown_version_packet,)), now(),),


@ -14,7 +14,10 @@ use super::{
super::Connection, connect, default_client, default_server, exchange_ticket, new_server,
resumed_server, CountingConnectionIdGenerator,
};
use crate::{events::ConnectionEvent, ConnectionParameters, Error, StreamType, Version};
use crate::{
events::ConnectionEvent, ConnectionParameters, Error, StreamType, Version,
MIN_INITIAL_PACKET_SIZE,
};
#[test]
fn zero_rtt_negotiate() {
@ -57,8 +60,8 @@ fn zero_rtt_send_recv() {
client.stream_send(client_stream_id, &[1, 2, 3]).unwrap();
let client_0rtt = client.process(None, now());
assert!(client_0rtt.as_dgram_ref().is_some());
// 0-RTT packets on their own shouldn't be padded to 1200.
assert!(client_0rtt.as_dgram_ref().unwrap().len() < 1200);
// 0-RTT packets on their own shouldn't be padded to MIN_INITIAL_PACKET_SIZE.
assert!(client_0rtt.as_dgram_ref().unwrap().len() < MIN_INITIAL_PACKET_SIZE);
let server_hs = server.process(client_hs.as_dgram_ref(), now());
assert!(server_hs.as_dgram_ref().is_some()); // ServerHello, etc...

third_party/rust/neqo-transport/src/ecn.rs (vendored)

@ -9,7 +9,7 @@ use std::ops::{AddAssign, Deref, DerefMut, Sub};
use enum_map::EnumMap;
use neqo_common::{qdebug, qinfo, qwarn, IpTosEcn};
use crate::{packet::PacketNumber, tracking::SentPacket};
use crate::{packet::PacketNumber, recovery::SentPacket};
/// The number of packets to use for testing a path for ECN capability.
pub const ECN_TEST_COUNT: usize = 10;
@ -159,7 +159,7 @@ impl EcnInfo {
// > Validating ECN counts from reordered ACK frames can result in failure. An endpoint MUST
// > NOT fail ECN validation as a result of processing an ACK frame that does not increase
// > the largest acknowledged packet number.
let largest_acked = acked_packets.first().expect("must be there").pn;
let largest_acked = acked_packets.first().expect("must be there").pn();
if largest_acked <= self.largest_acked {
return;
}
@ -186,7 +186,7 @@ impl EcnInfo {
// > ECT(0) marking.
let newly_acked_sent_with_ect0: u64 = acked_packets
.iter()
.filter(|p| p.ecn_mark == IpTosEcn::Ect0)
.filter(|p| p.ecn_mark() == IpTosEcn::Ect0)
.count()
.try_into()
.unwrap();

third_party/rust/neqo-transport/src/fc.rs (vendored)

@ -64,15 +64,16 @@ where
}
}
/// Update the maximum. Returns `true` if the change was an increase.
pub fn update(&mut self, limit: u64) -> bool {
/// Update the maximum. Returns `Some` with the updated available flow
/// control if the change was an increase and `None` otherwise.
pub fn update(&mut self, limit: u64) -> Option<usize> {
debug_assert!(limit < u64::MAX);
if limit > self.limit {
self.limit = limit;
self.blocked_frame = false;
true
Some(self.available())
} else {
false
None
}
}
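A short sketch of the new calling convention (illustration only; `fc`, `new_limit` and `stream` are placeholder names for an internal sender-side flow-control handle, a new limit value, and a send stream):
// Previously `update` returned a bool; now an increase also reports the
// updated available amount, so callers can compare it with the previous value.
let previous_limit = fc.available();
if let Some(current_limit) = fc.update(new_limit) {
    // The limit increased; let the stream decide whether the watermark was crossed.
    stream.maybe_emit_writable_event(previous_limit, current_limit);
}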

third_party/rust/neqo-transport/src/lib.rs (vendored)

@ -61,6 +61,7 @@ pub use self::{
},
events::{ConnectionEvent, ConnectionEvents},
frame::CloseError,
packet::MIN_INITIAL_PACKET_SIZE,
quic_datagrams::DatagramTracking,
recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE},
send_stream::{SendStreamStats, SEND_BUFFER_SIZE},


@ -23,6 +23,10 @@ use crate::{
Error, Res,
};
/// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish
/// a new connection across all QUIC versions this server supports.
pub const MIN_INITIAL_PACKET_SIZE: usize = 1200;
pub const PACKET_BIT_LONG: u8 = 0x80;
const PACKET_BIT_SHORT: u8 = 0x00;
const PACKET_BIT_FIXED_QUIC: u8 = 0x40;

third_party/rust/neqo-transport/src/path.rs (vendored)

@ -25,11 +25,11 @@ use crate::{
ecn::{EcnCount, EcnInfo},
frame::{FRAME_TYPE_PATH_CHALLENGE, FRAME_TYPE_PATH_RESPONSE, FRAME_TYPE_RETIRE_CONNECTION_ID},
packet::PacketBuilder,
recovery::RecoveryToken,
recovery::{RecoveryToken, SentPacket},
rtt::RttEstimate,
sender::PacketSender,
stats::FrameStats,
tracking::{PacketNumberSpace, SentPacket},
tracking::PacketNumberSpace,
Stats,
};
@ -954,12 +954,12 @@ impl Path {
qinfo!(
[self],
"discarding a packet without an RTT estimate; guessing RTT={:?}",
now - sent.time_sent
now - sent.time_sent()
);
stats.rtt_init_guess = true;
self.rtt.update(
&mut self.qlog,
now - sent.time_sent,
now - sent.time_sent(),
Duration::new(0, 0),
false,
now,

third_party/rust/neqo-transport/src/qlog.rs (vendored)

@ -27,9 +27,9 @@ use crate::{
frame::{CloseError, Frame},
packet::{DecryptedPacket, PacketNumber, PacketType, PublicPacket},
path::PathRef,
recovery::SentPacket,
stream_id::StreamType as NeqoStreamType,
tparams::{self, TransportParametersHandler},
tracking::SentPacket,
version::{Version, VersionConfig, WireVersion},
};
@ -254,7 +254,8 @@ pub fn packet_dropped(qlog: &mut NeqoQlog, public_packet: &PublicPacket) {
pub fn packets_lost(qlog: &mut NeqoQlog, pkts: &[SentPacket]) {
qlog.add_event_with_stream(|stream| {
for pkt in pkts {
let header = PacketHeader::with_type(pkt.pt.into(), Some(pkt.pn), None, None, None);
let header =
PacketHeader::with_type(pkt.packet_type().into(), Some(pkt.pn()), None, None, None);
let ev_data = EventData::PacketLost(PacketLost {
header: Some(header),


@ -6,31 +6,30 @@
// Tracking of sent packets and detecting their loss.
mod sent;
mod token;
use std::{
cmp::{max, min},
collections::BTreeMap,
mem,
convert::TryFrom,
ops::RangeInclusive,
time::{Duration, Instant},
};
use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn};
pub use sent::SentPacket;
use sent::SentPackets;
use smallvec::{smallvec, SmallVec};
pub use token::{RecoveryToken, StreamRecoveryToken};
use crate::{
ackrate::AckRate,
cid::ConnectionIdEntry,
crypto::CryptoRecoveryToken,
ecn::EcnCount,
packet::PacketNumber,
path::{Path, PathRef},
qlog::{self, QlogMetric},
quic_datagrams::DatagramTracking,
rtt::RttEstimate,
send_stream::SendStreamRecoveryToken,
stats::{Stats, StatsCell},
stream_id::{StreamId, StreamType},
tracking::{AckToken, PacketNumberSpace, PacketNumberSpaceSet, SentPacket},
tracking::{PacketNumberSpace, PacketNumberSpaceSet},
};
pub(crate) const PACKET_THRESHOLD: u64 = 3;
@ -49,54 +48,6 @@ pub(crate) const MIN_OUTSTANDING_UNACK: usize = 16;
/// The scale we use for the fast PTO feature.
pub const FAST_PTO_SCALE: u8 = 100;
#[derive(Debug, Clone)]
#[allow(clippy::module_name_repetitions)]
pub enum StreamRecoveryToken {
Stream(SendStreamRecoveryToken),
ResetStream {
stream_id: StreamId,
},
StopSending {
stream_id: StreamId,
},
MaxData(u64),
DataBlocked(u64),
MaxStreamData {
stream_id: StreamId,
max_data: u64,
},
StreamDataBlocked {
stream_id: StreamId,
limit: u64,
},
MaxStreams {
stream_type: StreamType,
max_streams: u64,
},
StreamsBlocked {
stream_type: StreamType,
limit: u64,
},
}
#[derive(Debug, Clone)]
#[allow(clippy::module_name_repetitions)]
pub enum RecoveryToken {
Stream(StreamRecoveryToken),
Ack(AckToken),
Crypto(CryptoRecoveryToken),
HandshakeDone,
KeepAlive, // Special PING.
NewToken(usize),
NewConnectionId(ConnectionIdEntry<[u8; 16]>),
RetireConnectionId(u64),
AckFrequency(AckRate),
Datagram(DatagramTracking),
}
/// `SendProfile` tells a sender how to send packets.
#[derive(Debug)]
pub struct SendProfile {
@ -181,7 +132,8 @@ pub(crate) struct LossRecoverySpace {
/// This might be less than the number of ACK-eliciting packets,
/// because PTO packets don't count.
in_flight_outstanding: usize,
sent_packets: BTreeMap<u64, SentPacket>,
/// The packets that we have sent and are tracking.
sent_packets: SentPackets,
/// The time that the first out-of-order packet was sent.
/// This is `None` if there were no out-of-order packets detected.
/// When set to `Some(T)`, time-based loss detection should be enabled.
@ -196,7 +148,7 @@ impl LossRecoverySpace {
largest_acked_sent_time: None,
last_ack_eliciting: None,
in_flight_outstanding: 0,
sent_packets: BTreeMap::default(),
sent_packets: SentPackets::default(),
first_ooo_time: None,
}
}
@ -221,9 +173,9 @@ impl LossRecoverySpace {
pub fn pto_packets(&mut self, count: usize) -> impl Iterator<Item = &SentPacket> {
self.sent_packets
.iter_mut()
.filter_map(|(pn, sent)| {
.filter_map(|sent| {
if sent.pto() {
qtrace!("PTO: marking packet {} lost ", pn);
qtrace!("PTO: marking packet {} lost ", sent.pn());
Some(&*sent)
} else {
None
@ -256,16 +208,16 @@ impl LossRecoverySpace {
pub fn on_packet_sent(&mut self, sent_packet: SentPacket) {
if sent_packet.ack_eliciting() {
self.last_ack_eliciting = Some(sent_packet.time_sent);
self.last_ack_eliciting = Some(sent_packet.time_sent());
self.in_flight_outstanding += 1;
} else if self.space != PacketNumberSpace::ApplicationData
&& self.last_ack_eliciting.is_none()
{
// For Initial and Handshake spaces, make sure that we have a PTO baseline
// always. See `LossRecoverySpace::pto_base_time()` for details.
self.last_ack_eliciting = Some(sent_packet.time_sent);
self.last_ack_eliciting = Some(sent_packet.time_sent());
}
self.sent_packets.insert(sent_packet.pn, sent_packet);
self.sent_packets.track(sent_packet);
}
/// If we are only sending ACK frames, send a PING frame after 2 PTOs so that
@ -285,56 +237,42 @@ impl LossRecoverySpace {
.map_or(false, |t| now > t + (pto * n_pto))
}
fn remove_packet(&mut self, p: &SentPacket) {
if p.ack_eliciting() {
debug_assert!(self.in_flight_outstanding > 0);
self.in_flight_outstanding -= 1;
if self.in_flight_outstanding == 0 {
qtrace!("remove_packet outstanding == 0 for space {}", self.space);
}
fn remove_outstanding(&mut self, count: usize) {
debug_assert!(self.in_flight_outstanding >= count);
self.in_flight_outstanding -= count;
if self.in_flight_outstanding == 0 {
qtrace!("remove_packet outstanding == 0 for space {}", self.space);
}
}
/// Remove all acknowledged packets.
fn remove_packet(&mut self, p: &SentPacket) {
if p.ack_eliciting() {
self.remove_outstanding(1);
}
}
/// Remove all newly acknowledged packets.
/// Returns all the acknowledged packets, with the largest packet number first.
/// ...and a boolean indicating if any of those packets were ack-eliciting.
/// This operates more efficiently because it assumes that the input is sorted
/// in the order that ranges appear in an ACK frame (largest first).
fn remove_acked<R>(&mut self, acked_ranges: R, stats: &mut Stats) -> (Vec<SentPacket>, bool)
where
R: IntoIterator<Item = RangeInclusive<u64>>,
R: IntoIterator<Item = RangeInclusive<PacketNumber>>,
R::IntoIter: ExactSizeIterator,
{
let acked_ranges = acked_ranges.into_iter();
let mut keep = Vec::with_capacity(acked_ranges.len());
let mut acked = Vec::new();
let acked = self.sent_packets.take_ranges(acked_ranges);
let mut eliciting = false;
for range in acked_ranges {
let first_keep = *range.end() + 1;
if let Some((&first, _)) = self.sent_packets.range(range).next() {
let mut tail = self.sent_packets.split_off(&first);
if let Some((&next, _)) = tail.range(first_keep..).next() {
keep.push(tail.split_off(&next));
}
for (_, p) in tail.into_iter().rev() {
self.remove_packet(&p);
eliciting |= p.ack_eliciting();
if p.lost() {
stats.late_ack += 1;
}
if p.pto_fired() {
stats.pto_ack += 1;
}
acked.push(p);
}
for p in &acked {
self.remove_packet(p);
eliciting |= p.ack_eliciting();
if p.lost() {
stats.late_ack += 1;
}
if p.pto_fired() {
stats.pto_ack += 1;
}
}
for mut k in keep.into_iter().rev() {
self.sent_packets.append(&mut k);
}
(acked, eliciting)
}
@ -343,12 +281,12 @@ impl LossRecoverySpace {
/// and when keys are dropped.
fn remove_ignored(&mut self) -> impl Iterator<Item = SentPacket> {
self.in_flight_outstanding = 0;
mem::take(&mut self.sent_packets).into_values()
std::mem::take(&mut self.sent_packets).drain_all()
}
/// Remove the primary path marking on any packets this is tracking.
fn migrate(&mut self) {
for pkt in self.sent_packets.values_mut() {
for pkt in self.sent_packets.iter_mut() {
pkt.clear_primary_path();
}
}
@ -357,26 +295,9 @@ impl LossRecoverySpace {
/// We try to keep these around until a probe is sent for them, so it is
/// important that `cd` is set to at least the current PTO time; otherwise we
/// might remove all in-flight packets and stop sending probes.
#[allow(clippy::option_if_let_else)] // Hard enough to read as-is.
fn remove_old_lost(&mut self, now: Instant, cd: Duration) {
let mut it = self.sent_packets.iter();
// If the first item is not expired, do nothing.
if it.next().map_or(false, |(_, p)| p.expired(now, cd)) {
// Find the index of the first unexpired packet.
let to_remove = if let Some(first_keep) =
it.find_map(|(i, p)| if p.expired(now, cd) { None } else { Some(*i) })
{
// Some packets haven't expired, so keep those.
let keep = self.sent_packets.split_off(&first_keep);
mem::replace(&mut self.sent_packets, keep)
} else {
// All packets are expired.
mem::take(&mut self.sent_packets)
};
for (_, p) in to_remove {
self.remove_packet(&p);
}
}
let removed = self.sent_packets.remove_expired(now, cd);
self.remove_outstanding(removed);
}
/// Detect lost packets.
@ -402,44 +323,39 @@ impl LossRecoverySpace {
let largest_acked = self.largest_acked;
// Lost for retrans/CC purposes
let mut lost_pns = SmallVec::<[_; 8]>::new();
for (pn, packet) in self
for packet in self
.sent_packets
.iter_mut()
// BTreeMap iterates in order of ascending PN
.take_while(|(&k, _)| k < largest_acked.unwrap_or(PacketNumber::MAX))
.take_while(|p| p.pn() < largest_acked.unwrap_or(PacketNumber::MAX))
{
// Packets sent before now - loss_delay are deemed lost.
if packet.time_sent + loss_delay <= now {
if packet.time_sent() + loss_delay <= now {
qtrace!(
"lost={}, time sent {:?} is before lost_delay {:?}",
pn,
packet.time_sent,
packet.pn(),
packet.time_sent(),
loss_delay
);
} else if largest_acked >= Some(*pn + PACKET_THRESHOLD) {
} else if largest_acked >= Some(packet.pn() + PACKET_THRESHOLD) {
qtrace!(
"lost={}, is >= {} from largest acked {:?}",
pn,
packet.pn(),
PACKET_THRESHOLD,
largest_acked
);
} else {
if largest_acked.is_some() {
self.first_ooo_time = Some(packet.time_sent);
self.first_ooo_time = Some(packet.time_sent());
}
// No more packets can be declared lost after this one.
break;
};
if packet.declare_lost(now) {
lost_pns.push(*pn);
lost_packets.push(packet.clone());
}
}
lost_packets.extend(lost_pns.iter().map(|pn| self.sent_packets[pn].clone()));
}
}
@ -629,8 +545,8 @@ impl LossRecovery {
}
pub fn on_packet_sent(&mut self, path: &PathRef, mut sent_packet: SentPacket) {
let pn_space = PacketNumberSpace::from(sent_packet.pt);
qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn);
let pn_space = PacketNumberSpace::from(sent_packet.packet_type());
qdebug!([self], "packet {}-{} sent", pn_space, sent_packet.pn());
if let Some(space) = self.spaces.get_mut(pn_space) {
path.borrow_mut().packet_sent(&mut sent_packet);
space.on_packet_sent(sent_packet);
@ -639,7 +555,7 @@ impl LossRecovery {
[self],
"ignoring {}-{} from dropped space",
pn_space,
sent_packet.pn
sent_packet.pn()
);
}
}
@ -671,14 +587,14 @@ impl LossRecovery {
&mut self,
primary_path: &PathRef,
pn_space: PacketNumberSpace,
largest_acked: u64,
largest_acked: PacketNumber,
acked_ranges: R,
ack_ecn: Option<EcnCount>,
ack_delay: Duration,
now: Instant,
) -> (Vec<SentPacket>, Vec<SentPacket>)
where
R: IntoIterator<Item = RangeInclusive<u64>>,
R: IntoIterator<Item = RangeInclusive<PacketNumber>>,
R::IntoIter: ExactSizeIterator,
{
qdebug!(
@ -707,11 +623,11 @@ impl LossRecovery {
// If the largest acknowledged is newly acked and any newly acked
// packet was ack-eliciting, update the RTT. (-recovery 5.1)
space.largest_acked_sent_time = Some(largest_acked_pkt.time_sent);
space.largest_acked_sent_time = Some(largest_acked_pkt.time_sent());
if any_ack_eliciting && largest_acked_pkt.on_primary_path() {
self.rtt_sample(
primary_path.borrow_mut().rtt_mut(),
largest_acked_pkt.time_sent,
largest_acked_pkt.time_sent(),
now,
ack_delay,
);
@ -1019,6 +935,7 @@ impl ::std::fmt::Display for LossRecovery {
mod tests {
use std::{
cell::RefCell,
convert::TryInto,
ops::{Deref, DerefMut, RangeInclusive},
rc::Rc,
time::{Duration, Instant},
@ -1034,7 +951,7 @@ mod tests {
cc::CongestionControlAlgorithm,
cid::{ConnectionId, ConnectionIdEntry},
ecn::EcnCount,
packet::PacketType,
packet::{PacketNumber, PacketType},
path::{Path, PathRef},
rtt::RttEstimate,
stats::{Stats, StatsCell},
@ -1061,8 +978,8 @@ mod tests {
pub fn on_ack_received(
&mut self,
pn_space: PacketNumberSpace,
largest_acked: u64,
acked_ranges: Vec<RangeInclusive<u64>>,
largest_acked: PacketNumber,
acked_ranges: Vec<RangeInclusive<PacketNumber>>,
ack_ecn: Option<EcnCount>,
ack_delay: Duration,
now: Instant,
@ -1235,8 +1152,8 @@ mod tests {
);
}
fn add_sent(lrs: &mut LossRecoverySpace, packet_numbers: &[u64]) {
for &pn in packet_numbers {
fn add_sent(lrs: &mut LossRecoverySpace, max_pn: PacketNumber) {
for pn in 0..=max_pn {
lrs.on_packet_sent(SentPacket::new(
PacketType::Short,
pn,
@ -1249,15 +1166,18 @@ mod tests {
}
}
fn match_acked(acked: &[SentPacket], expected: &[u64]) {
assert!(acked.iter().map(|p| &p.pn).eq(expected));
fn match_acked(acked: &[SentPacket], expected: &[PacketNumber]) {
assert_eq!(
acked.iter().map(SentPacket::pn).collect::<Vec<_>>(),
expected
);
}
#[test]
fn remove_acked() {
let mut lrs = LossRecoverySpace::new(PacketNumberSpace::ApplicationData);
let mut stats = Stats::default();
add_sent(&mut lrs, &[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
add_sent(&mut lrs, 10);
let (acked, _) = lrs.remove_acked(vec![], &mut stats);
assert!(acked.is_empty());
let (acked, _) = lrs.remove_acked(vec![7..=8, 2..=4], &mut stats);
@ -1265,7 +1185,7 @@ mod tests {
let (acked, _) = lrs.remove_acked(vec![8..=11], &mut stats);
match_acked(&acked, &[10, 9]);
let (acked, _) = lrs.remove_acked(vec![0..=2], &mut stats);
match_acked(&acked, &[1]);
match_acked(&acked, &[1, 0]);
let (acked, _) = lrs.remove_acked(vec![5..=6], &mut stats);
match_acked(&acked, &[6, 5]);
}
@ -1517,7 +1437,7 @@ mod tests {
Vec::new(),
ON_SENT_SIZE,
);
let pn_space = PacketNumberSpace::from(sent_pkt.pt);
let pn_space = PacketNumberSpace::from(sent_pkt.packet_type());
lr.on_packet_sent(sent_pkt);
lr.on_ack_received(
pn_space,
@ -1630,7 +1550,7 @@ mod tests {
lr.on_packet_sent(SentPacket::new(
PacketType::Initial,
1,
0,
IpTosEcn::default(),
now(),
true,

third_party/rust/neqo-transport/src/recovery/sent.rs (vendored, new file)

@ -0,0 +1,379 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// A collection for sent packets.
use std::{
collections::BTreeMap,
ops::RangeInclusive,
time::{Duration, Instant},
};
use neqo_common::IpTosEcn;
use crate::{
packet::{PacketNumber, PacketType},
recovery::RecoveryToken,
};
#[derive(Debug, Clone)]
pub struct SentPacket {
pt: PacketType,
pn: PacketNumber,
ecn_mark: IpTosEcn,
ack_eliciting: bool,
time_sent: Instant,
primary_path: bool,
tokens: Vec<RecoveryToken>,
time_declared_lost: Option<Instant>,
/// After a PTO, this is true when the packet has been released.
pto: bool,
len: usize,
}
impl SentPacket {
pub fn new(
pt: PacketType,
pn: PacketNumber,
ecn_mark: IpTosEcn,
time_sent: Instant,
ack_eliciting: bool,
tokens: Vec<RecoveryToken>,
len: usize,
) -> Self {
Self {
pt,
pn,
ecn_mark,
time_sent,
ack_eliciting,
primary_path: true,
tokens,
time_declared_lost: None,
pto: false,
len,
}
}
/// The type of this packet.
pub fn packet_type(&self) -> PacketType {
self.pt
}
/// The number of the packet.
pub fn pn(&self) -> PacketNumber {
self.pn
}
/// The ECN mark of the packet.
pub fn ecn_mark(&self) -> IpTosEcn {
self.ecn_mark
}
/// The time that this packet was sent.
pub fn time_sent(&self) -> Instant {
self.time_sent
}
/// Returns `true` if the packet will elicit an ACK.
pub fn ack_eliciting(&self) -> bool {
self.ack_eliciting
}
/// Returns `true` if the packet was sent on the primary path.
pub fn on_primary_path(&self) -> bool {
self.primary_path
}
/// The length of the packet that was sent.
pub fn len(&self) -> usize {
self.len
}
/// Access the recovery tokens that this holds.
pub fn tokens(&self) -> &[RecoveryToken] {
&self.tokens
}
/// Clears the flag that had this packet on the primary path.
/// Used when migrating to clear out state.
pub fn clear_primary_path(&mut self) {
self.primary_path = false;
}
/// For Initial packets, it is possible that the packet builder needs to amend the length.
pub fn track_padding(&mut self, padding: usize) {
debug_assert_eq!(self.pt, PacketType::Initial);
self.len += padding;
}
/// Whether the packet has been declared lost.
pub fn lost(&self) -> bool {
self.time_declared_lost.is_some()
}
/// Whether accounting for the loss or acknowledgement in the
/// congestion controller is pending.
/// Returns `true` if the packet counts as being "in flight",
/// and has not previously been declared lost.
/// Note that this should count packets that contain only ACK and PADDING,
/// but we don't send PADDING, so we don't track that.
pub fn cc_outstanding(&self) -> bool {
self.ack_eliciting() && self.on_primary_path() && !self.lost()
}
/// Whether the packet should be tracked as in-flight.
pub fn cc_in_flight(&self) -> bool {
self.ack_eliciting() && self.on_primary_path()
}
/// Declare the packet as lost. Returns `true` if this is the first time.
pub fn declare_lost(&mut self, now: Instant) -> bool {
if self.lost() {
false
} else {
self.time_declared_lost = Some(now);
true
}
}
/// Ask whether this tracked packet has been declared lost for long enough
/// that it can be expired and no longer tracked.
pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool {
self.time_declared_lost
.map_or(false, |loss_time| (loss_time + expiration_period) <= now)
}
/// Whether the packet contents were cleared out after a PTO.
pub fn pto_fired(&self) -> bool {
self.pto
}
/// On PTO, we need to get the recovery tokens so that we can ensure that
/// the frames we sent can be sent again in the PTO packet(s). Do that just once.
pub fn pto(&mut self) -> bool {
if self.pto || self.lost() {
false
} else {
self.pto = true;
true
}
}
}
/// A collection for packets that we have sent that haven't been acknowledged.
#[derive(Debug, Default)]
pub struct SentPackets {
/// The collection.
packets: BTreeMap<u64, SentPacket>,
}
impl SentPackets {
pub fn len(&self) -> usize {
self.packets.len()
}
pub fn track(&mut self, packet: SentPacket) {
self.packets.insert(packet.pn, packet);
}
pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut SentPacket> {
self.packets.values_mut()
}
/// Take values from a specified ranges of packet numbers.
/// The values returned will be reversed, so that the most recent packet appears first.
/// This is because ACK frames arrive with ranges starting from the largest acknowledged
/// and we want to match that.
pub fn take_ranges<R>(&mut self, acked_ranges: R) -> Vec<SentPacket>
where
R: IntoIterator<Item = RangeInclusive<PacketNumber>>,
R::IntoIter: ExactSizeIterator,
{
let mut result = Vec::new();
// Take all packets out; those not covered by the acked ranges are re-inserted below.
let mut packets = std::mem::take(&mut self.packets);
for range in acked_ranges {
// For each acked range, split off the acknowledged part,
// then split off the part that hasn't been acknowledged.
// This order works better when processing ranges that
// have already been processed, which is common.
let mut acked = packets.split_off(range.start());
let keep = acked.split_off(&(*range.end() + 1));
self.packets.extend(keep);
result.extend(acked.into_values().rev());
}
self.packets.extend(packets);
result
}
/// Empty out the packets, but keep the offset.
pub fn drain_all(&mut self) -> impl Iterator<Item = SentPacket> {
std::mem::take(&mut self.packets).into_values()
}
/// See `LossRecoverySpace::remove_old_lost` for details on `now` and `cd`.
/// Returns the number of ack-eliciting packets removed.
pub fn remove_expired(&mut self, now: Instant, cd: Duration) -> usize {
let mut it = self.packets.iter();
// If the first item is not expired, do nothing (the most common case).
if it.next().map_or(false, |(_, p)| p.expired(now, cd)) {
// Find the index of the first unexpired packet.
let to_remove = if let Some(first_keep) =
it.find_map(|(i, p)| if p.expired(now, cd) { None } else { Some(*i) })
{
// Some packets haven't expired, so keep those.
let keep = self.packets.split_off(&first_keep);
std::mem::replace(&mut self.packets, keep)
} else {
// All packets are expired.
std::mem::take(&mut self.packets)
};
to_remove
.into_values()
.filter(SentPacket::ack_eliciting)
.count()
} else {
0
}
}
}
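For orientation, a minimal sketch of how this collection behaves (illustration only; `pkt` is the test helper defined in the test module below):
let mut pkts = SentPackets::default();
for pn in 0..3u32 {
    pkts.track(pkt(pn));
}
// Acknowledged packets come back largest packet number first, matching the
// order in which an ACK frame lists its ranges.
let acked = pkts.take_ranges([0..=2]);
assert_eq!(
    acked.iter().map(SentPacket::pn).collect::<Vec<_>>(),
    [2, 1, 0]
);
assert_eq!(pkts.len(), 0);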
#[cfg(test)]
mod tests {
use std::{
cell::OnceCell,
convert::TryFrom,
time::{Duration, Instant},
};
use neqo_common::IpTosEcn;
use super::{SentPacket, SentPackets};
use crate::packet::{PacketNumber, PacketType};
const PACKET_GAP: Duration = Duration::from_secs(1);
fn start_time() -> Instant {
thread_local!(static STARTING_TIME: OnceCell<Instant> = const { OnceCell::new() });
STARTING_TIME.with(|t| *t.get_or_init(Instant::now))
}
fn pkt(n: u32) -> SentPacket {
SentPacket::new(
PacketType::Short,
PacketNumber::from(n),
IpTosEcn::default(),
start_time() + (PACKET_GAP * n),
true,
Vec::new(),
100,
)
}
fn pkts() -> SentPackets {
let mut pkts = SentPackets::default();
pkts.track(pkt(0));
pkts.track(pkt(1));
pkts.track(pkt(2));
assert_eq!(pkts.len(), 3);
pkts
}
trait HasPacketNumber {
fn pn(&self) -> PacketNumber;
}
impl HasPacketNumber for SentPacket {
fn pn(&self) -> PacketNumber {
self.pn
}
}
impl HasPacketNumber for &'_ SentPacket {
fn pn(&self) -> PacketNumber {
self.pn
}
}
impl HasPacketNumber for &'_ mut SentPacket {
fn pn(&self) -> PacketNumber {
self.pn
}
}
fn remove_one(pkts: &mut SentPackets, idx: PacketNumber) {
assert_eq!(pkts.len(), 3);
let store = pkts.take_ranges([idx..=idx]);
let mut it = store.into_iter();
assert_eq!(idx, it.next().unwrap().pn());
assert!(it.next().is_none());
std::mem::drop(it);
assert_eq!(pkts.len(), 2);
}
fn assert_zero_and_two<'a, 'b: 'a>(
mut it: impl Iterator<Item = impl HasPacketNumber + 'b> + 'a,
) {
assert_eq!(it.next().unwrap().pn(), 0);
assert_eq!(it.next().unwrap().pn(), 2);
assert!(it.next().is_none());
}
#[test]
fn iterate_skipped() {
let mut pkts = pkts();
for (i, p) in pkts.packets.values().enumerate() {
assert_eq!(i, usize::try_from(p.pn).unwrap());
}
remove_one(&mut pkts, 1);
// Validate the merged result multiple ways.
assert_zero_and_two(pkts.iter_mut());
{
// Reverse the expectations here as this iterator reverses its output.
let store = pkts.take_ranges([0..=2]);
let mut it = store.into_iter();
assert_eq!(it.next().unwrap().pn(), 2);
assert_eq!(it.next().unwrap().pn(), 0);
assert!(it.next().is_none());
};
// Nothing remains to be tracked at this point.
assert_eq!(pkts.packets.len(), 0);
assert_eq!(pkts.len(), 0);
}
#[test]
fn drain() {
let mut pkts = pkts();
remove_one(&mut pkts, 1);
assert_zero_and_two(pkts.drain_all());
assert_eq!(pkts.len(), 0);
}
#[test]
fn remove_expired() {
let mut pkts = pkts();
remove_one(&mut pkts, 0);
for p in pkts.iter_mut() {
p.declare_lost(p.time_sent); // just to keep things simple.
}
// Expire up to pkt(1).
let count = pkts.remove_expired(start_time() + PACKET_GAP, Duration::new(0, 0));
assert_eq!(count, 1);
assert_eq!(pkts.len(), 1);
}
#[test]
fn first_skipped_ok() {
let mut pkts = SentPackets::default();
pkts.track(pkt(4)); // This is fine.
assert_eq!(pkts.len(), 1);
}
}

third_party/rust/neqo-transport/src/recovery/token.rs (vendored, new file)

@ -0,0 +1,63 @@
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::{
ackrate::AckRate,
cid::ConnectionIdEntry,
crypto::CryptoRecoveryToken,
quic_datagrams::DatagramTracking,
send_stream::SendStreamRecoveryToken,
stream_id::{StreamId, StreamType},
tracking::AckToken,
};
#[derive(Debug, Clone)]
#[allow(clippy::module_name_repetitions)]
pub enum StreamRecoveryToken {
Stream(SendStreamRecoveryToken),
ResetStream {
stream_id: StreamId,
},
StopSending {
stream_id: StreamId,
},
MaxData(u64),
DataBlocked(u64),
MaxStreamData {
stream_id: StreamId,
max_data: u64,
},
StreamDataBlocked {
stream_id: StreamId,
limit: u64,
},
MaxStreams {
stream_type: StreamType,
max_streams: u64,
},
StreamsBlocked {
stream_type: StreamType,
limit: u64,
},
}
#[derive(Debug, Clone)]
#[allow(clippy::module_name_repetitions)]
pub enum RecoveryToken {
Stream(StreamRecoveryToken),
Ack(AckToken),
Crypto(CryptoRecoveryToken),
HandshakeDone,
KeepAlive, // Special PING.
NewToken(usize),
NewConnectionId(ConnectionIdEntry<[u8; 16]>),
RetireConnectionId(u64),
AckFrequency(AckRate),
Datagram(DatagramTracking),
}


@ -12,6 +12,7 @@ use std::{
collections::{btree_map::Entry, BTreeMap, VecDeque},
hash::{Hash, Hasher},
mem,
num::NonZeroUsize,
ops::Add,
rc::Rc,
};
@ -710,6 +711,7 @@ pub struct SendStream {
sendorder: Option<SendOrder>,
bytes_sent: u64,
fair: bool,
writable_event_low_watermark: NonZeroUsize,
}
impl Hash for SendStream {
@ -726,6 +728,7 @@ impl PartialEq for SendStream {
impl Eq for SendStream {}
impl SendStream {
#[allow(clippy::missing_panics_doc)] // not possible
pub fn new(
stream_id: StreamId,
max_stream_data: u64,
@ -745,6 +748,7 @@ impl SendStream {
sendorder: None,
bytes_sent: 0,
fair: false,
writable_event_low_watermark: 1.try_into().unwrap(),
};
if ss.avail() > 0 {
ss.conn_events.send_stream_writable(stream_id);
@ -1128,10 +1132,10 @@ impl SendStream {
SendStreamState::Send {
ref mut send_buf, ..
} => {
let previous_limit = send_buf.avail();
send_buf.mark_as_acked(offset, len);
if self.avail() > 0 {
self.conn_events.send_stream_writable(self.stream_id);
}
let current_limit = send_buf.avail();
self.maybe_emit_writable_event(previous_limit, current_limit);
}
SendStreamState::DataSent {
ref mut send_buf,
@ -1203,14 +1207,21 @@ impl SendStream {
}
}
/// Set low watermark for [`crate::ConnectionEvent::SendStreamWritable`]
/// event.
///
/// See [`crate::Connection::stream_set_writable_event_low_watermark`].
pub fn set_writable_event_low_watermark(&mut self, watermark: NonZeroUsize) {
self.writable_event_low_watermark = watermark;
}
pub fn set_max_stream_data(&mut self, limit: u64) {
if let SendStreamState::Ready { fc, .. } | SendStreamState::Send { fc, .. } =
&mut self.state
{
let stream_was_blocked = fc.available() == 0;
fc.update(limit);
if stream_was_blocked && self.avail() > 0 {
self.conn_events.send_stream_writable(self.stream_id);
let previous_limit = fc.available();
if let Some(current_limit) = fc.update(limit) {
self.maybe_emit_writable_event(previous_limit, current_limit);
}
}
}
@ -1369,6 +1380,27 @@ impl SendStream {
pub(crate) fn state(&mut self) -> &mut SendStreamState {
&mut self.state
}
pub(crate) fn maybe_emit_writable_event(
&mut self,
previous_limit: usize,
current_limit: usize,
) {
let low_watermark = self.writable_event_low_watermark.get();
// Skip if:
// - stream was not constrained by limit before,
// - or stream is still constrained by limit,
// - or stream is constrained by different limit.
if low_watermark < previous_limit
|| current_limit < low_watermark
|| self.avail() < low_watermark
{
return;
}
self.conn_events.send_stream_writable(self.stream_id);
}
}
impl ::std::fmt::Display for SendStream {
@ -1756,7 +1788,7 @@ pub struct SendStreamRecoveryToken {
#[cfg(test)]
mod tests {
use std::{cell::RefCell, collections::VecDeque, rc::Rc};
use std::{cell::RefCell, collections::VecDeque, num::NonZeroUsize, rc::Rc};
use neqo_common::{event::Provider, hex_with_len, qtrace, Encoder};
@ -2450,7 +2482,7 @@ mod tests {
// Increasing conn max (conn:4, stream:4) will unblock but not emit
// event b/c that happens in Connection::emit_frame() (tested in
// connection.rs)
assert!(conn_fc.borrow_mut().update(4));
assert!(conn_fc.borrow_mut().update(4).is_some());
assert_eq!(conn_events.events().count(), 0);
assert_eq!(s.avail(), 2);
assert_eq!(s.send(b"hello").unwrap(), 2);
@ -2476,6 +2508,53 @@ mod tests {
assert_eq!(s.send(b"hello").unwrap(), 0);
}
#[test]
fn send_stream_writable_event_gen_with_watermark() {
let conn_fc = connection_fc(0);
let mut conn_events = ConnectionEvents::default();
let mut s = SendStream::new(4.into(), 0, Rc::clone(&conn_fc), conn_events.clone());
// Set watermark at 3.
s.set_writable_event_low_watermark(NonZeroUsize::new(3).unwrap());
// Stream is initially blocked (conn:0, stream:0, watermark: 3) and will
// not accept data.
assert_eq!(s.avail(), 0);
assert_eq!(s.send(b"hi!").unwrap(), 0);
// Increasing the connection limit (conn:10, stream:0, watermark: 3) will not generate
// event or allow sending anything. Stream is constrained by stream limit.
assert!(conn_fc.borrow_mut().update(10).is_some());
assert_eq!(s.avail(), 0);
assert_eq!(conn_events.events().count(), 0);
// Increasing the connection limit further (conn:11, stream:0, watermark: 3) will not
// generate event or allow sending anything. Stream wasn't constrained by connection
// limit before.
assert!(conn_fc.borrow_mut().update(11).is_some());
assert_eq!(s.avail(), 0);
assert_eq!(conn_events.events().count(), 0);
// Increasing to (conn:11, stream:2, watermark: 3) will allow 2 bytes
// but not generate a SendStreamWritable event as it is still below the
// configured watermark.
s.set_max_stream_data(2);
assert_eq!(conn_events.events().count(), 0);
assert_eq!(s.avail(), 2);
// Increasing to (conn:11, stream:3, watermark: 3) will generate an
// event as available sendable bytes are >= watermark.
s.set_max_stream_data(3);
let evts = conn_events.events().collect::<Vec<_>>();
assert_eq!(evts.len(), 1);
assert!(matches!(
evts[0],
ConnectionEvent::SendStreamWritable { .. }
));
assert_eq!(s.send(b"hi!").unwrap(), 3);
}
#[test]
fn send_stream_writable_event_new_stream() {
let conn_fc = connection_fc(2);


@ -18,8 +18,8 @@ use neqo_common::qlog::NeqoQlog;
use crate::{
cc::{ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno},
pace::Pacer,
recovery::SentPacket,
rtt::RttEstimate,
tracking::SentPacket,
};
/// The number of packets we allow to burst from the pacer.
@ -114,7 +114,7 @@ impl PacketSender {
pub fn on_packet_sent(&mut self, pkt: &SentPacket, rtt: Duration) {
self.pacer
.spend(pkt.time_sent, rtt, self.cc.cwnd(), pkt.size);
.spend(pkt.time_sent(), rtt, self.cc.cwnd(), pkt.len());
self.cc.on_packet_sent(pkt);
}

third_party/rust/neqo-transport/src/server.rs (vendored)

@ -15,12 +15,12 @@ use std::{
ops::{Deref, DerefMut},
path::PathBuf,
rc::{Rc, Weak},
time::{Duration, Instant},
time::Instant,
};
use neqo_common::{
self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn,
timer::Timer, Datagram, Decoder, Role,
Datagram, Decoder, Role,
};
use neqo_crypto::{
encode_ech_config, AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttCheckResult,
@ -33,7 +33,7 @@ use crate::{
addr_valid::{AddressValidation, AddressValidationResult},
cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdGenerator, ConnectionIdRef},
connection::{Connection, Output, State},
packet::{PacketBuilder, PacketType, PublicPacket},
packet::{PacketBuilder, PacketType, PublicPacket, MIN_INITIAL_PACKET_SIZE},
ConnectionParameters, Res, Version,
};
@ -43,17 +43,6 @@ pub enum InitialResult {
Retry(Vec<u8>),
}
/// `MIN_INITIAL_PACKET_SIZE` is the smallest packet that can be used to establish
/// a new connection across all QUIC versions this server supports.
const MIN_INITIAL_PACKET_SIZE: usize = 1200;
/// The size of timer buckets. This is higher than the actual timer granularity
/// as this depends on there being some distribution of events.
const TIMER_GRANULARITY: Duration = Duration::from_millis(4);
/// The number of buckets in the timer. As mentioned in the definition of `Timer`,
/// the granularity and capacity need to multiply to be larger than the largest
/// delay that might be used. That's the idle timeout (currently 30s).
const TIMER_CAPACITY: usize = 16384;
type StateRef = Rc<RefCell<ServerConnectionState>>;
type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>;
@ -61,7 +50,21 @@ type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>;
pub struct ServerConnectionState {
c: Connection,
active_attempt: Option<AttemptKey>,
last_timer: Instant,
wake_at: Option<Instant>,
}
impl ServerConnectionState {
fn set_wake_at(&mut self, at: Instant) {
self.wake_at = Some(at);
}
fn needs_waking(&self, now: Instant) -> bool {
self.wake_at.map_or(false, |t| t <= now)
}
fn woken(&mut self) {
self.wake_at = None;
}
}
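The dedicated `Timer` wheel is gone: each connection records its earliest `wake_at` time and the server reports the soonest one through `Output::Callback`, leaving scheduling to the embedder. A rough sketch of the driving loop this assumes (illustration only; `read_datagram`, `send_datagram` and `sleep_until` are placeholder I/O helpers, not real APIs):
loop {
    // Placeholder input: returns Option<Datagram>, or None when idle.
    let dgram = read_datagram();
    match server.process(dgram.as_ref(), Instant::now()) {
        // Send this datagram, then call process() again right away.
        Output::Datagram(d) => send_datagram(d),
        // Nothing to send; call process() again after `delay` at the latest.
        Output::Callback(delay) => sleep_until(Instant::now() + delay),
        // Fully dormant until new input arrives.
        Output::None => {}
    }
}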
impl Deref for ServerConnectionState {
@ -174,8 +177,8 @@ pub struct Server {
active: HashSet<ActiveConnectionRef>,
/// The set of connections that need immediate processing.
waiting: VecDeque<StateRef>,
/// Outstanding timers for connections.
timers: Timer<StateRef>,
/// The latest [`Output::Callback`] returned from [`Server::process`].
wake_at: Option<Instant>,
/// Address validation logic, which determines whether we send a Retry.
address_validation: Rc<RefCell<AddressValidation>>,
/// Directory to create qlog traces in
@ -219,10 +222,10 @@ impl Server {
connections: Rc::default(),
active: HashSet::default(),
waiting: VecDeque::default(),
timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
address_validation: Rc::new(RefCell::new(validation)),
qlog_dir: None,
ech_config: None,
wake_at: None,
})
}
@ -260,11 +263,6 @@ impl Server {
self.ech_config.as_ref().map_or(&[], |cfg| &cfg.encoded)
}
fn remove_timer(&mut self, c: &StateRef) {
let last = c.borrow().last_timer;
self.timers.remove(last, |t| Rc::ptr_eq(t, c));
}
fn process_connection(
&mut self,
c: &StateRef,
@ -280,16 +278,12 @@ impl Server {
}
Output::Callback(delay) => {
let next = now + delay;
if next != c.borrow().last_timer {
qtrace!([self], "Change timer to {:?}", next);
self.remove_timer(c);
c.borrow_mut().last_timer = next;
self.timers.add(next, Rc::clone(c));
c.borrow_mut().set_wake_at(next);
if self.wake_at.map_or(true, |c| c > next) {
self.wake_at = Some(next);
}
}
Output::None => {
self.remove_timer(c);
}
Output::None => {}
}
if c.borrow().has_events() {
qtrace!([self], "Connection active: {:?}", c);
@ -507,7 +501,7 @@ impl Server {
self.setup_connection(&mut c, &attempt_key, initial, orig_dcid);
let c = Rc::new(RefCell::new(ServerConnectionState {
c,
last_timer: now,
wake_at: None,
active_attempt: Some(attempt_key.clone()),
}));
cid_mgr.borrow_mut().set_connection(&c);
@ -646,24 +640,28 @@ impl Server {
return Some(d);
}
}
qtrace!([self], "No packet to send still, run timers");
while let Some(c) = self.timers.take_next(now) {
if let Some(d) = self.process_connection(&c, None, now) {
return Some(d);
}
}
None
}
fn next_time(&mut self, now: Instant) -> Option<Duration> {
if self.waiting.is_empty() {
self.timers.next_time().map(|x| x - now)
} else {
Some(Duration::new(0, 0))
qtrace!([self], "No packet to send still, check wake up times");
loop {
let connection = self
.connections
.borrow()
.values()
.find(|c| c.borrow().needs_waking(now))
.cloned()?;
let datagram = self.process_connection(&connection, None, now);
connection.borrow_mut().woken();
if datagram.is_some() {
return datagram;
}
}
}
pub fn process(&mut self, dgram: Option<&Datagram>, now: Instant) -> Output {
if self.wake_at.map_or(false, |c| c <= now) {
self.wake_at = None;
}
dgram
.and_then(|d| self.process_input(d, now))
.or_else(|| self.process_next_output(now))
@ -671,12 +669,7 @@ impl Server {
qtrace!([self], "Send packet: {:?}", d);
Output::Datagram(d)
})
.or_else(|| {
self.next_time(now).map(|delay| {
qtrace!([self], "Wait: {:?}", delay);
Output::Callback(delay)
})
})
.or_else(|| self.wake_at.take().map(|c| Output::Callback(c - now)))
.unwrap_or_else(|| {
qtrace!([self], "Go dormant");
Output::None


@ -476,17 +476,13 @@ impl Streams {
}
pub fn handle_max_data(&mut self, maximum_data: u64) {
let conn_was_blocked = self.sender_fc.borrow().available() == 0;
let conn_credit_increased = self.sender_fc.borrow_mut().update(maximum_data);
let previous_limit = self.sender_fc.borrow().available();
let Some(current_limit) = self.sender_fc.borrow_mut().update(maximum_data) else {
return;
};
if conn_was_blocked && conn_credit_increased {
for (id, ss) in &mut self.send {
if ss.avail() > 0 {
// These may not actually all be writable if one
// uses up all the conn credit. Not our fault.
self.events.send_stream_writable(*id);
}
}
for (_id, ss) in &mut self.send {
ss.maybe_emit_writable_event(previous_limit, current_limit);
}
}
@ -531,7 +527,10 @@ impl Streams {
}
pub fn handle_max_streams(&mut self, stream_type: StreamType, maximum_streams: u64) {
if self.local_stream_limits[stream_type].update(maximum_streams) {
let increased = self.local_stream_limits[stream_type]
.update(maximum_streams)
.is_some();
if increased {
self.events.send_stream_creatable(stream_type);
}
}


@ -22,6 +22,7 @@ use neqo_crypto::{
use crate::{
cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN},
packet::MIN_INITIAL_PACKET_SIZE,
version::{Version, VersionConfig, WireVersion},
Error, Res,
};
@ -278,7 +279,7 @@ impl TransportParameter {
},
MAX_UDP_PAYLOAD_SIZE => match d.decode_varint() {
Some(v) if v >= 1200 => Self::Integer(v),
Some(v) if v >= MIN_INITIAL_PACKET_SIZE.try_into()? => Self::Integer(v),
_ => return Err(Error::TransportParameterError),
},


@ -133,117 +133,6 @@ impl std::fmt::Debug for PacketNumberSpaceSet {
}
}
#[derive(Debug, Clone)]
pub struct SentPacket {
pub pt: PacketType,
pub pn: PacketNumber,
pub ecn_mark: IpTosEcn,
ack_eliciting: bool,
pub time_sent: Instant,
primary_path: bool,
pub tokens: Vec<RecoveryToken>,
time_declared_lost: Option<Instant>,
/// After a PTO, this is true when the packet has been released.
pto: bool,
pub size: usize,
}
impl SentPacket {
pub fn new(
pt: PacketType,
pn: PacketNumber,
ecn_mark: IpTosEcn,
time_sent: Instant,
ack_eliciting: bool,
tokens: Vec<RecoveryToken>,
size: usize,
) -> Self {
Self {
pt,
pn,
ecn_mark,
time_sent,
ack_eliciting,
primary_path: true,
tokens,
time_declared_lost: None,
pto: false,
size,
}
}
/// Returns `true` if the packet will elicit an ACK.
pub fn ack_eliciting(&self) -> bool {
self.ack_eliciting
}
/// Returns `true` if the packet was sent on the primary path.
pub fn on_primary_path(&self) -> bool {
self.primary_path
}
/// Clears the flag that had this packet on the primary path.
/// Used when migrating to clear out state.
pub fn clear_primary_path(&mut self) {
self.primary_path = false;
}
/// Whether the packet has been declared lost.
pub fn lost(&self) -> bool {
self.time_declared_lost.is_some()
}
/// Whether accounting for the loss or acknowledgement in the
/// congestion controller is pending.
/// Returns `true` if the packet counts as being "in flight",
/// and has not previously been declared lost.
/// Note that this should count packets that contain only ACK and PADDING,
/// but we don't send PADDING, so we don't track that.
pub fn cc_outstanding(&self) -> bool {
self.ack_eliciting() && self.on_primary_path() && !self.lost()
}
/// Whether the packet should be tracked as in-flight.
pub fn cc_in_flight(&self) -> bool {
self.ack_eliciting() && self.on_primary_path()
}
/// Declare the packet as lost. Returns `true` if this is the first time.
pub fn declare_lost(&mut self, now: Instant) -> bool {
if self.lost() {
false
} else {
self.time_declared_lost = Some(now);
true
}
}
/// Ask whether this tracked packet has been declared lost for long enough
/// that it can be expired and no longer tracked.
pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool {
self.time_declared_lost
.map_or(false, |loss_time| (loss_time + expiration_period) <= now)
}
/// Whether the packet contents were cleared out after a PTO.
pub fn pto_fired(&self) -> bool {
self.pto
}
/// On PTO, we need to get the recovery tokens so that we can ensure that
/// the frames we sent can be sent again in the PTO packet(s). Do that just once.
pub fn pto(&mut self) -> bool {
if self.pto || self.lost() {
false
} else {
self.pto = true;
true
}
}
}
impl std::fmt::Display for PacketNumberSpace {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.write_str(match self {


@ -7,7 +7,9 @@
mod common;
use neqo_common::{Datagram, Decoder, Encoder, Role};
use neqo_transport::{CloseReason, ConnectionParameters, Error, State, Version};
use neqo_transport::{
CloseReason, ConnectionParameters, Error, State, Version, MIN_INITIAL_PACKET_SIZE,
};
use test_fixture::{
default_client, default_server,
header_protection::{
@ -104,7 +106,7 @@ fn reorder_server_initial() {
// And rebuild a packet.
let mut packet = header.clone();
packet.resize(1200, 0);
packet.resize(MIN_INITIAL_PACKET_SIZE, 0);
aead.encrypt(pn, &header, &plaintext, &mut packet[header.len()..])
.unwrap();
apply_header_protection(&hp, &mut packet, protected_header.len()..header.len());
@ -237,7 +239,7 @@ fn overflow_crypto() {
let plen = payload.len();
payload.pad_to(plen + 1000, 44);
let mut packet = Encoder::with_capacity(1200);
let mut packet = Encoder::with_capacity(MIN_INITIAL_PACKET_SIZE);
packet
.encode_byte(0xc1) // Initial with packet number length of 2.
.encode_uint(4, Version::Version1.wire_version())
@ -254,7 +256,7 @@ fn overflow_crypto() {
aead.encrypt(pn, &header, payload.as_ref(), &mut packet[header.len()..])
.unwrap();
apply_header_protection(&hp, &mut packet, pn_offset..(pn_offset + 2));
packet.resize(1200, 0); // Initial has to be 1200 bytes!
packet.resize(MIN_INITIAL_PACKET_SIZE, 0); // Initial has to be MIN_INITIAL_PACKET_SIZE bytes!
let dgram = Datagram::new(
server_initial.source(),


@ -17,7 +17,9 @@ use std::{
use common::{connected_server, default_server, generate_ticket};
use neqo_common::{hex_with_len, qdebug, qtrace, Datagram, Encoder, Role};
use neqo_crypto::AuthenticationStatus;
use neqo_transport::{server::ValidateAddress, CloseReason, Error, State, StreamType};
use neqo_transport::{
server::ValidateAddress, CloseReason, Error, State, StreamType, MIN_INITIAL_PACKET_SIZE,
};
use test_fixture::{
assertions, datagram, default_client,
header_protection::{
@ -331,7 +333,7 @@ fn retry_after_pto() {
// Let PTO fire on the client and then let it exhaust its PTO packets.
now += Duration::from_secs(1);
let pto = client.process(None, now).dgram();
assert!(pto.unwrap().len() >= 1200);
assert!(pto.unwrap().len() >= MIN_INITIAL_PACKET_SIZE);
let cb = client.process(None, now).callback();
assert_ne!(cb, Duration::new(0, 0));
@ -339,7 +341,7 @@ fn retry_after_pto() {
assertions::assert_retry(retry.as_ref().unwrap());
let ci2 = client.process(retry.as_ref(), now).dgram();
assert!(ci2.unwrap().len() >= 1200);
assert!(ci2.unwrap().len() >= MIN_INITIAL_PACKET_SIZE);
}
#[test]
@ -430,11 +432,11 @@ fn mitm_retry() {
qtrace!("notoken_header={}", hex_with_len(&notoken_header));
// Encrypt.
let mut notoken_packet = Encoder::with_capacity(1200)
let mut notoken_packet = Encoder::with_capacity(MIN_INITIAL_PACKET_SIZE)
.encode(&notoken_header)
.as_ref()
.to_vec();
notoken_packet.resize_with(1200, u8::default);
notoken_packet.resize_with(MIN_INITIAL_PACKET_SIZE, u8::default);
aead.encrypt(
pn,
&notoken_header,
@ -443,7 +445,7 @@ fn mitm_retry() {
)
.unwrap();
// Unlike with decryption, don't truncate.
// All 1200 bytes are needed to reach the minimum datagram size.
// All MIN_INITIAL_PACKET_SIZE bytes are needed to reach the minimum datagram size.
apply_header_protection(&hp, &mut notoken_packet, pn_offset..(pn_offset + pn_len));
qtrace!("packet={}", hex_with_len(&notoken_packet));


@ -16,6 +16,7 @@ use neqo_crypto::{
use neqo_transport::{
server::{ActiveConnectionRef, Server, ValidateAddress},
CloseReason, Connection, ConnectionParameters, Error, Output, State, StreamType, Version,
MIN_INITIAL_PACKET_SIZE,
};
use test_fixture::{
assertions, datagram, default_client,
@ -227,14 +228,14 @@ fn drop_non_initial() {
let mut server = default_server();
// This is big enough to look like an Initial, but it uses the Retry type.
let mut header = neqo_common::Encoder::with_capacity(1200);
let mut header = neqo_common::Encoder::with_capacity(MIN_INITIAL_PACKET_SIZE);
header
.encode_byte(0xfa)
.encode_uint(4, Version::default().wire_version())
.encode_vec(1, CID)
.encode_vec(1, CID);
let mut bogus_data: Vec<u8> = header.into();
bogus_data.resize(1200, 66);
bogus_data.resize(MIN_INITIAL_PACKET_SIZE, 66);
let bogus = datagram(bogus_data);
assert!(server.process(Some(&bogus), now()).dgram().is_none());
@ -425,8 +426,8 @@ fn bad_client_initial() {
)
.unwrap();
assert_eq!(header_enc.len() + v.len(), ciphertext.len());
// Pad with zero to get up to 1200.
ciphertext.resize(1200, 0);
// Pad with zero to get up to MIN_INITIAL_PACKET_SIZE.
ciphertext.resize(MIN_INITIAL_PACKET_SIZE, 0);
apply_header_protection(
&hp,
@ -488,7 +489,7 @@ fn bad_client_initial_connection_close() {
let (aead, hp) = initial_aead_and_hp(d_cid, Role::Client);
let (_, pn) = remove_header_protection(&hp, header, payload);
let mut payload_enc = Encoder::with_capacity(1200);
let mut payload_enc = Encoder::with_capacity(MIN_INITIAL_PACKET_SIZE);
payload_enc.encode(&[0x1c, 0x01, 0x00, 0x00]); // Add a CONNECTION_CLOSE frame.
// Make a new header with a 1 byte packet number length.
@ -513,8 +514,8 @@ fn bad_client_initial_connection_close() {
)
.unwrap();
assert_eq!(header_enc.len() + v.len(), ciphertext.len());
// Pad with zero to get up to 1200.
ciphertext.resize(1200, 0);
// Pad with zero to get up to MIN_INITIAL_PACKET_SIZE.
ciphertext.resize(MIN_INITIAL_PACKET_SIZE, 0);
apply_header_protection(
&hp,