Mirror of https://github.com/mozilla/gecko-dev.git
Bug 1633006 - Update to Neqo 0.4.0 r=dragana,necko-reviewers

Document that qlog uses the 2-clause BSD license. Adapt to the API changes in the glue code and http3server.

Differential Revision: https://phabricator.services.mozilla.com/D77903
Parent: 214d31aea1
Commit: 95b90fb23e
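The API change the glue code adapts to below: NeqoHttp3Conn::GetEvent() now returns an nsresult and fills in the event, the already-flattened HTTP/1-style response headers, and the fin flag in one call, so the separate ReadResponseHeaders()/neqo_http3conn_read_response_headers() path is removed. A minimal sketch of the new polling pattern, based on the Http3Session::ProcessEvents changes in this diff (PollEvents is a hypothetical helper added only for illustration; error handling is trimmed):

// Sketch only - not part of the commit. Assumes the NeqoHttp3Conn/Http3Event
// declarations shown in the patch below.
nsresult PollEvents(NeqoHttp3Conn* aConn) {
  nsTArray<uint8_t> headerBytes;
  Http3Event event;
  event.tag = Http3Event::Tag::NoEvent;
  bool fin = false;

  // One call now yields the event plus any flattened response headers.
  nsresult rv = aConn->GetEvent(&event, headerBytes, &fin);
  if (NS_FAILED(rv)) {
    return rv;
  }
  while (event.tag != Http3Event::Tag::NoEvent) {
    if (event.tag == Http3Event::Tag::HeaderReady) {
      // Headers arrive with the event; no separate ReadResponseHeaders call.
      // e.g. stream->SetResponseHeaders(headerBytes, fin);
    }
    rv = aConn->GetEvent(&event, headerBytes, &fin);
    if (NS_FAILED(rv)) {
      return rv;
    }
  }
  return NS_OK;
}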
@@ -15,7 +15,7 @@ rev = "e3c3388e6632cf55e08d773b32e58b1cab9b2731"
[source."https://github.com/mozilla/neqo"]
git = "https://github.com/mozilla/neqo"
replace-with = "vendored-sources"
tag = "v0.2.4"
tag = "v0.4.0"

[source."https://github.com/mozilla/mp4parse-rust"]
git = "https://github.com/mozilla/mp4parse-rust"
@@ -2199,6 +2199,7 @@ dependencies = [
"neqo-common",
"neqo-crypto",
"neqo-http3",
"neqo-qpack",
"neqo-transport",
]

@@ -3088,19 +3089,21 @@ checksum = "a2983372caf4480544083767bf2d27defafe32af49ab4df3a0b7fc90793a3664"
[[package]]
name = "neqo-common"
version = "0.2.4"
source = "git+https://github.com/mozilla/neqo?tag=v0.2.4#6fca78d008168e6fdf4c9d3a76b49acab07522a7"
version = "0.4.0"
source = "git+https://github.com/mozilla/neqo?tag=v0.4.0#f35a3b7800a7062c8b34cae1bdcbf64733935149"
dependencies = [
"chrono",
"env_logger",
"lazy_static",
"log",
"num-traits",
"qlog",
]

[[package]]
name = "neqo-crypto"
version = "0.2.4"
source = "git+https://github.com/mozilla/neqo?tag=v0.2.4#6fca78d008168e6fdf4c9d3a76b49acab07522a7"
version = "0.4.0"
source = "git+https://github.com/mozilla/neqo?tag=v0.4.0#f35a3b7800a7062c8b34cae1bdcbf64733935149"
dependencies = [
"bindgen",
"log",

@@ -3112,8 +3115,8 @@ dependencies = [
[[package]]
name = "neqo-http3"
version = "0.2.4"
source = "git+https://github.com/mozilla/neqo?tag=v0.2.4#6fca78d008168e6fdf4c9d3a76b49acab07522a7"
version = "0.4.0"
source = "git+https://github.com/mozilla/neqo?tag=v0.4.0#f35a3b7800a7062c8b34cae1bdcbf64733935149"
dependencies = [
"log",
"neqo-common",

@@ -3126,26 +3129,29 @@ dependencies = [
[[package]]
name = "neqo-qpack"
version = "0.2.4"
source = "git+https://github.com/mozilla/neqo?tag=v0.2.4#6fca78d008168e6fdf4c9d3a76b49acab07522a7"
dependencies = [
"log",
"neqo-common",
"neqo-crypto",
"neqo-transport",
"num-traits",
"static_assertions",
]

[[package]]
name = "neqo-transport"
version = "0.2.4"
source = "git+https://github.com/mozilla/neqo?tag=v0.2.4#6fca78d008168e6fdf4c9d3a76b49acab07522a7"
version = "0.4.0"
source = "git+https://github.com/mozilla/neqo?tag=v0.4.0#f35a3b7800a7062c8b34cae1bdcbf64733935149"
dependencies = [
"lazy_static",
"log",
"neqo-common",
"neqo-crypto",
"neqo-transport",
"num-traits",
"qlog",
"static_assertions",
]

[[package]]
name = "neqo-transport"
version = "0.4.0"
source = "git+https://github.com/mozilla/neqo?tag=v0.4.0#f35a3b7800a7062c8b34cae1bdcbf64733935149"
dependencies = [
"lazy_static",
"log",
"neqo-common",
"neqo-crypto",
"qlog",
"smallvec",
]

@@ -3156,6 +3162,7 @@ dependencies = [
"neqo-common",
"neqo-crypto",
"neqo-http3",
"neqo-qpack",
"neqo-transport",
"nserror",
"nsstring",

@@ -3776,6 +3783,18 @@ dependencies = [
"libc",
]

[[package]]
name = "qlog"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a1fdb4ef259b1b970958af7fbdedc4f107c872a9f856e01f3427c0bea44095f"
dependencies = [
"serde",
"serde_derive",
"serde_json",
"serde_with",
]

[[package]]
name = "quick-error"
version = "1.2.1"

@@ -4313,6 +4332,27 @@ dependencies = [
"url",
]

[[package]]
name = "serde_with"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89d3d595d64120bbbc70b7f6d5ae63298b62a3d9f373ec2f56acf5365ca8a444"
dependencies = [
"serde",
"serde_with_macros",
]

[[package]]
name = "serde_with_macros"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4070d2c9b9d258465ad1d82aabb985b84cd9a3afa94da25ece5a9938ba5f1606"
dependencies = [
"proc-macro2",
"quote",
"syn",
]

[[package]]
name = "serde_yaml"
version = "0.8.9"
@@ -210,10 +210,8 @@ nsresult Http3Session::ProcessInput(uint32_t* aCountRead) {
*aCountRead += read;
}
} while (NS_SUCCEEDED(rv));
// Call ProcessHttp3 if there has not been any socket error.
// NS_BASE_STREAM_WOULD_BLOCK means that there is no more date to read.
if (rv == NS_BASE_STREAM_WOULD_BLOCK) {
mHttp3Connection->ProcessHttp3();
return NS_OK;
}

@@ -292,21 +290,51 @@ nsresult Http3Session::ProcessEvents(uint32_t count) {
MOZ_ASSERT(OnSocketThread(), "not on socket thread");

LOG(("Http3Session::ProcessEvents [this=%p]", this));
Http3Event event = mHttp3Connection->GetEvent();

nsTArray<uint8_t> headerBytes;
Http3Event event;
event.tag = Http3Event::Tag::NoEvent;
bool fin = false;

nsresult rv = mHttp3Connection->GetEvent(&event, headerBytes, &fin);
if (NS_FAILED(rv)) {
LOG(("Http3Session::ProcessEvents [this=%p] rv=%" PRIx32, this,
static_cast<uint32_t>(rv)));
return rv;
}

while (event.tag != Http3Event::Tag::NoEvent) {
switch (event.tag) {
case Http3Event::Tag::HeaderReady:
case Http3Event::Tag::HeaderReady: {
MOZ_ASSERT(mState == CONNECTED);
LOG(("Http3Session::ProcessEvents - HeaderReady"));
uint64_t id = event.header_ready.stream_id;

RefPtr<Http3Stream> stream = mStreamIdHash.Get(id);
if (!stream) {
LOG(
("Http3Session::ProcessEvents - HeaderReady - stream not found "
"stream_id=0x%" PRIx64 " [this=%p].",
id, this));
continue;
}

stream->SetResponseHeaders(headerBytes, fin);

uint32_t read = 0;
rv = ProcessTransactionRead(stream, count, &read);

if (NS_FAILED(rv)) {
LOG(("Http3Session::ProcessEvents [this=%p] rv=%" PRIx32, this,
static_cast<uint32_t>(rv)));
return rv;
}
break;
}
case Http3Event::Tag::DataReadable: {
MOZ_ASSERT(mState == CONNECTED);
uint64_t id;
if (event.tag == Http3Event::Tag::HeaderReady) {
LOG(("Http3Session::ProcessEvents - HeaderReady"));
id = event.header_ready.stream_id;
} else {
LOG(("Http3Session::ProcessEvents - DataReadable"));
id = event.data_readable.stream_id;
}
LOG(("Http3Session::ProcessEvents - DataReadable"));
uint64_t id = event.data_readable.stream_id;

uint32_t read = 0;
nsresult rv = ProcessTransactionRead(id, count, &read);

@@ -386,11 +414,16 @@ nsresult Http3Session::ProcessEvents(uint32_t count) {
default:
break;
}
event = mHttp3Connection->GetEvent();
rv = mHttp3Connection->GetEvent(&event, headerBytes, &fin);
if (NS_FAILED(rv)) {
LOG(("Http3Session::ProcessEvents [this=%p] rv=%" PRIx32, this,
static_cast<uint32_t>(rv)));
return rv;
}
}

return NS_OK;
}
} // namespace net

// This function may return a socket error.
// It will not return an error if socket error is

@@ -410,7 +443,6 @@ nsresult Http3Session::ProcessOutput() {
mSegmentReaderWriter.get(), this));

// Process neqo.
mHttp3Connection->ProcessHttp3();
uint64_t timeout = mHttp3Connection->ProcessOutput();

// Check if we have a packet that could not have been sent in a previous

@@ -454,12 +486,10 @@ nsresult Http3Session::ProcessOutput() {
// properly and close the connection.
nsresult Http3Session::ProcessOutputAndEvents() {
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
mHttp3Connection->ProcessTimer();
nsresult rv = ProcessOutput();
if (NS_FAILED(rv)) {
return rv;
}
mHttp3Connection->ProcessHttp3();
return ProcessEvents(nsIOService::gDefaultSegmentSize);
}

@@ -616,7 +646,6 @@ nsresult Http3Session::TryActivating(

MOZ_ASSERT(*aStreamId != UINT64_MAX);
mStreamIdHash.Put(*aStreamId, RefPtr{aStream});
mHttp3Connection->ProcessHttp3();
return NS_OK;
}

@@ -1096,15 +1125,6 @@ nsresult Http3Session::OnWriteSegment(char* buf, uint32_t count,
return NS_OK;
}

// This is called by Http3Stream::OnWriteSegment.
nsresult Http3Session::ReadResponseHeaders(uint64_t aStreamId,
nsTArray<uint8_t>& aResponseHeaders,
bool* aFin) {
MOZ_ASSERT(OnSocketThread(), "not on socket thread");
return mHttp3Connection->ReadResponseHeaders(aStreamId, aResponseHeaders,
aFin);
}

// This is called by Http3Stream::OnWriteSegment.
nsresult Http3Session::ReadResponseData(uint64_t aStreamId, char* aBuf,
uint32_t aCount,
@@ -238,6 +238,14 @@ nsresult Http3Stream::OnReadSegment(const char* buf, uint32_t count,
return NS_OK;
}

void Http3Stream::SetResponseHeaders(nsTArray<uint8_t>& aResponseHeaders,
bool aFin) {
MOZ_ASSERT(mFlatResponseHeaders.IsEmpty(),
"Cannot set response headers more than once");
mFlatResponseHeaders.SwapElements(aResponseHeaders);
mFin = aFin;
}

nsresult Http3Stream::OnWriteSegment(char* buf, uint32_t count,
uint32_t* countWritten) {
MOZ_ASSERT(OnSocketThread(), "not on socket thread");

@@ -250,15 +258,8 @@ nsresult Http3Stream::OnWriteSegment(char* buf, uint32_t count,
case EARLY_RESPONSE:
break;
case READING_HEADERS: {
if (mFlatResponseHeaders.IsEmpty()) {
nsresult rv = mSession->ReadResponseHeaders(
mStreamId, mFlatResponseHeaders, &mFin);
if (NS_FAILED(rv) && (rv != NS_BASE_STREAM_WOULD_BLOCK)) {
return rv;
}
LOG(("Http3Stream::OnWriteSegment [this=%p, read %u bytes of headers",
this, (uint32_t)mFlatResponseHeaders.Length()));
}
// SetResponseHeaders should have been previously called.
MOZ_ASSERT(!mFlatResponseHeaders.IsEmpty(), "Headers empty!");
*countWritten = (mFlatResponseHeaders.Length() > count)
? count
: mFlatResponseHeaders.Length();

@@ -53,6 +53,8 @@ class Http3Stream final : public nsAHttpSegmentReader,
bool RecvdReset() const { return mState == RECEIVED_RESET; }
void SetRecvdReset() { mState = RECEIVED_RESET; }

void SetResponseHeaders(nsTArray<uint8_t>& aResponseHeaders, bool fin);

private:
~Http3Stream() = default;
@@ -8,16 +8,17 @@ edition = "2018"
name = "neqo_glue"

[dependencies]
neqo-http3 = { tag = "v0.2.4", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.2.4", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.2.4", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
nserror = { path = "../../../xpcom/rust/nserror" }
nsstring = { path = "../../../xpcom/rust/nsstring" }
xpcom = { path = "../../../xpcom/rust/xpcom" }
thin-vec = { version = "0.1.0", features = ["gecko-ffi"] }

[dependencies.neqo-crypto]
tag = "v0.2.4"
tag = "v0.4.0"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]
@@ -39,12 +39,8 @@ class NeqoHttp3Conn final {
neqo_http3conn_process_input(this, aPacket, aLen);
}

void ProcessHttp3() { neqo_http3conn_process_http3(this); }

uint64_t ProcessOutput() { return neqo_http3conn_process_output(this); }

void ProcessTimer() { neqo_http3conn_process_timer(this); }

bool HasDataToSend() { return neqo_http3conn_has_data_to_send(this); }

nsresult GetDataToSend(nsTArray<uint8_t>& aData) {

@@ -52,7 +48,10 @@ class NeqoHttp3Conn final {
return neqo_http3conn_get_data_to_send(this, &aData);
}

Http3Event GetEvent() { return neqo_http3conn_event(this); }
nsresult GetEvent(Http3Event* aEvent, nsTArray<uint8_t>& aHeaderData,
bool* aFin) {
return neqo_http3conn_event(this, aEvent, &aHeaderData, aFin);
}

nsresult Fetch(const nsACString& aMethod, const nsACString& aScheme,
const nsACString& aHost, const nsACString& aPath,

@@ -72,12 +71,6 @@ class NeqoHttp3Conn final {
return neqo_http3conn_close_stream(this, aStreamId);
}

nsresult ReadResponseHeaders(uint64_t aStreamId, nsTArray<uint8_t>& aHeaders,
bool* fin) {
return neqo_http3conn_read_response_headers(this, aStreamId, &aHeaders,
fin);
}

nsresult ReadResponseData(uint64_t aStreamId, uint8_t* aBuf, uint32_t aLen,
uint32_t* aRead, bool* aFin) {
return neqo_http3conn_read_response_data(this, aStreamId, aBuf, aLen, aRead,
@@ -2,10 +2,11 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/. */

use neqo_common::Datagram;
use neqo_common::{matches, Datagram};
use neqo_crypto::{init, PRErrorCode};
use neqo_http3::Error as Http3Error;
use neqo_http3::{Http3Client, Http3ClientEvent, Http3State};
use neqo_qpack::QpackSettings;
use neqo_transport::Error as TransportError;
use neqo_transport::{FixedConnectionIdManager, Output};
use nserror::*;

@@ -62,14 +63,19 @@ impl NeqoHttp3Conn {
Err(_) => return Err(NS_ERROR_INVALID_ARG),
};

let qpack_settings = QpackSettings {
max_table_size_encoder: max_table_size,
max_table_size_decoder: max_table_size,
max_blocked_streams,
};

let conn = match Http3Client::new(
origin_conv,
&[alpn_conv],
Rc::new(RefCell::new(FixedConnectionIdManager::new(3))),
local,
remote,
max_table_size,
max_blocked_streams,
qpack_settings,
) {
Ok(c) => c,
Err(_) => return Err(NS_ERROR_INVALID_ARG),

@@ -155,11 +161,6 @@ pub extern "C" fn neqo_http3conn_process_input(
);
}

#[no_mangle]
pub extern "C" fn neqo_http3conn_process_http3(conn: &mut NeqoHttp3Conn) {
conn.conn.process_http3(Instant::now());
}

/* Process output and store data to be sent into conn.packets_to_send.
* neqo_http3conn_get_data_to_send will be called to pick up this data.
*/

@@ -180,11 +181,6 @@ pub extern "C" fn neqo_http3conn_process_output(conn: &mut NeqoHttp3Conn) -> u64
}
}

#[no_mangle]
pub extern "C" fn neqo_http3conn_process_timer(conn: &mut NeqoHttp3Conn) {
conn.conn.process_timer(Instant::now());
}

#[no_mangle]
pub extern "C" fn neqo_http3conn_has_data_to_send(conn: &mut NeqoHttp3Conn) -> bool {
!conn.packets_to_send.is_empty()

@@ -415,29 +411,50 @@ pub enum Http3Event {
}

#[no_mangle]
pub extern "C" fn neqo_http3conn_event(conn: &mut NeqoHttp3Conn) -> Http3Event {
loop {
match conn.conn.next_event() {
None => break Http3Event::NoEvent,
Some(e) => {
let fe: Http3Event = e.into();
match fe {
Http3Event::NoEvent => {}
_ => break fe,
};
}
}
}
}

impl From<Http3ClientEvent> for Http3Event {
fn from(event: Http3ClientEvent) -> Self {
match event {
pub extern "C" fn neqo_http3conn_event(
conn: &mut NeqoHttp3Conn,
ret_event: &mut Http3Event,
ret_headers: &mut ThinVec<u8>,
ret_fin: &mut bool,
) -> nsresult {
while let Some(evt) = conn.conn.next_event() {
let fe = match evt {
Http3ClientEvent::DataWritable { stream_id } => Http3Event::DataWritable { stream_id },
Http3ClientEvent::StopSending { stream_id, .. } => {
Http3Event::StopSending { stream_id }
}
Http3ClientEvent::HeaderReady { stream_id } => Http3Event::HeaderReady { stream_id },
Http3ClientEvent::HeaderReady {
stream_id,
headers,
fin,
} => {
if let Some(headers) = headers {
if headers.iter().filter(|(k, _)| k == ":status").count() != 1 {
return NS_ERROR_ILLEGAL_VALUE;
}

let (_, status_val) = headers
.iter()
.find(|(k, _)| k == ":status")
.expect("must be one");

ret_headers.extend_from_slice(b"HTTP/3 ");
ret_headers.extend_from_slice(status_val.as_bytes());
ret_headers.extend_from_slice(b"\r\n");

for (key, value) in headers.iter().filter(|(k, _)| k != ":status") {
ret_headers.extend_from_slice(key.as_bytes());
ret_headers.extend_from_slice(b": ");
ret_headers.extend_from_slice(value.as_bytes());
ret_headers.extend_from_slice(b"\r\n");
}
ret_headers.extend_from_slice(b"\r\n");
}

*ret_fin = fin;

Http3Event::HeaderReady { stream_id }
}
Http3ClientEvent::DataReadable { stream_id } => Http3Event::DataReadable { stream_id },
Http3ClientEvent::Reset { stream_id, error } => Http3Event::Reset { stream_id, error },
Http3ClientEvent::NewPushStream { stream_id } => {

@@ -457,42 +474,16 @@ impl From<Http3ClientEvent> for Http3Event {
},
_ => Http3Event::NoEvent,
},
};

if !matches!(fe, Http3Event::NoEvent) {
*ret_event = fe;
return NS_OK;
}
}
}

// Read response headers.
// Firefox needs these headers to look like http1 heeaders, so we are
// building that here.
#[no_mangle]
pub extern "C" fn neqo_http3conn_read_response_headers(
conn: &mut NeqoHttp3Conn,
stream_id: u64,
headers: &mut ThinVec<u8>,
fin: &mut bool,
) -> nsresult {
match conn.conn.read_response_headers(stream_id) {
Ok((h, fin_recvd)) => {
let status_element: Vec<_> = h.iter().filter(|elem| elem.0 == ":status").collect();
if status_element.len() != 1 {
return NS_ERROR_ILLEGAL_VALUE;
}
headers.extend_from_slice("HTTP/3 ".as_bytes());
headers.extend_from_slice(status_element[0].1.as_bytes());
headers.extend_from_slice("\r\n".as_bytes());

for elem in h.iter().filter(|elem| elem.0 != ":status") {
headers.extend_from_slice(&elem.0.as_bytes());
headers.extend_from_slice(": ".as_bytes());
headers.extend_from_slice(&elem.1.as_bytes());
headers.extend_from_slice("\r\n".as_bytes());
}
headers.extend_from_slice("\r\n".as_bytes());
*fin = fin_recvd;
NS_OK
}
Err(_) => NS_ERROR_INVALID_ARG,
}
*ret_event = Http3Event::NoEvent;
NS_OK
}

// Read response data into buf.

@@ -517,7 +508,7 @@ pub extern "C" fn neqo_http3conn_read_response_data(
}
Err(Http3Error::TransportError(TransportError::InvalidStreamId))
| Err(Http3Error::TransportError(TransportError::NoMoreData)) => NS_ERROR_INVALID_ARG,
Err(Http3Error::HttpFrameError) => NS_ERROR_ABORT,
Err(Http3Error::HttpFrame) => NS_ERROR_ABORT,
Err(_) => NS_ERROR_UNEXPECTED,
}
}
@@ -5,15 +5,16 @@ authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
edition = "2018"

[dependencies]
neqo-transport = { tag = "v0.2.4", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.2.4", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.2.4", git = "https://github.com/mozilla/neqo" }
neqo-transport = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
neqo-common = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
neqo-http3 = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
neqo-qpack = { tag = "v0.4.0", git = "https://github.com/mozilla/neqo" }
mio = "0.6.17"
mio-extras = "2.0.5"
log = "0.4.0"

[dependencies.neqo-crypto]
tag = "v0.2.4"
tag = "v0.4.0"
git = "https://github.com/mozilla/neqo"
default-features = false
features = ["gecko"]
@ -9,6 +9,7 @@
|
|||
use neqo_common::{qdebug, qerror, qinfo, qtrace, Datagram};
|
||||
use neqo_crypto::{init_db, AntiReplay};
|
||||
use neqo_http3::{Error, Http3Server, Http3ServerEvent};
|
||||
use neqo_qpack::QpackSettings;
|
||||
use neqo_transport::{FixedConnectionIdManager, Output};
|
||||
use std::env;
|
||||
|
||||
|
@ -48,7 +49,9 @@ fn process_events(server: &mut Http3Server) {
|
|||
),
|
||||
];
|
||||
|
||||
let path_hdr = headers.iter().find(|(k, _)| k == ":path");
|
||||
let path_hdr = headers
|
||||
.as_ref()
|
||||
.and_then(|h| h.iter().find(|(k, _)| k == ":path"));
|
||||
match path_hdr {
|
||||
Some((_, path)) if !path.is_empty() => {
|
||||
qtrace!("Serve request {}", path);
|
||||
|
@ -61,15 +64,15 @@ fn process_events(server: &mut Http3Server) {
|
|||
.stream_reset(Error::HttpVersionFallback.code())
|
||||
.unwrap();
|
||||
} else if path == "/EarlyResponse" {
|
||||
request
|
||||
.stream_reset(Error::HttpEarlyResponse.code())
|
||||
.unwrap();
|
||||
request.stream_reset(Error::HttpNoError.code()).unwrap();
|
||||
} else if path == "/RequestRejected" {
|
||||
request
|
||||
.stream_reset(Error::HttpRequestRejected.code())
|
||||
.unwrap();
|
||||
} else if path == "/.well-known/http-opportunistic" {
|
||||
let host_hdr = headers.iter().find(|(k, _)| k == ":authority");
|
||||
let host_hdr = headers
|
||||
.as_ref()
|
||||
.and_then(|h| h.iter().find(|(k, _)| k == ":authority"));
|
||||
match host_hdr {
|
||||
Some((_, host)) if !host.is_empty() => {
|
||||
let mut content = b"[\"http://".to_vec();
|
||||
|
@ -92,11 +95,13 @@ fn process_events(server: &mut Http3Server) {
|
|||
content.len().to_string(),
|
||||
),
|
||||
],
|
||||
content,
|
||||
&content,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
_ => request.set_response(&default_headers, default_ret).unwrap(),
|
||||
_ => request
|
||||
.set_response(&default_headers, &default_ret)
|
||||
.unwrap(),
|
||||
}
|
||||
} else {
|
||||
match path.trim_matches(|p| p == '/').parse::<usize>() {
|
||||
|
@ -110,17 +115,19 @@ fn process_events(server: &mut Http3Server) {
|
|||
),
|
||||
(String::from("content-length"), v.to_string()),
|
||||
],
|
||||
vec![b'a'; v],
|
||||
&vec![b'a'; v],
|
||||
)
|
||||
.unwrap(),
|
||||
Err(_) => {
|
||||
request.set_response(&default_headers, default_ret).unwrap()
|
||||
}
|
||||
Err(_) => request
|
||||
.set_response(&default_headers, &default_ret)
|
||||
.unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
request.set_response(&default_headers, default_ret).unwrap();
|
||||
request
|
||||
.set_response(&default_headers, &default_ret)
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -229,6 +236,13 @@ fn main() -> Result<(), io::Error> {
|
|||
)?;
|
||||
|
||||
let mut svr_timeout = None;
|
||||
|
||||
let qpack_settings = QpackSettings {
|
||||
max_table_size_encoder: MAX_TABLE_SIZE,
|
||||
max_table_size_decoder: MAX_TABLE_SIZE,
|
||||
max_blocked_streams: MAX_BLOCKED_STREAMS,
|
||||
};
|
||||
|
||||
let mut server = Http3Server::new(
|
||||
Instant::now(),
|
||||
&[" HTTP2 Test Cert"],
|
||||
|
@ -236,8 +250,7 @@ fn main() -> Result<(), io::Error> {
|
|||
AntiReplay::new(Instant::now(), Duration::from_secs(10), 7, 14)
|
||||
.expect("unable to setup anti-replay"),
|
||||
Rc::new(RefCell::new(FixedConnectionIdManager::new(5))),
|
||||
MAX_TABLE_SIZE,
|
||||
MAX_BLOCKED_STREAMS,
|
||||
qpack_settings,
|
||||
)
|
||||
.expect("We cannot make a server!");
|
||||
|
||||
|
|
|
@@ -195,6 +195,7 @@ Please commit or stash these changes before vendoring, or re-run with `--ignore-
'cloudabi',
'Inflector',
'mach',
'qlog',
],
'BSD-3-Clause': [
]
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"5014d3a1cafb9fea549a210316879e93af6b6cde4f56af8f33579f58efd688f1","src/codec.rs":"00846df0051f32ec8b75b2f8e0344422e0693acbd4151aaec31e3ae02d6e696c","src/datagram.rs":"4beb13d5ea7927df6801fbe684dc231626c1856010eaef975d866ee66e894a45","src/incrdecoder.rs":"7b7b7fba57714a3baf0fe881010a9f5a9814bf26b9283a6d56d1c44010cbd822","src/lib.rs":"c5abb57c37bdd913bfa237b274fce08c0b06fe852a6518159f60180eb478a861","src/log.rs":"943e4e332400d94805d60f965d1d0ae7aad180f6d5b50936d0bd9e085bbc1502","src/timer.rs":"ad70a231bd1f5eb4c54021ce03e4af29ff8cf5e28b0b28127bb14411c64de39c","tests/log.rs":"480b165b7907ec642c508b303d63005eee1427115d6973a349eaf6b2242ed18d"},"package":null}
|
||||
{"files":{"Cargo.toml":"1ae9f454f1303aac9492c99376ce4dea910cb22a92b87a8e67184e4e336defd5","src/codec.rs":"c33b458cf1631073587edf6b6bd1baafecc7fe9e18d3eb5c3ddc6aaa00bd70c5","src/datagram.rs":"569f8d9e34d7ee17144bf63d34136ecd9778da0d337e513f338738c50284615e","src/incrdecoder.rs":"8d44c4437461cae023448312cab4045ad6e3f0c9eb8af2383f7132be40a9d917","src/lib.rs":"ae4fd37c38e71ffe5a2df59cc4c6db102724e16fab047ddefb213706c66bade1","src/log.rs":"b8da388073f72a21128d52b0d0c963e07a3d3cf3368438ae3a50be34b8add3a4","src/qlog.rs":"4f131c7e4c2bb5862b33a0d1746348a4f63f9b57cfa7a85dce1b59f80ecfaa7b","src/timer.rs":"706333bf1b07f65df9d18904b1cb269e4b80dee93a9b239dd8cb128b293955ae","tests/log.rs":"480b165b7907ec642c508b303d63005eee1427115d6973a349eaf6b2242ed18d"},"package":null}
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "neqo-common"
|
||||
version = "0.2.4"
|
||||
version = "0.4.0"
|
||||
authors = ["Bobby Holley <bobbyholley@gmail.com>"]
|
||||
edition = "2018"
|
||||
license = "MIT/Apache-2.0"
|
||||
|
@ -10,6 +10,8 @@ num-traits = "0.2"
|
|||
log = {version = "0.4.0", default-features = false}
|
||||
env_logger = "0.6.1"
|
||||
lazy_static = "1.3.0"
|
||||
qlog = "0.2.0"
|
||||
chrono = "0.4.10"
|
||||
|
||||
[features]
|
||||
default = ["deny-warnings"]
|
||||
|
|
|
@ -8,7 +8,7 @@ use std::convert::TryFrom;
|
|||
use std::fmt::Debug;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use crate::hex;
|
||||
use crate::hex_with_len;
|
||||
|
||||
/// Decoder is a view into a byte array that has a read offset. Use it for parsing.
|
||||
pub struct Decoder<'a> {
|
||||
|
@ -165,7 +165,7 @@ impl<'a> Deref for Decoder<'a> {
|
|||
|
||||
impl<'a> Debug for Decoder<'a> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
f.write_str(&hex(self))
|
||||
f.write_str(&hex_with_len(self))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -348,7 +348,7 @@ impl Encoder {
|
|||
|
||||
impl Debug for Encoder {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
f.write_str(&hex(self))
|
||||
f.write_str(&hex_with_len(self))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -391,7 +391,7 @@ impl DerefMut for Encoder {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use super::{Decoder, Encoder};
|
||||
|
||||
#[test]
|
||||
fn decode() {
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
use std::net::SocketAddr;
|
||||
use std::ops::Deref;
|
||||
|
||||
use crate::hex;
|
||||
use crate::hex_with_len;
|
||||
|
||||
#[derive(PartialEq, Clone)]
|
||||
pub struct Datagram {
|
||||
|
@ -51,7 +51,7 @@ impl std::fmt::Debug for Datagram {
|
|||
"Datagram {:?}->{:?}: {}",
|
||||
self.src,
|
||||
self.dst,
|
||||
hex(&self.d)
|
||||
hex_with_len(&self.d)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -224,7 +224,7 @@ impl Default for IncrementalDecoder {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use super::{Decoder, IncrementalDecoder, IncrementalDecoderResult};
|
||||
use crate::codec::Encoder;
|
||||
|
||||
#[test]
|
||||
|
|
|
@ -11,6 +11,7 @@ mod codec;
|
|||
mod datagram;
|
||||
mod incrdecoder;
|
||||
pub mod log;
|
||||
pub mod qlog;
|
||||
pub mod timer;
|
||||
|
||||
pub use self::codec::{Decoder, Encoder};
|
||||
|
@ -33,6 +34,15 @@ macro_rules! matches {
|
|||
|
||||
#[must_use]
|
||||
pub fn hex(buf: &[u8]) -> String {
|
||||
let mut ret = String::with_capacity(buf.len() * 2);
|
||||
for b in buf {
|
||||
ret.push_str(&format!("{:02x}", b));
|
||||
}
|
||||
ret
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn hex_with_len(buf: &[u8]) -> String {
|
||||
let mut ret = String::with_capacity(10 + buf.len() * 3);
|
||||
ret.push_str(&format!("[{}]: ", buf.len()));
|
||||
for b in buf {
|
||||
|
@ -49,3 +59,26 @@ pub const fn const_max(a: usize, b: usize) -> usize {
|
|||
pub const fn const_min(a: usize, b: usize) -> usize {
|
||||
[a, b][(a >= b) as usize]
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Copy, Clone)]
|
||||
/// Client or Server.
|
||||
pub enum Role {
|
||||
Client,
|
||||
Server,
|
||||
}
|
||||
|
||||
impl Role {
|
||||
#[must_use]
|
||||
pub fn remote(self) -> Self {
|
||||
match self {
|
||||
Self::Client => Self::Server,
|
||||
Self::Server => Self::Client,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for Role {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "{:?}", self)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,11 +4,38 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use env_logger::Builder;
|
||||
use std::io::Write;
|
||||
use std::sync::Once;
|
||||
use std::time::Instant;
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! do_log {
|
||||
(target: $target:expr, $lvl:expr, $($arg:tt)+) => ({
|
||||
let lvl = $lvl;
|
||||
if lvl <= ::log::max_level() {
|
||||
::log::__private_api_log(
|
||||
::log::__log_format_args!($($arg)+),
|
||||
lvl,
|
||||
&($target, ::log::__log_module_path!(), ::log::__log_file!(), ::log::__log_line!()),
|
||||
);
|
||||
}
|
||||
});
|
||||
($lvl:expr, $($arg:tt)+) => ($crate::do_log!(target: ::log::__log_module_path!(), $lvl, $($arg)+))
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! log_subject {
|
||||
($lvl:expr, $subject:expr) => {{
|
||||
if $lvl <= ::log::max_level() {
|
||||
format!("{}", $subject)
|
||||
} else {
|
||||
String::new()
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
use env_logger::Builder;
|
||||
|
||||
static INIT_ONCE: Once = Once::new();
|
||||
|
||||
lazy_static! {
|
||||
|
@ -30,42 +57,42 @@ pub fn init() {
|
|||
)
|
||||
});
|
||||
if let Err(e) = builder.try_init() {
|
||||
::log::log!(::log::Level::Info, "Logging initialization error {:?}", e);
|
||||
do_log!(::log::Level::Info, "Logging initialization error {:?}", e);
|
||||
} else {
|
||||
::log::log!(::log::Level::Info, "Logging initialized");
|
||||
do_log!(::log::Level::Info, "Logging initialized");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! qlog {
|
||||
macro_rules! log_invoke {
|
||||
($lvl:expr, $ctx:expr, $($arg:tt)*) => ( {
|
||||
::neqo_common::log::init();
|
||||
::log::log!($lvl, "[{}] {}", $ctx, format!($($arg)*));
|
||||
::neqo_common::do_log!($lvl, "[{}] {}", $ctx, format!($($arg)*));
|
||||
} )
|
||||
}
|
||||
#[macro_export]
|
||||
macro_rules! qerror {
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Error, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Error, $($arg)*); } );
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Error, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Error, $($arg)*); } );
|
||||
}
|
||||
#[macro_export]
|
||||
macro_rules! qwarn {
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Warn, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Warn, $($arg)*); } );
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Warn, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Warn, $($arg)*); } );
|
||||
}
|
||||
#[macro_export]
|
||||
macro_rules! qinfo {
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Info, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Info, $($arg)*); } );
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Info, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Info, $($arg)*); } );
|
||||
}
|
||||
#[macro_export]
|
||||
macro_rules! qdebug {
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Debug, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Debug, $($arg)*); } );
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Debug, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Debug, $($arg)*); } );
|
||||
}
|
||||
#[macro_export]
|
||||
macro_rules! qtrace {
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::qlog!(::log::Level::Trace, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::log::log!(::log::Level::Trace, $($arg)*); } );
|
||||
([$ctx:expr], $($arg:tt)*) => (::neqo_common::log_invoke!(::log::Level::Trace, $ctx, $($arg)*););
|
||||
($($arg:tt)*) => ( { ::neqo_common::log::init(); ::neqo_common::do_log!(::log::Level::Trace, $($arg)*); } );
|
||||
}
|
||||
|
|
|
@ -0,0 +1,91 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use std::fmt;
|
||||
use std::path::PathBuf;
|
||||
use std::time::SystemTime;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use qlog::{
|
||||
self, CommonFields, Configuration, QlogStreamer, TimeUnits, Trace, VantagePoint,
|
||||
VantagePointType,
|
||||
};
|
||||
|
||||
use crate::Role;
|
||||
#[allow(clippy::module_name_repetitions)]
|
||||
pub struct NeqoQlog {
|
||||
qlog_path: PathBuf,
|
||||
streamer: QlogStreamer,
|
||||
}
|
||||
|
||||
impl NeqoQlog {
|
||||
/// # Errors
|
||||
///
|
||||
/// Will return `qlog::Error` if cannot write to the new log.
|
||||
pub fn new(mut streamer: QlogStreamer, qlog_path: PathBuf) -> Result<Self, qlog::Error> {
|
||||
streamer.start_log()?;
|
||||
|
||||
Ok(Self {
|
||||
streamer,
|
||||
qlog_path,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn stream(&mut self) -> &mut QlogStreamer {
|
||||
&mut self.streamer
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for NeqoQlog {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "NeqoQlog writing to {}", self.qlog_path.display())
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for NeqoQlog {
|
||||
fn drop(&mut self) {
|
||||
if let Err(e) = self.streamer.finish_log() {
|
||||
crate::do_log!(::log::Level::Error, "Error dropping NeqoQlog: {}", e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn new_trace(role: Role) -> qlog::Trace {
|
||||
Trace {
|
||||
vantage_point: VantagePoint {
|
||||
name: Some(format!("neqo-{}", role)),
|
||||
ty: match role {
|
||||
Role::Client => VantagePointType::Client,
|
||||
Role::Server => VantagePointType::Server,
|
||||
},
|
||||
flow: None,
|
||||
},
|
||||
title: Some(format!("neqo-{} trace", role)),
|
||||
description: Some("Example qlog trace description".to_string()),
|
||||
configuration: Some(Configuration {
|
||||
time_offset: Some("0".into()),
|
||||
time_units: Some(TimeUnits::Us),
|
||||
original_uris: None,
|
||||
}),
|
||||
common_fields: Some(CommonFields {
|
||||
group_id: None,
|
||||
protocol_type: None,
|
||||
reference_time: Some({
|
||||
let system_time = SystemTime::now();
|
||||
let datetime: DateTime<Utc> = system_time.into();
|
||||
datetime.to_rfc3339()
|
||||
}),
|
||||
}),
|
||||
event_fields: vec![
|
||||
"relative_time".to_string(),
|
||||
"category".to_string(),
|
||||
"event".to_string(),
|
||||
"data".to_string(),
|
||||
],
|
||||
events: Vec::new(),
|
||||
}
|
||||
}
|
|
@ -235,7 +235,7 @@ impl<T> Timer<T> {
|
|||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use super::{Duration, Instant, Timer};
|
||||
use lazy_static::lazy_static;
|
||||
|
||||
lazy_static! {
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"7b06f80130df7e4c49abd2efe4ddcc6bfedacb591623e9205bfd039f157ddf83","TODO":"ac0f1c2ebcca03f5b3c0cc56c5aedbb030a4b511e438bc07a57361c789f91e9f","bindings/bindings.toml":"00ff7348732c956b4f8829f00df2b18b3a7211f5fa2a4cea4ae40c0f859e5f50","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"4250bc8cecf44ec155b2bf4119303f3a0bbcccce461db237e3f34da1636e8511","src/aead.rs":"2013408fbcf9e93331ae14d9d6bdd096966f125b3cf48f83e671f537e89d4e77","src/agent.rs":"4c7ad4f0e64aa64e743af0b6aa8aeef3971215324def68239208a09f333582e6","src/agentio.rs":"27c33e821234134f65de74bb7215947bce995833603fa2c95e509e561ded33d6","src/auth.rs":"71ac7e297a5f872d26cf67b6bbd96e4548ea38374bdd84c1094f76a5de4ed1cb","src/cert.rs":"fd3fd2bbb38754bdcee3898549feae412943c9f719032531c1ad6e61783b5394","src/constants.rs":"e756c07525bd7c2ff271e504708f903b3ede0a3ae821446bd37701055eb11f5f","src/err.rs":"04f38831ca62d29d8aadfe9daf95fd29e68ece184e6d3e00bfb9ee1d12744033","src/exp.rs":"61586662407359c1ecb8ed4987bc3c702f26ba2e203a091a51b6d6363cbd510f","src/ext.rs":"bf7b5f23caf26ab14fba3baf0823dd093e4194f759779e4cfd608478312ed58c","src/hkdf.rs":"1bb57806bbf67af74966bb2bb724de9d6b0094c6f5cddbe12d46292d58ba1f16","src/hp.rs":"0384bc676d8cc66a2cfec7be9df176f04557e4f1424c6d19d03ba5687920ac86","src/lib.rs":"81c51aef34744748498e88ad8480883ae69c0537c2a80ae299f44f6bc237f8b0","src/once.rs":"d8b2bf7a9e3ce83bdd7f29d8f73ce7ad0268c9618ae7255028fea9f90c9c9fd6","src/p11.rs":"6e94cbb594b709c3081449bf50d9961d36648b5db95fb824779bff4f45125ad2","src/prio.rs":"bc4e97049563b136cb7b39f5171e7909d56a77ed46690aaacb781eeb4a4743e0","src/replay.rs":"9bc5826cc8be6afe787f0d403b3958245efce9bfbc7b3100734e5aec3f8b9753","src/result.rs":"cef34dfcb907723e195b56501132e4560e250b327783cb5e41201da5b63e9b5c","src/secrets.rs":"a60d6c5c949f575382b0eabaa217bb7dfed2361b2513242f62b4028cb112d0ee","src/selfencrypt.rs":"02e963e8b9ea0802f7ee64384e5ccef3e31420e75bc1aacd02270dd504ffbdb1","src/ssl.rs":"29c514427809ba3658ea0d9a4159fa6cf16809a9c19e034c4f02598f6bb42731","src/time.rs":"96d13a955a849249620b4a1dad15bb5964e89c288e6a5035b31ef8f4d35f1e0a","tests/aead.rs":"cccac271087fe37d0a890e5da04984bbfacb4bc12331473dfc189e4d6ebff5f2","tests/agent.rs":"4fa8fa803266b985e9b6329e6a218fe7bd779200b8e0cfa94f5813e0ccc10995","tests/ext.rs":"f5edc1f229703f786ec31a8035465c00275223f14a3c4abe52f3c7cf2686cc03","tests/handshake.rs":"bcc687c0e1b485658847faf28a9f5dbfdb297812bed1bd2e80593d5f9e1fee36","tests/hkdf.rs":"0e4853f629050ba4d8069be52b7a441b670d1abaf6b8cd670a8215e0b88beb37","tests/
hp.rs":"e6dd3cb4bceebc6fca8f270d8302ef34e14bda6c91fc4f9342ba1681be57ee03","tests/init.rs":"55df7cb95deb629f8701b55a8bcb91e797f30fb10e847a36a0a5a4e80488b002","tests/selfencrypt.rs":"60bfe8a0729cdaa6c2171146083266fa0e625a1d98b5f8735cd22b725d32398b"},"package":null}
|
||||
{"files":{"Cargo.toml":"2123157cae77dac2ae0e3f40f08e5720a3662f7c1fdb5259cb1f8a9e72387020","TODO":"ac0f1c2ebcca03f5b3c0cc56c5aedbb030a4b511e438bc07a57361c789f91e9f","bindings/bindings.toml":"0ae7922bb20f2b8cf54b307cd642303e65b00cfbc3e681877e2a7a86f7b22530","bindings/mozpkix.hpp":"77072c8bb0f6eb6bfe8cbadc111dcd92e0c79936d13f2e501aae1e5d289a6675","bindings/nspr_err.h":"2d5205d017b536c2d838bcf9bc4ec79f96dd50e7bb9b73892328781f1ee6629d","bindings/nspr_error.h":"e41c03c77b8c22046f8618832c9569fbcc7b26d8b9bbc35eea7168f35e346889","bindings/nspr_io.h":"085b289849ef0e77f88512a27b4d9bdc28252bd4d39c6a17303204e46ef45f72","bindings/nspr_time.h":"2e637fd338a5cf0fd3fb0070a47f474a34c2a7f4447f31b6875f5a9928d0a261","bindings/nss_ciphers.h":"95ec6344a607558b3c5ba8510f463b6295f3a2fb3f538a01410531045a5f62d1","bindings/nss_init.h":"ef49045063782fb612aff459172cc6a89340f15005808608ade5320ca9974310","bindings/nss_p11.h":"0b81e64fe6db49b2ecff94edd850be111ef99ec11220e88ceb1c67be90143a78","bindings/nss_secerr.h":"713e8368bdae5159af7893cfa517dabfe5103cede051dee9c9557c850a2defc6","bindings/nss_ssl.h":"af222fb957b989e392e762fa2125c82608a0053aff4fb97e556691646c88c335","bindings/nss_sslerr.h":"24b97f092183d8486f774cdaef5030d0249221c78343570d83a4ee5b594210ae","bindings/nss_sslopt.h":"b7807eb7abdad14db6ad7bc51048a46b065a0ea65a4508c95a12ce90e59d1eea","build.rs":"979eeb1d7da5342d3a5fd0836132a37f074711a6056d0dc64a91f86717bbd501","src/aead.rs":"b0ed05d399a32e4c923d4d385e4ae72d675d24c6cc86471f59a4edbe5cb2159a","src/agent.rs":"2841e3ae0b9a86990a44a0ecaa0da061b4eea15d782bd35232367b99482fa1f7","src/agentio.rs":"2ab0ad6349276a8c94dfd85bfaedcad24c8fff464b82f7369c1bd45038f79cfd","src/auth.rs":"71ac7e297a5f872d26cf67b6bbd96e4548ea38374bdd84c1094f76a5de4ed1cb","src/cert.rs":"fd3fd2bbb38754bdcee3898549feae412943c9f719032531c1ad6e61783b5394","src/constants.rs":"c39ee506a10d685fda77c1d2ddf691b595b067b4e1044ac7a21e360119d6002b","src/err.rs":"04f38831ca62d29d8aadfe9daf95fd29e68ece184e6d3e00bfb9ee1d12744033","src/exp.rs":"61586662407359c1ecb8ed4987bc3c702f26ba2e203a091a51b6d6363cbd510f","src/ext.rs":"2a7eb6bc2992679e5c04cf5561a4ce886ecbf549454ed927b32abeb09019632d","src/hkdf.rs":"40e44f4280497ef525c2b4c465f14f06d241150851668b264ee958f74321cfbe","src/hp.rs":"7fce64e0cc3a6a7e744bc797886bcfaa39679f0a81853b2e55ea0f54fb6bf700","src/lib.rs":"5f3c4c05c0a5ecb4e4cfc6c4d242e7603566f287bdb0f0ca46f6a773aa7714e9","src/once.rs":"b9850384899a1a016e839743d3489c0d4d916e1973746ef8c89872105d7d9736","src/p11.rs":"0b62ee5938aefb82e8faee5aa14e990a00442cc9744e8ba22eda80b32030c42c","src/prio.rs":"bc4e97049563b136cb7b39f5171e7909d56a77ed46690aaacb781eeb4a4743e0","src/replay.rs":"40924865994396441a68e6009ecbdf352d6a02fdf539aa65604124e26bffb4d3","src/result.rs":"cef34dfcb907723e195b56501132e4560e250b327783cb5e41201da5b63e9b5c","src/secrets.rs":"acb5befa74e06281c6f80d7298efc58f568bb4e6d949b4225c335e3f392be741","src/selfencrypt.rs":"f8d04728353fcbbdd50ad19217c9fd34974ffa8872c0d9d5d6d896d05f04baa5","src/ssl.rs":"d64c20ed2a0b63c5fa3aee674a622408a89a764ee225098f18d0c61ce6c6df29","src/time.rs":"13231bafe24e3c24b3ca582805929cc6dac017180cff7400e062894f22df5735","tests/aead.rs":"a1d8eb69f5672e064f84dce3d214b347a396718e3de56d57ccc108ee87f1cbc1","tests/agent.rs":"89c21e97bf9c8d893380c1d4ab91f4e12526e1a51acc0f19159e643ef8da2a4f","tests/ext.rs":"5f5de777599cbe1295a4461b32c249de74666edb0a13173f76948f2939963dfd","tests/handshake.rs":"6f12fb9a02d36f64254ffe49385de69fce8bc95b73af80be011f0e065d65a5a3","tests/hkdf.rs":"539235e9dcf2a56b72961a9a04f0080409adf6bf465bfad7c30026421b2d4326","tests/
hp.rs":"e52a7d2f4387f2dfe8bfe1da5867e8e0d3eb51e171c6904e18b18c4343536af8","tests/init.rs":"20aad800ac793aaf83059cf860593750509fdedeeff0c08a648e7a5cb398dae0","tests/selfencrypt.rs":"46e9a1a09c2ae577eb106d23a5cdacf762575c0dea1948aedab06ef7389ce713"},"package":null}
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "neqo-crypto"
|
||||
version = "0.2.4"
|
||||
version = "0.4.0"
|
||||
authors = ["Martin Thomson <mt@lowentropy.net>"]
|
||||
edition = "2018"
|
||||
build = "build.rs"
|
||||
|
|
|
@ -95,6 +95,7 @@ variables = [
|
|||
"SSL_RECORD_SIZE_LIMIT",
|
||||
"SSL_ENABLE_TLS13_COMPAT_MODE",
|
||||
"SSL_ENABLE_HELLO_DOWNGRADE_CHECK",
|
||||
"SSL_SUPPRESS_END_OF_EARLY_DATA",
|
||||
]
|
||||
|
||||
[nss_ciphers]
|
||||
|
|
|
@ -14,7 +14,6 @@ use std::env;
|
|||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use toml;
|
||||
|
||||
const BINDINGS_DIR: &str = "bindings";
|
||||
const BINDINGS_CONFIG: &str = "bindings.toml";
|
||||
|
@ -94,6 +93,8 @@ fn nss_dir() -> PathBuf {
|
|||
Command::new("hg")
|
||||
.args(&[
|
||||
"clone",
|
||||
"-u",
|
||||
"NSS_3_53_RTM",
|
||||
"https://hg.mozilla.org/projects/nss",
|
||||
dir.to_str().unwrap(),
|
||||
])
|
||||
|
@ -105,13 +106,15 @@ fn nss_dir() -> PathBuf {
|
|||
Command::new("hg")
|
||||
.args(&[
|
||||
"clone",
|
||||
"-u",
|
||||
"NSPR_4_25_RTM",
|
||||
"https://hg.mozilla.org/projects/nspr",
|
||||
nspr_dir.to_str().unwrap(),
|
||||
])
|
||||
.status()
|
||||
.expect("can't clone nspr");
|
||||
}
|
||||
dir.to_path_buf()
|
||||
dir
|
||||
};
|
||||
assert!(dir.is_dir());
|
||||
// Note that this returns a relative path because UNC
|
||||
|
@ -384,7 +387,7 @@ fn main() {
|
|||
let config_file = PathBuf::from(BINDINGS_DIR).join(BINDINGS_CONFIG);
|
||||
println!("cargo:rerun-if-changed={}", config_file.to_str().unwrap());
|
||||
let config = fs::read_to_string(config_file).expect("unable to read binding configuration");
|
||||
let config: HashMap<String, Bindings> = toml::from_str(&config).unwrap();
|
||||
let config: HashMap<String, Bindings> = ::toml::from_str(&config).unwrap();
|
||||
|
||||
for (k, v) in &config {
|
||||
build_bindings(k, v, &flags[..], cfg!(feature = "gecko"));
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::constants::*;
|
||||
use crate::constants::{Cipher, Version};
|
||||
use crate::err::{Error, Res};
|
||||
use crate::p11::{PK11SymKey, SymKey};
|
||||
use crate::ssl;
|
||||
|
|
|
@ -9,7 +9,9 @@ use crate::agentio::{AgentIo, METHODS};
|
|||
use crate::assert_initialized;
|
||||
use crate::auth::AuthenticationStatus;
|
||||
pub use crate::cert::CertificateInfo;
|
||||
use crate::constants::*;
|
||||
use crate::constants::{
|
||||
Alert, Cipher, Epoch, Extension, Group, SignatureScheme, Version, TLS_VERSION_1_3,
|
||||
};
|
||||
use crate::err::{is_blocked, secstatus_to_res, Error, PRErrorCode, Res};
|
||||
use crate::ext::{ExtensionHandler, ExtensionTracker};
|
||||
use crate::p11;
|
||||
|
@ -19,7 +21,7 @@ use crate::secrets::SecretHolder;
|
|||
use crate::ssl::{self, PRBool};
|
||||
use crate::time::TimeHolder;
|
||||
|
||||
use neqo_common::{matches, qdebug, qinfo, qtrace, qwarn};
|
||||
use neqo_common::{hex, matches, qdebug, qinfo, qtrace, qwarn};
|
||||
use std::cell::RefCell;
|
||||
use std::convert::TryFrom;
|
||||
use std::ffi::CString;
|
||||
|
@ -94,7 +96,7 @@ macro_rules! preinfo_arg {
|
|||
pub fn $v(&self) -> Option<$t> {
|
||||
match self.info.valuesSet & ssl::$m {
|
||||
0 => None,
|
||||
_ => Some(self.info.$f as $t)
|
||||
_ => Some($t::from(self.info.$f)),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -220,9 +222,6 @@ pub struct SecretAgent {
|
|||
|
||||
extension_handlers: Vec<ExtensionTracker>,
|
||||
inf: Option<SecretAgentInfo>,
|
||||
|
||||
/// Whether or not EndOfEarlyData should be suppressed.
|
||||
no_eoed: bool,
|
||||
}
|
||||
|
||||
impl SecretAgent {
|
||||
|
@ -242,8 +241,6 @@ impl SecretAgent {
|
|||
|
||||
extension_handlers: Vec::new(),
|
||||
inf: None,
|
||||
|
||||
no_eoed: false,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -349,10 +346,7 @@ impl SecretAgent {
|
|||
/// # Errors
|
||||
/// If the range of versions isn't supported.
|
||||
pub fn set_version_range(&mut self, min: Version, max: Version) -> Res<()> {
|
||||
let range = ssl::SSLVersionRange {
|
||||
min: min as ssl::PRUint16,
|
||||
max: max as ssl::PRUint16,
|
||||
};
|
||||
let range = ssl::SSLVersionRange { min, max };
|
||||
secstatus_to_res(unsafe { ssl::SSL_VersionRangeSet(self.fd, &range) })
|
||||
}
|
||||
|
||||
|
@ -361,18 +355,23 @@ impl SecretAgent {
|
|||
/// # Errors
|
||||
/// If NSS can't enable or disable ciphers.
|
||||
pub fn enable_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> {
|
||||
if self.state != HandshakeState::New {
|
||||
qwarn!([self], "Cannot enable ciphers in state {:?}", self.state);
|
||||
return Err(Error::InternalError);
|
||||
}
|
||||
|
||||
let all_ciphers = unsafe { ssl::SSL_GetImplementedCiphers() };
|
||||
let cipher_count = unsafe { ssl::SSL_GetNumImplementedCiphers() } as usize;
|
||||
let cipher_count = usize::from(unsafe { ssl::SSL_GetNumImplementedCiphers() });
|
||||
for i in 0..cipher_count {
|
||||
let p = all_ciphers.wrapping_add(i);
|
||||
secstatus_to_res(unsafe {
|
||||
ssl::SSL_CipherPrefSet(self.fd, i32::from(*p), false as ssl::PRBool)
|
||||
ssl::SSL_CipherPrefSet(self.fd, i32::from(*p), ssl::PRBool::from(false))
|
||||
})?;
|
||||
}
|
||||
|
||||
for c in ciphers {
|
||||
secstatus_to_res(unsafe {
|
||||
ssl::SSL_CipherPrefSet(self.fd, i32::from(*c), true as ssl::PRBool)
|
||||
ssl::SSL_CipherPrefSet(self.fd, i32::from(*c), ssl::PRBool::from(true))
|
||||
})?;
|
||||
}
|
||||
Ok(())
|
||||
|
@ -400,9 +399,7 @@ impl SecretAgent {
|
|||
/// # Errors
|
||||
/// Returns an error if the option or option value is invalid; i.e., never.
|
||||
pub fn set_option(&mut self, opt: ssl::Opt, value: bool) -> Res<()> {
|
||||
secstatus_to_res(unsafe {
|
||||
ssl::SSL_OptionSet(self.fd, opt.as_int(), opt.map_enabled(value))
|
||||
})
|
||||
opt.set(self.fd, value)
|
||||
}
|
||||
|
||||
/// Enable 0-RTT.
|
||||
|
@ -414,8 +411,11 @@ impl SecretAgent {
|
|||
}
|
||||
|
||||
/// Disable the `EndOfEarlyData` message.
|
||||
pub fn disable_end_of_early_data(&mut self) {
|
||||
self.no_eoed = true;
|
||||
///
|
||||
/// # Errors
|
||||
/// See `set_option`.
|
||||
pub fn disable_end_of_early_data(&mut self) -> Res<()> {
|
||||
self.set_option(ssl::Opt::SuppressEndOfEarlyData, true)
|
||||
}
|
||||
|
||||
/// `set_alpn` sets a list of preferred protocols, starting with the most preferred.
|
||||
|
@ -602,28 +602,6 @@ impl SecretAgent {
|
|||
self.capture_error(RecordList::setup(self.fd))
|
||||
}
|
||||
|
||||
fn inject_eoed(&mut self) -> Res<()> {
|
||||
// EndOfEarlyData is as follows:
|
||||
// struct {
|
||||
// HandshakeType msg_type = end_of_early_data(5);
|
||||
// uint24 length = 0;
|
||||
// };
|
||||
const END_OF_EARLY_DATA: &[u8] = &[5, 0, 0, 0];
|
||||
|
||||
if self.no_eoed {
|
||||
let mut read_epoch: u16 = 0;
|
||||
unsafe { ssl::SSL_GetCurrentEpoch(self.fd, &mut read_epoch, null_mut()) }?;
|
||||
if read_epoch == 1 {
|
||||
// It's waiting for EndOfEarlyData, so feed one in.
|
||||
// Note that this is the test that ensures that we only do this for the server.
|
||||
let eoed = Record::new(1, 22, END_OF_EARLY_DATA);
|
||||
self.capture_error(eoed.write(self.fd))?;
|
||||
self.no_eoed = false;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Drive the TLS handshake, but get the raw content of records, not
|
||||
/// protected records as bytes. This function is incompatible with
|
||||
/// `handshake()`; use either this or `handshake()` exclusively.
|
||||
|
@ -635,7 +613,7 @@ impl SecretAgent {
|
|||
/// When the handshake fails this returns an error.
|
||||
pub fn handshake_raw(&mut self, now: Instant, input: Option<Record>) -> Res<RecordList> {
|
||||
self.now.set(now)?;
|
||||
let mut records = self.setup_raw()?;
|
||||
let records = self.setup_raw()?;
|
||||
|
||||
// Fire off any authentication we might need to complete.
|
||||
if let HandshakeState::Authenticated(ref err) = self.state {
|
||||
|
@ -648,9 +626,6 @@ impl SecretAgent {
|
|||
|
||||
// Feed in any records.
|
||||
if let Some(rec) = input {
|
||||
if rec.epoch == 2 {
|
||||
self.inject_eoed()?;
|
||||
}
|
||||
self.capture_error(rec.write(self.fd))?;
|
||||
}
|
||||
|
||||
|
@ -658,10 +633,6 @@ impl SecretAgent {
|
|||
let rv = secstatus_to_res(unsafe { ssl::SSL_ForceHandshake(self.fd) });
|
||||
self.update_state(rv)?;
|
||||
|
||||
if self.no_eoed {
|
||||
records.remove_eoed();
|
||||
}
|
||||
|
||||
Ok(*Pin::into_inner(records))
|
||||
}
|
||||
|
||||
|
@ -749,7 +720,7 @@ impl Client {
|
|||
let resumption = resumption_ptr.as_mut().unwrap();
|
||||
let mut v = Vec::with_capacity(len as usize);
|
||||
v.extend_from_slice(std::slice::from_raw_parts(token, len as usize));
|
||||
qdebug!([format!("{:p}", fd)], "Got resumption token");
|
||||
qinfo!([format!("{:p}", fd)], "Got resumption token {}", hex(&v));
|
||||
*resumption = Some(v);
|
||||
ssl::SECSuccess
|
||||
}
|
||||
|
|
|
@ -4,12 +4,12 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::constants::*;
|
||||
use crate::constants::{ContentType, Epoch};
|
||||
use crate::err::{nspr, Error, PR_SetError, Res};
|
||||
use crate::prio;
|
||||
use crate::ssl;
|
||||
|
||||
use neqo_common::{hex, qtrace};
|
||||
use neqo_common::{hex, hex_with_len, qtrace};
|
||||
use std::cmp::min;
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
use std::fmt;
|
||||
|
@ -35,7 +35,7 @@ pub fn as_c_void<T: Unpin>(pin: &mut Pin<Box<T>>) -> *mut c_void {
|
|||
#[derive(Default, Debug)]
|
||||
struct RecordLength {
|
||||
epoch: Epoch,
|
||||
ct: ssl::SSLContentType::Type,
|
||||
ct: ContentType,
|
||||
len: usize,
|
||||
}
|
||||
|
||||
|
@ -43,13 +43,13 @@ struct RecordLength {
|
|||
#[derive(Default)]
|
||||
pub struct Record {
|
||||
pub epoch: Epoch,
|
||||
pub ct: ssl::SSLContentType::Type,
|
||||
pub ct: ContentType,
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
|
||||
impl Record {
|
||||
#[must_use]
|
||||
pub fn new(epoch: Epoch, ct: ssl::SSLContentType::Type, data: &[u8]) -> Self {
|
||||
pub fn new(epoch: Epoch, ct: ContentType, data: &[u8]) -> Self {
|
||||
Self {
|
||||
epoch,
|
||||
ct,
|
||||
|
@ -64,7 +64,7 @@ impl Record {
|
|||
ssl::SSL_RecordLayerData(
|
||||
fd,
|
||||
self.epoch,
|
||||
self.ct,
|
||||
ssl::SSLContentType::Type::from(self.ct),
|
||||
self.data.as_ptr(),
|
||||
c_uint::try_from(self.data.len())?,
|
||||
)
|
||||
|
@ -79,7 +79,7 @@ impl fmt::Debug for Record {
|
|||
"Record {:?}:{:?} {}",
|
||||
self.epoch,
|
||||
self.ct,
|
||||
hex(&self.data[..])
|
||||
hex_with_len(&self.data[..])
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ pub struct RecordList {
|
|||
}
|
||||
|
||||
impl RecordList {
|
||||
fn append(&mut self, epoch: Epoch, ct: ssl::SSLContentType::Type, data: &[u8]) {
|
||||
fn append(&mut self, epoch: Epoch, ct: ContentType, data: &[u8]) {
|
||||
self.records.push(Record::new(epoch, ct, data));
|
||||
}
|
||||
|
||||
|
@ -112,7 +112,7 @@ impl RecordList {
|
|||
let records = a.as_mut().unwrap();
|
||||
|
||||
let slice = std::slice::from_raw_parts(data, len as usize);
|
||||
records.append(epoch, ct, slice);
|
||||
records.append(epoch, ContentType::try_from(ct).unwrap(), slice);
|
||||
ssl::SECSuccess
|
||||
}
|
||||
|
||||
|
|
|
@ -87,6 +87,16 @@ remap_enum! {
|
|||
}
|
||||
}
|
||||
|
||||
remap_enum! {
|
||||
ContentType: u8 => ssl::SSLContentType {
|
||||
TLS_CT_CHANGE_CIPHER_SPEC = ssl_ct_change_cipher_spec,
|
||||
TLS_CT_ALERT = ssl_ct_alert,
|
||||
TLS_CT_HANDSHAKE = ssl_ct_handshake,
|
||||
TLS_CT_APPLICATION_DATA = ssl_ct_application_data,
|
||||
TLS_CT_ACK = ssl_ct_ack,
|
||||
}
|
||||
}
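Editor's note: a minimal sketch of how the new `ContentType` constants are used elsewhere in this patch; the epoch and payload values are placeholders, and the conversions shown are the `From`/`TryFrom` calls exercised in agentio.rs above.
// Sketch only, within the neqo-crypto crate context from this patch.
let rec = Record::new(0, TLS_CT_HANDSHAKE, b"placeholder");
// Convert back to the raw NSS type when handing the record to ssl::SSL_RecordLayerData:
let raw_ct = ssl::SSLContentType::Type::from(rec.ct);
// And from the raw type delivered by the record callback:
let ct = ContentType::try_from(raw_ct).unwrap();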
|
||||
|
||||
remap_enum! {
|
||||
Extension: u16 => ssl::SSLExtensionType {
|
||||
TLS_EXT_SERVER_NAME = ssl_server_name_xtn,
|
||||
|
|
|
@ -5,7 +5,9 @@
|
|||
// except according to those terms.
|
||||
|
||||
use crate::agentio::as_c_void;
|
||||
use crate::constants::*;
|
||||
use crate::constants::{
|
||||
Extension, HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS,
|
||||
};
|
||||
use crate::err::Res;
|
||||
use crate::ssl::{
|
||||
PRBool, PRFileDesc, SECFailure, SECStatus, SECSuccess, SSLAlertDescription,
|
||||
|
|
|
@ -4,7 +4,10 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::constants::*;
|
||||
use crate::constants::{
|
||||
Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256,
|
||||
TLS_VERSION_1_3,
|
||||
};
|
||||
use crate::err::{Error, Res};
|
||||
use crate::p11::{
|
||||
random, PK11Origin, PK11SymKey, PK11_GetInternalSlot, PK11_ImportSymKey, SECItem, SECItemType,
|
||||
|
|
|
@ -4,7 +4,9 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::constants::*;
|
||||
use crate::constants::{
|
||||
Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256,
|
||||
};
|
||||
use crate::err::{secstatus_to_res, Error, Res};
|
||||
use crate::p11::{
|
||||
PK11SymKey, PK11_Encrypt, PK11_GetBlockSize, PK11_GetMechanism, SECItem, SECItemType, SymKey,
|
||||
|
|
|
@ -39,13 +39,14 @@ pub use self::agent::{
|
|||
Agent, Client, HandshakeState, Record, RecordList, SecretAgent, SecretAgentInfo,
|
||||
SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker,
|
||||
};
|
||||
pub use self::auth::AuthenticationStatus;
|
||||
pub use self::constants::*;
|
||||
pub use self::err::{Error, PRErrorCode, Res};
|
||||
pub use self::ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult};
|
||||
pub use self::p11::{random, SymKey};
|
||||
pub use self::replay::AntiReplay;
|
||||
pub use self::secrets::SecretDirection;
|
||||
pub use auth::AuthenticationStatus;
|
||||
pub use self::ssl::Opt;
|
||||
|
||||
use self::once::OnceResult;
|
||||
|
||||
|
|
|
@ -32,7 +32,7 @@ impl<T> OnceResult<T> {
|
|||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use super::OnceResult;
|
||||
|
||||
static mut STATIC_ONCE_RESULT: OnceResult<u64> = OnceResult::new();
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
|
||||
use crate::err::{secstatus_to_res, Error, Res};
|
||||
|
||||
use neqo_common::hex;
|
||||
use neqo_common::hex_with_len;
|
||||
|
||||
use std::convert::TryInto;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
|
@ -94,7 +94,7 @@ impl Clone for SymKey {
|
|||
impl std::fmt::Debug for SymKey {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
if let Ok(b) = self.as_bytes() {
|
||||
write!(f, "SymKey {}", hex(b))
|
||||
write!(f, "SymKey {}", hex_with_len(b))
|
||||
} else {
|
||||
write!(f, "Opaque SymKey")
|
||||
}
|
||||
|
|
|
@ -25,7 +25,7 @@ experimental_api!(SSL_CreateAntiReplayContext(
|
|||
bits: c_uint,
|
||||
ctx: *mut *mut SSLAntiReplayContext,
|
||||
));
|
||||
experimental_api!(SSL_ReleaseAntiReplayContext(ctx: *mut SSLAntiReplayContext,));
|
||||
experimental_api!(SSL_ReleaseAntiReplayContext(ctx: *mut SSLAntiReplayContext));
|
||||
experimental_api!(SSL_SetAntiReplayContext(
|
||||
fd: *mut PRFileDesc,
|
||||
ctx: *mut SSLAntiReplayContext,
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
// except according to those terms.
|
||||
|
||||
use crate::agentio::as_c_void;
|
||||
use crate::constants::*;
|
||||
use crate::constants::Epoch;
|
||||
use crate::err::Res;
|
||||
use crate::p11::{PK11SymKey, PK11_ReferenceSymKey, SymKey};
|
||||
use crate::ssl::{PRFileDesc, SSLSecretCallback, SSLSecretDirection};
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
// except according to those terms.
|
||||
|
||||
use crate::aead::Aead;
|
||||
use crate::constants::*;
|
||||
use crate::constants::{Cipher, Version};
|
||||
use crate::err::{Error, Res};
|
||||
use crate::hkdf;
|
||||
use crate::p11::{random, SymKey};
|
||||
|
|
|
@ -7,7 +7,8 @@
|
|||
#![allow(dead_code, non_upper_case_globals, non_snake_case)]
|
||||
#![allow(clippy::cognitive_complexity, clippy::too_many_lines)]
|
||||
|
||||
use crate::constants::*;
|
||||
use crate::constants::Epoch;
|
||||
use crate::err::{secstatus_to_res, Res};
|
||||
|
||||
use std::os::raw::{c_uint, c_void};
|
||||
|
||||
|
@ -38,12 +39,13 @@ pub enum Opt {
|
|||
RecordSizeLimit,
|
||||
Tls13CompatMode,
|
||||
HelloDowngradeCheck,
|
||||
SuppressEndOfEarlyData,
|
||||
}
|
||||
|
||||
impl Opt {
|
||||
// Cast is safe here because SSLOptions are within the i32 range
|
||||
#[allow(clippy::cast_possible_wrap)]
|
||||
pub fn as_int(self) -> PRInt32 {
|
||||
pub(crate) fn as_int(self) -> PRInt32 {
|
||||
let i = match self {
|
||||
Self::Locking => SSLOption::SSL_NO_LOCKS,
|
||||
Self::Tickets => SSLOption::SSL_ENABLE_SESSION_TICKETS,
|
||||
|
@ -55,18 +57,23 @@ impl Opt {
|
|||
Self::RecordSizeLimit => SSLOption::SSL_RECORD_SIZE_LIMIT,
|
||||
Self::Tls13CompatMode => SSLOption::SSL_ENABLE_TLS13_COMPAT_MODE,
|
||||
Self::HelloDowngradeCheck => SSLOption::SSL_ENABLE_HELLO_DOWNGRADE_CHECK,
|
||||
Self::SuppressEndOfEarlyData => SSLOption::SSL_SUPPRESS_END_OF_EARLY_DATA,
|
||||
};
|
||||
i as PRInt32
|
||||
}
|
||||
|
||||
// Some options are backwards, like SSL_NO_LOCKS, so use this to manage that.
|
||||
pub fn map_enabled(self, enabled: bool) -> PRIntn {
|
||||
fn map_enabled(self, enabled: bool) -> PRIntn {
|
||||
let v = match self {
|
||||
Self::Locking => !enabled,
|
||||
_ => enabled,
|
||||
};
|
||||
PRIntn::from(v)
|
||||
}
|
||||
|
||||
pub(crate) fn set(self, fd: *mut PRFileDesc, value: bool) -> Res<()> {
|
||||
secstatus_to_res(unsafe { SSL_OptionSet(fd, self.as_int(), self.map_enabled(value)) })
|
||||
}
|
||||
}
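Editor's note: a hedged sketch of how the crate can flip the new `SuppressEndOfEarlyData` option now that `Opt::set` exists; `fd` stands for the agent's NSS `PRFileDesc` handle and is assumed here, since the caller is not shown in this diff.
// Inside neqo-crypto (Opt::set is pub(crate)), e.g. during SecretAgent setup:
Opt::SuppressEndOfEarlyData.set(fd, true)?;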
|
||||
|
||||
experimental_api!(SSL_GetCurrentEpoch(
|
||||
|
|
|
@ -204,7 +204,10 @@ impl Default for TimeHolder {
|
|||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use super::{get_base, init, Interval, PRTime, Time};
|
||||
use crate::err::Res;
|
||||
use std::convert::{TryFrom, TryInto};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[test]
|
||||
fn convert_stable() {
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#![warn(clippy::pedantic)]
|
||||
|
||||
use neqo_crypto::aead::Aead;
|
||||
use neqo_crypto::constants::*;
|
||||
use neqo_crypto::constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3};
|
||||
use neqo_crypto::hkdf;
|
||||
use test_fixture::fixture_init;
|
||||
|
||||
|
|
|
@ -1,12 +1,19 @@
|
|||
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
|
||||
#![warn(clippy::pedantic)]
|
||||
|
||||
use neqo_crypto::*;
|
||||
use neqo_crypto::{
|
||||
AuthenticationStatus, Client, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult,
|
||||
ZeroRttChecker, TLS_AES_128_GCM_SHA256, TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1,
|
||||
TLS_VERSION_1_3,
|
||||
};
|
||||
|
||||
use std::boxed::Box;
|
||||
|
||||
mod handshake;
|
||||
use crate::handshake::*;
|
||||
use crate::handshake::{
|
||||
connect, connect_fail, forward_records, resumption_setup, PermissiveZeroRttChecker, Resumption,
|
||||
ZERO_RTT_TOKEN_DATA,
|
||||
};
|
||||
use test_fixture::{fixture_init, now};
|
||||
|
||||
#[test]
|
||||
|
@ -307,7 +314,9 @@ fn zero_rtt_no_eoed() {
|
|||
.set_resumption_token(&token[..])
|
||||
.expect("should accept token");
|
||||
client.enable_0rtt().expect("should enable 0-RTT");
|
||||
client.disable_end_of_early_data();
|
||||
client
|
||||
.disable_end_of_early_data()
|
||||
.expect("should disable EOED");
|
||||
server
|
||||
.enable_0rtt(
|
||||
anti_replay.as_ref().unwrap(),
|
||||
|
@ -315,7 +324,9 @@ fn zero_rtt_no_eoed() {
|
|||
Box::new(PermissiveZeroRttChecker::default()),
|
||||
)
|
||||
.expect("should enable 0-RTT");
|
||||
server.disable_end_of_early_data();
|
||||
server
|
||||
.disable_end_of_early_data()
|
||||
.expect("should disable EOED");
|
||||
|
||||
connect(&mut client, &mut server);
|
||||
assert!(client.info().unwrap().early_data_accepted());
|
||||
|
@ -357,6 +368,7 @@ fn reject_zero_rtt() {
|
|||
|
||||
#[test]
|
||||
fn close() {
|
||||
fixture_init();
|
||||
let mut client = Client::new("server.example").expect("should create client");
|
||||
let mut server = Server::new(&["key"]).expect("should create server");
|
||||
connect(&mut client, &mut server);
|
||||
|
@ -366,6 +378,7 @@ fn close() {
|
|||
|
||||
#[test]
|
||||
fn close_client_twice() {
|
||||
fixture_init();
|
||||
let mut client = Client::new("server.example").expect("should create client");
|
||||
let mut server = Server::new(&["key"]).expect("should create server");
|
||||
connect(&mut client, &mut server);
|
||||
|
|
|
@ -1,13 +1,15 @@
|
|||
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
|
||||
#![warn(clippy::pedantic)]
|
||||
|
||||
use neqo_crypto::*;
|
||||
use neqo_crypto::constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS};
|
||||
use neqo_crypto::ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult};
|
||||
use neqo_crypto::{Client, Server};
|
||||
use std::cell::RefCell;
|
||||
use std::rc::Rc;
|
||||
use test_fixture::fixture_init;
|
||||
|
||||
mod handshake;
|
||||
use crate::handshake::*;
|
||||
use crate::handshake::connect;
|
||||
|
||||
struct NoopExtensionHandler;
|
||||
impl ExtensionHandler for NoopExtensionHandler {}
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
#![allow(dead_code)]
|
||||
|
||||
use neqo_common::qinfo;
|
||||
use neqo_crypto::*;
|
||||
use neqo_crypto::{
|
||||
AntiReplay, AuthenticationStatus, Client, HandshakeState, RecordList, Res, SecretAgent, Server,
|
||||
ZeroRttCheckResult, ZeroRttChecker,
|
||||
};
|
||||
use std::mem;
|
||||
use std::time::Instant;
|
||||
use test_fixture::{anti_replay, fixture_init, now};
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
|
||||
#![warn(clippy::pedantic)]
|
||||
|
||||
use neqo_crypto::constants::*;
|
||||
use neqo_crypto::constants::{
|
||||
Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256,
|
||||
TLS_VERSION_1_3,
|
||||
};
|
||||
use neqo_crypto::{hkdf, SymKey};
|
||||
use test_fixture::fixture_init;
|
||||
|
||||
|
|
|
@ -1,7 +1,9 @@
|
|||
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
|
||||
#![warn(clippy::pedantic)]
|
||||
|
||||
use neqo_crypto::constants::*;
|
||||
use neqo_crypto::constants::{
|
||||
Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_VERSION_1_3,
|
||||
};
|
||||
use neqo_crypto::hkdf;
|
||||
use neqo_crypto::hp::HpKey;
|
||||
use test_fixture::fixture_init;
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
// a different version of init_db. That causes explosions as they get
|
||||
// different versions of the Once instance they use and they initialize NSS
|
||||
// twice, probably in parallel. That doesn't work out well.
|
||||
use neqo_crypto::*;
|
||||
use neqo_crypto::{assert_initialized, init_db};
|
||||
|
||||
// Pull in the NSS internals so that we can ask NSS if it thinks that
|
||||
// it is properly initialized.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
|
||||
#![warn(clippy::pedantic)]
|
||||
|
||||
use neqo_crypto::constants::*;
|
||||
use neqo_crypto::constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3};
|
||||
use neqo_crypto::{init, selfencrypt::SelfEncrypt, Error};
|
||||
|
||||
#[test]
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"852873c28cf410a18b26b7e9903bb03f10d0f16127637011350764aca717e52e","src/client_events.rs":"8e77e6e92c3d5933621f2baee3baacab230486ad8b6df1eca321ea74ed7cdcbd","src/connection.rs":"dc0736489ce1c0abf441eb6e9a10e48c0e6bc406ea628ae05fd26358c291b1f3","src/connection_client.rs":"8e9f2ece451d6fb1c7db9274aab3b342639337d9c36dba97280fb11842e7a88b","src/connection_server.rs":"0f55d4564bd7863bd2c3f83cb52cce78c4ad9271855376ce6b9562b9975e03ff","src/control_stream_local.rs":"e67877e148264fd7c19dfd13ef5a54ba48ef8f0b3109ae73d38f84417c4f2d0e","src/control_stream_remote.rs":"c205633af8539cd55f289071c6845b5bb2b0a9778f15976829c5d4a492360e19","src/hframe.rs":"5b580d431ae9639bebfa1868a8f4f358e46884c34b81011126745e64244e4323","src/hsettings_frame.rs":"349a4413ce13f03e05264e6c4b22d231276a1c96e3983aada4478b038ec89dbc","src/lib.rs":"9fd22f4da0854a08720f58163f2f64bee41717e62c45d162fed50566f786caf2","src/response_stream.rs":"808c40e0bc51599e2b253cf9d535ae8404abaccc7fd875f8e08eafdfaeab3012","src/server.rs":"9fe5fdbaa770d02059b8a4357b0cef0d52dba0d89ab58cb094aa3c70cbc7d84a","src/server_connection_events.rs":"d2b973a095f29cb0ac6fb84705165b034960d09b2dde7693bab96e6b802c6fba","src/server_events.rs":"f997bd329d45115f6a527eba8f0f1ecf21c0dd9a3184f08fc5002e34f4cfe2f0","src/stream_type_reader.rs":"da2b7b0358cb4829493cb964cae67c85e9efdf4127958aade7a56733ddc4f12e","src/transaction_client.rs":"8a96f2acb0cd6f7c09d1e9b92a71171a474d46c7e21b55e546aa4d6c330981d3","src/transaction_server.rs":"1af45a238950acfc563484d2b5ebe805a83bdcc1096f5c03ed8d180c4e7428d5","tests/httpconn.rs":"7955f6ac4406b5d770e0fb10258aff529a1c01020374dfc5f85d8608abb68f6f"},"package":null}
|
||||
{"files":{"Cargo.toml":"e155725388085fb02552cfd19ebc1a5f3391dbd31557f85ad50caa728ce68c19","src/client_events.rs":"8c19986b7372b36d1265d4a848b07b5c334511cbdfc02f313c54e551523c08f5","src/connection.rs":"cebdb535151ec5598161091c6b709c38852aa32f32855cee8f2c6129a2bcb5a0","src/connection_client.rs":"4ce2aec1b869ab4d683dc5599eeb266899768ed13e6fe277fa7bdb8b4df9682e","src/connection_server.rs":"c96e347cbc53a6abb12ef86f548cac52526069884ee97d384aa91a1ae3cd43aa","src/control_stream_local.rs":"03d6259599543da2154388d5e48efbc06271e079429d3d946278c4c58c0521c7","src/control_stream_remote.rs":"121ac329dafc43efbfa3a0d26ab42b164d6a467523bc34193d03b7a0597ffd21","src/hframe.rs":"16bed02bec406f897ca847e0e2e060fa4e0e03d0397fab40e640c24679652228","src/hsettings_frame.rs":"f7cd5dc349c4e3d77b35effae620578492631cf33e7db9c93418a119b90cadc3","src/lib.rs":"fc76582370c384294d08e55f9e3d026ca2645b4418b1e989328f61d2f280ffff","src/push_controller.rs":"36e70602887fe685986cbe7960ec5270feda61b7f5a87474f585248fe13104fd","src/recv_message.rs":"d651428ed86cdea0f54102c3218eb4c7f99aac6fcad739b5538f6fb8fee9c918","src/send_message.rs":"4aa03879c64e9382fc8e6fcdda5c7674878e61a374e99af58139408eaf520c0f","src/server.rs":"b8e815be0e297aa422fe9067e2ff62c55e948258710b605ec82d4f8ea6b95698","src/server_connection_events.rs":"d95c9c441b278d9cc7577e5a2d8c1b00cf2e20f43a538d8c34db39629b415b01","src/server_events.rs":"27f23dc49f649fb66113c5a71345d9af30e7de04f791d4e1928d32c66b47d3f1","src/stream_type_reader.rs":"9eadcdf4ea223258f6a115c3c7e8c37228e4d7baee8eb8eb944175ed91a5cf36","tests/httpconn.rs":"32b5162ad8963a7079704858d537f9d7f3ba40d428620e5bc80fe1845908ce75"},"package":null}
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "neqo-http3"
|
||||
version = "0.2.4"
|
||||
version = "0.4.0"
|
||||
authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
|
||||
edition = "2018"
|
||||
license = "MIT/Apache-2.0"
|
||||
|
|
|
@ -4,7 +4,12 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
|
||||
use crate::connection::Http3State;
|
||||
use crate::recv_message::RecvMessageEvents;
|
||||
use crate::send_message::SendMessageEvents;
|
||||
use crate::Header;
|
||||
use neqo_common::matches;
|
||||
use neqo_transport::{AppError, StreamType};
|
||||
|
||||
|
@ -15,7 +20,11 @@ use std::rc::Rc;
|
|||
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
|
||||
pub enum Http3ClientEvent {
|
||||
/// Space available in the buffer for an application write to succeed.
|
||||
HeaderReady { stream_id: u64 },
|
||||
HeaderReady {
|
||||
stream_id: u64,
|
||||
headers: Option<Vec<Header>>,
|
||||
fin: bool,
|
||||
},
|
||||
/// A stream can accept new data.
|
||||
DataWritable { stream_id: u64 },
|
||||
/// New bytes available for reading.
|
||||
|
@ -43,20 +52,32 @@ pub struct Http3ClientEvents {
|
|||
events: Rc<RefCell<VecDeque<Http3ClientEvent>>>,
|
||||
}
|
||||
|
||||
impl Http3ClientEvents {
|
||||
pub fn header_ready(&self, stream_id: u64) {
|
||||
self.insert(Http3ClientEvent::HeaderReady { stream_id });
|
||||
impl RecvMessageEvents for Http3ClientEvents {
|
||||
/// Add a new `HeaderReady` event.
|
||||
fn header_ready(&self, stream_id: u64, headers: Option<Vec<Header>>, fin: bool) {
|
||||
self.insert(Http3ClientEvent::HeaderReady {
|
||||
stream_id,
|
||||
headers,
|
||||
fin,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn data_writable(&self, stream_id: u64) {
|
||||
self.insert(Http3ClientEvent::DataWritable { stream_id });
|
||||
}
|
||||
|
||||
pub fn data_readable(&self, stream_id: u64) {
|
||||
/// Add a new `DataReadable` event
|
||||
fn data_readable(&self, stream_id: u64) {
|
||||
self.insert(Http3ClientEvent::DataReadable { stream_id });
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stop_sending(&self, stream_id: u64, error: AppError) {
|
||||
impl SendMessageEvents for Http3ClientEvents {
|
||||
/// Add a new `DataWritable` event.
|
||||
fn data_writable(&self, stream_id: u64) {
|
||||
self.insert(Http3ClientEvent::DataWritable { stream_id });
|
||||
}
|
||||
}
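Editor's note: `HeaderReady` now delivers the decoded headers and the fin flag with the event itself. A minimal sketch of a consumer, assuming an `events` handle of type `Http3ClientEvents`; the loop and bindings are illustrative only.
// Sketch: drain events after this change.
while let Some(e) = events.next_event() {
    if let Http3ClientEvent::HeaderReady { stream_id, headers, fin } = e {
        // headers arrive with the event; fin marks the end of the response.
        println!("stream {}: {:?} fin={}", stream_id, headers, fin);
    }
}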
|
||||
|
||||
impl Http3ClientEvents {
|
||||
/// Add a new `StopSending` event
|
||||
pub(crate) fn stop_sending(&self, stream_id: u64, error: AppError) {
|
||||
// Remove DataWritable event if any.
|
||||
self.remove(|evt| {
|
||||
matches!(evt, Http3ClientEvent::DataWritable {
|
||||
|
@ -70,33 +91,40 @@ impl Http3ClientEvents {
|
|||
// self.insert(Http3ClientEvent::NewPushStream { stream_id });
|
||||
// }
|
||||
|
||||
pub fn new_requests_creatable(&self, stream_type: StreamType) {
|
||||
/// Add a new `RequestCreatable` event
|
||||
pub(crate) fn new_requests_creatable(&self, stream_type: StreamType) {
|
||||
if stream_type == StreamType::BiDi {
|
||||
self.insert(Http3ClientEvent::RequestsCreatable);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn authentication_needed(&self) {
|
||||
/// Add a new `AuthenticationNeeded` event
|
||||
pub(crate) fn authentication_needed(&self) {
|
||||
self.insert(Http3ClientEvent::AuthenticationNeeded);
|
||||
}
|
||||
|
||||
pub fn zero_rtt_rejected(&self) {
|
||||
/// Add a new `ZeroRttRejected` event.
|
||||
pub(crate) fn zero_rtt_rejected(&self) {
|
||||
self.insert(Http3ClientEvent::ZeroRttRejected);
|
||||
}
|
||||
|
||||
pub fn goaway_received(&self) {
|
||||
/// Add a new `GoawayReceived` event.
|
||||
pub(crate) fn goaway_received(&self) {
|
||||
self.remove(|evt| matches!(evt, Http3ClientEvent::RequestsCreatable));
|
||||
self.insert(Http3ClientEvent::GoawayReceived);
|
||||
}
|
||||
|
||||
pub fn events(&self) -> impl Iterator<Item = Http3ClientEvent> {
|
||||
/// Take all events currently in the queue.
|
||||
pub(crate) fn events(&self) -> impl Iterator<Item = Http3ClientEvent> {
|
||||
self.events.replace(VecDeque::new()).into_iter()
|
||||
}
|
||||
|
||||
/// Check if there is any event present.
|
||||
pub fn has_events(&self) -> bool {
|
||||
!self.events.borrow().is_empty()
|
||||
}
|
||||
|
||||
/// Take the first event.
|
||||
pub fn next_event(&self) -> Option<Http3ClientEvent> {
|
||||
self.events.borrow_mut().pop_front()
|
||||
}
|
||||
|
@ -112,12 +140,14 @@ impl Http3ClientEvents {
|
|||
self.events.borrow_mut().retain(|evt| !f(evt))
|
||||
}
|
||||
|
||||
pub fn reset(&self, stream_id: u64, error: AppError) {
|
||||
/// Add a new `Reset` event.
|
||||
pub(crate) fn reset(&self, stream_id: u64, error: AppError) {
|
||||
self.remove_events_for_stream_id(stream_id);
|
||||
self.insert(Http3ClientEvent::Reset { stream_id, error });
|
||||
}
|
||||
|
||||
pub fn connection_state_change(&self, state: Http3State) {
|
||||
/// Add a new `StateChange` event.
|
||||
pub(crate) fn connection_state_change(&self, state: Http3State) {
|
||||
// If closing, existing events no longer relevant.
|
||||
match state {
|
||||
Http3State::Closing { .. } | Http3State::Closed(_) => self.events.borrow_mut().clear(),
|
||||
|
@ -126,10 +156,11 @@ impl Http3ClientEvents {
|
|||
self.insert(Http3ClientEvent::StateChange(state));
|
||||
}
|
||||
|
||||
pub fn remove_events_for_stream_id(&self, stream_id: u64) {
|
||||
/// Remove all events for a stream
|
||||
pub(crate) fn remove_events_for_stream_id(&self, stream_id: u64) {
|
||||
self.remove(|evt| {
|
||||
matches!(evt,
|
||||
Http3ClientEvent::HeaderReady { stream_id: x }
|
||||
Http3ClientEvent::HeaderReady { stream_id: x, .. }
|
||||
| Http3ClientEvent::DataWritable { stream_id: x }
|
||||
| Http3ClientEvent::DataReadable { stream_id: x }
|
||||
| Http3ClientEvent::NewPushStream { stream_id: x }
|
||||
|
|
|
@ -4,14 +4,19 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
|
||||
use crate::control_stream_local::{ControlStreamLocal, HTTP3_UNI_STREAM_TYPE_CONTROL};
|
||||
use crate::control_stream_remote::ControlStreamRemote;
|
||||
use crate::hframe::HFrame;
|
||||
use crate::hsettings_frame::{HSetting, HSettingType, HSettings};
|
||||
use crate::recv_message::RecvMessage;
|
||||
use crate::send_message::SendMessage;
|
||||
use crate::stream_type_reader::NewStreamTypeReader;
|
||||
use neqo_common::{matches, qdebug, qerror, qinfo, qtrace, qwarn};
|
||||
use neqo_qpack::decoder::{QPackDecoder, QPACK_UNI_STREAM_TYPE_DECODER};
|
||||
use neqo_qpack::encoder::{QPackEncoder, QPACK_UNI_STREAM_TYPE_ENCODER};
|
||||
use neqo_qpack::QpackSettings;
|
||||
use neqo_transport::{AppError, CloseError, Connection, State, StreamType};
|
||||
use std::collections::{BTreeSet, HashMap};
|
||||
use std::fmt::Debug;
|
||||
|
@ -20,6 +25,7 @@ use std::mem;
|
|||
use crate::{Error, Res};
|
||||
|
||||
const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1;
|
||||
const QPACK_TABLE_SIZE_LIMIT: u64 = 1 << 30;
|
||||
|
||||
pub(crate) enum HandleReadableOutput {
|
||||
NoOutput,
|
||||
|
@ -27,16 +33,6 @@ pub(crate) enum HandleReadableOutput {
|
|||
ControlFrames(Vec<HFrame>),
|
||||
}
|
||||
|
||||
pub trait Http3Transaction: Debug {
|
||||
fn send(&mut self, conn: &mut Connection, encoder: &mut QPackEncoder) -> Res<()>;
|
||||
fn receive(&mut self, conn: &mut Connection, decoder: &mut QPackDecoder) -> Res<()>;
|
||||
fn has_data_to_send(&self) -> bool;
|
||||
fn reset_receiving_side(&mut self);
|
||||
fn stop_sending(&mut self);
|
||||
fn done(&self) -> bool;
|
||||
fn close_send(&mut self, conn: &mut Connection) -> Res<()>;
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
enum Http3RemoteSettingsState {
|
||||
NotReceived,
|
||||
|
@ -44,26 +40,27 @@ enum Http3RemoteSettingsState {
|
|||
ZeroRtt(HSettings),
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, PartialOrd, Ord, Eq, Clone)]
|
||||
struct LocalSettings {
|
||||
max_table_size: u64,
|
||||
max_blocked_streams: u16,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, PartialOrd, Ord, Eq, Clone)]
|
||||
pub enum Http3State {
|
||||
Initializing,
|
||||
ZeroRtt,
|
||||
Connected,
|
||||
GoingAway,
|
||||
GoingAway(u64),
|
||||
Closing(CloseError),
|
||||
Closed(CloseError),
|
||||
}
|
||||
|
||||
impl Http3State {
|
||||
#[must_use]
|
||||
pub fn active(&self) -> bool {
|
||||
matches!(self, Http3State::Connected | Http3State::GoingAway(_) | Http3State::ZeroRtt)
|
||||
}
|
||||
}
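Editor's note: `GoingAway` now carries the last accepted stream id and `active()` centralizes the liveness check; a one-line sketch of the intended call pattern (the `state` binding is illustrative).
// Sketch: instead of matching individual variants, callers ask the state.
if state.active() {
    // Connected, GoingAway(_) or ZeroRtt: streams can still make progress.
}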
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Http3Connection<T: Http3Transaction> {
|
||||
pub(crate) struct Http3Connection {
|
||||
pub state: Http3State,
|
||||
local_settings: LocalSettings,
|
||||
local_qpack_settings: QpackSettings,
|
||||
control_stream_local: ControlStreamLocal,
|
||||
control_stream_remote: ControlStreamRemote,
|
||||
new_streams: HashMap<u64, NewStreamTypeReader>,
|
||||
|
@ -71,34 +68,36 @@ pub struct Http3Connection<T: Http3Transaction> {
|
|||
pub qpack_decoder: QPackDecoder,
|
||||
settings_state: Http3RemoteSettingsState,
|
||||
streams_have_data_to_send: BTreeSet<u64>,
|
||||
pub transactions: HashMap<u64, T>,
|
||||
pub send_streams: HashMap<u64, SendMessage>,
|
||||
pub recv_streams: HashMap<u64, RecvMessage>,
|
||||
}
|
||||
|
||||
impl<T: Http3Transaction> ::std::fmt::Display for Http3Connection<T> {
|
||||
impl ::std::fmt::Display for Http3Connection {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "Http3 connection")
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Http3Transaction> Http3Connection<T> {
|
||||
pub fn new(max_table_size: u64, max_blocked_streams: u16) -> Self {
|
||||
if max_table_size > (1 << 30) - 1 {
|
||||
impl Http3Connection {
|
||||
/// Create a new connection.
|
||||
pub fn new(local_qpack_settings: QpackSettings) -> Self {
|
||||
if (local_qpack_settings.max_table_size_encoder >= QPACK_TABLE_SIZE_LIMIT)
|
||||
|| (local_qpack_settings.max_table_size_decoder >= QPACK_TABLE_SIZE_LIMIT)
|
||||
{
|
||||
panic!("Wrong max_table_size");
|
||||
}
|
||||
Self {
|
||||
state: Http3State::Initializing,
|
||||
local_settings: LocalSettings {
|
||||
max_table_size,
|
||||
max_blocked_streams,
|
||||
},
|
||||
local_qpack_settings,
|
||||
control_stream_local: ControlStreamLocal::default(),
|
||||
control_stream_remote: ControlStreamRemote::new(),
|
||||
new_streams: HashMap::new(),
|
||||
qpack_encoder: QPackEncoder::new(true),
|
||||
qpack_decoder: QPackDecoder::new(max_table_size, max_blocked_streams),
|
||||
qpack_encoder: QPackEncoder::new(local_qpack_settings, true),
|
||||
qpack_decoder: QPackDecoder::new(local_qpack_settings),
|
||||
settings_state: Http3RemoteSettingsState::NotReceived,
|
||||
streams_have_data_to_send: BTreeSet::new(),
|
||||
transactions: HashMap::new(),
|
||||
send_streams: HashMap::new(),
|
||||
recv_streams: HashMap::new(),
|
||||
}
|
||||
}
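Editor's note: a sketch of the adapted constructor call. The two table-size fields are the ones validated against QPACK_TABLE_SIZE_LIMIT above; the `max_blocked_streams` field name is an assumption carried over from the removed `LocalSettings`.
// Sketch only; the values are placeholders.
let http3 = Http3Connection::new(QpackSettings {
    max_table_size_encoder: 16 * 1024,
    max_table_size_decoder: 16 * 1024,
    max_blocked_streams: 10, // assumed field, mirrors the removed LocalSettings
});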
|
||||
|
||||
|
@ -113,7 +112,7 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
|
||||
fn send_settings(&mut self) {
|
||||
qdebug!([self], "Send settings.");
|
||||
self.control_stream_local.queue_frame(HFrame::Settings {
|
||||
self.control_stream_local.queue_frame(&HFrame::Settings {
|
||||
settings: HSettings::new(&[
|
||||
HSetting {
|
||||
setting_type: HSettingType::MaxTableCapacity,
|
||||
|
@ -136,25 +135,33 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Inform a `HttpConnection` that a stream has data to send and that `send` should be called for the stream.
|
||||
pub fn insert_streams_have_data_to_send(&mut self, stream_id: u64) {
|
||||
self.streams_have_data_to_send.insert(stream_id);
|
||||
}
|
||||
|
||||
/// Return true if there is a stream that needs to send data.
|
||||
pub fn has_data_to_send(&self) -> bool {
|
||||
!self.streams_have_data_to_send.is_empty()
|
||||
}
|
||||
|
||||
/// Call `send` for all streams that need to send data.
|
||||
pub fn process_sending(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
// check if control stream has data to send.
|
||||
self.control_stream_local.send(conn)?;
|
||||
|
||||
let to_send = mem::replace(&mut self.streams_have_data_to_send, BTreeSet::new());
|
||||
for stream_id in to_send {
|
||||
if let Some(t) = &mut self.transactions.get_mut(&stream_id) {
|
||||
t.send(conn, &mut self.qpack_encoder)?;
|
||||
if t.has_data_to_send() {
|
||||
let mut remove = false;
|
||||
if let Some(s) = &mut self.send_streams.get_mut(&stream_id) {
|
||||
s.send(conn, &mut self.qpack_encoder)?;
|
||||
if s.has_data_to_send() {
|
||||
self.streams_have_data_to_send.insert(stream_id);
|
||||
}
|
||||
remove = s.done();
|
||||
}
|
||||
if remove {
|
||||
self.send_streams.remove(&stream_id);
|
||||
}
|
||||
}
|
||||
self.qpack_decoder.send(conn)?;
|
||||
|
@ -162,22 +169,16 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_resumption_settings(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
settings: HSettings,
|
||||
) -> Res<()> {
|
||||
if let Http3State::Initializing = &self.state {
|
||||
self.state = Http3State::ZeroRtt;
|
||||
self.initialize_http3_connection(conn)?;
|
||||
self.set_qpack_settings(&settings)?;
|
||||
self.settings_state = Http3RemoteSettingsState::ZeroRtt(settings);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::Unexpected)
|
||||
}
|
||||
/// We have a resumption token that remembers previous settings. Update the settings.
|
||||
pub fn set_0rtt_settings(&mut self, conn: &mut Connection, settings: HSettings) -> Res<()> {
|
||||
self.state = Http3State::ZeroRtt;
|
||||
self.initialize_http3_connection(conn)?;
|
||||
self.set_qpack_settings(&settings)?;
|
||||
self.settings_state = Http3RemoteSettingsState::ZeroRtt(settings);
|
||||
Ok(())
|
||||
}
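Editor's note: `set_resumption_settings` becomes `set_0rtt_settings` and no longer checks for the Initializing state itself; a hedged sketch of the adapted caller, where `settings_from_token` stands for settings recovered from a resumption token.
// Sketch: called by the client once it decides to attempt 0-RTT.
http3.set_0rtt_settings(&mut conn, settings_from_token)?;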
|
||||
|
||||
/// Returns the settings for a connection. This is used for creating a resumption token.
|
||||
pub fn get_settings(&self) -> Option<HSettings> {
|
||||
if let Http3RemoteSettingsState::Received(settings) = &self.settings_state {
|
||||
Some(settings.clone())
|
||||
|
@ -186,16 +187,15 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
}
|
||||
}
|
||||
|
||||
// This function adds a new unidi stream and try to read its type. Http3Connection can handle
|
||||
// a Http3 Control stream, Qpack streams and an unknown stream, but it cannot handle a Push stream.
|
||||
// If a Push stream has been discovered, return true and let the Http3Client/Server handle it.
|
||||
/// This function adds a new unidi stream and tries to read its type. `Http3Connection` can handle
|
||||
/// an Http3 Control stream, Qpack streams and an unknown stream, but it cannot handle a Push stream.
|
||||
/// If a Push stream has been discovered, return true and let the `Http3Client`/`Server` handle it.
|
||||
pub fn handle_new_unidi_stream(&mut self, conn: &mut Connection, stream_id: u64) -> Res<bool> {
|
||||
qtrace!([self], "A new stream: {}.", stream_id);
|
||||
debug_assert!(self.state_active());
|
||||
let stream_type;
|
||||
let fin;
|
||||
{
|
||||
let ns = &mut self
|
||||
let ns = self
|
||||
.new_streams
|
||||
.entry(stream_id)
|
||||
.or_insert_with(NewStreamTypeReader::new);
|
||||
|
@ -214,27 +214,20 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
}
|
||||
}
|
||||
|
||||
// This function handles reading from all streams, i.e. control, qpack, request/response
|
||||
// stream and unidi stream that are still do not have a type.
|
||||
// The function cannot handle:
|
||||
// 1) a Push stream (if a unkown unidi stream is decoded to be a push stream)
|
||||
// 2) frames MaxPushId or Goaway must be handled by Http3Client/Server.
|
||||
// The function returns HandleReadableOutput.
|
||||
pub(crate) fn handle_stream_readable(
|
||||
/// This function handles reading from all streams, i.e. control, qpack, request/response
|
||||
/// streams and unidi streams that still do not have a type.
|
||||
/// The function cannot handle:
|
||||
/// 1) a Push stream (if an unknown unidi stream is decoded to be a push stream)
|
||||
/// 2) the `MaxPushId` and `Goaway` frames; these must be handled by `Http3Client`/`Server`.
|
||||
/// The function returns `HandleReadableOutput`.
|
||||
pub fn handle_stream_readable(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
stream_id: u64,
|
||||
) -> Res<HandleReadableOutput> {
|
||||
qtrace!([self], "Readable stream {}.", stream_id);
|
||||
|
||||
debug_assert!(self.state_active());
|
||||
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
let label = ::neqo_common::log_subject!(::log::Level::Debug, self);
|
||||
if self.handle_read_stream(conn, stream_id)? {
|
||||
qdebug!([label], "Request/response stream {} read.", stream_id);
|
||||
Ok(HandleReadableOutput::NoOutput)
|
||||
|
@ -310,6 +303,19 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn is_critical_stream(&self, stream_id: u64) -> bool {
|
||||
self.qpack_encoder
|
||||
.local_stream_id()
|
||||
.iter()
|
||||
.chain(self.qpack_encoder.remote_stream_id().iter())
|
||||
.chain(self.qpack_decoder.local_stream_id().iter())
|
||||
.chain(self.qpack_decoder.remote_stream_id().iter())
|
||||
.chain(self.control_stream_local.stream_id().iter())
|
||||
.chain(self.control_stream_remote.stream_id().iter())
|
||||
.any(|id| stream_id == *id)
|
||||
}
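Editor's note: `is_critical_stream` collects the QPACK and control stream ids so that losing any of them can be treated as a connection error; the call-site pattern used later in this patch is sketched below.
// Sketch of the call-site pattern (see handle_stream_reset and the server handler):
if self.is_critical_stream(stream_id) {
    return Err(Error::HttpClosedCriticalStream);
}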
|
||||
|
||||
/// This is called when a RESET frame has been received.
|
||||
pub fn handle_stream_reset(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
|
@ -323,23 +329,25 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
app_err
|
||||
);
|
||||
|
||||
debug_assert!(self.state_active());
|
||||
// We want to execute both statements, therefore we use | instead of ||.
|
||||
let found = self.recv_streams.remove(&stream_id).is_some()
|
||||
| self.send_streams.remove(&stream_id).is_some();
|
||||
|
||||
if let Some(t) = self.transactions.get_mut(&stream_id) {
|
||||
// Close both sides of the transaction_client.
|
||||
t.reset_receiving_side();
|
||||
t.stop_sending();
|
||||
// close sending side of the transport stream as well. The server may have done
|
||||
// it as well, but just to be sure.
|
||||
let _ = conn.stream_reset_send(stream_id, app_err);
|
||||
// remove the stream
|
||||
self.transactions.remove(&stream_id);
|
||||
// close sending side of the transport stream as well. The server may have done
|
||||
// it as well, but just to be sure.
|
||||
let _ = conn.stream_reset_send(stream_id, app_err);
|
||||
|
||||
if found {
|
||||
Ok(true)
|
||||
} else if self.is_critical_stream(stream_id) {
|
||||
Err(Error::HttpClosedCriticalStream)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/// This is called when the `neqo_transport::Connection` state has changed, to take proper actions in
|
||||
/// the HTTP3 layer.
|
||||
pub fn handle_state_change(&mut self, conn: &mut Connection, state: &State) -> Res<bool> {
|
||||
qdebug!([self], "Handle state change {:?}", state);
|
||||
match state {
|
||||
|
@ -355,77 +363,66 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
Ok(true)
|
||||
}
|
||||
State::Closing { error, .. } => {
|
||||
if !matches!(self.state, Http3State::Closing(_)| Http3State::Closed(_)) {
|
||||
if matches!(self.state, Http3State::Closing(_)| Http3State::Closed(_)) {
|
||||
Ok(false)
|
||||
} else {
|
||||
self.state = Http3State::Closing(error.clone().into());
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
State::Closed(error) => {
|
||||
if !matches!(self.state, Http3State::Closed(_)) {
|
||||
if matches!(self.state, Http3State::Closed(_)) {
|
||||
Ok(false)
|
||||
} else {
|
||||
self.state = Http3State::Closed(error.clone().into());
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
_ => Ok(false),
|
||||
}
|
||||
}
|
||||
|
||||
/// This is called when 0RTT has been reset, to clear `send_streams`, `recv_streams` and settings.
|
||||
pub fn handle_zero_rtt_rejected(&mut self) -> Res<()> {
|
||||
if self.state == Http3State::ZeroRtt {
|
||||
self.state = Http3State::Initializing;
|
||||
self.control_stream_local = ControlStreamLocal::default();
|
||||
self.control_stream_remote = ControlStreamRemote::new();
|
||||
self.new_streams.clear();
|
||||
self.qpack_encoder = QPackEncoder::new(true);
|
||||
self.qpack_decoder = QPackDecoder::new(
|
||||
self.local_settings.max_table_size,
|
||||
self.local_settings.max_blocked_streams,
|
||||
);
|
||||
self.qpack_encoder = QPackEncoder::new(self.local_qpack_settings, true);
|
||||
self.qpack_decoder = QPackDecoder::new(self.local_qpack_settings);
|
||||
self.settings_state = Http3RemoteSettingsState::NotReceived;
|
||||
self.streams_have_data_to_send.clear();
|
||||
// TODO: investigate whether this code can automatically retry failed transactions.
|
||||
self.transactions.clear();
|
||||
self.send_streams.clear();
|
||||
self.recv_streams.clear();
|
||||
Ok(())
|
||||
} else {
|
||||
debug_assert!(false, "Zero rtt rejected in the wrong state.");
|
||||
Err(Error::HttpInternalError)
|
||||
Err(Error::HttpInternal)
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_read_stream(&mut self, conn: &mut Connection, stream_id: u64) -> Res<bool> {
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
let label = ::neqo_common::log_subject!(::log::Level::Info, self);
|
||||
|
||||
debug_assert!(self.state_active());
|
||||
let r = self.recv_streams.get_mut(&stream_id);
|
||||
|
||||
if let Some(transaction) = &mut self.transactions.get_mut(&stream_id) {
|
||||
qinfo!(
|
||||
[label],
|
||||
"Request/response stream {} is readable.",
|
||||
stream_id
|
||||
);
|
||||
match transaction.receive(conn, &mut self.qpack_decoder) {
|
||||
Err(e) => {
|
||||
qerror!([label], "Error {} ocurred", e);
|
||||
return Err(e);
|
||||
}
|
||||
Ok(()) => {
|
||||
if transaction.done() {
|
||||
self.transactions.remove(&stream_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
} else {
|
||||
Ok(false)
|
||||
if r.is_none() {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let recv_stream = r.unwrap();
|
||||
qinfo!(
|
||||
[label],
|
||||
"Request/response stream {} is readable.",
|
||||
stream_id
|
||||
);
|
||||
recv_stream.receive(conn, &mut self.qpack_decoder)?;
|
||||
if recv_stream.done() {
|
||||
self.recv_streams.remove(&stream_id);
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// Returns true if it is a push stream.
|
||||
|
@ -449,32 +446,35 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
qinfo!([self], "A new remote qpack encoder stream {}", stream_id);
|
||||
self.qpack_decoder
|
||||
.add_recv_stream(stream_id)
|
||||
.map_err(|_| Error::HttpStreamCreationError)?;
|
||||
.map_err(|_| Error::HttpStreamCreation)?;
|
||||
Ok(false)
|
||||
}
|
||||
QPACK_UNI_STREAM_TYPE_DECODER => {
|
||||
qinfo!([self], "A new remote qpack decoder stream {}", stream_id);
|
||||
self.qpack_encoder
|
||||
.add_recv_stream(stream_id)
|
||||
.map_err(|_| Error::HttpStreamCreationError)?;
|
||||
.map_err(|_| Error::HttpStreamCreation)?;
|
||||
Ok(false)
|
||||
}
|
||||
_ => {
|
||||
conn.stream_stop_sending(stream_id, Error::HttpStreamCreationError.code())?;
|
||||
conn.stream_stop_sending(stream_id, Error::HttpStreamCreation.code())?;
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This is called when an application closes the connection.
|
||||
pub fn close(&mut self, error: AppError) {
|
||||
qinfo!([self], "Close connection error {:?}.", error);
|
||||
self.state = Http3State::Closing(CloseError::Application(error));
|
||||
if !self.transactions.is_empty() && (error == 0) {
|
||||
qwarn!("close() called when streams still active");
|
||||
if (!self.send_streams.is_empty() || !self.recv_streams.is_empty()) && (error == 0) {
|
||||
qwarn!("close(0) called when streams still active");
|
||||
}
|
||||
self.transactions.clear();
|
||||
self.send_streams.clear();
|
||||
self.recv_streams.clear();
|
||||
}
|
||||
|
||||
/// This is called when an application resets a stream.
|
||||
pub fn stream_reset(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
|
@ -482,29 +482,33 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
error: AppError,
|
||||
) -> Res<()> {
|
||||
qinfo!([self], "Reset stream {} error={}.", stream_id, error);
|
||||
let mut transaction = self
|
||||
.transactions
|
||||
.remove(&stream_id)
|
||||
.ok_or(Error::InvalidStreamId)?;
|
||||
transaction.stop_sending();
|
||||
|
||||
// We want to execute both statements, therefore we use | instead of ||.
|
||||
let found = self.send_streams.remove(&stream_id).is_some()
|
||||
| self.recv_streams.remove(&stream_id).is_some();
|
||||
|
||||
// The stream may already be closed and we may get an error here, but we do not care.
|
||||
let _ = conn.stream_reset_send(stream_id, error);
|
||||
transaction.reset_receiving_side();
|
||||
// The stream may already be closed and we may get an error here, but we do not care.
|
||||
conn.stream_stop_sending(stream_id, error)?;
|
||||
Ok(())
|
||||
let _ = conn.stream_stop_sending(stream_id, error);
|
||||
if found {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::InvalidStreamId)
|
||||
}
|
||||
}
|
||||
|
||||
/// This is called when an application wants to close the sending side of a stream.
|
||||
pub fn stream_close_send(&mut self, conn: &mut Connection, stream_id: u64) -> Res<()> {
|
||||
qinfo!([self], "Close sending side for stream {}.", stream_id);
|
||||
debug_assert!(self.state_active() || self.state_zero_rtt());
|
||||
let transaction = self
|
||||
.transactions
|
||||
qinfo!([self], "Close the sending side for stream {}.", stream_id);
|
||||
debug_assert!(self.state.active());
|
||||
let send_stream = self
|
||||
.send_streams
|
||||
.get_mut(&stream_id)
|
||||
.ok_or(Error::InvalidStreamId)?;
|
||||
transaction.close_send(conn)?;
|
||||
if transaction.done() {
|
||||
self.transactions.remove(&stream_id);
|
||||
send_stream.close(conn)?;
|
||||
if send_stream.done() {
|
||||
self.send_streams.remove(&stream_id);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
@ -528,7 +532,6 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
self.handle_settings(settings)?;
|
||||
Ok(None)
|
||||
}
|
||||
HFrame::CancelPush { .. } => Err(Error::HttpFrameUnexpected),
|
||||
HFrame::Goaway { .. } | HFrame::MaxPushId { .. } => Ok(Some(f)),
|
||||
_ => Err(Error::HttpFrameUnexpected),
|
||||
};
|
||||
|
@ -578,7 +581,7 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
zero_rtt_value,
|
||||
st
|
||||
);
|
||||
return Err(Error::HttpSettingsError);
|
||||
return Err(Error::HttpSettings);
|
||||
}
|
||||
|
||||
match st {
|
||||
|
@ -603,22 +606,22 @@ impl<T: Http3Transaction> Http3Connection<T> {
|
|||
}
|
||||
}
|
||||
|
||||
fn state_active(&self) -> bool {
|
||||
matches!(self.state, Http3State::Connected | Http3State::GoingAway)
|
||||
}
|
||||
|
||||
fn state_zero_rtt(&self) -> bool {
|
||||
matches!(self.state, Http3State::ZeroRtt)
|
||||
}
|
||||
|
||||
/// Return the current state on `Http3Connection`.
|
||||
pub fn state(&self) -> Http3State {
|
||||
self.state.clone()
|
||||
}
|
||||
|
||||
pub fn add_transaction(&mut self, stream_id: u64, transaction: T) {
|
||||
if transaction.has_data_to_send() {
|
||||
/// Adds a new transaction.
|
||||
pub fn add_streams(
|
||||
&mut self,
|
||||
stream_id: u64,
|
||||
send_stream: SendMessage,
|
||||
recv_stream: RecvMessage,
|
||||
) {
|
||||
if send_stream.has_data_to_send() {
|
||||
self.streams_have_data_to_send.insert(stream_id);
|
||||
}
|
||||
self.transactions.insert(stream_id, transaction);
|
||||
self.send_streams.insert(stream_id, send_stream);
|
||||
self.recv_streams.insert(stream_id, recv_stream);
|
||||
}
|
||||
}
|
||||
|
|
The diff for this file is not shown because of its size.
|
@ -4,18 +4,20 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::connection::{HandleReadableOutput, Http3Connection, Http3State, Http3Transaction};
|
||||
use crate::connection::{HandleReadableOutput, Http3Connection, Http3State};
|
||||
use crate::hframe::HFrame;
|
||||
use crate::recv_message::RecvMessage;
|
||||
use crate::send_message::SendMessage;
|
||||
use crate::server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents};
|
||||
use crate::transaction_server::TransactionServer;
|
||||
use crate::{Error, Header, Res};
|
||||
use neqo_common::{qdebug, qinfo, qtrace};
|
||||
use neqo_common::{matches, qdebug, qinfo, qtrace};
|
||||
use neqo_qpack::QpackSettings;
|
||||
use neqo_transport::{AppError, Connection, ConnectionEvent, StreamType};
|
||||
use std::time::Instant;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Http3ServerHandler {
|
||||
base_handler: Http3Connection<TransactionServer>,
|
||||
base_handler: Http3Connection,
|
||||
events: Http3ServerConnEvents,
|
||||
needs_processing: bool,
|
||||
}
|
||||
|
@ -27,24 +29,32 @@ impl ::std::fmt::Display for Http3ServerHandler {
|
|||
}
|
||||
|
||||
impl Http3ServerHandler {
|
||||
pub fn new(max_table_size: u64, max_blocked_streams: u16) -> Self {
|
||||
pub(crate) fn new(qpack_settings: QpackSettings) -> Self {
|
||||
Self {
|
||||
base_handler: Http3Connection::new(max_table_size, max_blocked_streams),
|
||||
base_handler: Http3Connection::new(qpack_settings),
|
||||
events: Http3ServerConnEvents::default(),
|
||||
needs_processing: false,
|
||||
}
|
||||
}
|
||||
pub fn set_response(&mut self, stream_id: u64, headers: &[Header], data: Vec<u8>) -> Res<()> {
|
||||
|
||||
/// Supply a response for a request.
|
||||
pub(crate) fn set_response(
|
||||
&mut self,
|
||||
stream_id: u64,
|
||||
headers: &[Header],
|
||||
data: &[u8],
|
||||
) -> Res<()> {
|
||||
self.base_handler
|
||||
.transactions
|
||||
.send_streams
|
||||
.get_mut(&stream_id)
|
||||
.ok_or(Error::InvalidStreamId)?
|
||||
.set_response(headers, data, &mut self.base_handler.qpack_encoder);
|
||||
.set_message(headers, Some(data))?;
|
||||
self.base_handler
|
||||
.insert_streams_have_data_to_send(stream_id);
|
||||
Ok(())
|
||||
}
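Editor's note: a sketch of calling the reworked `set_response`, which now borrows the body as `&[u8]` and can fail. Treating `Header` as a (name, value) pair of `String`s is an assumption not shown in this diff.
// Sketch only; the status line and body are placeholders.
let headers = [
    (String::from(":status"), String::from("200")),
    (String::from("content-length"), String::from("3")),
];
handler.set_response(stream_id, &headers, b"abc")?;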
|
||||
|
||||
/// Reset a request.
|
||||
pub fn stream_reset(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
|
@ -57,30 +67,27 @@ impl Http3ServerHandler {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Process the HTTP3 layer.
|
||||
pub fn process_http3(&mut self, conn: &mut Connection, now: Instant) {
|
||||
qtrace!([self], "Process http3 internal.");
|
||||
match self.base_handler.state() {
|
||||
Http3State::Connected | Http3State::GoingAway => {
|
||||
let res = self.check_connection_events(conn);
|
||||
if self.check_result(conn, now, res) {
|
||||
return;
|
||||
}
|
||||
let res = self.base_handler.process_sending(conn);
|
||||
self.check_result(conn, now, res);
|
||||
}
|
||||
Http3State::Closed { .. } => {}
|
||||
_ => {
|
||||
let res = self.check_connection_events(conn);
|
||||
let _ = self.check_result(conn, now, res);
|
||||
}
|
||||
if matches!(self.base_handler.state(), Http3State::Closed(..)) {
|
||||
return;
|
||||
}
|
||||
|
||||
let res = self.check_connection_events(conn);
|
||||
if !self.check_result(conn, now, &res) && self.base_handler.state().active() {
|
||||
let res = self.base_handler.process_sending(conn);
|
||||
self.check_result(conn, now, &res);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn next_event(&mut self) -> Option<Http3ServerConnEvent> {
|
||||
/// Take the next available event.
|
||||
pub(crate) fn next_event(&mut self) -> Option<Http3ServerConnEvent> {
|
||||
self.events.next_event()
|
||||
}
|
||||
|
||||
pub fn should_be_processed(&mut self) -> bool {
|
||||
/// Whether this connection has events to process or data to send.
|
||||
pub(crate) fn should_be_processed(&mut self) -> bool {
|
||||
if self.needs_processing {
|
||||
self.needs_processing = false;
|
||||
return true;
|
||||
|
@ -90,20 +97,24 @@ impl Http3ServerHandler {
|
|||
|
||||
// This function takes the provided result and checks for an error.
|
||||
// An error results in closing the connection.
|
||||
fn check_result<ERR>(&mut self, conn: &mut Connection, now: Instant, res: Res<ERR>) -> bool {
|
||||
fn check_result<ERR>(&mut self, conn: &mut Connection, now: Instant, res: &Res<ERR>) -> bool {
|
||||
match &res {
|
||||
Err(e) => {
|
||||
qinfo!([self], "Connection error: {}.", e);
|
||||
conn.close(now, e.code(), &format!("{}", e));
|
||||
self.base_handler.close(e.code());
|
||||
self.events
|
||||
.connection_state_change(self.base_handler.state());
|
||||
self.close(conn, now, e);
|
||||
true
|
||||
}
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn close(&mut self, conn: &mut Connection, now: Instant, err: &Error) {
|
||||
qinfo!([self], "Connection error: {}.", err);
|
||||
conn.close(now, err.code(), &format!("{}", err));
|
||||
self.base_handler.close(err.code());
|
||||
self.events
|
||||
.connection_state_change(self.base_handler.state());
|
||||
}
|
||||
|
||||
// If this returns an error, the connection must be closed.
|
||||
fn check_connection_events(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
qtrace!([self], "Check connection events.");
|
||||
|
@ -114,17 +125,17 @@ impl Http3ServerHandler {
|
|||
stream_id,
|
||||
stream_type,
|
||||
} => match stream_type {
|
||||
StreamType::BiDi => self.base_handler.add_transaction(
|
||||
StreamType::BiDi => self.base_handler.add_streams(
|
||||
stream_id,
|
||||
TransactionServer::new(stream_id, self.events.clone()),
|
||||
SendMessage::new(stream_id, Box::new(self.events.clone())),
|
||||
RecvMessage::new(stream_id, Box::new(self.events.clone()), None),
|
||||
),
|
||||
StreamType::UniDi => {
|
||||
if self.base_handler.handle_new_unidi_stream(conn, stream_id)? {
|
||||
return Err(Error::HttpStreamCreationError);
|
||||
return Err(Error::HttpStreamCreation);
|
||||
}
|
||||
}
|
||||
},
|
||||
ConnectionEvent::SendStreamWritable { .. } => {}
|
||||
ConnectionEvent::RecvStreamReadable { stream_id } => {
|
||||
self.handle_stream_readable(conn, stream_id)?
|
||||
}
|
||||
|
@ -139,17 +150,19 @@ impl Http3ServerHandler {
|
|||
ConnectionEvent::SendStreamStopSending {
|
||||
stream_id,
|
||||
app_error,
|
||||
} => self.handle_stream_stop_sending(conn, stream_id, app_error),
|
||||
ConnectionEvent::SendStreamComplete { .. } => {}
|
||||
ConnectionEvent::SendStreamCreatable { .. } => {}
|
||||
ConnectionEvent::AuthenticationNeeded => return Err(Error::HttpInternalError),
|
||||
} => self.handle_stream_stop_sending(conn, stream_id, app_error)?,
|
||||
ConnectionEvent::StateChange(state) => {
|
||||
if self.base_handler.handle_state_change(conn, &state)? {
|
||||
self.events
|
||||
.connection_state_change(self.base_handler.state());
|
||||
}
|
||||
}
|
||||
ConnectionEvent::ZeroRttRejected => return Err(Error::HttpInternalError),
|
||||
ConnectionEvent::AuthenticationNeeded | ConnectionEvent::ZeroRttRejected => {
|
||||
return Err(Error::HttpInternal)
|
||||
}
|
||||
ConnectionEvent::SendStreamWritable { .. }
|
||||
| ConnectionEvent::SendStreamComplete { .. }
|
||||
| ConnectionEvent::SendStreamCreatable { .. } => {}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
|
@ -157,9 +170,9 @@ impl Http3ServerHandler {
|
|||
|
||||
fn handle_stream_readable(&mut self, conn: &mut Connection, stream_id: u64) -> Res<()> {
|
||||
match self.base_handler.handle_stream_readable(conn, stream_id)? {
|
||||
HandleReadableOutput::PushStream => Err(Error::HttpStreamCreationError),
|
||||
HandleReadableOutput::PushStream => Err(Error::HttpStreamCreation),
|
||||
HandleReadableOutput::ControlFrames(control_frames) => {
|
||||
for f in control_frames.into_iter() {
|
||||
for f in control_frames {
|
||||
match f {
|
||||
HFrame::MaxPushId { .. } => {
|
||||
// TODO implement push
|
||||
|
@ -182,14 +195,55 @@ impl Http3ServerHandler {
|
|||
conn: &mut Connection,
|
||||
stop_stream_id: u64,
|
||||
app_err: AppError,
|
||||
) {
|
||||
if let Some(t) = self.base_handler.transactions.get_mut(&stop_stream_id) {
|
||||
// close sending side.
|
||||
t.stop_sending();
|
||||
) -> Res<()> {
|
||||
if self
|
||||
.base_handler
|
||||
.send_streams
|
||||
.remove(&stop_stream_id)
|
||||
.is_some()
|
||||
{
|
||||
// The receiving side may be closed already; just ignore an error in the following line.
|
||||
let _ = conn.stream_stop_sending(stop_stream_id, app_err);
|
||||
t.reset_receiving_side();
|
||||
self.base_handler.transactions.remove(&stop_stream_id);
|
||||
self.base_handler.recv_streams.remove(&stop_stream_id);
|
||||
} else if self.base_handler.is_critical_stream(stop_stream_id) {
|
||||
return Err(Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying
|
||||
/// data.
|
||||
/// # Errors
|
||||
/// It returns an error if a stream does not exist or an error happens while reading a stream, e.g.
|
||||
/// early close, protocol error, etc.
|
||||
pub fn read_request_data(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
now: Instant,
|
||||
stream_id: u64,
|
||||
buf: &mut [u8],
|
||||
) -> Res<(usize, bool)> {
|
||||
qinfo!([self], "read_data from stream {}.", stream_id);
|
||||
match self.base_handler.recv_streams.get_mut(&stream_id) {
|
||||
None => {
|
||||
self.close(conn, now, &Error::Internal);
|
||||
Err(Error::Internal)
|
||||
}
|
||||
Some(recv_stream) => {
|
||||
match recv_stream.read_data(conn, &mut self.base_handler.qpack_decoder, buf) {
|
||||
Ok((amount, fin)) => {
|
||||
if recv_stream.done() {
|
||||
self.base_handler.recv_streams.remove(&stream_id);
|
||||
}
|
||||
Ok((amount, fin))
|
||||
}
|
||||
Err(e) => {
|
||||
self.close(conn, now, &e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
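Editor's note: a usage sketch for the new `read_request_data`, which reads request data straight into a caller-supplied buffer; the buffer size and bindings are placeholders.
// Sketch: drain data for a readable request stream.
let mut buf = [0u8; 4096];
let (amount, fin) = handler.read_request_data(&mut conn, now, stream_id, &mut buf)?;
// The first `amount` bytes of buf are valid; fin is true once the body is complete.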
|
||||
|
|
|
@ -13,7 +13,7 @@ pub const HTTP3_UNI_STREAM_TYPE_CONTROL: u64 = 0x0;
|
|||
|
||||
// The local control stream, responsible for encoding frames and sending them
|
||||
#[derive(Default, Debug)]
|
||||
pub struct ControlStreamLocal {
|
||||
pub(crate) struct ControlStreamLocal {
|
||||
stream_id: Option<u64>,
|
||||
buf: Vec<u8>,
|
||||
}
|
||||
|
@ -25,12 +25,14 @@ impl ::std::fmt::Display for ControlStreamLocal {
|
|||
}
|
||||
|
||||
impl ControlStreamLocal {
|
||||
pub fn queue_frame(&mut self, f: HFrame) {
|
||||
/// Add a new frame that needs to be sent.
|
||||
pub fn queue_frame(&mut self, f: &HFrame) {
|
||||
let mut enc = Encoder::default();
|
||||
f.encode(&mut enc);
|
||||
self.buf.append(&mut enc.into());
|
||||
}
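Editor's note: `queue_frame` now takes the frame by reference, matching the adapted call in `send_settings` earlier in this patch; a one-line sketch of a caller (the push id is a placeholder).
// Sketch of a caller after the signature change:
self.control_stream_local.queue_frame(&HFrame::MaxPushId { push_id: 8 });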
|
||||
|
||||
/// Send control data if available.
|
||||
pub fn send(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
if let Some(stream_id) = self.stream_id {
|
||||
if !self.buf.is_empty() {
|
||||
|
@ -47,6 +49,7 @@ impl ControlStreamLocal {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a control stream.
|
||||
pub fn create(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
qtrace!([self], "Create a control stream.");
|
||||
self.stream_id = Some(conn.stream_create(StreamType::UniDi)?);
|
||||
|
@ -55,4 +58,9 @@ impl ControlStreamLocal {
|
|||
self.buf.append(&mut enc.into());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn stream_id(&self) -> Option<u64> {
|
||||
self.stream_id
|
||||
}
|
||||
}
|
||||
|
|
|
@ -9,9 +9,9 @@ use crate::{Error, Res};
|
|||
use neqo_common::{qdebug, qinfo};
|
||||
use neqo_transport::Connection;
|
||||
|
||||
// The remote control stream is responsible only for reading frames. The frames are handled by Http3Connection
|
||||
/// The remote control stream is responsible only for reading frames. The frames are handled by `Http3Connection`.
|
||||
#[derive(Debug)]
|
||||
pub struct ControlStreamRemote {
|
||||
pub(crate) struct ControlStreamRemote {
|
||||
stream_id: Option<u64>,
|
||||
frame_reader: HFrameReader,
|
||||
fin: bool,
|
||||
|
@ -32,16 +32,18 @@ impl ControlStreamRemote {
|
|||
}
|
||||
}
|
||||
|
||||
/// A remote control stream has been received. Inform `ControlStreamRemote`.
|
||||
pub fn add_remote_stream(&mut self, stream_id: u64) -> Res<()> {
|
||||
qinfo!([self], "A new control stream {}.", stream_id);
|
||||
if self.stream_id.is_some() {
|
||||
qdebug!([self], "A control stream already exists");
|
||||
return Err(Error::HttpStreamCreationError);
|
||||
return Err(Error::HttpStreamCreation);
|
||||
}
|
||||
self.stream_id = Some(stream_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if a stream is the control stream and read received data.
|
||||
pub fn receive_if_this_stream(&mut self, conn: &mut Connection, stream_id: u64) -> Res<bool> {
|
||||
if let Some(id) = self.stream_id {
|
||||
if id == stream_id {
|
||||
|
@ -64,4 +66,9 @@ impl ControlStreamRemote {
|
|||
pub fn get_frame(&mut self) -> Res<HFrame> {
|
||||
self.frame_reader.get_frame()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn stream_id(&self) -> Option<u64> {
|
||||
self.stream_id
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,35 +6,27 @@
|
|||
|
||||
use crate::hsettings_frame::HSettings;
|
||||
use neqo_common::{
|
||||
hex, qdebug, qtrace, Decoder, Encoder, IncrementalDecoder, IncrementalDecoderResult,
|
||||
hex_with_len, qdebug, qtrace, Decoder, Encoder, IncrementalDecoder, IncrementalDecoderResult,
|
||||
};
|
||||
use neqo_transport::Connection;
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::mem;
|
||||
|
||||
use crate::{Error, Res};
|
||||
|
||||
pub type HFrameType = u64;
|
||||
pub(crate) type HFrameType = u64;
|
||||
|
||||
pub const H3_FRAME_TYPE_DATA: HFrameType = 0x0;
|
||||
pub const H3_FRAME_TYPE_HEADERS: HFrameType = 0x1;
|
||||
pub(crate) const H3_FRAME_TYPE_DATA: HFrameType = 0x0;
|
||||
pub(crate) const H3_FRAME_TYPE_HEADERS: HFrameType = 0x1;
|
||||
const H3_FRAME_TYPE_CANCEL_PUSH: HFrameType = 0x3;
|
||||
const H3_FRAME_TYPE_SETTINGS: HFrameType = 0x4;
|
||||
const H3_FRAME_TYPE_PUSH_PROMISE: HFrameType = 0x5;
|
||||
const H3_FRAME_TYPE_GOAWAY: HFrameType = 0x7;
|
||||
const H3_FRAME_TYPE_MAX_PUSH_ID: HFrameType = 0xd;
|
||||
const H3_FRAME_TYPE_DUPLICATE_PUSH: HFrameType = 0xe;
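// Illustrative sketch (not part of this patch): on the wire every HTTP/3 frame is
// <type varint> <payload length varint> <payload>. Values below 64 encode as a
// single-byte QUIC varint, which is all this sketch handles; the result matches the
// "0d0105" test vector for HFrame::MaxPushId { push_id: 5 } in the tests below.
fn encode_small_varint(v: u64, out: &mut Vec<u8>) {
    assert!(v < 64, "this sketch only handles one-byte varints");
    out.push(v as u8);
}

fn main() {
    let mut frame = Vec::new();
    encode_small_varint(0xd, &mut frame); // H3_FRAME_TYPE_MAX_PUSH_ID
    encode_small_varint(1, &mut frame); // payload length: one varint byte
    encode_small_varint(5, &mut frame); // push_id = 5
    assert_eq!(frame, vec![0x0d, 0x01, 0x05]);
}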
|
||||
|
||||
#[derive(Copy, Clone, PartialEq)]
|
||||
pub enum HStreamType {
|
||||
Control,
|
||||
Request,
|
||||
Push,
|
||||
}
|
||||
|
||||
// Data for a DATA frame is not read into HFrame::Data.
|
||||
#[derive(PartialEq, Debug)]
|
||||
pub enum HFrame {
|
||||
pub(crate) enum HFrame {
|
||||
Data {
|
||||
len: u64, // length of the data
|
||||
},
|
||||
|
@ -57,9 +49,6 @@ pub enum HFrame {
|
|||
MaxPushId {
|
||||
push_id: u64,
|
||||
},
|
||||
DuplicatePush {
|
||||
push_id: u64,
|
||||
},
|
||||
}
|
||||
|
||||
impl HFrame {
|
||||
|
@ -72,7 +61,6 @@ impl HFrame {
|
|||
Self::PushPromise { .. } => H3_FRAME_TYPE_PUSH_PROMISE,
|
||||
Self::Goaway { .. } => H3_FRAME_TYPE_GOAWAY,
|
||||
Self::MaxPushId { .. } => H3_FRAME_TYPE_MAX_PUSH_ID,
|
||||
Self::DuplicatePush { .. } => H3_FRAME_TYPE_DUPLICATE_PUSH,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -113,24 +101,6 @@ impl HFrame {
|
|||
enc_inner.encode_varint(*push_id);
|
||||
});
|
||||
}
|
||||
Self::DuplicatePush { push_id } => {
|
||||
enc.encode_vvec_with(|enc_inner| {
|
||||
enc_inner.encode_varint(*push_id);
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_allowed(&self, s: HStreamType) -> bool {
|
||||
match self {
|
||||
Self::Data { .. } => !(s == HStreamType::Control),
|
||||
Self::Headers { .. } => !(s == HStreamType::Control),
|
||||
Self::CancelPush { .. } => (s == HStreamType::Control),
|
||||
Self::Settings { .. } => (s == HStreamType::Control),
|
||||
Self::PushPromise { .. } => (s == HStreamType::Request),
|
||||
Self::Goaway { .. } => (s == HStreamType::Control),
|
||||
Self::MaxPushId { .. } => (s == HStreamType::Control),
|
||||
Self::DuplicatePush { .. } => (s == HStreamType::Request),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -146,7 +116,7 @@ enum HFrameReaderState {
|
|||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct HFrameReader {
|
||||
pub(crate) struct HFrameReader {
|
||||
state: HFrameReaderState,
|
||||
decoder: IncrementalDecoder,
|
||||
hframe_type: u64,
|
||||
|
@ -161,6 +131,7 @@ impl Default for HFrameReader {
|
|||
}
|
||||
|
||||
impl HFrameReader {
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
state: HFrameReaderState::BeforeFrame,
|
||||
|
@ -176,18 +147,21 @@ impl HFrameReader {
|
|||
self.decoder = IncrementalDecoder::decode_varint();
|
||||
}
|
||||
|
||||
// returns true if quic stream was closed.
|
||||
#[allow(clippy::too_many_lines)]
|
||||
/// Returns `true` if the QUIC stream was closed.
|
||||
/// # Errors
|
||||
/// Returns an error if the frame is not complete.
|
||||
pub fn receive(&mut self, conn: &mut Connection, stream_id: u64) -> Res<bool> {
|
||||
loop {
|
||||
let to_read = std::cmp::min(self.decoder.min_remaining(), 4096);
|
||||
let mut buf = vec![0; to_read];
|
||||
let fin;
|
||||
let mut input = match conn.stream_recv(stream_id, &mut buf[..]) {
|
||||
let mut input = match conn.stream_recv(stream_id, &mut buf) {
|
||||
Ok((0, true)) => {
|
||||
qtrace!([conn], "HFrameReader::receive: stream has been closed");
|
||||
break match self.state {
|
||||
HFrameReaderState::BeforeFrame => Ok(true),
|
||||
_ => Err(Error::HttpFrameError),
|
||||
_ => Err(Error::HttpFrame),
|
||||
};
|
||||
}
|
||||
Ok((0, false)) => break Ok(false),
|
||||
|
@ -246,13 +220,14 @@ impl HFrameReader {
|
|||
| H3_FRAME_TYPE_SETTINGS
|
||||
| H3_FRAME_TYPE_GOAWAY
|
||||
| H3_FRAME_TYPE_MAX_PUSH_ID
|
||||
| H3_FRAME_TYPE_DUPLICATE_PUSH
|
||||
| H3_FRAME_TYPE_PUSH_PROMISE
|
||||
| H3_FRAME_TYPE_HEADERS => {
|
||||
if len == 0 {
|
||||
HFrameReaderState::Done
|
||||
} else {
|
||||
self.decoder = IncrementalDecoder::decode(len as usize);
|
||||
self.decoder = IncrementalDecoder::decode(
|
||||
usize::try_from(len).or(Err(Error::HttpFrame))?,
|
||||
);
|
||||
HFrameReaderState::GetData
|
||||
}
|
||||
}
|
||||
|
@ -261,7 +236,9 @@ impl HFrameReader {
|
|||
self.decoder = IncrementalDecoder::decode_varint();
|
||||
HFrameReaderState::BeforeFrame
|
||||
} else {
|
||||
self.decoder = IncrementalDecoder::ignore(len as usize);
|
||||
self.decoder = IncrementalDecoder::ignore(
|
||||
usize::try_from(len).or(Err(Error::HttpFrame))?,
|
||||
);
|
||||
HFrameReaderState::UnknownFrameDischargeData
|
||||
}
|
||||
}
|
||||
|
@ -278,7 +255,7 @@ impl HFrameReader {
|
|||
[conn],
|
||||
"received frame {}: {}",
|
||||
self.hframe_type,
|
||||
hex(&data[..])
|
||||
hex_with_len(&data[..])
|
||||
);
|
||||
self.payload = data;
|
||||
self.state = HFrameReaderState::Done;
|
||||
|
@ -309,16 +286,19 @@ impl HFrameReader {
|
|||
if self.state == HFrameReaderState::BeforeFrame {
|
||||
break Ok(fin);
|
||||
} else {
|
||||
break Err(Error::HttpFrameError);
|
||||
break Err(Error::HttpFrame);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn done(&self) -> bool {
|
||||
self.state == HFrameReaderState::Done
|
||||
}
|
||||
|
||||
/// # Errors
|
||||
/// May return `NotEnoughData` if the frame has not been read completely.
|
||||
pub fn get_frame(&mut self) -> Res<HFrame> {
|
||||
if self.state != HFrameReaderState::Done {
|
||||
return Err(Error::NotEnoughData);
|
||||
|
@ -366,12 +346,6 @@ impl HFrameReader {
|
|||
_ => return Err(Error::NotEnoughData),
|
||||
},
|
||||
},
|
||||
H3_FRAME_TYPE_DUPLICATE_PUSH => HFrame::DuplicatePush {
|
||||
push_id: match dec.decode_varint() {
|
||||
Some(v) => v,
|
||||
_ => return Err(Error::NotEnoughData),
|
||||
},
|
||||
},
|
||||
_ => panic!("We should not be in state Done with unknown frame type!"),
|
||||
};
|
||||
self.reset();
|
||||
|
@ -381,12 +355,12 @@ impl HFrameReader {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use super::{Encoder, Error, HFrame, HFrameReader, HSettings};
|
||||
use crate::hsettings_frame::{HSetting, HSettingType};
|
||||
use neqo_crypto::AuthenticationStatus;
|
||||
use neqo_transport::StreamType;
|
||||
use num_traits::Num;
|
||||
use test_fixture::*;
|
||||
use test_fixture::{connect, default_client, default_server, now};
|
||||
|
||||
#[allow(clippy::many_single_char_names)]
|
||||
fn enc_dec(f: &HFrame, st: &str, remaining: usize) {
|
||||
|
@ -430,7 +404,7 @@ mod tests {
|
|||
assert_eq!(Ok(false), fr.receive(&mut conn_c, stream_id));
|
||||
|
||||
// Check remaining data.
|
||||
let mut buf = [0u8; 100];
|
||||
let mut buf = [0_u8; 100];
|
||||
let (amount, _) = conn_c.stream_recv(stream_id, &mut buf).unwrap();
|
||||
assert_eq!(amount, remaining);
|
||||
|
||||
|
@ -491,12 +465,6 @@ mod tests {
|
|||
enc_dec(&f, "0d0105", 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_duplicate_push_frame4() {
|
||||
let f = HFrame::DuplicatePush { push_id: 5 };
|
||||
enc_dec(&f, "0e0105", 0);
|
||||
}
|
||||
|
||||
// We have 2 code paths in frame_reader:
|
||||
// 1) All frames except DATA (here we test SETTING and SETTINGS with larger varints and PUSH_PROMISE)
|
||||
// 2) DATA
|
||||
|
@ -655,7 +623,7 @@ mod tests {
|
|||
|
||||
// headers are still on the stream.
|
||||
// assert that we do not have any more data on the stream
|
||||
let mut buf = [0u8; 100];
|
||||
let mut buf = [0_u8; 100];
|
||||
let (amount, _) = conn_c.stream_recv(stream_id, &mut buf).unwrap();
|
||||
assert_eq!(amount, 0);
|
||||
|
||||
|
@ -694,7 +662,7 @@ mod tests {
|
|||
|
||||
// payload is still on the stream.
|
||||
// assert that we have 3 bytes in the stream
|
||||
let mut buf = [0u8; 100];
|
||||
let mut buf = [0_u8; 100];
|
||||
let (amount, _) = conn_c.stream_recv(stream_id, &mut buf).unwrap();
|
||||
assert_eq!(amount, 3);
|
||||
|
||||
|
@ -711,6 +679,9 @@ mod tests {
|
|||
// Test an unknown frame
|
||||
#[test]
|
||||
fn test_unknown_frame() {
|
||||
// Construct an unknown frame.
|
||||
const UNKNOWN_FRAME_LEN: usize = 832;
|
||||
|
||||
let (mut conn_c, mut conn_s) = connect();
|
||||
|
||||
// create a stream
|
||||
|
@ -718,10 +689,8 @@ mod tests {
|
|||
|
||||
let mut fr: HFrameReader = HFrameReader::new();
|
||||
|
||||
// Construct an unknown frame.
|
||||
const UNKNOWN_FRAME_LEN: usize = 832;
|
||||
let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
|
||||
enc.encode_varint(1028u64); // Arbitrary type.
|
||||
enc.encode_varint(1028_u64); // Arbitrary type.
|
||||
enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
|
||||
let mut buf: Vec<_> = enc.into();
|
||||
buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
|
||||
|
@ -762,8 +731,8 @@ mod tests {
|
|||
|
||||
fn test_reading_frame(
|
||||
buf: &[u8],
|
||||
test_to_send: FrameReadingTestSend,
|
||||
expected_result: FrameReadingTestExpect,
|
||||
test_to_send: &FrameReadingTestSend,
|
||||
expected_result: &FrameReadingTestExpect,
|
||||
) {
|
||||
let (mut conn_c, mut conn_s) = connect();
|
||||
|
||||
|
@ -788,7 +757,7 @@ mod tests {
|
|||
let rv = fr.receive(&mut conn_c, stream_id);
|
||||
|
||||
match expected_result {
|
||||
FrameReadingTestExpect::Error => assert_eq!(Err(Error::HttpFrameError), rv),
|
||||
FrameReadingTestExpect::Error => assert_eq!(Err(Error::HttpFrame), rv),
|
||||
FrameReadingTestExpect::Incomplete => {
|
||||
assert_eq!(Ok(false), rv);
|
||||
assert_eq!(false, fr.done());
|
||||
|
@ -813,7 +782,7 @@ mod tests {
|
|||
// Construct an unknown frame.
|
||||
const UNKNOWN_FRAME_LEN: usize = 832;
|
||||
let mut enc = Encoder::with_capacity(UNKNOWN_FRAME_LEN + 4);
|
||||
enc.encode_varint(1028u64); // Arbitrary type.
|
||||
enc.encode_varint(1028_u64); // Arbitrary type.
|
||||
enc.encode_varint(UNKNOWN_FRAME_LEN as u64);
|
||||
let mut buf: Vec<_> = enc.into();
|
||||
buf.resize(UNKNOWN_FRAME_LEN + buf.len(), 0);
|
||||
|
@ -822,34 +791,34 @@ mod tests {
|
|||
for i in 1..len {
|
||||
test_reading_frame(
|
||||
&buf[..i],
|
||||
FrameReadingTestSend::OnlyData,
|
||||
FrameReadingTestExpect::Incomplete,
|
||||
&FrameReadingTestSend::OnlyData,
|
||||
&FrameReadingTestExpect::Incomplete,
|
||||
);
|
||||
test_reading_frame(
|
||||
&buf[..i],
|
||||
FrameReadingTestSend::DataWithFin,
|
||||
FrameReadingTestExpect::Error,
|
||||
&FrameReadingTestSend::DataWithFin,
|
||||
&FrameReadingTestExpect::Error,
|
||||
);
|
||||
test_reading_frame(
|
||||
&buf[..i],
|
||||
FrameReadingTestSend::DataThenFin,
|
||||
FrameReadingTestExpect::Error,
|
||||
&FrameReadingTestSend::DataThenFin,
|
||||
&FrameReadingTestExpect::Error,
|
||||
);
|
||||
}
|
||||
test_reading_frame(
|
||||
&buf,
|
||||
FrameReadingTestSend::OnlyData,
|
||||
FrameReadingTestExpect::Incomplete,
|
||||
&FrameReadingTestSend::OnlyData,
|
||||
&FrameReadingTestExpect::Incomplete,
|
||||
);
|
||||
test_reading_frame(
|
||||
&buf,
|
||||
FrameReadingTestSend::DataWithFin,
|
||||
FrameReadingTestExpect::StreamDoneWithoutFrame,
|
||||
&FrameReadingTestSend::DataWithFin,
|
||||
&FrameReadingTestExpect::StreamDoneWithoutFrame,
|
||||
);
|
||||
test_reading_frame(
|
||||
&buf,
|
||||
FrameReadingTestSend::DataThenFin,
|
||||
FrameReadingTestExpect::StreamDoneWithoutFrame,
|
||||
&FrameReadingTestSend::DataThenFin,
|
||||
&FrameReadingTestExpect::StreamDoneWithoutFrame,
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -863,53 +832,53 @@ mod tests {
|
|||
for i in 1..len {
|
||||
test_reading_frame(
|
||||
&buf[..i],
|
||||
FrameReadingTestSend::OnlyData,
|
||||
&FrameReadingTestSend::OnlyData,
|
||||
if i >= done_state {
|
||||
FrameReadingTestExpect::FrameComplete
|
||||
&FrameReadingTestExpect::FrameComplete
|
||||
} else {
|
||||
FrameReadingTestExpect::Incomplete
|
||||
&FrameReadingTestExpect::Incomplete
|
||||
},
|
||||
);
|
||||
test_reading_frame(
|
||||
&buf[..i],
|
||||
FrameReadingTestSend::DataWithFin,
|
||||
&FrameReadingTestSend::DataWithFin,
|
||||
match i.cmp(&done_state) {
|
||||
Ordering::Greater => FrameReadingTestExpect::FrameComplete,
|
||||
Ordering::Equal => FrameReadingTestExpect::FrameAndStreamComplete,
|
||||
Ordering::Less => FrameReadingTestExpect::Error,
|
||||
Ordering::Greater => &FrameReadingTestExpect::FrameComplete,
|
||||
Ordering::Equal => &FrameReadingTestExpect::FrameAndStreamComplete,
|
||||
Ordering::Less => &FrameReadingTestExpect::Error,
|
||||
},
|
||||
);
|
||||
test_reading_frame(
|
||||
&buf[..i],
|
||||
FrameReadingTestSend::DataThenFin,
|
||||
&FrameReadingTestSend::DataThenFin,
|
||||
match i.cmp(&done_state) {
|
||||
Ordering::Greater => FrameReadingTestExpect::FrameComplete,
|
||||
Ordering::Equal => FrameReadingTestExpect::FrameAndStreamComplete,
|
||||
Ordering::Less => FrameReadingTestExpect::Error,
|
||||
Ordering::Greater => &FrameReadingTestExpect::FrameComplete,
|
||||
Ordering::Equal => &FrameReadingTestExpect::FrameAndStreamComplete,
|
||||
Ordering::Less => &FrameReadingTestExpect::Error,
|
||||
},
|
||||
);
|
||||
}
|
||||
test_reading_frame(
|
||||
buf,
|
||||
FrameReadingTestSend::OnlyData,
|
||||
FrameReadingTestExpect::FrameComplete,
|
||||
&FrameReadingTestSend::OnlyData,
|
||||
&FrameReadingTestExpect::FrameComplete,
|
||||
);
|
||||
test_reading_frame(
|
||||
buf,
|
||||
FrameReadingTestSend::DataWithFin,
|
||||
&FrameReadingTestSend::DataWithFin,
|
||||
if buf.len() == done_state {
|
||||
FrameReadingTestExpect::FrameAndStreamComplete
|
||||
&FrameReadingTestExpect::FrameAndStreamComplete
|
||||
} else {
|
||||
FrameReadingTestExpect::FrameComplete
|
||||
&FrameReadingTestExpect::FrameComplete
|
||||
},
|
||||
);
|
||||
test_reading_frame(
|
||||
buf,
|
||||
FrameReadingTestSend::DataThenFin,
|
||||
&FrameReadingTestSend::DataThenFin,
|
||||
if buf.len() == done_state {
|
||||
FrameReadingTestExpect::FrameAndStreamComplete
|
||||
&FrameReadingTestExpect::FrameAndStreamComplete
|
||||
} else {
|
||||
FrameReadingTestExpect::FrameComplete
|
||||
&FrameReadingTestExpect::FrameComplete
|
||||
},
|
||||
);
|
||||
}
|
||||
|
@ -993,13 +962,6 @@ mod tests {
|
|||
f.encode(&mut enc);
|
||||
let buf: Vec<_> = enc.into();
|
||||
test_complete_and_incomplete_frame(&buf, buf.len());
|
||||
|
||||
// H3_FRAME_TYPE_DUPLICATE_PUSH
|
||||
let f = HFrame::DuplicatePush { push_id: 5 };
|
||||
let mut enc = Encoder::default();
|
||||
f.encode(&mut enc);
|
||||
let buf: Vec<_> = enc.into();
|
||||
test_complete_and_incomplete_frame(&buf, buf.len());
|
||||
}
|
||||
|
||||
// Test that closing a stream before any frame is sent does not cause an error.
|
||||
|
|
|
@ -15,7 +15,7 @@ const SETTINGS_QPACK_MAX_TABLE_CAPACITY: SettingsType = 0x1;
|
|||
const SETTINGS_QPACK_BLOCKED_STREAMS: SettingsType = 0x7;
|
||||
|
||||
#[derive(Clone, PartialEq, Debug, Copy)]
|
||||
pub enum HSettingType {
|
||||
pub(crate) enum HSettingType {
|
||||
MaxHeaderListSize,
|
||||
MaxTableCapacity,
|
||||
BlockedStreams,
|
||||
|
@ -24,13 +24,12 @@ pub enum HSettingType {
|
|||
fn hsetting_default(setting_type: HSettingType) -> u64 {
|
||||
match setting_type {
|
||||
HSettingType::MaxHeaderListSize => 1 << 62,
|
||||
HSettingType::MaxTableCapacity => 0,
|
||||
HSettingType::BlockedStreams => 0,
|
||||
HSettingType::MaxTableCapacity | HSettingType::BlockedStreams => 0,
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct HSetting {
|
||||
pub(crate) struct HSetting {
|
||||
pub setting_type: HSettingType,
|
||||
pub value: u64,
|
||||
}
|
||||
|
@ -45,7 +44,7 @@ impl HSetting {
|
|||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq)]
|
||||
pub struct HSettings {
|
||||
pub(crate) struct HSettings {
|
||||
settings: Vec<HSetting>,
|
||||
}
|
||||
|
||||
|
@ -65,7 +64,7 @@ impl HSettings {
|
|||
|
||||
pub fn encode_frame_contents(&self, enc: &mut Encoder) {
|
||||
enc.encode_vvec_with(|enc_inner| {
|
||||
for iter in self.settings.iter() {
|
||||
for iter in &self.settings {
|
||||
match iter.setting_type {
|
||||
HSettingType::MaxHeaderListSize => {
|
||||
enc_inner.encode_varint(SETTINGS_MAX_HEADER_LIST_SIZE as u64);
|
||||
|
|
|
@ -5,7 +5,8 @@
|
|||
// except according to those terms.
|
||||
|
||||
#![cfg_attr(feature = "deny-warnings", deny(warnings))]
|
||||
#![warn(clippy::use_self)]
|
||||
#![warn(clippy::pedantic)]
|
||||
#![allow(clippy::pub_enum_variant_names)]
|
||||
|
||||
mod client_events;
|
||||
mod connection;
|
||||
|
@ -15,14 +16,13 @@ mod control_stream_local;
|
|||
mod control_stream_remote;
|
||||
pub mod hframe;
|
||||
mod hsettings_frame;
|
||||
mod response_stream;
|
||||
mod push_controller;
|
||||
mod recv_message;
|
||||
mod send_message;
|
||||
pub mod server;
|
||||
mod server_connection_events;
|
||||
mod server_events;
|
||||
mod stream_type_reader;
|
||||
mod transaction_client;
|
||||
pub mod transaction_server;
|
||||
//pub mod server;
|
||||
|
||||
use neqo_qpack::Error as QpackError;
|
||||
pub use neqo_transport::Output;
|
||||
|
@ -34,62 +34,64 @@ pub use connection_client::Http3Client;
|
|||
pub use neqo_qpack::Header;
|
||||
pub use server::Http3Server;
|
||||
pub use server_events::Http3ServerEvent;
|
||||
pub use transaction_server::TransactionServer;
|
||||
|
||||
type Res<T> = Result<T, Error>;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
HttpNoError,
|
||||
HttpGeneralProtocolError,
|
||||
HttpInternalError,
|
||||
HttpStreamCreationError,
|
||||
HttpGeneralProtocol,
|
||||
HttpInternal,
|
||||
HttpStreamCreation,
|
||||
HttpClosedCriticalStream,
|
||||
HttpFrameUnexpected,
|
||||
HttpFrameError,
|
||||
HttpFrame,
|
||||
HttpExcessiveLoad,
|
||||
HttpIdError,
|
||||
HttpSettingsError,
|
||||
HttpId,
|
||||
HttpSettings,
|
||||
HttpMissingSettings,
|
||||
HttpRequestRejected,
|
||||
HttpRequestCancelled,
|
||||
HttpRequestIncomplete,
|
||||
HttpEarlyResponse,
|
||||
HttpConnectError,
|
||||
HttpConnect,
|
||||
HttpVersionFallback,
|
||||
QpackError(neqo_qpack::Error),
|
||||
|
||||
// Internal errors from here.
|
||||
AlreadyClosed,
|
||||
AlreadyInitialized,
|
||||
DecodingFrame,
|
||||
HttpGoaway,
|
||||
Internal,
|
||||
InvalidResumptionToken,
|
||||
InvalidStreamId,
|
||||
InvalidState,
|
||||
NoMoreData,
|
||||
NotEnoughData,
|
||||
TransportError(TransportError),
|
||||
Unavailable,
|
||||
Unexpected,
|
||||
InvalidResumptionToken,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
#[must_use]
|
||||
pub fn code(&self) -> AppError {
|
||||
match self {
|
||||
Self::HttpNoError => 0x100,
|
||||
Self::HttpGeneralProtocolError => 0x101,
|
||||
Self::HttpInternalError => 0x102,
|
||||
Self::HttpStreamCreationError => 0x103,
|
||||
Self::HttpGeneralProtocol => 0x101,
|
||||
Self::HttpInternal => 0x102,
|
||||
Self::HttpStreamCreation => 0x103,
|
||||
Self::HttpClosedCriticalStream => 0x104,
|
||||
Self::HttpFrameUnexpected => 0x105,
|
||||
Self::HttpFrameError => 0x106,
|
||||
Self::HttpFrame => 0x106,
|
||||
Self::HttpExcessiveLoad => 0x107,
|
||||
Self::HttpIdError => 0x108,
|
||||
Self::HttpSettingsError => 0x109,
|
||||
Self::HttpId => 0x108,
|
||||
Self::HttpSettings => 0x109,
|
||||
Self::HttpMissingSettings => 0x10a,
|
||||
Self::HttpRequestRejected => 0x10b,
|
||||
Self::HttpRequestCancelled => 0x10c,
|
||||
Self::HttpRequestIncomplete => 0x10d,
|
||||
Self::HttpEarlyResponse => 0x10e,
|
||||
Self::HttpConnectError => 0x10f,
|
||||
Self::HttpConnect => 0x10f,
|
||||
Self::HttpVersionFallback => 0x110,
|
||||
Self::QpackError(e) => e.code(),
|
||||
// These are all internal errors.
|
||||
|
@ -114,26 +116,24 @@ impl From<AppError> for Error {
|
|||
fn from(error: AppError) -> Self {
|
||||
match error {
|
||||
0x100 => Self::HttpNoError,
|
||||
0x101 => Self::HttpGeneralProtocolError,
|
||||
0x102 => Self::HttpInternalError,
|
||||
0x103 => Self::HttpStreamCreationError,
|
||||
0x101 => Self::HttpGeneralProtocol,
|
||||
0x103 => Self::HttpStreamCreation,
|
||||
0x104 => Self::HttpClosedCriticalStream,
|
||||
0x105 => Self::HttpFrameUnexpected,
|
||||
0x106 => Self::HttpFrameError,
|
||||
0x106 => Self::HttpFrame,
|
||||
0x107 => Self::HttpExcessiveLoad,
|
||||
0x108 => Self::HttpIdError,
|
||||
0x109 => Self::HttpSettingsError,
|
||||
0x108 => Self::HttpId,
|
||||
0x109 => Self::HttpSettings,
|
||||
0x10a => Self::HttpMissingSettings,
|
||||
0x10b => Self::HttpRequestRejected,
|
||||
0x10c => Self::HttpRequestCancelled,
|
||||
0x10d => Self::HttpRequestIncomplete,
|
||||
0x10e => Self::HttpEarlyResponse,
|
||||
0x10f => Self::HttpConnectError,
|
||||
0x10f => Self::HttpConnect,
|
||||
0x110 => Self::HttpVersionFallback,
|
||||
0x200 => Self::QpackError(QpackError::DecompressionFailed),
|
||||
0x201 => Self::QpackError(QpackError::EncoderStream),
|
||||
0x202 => Self::QpackError(QpackError::DecoderStream),
|
||||
_ => Self::HttpInternalError,
|
||||
_ => Self::HttpInternal,
|
||||
}
|
||||
}
|
||||
}
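// Illustrative sketch (not part of this patch): the renamed variants keep their
// HTTP/3 application error codes, so mapping an error to its wire code and back is
// symmetric for the codes listed above. `MiniError` is a stand-in defined only for
// this example; the real mapping lives in Error::code() and From<AppError>.
#[derive(Debug, Clone, PartialEq)]
enum MiniError {
    HttpFrame,          // wire code 0x106 (previously HttpFrameError)
    HttpStreamCreation, // wire code 0x103 (previously HttpStreamCreationError)
}

fn code(e: &MiniError) -> u64 {
    match e {
        MiniError::HttpFrame => 0x106,
        MiniError::HttpStreamCreation => 0x103,
    }
}

fn from_code(c: u64) -> Option<MiniError> {
    match c {
        0x106 => Some(MiniError::HttpFrame),
        0x103 => Some(MiniError::HttpStreamCreation),
        _ => None,
    }
}

fn main() {
    let e = MiniError::HttpFrame;
    assert_eq!(from_code(code(&e)), Some(e));
}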
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::{Error, Res};
|
||||
use neqo_common::qtrace;
|
||||
use std::fmt::Debug;
|
||||
use std::fmt::Display;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct PushController {}
|
||||
|
||||
impl PushController {
|
||||
pub fn new() -> Self {
|
||||
PushController {}
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for PushController {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "Push controler")
|
||||
}
|
||||
}
|
||||
|
||||
impl PushController {
|
||||
#[allow(clippy::needless_pass_by_value)]
|
||||
pub fn new_push_promise(&self, push_id: u64, header_block: Vec<u8>) -> Res<()> {
|
||||
qtrace!(
|
||||
[self],
|
||||
"New push promise push_id={} header_block={:?}",
|
||||
push_id,
|
||||
header_block
|
||||
);
|
||||
qtrace!("A new push promise {} {:?}", push_id, header_block);
|
||||
Err(Error::HttpId)
|
||||
}
|
||||
}
|
|
@ -4,14 +4,22 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::client_events::Http3ClientEvents;
|
||||
use crate::hframe::{HFrame, HFrameReader};
|
||||
use crate::push_controller::PushController;
|
||||
use crate::{Error, Header, Res};
|
||||
use neqo_common::{matches, qdebug, qinfo, qtrace};
|
||||
use neqo_qpack::decoder::QPackDecoder;
|
||||
use neqo_transport::Connection;
|
||||
use std::cell::RefCell;
|
||||
use std::cmp::min;
|
||||
use std::mem;
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt::Debug;
|
||||
use std::rc::Rc;
|
||||
|
||||
pub(crate) trait RecvMessageEvents: Debug {
|
||||
fn header_ready(&self, stream_id: u64, headers: Option<Vec<Header>>, fin: bool);
|
||||
fn data_readable(&self, stream_id: u64);
|
||||
}
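// Illustrative sketch (not part of this patch): headers and body data are now
// delivered through this events trait instead of being pulled with
// read_response_headers. `RecordingEvents` mirrors the trait's shape and just
// records what it is told; the (String, String) header pair is an assumption made
// only for the example.
#[derive(Debug, Default)]
struct RecordingEvents {
    headers_ready: std::cell::RefCell<Vec<u64>>,
    data_readable: std::cell::RefCell<Vec<u64>>,
}

impl RecordingEvents {
    fn header_ready(&self, stream_id: u64, _headers: Option<Vec<(String, String)>>, _fin: bool) {
        self.headers_ready.borrow_mut().push(stream_id);
    }
    fn data_readable(&self, stream_id: u64) {
        self.data_readable.borrow_mut().push(stream_id);
    }
}

fn main() {
    let ev = RecordingEvents::default();
    ev.header_ready(0, None, true);
    ev.data_readable(4);
    assert_eq!(*ev.headers_ready.borrow(), vec![0]);
    assert_eq!(*ev.data_readable.borrow(), vec![4]);
}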
|
||||
|
||||
/*
|
||||
* Response stream state:
|
||||
|
@ -30,7 +38,7 @@ use std::mem;
|
|||
* Closed
|
||||
*/
|
||||
#[derive(PartialEq, Debug)]
|
||||
enum ResponseStreamState {
|
||||
enum RecvMessageState {
|
||||
WaitingForResponseHeaders,
|
||||
DecodingHeaders { header_block: Vec<u8>, fin: bool },
|
||||
WaitingForData,
|
||||
|
@ -40,53 +48,50 @@ enum ResponseStreamState {
|
|||
Closed,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
enum ResponseHeadersState {
|
||||
NoHeaders,
|
||||
Ready(Option<Vec<Header>>),
|
||||
Read,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct ResponseStream {
|
||||
state: ResponseStreamState,
|
||||
pub(crate) struct RecvMessage {
|
||||
state: RecvMessageState,
|
||||
frame_reader: HFrameReader,
|
||||
response_headers_state: ResponseHeadersState,
|
||||
conn_events: Http3ClientEvents,
|
||||
conn_events: Box<dyn RecvMessageEvents>,
|
||||
push_handler: Option<Rc<RefCell<PushController>>>,
|
||||
stream_id: u64,
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for ResponseStream {
|
||||
impl ::std::fmt::Display for RecvMessage {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "ResponseStream stream_id:{}", self.stream_id)
|
||||
write!(f, "RecvMessage stream_id:{}", self.stream_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl ResponseStream {
|
||||
pub fn new(stream_id: u64, conn_events: Http3ClientEvents) -> Self {
|
||||
impl RecvMessage {
|
||||
pub fn new(
|
||||
stream_id: u64,
|
||||
conn_events: Box<dyn RecvMessageEvents>,
|
||||
push_handler: Option<Rc<RefCell<PushController>>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
state: ResponseStreamState::WaitingForResponseHeaders,
|
||||
state: RecvMessageState::WaitingForResponseHeaders,
|
||||
frame_reader: HFrameReader::new(),
|
||||
response_headers_state: ResponseHeadersState::NoHeaders,
|
||||
conn_events,
|
||||
push_handler,
|
||||
stream_id,
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_headers_frame(&mut self, header_block: Vec<u8>, fin: bool) -> Res<()> {
|
||||
match self.state {
|
||||
ResponseStreamState::WaitingForResponseHeaders => {
|
||||
RecvMessageState::WaitingForResponseHeaders => {
|
||||
if header_block.is_empty() {
|
||||
self.add_headers(None)?;
|
||||
self.add_headers(None, fin);
|
||||
} else {
|
||||
self.state = ResponseStreamState::DecodingHeaders { header_block, fin };
|
||||
self.state = RecvMessageState::DecodingHeaders { header_block, fin };
|
||||
}
|
||||
}
|
||||
ResponseStreamState::WaitingForData => {
|
||||
RecvMessageState::WaitingForData => {
|
||||
// TODO implement trailers, for now just ignore them.
|
||||
self.state = ResponseStreamState::WaitingForFinAfterTrailers;
|
||||
self.state = RecvMessageState::WaitingForFinAfterTrailers;
|
||||
}
|
||||
ResponseStreamState::WaitingForFinAfterTrailers => {
|
||||
RecvMessageState::WaitingForFinAfterTrailers => {
|
||||
return Err(Error::HttpFrameUnexpected);
|
||||
}
|
||||
_ => unreachable!("This functions is only called in WaitingForResponseHeaders | WaitingForData | WaitingForFinAfterTrailers state.")
|
||||
|
@ -96,16 +101,16 @@ impl ResponseStream {
|
|||
|
||||
fn handle_data_frame(&mut self, len: u64, fin: bool) -> Res<()> {
|
||||
match self.state {
|
||||
ResponseStreamState::WaitingForResponseHeaders | ResponseStreamState::WaitingForFinAfterTrailers => {
|
||||
RecvMessageState::WaitingForResponseHeaders | RecvMessageState::WaitingForFinAfterTrailers => {
|
||||
return Err(Error::HttpFrameUnexpected);
|
||||
}
|
||||
ResponseStreamState::WaitingForData => {
|
||||
RecvMessageState::WaitingForData => {
|
||||
if len > 0 {
|
||||
if fin {
|
||||
return Err(Error::HttpFrameError);
|
||||
return Err(Error::HttpFrame);
|
||||
}
|
||||
self.state = ResponseStreamState::ReadingData {
|
||||
remaining_data_len: len as usize,
|
||||
self.state = RecvMessageState::ReadingData {
|
||||
remaining_data_len: usize::try_from(len).or(Err(Error::HttpFrame))?,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -114,18 +119,15 @@ impl ResponseStream {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn add_headers(&mut self, headers: Option<Vec<Header>>) -> Res<()> {
|
||||
if self.response_headers_state != ResponseHeadersState::NoHeaders {
|
||||
debug_assert!(
|
||||
false,
|
||||
"self.response_headers_state must be in state ResponseHeadersState::NoHeaders."
|
||||
);
|
||||
return Err(Error::HttpInternalError);
|
||||
fn add_headers(&mut self, headers: Option<Vec<Header>>, fin: bool) {
|
||||
if fin {
|
||||
self.conn_events.header_ready(self.stream_id, headers, true);
|
||||
self.state = RecvMessageState::Closed;
|
||||
} else {
|
||||
self.conn_events
|
||||
.header_ready(self.stream_id, headers, false);
|
||||
self.state = RecvMessageState::WaitingForData;
|
||||
}
|
||||
self.response_headers_state = ResponseHeadersState::Ready(headers);
|
||||
self.conn_events.header_ready(self.stream_id);
|
||||
self.state = ResponseStreamState::WaitingForData;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_state_to_close_pending(&mut self) {
|
||||
|
@ -133,101 +135,92 @@ impl ResponseStream {
|
|||
// or data_readable event so that app can pick up the fin.
|
||||
qtrace!(
|
||||
[self],
|
||||
"set_state_to_close_pending: response_headers_state={:?}",
|
||||
self.response_headers_state
|
||||
"set_state_to_close_pending: state={:?}",
|
||||
self.state
|
||||
);
|
||||
match self.response_headers_state {
|
||||
ResponseHeadersState::NoHeaders => {
|
||||
self.conn_events.header_ready(self.stream_id);
|
||||
self.response_headers_state = ResponseHeadersState::Ready(None);
|
||||
|
||||
match self.state {
|
||||
RecvMessageState::WaitingForResponseHeaders => {
|
||||
self.conn_events.header_ready(self.stream_id, None, true);
|
||||
self.state = RecvMessageState::Closed;
|
||||
}
|
||||
// In Ready state we are already waiting for app to pick up headers
|
||||
// it can also pick up fin, so we do not need a new event.
|
||||
ResponseHeadersState::Ready(..) => {}
|
||||
ResponseHeadersState::Read => self.conn_events.data_readable(self.stream_id),
|
||||
RecvMessageState::ReadingData { .. } => {}
|
||||
RecvMessageState::WaitingForData | RecvMessageState::WaitingForFinAfterTrailers => {
|
||||
self.conn_events.data_readable(self.stream_id)
|
||||
}
|
||||
_ => unreachable!("Closing an already closed transaction."),
|
||||
}
|
||||
if !matches!(self.state, RecvMessageState::Closed) {
|
||||
self.state = RecvMessageState::ClosePending;
|
||||
}
|
||||
self.state = ResponseStreamState::ClosePending;
|
||||
}
|
||||
|
||||
fn recv_frame(&mut self, conn: &mut Connection) -> Res<(Option<HFrame>, bool)> {
|
||||
qtrace!([self], "receiving frame header");
|
||||
let fin = self.frame_reader.receive(conn, self.stream_id)?;
|
||||
if !self.frame_reader.done() {
|
||||
Ok((None, fin))
|
||||
} else {
|
||||
if self.frame_reader.done() {
|
||||
qdebug!([self], "A new frame has been received.");
|
||||
Ok((Some(self.frame_reader.get_frame()?), fin))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_response_headers(&mut self) -> Res<(Vec<Header>, bool)> {
|
||||
if let ResponseHeadersState::Ready(ref mut headers) = self.response_headers_state {
|
||||
let hdrs = if let Some(ref mut hdrs) = headers {
|
||||
mem::replace(hdrs, Vec::new())
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
self.response_headers_state = ResponseHeadersState::Read;
|
||||
|
||||
let fin = if self.state == ResponseStreamState::ClosePending {
|
||||
self.state = ResponseStreamState::Closed;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
};
|
||||
Ok((hdrs, fin))
|
||||
} else {
|
||||
Err(Error::Unavailable)
|
||||
Ok((None, fin))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_response_data(
|
||||
pub fn read_data(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
decoder: &mut QPackDecoder,
|
||||
buf: &mut [u8],
|
||||
) -> Res<(usize, bool)> {
|
||||
match self.state {
|
||||
ResponseStreamState::ReadingData {
|
||||
ref mut remaining_data_len,
|
||||
} => {
|
||||
let to_read = min(*remaining_data_len, buf.len());
|
||||
let (amount, fin) = conn.stream_recv(self.stream_id, &mut buf[..to_read])?;
|
||||
debug_assert!(amount <= to_read);
|
||||
*remaining_data_len -= amount;
|
||||
let mut written = 0;
|
||||
loop {
|
||||
match self.state {
|
||||
RecvMessageState::ReadingData {
|
||||
ref mut remaining_data_len,
|
||||
} => {
|
||||
let to_read = min(*remaining_data_len, buf.len() - written);
|
||||
let (amount, fin) =
|
||||
conn.stream_recv(self.stream_id, &mut buf[written..written + to_read])?;
|
||||
debug_assert!(amount <= to_read);
|
||||
*remaining_data_len -= amount;
|
||||
written += amount;
|
||||
|
||||
if fin {
|
||||
if *remaining_data_len > 0 {
|
||||
return Err(Error::HttpFrameError);
|
||||
if fin {
|
||||
if *remaining_data_len > 0 {
|
||||
return Err(Error::HttpFrame);
|
||||
}
|
||||
self.state = RecvMessageState::Closed;
|
||||
break Ok((written, fin));
|
||||
} else if *remaining_data_len == 0 {
|
||||
self.state = RecvMessageState::WaitingForData;
|
||||
self.receive_internal(conn, decoder, false)?;
|
||||
} else {
|
||||
break Ok((written, false));
|
||||
}
|
||||
self.state = ResponseStreamState::Closed;
|
||||
} else if *remaining_data_len == 0 {
|
||||
self.state = ResponseStreamState::WaitingForData;
|
||||
}
|
||||
|
||||
Ok((amount, fin))
|
||||
RecvMessageState::ClosePending => {
|
||||
self.state = RecvMessageState::Closed;
|
||||
break Ok((written, true));
|
||||
}
|
||||
_ => break Ok((written, false)),
|
||||
}
|
||||
ResponseStreamState::ClosePending => {
|
||||
self.state = ResponseStreamState::Closed;
|
||||
Ok((0, true))
|
||||
}
|
||||
_ => Ok((0, false)),
|
||||
}
|
||||
}
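// Illustrative sketch (not part of this patch): the buffer-filling pattern used by
// read_data -- keep copying from the current DATA frame, and when one frame is
// exhausted move on to the next, until the caller's buffer is full or the frames
// run out. The `frames` slice stands in for the transport stream.
fn fill_buffer(frames: &[&[u8]], buf: &mut [u8]) -> usize {
    let mut written = 0;
    for frame in frames {
        let mut offset = 0;
        while written < buf.len() && offset < frame.len() {
            let to_copy = std::cmp::min(buf.len() - written, frame.len() - offset);
            buf[written..written + to_copy].copy_from_slice(&frame[offset..offset + to_copy]);
            written += to_copy;
            offset += to_copy;
        }
        if written == buf.len() {
            break;
        }
    }
    written
}

fn main() {
    let frames: [&[u8]; 2] = [&[1, 2, 3], &[4, 5]];
    let mut buf = [0_u8; 4];
    assert_eq!(fill_buffer(&frames, &mut buf), 4); // the read spans a frame boundary
    assert_eq!(buf, [1, 2, 3, 4]);
}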
|
||||
|
||||
pub fn receive(&mut self, conn: &mut Connection, decoder: &mut QPackDecoder) -> Res<()> {
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
fn receive_internal(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
decoder: &mut QPackDecoder,
|
||||
post_readable_event: bool,
|
||||
) -> Res<()> {
|
||||
let label = ::neqo_common::log_subject!(::log::Level::Debug, self);
|
||||
loop {
|
||||
qdebug!([label], "state={:?}.", self.state);
|
||||
match self.state {
|
||||
// In the following 3 states we need to read frames.
|
||||
ResponseStreamState::WaitingForResponseHeaders
|
||||
| ResponseStreamState::WaitingForData
|
||||
| ResponseStreamState::WaitingForFinAfterTrailers => {
|
||||
RecvMessageState::WaitingForResponseHeaders
|
||||
| RecvMessageState::WaitingForData
|
||||
| RecvMessageState::WaitingForFinAfterTrailers => {
|
||||
match self.recv_frame(conn)? {
|
||||
(None, true) => {
|
||||
self.set_state_to_close_pending();
|
||||
|
@ -246,55 +239,61 @@ impl ResponseStream {
|
|||
self.handle_headers_frame(header_block, fin)?
|
||||
}
|
||||
HFrame::Data { len } => self.handle_data_frame(len, fin)?,
|
||||
HFrame::PushPromise { .. } | HFrame::DuplicatePush { .. } => {
|
||||
break Err(Error::HttpIdError)
|
||||
}
|
||||
HFrame::PushPromise {
|
||||
push_id,
|
||||
header_block,
|
||||
} => self
|
||||
.push_handler
|
||||
.as_ref()
|
||||
.ok_or(Error::HttpId)?
|
||||
.borrow()
|
||||
.new_push_promise(push_id, header_block)?,
|
||||
_ => break Err(Error::HttpFrameUnexpected),
|
||||
}
|
||||
if fin
|
||||
&& !matches!(self.state, ResponseStreamState::DecodingHeaders{..})
|
||||
{
|
||||
if matches!(self.state, RecvMessageState::Closed) {
|
||||
break Ok(());
|
||||
}
|
||||
if fin && !matches!(self.state, RecvMessageState::DecodingHeaders{..}) {
|
||||
self.set_state_to_close_pending();
|
||||
break Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
ResponseStreamState::DecodingHeaders {
|
||||
RecvMessageState::DecodingHeaders {
|
||||
ref header_block,
|
||||
fin,
|
||||
} => match decoder.decode_header_block(header_block, self.stream_id)? {
|
||||
Some(headers) => {
|
||||
self.add_headers(Some(headers))?;
|
||||
} => {
|
||||
if let Some(headers) =
|
||||
decoder.decode_header_block(header_block, self.stream_id)?
|
||||
{
|
||||
self.add_headers(Some(headers), fin);
|
||||
if fin {
|
||||
self.set_state_to_close_pending();
|
||||
break Ok(());
|
||||
}
|
||||
}
|
||||
None => {
|
||||
} else {
|
||||
qinfo!([self], "decoding header is blocked.");
|
||||
break Ok(());
|
||||
}
|
||||
},
|
||||
ResponseStreamState::ReadingData { .. } => {
|
||||
self.conn_events.data_readable(self.stream_id);
|
||||
}
|
||||
RecvMessageState::ReadingData { .. } => {
|
||||
if post_readable_event {
|
||||
self.conn_events.data_readable(self.stream_id);
|
||||
}
|
||||
break Ok(());
|
||||
}
|
||||
ResponseStreamState::ClosePending => {
|
||||
panic!("Stream readable after being closed!");
|
||||
}
|
||||
ResponseStreamState::Closed => {
|
||||
RecvMessageState::ClosePending | RecvMessageState::Closed => {
|
||||
panic!("Stream readable after being closed!");
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_closed(&self) -> bool {
|
||||
self.state == ResponseStreamState::Closed
|
||||
pub fn receive(&mut self, conn: &mut Connection, decoder: &mut QPackDecoder) -> Res<()> {
|
||||
self.receive_internal(conn, decoder, true)
|
||||
}
|
||||
|
||||
pub fn close(&mut self) {
|
||||
self.state = ResponseStreamState::Closed;
|
||||
pub fn done(&self) -> bool {
|
||||
self.state == RecvMessageState::Closed
|
||||
}
|
||||
}
|
|
@ -0,0 +1,280 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::hframe::HFrame;
|
||||
use crate::Header;
|
||||
use crate::{Error, Res};
|
||||
use neqo_common::{matches, qdebug, qinfo, qtrace, Encoder};
|
||||
use neqo_qpack::encoder::QPackEncoder;
|
||||
use neqo_transport::Connection;
|
||||
use std::cmp::min;
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt::Debug;
|
||||
|
||||
const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2
|
||||
const MAX_DATA_HEADER_SIZE_2_LIMIT: usize = MAX_DATA_HEADER_SIZE_2 + 3; // 63 + 3 (size of the next buffer data frame header)
|
||||
const MAX_DATA_HEADER_SIZE_3: usize = (1 << 14) - 1; // Maximal amount of data with DATA frame header size 3
|
||||
const MAX_DATA_HEADER_SIZE_3_LIMIT: usize = MAX_DATA_HEADER_SIZE_3 + 5; // 16383 + 5 (size of the next buffer data frame header)
|
||||
const MAX_DATA_HEADER_SIZE_5: usize = (1 << 30) - 1; // Maximal amount of data with DATA frame header size 5
|
||||
const MAX_DATA_HEADER_SIZE_5_LIMIT: usize = MAX_DATA_HEADER_SIZE_5 + 9; // 1073741823 + 9 (size of the next buffer data frame header)
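// Illustrative sketch (not part of this patch): the DATA frame header is one type
// byte plus a QUIC varint length (1, 2, 4 or 8 bytes), which is where the 2/3/5/9
// byte header sizes and the 63 / 16383 / 2^30 - 1 payload limits above come from.
fn data_frame_header_size(payload_len: usize) -> usize {
    if payload_len < (1 << 6) {
        2 // 1 type byte + 1-byte length varint
    } else if payload_len < (1 << 14) {
        3 // 1 type byte + 2-byte length varint
    } else if payload_len < (1 << 30) {
        5 // 1 type byte + 4-byte length varint
    } else {
        9 // 1 type byte + 8-byte length varint
    }
}

fn main() {
    assert_eq!(data_frame_header_size(63), 2);
    assert_eq!(data_frame_header_size(64), 3);
    assert_eq!(data_frame_header_size(16_383), 3);
    assert_eq!(data_frame_header_size(16_384), 5);
}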
|
||||
|
||||
pub(crate) trait SendMessageEvents: Debug {
|
||||
fn data_writable(&self, stream_id: u64);
|
||||
}
|
||||
|
||||
/*
|
||||
* SendMessage states:
|
||||
* Uninitialized
|
||||
* Initialized : Headers are present but still not encoded. A message body may be present as well.
|
||||
* The client side sends a message body using the send_body() function that directly
|
||||
* writes into a transport stream. The server side sets headers and body when
|
||||
* initializing a send message (TODO: make server use send_body as well)
|
||||
* SendingInitialMessage : sending headers and maybe message body. From here we may switch to
|
||||
* SendingData or Closed (if the app does not want to send data and
|
||||
* has already closed the send stream).
|
||||
* SendingData : We are sending request data until the app closes the stream.
|
||||
* Closed
|
||||
*/
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
enum SendMessageState {
|
||||
Uninitialized,
|
||||
Initialized {
|
||||
headers: Vec<Header>,
|
||||
data: Option<Vec<u8>>,
|
||||
fin: bool,
|
||||
},
|
||||
SendingInitialMessage {
|
||||
buf: Vec<u8>,
|
||||
fin: bool,
|
||||
},
|
||||
SendingData,
|
||||
Closed,
|
||||
}
|
||||
|
||||
impl SendMessageState {
|
||||
pub fn is_sending_closed(&self) -> bool {
|
||||
match self {
|
||||
Self::Initialized { fin, .. } | Self::SendingInitialMessage { fin, .. } => *fin,
|
||||
Self::SendingData => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn done(&self) -> bool {
|
||||
matches!(self, Self::Closed)
|
||||
}
|
||||
|
||||
pub fn is_state_sending_data(&self) -> bool {
|
||||
matches!(self, Self::SendingData)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct SendMessage {
|
||||
state: SendMessageState,
|
||||
stream_id: u64,
|
||||
conn_events: Box<dyn SendMessageEvents>,
|
||||
}
|
||||
|
||||
impl SendMessage {
|
||||
pub fn new(stream_id: u64, conn_events: Box<dyn SendMessageEvents>) -> Self {
|
||||
qinfo!("Create a request stream_id={}", stream_id);
|
||||
Self {
|
||||
state: SendMessageState::Uninitialized,
|
||||
stream_id,
|
||||
conn_events,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_with_headers(
|
||||
stream_id: u64,
|
||||
headers: Vec<Header>,
|
||||
conn_events: Box<dyn SendMessageEvents>,
|
||||
) -> Self {
|
||||
qinfo!("Create a request stream_id={}", stream_id);
|
||||
Self {
|
||||
state: SendMessageState::Initialized {
|
||||
headers,
|
||||
data: None,
|
||||
fin: false,
|
||||
},
|
||||
stream_id,
|
||||
conn_events,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_message(&mut self, headers: &[Header], data: Option<&[u8]>) -> Res<()> {
|
||||
if !matches!(self.state, SendMessageState::Uninitialized) {
|
||||
return Err(Error::AlreadyInitialized);
|
||||
}
|
||||
|
||||
self.state = SendMessageState::Initialized {
|
||||
headers: headers.to_vec(),
|
||||
data: if let Some(d) = data {
|
||||
Some(d.to_vec())
|
||||
} else {
|
||||
None
|
||||
},
|
||||
fin: true,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn send_body(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<usize> {
|
||||
qinfo!(
|
||||
[self],
|
||||
"send_request_body: state={:?} len={}",
|
||||
self.state,
|
||||
buf.len()
|
||||
);
|
||||
match self.state {
|
||||
SendMessageState::Uninitialized
|
||||
| SendMessageState::Initialized { .. }
|
||||
| SendMessageState::SendingInitialMessage { .. } => Ok(0),
|
||||
SendMessageState::SendingData => {
|
||||
let available = usize::try_from(conn.stream_avail_send_space(self.stream_id)?)
|
||||
.unwrap_or(usize::max_value());
|
||||
if available <= 2 {
|
||||
return Ok(0);
|
||||
}
|
||||
let to_send;
|
||||
if available <= MAX_DATA_HEADER_SIZE_2_LIMIT {
|
||||
// 63 + 3
|
||||
to_send = min(min(buf.len(), available - 2), MAX_DATA_HEADER_SIZE_2);
|
||||
} else if available <= MAX_DATA_HEADER_SIZE_3_LIMIT {
|
||||
// 16383 + 5
|
||||
to_send = min(min(buf.len(), available - 3), MAX_DATA_HEADER_SIZE_3);
|
||||
} else if available <= MAX_DATA_HEADER_SIZE_5 {
|
||||
// 1073741823 + 9
|
||||
to_send = min(min(buf.len(), available - 5), MAX_DATA_HEADER_SIZE_5_LIMIT);
|
||||
} else {
|
||||
to_send = min(buf.len(), available - 9);
|
||||
}
|
||||
|
||||
qinfo!(
|
||||
[self],
|
||||
"send_request_body: available={} to_send={}.",
|
||||
available,
|
||||
to_send
|
||||
);
|
||||
|
||||
let data_frame = HFrame::Data {
|
||||
len: to_send as u64,
|
||||
};
|
||||
let mut enc = Encoder::default();
|
||||
data_frame.encode(&mut enc);
|
||||
match conn.stream_send(self.stream_id, &enc) {
|
||||
Ok(sent) => {
|
||||
debug_assert_eq!(sent, enc.len());
|
||||
}
|
||||
Err(e) => return Err(Error::TransportError(e)),
|
||||
}
|
||||
match conn.stream_send(self.stream_id, &buf[..to_send]) {
|
||||
Ok(sent) => Ok(sent),
|
||||
Err(e) => Err(Error::TransportError(e)),
|
||||
}
|
||||
}
|
||||
SendMessageState::Closed => Err(Error::AlreadyClosed),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_sending_closed(&self) -> bool {
|
||||
self.state.is_sending_closed()
|
||||
}
|
||||
|
||||
pub fn done(&self) -> bool {
|
||||
self.state.done()
|
||||
}
|
||||
|
||||
pub fn is_state_sending_data(&self) -> bool {
|
||||
self.state.is_state_sending_data()
|
||||
}
|
||||
|
||||
fn ensure_encoded(&mut self, conn: &mut Connection, encoder: &mut QPackEncoder) -> Res<()> {
|
||||
if let SendMessageState::Initialized { headers, data, fin } = &self.state {
|
||||
qdebug!([self], "Encoding headers");
|
||||
let header_block = encoder.encode_header_block(conn, &headers, self.stream_id)?;
|
||||
let hframe = HFrame::Headers {
|
||||
header_block: header_block.to_vec(),
|
||||
};
|
||||
let mut d = Encoder::default();
|
||||
hframe.encode(&mut d);
|
||||
if let Some(buf) = data {
|
||||
qdebug!([self], "Encoding data");
|
||||
let d_frame = HFrame::Data {
|
||||
len: buf.len() as u64,
|
||||
};
|
||||
d_frame.encode(&mut d);
|
||||
d.encode(&buf);
|
||||
}
|
||||
|
||||
self.state = SendMessageState::SendingInitialMessage {
|
||||
buf: d.into(),
|
||||
fin: *fin,
|
||||
};
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn send(&mut self, conn: &mut Connection, encoder: &mut QPackEncoder) -> Res<()> {
|
||||
self.ensure_encoded(conn, encoder)?;
|
||||
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
if let SendMessageState::SendingInitialMessage { ref mut buf, fin } = self.state {
|
||||
let sent = conn.stream_send(self.stream_id, &buf)?;
|
||||
qtrace!([label], "{} bytes sent", sent);
|
||||
|
||||
if sent == buf.len() {
|
||||
if fin {
|
||||
conn.stream_close_send(self.stream_id)?;
|
||||
self.state = SendMessageState::Closed;
|
||||
qtrace!([label], "done sending request");
|
||||
} else {
|
||||
self.state = SendMessageState::SendingData;
|
||||
self.conn_events.data_writable(self.stream_id);
|
||||
qtrace!([label], "change to state SendingData");
|
||||
}
|
||||
} else {
|
||||
let b = buf.split_off(sent);
|
||||
*buf = b;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// SendMessage owns headers and sends them. It may also own data for the server side.
|
||||
// This method returns whether they are still being sent. The request body (if any) is sent by
|
||||
// the HTTP client afterwards using `send_request_body` after receiving a DataWritable event.
|
||||
pub fn has_data_to_send(&self) -> bool {
|
||||
matches!(self.state, SendMessageState::Initialized {..} | SendMessageState::SendingInitialMessage { .. } )
|
||||
}
|
||||
|
||||
pub fn close(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
match self.state {
|
||||
SendMessageState::SendingInitialMessage { ref mut fin, .. }
|
||||
| SendMessageState::Initialized { ref mut fin, .. } => {
|
||||
*fin = true;
|
||||
}
|
||||
_ => {
|
||||
self.state = SendMessageState::Closed;
|
||||
conn.stream_close_send(self.stream_id)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for SendMessage {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "SendMesage {}", self.stream_id)
|
||||
}
|
||||
}
|
|
@ -4,6 +4,8 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
|
||||
use crate::connection::Http3State;
|
||||
use crate::connection_server::Http3ServerHandler;
|
||||
use crate::server_connection_events::Http3ServerConnEvent;
|
||||
|
@ -11,19 +13,23 @@ use crate::server_events::{ClientRequestStream, Http3ServerEvent, Http3ServerEve
|
|||
use crate::Res;
|
||||
use neqo_common::{qtrace, Datagram};
|
||||
use neqo_crypto::AntiReplay;
|
||||
use neqo_qpack::QpackSettings;
|
||||
use neqo_transport::server::{ActiveConnectionRef, Server};
|
||||
use neqo_transport::{ConnectionIdManager, Output};
|
||||
use std::cell::RefCell;
|
||||
use std::cell::RefMut;
|
||||
use std::collections::HashMap;
|
||||
use std::path::PathBuf;
|
||||
use std::rc::Rc;
|
||||
use std::time::Instant;
|
||||
|
||||
type HandlerRef = Rc<RefCell<Http3ServerHandler>>;
|
||||
|
||||
const MAX_EVENT_DATA_SIZE: usize = 1024;
|
||||
|
||||
pub struct Http3Server {
|
||||
server: Server,
|
||||
max_table_size: u64,
|
||||
max_blocked_streams: u16,
|
||||
qpack_settings: QpackSettings,
|
||||
http3_handlers: HashMap<ActiveConnectionRef, HandlerRef>,
|
||||
events: Http3ServerEvents,
|
||||
}
|
||||
|
@ -35,24 +41,29 @@ impl ::std::fmt::Display for Http3Server {
|
|||
}
|
||||
|
||||
impl Http3Server {
|
||||
/// # Errors
|
||||
/// Making a `neqo_transport::Server` may produce an error. This can only be a crypto error if
|
||||
/// the socket can't be created or configured.
|
||||
pub fn new(
|
||||
now: Instant,
|
||||
certs: &[impl AsRef<str>],
|
||||
protocols: &[impl AsRef<str>],
|
||||
anti_replay: AntiReplay,
|
||||
cid_manager: Rc<RefCell<dyn ConnectionIdManager>>,
|
||||
max_table_size: u64,
|
||||
max_blocked_streams: u16,
|
||||
qpack_settings: QpackSettings,
|
||||
) -> Res<Self> {
|
||||
Ok(Self {
|
||||
server: Server::new(now, certs, protocols, anti_replay, cid_manager)?,
|
||||
max_table_size,
|
||||
max_blocked_streams,
|
||||
qpack_settings,
|
||||
http3_handlers: HashMap::new(),
|
||||
events: Http3ServerEvents::default(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) {
|
||||
self.server.set_qlog_dir(dir)
|
||||
}
|
||||
|
||||
pub fn process(&mut self, dgram: Option<Datagram>, now: Instant) -> Output {
|
||||
qtrace!([self], "Process.");
|
||||
let out = self.server.process(dgram, now);
|
||||
|
@ -67,6 +78,7 @@ impl Http3Server {
|
|||
}
|
||||
}
|
||||
|
||||
/// Process HTTP3 layer.
|
||||
pub fn process_http3(&mut self, now: Instant) {
|
||||
qtrace!([self], "Process http3 internal.");
|
||||
let mut active_conns = self.server.active_connections();
|
||||
|
@ -75,61 +87,64 @@ impl Http3Server {
|
|||
let mut http3_active: Vec<ActiveConnectionRef> = self
|
||||
.http3_handlers
|
||||
.iter()
|
||||
.filter(|(conn, handler)| {
|
||||
handler.borrow_mut().should_be_processed() && !active_conns.contains(&conn)
|
||||
.filter_map(|(conn, handler)| {
|
||||
if handler.borrow_mut().should_be_processed() && !active_conns.contains(&conn) {
|
||||
Some(conn)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.map(|(conn, _)| conn)
|
||||
.cloned()
|
||||
.collect();
|
||||
// For each http3_active connection we need to put it into neqo-transport's server
|
||||
// waiting queue.
|
||||
http3_active
|
||||
.iter()
|
||||
.for_each(|conn| self.server.add_to_waiting(conn.clone()));
|
||||
active_conns.append(&mut http3_active);
|
||||
active_conns.dedup();
|
||||
let max_table_size = self.max_table_size;
|
||||
let max_blocked_streams = self.max_blocked_streams;
|
||||
active_conns
|
||||
.iter()
|
||||
.for_each(|conn| self.server.add_to_waiting(conn.clone()));
|
||||
let qpack_settings = self.qpack_settings;
|
||||
for mut conn in active_conns {
|
||||
let handler = self.http3_handlers.entry(conn.clone()).or_insert_with(|| {
|
||||
Rc::new(RefCell::new(Http3ServerHandler::new(
|
||||
max_table_size,
|
||||
max_blocked_streams,
|
||||
)))
|
||||
});
|
||||
let handler = self
|
||||
.http3_handlers
|
||||
.entry(conn.clone())
|
||||
.or_insert_with(|| Rc::new(RefCell::new(Http3ServerHandler::new(qpack_settings))));
|
||||
|
||||
handler
|
||||
.borrow_mut()
|
||||
.process_http3(&mut conn.borrow_mut(), now);
|
||||
let mut remove = false;
|
||||
while let Some(e) = handler.borrow_mut().next_event() {
|
||||
match e {
|
||||
Http3ServerConnEvent::Headers {
|
||||
stream_id,
|
||||
headers,
|
||||
fin,
|
||||
} => self.events.headers(
|
||||
ClientRequestStream::new(conn.clone(), handler.clone(), stream_id),
|
||||
headers,
|
||||
fin,
|
||||
),
|
||||
Http3ServerConnEvent::Data {
|
||||
stream_id,
|
||||
data,
|
||||
fin,
|
||||
} => self.events.data(
|
||||
ClientRequestStream::new(conn.clone(), handler.clone(), stream_id),
|
||||
data,
|
||||
fin,
|
||||
),
|
||||
Http3ServerConnEvent::StateChange(state) => {
|
||||
self.events
|
||||
.connection_state_change(conn.clone(), state.clone());
|
||||
if let Http3State::Closed { .. } = state {
|
||||
remove = true;
|
||||
{
|
||||
let mut handler_borrowed = handler.borrow_mut();
|
||||
while let Some(e) = handler_borrowed.next_event() {
|
||||
match e {
|
||||
Http3ServerConnEvent::Headers {
|
||||
stream_id,
|
||||
headers,
|
||||
fin,
|
||||
} => self.events.headers(
|
||||
ClientRequestStream::new(conn.clone(), handler.clone(), stream_id),
|
||||
headers,
|
||||
fin,
|
||||
),
|
||||
Http3ServerConnEvent::DataReadable { stream_id } => {
|
||||
prepare_data(
|
||||
stream_id,
|
||||
&mut handler_borrowed,
|
||||
&mut conn,
|
||||
&handler,
|
||||
now,
|
||||
&mut self.events,
|
||||
);
|
||||
}
|
||||
Http3ServerConnEvent::StateChange(state) => {
|
||||
self.events
|
||||
.connection_state_change(conn.clone(), state.clone());
|
||||
if let Http3State::Closed { .. } = state {
|
||||
remove = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
if remove {
|
||||
|
@ -139,12 +154,13 @@ impl Http3Server {
|
|||
}
|
||||
|
||||
/// Get all current events. Best used just in debug/testing code, use
|
||||
/// next_event() instead.
|
||||
/// `next_event` instead.
|
||||
pub fn events(&mut self) -> impl Iterator<Item = Http3ServerEvent> {
|
||||
self.events.events()
|
||||
}
|
||||
|
||||
/// Return true if there are outstanding events.
|
||||
#[must_use]
|
||||
pub fn has_events(&self) -> bool {
|
||||
self.events.has_events()
|
||||
}
|
||||
|
@ -156,18 +172,54 @@ impl Http3Server {
|
|||
self.events.next_event()
|
||||
}
|
||||
}
|
||||
fn prepare_data(
|
||||
stream_id: u64,
|
||||
handler_borrowed: &mut RefMut<Http3ServerHandler>,
|
||||
conn: &mut ActiveConnectionRef,
|
||||
handler: &HandlerRef,
|
||||
now: Instant,
|
||||
events: &mut Http3ServerEvents,
|
||||
) {
|
||||
loop {
|
||||
let mut data = vec![0; MAX_EVENT_DATA_SIZE];
|
||||
let res =
|
||||
handler_borrowed.read_request_data(&mut conn.borrow_mut(), now, stream_id, &mut data);
|
||||
if let Ok((amount, fin)) = res {
|
||||
if amount > 0 {
|
||||
if amount < MAX_EVENT_DATA_SIZE {
|
||||
data.resize(amount, 0);
|
||||
}
|
||||
events.data(
|
||||
ClientRequestStream::new(conn.clone(), handler.clone(), stream_id),
|
||||
data,
|
||||
fin,
|
||||
);
|
||||
}
|
||||
if amount < MAX_EVENT_DATA_SIZE || fin {
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// Any error will close the handler; just ignore this event, the next event must
|
||||
// be a state change event.
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
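// Illustrative sketch (not part of this patch): the chunking loop used by
// prepare_data -- read fixed-size chunks, emit one event per non-empty chunk, and
// stop on a short read or fin. The `read` closure stands in for read_request_data.
const CHUNK: usize = 4;

fn drain<F: FnMut(&mut [u8]) -> (usize, bool)>(mut read: F) -> Vec<Vec<u8>> {
    let mut events = Vec::new();
    loop {
        let mut data = vec![0; CHUNK];
        let (amount, fin) = read(&mut data);
        if amount > 0 {
            data.truncate(amount);
            events.push(data);
        }
        if amount < CHUNK || fin {
            break;
        }
    }
    events
}

fn main() {
    // A stream carrying 6 bytes, delivered as 4 + 2, with fin on the last read.
    let mut source: Vec<(Vec<u8>, bool)> = vec![(vec![1, 2, 3, 4], false), (vec![5, 6], true)];
    let events = drain(|buf| {
        if source.is_empty() {
            return (0, true);
        }
        let (chunk, fin) = source.remove(0);
        buf[..chunk.len()].copy_from_slice(&chunk);
        (chunk.len(), fin)
    });
    assert_eq!(events, vec![vec![1, 2, 3, 4], vec![5, 6]]);
}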
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::Error;
|
||||
use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell};
|
||||
use crate::{Error, Header};
|
||||
use neqo_common::matches;
|
||||
use neqo_crypto::AuthenticationStatus;
|
||||
use neqo_qpack::encoder::QPackEncoder;
|
||||
use neqo_qpack::QpackSettings;
|
||||
use neqo_transport::{
|
||||
CloseError, Connection, ConnectionEvent, FixedConnectionIdManager, State, StreamType,
|
||||
};
|
||||
use test_fixture::*;
|
||||
use test_fixture::{
|
||||
anti_replay, default_client, fixture_init, now, DEFAULT_ALPN, DEFAULT_KEYS,
|
||||
};
|
||||
|
||||
/// Create an http3 server with default configuration.
|
||||
pub fn default_http3_server() -> Http3Server {
|
||||
|
@ -178,13 +230,16 @@ mod tests {
|
|||
DEFAULT_ALPN,
|
||||
anti_replay(),
|
||||
Rc::new(RefCell::new(FixedConnectionIdManager::new(5))),
|
||||
100,
|
||||
100,
|
||||
QpackSettings {
|
||||
max_table_size_encoder: 100,
|
||||
max_table_size_decoder: 100,
|
||||
max_blocked_streams: 100,
|
||||
},
|
||||
)
|
||||
.expect("create a default server")
|
||||
}
|
||||
|
||||
fn assert_closed(hconn: &mut Http3Server, expected: Error) {
|
||||
fn assert_closed(hconn: &mut Http3Server, expected: &Error) {
|
||||
let err = CloseError::Application(expected.code());
|
||||
let closed = |e| {
|
||||
matches!(e,
|
||||
|
@ -209,6 +264,13 @@ mod tests {
|
|||
assert!(!hconn.events().any(closed));
|
||||
}
|
||||
|
||||
const CLIENT_SIDE_CONTROL_STREAM_ID: u64 = 2;
|
||||
const CLIENT_SIDE_ENCODER_STREAM_ID: u64 = 6;
|
||||
const CLIENT_SIDE_DECODER_STREAM_ID: u64 = 10;
|
||||
const SERVER_SIDE_CONTROL_STREAM_ID: u64 = 3;
|
||||
const SERVER_SIDE_ENCODER_STREAM_ID: u64 = 7;
|
||||
const SERVER_SIDE_DECODER_STREAM_ID: u64 = 11;
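// Illustrative note (not part of this patch): QUIC stream IDs encode the initiator in
// bit 0 (0 = client, 1 = server) and the directionality in bit 1 (1 = unidirectional),
// so the first three client unidirectional streams are 2, 6, 10 and the first three
// server unidirectional streams are 3, 7, 11 -- the constants above.
fn nth_uni_stream_id(server_initiated: bool, n: u64) -> u64 {
    4 * n + 2 + u64::from(server_initiated)
}

fn main() {
    assert_eq!(nth_uni_stream_id(false, 0), 2); // client-side control stream
    assert_eq!(nth_uni_stream_id(true, 2), 11); // server-side decoder stream
}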
|
||||
|
||||
// Start a client/server and check setting frame.
|
||||
#[allow(clippy::cognitive_complexity)]
|
||||
fn connect_and_receive_settings() -> (Http3Server, Connection) {
|
||||
|
@ -218,6 +280,8 @@ mod tests {
|
|||
// side sends and also to simulate an incorrectly behaving http3
|
||||
// client.
|
||||
|
||||
const CONTROL_STREAM_DATA: &[u8] = &[0x0, 0x4, 0x6, 0x1, 0x40, 0x64, 0x7, 0x40, 0x64];
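// Byte-by-byte reading of CONTROL_STREAM_DATA (illustrative note, not part of this
// patch): 0x0 marks the unidirectional stream as a control stream, then a SETTINGS
// frame follows: type 0x4, length 0x6, and two settings -- 0x1
// (SETTINGS_QPACK_MAX_TABLE_CAPACITY) with the two-byte varint 0x40 0x64 = 100, and
// 0x7 (SETTINGS_QPACK_BLOCKED_STREAMS) with 0x40 0x64 = 100.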
|
||||
|
||||
fixture_init();
|
||||
let mut hconn = default_http3_server();
|
||||
let mut neqo_trans_conn = default_client();
|
||||
|
@ -241,29 +305,37 @@ mod tests {
|
|||
stream_id,
|
||||
stream_type,
|
||||
} => {
|
||||
assert!((stream_id == 3) || (stream_id == 7) || (stream_id == 11));
|
||||
assert!(
|
||||
(stream_id == SERVER_SIDE_CONTROL_STREAM_ID)
|
||||
|| (stream_id == SERVER_SIDE_ENCODER_STREAM_ID)
|
||||
|| (stream_id == SERVER_SIDE_DECODER_STREAM_ID)
|
||||
);
|
||||
assert_eq!(stream_type, StreamType::UniDi);
|
||||
}
|
||||
ConnectionEvent::RecvStreamReadable { stream_id } => {
|
||||
if stream_id == 2 || stream_id == 3 {
|
||||
if stream_id == CLIENT_SIDE_CONTROL_STREAM_ID
|
||||
|| stream_id == SERVER_SIDE_CONTROL_STREAM_ID
|
||||
{
|
||||
// the control stream
|
||||
let mut buf = [0u8; 100];
|
||||
let mut buf = [0_u8; 100];
|
||||
let (amount, fin) =
|
||||
neqo_trans_conn.stream_recv(stream_id, &mut buf).unwrap();
|
||||
assert_eq!(fin, false);
|
||||
const CONTROL_STREAM_DATA: &[u8] =
|
||||
&[0x0, 0x4, 0x6, 0x1, 0x40, 0x64, 0x7, 0x40, 0x64];
|
||||
assert_eq!(amount, CONTROL_STREAM_DATA.len());
|
||||
assert_eq!(&buf[..9], CONTROL_STREAM_DATA);
|
||||
} else if stream_id == 6 || stream_id == 7 {
|
||||
let mut buf = [0u8; 100];
|
||||
} else if stream_id == CLIENT_SIDE_ENCODER_STREAM_ID
|
||||
|| stream_id == SERVER_SIDE_ENCODER_STREAM_ID
|
||||
{
|
||||
let mut buf = [0_u8; 100];
|
||||
let (amount, fin) =
|
||||
neqo_trans_conn.stream_recv(stream_id, &mut buf).unwrap();
|
||||
assert_eq!(fin, false);
|
||||
assert_eq!(amount, 1);
|
||||
assert_eq!(buf[..1], [0x2]);
|
||||
} else if stream_id == 10 || stream_id == 11 {
|
||||
let mut buf = [0u8; 100];
|
||||
} else if stream_id == CLIENT_SIDE_DECODER_STREAM_ID
|
||||
|| stream_id == SERVER_SIDE_DECODER_STREAM_ID
|
||||
{
|
||||
let mut buf = [0_u8; 100];
|
||||
let (amount, fin) =
|
||||
neqo_trans_conn.stream_recv(stream_id, &mut buf).unwrap();
|
||||
assert_eq!(fin, false);
|
||||
|
@ -274,7 +346,11 @@ mod tests {
|
|||
}
|
||||
}
|
||||
ConnectionEvent::SendStreamWritable { stream_id } => {
|
||||
assert!((stream_id == 2) || (stream_id == 6) || (stream_id == 10));
|
||||
assert!(
|
||||
(stream_id == CLIENT_SIDE_CONTROL_STREAM_ID)
|
||||
|| (stream_id == CLIENT_SIDE_ENCODER_STREAM_ID)
|
||||
|| (stream_id == CLIENT_SIDE_DECODER_STREAM_ID)
|
||||
);
|
||||
}
|
||||
ConnectionEvent::StateChange(State::Connected) => connected = true,
|
||||
ConnectionEvent::StateChange(_) => (),
|
||||
|
@ -306,14 +382,22 @@ mod tests {
|
|||
&[0x0, 0x4, 0x6, 0x1, 0x40, 0x64, 0x7, 0x40, 0x64],
|
||||
);
|
||||
assert_eq!(sent, Ok(9));
|
||||
let mut encoder = QPackEncoder::new(true);
|
||||
let mut encoder = QPackEncoder::new(
|
||||
QpackSettings {
|
||||
max_table_size_encoder: 100,
|
||||
max_table_size_decoder: 0,
|
||||
max_blocked_streams: 0,
|
||||
},
|
||||
true,
|
||||
);
|
||||
encoder.add_send_stream(neqo_trans_conn.stream_create(StreamType::UniDi).unwrap());
|
||||
encoder.send(&mut neqo_trans_conn).unwrap();
|
||||
let decoder_stream = neqo_trans_conn.stream_create(StreamType::UniDi).unwrap();
|
||||
sent = neqo_trans_conn.stream_send(decoder_stream, &[0x3]);
|
||||
assert_eq!(sent, Ok(1));
|
||||
let out = neqo_trans_conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
let out1 = neqo_trans_conn.process(None, now());
|
||||
let out2 = hconn.process(out1.dgram(), now());
|
||||
neqo_trans_conn.process(out2.dgram(), now());
|
||||
|
||||
// assert that no error occurred.
|
||||
assert_not_closed(&mut hconn);
|
||||
|
@ -343,7 +427,7 @@ mod tests {
|
|||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, Error::HttpClosedCriticalStream);
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
// Server: test missing SETTINGS frame
|
||||
|
@ -358,7 +442,7 @@ mod tests {
|
|||
assert_eq!(sent, Ok(4));
|
||||
let out = neqo_trans_conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, Error::HttpMissingSettings);
|
||||
assert_closed(&mut hconn, &Error::HttpMissingSettings);
|
||||
}
|
||||
|
||||
// Server: receiving SETTINGS frame twice causes connection close
|
||||
|
@ -374,7 +458,7 @@ mod tests {
|
|||
assert_eq!(sent, Ok(8));
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, Error::HttpFrameUnexpected);
|
||||
assert_closed(&mut hconn, &Error::HttpFrameUnexpected);
|
||||
}
|
||||
|
||||
fn test_wrong_frame_on_control_stream(v: &[u8]) {
|
||||
|
@ -385,7 +469,7 @@ mod tests {
|
|||
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, Error::HttpFrameUnexpected);
|
||||
assert_closed(&mut hconn, &Error::HttpFrameUnexpected);
|
||||
}
|
||||
|
||||
// send a DATA frame on a control stream
|
||||
|
@ -406,12 +490,6 @@ mod tests {
|
|||
test_wrong_frame_on_control_stream(&[0x5, 0x2, 0x1, 0x2]);
|
||||
}
|
||||
|
||||
// send a DUPLICATE_PUSH frame on a control stream
|
||||
#[test]
|
||||
fn test_server_duplicate_push_frame_on_control_stream() {
|
||||
test_wrong_frame_on_control_stream(&[0xe, 0x2, 0x1, 0x2]);
|
||||
}
|
||||
|
||||
// Server: receive an unknown stream type
|
||||
// Also test a stream id that does not fit into a single byte.
|
||||
#[test]
|
||||
|
@ -429,7 +507,7 @@ mod tests {
|
|||
let out = hconn.process(None, now());
|
||||
peer_conn.conn.process(out.dgram(), now());
|
||||
|
||||
// check for stop-sending with Error::HttpStreamCreationError.
|
||||
// check for stop-sending with Error::HttpStreamCreation.
|
||||
let mut stop_sending_event_found = false;
|
||||
while let Some(e) = peer_conn.conn.next_event() {
|
||||
if let ConnectionEvent::SendStreamStopSending {
|
||||
|
@ -439,7 +517,7 @@ mod tests {
|
|||
{
|
||||
stop_sending_event_found = true;
|
||||
assert_eq!(stream_id, new_stream_id);
|
||||
assert_eq!(app_error, Error::HttpStreamCreationError.code());
|
||||
assert_eq!(app_error, Error::HttpStreamCreation.code());
|
||||
}
|
||||
}
|
||||
assert!(stop_sending_event_found);
|
||||
|
@ -457,7 +535,7 @@ mod tests {
|
|||
let out = peer_conn.conn.process(None, now());
|
||||
let out = hconn.process(out.dgram(), now());
|
||||
peer_conn.conn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, Error::HttpStreamCreationError);
|
||||
assert_closed(&mut hconn, &Error::HttpStreamCreation);
|
||||
}
|
||||
|
||||
//// Test reading of a slowly streamed frame. bytes are received one by one
|
||||
|
@ -544,7 +622,7 @@ mod tests {
|
|||
hconn.process(out.dgram(), now());
|
||||
|
||||
// PUSH_PROMISE on a control stream will cause an error
|
||||
assert_closed(&mut hconn, Error::HttpFrameUnexpected);
|
||||
assert_closed(&mut hconn, &Error::HttpFrameUnexpected);
|
||||
}
|
||||
|
||||
// Test reading of a slowly streamed frame. bytes are received one by one
|
||||
|
@ -559,7 +637,7 @@ mod tests {
|
|||
let out = peer_conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
|
||||
assert_closed(&mut hconn, Error::HttpFrameError);
|
||||
assert_closed(&mut hconn, &Error::HttpFrame);
|
||||
}
|
||||
|
||||
const REQUEST_WITH_BODY: &[u8] = &[
|
||||
|
@ -569,6 +647,19 @@ mod tests {
|
|||
0x0, 0x3, 0x61, 0x62, 0x63, // the second data frame.
|
||||
0x0, 0x3, 0x64, 0x65, 0x66,
|
||||
];
|
||||
const REQUEST_BODY: &[u8] = &[0x61, 0x62, 0x63, 0x64, 0x65, 0x66];
|
||||
|
||||
const RESPONSE_BODY: &[u8] = &[0x67, 0x68, 0x69];
|
||||
|
||||
fn check_request_header(header: &[Header]) {
|
||||
let expected_request_header = &[
|
||||
(String::from(":method"), String::from("GET")),
|
||||
(String::from(":scheme"), String::from("https")),
|
||||
(String::from(":authority"), String::from("something.com")),
|
||||
(String::from(":path"), String::from("/")),
|
||||
];
|
||||
assert_eq!(header, expected_request_header);
|
||||
}
|
||||
|
||||
// Incomplete DATA frame
|
||||
#[test]
|
||||
|
@ -603,19 +694,11 @@ mod tests {
|
|||
|
||||
// Check connection events. There should be one Headers event plus the data event(s) carrying the request body.
|
||||
let mut headers_frames = 0;
|
||||
let mut data_frames = 0;
|
||||
let mut data_received = 0;
|
||||
while let Some(event) = hconn.next_event() {
|
||||
match event {
|
||||
Http3ServerEvent::Headers { headers, fin, .. } => {
|
||||
assert_eq!(
|
||||
headers,
|
||||
vec![
|
||||
(String::from(":method"), String::from("GET")),
|
||||
(String::from(":scheme"), String::from("https")),
|
||||
(String::from(":authority"), String::from("something.com")),
|
||||
(String::from(":path"), String::from("/"))
|
||||
]
|
||||
);
|
||||
check_request_header(&headers.unwrap());
|
||||
assert_eq!(fin, false);
|
||||
headers_frames += 1;
|
||||
}
|
||||
|
@ -624,28 +707,24 @@ mod tests {
|
|||
data,
|
||||
fin,
|
||||
} => {
|
||||
if data_frames == 0 {
|
||||
assert_eq!(data, &REQUEST_WITH_BODY[20..23]);
|
||||
} else {
|
||||
assert_eq!(data, &REQUEST_WITH_BODY[25..]);
|
||||
assert_eq!(fin, true);
|
||||
request
|
||||
.set_response(
|
||||
&[
|
||||
(String::from(":status"), String::from("200")),
|
||||
(String::from("content-length"), String::from("3")),
|
||||
],
|
||||
vec![0x67, 0x68, 0x69],
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
data_frames += 1;
|
||||
assert_eq!(data, REQUEST_BODY);
|
||||
assert_eq!(fin, true);
|
||||
request
|
||||
.set_response(
|
||||
&[
|
||||
(String::from(":status"), String::from("200")),
|
||||
(String::from("content-length"), String::from("3")),
|
||||
],
|
||||
RESPONSE_BODY,
|
||||
)
|
||||
.unwrap();
|
||||
data_received += 1;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
assert_eq!(headers_frames, 1);
|
||||
assert_eq!(data_frames, 2);
|
||||
assert_eq!(data_received, 1);
|
||||
}
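The response side of the test above also shows the set_response change: the body is now passed as a borrowed slice rather than an owned Vec. A small sketch of that call in isolation (assumes ClientRequestStream is in scope; RESPONSE_BODY is the constant defined earlier in this module):

fn answer_with_static_body(request: &mut ClientRequestStream) {
    request
        .set_response(
            &[
                (String::from(":status"), String::from("200")),
                (String::from("content-length"), String::from("3")),
            ],
            RESPONSE_BODY, // &[u8]; the pre-0.4.0 API took Vec<u8> here
        )
        .unwrap();
}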
|
||||
|
||||
#[test]
|
||||
|
@ -671,19 +750,11 @@ mod tests {
|
|||
headers,
|
||||
fin,
|
||||
} => {
|
||||
assert_eq!(
|
||||
headers,
|
||||
vec![
|
||||
(String::from(":method"), String::from("GET")),
|
||||
(String::from(":scheme"), String::from("https")),
|
||||
(String::from(":authority"), String::from("something.com")),
|
||||
(String::from(":path"), String::from("/"))
|
||||
]
|
||||
);
|
||||
check_request_header(&headers.unwrap());
|
||||
assert_eq!(fin, false);
|
||||
headers_frames += 1;
|
||||
request
|
||||
.stream_stop_sending(Error::HttpEarlyResponse.code())
|
||||
.stream_stop_sending(Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
request
|
||||
.set_response(
|
||||
|
@ -691,7 +762,7 @@ mod tests {
|
|||
(String::from(":status"), String::from("200")),
|
||||
(String::from("content-length"), String::from("3")),
|
||||
],
|
||||
vec![0x67, 0x68, 0x69],
|
||||
RESPONSE_BODY,
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
@ -716,7 +787,7 @@ mod tests {
|
|||
while let Some(event) = hconn.next_event() {
|
||||
match event {
|
||||
Http3ServerEvent::Headers { .. } => {
|
||||
panic!("We should not have a Data event");
|
||||
panic!("We should not have a Header event");
|
||||
}
|
||||
Http3ServerEvent::Data { .. } => {
|
||||
panic!("We should not have a Data event");
|
||||
|
@ -751,15 +822,7 @@ mod tests {
|
|||
headers,
|
||||
fin,
|
||||
} => {
|
||||
assert_eq!(
|
||||
headers,
|
||||
vec![
|
||||
(String::from(":method"), String::from("GET")),
|
||||
(String::from(":scheme"), String::from("https")),
|
||||
(String::from(":authority"), String::from("something.com")),
|
||||
(String::from(":path"), String::from("/"))
|
||||
]
|
||||
);
|
||||
check_request_header(&headers.unwrap());
|
||||
assert_eq!(fin, false);
|
||||
headers_frames += 1;
|
||||
request
|
||||
|
@ -797,4 +860,89 @@ mod tests {
|
|||
assert_eq!(reset, 1);
|
||||
assert_eq!(stop_sending, 1);
|
||||
}
|
||||
|
||||
// Server: Test that the connection will be closed if the client's control stream
|
||||
// has been reset.
|
||||
#[test]
|
||||
fn test_server_reset_control_stream() {
|
||||
let (mut hconn, mut peer_conn) = connect();
|
||||
peer_conn
|
||||
.conn
|
||||
.stream_reset_send(CLIENT_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
// Server: Test that the connection will be closed if the client side encoder stream
|
||||
// has been reset.
|
||||
#[test]
|
||||
fn test_server_reset_client_side_encoder_stream() {
|
||||
let (mut hconn, mut peer_conn) = connect();
|
||||
peer_conn
|
||||
.conn
|
||||
.stream_reset_send(CLIENT_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
// Server: Test that the connection will be closed if the client side decoder stream
|
||||
// has been reset.
|
||||
#[test]
|
||||
fn test_server_reset_client_side_decoder_stream() {
|
||||
let (mut hconn, mut peer_conn) = connect();
|
||||
peer_conn
|
||||
.conn
|
||||
.stream_reset_send(CLIENT_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
// Server: Test that the connection will be closed if the local control stream
|
||||
// has received a stop_sending.
|
||||
#[test]
|
||||
fn test_client_stop_sending_control_stream() {
|
||||
let (mut hconn, mut peer_conn) = connect();
|
||||
|
||||
peer_conn
|
||||
.conn
|
||||
.stream_stop_sending(SERVER_SIDE_CONTROL_STREAM_ID, Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
// Server: Test that the connection will be closed if the server side encoder stream
|
||||
// has received a stop_sending.
|
||||
#[test]
|
||||
fn test_server_stop_sending_encoder_stream() {
|
||||
let (mut hconn, mut peer_conn) = connect();
|
||||
peer_conn
|
||||
.conn
|
||||
.stream_stop_sending(SERVER_SIDE_ENCODER_STREAM_ID, Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
|
||||
// Server: Test that the connection will be closed if the server side decoder stream
|
||||
// has received a stop_sending.
|
||||
#[test]
|
||||
fn test_server_stop_sending_decoder_stream() {
|
||||
let (mut hconn, mut peer_conn) = connect();
|
||||
peer_conn
|
||||
.conn
|
||||
.stream_stop_sending(SERVER_SIDE_DECODER_STREAM_ID, Error::HttpNoError.code())
|
||||
.unwrap();
|
||||
let out = peer_conn.conn.process(None, now());
|
||||
hconn.process(out.dgram(), now());
|
||||
assert_closed(&mut hconn, &Error::HttpClosedCriticalStream);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,39 +5,59 @@
|
|||
// except according to those terms.
|
||||
|
||||
use crate::connection::Http3State;
|
||||
use crate::recv_message::RecvMessageEvents;
|
||||
use crate::send_message::SendMessageEvents;
|
||||
use crate::Header;
|
||||
use neqo_common::matches;
|
||||
use neqo_transport::AppError;
|
||||
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::rc::Rc;
|
||||
|
||||
#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, Clone)]
|
||||
pub enum Http3ServerConnEvent {
|
||||
pub(crate) enum Http3ServerConnEvent {
|
||||
/// Headers are ready.
|
||||
Headers {
|
||||
stream_id: u64,
|
||||
headers: Vec<Header>,
|
||||
headers: Option<Vec<Header>>,
|
||||
fin: bool,
|
||||
},
|
||||
/// Request data is ready.
|
||||
Data {
|
||||
stream_id: u64,
|
||||
data: Vec<u8>,
|
||||
fin: bool,
|
||||
},
|
||||
/// Peer reset the stream.
|
||||
Reset { stream_id: u64, error: AppError },
|
||||
DataReadable { stream_id: u64 },
|
||||
//TODO: This is never used. Do we need it?
|
||||
// Peer reset the stream.
|
||||
//Reset { stream_id: u64, error: AppError },
|
||||
/// Connection state change.
|
||||
StateChange(Http3State),
|
||||
}
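A hedged sketch of consuming the reshaped event type; only the variants visible in this hunk are matched, and Http3ServerConnEvents::next_event is assumed to behave as the accessor shown further down in this file:

fn drain_conn_events(events: &Http3ServerConnEvents) {
    while let Some(event) = events.next_event() {
        match event {
            Http3ServerConnEvent::Headers { stream_id, headers, fin } => {
                // `headers` is now Option<Vec<Header>>; the updated http3server
                // tests simply unwrap it for well-formed requests.
                let _ = (stream_id, headers, fin);
            }
            Http3ServerConnEvent::DataReadable { stream_id } => {
                // Body bytes are no longer carried in the event; the owner reads
                // the stream when it sees DataReadable.
                let _ = stream_id;
            }
            _ => {} // StateChange and any remaining variants
        }
    }
}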
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct Http3ServerConnEvents {
|
||||
pub(crate) struct Http3ServerConnEvents {
|
||||
events: Rc<RefCell<VecDeque<Http3ServerConnEvent>>>,
|
||||
}
|
||||
|
||||
impl RecvMessageEvents for Http3ServerConnEvents {
|
||||
/// Add a new `HeaderReady` event.
|
||||
fn header_ready(&self, stream_id: u64, headers: Option<Vec<Header>>, fin: bool) {
|
||||
self.insert(Http3ServerConnEvent::Headers {
|
||||
stream_id,
|
||||
headers,
|
||||
fin,
|
||||
});
|
||||
}
|
||||
|
||||
/// Add a new `DataReadable` event
|
||||
fn data_readable(&self, stream_id: u64) {
|
||||
self.insert(Http3ServerConnEvent::DataReadable { stream_id });
|
||||
}
|
||||
}
|
||||
|
||||
impl SendMessageEvents for Http3ServerConnEvents {
|
||||
fn data_writable(&self, _stream_id: u64) {
|
||||
// Curently not used on the server side.
|
||||
}
|
||||
}
|
||||
|
||||
impl Http3ServerConnEvents {
|
||||
fn insert(&self, event: Http3ServerConnEvent) {
|
||||
self.events.borrow_mut().push_back(event);
|
||||
|
@ -50,10 +70,6 @@ impl Http3ServerConnEvents {
|
|||
self.events.borrow_mut().retain(|evt| !f(evt))
|
||||
}
|
||||
|
||||
pub fn events(&self) -> impl Iterator<Item = Http3ServerConnEvent> {
|
||||
self.events.replace(VecDeque::new()).into_iter()
|
||||
}
|
||||
|
||||
pub fn has_events(&self) -> bool {
|
||||
!self.events.borrow().is_empty()
|
||||
}
|
||||
|
@ -62,26 +78,6 @@ impl Http3ServerConnEvents {
|
|||
self.events.borrow_mut().pop_front()
|
||||
}
|
||||
|
||||
pub fn headers(&self, stream_id: u64, headers: Vec<Header>, fin: bool) {
|
||||
self.insert(Http3ServerConnEvent::Headers {
|
||||
stream_id,
|
||||
headers,
|
||||
fin,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn data(&self, stream_id: u64, data: Vec<u8>, fin: bool) {
|
||||
self.insert(Http3ServerConnEvent::Data {
|
||||
stream_id,
|
||||
data,
|
||||
fin,
|
||||
});
|
||||
}
|
||||
|
||||
pub fn reset(&self, stream_id: u64, error: AppError) {
|
||||
self.insert(Http3ServerConnEvent::Reset { stream_id, error });
|
||||
}
|
||||
|
||||
pub fn connection_state_change(&self, state: Http3State) {
|
||||
self.insert(Http3ServerConnEvent::StateChange(state));
|
||||
}
|
||||
|
@ -89,7 +85,7 @@ impl Http3ServerConnEvents {
|
|||
pub fn remove_events_for_stream_id(&self, stream_id: u64) {
|
||||
self.remove(|evt| {
|
||||
matches!(evt,
|
||||
Http3ServerConnEvent::Reset { stream_id: x, .. } if *x == stream_id)
|
||||
Http3ServerConnEvent::Headers { stream_id: x, .. } | Http3ServerConnEvent::DataReadable { stream_id: x, .. } if *x == stream_id)
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,6 +4,8 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
|
||||
use crate::connection::Http3State;
|
||||
use crate::connection_server::Http3ServerHandler;
|
||||
use crate::{Header, Res};
|
||||
|
@ -34,7 +36,7 @@ impl ::std::fmt::Display for ClientRequestStream {
|
|||
}
|
||||
|
||||
impl ClientRequestStream {
|
||||
pub fn new(
|
||||
pub(crate) fn new(
|
||||
conn: ActiveConnectionRef,
|
||||
handler: Rc<RefCell<Http3ServerHandler>>,
|
||||
stream_id: u64,
|
||||
|
@ -45,13 +47,16 @@ impl ClientRequestStream {
|
|||
stream_id,
|
||||
}
|
||||
}
|
||||
pub fn set_response(&mut self, headers: &[Header], data: Vec<u8>) -> Res<()> {
|
||||
|
||||
/// Supply a response to a request.
|
||||
pub fn set_response(&mut self, headers: &[Header], data: &[u8]) -> Res<()> {
|
||||
qinfo!([self], "Set new response.");
|
||||
self.handler
|
||||
.borrow_mut()
|
||||
.set_response(self.stream_id, headers, data)
|
||||
}
|
||||
|
||||
/// Ask the peer to stop sending a request.
|
||||
pub fn stream_stop_sending(&mut self, app_error: AppError) -> Res<()> {
|
||||
qdebug!(
|
||||
[self],
|
||||
|
@ -65,6 +70,7 @@ impl ClientRequestStream {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Reset a stream/request.
|
||||
pub fn stream_reset(&mut self, app_error: AppError) -> Res<()> {
|
||||
qdebug!([self], "reset error:{}.", app_error);
|
||||
self.handler.borrow_mut().stream_reset(
|
||||
|
@ -80,7 +86,7 @@ pub enum Http3ServerEvent {
|
|||
/// Headers are ready.
|
||||
Headers {
|
||||
request: ClientRequestStream,
|
||||
headers: Vec<Header>,
|
||||
headers: Option<Vec<Header>>,
|
||||
fin: bool,
|
||||
},
|
||||
/// Request data is ready.
|
||||
|
@ -106,19 +112,28 @@ impl Http3ServerEvents {
|
|||
self.events.borrow_mut().push_back(event);
|
||||
}
|
||||
|
||||
/// Take all events
|
||||
pub fn events(&self) -> impl Iterator<Item = Http3ServerEvent> {
|
||||
self.events.replace(VecDeque::new()).into_iter()
|
||||
}
|
||||
|
||||
/// Whether there is a request pending.
|
||||
pub fn has_events(&self) -> bool {
|
||||
!self.events.borrow().is_empty()
|
||||
}
|
||||
|
||||
/// Take the next event if present.
|
||||
pub fn next_event(&self) -> Option<Http3ServerEvent> {
|
||||
self.events.borrow_mut().pop_front()
|
||||
}
|
||||
|
||||
pub fn headers(&self, request: ClientRequestStream, headers: Vec<Header>, fin: bool) {
|
||||
/// Insert a `Headers` event.
|
||||
pub(crate) fn headers(
|
||||
&self,
|
||||
request: ClientRequestStream,
|
||||
headers: Option<Vec<Header>>,
|
||||
fin: bool,
|
||||
) {
|
||||
self.insert(Http3ServerEvent::Headers {
|
||||
request,
|
||||
headers,
|
||||
|
@ -126,11 +141,13 @@ impl Http3ServerEvents {
|
|||
});
|
||||
}
|
||||
|
||||
pub fn connection_state_change(&self, conn: ActiveConnectionRef, state: Http3State) {
|
||||
/// Insert a `StateChange` event.
|
||||
pub(crate) fn connection_state_change(&self, conn: ActiveConnectionRef, state: Http3State) {
|
||||
self.insert(Http3ServerEvent::StateChange { conn, state });
|
||||
}
|
||||
|
||||
pub fn data(&self, request: ClientRequestStream, data: Vec<u8>, fin: bool) {
|
||||
/// Insert a `Data` event.
|
||||
pub(crate) fn data(&self, request: ClientRequestStream, data: Vec<u8>, fin: bool) {
|
||||
self.insert(Http3ServerEvent::Data { request, data, fin });
|
||||
}
|
||||
}
|
||||
|
|
|
@ -4,11 +4,13 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
|
||||
use neqo_common::{qdebug, Decoder, IncrementalDecoder, IncrementalDecoderResult};
|
||||
use neqo_transport::Connection;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct NewStreamTypeReader {
|
||||
pub(crate) struct NewStreamTypeReader {
|
||||
reader: IncrementalDecoder,
|
||||
fin: bool,
|
||||
}
|
||||
|
|
|
@ -1,303 +0,0 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::hframe::HFrame;
|
||||
|
||||
use crate::client_events::Http3ClientEvents;
|
||||
use crate::connection::Http3Transaction;
|
||||
use crate::response_stream::ResponseStream;
|
||||
use crate::Header;
|
||||
use neqo_common::{qinfo, Encoder};
|
||||
use neqo_qpack::decoder::QPackDecoder;
|
||||
use neqo_qpack::encoder::QPackEncoder;
|
||||
use neqo_transport::Connection;
|
||||
|
||||
use crate::{Error, Res};
|
||||
use std::cmp::min;
|
||||
|
||||
const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2
|
||||
const MAX_DATA_HEADER_SIZE_2_LIMIT: usize = MAX_DATA_HEADER_SIZE_2 + 3; // 63 + 3 (size of the next buffer data frame header)
|
||||
const MAX_DATA_HEADER_SIZE_3: usize = (1 << 14) - 1; // Maximal amount of data with DATA frame header size 3
|
||||
const MAX_DATA_HEADER_SIZE_3_LIMIT: usize = MAX_DATA_HEADER_SIZE_3 + 5; // 16383 + 5 (size of the next buffer data frame header)
|
||||
const MAX_DATA_HEADER_SIZE_5: usize = (1 << 30) - 1; // Maximal amount of data with DATA frame header size 5
|
||||
const MAX_DATA_HEADER_SIZE_5_LIMIT: usize = MAX_DATA_HEADER_SIZE_5 + 9; // 1073741823 + 9 (size of the next buffer data frame header)
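The four constants encode the QUIC varint ladder for DATA frame headers: one byte of frame type plus a 1-, 2-, 4- or 8-byte length encoding. A standalone sketch of the size rule they capture (illustrative, not part of the original file):

// Size in bytes of an HTTP/3 DATA frame header for a payload of `len` bytes.
fn data_frame_header_size(len: u64) -> usize {
    1 + match len {
        0..=0x3f => 1,             // 6-bit varint
        0x40..=0x3fff => 2,        // 14-bit varint
        0x4000..=0x3fff_ffff => 4, // 30-bit varint
        _ => 8,                    // 62-bit varint
    }
}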
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
struct Request {
|
||||
method: String,
|
||||
scheme: String,
|
||||
host: String,
|
||||
path: String,
|
||||
headers: Vec<Header>,
|
||||
buf: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl Request {
|
||||
pub fn new(method: &str, scheme: &str, host: &str, path: &str, headers: &[Header]) -> Self {
|
||||
let mut r = Self {
|
||||
method: method.to_owned(),
|
||||
scheme: scheme.to_owned(),
|
||||
host: host.to_owned(),
|
||||
path: path.to_owned(),
|
||||
headers: Vec::new(),
|
||||
buf: None,
|
||||
};
|
||||
r.headers.push((":method".into(), method.to_owned()));
|
||||
r.headers.push((":scheme".into(), r.scheme.clone()));
|
||||
r.headers.push((":authority".into(), r.host.clone()));
|
||||
r.headers.push((":path".into(), r.path.clone()));
|
||||
r.headers.extend_from_slice(headers);
|
||||
r
|
||||
}
|
||||
|
||||
pub fn ensure_encoded(&mut self, encoder: &mut QPackEncoder, stream_id: u64) {
|
||||
if self.buf.is_some() {
|
||||
return;
|
||||
}
|
||||
|
||||
qinfo!([self], "Encoding headers for {}/{}", self.host, self.path);
|
||||
let header_block = encoder.encode_header_block(&self.headers, stream_id);
|
||||
let f = HFrame::Headers {
|
||||
header_block: header_block.to_vec(),
|
||||
};
|
||||
let mut d = Encoder::default();
|
||||
f.encode(&mut d);
|
||||
self.buf = Some(d.into());
|
||||
}
|
||||
|
||||
pub fn send(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
encoder: &mut QPackEncoder,
|
||||
stream_id: u64,
|
||||
) -> Res<bool> {
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
self.ensure_encoded(encoder, stream_id);
|
||||
if let Some(buf) = &mut self.buf {
|
||||
let sent = conn.stream_send(stream_id, &buf)?;
|
||||
qinfo!([label], "{} bytes sent", sent);
|
||||
|
||||
if sent == buf.len() {
|
||||
qinfo!([label], "done sending request");
|
||||
Ok(true)
|
||||
} else {
|
||||
let b = buf.split_off(sent);
|
||||
self.buf = Some(b);
|
||||
Ok(false)
|
||||
}
|
||||
} else {
|
||||
panic!("We must have buffer in this state")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for Request {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "Request {} {}/{}", self.method, self.host, self.path)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Transaction send states:
|
||||
* SendingHeaders : sending headers. From here we may switch to SendingData
|
||||
* or Closed (if the app does not want to send data and
|
||||
* has alreadyclosed the send stream).
|
||||
* SendingData : We are sending request data until the app closes the stream.
|
||||
* Closed
|
||||
*/
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
enum TransactionSendState {
|
||||
SendingHeaders { request: Request, fin: bool },
|
||||
SendingData,
|
||||
Closed,
|
||||
}
|
||||
|
||||
// This is used for normal request/responses.
|
||||
#[derive(Debug)]
|
||||
pub struct TransactionClient {
|
||||
send_state: TransactionSendState,
|
||||
response_stream: ResponseStream,
|
||||
stream_id: u64,
|
||||
conn_events: Http3ClientEvents,
|
||||
}
|
||||
|
||||
impl TransactionClient {
|
||||
pub fn new(
|
||||
stream_id: u64,
|
||||
method: &str,
|
||||
scheme: &str,
|
||||
host: &str,
|
||||
path: &str,
|
||||
headers: &[Header],
|
||||
conn_events: Http3ClientEvents,
|
||||
) -> Self {
|
||||
qinfo!("Create a request stream_id={}", stream_id);
|
||||
Self {
|
||||
send_state: TransactionSendState::SendingHeaders {
|
||||
request: Request::new(method, scheme, host, path, headers),
|
||||
fin: false,
|
||||
},
|
||||
response_stream: ResponseStream::new(stream_id, conn_events.clone()),
|
||||
stream_id,
|
||||
conn_events,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn send_request_body(&mut self, conn: &mut Connection, buf: &[u8]) -> Res<usize> {
|
||||
qinfo!(
|
||||
[self],
|
||||
"send_request_body: send_state={:?} len={}",
|
||||
self.send_state,
|
||||
buf.len()
|
||||
);
|
||||
match self.send_state {
|
||||
TransactionSendState::SendingHeaders { .. } => Ok(0),
|
||||
TransactionSendState::SendingData => {
|
||||
let available = conn.stream_avail_send_space(self.stream_id)? as usize;
|
||||
if available <= 2 {
|
||||
return Ok(0);
|
||||
}
|
||||
let to_send;
|
||||
if available <= MAX_DATA_HEADER_SIZE_2_LIMIT {
|
||||
// 63 + 3
|
||||
to_send = min(min(buf.len(), available - 2), MAX_DATA_HEADER_SIZE_2);
|
||||
} else if available <= MAX_DATA_HEADER_SIZE_3_LIMIT {
|
||||
// 16383 + 5
|
||||
to_send = min(min(buf.len(), available - 3), MAX_DATA_HEADER_SIZE_3);
|
||||
} else if available <= MAX_DATA_HEADER_SIZE_5 {
|
||||
// 1073741823 + 9
|
||||
to_send = min(min(buf.len(), available - 5), MAX_DATA_HEADER_SIZE_5_LIMIT);
|
||||
} else {
|
||||
to_send = min(buf.len(), available - 9);
|
||||
}
|
||||
|
||||
qinfo!(
|
||||
[self],
|
||||
"send_request_body: available={} to_send={}.",
|
||||
available,
|
||||
to_send
|
||||
);
|
||||
|
||||
let data_frame = HFrame::Data {
|
||||
len: to_send as u64,
|
||||
};
|
||||
let mut enc = Encoder::default();
|
||||
data_frame.encode(&mut enc);
|
||||
match conn.stream_send(self.stream_id, &enc) {
|
||||
Ok(sent) => {
|
||||
debug_assert_eq!(sent, enc.len());
|
||||
}
|
||||
Err(e) => return Err(Error::TransportError(e)),
|
||||
}
|
||||
match conn.stream_send(self.stream_id, &buf[..to_send]) {
|
||||
Ok(sent) => Ok(sent),
|
||||
Err(e) => Err(Error::TransportError(e)),
|
||||
}
|
||||
}
|
||||
TransactionSendState::Closed => Err(Error::AlreadyClosed),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_sending_closed(&self) -> bool {
|
||||
match self.send_state {
|
||||
TransactionSendState::SendingHeaders { fin, .. } => fin,
|
||||
TransactionSendState::SendingData => false,
|
||||
_ => true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read_response_headers(&mut self) -> Res<(Vec<Header>, bool)> {
|
||||
self.response_stream.read_response_headers()
|
||||
}
|
||||
|
||||
pub fn read_response_data(
|
||||
&mut self,
|
||||
conn: &mut Connection,
|
||||
buf: &mut [u8],
|
||||
) -> Res<(usize, bool)> {
|
||||
self.response_stream.read_response_data(conn, buf)
|
||||
}
|
||||
|
||||
pub fn is_state_sending_data(&self) -> bool {
|
||||
self.send_state == TransactionSendState::SendingData
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for TransactionClient {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "TransactionClient {}", self.stream_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl Http3Transaction for TransactionClient {
|
||||
fn send(&mut self, conn: &mut Connection, encoder: &mut QPackEncoder) -> Res<()> {
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
if let TransactionSendState::SendingHeaders {
|
||||
ref mut request,
|
||||
fin,
|
||||
} = self.send_state
|
||||
{
|
||||
if request.send(conn, encoder, self.stream_id)? {
|
||||
if fin {
|
||||
conn.stream_close_send(self.stream_id)?;
|
||||
self.send_state = TransactionSendState::Closed;
|
||||
qinfo!([label], "done sending request");
|
||||
} else {
|
||||
self.send_state = TransactionSendState::SendingData;
|
||||
self.conn_events.data_writable(self.stream_id);
|
||||
qinfo!([label], "change to state SendingData");
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn receive(&mut self, conn: &mut Connection, decoder: &mut QPackDecoder) -> Res<()> {
|
||||
self.response_stream.receive(conn, decoder)
|
||||
}
|
||||
|
||||
fn has_data_to_send(&self) -> bool {
|
||||
if let TransactionSendState::SendingHeaders { .. } = self.send_state {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
fn reset_receiving_side(&mut self) {
|
||||
self.response_stream.close();
|
||||
}
|
||||
|
||||
fn stop_sending(&mut self) {
|
||||
self.send_state = TransactionSendState::Closed;
|
||||
}
|
||||
|
||||
fn done(&self) -> bool {
|
||||
self.send_state == TransactionSendState::Closed && self.response_stream.is_closed()
|
||||
}
|
||||
|
||||
fn close_send(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
match self.send_state {
|
||||
TransactionSendState::SendingHeaders { ref mut fin, .. } => {
|
||||
*fin = true;
|
||||
}
|
||||
_ => {
|
||||
self.send_state = TransactionSendState::Closed;
|
||||
conn.stream_close_send(self.stream_id)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -1,280 +0,0 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::connection::Http3Transaction;
|
||||
use crate::hframe::{HFrame, HFrameReader};
|
||||
use crate::server_connection_events::Http3ServerConnEvents;
|
||||
use crate::Header;
|
||||
use crate::{Error, Res};
|
||||
use neqo_common::{matches, qdebug, qinfo, qtrace, Encoder};
|
||||
use neqo_qpack::decoder::QPackDecoder;
|
||||
use neqo_qpack::encoder::QPackEncoder;
|
||||
use neqo_transport::Connection;
|
||||
use std::mem;
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
enum TransactionRecvState {
|
||||
WaitingForHeaders,
|
||||
DecodingHeaders { header_block: Vec<u8>, fin: bool },
|
||||
WaitingForData,
|
||||
ReadingData { remaining_data_len: usize },
|
||||
Closed,
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Debug)]
|
||||
enum TransactionSendState {
|
||||
Initial,
|
||||
SendingResponse { buf: Vec<u8> },
|
||||
Closed,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct TransactionServer {
|
||||
recv_state: TransactionRecvState,
|
||||
send_state: TransactionSendState,
|
||||
stream_id: u64,
|
||||
frame_reader: HFrameReader,
|
||||
conn_events: Http3ServerConnEvents,
|
||||
}
|
||||
|
||||
impl TransactionServer {
|
||||
pub fn new(stream_id: u64, conn_events: Http3ServerConnEvents) -> Self {
|
||||
qinfo!("Create a request stream_id={}", stream_id);
|
||||
Self {
|
||||
recv_state: TransactionRecvState::WaitingForHeaders,
|
||||
send_state: TransactionSendState::Initial,
|
||||
stream_id,
|
||||
frame_reader: HFrameReader::new(),
|
||||
conn_events,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_response(&mut self, headers: &[Header], data: Vec<u8>, encoder: &mut QPackEncoder) {
|
||||
qdebug!([self], "Encoding headers");
|
||||
let header_block = encoder.encode_header_block(&headers, self.stream_id);
|
||||
let hframe = HFrame::Headers {
|
||||
header_block: header_block.to_vec(),
|
||||
};
|
||||
let mut d = Encoder::default();
|
||||
hframe.encode(&mut d);
|
||||
if !data.is_empty() {
|
||||
qdebug!([self], "Encoding data");
|
||||
let d_frame = HFrame::Data {
|
||||
len: data.len() as u64,
|
||||
};
|
||||
d_frame.encode(&mut d);
|
||||
d.encode(&data);
|
||||
}
|
||||
|
||||
self.send_state = TransactionSendState::SendingResponse { buf: d.into() };
|
||||
}
|
||||
|
||||
fn recv_frame(&mut self, conn: &mut Connection) -> Res<(Option<HFrame>, bool)> {
|
||||
qtrace!([self], "receiving a frame");
|
||||
let fin = self.frame_reader.receive(conn, self.stream_id)?;
|
||||
if !self.frame_reader.done() {
|
||||
Ok((None, fin))
|
||||
} else {
|
||||
qinfo!([self], "A new frame has been received.");
|
||||
Ok((Some(self.frame_reader.get_frame()?), fin))
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_data_frame(&mut self, len: u64, fin: bool) -> Res<()> {
|
||||
qinfo!([self], "A new data frame len={} fin={}", len, fin);
|
||||
if len > 0 {
|
||||
if fin {
|
||||
return Err(Error::HttpFrameError);
|
||||
}
|
||||
self.recv_state = TransactionRecvState::ReadingData {
|
||||
remaining_data_len: len as usize,
|
||||
};
|
||||
} else if fin {
|
||||
self.conn_events.data(self.stream_id, Vec::new(), true);
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for TransactionServer {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "TransactionServer {}", self.stream_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl Http3Transaction for TransactionServer {
|
||||
fn send(&mut self, conn: &mut Connection, _encoder: &mut QPackEncoder) -> Res<()> {
|
||||
qtrace!([self], "Sending response.");
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
if let TransactionSendState::SendingResponse { ref mut buf } = self.send_state {
|
||||
let sent = conn.stream_send(self.stream_id, &buf[..])?;
|
||||
qinfo!([label], "{} bytes sent", sent);
|
||||
if sent == buf.len() {
|
||||
conn.stream_close_send(self.stream_id)?;
|
||||
self.send_state = TransactionSendState::Closed;
|
||||
qinfo!([label], "done sending request");
|
||||
} else {
|
||||
let mut b = buf.split_off(sent);
|
||||
mem::swap(buf, &mut b);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn receive(&mut self, conn: &mut Connection, decoder: &mut QPackDecoder) -> Res<()> {
|
||||
let label = if ::log::log_enabled!(::log::Level::Debug) {
|
||||
format!("{}", self)
|
||||
} else {
|
||||
String::new()
|
||||
};
|
||||
|
||||
loop {
|
||||
qtrace!(
|
||||
[label],
|
||||
"[recv_state={:?}] receiving data.",
|
||||
self.recv_state
|
||||
);
|
||||
match self.recv_state {
|
||||
TransactionRecvState::WaitingForHeaders => {
|
||||
match self.recv_frame(conn)? {
|
||||
(None, true) => {
|
||||
// Stream has been closed without any data, just ignore it.
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
return Ok(());
|
||||
}
|
||||
(None, false) => {
|
||||
// We do not have a complete frame.
|
||||
return Ok(());
|
||||
}
|
||||
(Some(HFrame::Headers { header_block }), fin) => {
|
||||
if !header_block.is_empty() {
|
||||
// Next step decoding headers.
|
||||
self.recv_state =
|
||||
TransactionRecvState::DecodingHeaders { header_block, fin };
|
||||
} else {
|
||||
self.conn_events.headers(self.stream_id, Vec::new(), fin);
|
||||
if fin {
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
return Ok(());
|
||||
} else {
|
||||
self.recv_state = TransactionRecvState::WaitingForData;
|
||||
}
|
||||
}
|
||||
}
|
||||
// The server can only receive a Headers frame at this point.
|
||||
_ => {
|
||||
return Err(Error::HttpFrameUnexpected);
|
||||
}
|
||||
}
|
||||
}
|
||||
TransactionRecvState::DecodingHeaders {
|
||||
ref mut header_block,
|
||||
fin,
|
||||
} => match decoder.decode_header_block(header_block, self.stream_id)? {
|
||||
Some(headers) => {
|
||||
self.conn_events.headers(self.stream_id, headers, fin);
|
||||
if fin {
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
return Ok(());
|
||||
} else {
|
||||
self.recv_state = TransactionRecvState::WaitingForData;
|
||||
}
|
||||
}
|
||||
None => {
|
||||
qinfo!([self], "decoding header is blocked.");
|
||||
return Ok(());
|
||||
}
|
||||
},
|
||||
TransactionRecvState::WaitingForData => {
|
||||
match self.recv_frame(conn)? {
|
||||
(None, true) => {
|
||||
// Inform the app that the stream is done.
|
||||
self.conn_events.data(self.stream_id, Vec::new(), true);
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
return Ok(());
|
||||
}
|
||||
(None, false) => {
|
||||
// Still reading a frame.
|
||||
return Ok(());
|
||||
}
|
||||
(Some(HFrame::Data { len }), fin) => {
|
||||
self.handle_data_frame(len, fin)?;
|
||||
if fin {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(Error::HttpFrameUnexpected);
|
||||
}
|
||||
};
|
||||
}
|
||||
TransactionRecvState::ReadingData {
|
||||
ref mut remaining_data_len,
|
||||
} => {
|
||||
// TODO add available(stream_id) to neqo_transport.
|
||||
assert!(*remaining_data_len > 0);
|
||||
while *remaining_data_len != 0 {
|
||||
let to_read = if *remaining_data_len > 1024 {
|
||||
1024
|
||||
} else {
|
||||
*remaining_data_len
|
||||
};
|
||||
|
||||
let mut data = vec![0x0; to_read];
|
||||
let (amount, fin) = conn.stream_recv(self.stream_id, &mut data[..])?;
|
||||
assert!(amount <= to_read);
|
||||
if amount > 0 {
|
||||
data.truncate(amount);
|
||||
self.conn_events.data(self.stream_id, data, fin);
|
||||
*remaining_data_len -= amount;
|
||||
}
|
||||
if fin {
|
||||
if *remaining_data_len > 0 {
|
||||
return Err(Error::HttpFrameError);
|
||||
}
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
return Ok(());
|
||||
} else if *remaining_data_len == 0 {
|
||||
self.recv_state = TransactionRecvState::WaitingForData;
|
||||
break;
|
||||
}
|
||||
if amount == 0 {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
TransactionRecvState::Closed => {
|
||||
panic!("Stream readable after being closed!");
|
||||
}
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
fn has_data_to_send(&self) -> bool {
|
||||
matches!(self.send_state, TransactionSendState::SendingResponse { .. })
|
||||
}
|
||||
|
||||
fn reset_receiving_side(&mut self) {
|
||||
self.recv_state = TransactionRecvState::Closed;
|
||||
}
|
||||
|
||||
fn stop_sending(&mut self) {}
|
||||
|
||||
fn done(&self) -> bool {
|
||||
self.send_state == TransactionSendState::Closed
|
||||
&& self.recv_state == TransactionRecvState::Closed
|
||||
}
|
||||
|
||||
fn close_send(&mut self, _conn: &mut Connection) -> Res<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
|
@ -24,12 +24,12 @@ fn process_server_events(server: &mut Http3Server) {
|
|||
{
|
||||
assert_eq!(
|
||||
headers,
|
||||
vec![
|
||||
Some(vec![
|
||||
(String::from(":method"), String::from("GET")),
|
||||
(String::from(":scheme"), String::from("https")),
|
||||
(String::from(":authority"), String::from("something.com")),
|
||||
(String::from(":path"), String::from("/"))
|
||||
]
|
||||
])
|
||||
);
|
||||
assert_eq!(fin, true);
|
||||
request
|
||||
|
@ -38,7 +38,7 @@ fn process_server_events(server: &mut Http3Server) {
|
|||
(String::from(":status"), String::from("200")),
|
||||
(String::from("content-length"), String::from("3")),
|
||||
],
|
||||
RESPONSE_DATA.to_vec(),
|
||||
RESPONSE_DATA,
|
||||
)
|
||||
.unwrap();
|
||||
request_found = true;
|
||||
|
@ -52,18 +52,15 @@ fn process_client_events(conn: &mut Http3Client) {
|
|||
let mut response_data_found = false;
|
||||
while let Some(event) = conn.next_event() {
|
||||
match event {
|
||||
Http3ClientEvent::HeaderReady { stream_id } => {
|
||||
let h = conn.read_response_headers(stream_id);
|
||||
Http3ClientEvent::HeaderReady { headers, fin, .. } => {
|
||||
assert_eq!(
|
||||
h,
|
||||
Ok((
|
||||
vec![
|
||||
(String::from(":status"), String::from("200")),
|
||||
(String::from("content-length"), String::from("3")),
|
||||
],
|
||||
false
|
||||
))
|
||||
headers,
|
||||
Some(vec![
|
||||
(String::from(":status"), String::from("200")),
|
||||
(String::from("content-length"), String::from("3")),
|
||||
])
|
||||
);
|
||||
assert_eq!(fin, false);
|
||||
response_header_found = true;
|
||||
}
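// Note on the change in this arm (based only on the hunk above): HeaderReady now
// carries `headers` and `fin` directly as event fields, so the extra call to
// conn.read_response_headers(stream_id) that the old arm needed is gone and the
// handler can assert on the fields in place.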
|
||||
Http3ClientEvent::DataReadable { stream_id } => {
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"562a8d2e09f2e2484280044c9741c671d079941b1e5fa187555b5e165a8813dc","src/decoder.rs":"ab0d1170cbc44ac86a64613e3c63bf940b3d62302124d25d672b33926d800de2","src/decoder_instructions.rs":"52ae03b662200efc40862ddcd924cb0082638f6ccbbf8f7a676340c033392733","src/encoder.rs":"1d1084853eca860b55080c01ec4aa8157ebd95e5f74597a9b83de859d0c8b403","src/encoder_instructions.rs":"66c9276a52179bbe7caaee3a67d1e736098fa063c172ac3e7ce5ec36f8811bdb","src/header_block.rs":"2333ca6f134ca72718c1b685b381aefb4e6e5525ca3bb4402839f0b2e685b23b","src/huffman.rs":"0e49f818d1d186bef2208e47b70650d64a4b5389a3422af004a003e61286a0e0","src/huffman_decode_helper.rs":"1fb1423ee1cc0ceb27d05a515a994835359ae9af58c6885b37806e5e0fbf98c5","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"1fd424fd8219d0244e56b81af168a3023801319f6d1c81c2eb0cac7e1e9e382e","src/prefix.rs":"8807126dbf4e7aff9afe164072c523b8fbfa0be5d4c57bb8291b510c8f0e8ca5","src/qpack_send_buf.rs":"8c1b97d17220038c8e040edb5d5bfc39eb72ab50dfa325016e799740acc47558","src/reader.rs":"ec7203354584d299ac843cc9565442a726f788dbc8a48082ac8e1cad21698547","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/table.rs":"7de73afcd901d1191f20c0a18ccd5f3917f739ac78ded173f684818aa4b55486"},"package":null}
|
||||
{"files":{"Cargo.toml":"23a69b4d2c9d585cb41be938442a7003beb73dd75c4ede25151d7ecf9365fde9","src/decoder.rs":"468b51d03aee3363d7998bb9e8a70d998a8279eb0089702a9fe4423f8c577f1e","src/decoder_instructions.rs":"a8e04dff5fc4c658322a10daadab947dc2e41932c00c3f8d387671a86d0516af","src/encoder.rs":"03cc95df1c57972cd8a19ffb77920f8daac52b4e220eac6182d05671d04e9d58","src/encoder_instructions.rs":"4fab8d6a86482139275f81fd30f9f8c462d6312faf0cdb9143fed1a1a514623f","src/header_block.rs":"3be489222ba59d1c9022d9bf813330a5d68f994fd3fe92fe6934f796cbab42f2","src/huffman.rs":"68fa0bada0c35d20f793980596accdcc548970214841f71789290fc334e51fc1","src/huffman_decode_helper.rs":"089ab15eda49eae035c8ac95f780ec495713ce5918dd1e5a5e4a6206e3c65e66","src/huffman_table.rs":"06fea766a6276ac56c7ee0326faed800a742c15fda1f33bf2513e6cc6a5e6d27","src/lib.rs":"4413c57c8707c5fe47cdc4f40b6e2e9baa4f18fae9cecd895c11e509973ccc2a","src/prefix.rs":"8807126dbf4e7aff9afe164072c523b8fbfa0be5d4c57bb8291b510c8f0e8ca5","src/qlog.rs":"ee4eb799cadcd31e2569690ab1f7cbfb22179033ee7aec7ed217749eb0210fc9","src/qpack_send_buf.rs":"8c1b97d17220038c8e040edb5d5bfc39eb72ab50dfa325016e799740acc47558","src/reader.rs":"4bcea0de1d7dc09ec0cdff364d8f62da54bbbe1f6db55a495f943f31369b4074","src/static_table.rs":"fda9d5c6f38f94b0bf92d3afdf8432dce6e27e189736596e16727090c77b78ec","src/table.rs":"fc927a57c02a7556b0ea7152c713cfd84825b14343f809c7ed0cc2a2a011482f"},"package":null}
|
|
@ -1,6 +1,6 @@
|
|||
[package]
|
||||
name = "neqo-qpack"
|
||||
version = "0.2.4"
|
||||
version = "0.4.0"
|
||||
authors = ["Dragana Damjanovic <dragana.damjano@gmail.com>"]
|
||||
edition = "2018"
|
||||
license = "MIT/Apache-2.0"
|
||||
|
@ -12,6 +12,8 @@ neqo-crypto = { path = "./../neqo-crypto" }
|
|||
log = {version = "0.4.0", default-features = false}
|
||||
static_assertions = "1.1.0"
|
||||
num-traits = "0.2"
|
||||
qlog = "0.2.0"
|
||||
lazy_static = "1.3.0"
|
||||
|
||||
[dev-dependencies]
|
||||
test-fixture = { path = "../test-fixture" }
|
||||
|
|
|
@ -5,13 +5,12 @@
|
|||
// except according to those terms.
|
||||
|
||||
use crate::decoder_instructions::DecoderInstruction;
|
||||
use crate::encoder_instructions::{EncoderInstruction, EncoderInstructionReader};
|
||||
use crate::encoder_instructions::{DecodedEncoderInstruction, EncoderInstructionReader};
|
||||
use crate::header_block::{HeaderDecoder, HeaderDecoderResult};
|
||||
use crate::qpack_send_buf::QPData;
|
||||
use crate::reader::ReceiverConnWrapper;
|
||||
use crate::table::HeaderTable;
|
||||
use crate::Header;
|
||||
use crate::{Error, Res};
|
||||
use crate::{Error, Header, QpackSettings, Res};
|
||||
use neqo_common::qdebug;
|
||||
use neqo_transport::Connection;
|
||||
use std::convert::TryInto;
|
||||
|
@ -22,7 +21,7 @@ pub const QPACK_UNI_STREAM_TYPE_DECODER: u64 = 0x3;
|
|||
pub struct QPackDecoder {
|
||||
instruction_reader: EncoderInstructionReader,
|
||||
table: HeaderTable,
|
||||
total_num_of_inserts: u64,
|
||||
acked_inserts: u64,
|
||||
max_entries: u64,
|
||||
send_buf: QPData,
|
||||
local_stream_id: Option<u64>,
|
||||
|
@ -34,18 +33,18 @@ pub struct QPackDecoder {
|
|||
|
||||
impl QPackDecoder {
|
||||
#[must_use]
|
||||
pub fn new(max_table_size: u64, max_blocked_streams: u16) -> Self {
|
||||
pub fn new(qpack_settings: QpackSettings) -> Self {
|
||||
qdebug!("Decoder: creating a new qpack decoder.");
|
||||
Self {
|
||||
instruction_reader: EncoderInstructionReader::new(),
|
||||
table: HeaderTable::new(false),
|
||||
total_num_of_inserts: 0,
|
||||
max_entries: max_table_size >> 5,
|
||||
acked_inserts: 0,
|
||||
max_entries: qpack_settings.max_table_size_decoder >> 5,
|
||||
send_buf: QPData::default(),
|
||||
local_stream_id: None,
|
||||
remote_stream_id: None,
|
||||
max_table_size,
|
||||
max_blocked_streams: max_blocked_streams.try_into().unwrap(),
|
||||
max_table_size: qpack_settings.max_table_size_decoder,
|
||||
max_blocked_streams: qpack_settings.max_blocked_streams.try_into().unwrap(),
|
||||
blocked_streams: Vec::new(),
|
||||
}
|
||||
}
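// Construction sketch under the new settings-struct API (values are illustrative;
// compare the `connect` fixture in this file's tests module):
//     let mut decoder = QPackDecoder::new(QpackSettings {
//         max_table_size_encoder: 0, // not consulted on the decoder side
//         max_table_size_decoder: 300,
//         max_blocked_streams: 100,
//     });
//     decoder.add_send_stream(local_decoder_stream_id);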
|
||||
|
@ -70,47 +69,59 @@ impl QPackDecoder {
|
|||
/// May return: `ClosedCriticalStream` if stream has been closed or `EncoderStream`
|
||||
/// in case of any other transport error.
|
||||
pub fn receive(&mut self, conn: &mut Connection, stream_id: u64) -> Res<Vec<u64>> {
|
||||
self.read_instructions(conn, stream_id)?;
|
||||
let base = self.table.base();
|
||||
let base_old = self.table.base();
|
||||
self.read_instructions(conn, stream_id)
|
||||
.map_err(|e| map_error(&e))?;
|
||||
let base_new = self.table.base();
|
||||
if base_old == base_new {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
let r = self
|
||||
.blocked_streams
|
||||
.iter()
|
||||
.filter_map(|(id, req)| if *req <= base { Some(*id) } else { None })
|
||||
.filter_map(|(id, req)| if *req <= base_new { Some(*id) } else { None })
|
||||
.collect::<Vec<_>>();
|
||||
self.blocked_streams.retain(|(_, req)| *req > base);
|
||||
self.blocked_streams.retain(|(_, req)| *req > base_new);
|
||||
Ok(r)
|
||||
}
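// Caller-side sketch (hypothetical): the returned Vec lists streams whose header
// blocks were blocked on table inserts and can be retried now that the base moved.
//     let unblocked = decoder.receive(&mut conn, encoder_stream_id)?;
//     for stream_id in unblocked {
//         // re-run decode_header_block() for the buffered header block here
//     }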
|
||||
|
||||
fn read_instructions(&mut self, conn: &mut Connection, stream_id: u64) -> Res<()> {
|
||||
let mut recv = ReceiverConnWrapper::new(conn, stream_id);
|
||||
loop {
|
||||
let mut recv = ReceiverConnWrapper::new(conn, stream_id);
|
||||
match self.instruction_reader.read_instructions(&mut recv)? {
|
||||
Some(instruction) => self.execute_instruction(instruction)?,
|
||||
None => break Ok(()),
|
||||
match self.instruction_reader.read_instructions(&mut recv) {
|
||||
Ok(instruction) => self.execute_instruction(instruction)?,
|
||||
Err(Error::NeedMoreData) => break Ok(()),
|
||||
Err(e) => break Err(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn execute_instruction(&mut self, instruction: EncoderInstruction) -> Res<()> {
|
||||
fn execute_instruction(&mut self, instruction: DecodedEncoderInstruction) -> Res<()> {
|
||||
match instruction {
|
||||
EncoderInstruction::Capacity { value } => self.set_capacity(value)?,
|
||||
EncoderInstruction::InsertWithNameRefStatic { index, value } => {
|
||||
self.table.insert_with_name_ref(true, index, &value)?;
|
||||
self.total_num_of_inserts += 1;
|
||||
DecodedEncoderInstruction::Capacity { value } => self.set_capacity(value)?,
|
||||
DecodedEncoderInstruction::InsertWithNameRefStatic { index, value } => {
|
||||
self.table
|
||||
.insert_with_name_ref(true, index, &value)
|
||||
.map_err(|_| Error::EncoderStream)?;
|
||||
}
|
||||
EncoderInstruction::InsertWithNameRefDynamic { index, value } => {
|
||||
self.table.insert_with_name_ref(false, index, &value)?;
|
||||
self.total_num_of_inserts += 1;
|
||||
DecodedEncoderInstruction::InsertWithNameRefDynamic { index, value } => {
|
||||
self.table
|
||||
.insert_with_name_ref(false, index, &value)
|
||||
.map_err(|_| Error::EncoderStream)?;
|
||||
}
|
||||
EncoderInstruction::InsertWithNameLiteral { name, value } => {
|
||||
self.table.insert(&name, &value).map(|_| ())?;
|
||||
self.total_num_of_inserts += 1;
|
||||
DecodedEncoderInstruction::InsertWithNameLiteral { name, value } => {
|
||||
self.table
|
||||
.insert(&name, &value)
|
||||
.map(|_| ())
|
||||
.map_err(|_| Error::EncoderStream)?;
|
||||
}
|
||||
EncoderInstruction::Duplicate { index } => {
|
||||
self.table.duplicate(index)?;
|
||||
self.total_num_of_inserts += 1;
|
||||
DecodedEncoderInstruction::Duplicate { index } => {
|
||||
self.table
|
||||
.duplicate(index)
|
||||
.map_err(|_| Error::EncoderStream)?;
|
||||
}
|
||||
EncoderInstruction::NoInstruction => {
|
||||
DecodedEncoderInstruction::NoInstruction => {
|
||||
unreachable!("This can be call only with an instruction.")
|
||||
}
|
||||
}
|
||||
|
@ -122,18 +133,13 @@ impl QPackDecoder {
|
|||
if cap > self.max_table_size {
|
||||
return Err(Error::EncoderStream);
|
||||
}
|
||||
self.table
|
||||
.set_capacity(cap)
|
||||
.map_err(|_| Error::EncoderStream)
|
||||
self.table.set_capacity(cap)
|
||||
}
|
||||
|
||||
fn header_ack(&mut self, stream_id: u64, required_inserts: u64) {
|
||||
DecoderInstruction::HeaderAck { stream_id }.marshal(&mut self.send_buf);
|
||||
if required_inserts > self.table.get_acked_inserts_cnt() {
|
||||
let ack_increment_delta = required_inserts - self.table.get_acked_inserts_cnt();
|
||||
self.table
|
||||
.increment_acked(ack_increment_delta)
|
||||
.expect("This should never happen");
|
||||
if required_inserts > self.acked_inserts {
|
||||
self.acked_inserts = required_inserts;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -142,30 +148,22 @@ impl QPackDecoder {
|
|||
}
|
||||
|
||||
/// # Errors
|
||||
/// May return DecoderStream in case of any transport error.
|
||||
/// May return an error in case of any transport error. TODO: define transport errors.
|
||||
pub fn send(&mut self, conn: &mut Connection) -> Res<()> {
|
||||
// Encode increment instruction if needed.
|
||||
let increment = self.total_num_of_inserts - self.table.get_acked_inserts_cnt();
|
||||
let increment = self.table.base() - self.acked_inserts;
|
||||
if increment > 0 {
|
||||
DecoderInstruction::InsertCountIncrement { increment }.marshal(&mut self.send_buf);
|
||||
self.table
|
||||
.increment_acked(increment)
|
||||
.expect("This should never happen");
|
||||
self.acked_inserts = self.table.base();
|
||||
}
|
||||
if self.send_buf.len() == 0 {
|
||||
Ok(())
|
||||
} else if let Some(stream_id) = self.local_stream_id {
|
||||
match conn.stream_send(stream_id, &self.send_buf[..]) {
|
||||
Err(_) => Err(Error::DecoderStream),
|
||||
Ok(r) => {
|
||||
qdebug!([self], "{} bytes sent.", r);
|
||||
self.send_buf.read(r as usize);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Ok(())
|
||||
if self.send_buf.len() != 0 && self.local_stream_id.is_some() {
|
||||
let r = conn
|
||||
.stream_send(self.local_stream_id.unwrap(), &self.send_buf[..])
|
||||
.map_err(|_| Error::DecoderStream)?;
|
||||
qdebug!([self], "{} bytes sent.", r);
|
||||
self.send_buf.read(r as usize);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// This function returns None if the stream is blocked waiting for table insertions.
|
||||
|
@ -176,12 +174,8 @@ impl QPackDecoder {
|
|||
qdebug!([self], "decode header block.");
|
||||
let mut decoder = HeaderDecoder::new(buf);
|
||||
|
||||
match decoder.decode_header_block(
|
||||
&self.table,
|
||||
self.max_entries,
|
||||
self.total_num_of_inserts,
|
||||
)? {
|
||||
HeaderDecoderResult::Blocked(req_insert_cnt) => {
|
||||
match decoder.decode_header_block(&self.table, self.max_entries, self.table.base()) {
|
||||
Ok(HeaderDecoderResult::Blocked(req_insert_cnt)) => {
|
||||
self.blocked_streams.push((stream_id, req_insert_cnt));
|
||||
if self.blocked_streams.len() > self.max_blocked_streams {
|
||||
Err(Error::DecompressionFailed)
|
||||
|
@ -189,12 +183,13 @@ impl QPackDecoder {
|
|||
Ok(None)
|
||||
}
|
||||
}
|
||||
HeaderDecoderResult::Headers(h) => {
|
||||
Ok(HeaderDecoderResult::Headers(h)) => {
|
||||
if decoder.get_req_insert_cnt() != 0 {
|
||||
self.header_ack(stream_id, decoder.get_req_insert_cnt());
|
||||
}
|
||||
Ok(Some(h))
|
||||
}
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -224,6 +219,16 @@ impl QPackDecoder {
|
|||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn local_stream_id(&self) -> Option<u64> {
|
||||
self.local_stream_id
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn remote_stream_id(&self) -> Option<u64> {
|
||||
self.remote_stream_id
|
||||
}
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for QPackDecoder {
|
||||
|
@ -232,9 +237,18 @@ impl ::std::fmt::Display for QPackDecoder {
|
|||
}
|
||||
}
|
||||
|
||||
fn map_error(err: &Error) -> Error {
|
||||
if *err == Error::ClosedCriticalStream {
|
||||
Error::ClosedCriticalStream
|
||||
} else {
|
||||
Error::EncoderStream
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{Connection, Error, Header, QPackDecoder, Res};
|
||||
use crate::QpackSettings;
|
||||
use neqo_transport::StreamType;
|
||||
use std::convert::TryInto;
|
||||
use test_fixture::now;
|
||||
|
@ -255,7 +269,11 @@ mod tests {
|
|||
let send_stream_id = conn.stream_create(StreamType::UniDi).unwrap();
|
||||
|
||||
// create a decoder
|
||||
let mut decoder = QPackDecoder::new(300, 100);
|
||||
let mut decoder = QPackDecoder::new(QpackSettings {
|
||||
max_table_size_encoder: 0,
|
||||
max_table_size_decoder: 300,
|
||||
max_blocked_streams: 100,
|
||||
});
|
||||
decoder.add_send_stream(send_stream_id);
|
||||
|
||||
TestDecoder {
|
||||
|
@ -338,7 +356,7 @@ mod tests {
|
|||
test_instruction(
|
||||
0,
|
||||
&[0xc4, 0x04, 0x31, 0x32, 0x33, 0x34],
|
||||
&Err(Error::DecoderStream),
|
||||
&Err(Error::EncoderStream),
|
||||
&[0x03],
|
||||
0,
|
||||
);
|
||||
|
@ -728,4 +746,31 @@ mod tests {
|
|||
|
||||
decode_headers(&mut decoder, HEADER_BLOCK_2, &headers, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_base_larger_than_entry_count() {
|
||||
// Test for issue https://github.com/mozilla/neqo/issues/533
|
||||
// Send instruction that inserts 2 fields into the dynamic table and send a header that
|
||||
// uses base larger than 2.
|
||||
const ENCODER_INST: &[u8] = &[
|
||||
0x4a, 0x6d, 0x79, 0x2d, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x61, 0x09, 0x6d, 0x79,
|
||||
0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x61, 0x4a, 0x6d, 0x79, 0x2d, 0x68, 0x65, 0x61,
|
||||
0x64, 0x65, 0x72, 0x62, 0x09, 0x6d, 0x79, 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x62,
|
||||
];
|
||||
|
||||
const HEADER_BLOCK: &[u8] = &[0x03, 0x03, 0x83, 0x84];
|
||||
|
||||
let headers = vec![
|
||||
(String::from("my-headerb"), String::from("my-valueb")),
|
||||
(String::from("my-headera"), String::from("my-valuea")),
|
||||
];
|
||||
|
||||
let mut decoder = connect();
|
||||
|
||||
assert!(decoder.decoder.set_capacity(200).is_ok());
|
||||
|
||||
recv_instruction(&mut decoder, ENCODER_INST, &Ok(()));
|
||||
|
||||
decode_headers(&mut decoder, HEADER_BLOCK, &headers, 0);
|
||||
}
|
||||
}
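For reference, a hedged sketch of the new constructor call introduced by this patch, mirroring the `connect` fixture above; the stream id is illustrative.

// QPackDecoder is now configured through QpackSettings instead of positional limits.
let mut decoder = QPackDecoder::new(QpackSettings {
    max_table_size_encoder: 0,
    max_table_size_decoder: 300,
    max_blocked_streams: 100,
});
// A real caller passes the id of the unidirectional stream it created; 2 is a placeholder.
decoder.add_send_stream(2);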
|
||||
|
|
|
@ -9,7 +9,7 @@ use crate::prefix::{
|
|||
};
|
||||
use crate::qpack_send_buf::QPData;
|
||||
use crate::reader::{IntReader, ReadByte};
|
||||
use crate::{Error, Res};
|
||||
use crate::Res;
|
||||
use neqo_common::{qdebug, qtrace};
|
||||
use std::mem;
|
||||
|
||||
|
@ -76,52 +76,45 @@ impl DecoderInstructionReader {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn read_instructions<R: ReadByte>(
|
||||
&mut self,
|
||||
recv: &mut R,
|
||||
) -> Res<Option<DecoderInstruction>> {
|
||||
/// ### Errors
|
||||
/// 1) `NeedMoreData` if the reader needs more data
|
||||
/// 2) `ClosedCriticalStream`
|
||||
/// 3) other errors will be translated to `DecoderStream` by the caller of this function.
|
||||
pub fn read_instructions<R: ReadByte>(&mut self, recv: &mut R) -> Res<DecoderInstruction> {
|
||||
qdebug!([self], "read a new instraction");
|
||||
loop {
|
||||
match &mut self.state {
|
||||
DecoderInstructionReaderState::ReadInstruction => match recv.read_byte() {
|
||||
Ok(b) => {
|
||||
self.instruction = DecoderInstruction::get_instruction(b);
|
||||
self.state = DecoderInstructionReaderState::ReadInt {
|
||||
reader: IntReader::make(
|
||||
b,
|
||||
&[
|
||||
DECODER_HEADER_ACK,
|
||||
DECODER_STREAM_CANCELLATION,
|
||||
DECODER_INSERT_COUNT_INCREMENT,
|
||||
],
|
||||
),
|
||||
};
|
||||
}
|
||||
Err(Error::NoMoreData) => break Ok(None),
|
||||
Err(Error::ClosedCriticalStream) => break Err(Error::ClosedCriticalStream),
|
||||
_ => break Err(Error::DecoderStream),
|
||||
},
|
||||
DecoderInstructionReaderState::ReadInt { reader } => match reader.read(recv) {
|
||||
Ok(Some(val)) => {
|
||||
qtrace!([self], "varint read {}", val);
|
||||
match &mut self.instruction {
|
||||
DecoderInstruction::InsertCountIncrement { increment: v }
|
||||
| DecoderInstruction::HeaderAck { stream_id: v }
|
||||
| DecoderInstruction::StreamCancellation { stream_id: v } => {
|
||||
*v = val;
|
||||
self.state = DecoderInstructionReaderState::ReadInstruction;
|
||||
break Ok(Some(mem::replace(
|
||||
&mut self.instruction,
|
||||
DecoderInstruction::NoInstruction,
|
||||
)));
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
DecoderInstructionReaderState::ReadInstruction => {
|
||||
let b = recv.read_byte()?;
|
||||
self.instruction = DecoderInstruction::get_instruction(b);
|
||||
self.state = DecoderInstructionReaderState::ReadInt {
|
||||
reader: IntReader::make(
|
||||
b,
|
||||
&[
|
||||
DECODER_HEADER_ACK,
|
||||
DECODER_STREAM_CANCELLATION,
|
||||
DECODER_INSERT_COUNT_INCREMENT,
|
||||
],
|
||||
),
|
||||
};
|
||||
}
|
||||
DecoderInstructionReaderState::ReadInt { reader } => {
|
||||
let val = reader.read(recv)?;
|
||||
qtrace!([self], "varint read {}", val);
|
||||
match &mut self.instruction {
|
||||
DecoderInstruction::InsertCountIncrement { increment: v }
|
||||
| DecoderInstruction::HeaderAck { stream_id: v }
|
||||
| DecoderInstruction::StreamCancellation { stream_id: v } => {
|
||||
*v = val;
|
||||
self.state = DecoderInstructionReaderState::ReadInstruction;
|
||||
break Ok(mem::replace(
|
||||
&mut self.instruction,
|
||||
DecoderInstruction::NoInstruction,
|
||||
));
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
Ok(None) => break Ok(None),
|
||||
Err(Error::ClosedCriticalStream) => break Err(Error::ClosedCriticalStream),
|
||||
Err(_) => break Err(Error::DecoderStream),
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
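With this change `read_instructions` no longer wraps its result in `Option`: a complete instruction comes back as `Ok`, and a short read surfaces as `Err(Error::NeedMoreData)`. A hedged sketch of the caller side, assuming the crate-internal `TestReceiver` used in the tests below:

let mut recv = TestReceiver::default();
let mut reader = DecoderInstructionReader::new();
match reader.read_instructions(&mut recv) {
    Ok(inst) => println!("decoded instruction {:?}", inst),
    // Not a failure in practice: keep the reader and retry once more bytes arrive.
    Err(Error::NeedMoreData) => {}
    // Fatal: the peer closed a critical stream.
    Err(Error::ClosedCriticalStream) => {}
    // Everything else is translated to Error::DecoderStream by the caller, per the doc comment.
    Err(_) => {}
}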
|
||||
|
@ -130,8 +123,9 @@ impl DecoderInstructionReader {
|
|||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use super::{DecoderInstruction, DecoderInstructionReader, Error, QPData};
|
||||
use super::{DecoderInstruction, DecoderInstructionReader, QPData};
|
||||
use crate::reader::test_receiver::TestReceiver;
|
||||
use crate::Error;
|
||||
|
||||
fn test_encoding_decoding(instruction: DecoderInstruction) {
|
||||
let mut buf = QPData::default();
|
||||
|
@ -140,10 +134,7 @@ mod test {
|
|||
test_receiver.write(&buf);
|
||||
let mut decoder = DecoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder
|
||||
.read_instructions(&mut test_receiver)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
decoder.read_instructions(&mut test_receiver).unwrap(),
|
||||
instruction
|
||||
);
|
||||
}
|
||||
|
@ -167,17 +158,14 @@ mod test {
|
|||
let mut decoder = DecoderInstructionReader::new();
|
||||
for i in 0..buf.len() - 1 {
|
||||
test_receiver.write(&buf[i..=i]);
|
||||
assert!(decoder
|
||||
.read_instructions(&mut test_receiver)
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::NeedMoreData)
|
||||
);
|
||||
}
|
||||
test_receiver.write(&buf[buf.len() - 1..buf.len()]);
|
||||
assert_eq!(
|
||||
decoder
|
||||
.read_instructions(&mut test_receiver)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
decoder.read_instructions(&mut test_receiver).unwrap(),
|
||||
instruction
|
||||
);
|
||||
}
|
||||
|
@ -203,7 +191,7 @@ mod test {
|
|||
let mut decoder = DecoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::DecoderStream)
|
||||
Err(Error::IntegerOverflow)
|
||||
);
|
||||
|
||||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
|
@ -214,7 +202,7 @@ mod test {
|
|||
let mut decoder = DecoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::DecoderStream)
|
||||
Err(Error::IntegerOverflow)
|
||||
);
|
||||
|
||||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
|
@ -225,7 +213,7 @@ mod test {
|
|||
let mut decoder = DecoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::DecoderStream)
|
||||
Err(Error::IntegerOverflow)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -10,21 +10,25 @@ use crate::prefix::{
|
|||
};
|
||||
use crate::qpack_send_buf::QPData;
|
||||
use crate::reader::{IntReader, LiteralReader, ReadByte, Reader};
|
||||
use crate::{Error, Res};
|
||||
use crate::Res;
|
||||
use neqo_common::{matches, qdebug, qtrace};
|
||||
use std::mem;
|
||||
|
||||
// The encoder only uses InsertWithNameLiteral, therefore clippy is complaining about dead_code.
|
||||
// We may decide to use other instructions in the future.
|
||||
// All instructions are used for testing, therefore they are defined.
|
||||
#[allow(dead_code)]
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum EncoderInstruction {
|
||||
pub enum EncoderInstruction<'a> {
|
||||
Capacity { value: u64 },
|
||||
InsertWithNameRefStatic { index: u64, value: Vec<u8> },
|
||||
InsertWithNameRefDynamic { index: u64, value: Vec<u8> },
|
||||
InsertWithNameLiteral { name: Vec<u8>, value: Vec<u8> },
|
||||
InsertWithNameRefStatic { index: u64, value: &'a [u8] },
|
||||
InsertWithNameRefDynamic { index: u64, value: &'a [u8] },
|
||||
InsertWithNameLiteral { name: &'a [u8], value: &'a [u8] },
|
||||
Duplicate { index: u64 },
|
||||
NoInstruction,
|
||||
}
|
||||
|
||||
impl EncoderInstruction {
|
||||
impl<'a> EncoderInstruction<'a> {
|
||||
pub(crate) fn marshal(&self, enc: &mut QPData, use_huffman: bool) {
|
||||
match self {
|
||||
Self::Capacity { value } => {
|
||||
|
@ -59,10 +63,48 @@ enum EncoderInstructionReaderState {
|
|||
Done,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
pub enum DecodedEncoderInstruction {
|
||||
Capacity { value: u64 },
|
||||
InsertWithNameRefStatic { index: u64, value: Vec<u8> },
|
||||
InsertWithNameRefDynamic { index: u64, value: Vec<u8> },
|
||||
InsertWithNameLiteral { name: Vec<u8>, value: Vec<u8> },
|
||||
Duplicate { index: u64 },
|
||||
NoInstruction,
|
||||
}
|
||||
|
||||
impl<'a> From<&'a EncoderInstruction<'a>> for DecodedEncoderInstruction {
|
||||
fn from(inst: &'a EncoderInstruction) -> Self {
|
||||
match inst {
|
||||
EncoderInstruction::Capacity { value } => Self::Capacity { value: *value },
|
||||
EncoderInstruction::InsertWithNameRefStatic { index, value } => {
|
||||
Self::InsertWithNameRefStatic {
|
||||
index: *index,
|
||||
value: value.to_vec(),
|
||||
}
|
||||
}
|
||||
EncoderInstruction::InsertWithNameRefDynamic { index, value } => {
|
||||
Self::InsertWithNameRefDynamic {
|
||||
index: *index,
|
||||
value: value.to_vec(),
|
||||
}
|
||||
}
|
||||
EncoderInstruction::InsertWithNameLiteral { name, value } => {
|
||||
Self::InsertWithNameLiteral {
|
||||
name: name.to_vec(),
|
||||
value: value.to_vec(),
|
||||
}
|
||||
}
|
||||
EncoderInstruction::Duplicate { index } => Self::Duplicate { index: *index },
|
||||
EncoderInstruction::NoInstruction => Self::NoInstruction,
|
||||
}
|
||||
}
|
||||
}
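The wire-side `EncoderInstruction` now borrows its name and value bytes, while the reader returns the owned `DecodedEncoderInstruction`; the `From` impl above bridges the two. A small sketch, with illustrative byte values:

let inst = EncoderInstruction::InsertWithNameLiteral {
    name: &[0x6d, 0x79],  // "my"
    value: &[0x76],       // "v"
};
// Convert the borrowed form into the owned form produced by EncoderInstructionReader.
let owned: DecodedEncoderInstruction = (&inst).into();
assert_eq!(
    owned,
    DecodedEncoderInstruction::InsertWithNameLiteral {
        name: vec![0x6d, 0x79],
        value: vec![0x76],
    }
);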
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct EncoderInstructionReader {
|
||||
state: EncoderInstructionReaderState,
|
||||
instruction: EncoderInstruction,
|
||||
instruction: DecodedEncoderInstruction,
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for EncoderInstructionReader {
|
||||
|
@ -79,156 +121,141 @@ impl EncoderInstructionReader {
|
|||
pub fn new() -> Self {
|
||||
Self {
|
||||
state: EncoderInstructionReaderState::ReadInstruction,
|
||||
instruction: EncoderInstruction::NoInstruction,
|
||||
instruction: DecodedEncoderInstruction::NoInstruction,
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_instruction_from_byte(&mut self, b: u8) {
|
||||
self.instruction = if ENCODER_INSERT_WITH_NAME_REF_STATIC.cmp_prefix(b) {
|
||||
EncoderInstruction::InsertWithNameRefStatic {
|
||||
DecodedEncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 0,
|
||||
value: Vec::new(),
|
||||
}
|
||||
} else if ENCODER_INSERT_WITH_NAME_REF_DYNAMIC.cmp_prefix(b) {
|
||||
EncoderInstruction::InsertWithNameRefDynamic {
|
||||
DecodedEncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 0,
|
||||
value: Vec::new(),
|
||||
}
|
||||
} else if ENCODER_INSERT_WITH_NAME_LITERAL.cmp_prefix(b) {
|
||||
EncoderInstruction::InsertWithNameLiteral {
|
||||
DecodedEncoderInstruction::InsertWithNameLiteral {
|
||||
name: Vec::new(),
|
||||
value: Vec::new(),
|
||||
}
|
||||
} else if ENCODER_CAPACITY.cmp_prefix(b) {
|
||||
EncoderInstruction::Capacity { value: 0 }
|
||||
DecodedEncoderInstruction::Capacity { value: 0 }
|
||||
} else if ENCODER_DUPLICATE.cmp_prefix(b) {
|
||||
EncoderInstruction::Duplicate { index: 0 }
|
||||
DecodedEncoderInstruction::Duplicate { index: 0 }
|
||||
} else {
|
||||
unreachable!("The above patterns match everything.");
|
||||
};
|
||||
qdebug!([self], "instruction decoded");
|
||||
}
|
||||
|
||||
fn decode_instruction_type<T: ReadByte + Reader>(&mut self, recv: &mut T) -> Res<bool> {
|
||||
match recv.read_byte() {
|
||||
Ok(b) => {
|
||||
self.decode_instruction_from_byte(b);
|
||||
match self.instruction {
|
||||
EncoderInstruction::Capacity { .. } | EncoderInstruction::Duplicate { .. } => {
|
||||
self.state = EncoderInstructionReaderState::ReadFirstInt {
|
||||
reader: IntReader::new(b, ENCODER_CAPACITY.len()),
|
||||
}
|
||||
}
|
||||
EncoderInstruction::InsertWithNameRefStatic { .. }
|
||||
| EncoderInstruction::InsertWithNameRefDynamic { .. } => {
|
||||
self.state = EncoderInstructionReaderState::ReadFirstInt {
|
||||
reader: IntReader::new(b, ENCODER_INSERT_WITH_NAME_REF_STATIC.len()),
|
||||
}
|
||||
}
|
||||
EncoderInstruction::InsertWithNameLiteral { .. } => {
|
||||
self.state = EncoderInstructionReaderState::ReadFirstLiteral {
|
||||
reader: LiteralReader::new_with_first_byte(
|
||||
b,
|
||||
ENCODER_INSERT_WITH_NAME_LITERAL.len(),
|
||||
),
|
||||
}
|
||||
}
|
||||
EncoderInstruction::NoInstruction => {
|
||||
unreachable!("We must have instruction at this point.")
|
||||
}
|
||||
fn decode_instruction_type<T: ReadByte + Reader>(&mut self, recv: &mut T) -> Res<()> {
|
||||
let b = recv.read_byte()?;
|
||||
|
||||
self.decode_instruction_from_byte(b);
|
||||
match self.instruction {
|
||||
DecodedEncoderInstruction::Capacity { .. }
|
||||
| DecodedEncoderInstruction::Duplicate { .. } => {
|
||||
self.state = EncoderInstructionReaderState::ReadFirstInt {
|
||||
reader: IntReader::new(b, ENCODER_CAPACITY.len()),
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
Err(Error::NoMoreData) => Ok(false),
|
||||
Err(Error::ClosedCriticalStream) => Err(Error::ClosedCriticalStream),
|
||||
Err(_) => Err(Error::EncoderStream),
|
||||
DecodedEncoderInstruction::InsertWithNameRefStatic { .. }
|
||||
| DecodedEncoderInstruction::InsertWithNameRefDynamic { .. } => {
|
||||
self.state = EncoderInstructionReaderState::ReadFirstInt {
|
||||
reader: IntReader::new(b, ENCODER_INSERT_WITH_NAME_REF_STATIC.len()),
|
||||
}
|
||||
}
|
||||
DecodedEncoderInstruction::InsertWithNameLiteral { .. } => {
|
||||
self.state = EncoderInstructionReaderState::ReadFirstLiteral {
|
||||
reader: LiteralReader::new_with_first_byte(
|
||||
b,
|
||||
ENCODER_INSERT_WITH_NAME_LITERAL.len(),
|
||||
),
|
||||
}
|
||||
}
|
||||
DecodedEncoderInstruction::NoInstruction => {
|
||||
unreachable!("We must have instruction at this point.")
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// ### Errors
|
||||
/// 1) `NeedMoreData` if the reader needs more data
|
||||
/// 2) `ClosedCriticalStream`
|
||||
/// 3) other errors will be translated to `EncoderStream` by the caller of this function.
|
||||
pub fn read_instructions<T: ReadByte + Reader>(
|
||||
&mut self,
|
||||
recv: &mut T,
|
||||
) -> Res<Option<EncoderInstruction>> {
|
||||
) -> Res<DecodedEncoderInstruction> {
|
||||
qdebug!([self], "reading instructions");
|
||||
loop {
|
||||
match &mut self.state {
|
||||
EncoderInstructionReaderState::ReadInstruction => {
|
||||
if !self.decode_instruction_type(recv)? {
|
||||
break Ok(None);
|
||||
self.decode_instruction_type(recv)?
|
||||
}
|
||||
EncoderInstructionReaderState::ReadFirstInt { reader } => {
|
||||
let val = reader.read(recv)?;
|
||||
|
||||
qtrace!([self], "First varint read {}", val);
|
||||
match &mut self.instruction {
|
||||
DecodedEncoderInstruction::Capacity { value: v, .. }
|
||||
| DecodedEncoderInstruction::Duplicate { index: v } => {
|
||||
*v = val;
|
||||
self.state = EncoderInstructionReaderState::Done;
|
||||
}
|
||||
DecodedEncoderInstruction::InsertWithNameRefStatic { index, .. }
|
||||
| DecodedEncoderInstruction::InsertWithNameRefDynamic { index, .. } => {
|
||||
*index = val;
|
||||
self.state = EncoderInstructionReaderState::ReadFirstLiteral {
|
||||
reader: LiteralReader::default(),
|
||||
};
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
}
|
||||
EncoderInstructionReaderState::ReadFirstInt { reader } => match reader.read(recv) {
|
||||
Ok(Some(val)) => {
|
||||
qtrace!([self], "First varint read {}", val);
|
||||
match &mut self.instruction {
|
||||
EncoderInstruction::Capacity { value: v, .. }
|
||||
| EncoderInstruction::Duplicate { index: v } => {
|
||||
*v = val;
|
||||
self.state = EncoderInstructionReaderState::Done;
|
||||
}
|
||||
EncoderInstruction::InsertWithNameRefStatic { index, .. }
|
||||
| EncoderInstruction::InsertWithNameRefDynamic { index, .. } => {
|
||||
*index = val;
|
||||
self.state = EncoderInstructionReaderState::ReadFirstLiteral {
|
||||
reader: LiteralReader::default(),
|
||||
};
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
}
|
||||
Ok(None) => break Ok(None),
|
||||
Err(Error::ClosedCriticalStream) => break Err(Error::ClosedCriticalStream),
|
||||
Err(_) => break Err(Error::EncoderStream),
|
||||
},
|
||||
EncoderInstructionReaderState::ReadFirstLiteral { reader } => {
|
||||
match reader.read(recv) {
|
||||
Ok(Some(val)) => {
|
||||
qtrace!([self], "first literal read {:?}", val);
|
||||
match &mut self.instruction {
|
||||
EncoderInstruction::InsertWithNameRefStatic { value, .. }
|
||||
| EncoderInstruction::InsertWithNameRefDynamic { value, .. } => {
|
||||
*value = val;
|
||||
self.state = EncoderInstructionReaderState::Done;
|
||||
}
|
||||
EncoderInstruction::InsertWithNameLiteral { name, .. } => {
|
||||
*name = val;
|
||||
self.state = EncoderInstructionReaderState::ReadSecondLiteral {
|
||||
reader: LiteralReader::default(),
|
||||
};
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
let val = reader.read(recv)?;
|
||||
|
||||
qtrace!([self], "first literal read {:?}", val);
|
||||
match &mut self.instruction {
|
||||
DecodedEncoderInstruction::InsertWithNameRefStatic { value, .. }
|
||||
| DecodedEncoderInstruction::InsertWithNameRefDynamic { value, .. } => {
|
||||
*value = val;
|
||||
self.state = EncoderInstructionReaderState::Done;
|
||||
}
|
||||
Ok(None) => break Ok(None),
|
||||
Err(Error::ClosedCriticalStream) => break Err(Error::ClosedCriticalStream),
|
||||
Err(_) => break Err(Error::EncoderStream),
|
||||
DecodedEncoderInstruction::InsertWithNameLiteral { name, .. } => {
|
||||
*name = val;
|
||||
self.state = EncoderInstructionReaderState::ReadSecondLiteral {
|
||||
reader: LiteralReader::default(),
|
||||
};
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
}
|
||||
EncoderInstructionReaderState::ReadSecondLiteral { reader } => {
|
||||
match reader.read(recv) {
|
||||
Ok(Some(val)) => {
|
||||
qtrace!([self], "second literal read {:?}", val);
|
||||
match &mut self.instruction {
|
||||
EncoderInstruction::InsertWithNameLiteral { value, .. } => {
|
||||
*value = val;
|
||||
self.state = EncoderInstructionReaderState::Done;
|
||||
}
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
let val = reader.read(recv)?;
|
||||
|
||||
qtrace!([self], "second literal read {:?}", val);
|
||||
match &mut self.instruction {
|
||||
DecodedEncoderInstruction::InsertWithNameLiteral { value, .. } => {
|
||||
*value = val;
|
||||
self.state = EncoderInstructionReaderState::Done;
|
||||
}
|
||||
Ok(None) => break Ok(None),
|
||||
Err(Error::ClosedCriticalStream) => break Err(Error::ClosedCriticalStream),
|
||||
Err(_) => break Err(Error::EncoderStream),
|
||||
_ => unreachable!("This instruction cannot be in this state."),
|
||||
}
|
||||
}
|
||||
EncoderInstructionReaderState::Done => {}
|
||||
}
|
||||
if matches!(self.state, EncoderInstructionReaderState::Done) {
|
||||
self.state = EncoderInstructionReaderState::ReadInstruction;
|
||||
break Ok(Some(mem::replace(
|
||||
break Ok(mem::replace(
|
||||
&mut self.instruction,
|
||||
EncoderInstruction::NoInstruction,
|
||||
)));
|
||||
DecodedEncoderInstruction::NoInstruction,
|
||||
));
|
||||
}
|
||||
}
|
||||
}
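A hedged round-trip sketch of the new API, mirroring `test_encoding_decoding` below: marshal a borrowed instruction into the crate-internal `QPData` buffer, feed it to a `TestReceiver`, and read it back as the owned decoded form.

let inst = EncoderInstruction::Duplicate { index: 7 };
let mut buf = QPData::default();
inst.marshal(&mut buf, false); // false: do not Huffman-encode literals

let mut recv = TestReceiver::default();
recv.write(&buf);
let mut reader = EncoderInstructionReader::new();
assert_eq!(
    reader.read_instructions(&mut recv).unwrap(),
    DecodedEncoderInstruction::Duplicate { index: 7 }
);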
|
||||
|
@ -237,8 +264,9 @@ impl EncoderInstructionReader {
|
|||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use super::{EncoderInstruction, EncoderInstructionReader, Error, QPData};
|
||||
use super::{EncoderInstruction, EncoderInstructionReader, QPData};
|
||||
use crate::reader::test_receiver::TestReceiver;
|
||||
use crate::Error;
|
||||
|
||||
fn test_encoding_decoding(instruction: &EncoderInstruction, use_huffman: bool) {
|
||||
let mut buf = QPData::default();
|
||||
|
@ -247,11 +275,8 @@ mod test {
|
|||
test_receiver.write(&buf);
|
||||
let mut reader = EncoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
reader
|
||||
.read_instructions(&mut test_receiver)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
*instruction
|
||||
reader.read_instructions(&mut test_receiver).unwrap(),
|
||||
instruction.into()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -263,28 +288,28 @@ mod test {
|
|||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
@ -292,43 +317,43 @@ mod test {
|
|||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameLiteral {
|
||||
name: vec![0x62, 0x64, 0x65],
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
name: &[0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding(
|
||||
&EncoderInstruction::InsertWithNameLiteral {
|
||||
name: vec![0x62, 0x64, 0x65],
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
name: &[0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
@ -344,18 +369,15 @@ mod test {
|
|||
let mut decoder = EncoderInstructionReader::new();
|
||||
for i in 0..buf.len() - 1 {
|
||||
test_receiver.write(&buf[i..=i]);
|
||||
assert!(decoder
|
||||
.read_instructions(&mut test_receiver)
|
||||
.unwrap()
|
||||
.is_none());
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::NeedMoreData)
|
||||
);
|
||||
}
|
||||
test_receiver.write(&buf[buf.len() - 1..buf.len()]);
|
||||
assert_eq!(
|
||||
decoder
|
||||
.read_instructions(&mut test_receiver)
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
*instruction
|
||||
decoder.read_instructions(&mut test_receiver).unwrap(),
|
||||
instruction.into()
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -367,28 +389,28 @@ mod test {
|
|||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefStatic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
@ -396,43 +418,43 @@ mod test {
|
|||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 1,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameRefDynamic {
|
||||
index: 10_000,
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameLiteral {
|
||||
name: vec![0x62, 0x64, 0x65],
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
name: &[0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
false,
|
||||
);
|
||||
test_encoding_decoding_slow_reader(
|
||||
&EncoderInstruction::InsertWithNameLiteral {
|
||||
name: vec![0x62, 0x64, 0x65],
|
||||
value: vec![0x62, 0x64, 0x65],
|
||||
name: &[0x62, 0x64, 0x65],
|
||||
value: &[0x62, 0x64, 0x65],
|
||||
},
|
||||
true,
|
||||
);
|
||||
|
@ -451,7 +473,7 @@ mod test {
|
|||
let mut decoder = EncoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::EncoderStream)
|
||||
Err(Error::IntegerOverflow)
|
||||
);
|
||||
|
||||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
|
@ -462,16 +484,16 @@ mod test {
|
|||
let mut decoder = EncoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::EncoderStream)
|
||||
Err(Error::IntegerOverflow)
|
||||
);
|
||||
|
||||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
// EncoderInstruction::InsertWithNameRefStatic with overflow of garbage value.
|
||||
// EncoderInstruction::InsertWithNameRefStatic with a garbage value.
|
||||
test_receiver.write(&[0xc1, 0x81, 0x00]);
|
||||
let mut decoder = EncoderInstructionReader::new();
|
||||
assert_eq!(
|
||||
decoder.read_instructions(&mut test_receiver),
|
||||
Err(Error::EncoderStream)
|
||||
Err(Error::HuffmanDecompressionFailed)
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -192,7 +192,7 @@ impl Deref for HeaderEncoder {
|
|||
}
|
||||
}
|
||||
|
||||
pub struct HeaderDecoder<'a> {
|
||||
pub(crate) struct HeaderDecoder<'a> {
|
||||
buf: ReceiverBufferWrapper<'a>,
|
||||
base: u64,
|
||||
req_insert_cnt: u64,
|
||||
|
@ -225,7 +225,8 @@ impl<'a> HeaderDecoder<'a> {
|
|||
max_entries: u64,
|
||||
total_num_of_inserts: u64,
|
||||
) -> Res<HeaderDecoderResult> {
|
||||
self.read_base(max_entries, total_num_of_inserts)?;
|
||||
self.read_base(max_entries, total_num_of_inserts)
|
||||
.map_err(|_| Error::DecompressionFailed)?;
|
||||
|
||||
if table.base() < self.req_insert_cnt {
|
||||
qtrace!(
|
||||
|
@ -237,31 +238,50 @@ impl<'a> HeaderDecoder<'a> {
|
|||
}
|
||||
let mut h: Vec<Header> = Vec::new();
|
||||
|
||||
loop {
|
||||
if self.buf.done() {
|
||||
qtrace!([self], "done decoding header block.");
|
||||
break Ok(HeaderDecoderResult::Headers(h));
|
||||
}
|
||||
|
||||
let b = self.buf.peek()?;
|
||||
while !self.buf.done() {
|
||||
let b = self.buf.peek().map_err(|_| Error::DecompressionFailed)?;
|
||||
if HEADER_FIELD_INDEX_STATIC.cmp_prefix(b) {
|
||||
h.push(self.read_indexed_static()?);
|
||||
h.push(
|
||||
self.read_indexed_static()
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else if HEADER_FIELD_INDEX_DYNAMIC.cmp_prefix(b) {
|
||||
h.push(self.read_indexed_dynamic(table)?);
|
||||
h.push(
|
||||
self.read_indexed_dynamic(table)
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else if HEADER_FIELD_INDEX_DYNAMIC_POST.cmp_prefix(b) {
|
||||
h.push(self.read_indexed_dynamic_post(table)?);
|
||||
h.push(
|
||||
self.read_indexed_dynamic_post(table)
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else if HEADER_FIELD_LITERAL_NAME_REF_STATIC.cmp_prefix(b) {
|
||||
h.push(self.read_literal_with_name_ref_static()?);
|
||||
h.push(
|
||||
self.read_literal_with_name_ref_static()
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else if HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC.cmp_prefix(b) {
|
||||
h.push(self.read_literal_with_name_ref_dynamic(table)?);
|
||||
h.push(
|
||||
self.read_literal_with_name_ref_dynamic(table)
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else if HEADER_FIELD_LITERAL_NAME_LITERAL.cmp_prefix(b) {
|
||||
h.push(self.read_literal_with_name_literal()?);
|
||||
h.push(
|
||||
self.read_literal_with_name_literal()
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else if HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST.cmp_prefix(b) {
|
||||
h.push(self.read_literal_with_name_ref_dynamic_post(table)?);
|
||||
h.push(
|
||||
self.read_literal_with_name_ref_dynamic_post(table)
|
||||
.map_err(|_| Error::DecompressionFailed)?,
|
||||
);
|
||||
} else {
|
||||
unreachable!("All prefixes are covered");
|
||||
}
|
||||
}
|
||||
|
||||
qtrace!([self], "done decoding header block.");
|
||||
Ok(HeaderDecoderResult::Headers(h))
|
||||
}
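`decode_header_block` now reports every failure through `Res`, so the caller in decoder.rs (shown earlier in this patch) only has to distinguish three cases. A sketch of that caller-side match; `decoder`, `table` and `max_entries` are illustrative locals:

match decoder.decode_header_block(&table, max_entries, table.base()) {
    Ok(HeaderDecoderResult::Blocked(_req_insert_cnt)) => {
        // Remember the stream and retry once enough dynamic-table inserts have arrived.
    }
    Ok(HeaderDecoderResult::Headers(_headers)) => {
        // A Vec<(String, String)>; acknowledge the header block if req_insert_cnt != 0.
    }
    Err(_) => {
        // Any decoding failure is surfaced as Error::DecompressionFailed.
    }
}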
|
||||
|
||||
pub fn get_req_insert_cnt(&self) -> u64 {
|
||||
|
@ -321,11 +341,8 @@ impl<'a> HeaderDecoder<'a> {
|
|||
.buf
|
||||
.read_prefixed_int(HEADER_FIELD_INDEX_STATIC.len())?;
|
||||
qtrace!([self], "decoder static indexed {}.", index);
|
||||
|
||||
match HeaderTable::get_static(index) {
|
||||
Ok(entry) => Ok((to_string(entry.name())?, to_string(entry.value())?)),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
let entry = HeaderTable::get_static(index)?;
|
||||
Ok((to_string(entry.name())?, to_string(entry.value())?))
|
||||
}
|
||||
|
||||
fn read_indexed_dynamic(&mut self, table: &HeaderTable) -> Res<Header> {
|
||||
|
@ -333,10 +350,8 @@ impl<'a> HeaderDecoder<'a> {
|
|||
.buf
|
||||
.read_prefixed_int(HEADER_FIELD_INDEX_DYNAMIC.len())?;
|
||||
qtrace!([self], "decoder dynamic indexed {}.", index);
|
||||
match table.get_dynamic(index, self.base, false) {
|
||||
Ok(entry) => Ok((to_string(entry.name())?, to_string(entry.value())?)),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
let entry = table.get_dynamic(index, self.base, false)?;
|
||||
Ok((to_string(entry.name())?, to_string(entry.value())?))
|
||||
}
|
||||
|
||||
fn read_indexed_dynamic_post(&mut self, table: &HeaderTable) -> Res<Header> {
|
||||
|
@ -344,10 +359,8 @@ impl<'a> HeaderDecoder<'a> {
|
|||
.buf
|
||||
.read_prefixed_int(HEADER_FIELD_INDEX_DYNAMIC_POST.len())?;
|
||||
qtrace!([self], "decode post-based {}.", index);
|
||||
match table.get_dynamic(index, self.base, true) {
|
||||
Ok(entry) => Ok((to_string(entry.name())?, to_string(entry.value())?)),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
let entry = table.get_dynamic(index, self.base, true)?;
|
||||
Ok((to_string(entry.name())?, to_string(entry.value())?))
|
||||
}
|
||||
|
||||
fn read_literal_with_name_ref_static(&mut self) -> Res<Header> {
|
||||
|
@ -360,13 +373,10 @@ impl<'a> HeaderDecoder<'a> {
|
|||
.buf
|
||||
.read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_STATIC.len())?;
|
||||
|
||||
match HeaderTable::get_static(index) {
|
||||
Ok(entry) => Ok((
|
||||
to_string(entry.name())?,
|
||||
self.buf.read_literal_from_buffer(0)?,
|
||||
)),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
Ok((
|
||||
to_string(HeaderTable::get_static(index)?.name())?,
|
||||
self.buf.read_literal_from_buffer(0)?,
|
||||
))
|
||||
}
|
||||
|
||||
fn read_literal_with_name_ref_dynamic(&mut self, table: &HeaderTable) -> Res<Header> {
|
||||
|
@ -379,13 +389,10 @@ impl<'a> HeaderDecoder<'a> {
|
|||
.buf
|
||||
.read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC.len())?;
|
||||
|
||||
match table.get_dynamic(index, self.base, false) {
|
||||
Ok(entry) => Ok((
|
||||
to_string(entry.name())?,
|
||||
self.buf.read_literal_from_buffer(0)?,
|
||||
)),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
Ok((
|
||||
to_string(table.get_dynamic(index, self.base, false)?.name())?,
|
||||
self.buf.read_literal_from_buffer(0)?,
|
||||
))
|
||||
}
|
||||
|
||||
fn read_literal_with_name_ref_dynamic_post(&mut self, table: &HeaderTable) -> Res<Header> {
|
||||
|
@ -395,13 +402,10 @@ impl<'a> HeaderDecoder<'a> {
|
|||
.buf
|
||||
.read_prefixed_int(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST.len())?;
|
||||
|
||||
match table.get_dynamic(index, self.base, true) {
|
||||
Ok(entry) => Ok((
|
||||
to_string(entry.name())?,
|
||||
self.buf.read_literal_from_buffer(0)?,
|
||||
)),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
}
|
||||
Ok((
|
||||
to_string(table.get_dynamic(index, self.base, true)?.name())?,
|
||||
self.buf.read_literal_from_buffer(0)?,
|
||||
))
|
||||
}
|
||||
|
||||
fn read_literal_with_name_literal(&mut self) -> Res<Header> {
|
||||
|
|
|
@ -4,124 +4,106 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
#![allow(clippy::pedantic)]
|
||||
|
||||
use crate::huffman_decode_helper::{HuffmanDecodeTable, HUFFMAN_DECODE_ROOT};
|
||||
use crate::huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT};
|
||||
use crate::huffman_table::HUFFMAN_TABLE;
|
||||
use crate::{Error, Res};
|
||||
use std::convert::TryFrom;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Huffman {
|
||||
// we read whole bytes from an input and stored them in incoming_bytes.
|
||||
// Some bits will be transfer to decoding_byte and incoming_bits_left is number of bits still
|
||||
// left in incoming_bytes.
|
||||
input_byte: u8,
|
||||
input_bits_left: u8,
|
||||
// byte used for decoding
|
||||
decoding_byte: u8,
|
||||
// bits left in decoding_byte that are not decoded yet.
|
||||
decoding_bits_left: u8,
|
||||
struct BitReader<'a> {
|
||||
input: &'a [u8],
|
||||
offset: usize,
|
||||
current_bit: u8,
|
||||
}
|
||||
|
||||
impl Huffman {
|
||||
pub fn decode(&mut self, input: &[u8]) -> Res<Vec<u8>> {
|
||||
let mut output: Vec<u8> = Vec::new();
|
||||
let mut read: usize = 0; // bytes read from the input.
|
||||
let len = input.len();
|
||||
impl<'a> BitReader<'a> {
|
||||
pub fn new(input: &'a [u8]) -> Self {
|
||||
BitReader {
|
||||
input,
|
||||
offset: 0,
|
||||
current_bit: 8,
|
||||
}
|
||||
}
|
||||
|
||||
while self.has_more_data(len, read) {
|
||||
if let Some(c) =
|
||||
self.decode_huffman_character(HUFFMAN_DECODE_ROOT, input, len, &mut read)?
|
||||
{
|
||||
output.push(c);
|
||||
pub fn read_bit(&mut self) -> Res<u8> {
|
||||
if self.input.len() == self.offset {
|
||||
return Err(Error::NeedMoreData);
|
||||
}
|
||||
|
||||
if self.current_bit == 0 {
|
||||
self.offset += 1;
|
||||
if self.offset == self.input.len() {
|
||||
return Err(Error::NeedMoreData);
|
||||
}
|
||||
self.current_bit = 8;
|
||||
}
|
||||
self.current_bit -= 1;
|
||||
Ok((self.input[self.offset] >> self.current_bit) & 0x01)
|
||||
}
|
||||
|
||||
pub fn verify_ending(&mut self, i: u8) -> Res<()> {
|
||||
if (i + self.current_bit) > 7 {
|
||||
return Err(Error::HuffmanDecompressionFailed);
|
||||
}
|
||||
|
||||
if self.decoding_bits_left > 7 {
|
||||
return Err(Error::DecompressionFailed);
|
||||
if self.input.is_empty() {
|
||||
Ok(())
|
||||
} else if self.offset != self.input.len() {
|
||||
Err(Error::HuffmanDecompressionFailed)
|
||||
} else if self.input[self.input.len() - 1] & ((0x1 << (i + self.current_bit)) - 1)
|
||||
== ((0x1 << (i + self.current_bit)) - 1)
|
||||
{
|
||||
self.current_bit = 0;
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::HuffmanDecompressionFailed)
|
||||
}
|
||||
if self.decoding_bits_left > 0 {
|
||||
let mask: u8 = ((1 << self.decoding_bits_left) - 1) << (8 - self.decoding_bits_left);
|
||||
let bits: u8 = self.decoding_byte & mask;
|
||||
if bits != mask {
|
||||
return Err(Error::DecompressionFailed);
|
||||
}
|
||||
|
||||
pub fn has_more_data(&self) -> bool {
|
||||
!self.input.is_empty() && (self.offset != self.input.len() || (self.current_bit != 0))
|
||||
}
|
||||
}
|
||||
|
||||
/// Decodes huffman encoded input.
|
||||
/// ### Errors
|
||||
/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct huffman-encoded array of bits.
|
||||
pub fn decode_huffman(input: &[u8]) -> Res<Vec<u8>> {
|
||||
let mut reader = BitReader::new(input);
|
||||
let mut output = Vec::new();
|
||||
while reader.has_more_data() {
|
||||
if let Some(c) = decode_character(&mut reader)? {
|
||||
if c == 256 {
|
||||
return Err(Error::HuffmanDecompressionFailed);
|
||||
}
|
||||
}
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
fn has_more_data(&self, len: usize, read: usize) -> bool {
|
||||
len > read || self.input_bits_left > 0
|
||||
}
|
||||
|
||||
fn extract_byte(&mut self, input: &[u8], len: usize, read: &mut usize) {
|
||||
// if self.decoding_bits_left > 0 the 'left' bits will be in proper place and the rest will be 0.
|
||||
// for self.decoding_bits_left == 0 we need to do it here.
|
||||
if self.decoding_bits_left == 0 {
|
||||
self.decoding_byte = 0x00;
|
||||
}
|
||||
|
||||
let from_current = std::cmp::min(8 - self.decoding_bits_left, self.input_bits_left);
|
||||
if from_current > 0 {
|
||||
let mask = (1 << from_current) - 1;
|
||||
let bits = (self.input_byte >> (self.input_bits_left - from_current)) & mask;
|
||||
self.decoding_byte |= bits << (8 - self.decoding_bits_left - from_current);
|
||||
self.decoding_bits_left += from_current;
|
||||
self.input_bits_left -= from_current;
|
||||
}
|
||||
if self.decoding_bits_left < 8 && *read < len {
|
||||
// get bits from the next byte.
|
||||
self.input_byte = input[*read];
|
||||
*read += 1;
|
||||
self.decoding_byte |= self.input_byte >> self.decoding_bits_left;
|
||||
self.input_bits_left = self.decoding_bits_left;
|
||||
self.decoding_bits_left = 8;
|
||||
output.push(u8::try_from(c).unwrap());
|
||||
}
|
||||
}
|
||||
|
||||
fn decode_huffman_character(
|
||||
&mut self,
|
||||
table: &HuffmanDecodeTable,
|
||||
input: &[u8],
|
||||
len: usize,
|
||||
read: &mut usize,
|
||||
) -> Res<Option<u8>> {
|
||||
self.extract_byte(input, len, read);
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
if table.index_has_a_next_table(self.decoding_byte) {
|
||||
if !self.has_more_data(len, *read) {
|
||||
// This is the last bit and it is padding.
|
||||
fn decode_character(reader: &mut BitReader) -> Res<Option<u16>> {
|
||||
let mut node: &HuffmanDecoderNode = &HUFFMAN_DECODE_ROOT;
|
||||
let mut i = 0;
|
||||
while node.value.is_none() {
|
||||
match reader.read_bit() {
|
||||
Err(_) => {
|
||||
reader.verify_ending(i)?;
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
self.decoding_bits_left = 0;
|
||||
return self.decode_huffman_character(
|
||||
table.next_table(self.decoding_byte),
|
||||
input,
|
||||
len,
|
||||
read,
|
||||
);
|
||||
Ok(b) => {
|
||||
i += 1;
|
||||
if let Some(next) = &node.next[usize::from(b)] {
|
||||
node = &next;
|
||||
} else {
|
||||
reader.verify_ending(i)?;
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let entry = table.entry(self.decoding_byte);
|
||||
if entry.val == 256 {
|
||||
return Err(Error::DecompressionFailed);
|
||||
}
|
||||
|
||||
if entry.prefix_len > self.decoding_bits_left {
|
||||
assert!(!self.has_more_data(len, *read));
|
||||
// This is the last bit and it is padding.
|
||||
return Ok(None);
|
||||
}
|
||||
let c = u8::try_from(entry.val).unwrap();
|
||||
|
||||
self.decoding_bits_left -= entry.prefix_len;
|
||||
if self.decoding_bits_left > 0 {
|
||||
self.decoding_byte <<= entry.prefix_len;
|
||||
}
|
||||
Ok(Some(c))
|
||||
}
|
||||
debug_assert!(node.value.is_some());
|
||||
Ok(node.value)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
|
@ -139,7 +121,7 @@ pub fn encode_huffman(input: &[u8]) -> Vec<u8> {
|
|||
left -= e.len;
|
||||
e.len = 0;
|
||||
} else {
|
||||
let v: u8 = (e.val >> (e.len - left)) as u8;
|
||||
let v: u8 = u8::try_from(e.val >> (e.len - left)).unwrap();
|
||||
saved |= v;
|
||||
output.push(saved);
|
||||
e.len -= left;
|
||||
|
@ -147,14 +129,16 @@ pub fn encode_huffman(input: &[u8]) -> Vec<u8> {
|
|||
saved = 0;
|
||||
}
|
||||
|
||||
// Write full bytes
|
||||
while e.len >= 8 {
|
||||
let v: u8 = (e.val >> (e.len - 8)) as u8;
|
||||
let v: u8 = u8::try_from((e.val >> (e.len - 8)) & 0xFF).unwrap();
|
||||
output.push(v);
|
||||
e.len -= 8;
|
||||
}
|
||||
|
||||
// Write the rest into saved.
|
||||
if e.len > 0 {
|
||||
saved = ((e.val & ((1 << e.len) - 1)) as u8) << (8 - e.len);
|
||||
saved = u8::try_from(e.val & ((1 << e.len) - 1)).unwrap() << (8 - e.len);
|
||||
left = 8 - e.len;
|
||||
}
|
||||
}
|
||||
|
@ -170,7 +154,7 @@ pub fn encode_huffman(input: &[u8]) -> Vec<u8> {
|
|||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use super::{decode_huffman, encode_huffman, Error};
|
||||
|
||||
struct TestElement {
|
||||
pub val: &'static [u8],
|
||||
|
@ -239,6 +223,8 @@ mod tests {
|
|||
},
|
||||
];
|
||||
|
||||
const WRONG_END: &[u8] = &[0xa8, 0xeb, 0x10, 0x64, 0x9c, 0xaf];
|
||||
|
||||
#[test]
|
||||
fn test_encoder() {
|
||||
for e in TEST_CASES {
|
||||
|
@ -250,9 +236,17 @@ mod tests {
|
|||
#[test]
|
||||
fn test_decoder() {
|
||||
for e in TEST_CASES {
|
||||
let res = Huffman::default().decode(e.res);
|
||||
let res = decode_huffman(e.res);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap()[..], *e.val);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn decoder_error_wrong_ending() {
|
||||
assert_eq!(
|
||||
decode_huffman(WRONG_END),
|
||||
Err(Error::HuffmanDecompressionFailed)
|
||||
);
|
||||
}
|
||||
}
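Huffman coding is now exposed as two free functions rather than a stateful `Huffman` struct. A minimal round-trip sketch:

let input = b"my-value";
let encoded = encode_huffman(input);
let decoded = decode_huffman(&encoded).expect("a stream we just encoded must decode");
assert_eq!(&decoded[..], &input[..]);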
|
||||
|
|
File diff suppressed because it is too large
|
@ -20,6 +20,7 @@ pub mod huffman;
|
|||
mod huffman_decode_helper;
|
||||
pub mod huffman_table;
|
||||
mod prefix;
|
||||
mod qlog;
|
||||
mod qpack_send_buf;
|
||||
pub mod reader;
|
||||
mod static_table;
|
||||
|
@ -28,16 +29,11 @@ mod table;
|
|||
pub type Header = (String, String);
|
||||
type Res<T> = Result<T, Error>;
|
||||
|
||||
#[derive(Debug)]
|
||||
enum QPackSide {
|
||||
Encoder,
|
||||
Decoder,
|
||||
}
|
||||
|
||||
impl ::std::fmt::Display for QPackSide {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "{:?}", self)
|
||||
}
|
||||
#[derive(Debug, PartialEq, PartialOrd, Ord, Eq, Clone, Copy)]
|
||||
pub struct QpackSettings {
|
||||
pub max_table_size_decoder: u64,
|
||||
pub max_table_size_encoder: u64,
|
||||
pub max_blocked_streams: u16,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
|
@ -47,15 +43,22 @@ pub enum Error {
|
|||
DecoderStream,
|
||||
ClosedCriticalStream,
|
||||
|
||||
// These are internal errors, they will be transfromed into one of the above.
|
||||
// These are internal errors, they will be transformed into one of the above.
|
||||
NeedMoreData, // Returned when an input stream does not have more data that a decoder needs. (It does not mean that the stream is closed.)
|
||||
HeaderLookup,
|
||||
NoMoreData,
|
||||
HuffmanDecompressionFailed,
|
||||
ToStringFailed,
|
||||
ChangeCapacity,
|
||||
DynamicTableFull,
|
||||
IncrementAck,
|
||||
IntegerOverflow,
|
||||
WrongStreamCount,
|
||||
Decoding, // Decoding internal error that is not one of the above.
|
||||
EncoderStreamBlocked,
|
||||
Internal,
|
||||
Decoding, // this will be translated into Encoder/DecoderStreamError or DecompressionFailed depending on the caller
|
||||
|
||||
TransportError(neqo_transport::Error),
|
||||
QlogError,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
|
@ -92,3 +95,9 @@ impl From<neqo_transport::Error> for Error {
|
|||
Self::TransportError(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<::qlog::Error> for Error {
|
||||
fn from(_err: ::qlog::Error) -> Self {
|
||||
Self::QlogError
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
// Functions that handle capturing QLOG traces.
|
||||
|
||||
use crate::Res;
|
||||
use neqo_common::hex;
|
||||
use neqo_common::qlog::NeqoQlog;
|
||||
use qlog::{event::Event, QPackInstruction, QpackInstructionTypeName};
|
||||
|
||||
pub fn qpack_read_insert_count_increment_instruction(
|
||||
qlog: &mut Option<NeqoQlog>,
|
||||
increment: u64,
|
||||
data: &[u8],
|
||||
) -> Res<()> {
|
||||
if let Some(qlog) = qlog {
|
||||
let event = Event::qpack_instruction_received(
|
||||
QPackInstruction::InsertCountIncrementInstruction {
|
||||
instruction_type: QpackInstructionTypeName::InsertCountIncrementInstruction,
|
||||
increment,
|
||||
},
|
||||
Some(8.to_string()),
|
||||
Some(hex(data)),
|
||||
);
|
||||
|
||||
qlog.stream().add_event(event)?;
|
||||
}
|
||||
Ok(())
|
||||
}
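When no qlog stream is attached the helper is a no-op; a hedged sketch of that trivial case:

// With qlog set to None the function only returns Ok(()).
let mut qlog: Option<NeqoQlog> = None;
qpack_read_insert_count_increment_instruction(&mut qlog, 1, &[0x01])
    .expect("nothing is written when no NeqoQlog is attached");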
|
|
@ -4,7 +4,7 @@
|
|||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
use crate::huffman::Huffman;
|
||||
use crate::huffman::decode_huffman;
|
||||
use crate::prefix::Prefix;
|
||||
use crate::{Error, Res};
|
||||
use neqo_common::{qdebug, qerror};
|
||||
|
@ -37,7 +37,7 @@ impl<'a> ReadByte for ReceiverConnWrapper<'a> {
|
|||
let mut b = [0];
|
||||
match self.conn.stream_recv(self.stream_id, &mut b)? {
|
||||
(_, true) => Err(Error::ClosedCriticalStream),
|
||||
(0, false) => Err(Error::NoMoreData),
|
||||
(0, false) => Err(Error::NeedMoreData),
|
||||
_ => Ok(b[0]),
|
||||
}
|
||||
}
|
||||
|
@ -105,10 +105,7 @@ impl<'a> ReceiverBufferWrapper<'a> {
|
|||
|
||||
let first_byte = self.read_byte()?;
|
||||
let mut reader = IntReader::new(first_byte, prefix_len);
|
||||
match reader.read(self) {
|
||||
Ok(Some(val)) => Ok(val),
|
||||
_ => Err(Error::DecompressionFailed),
|
||||
}
|
||||
reader.read(self)
|
||||
}
|
||||
|
||||
/// Do not use `LiteralReader` here to avoid copying data.
|
||||
|
@ -130,11 +127,10 @@ impl<'a> ReceiverBufferWrapper<'a> {
|
|||
let mut int_reader = IntReader::new(first_byte, prefix_len + 1);
|
||||
let length: usize = int_reader
|
||||
.read(self)?
|
||||
.ok_or(Error::DecompressionFailed)?
|
||||
.try_into()
|
||||
.or(Err(Error::DecompressionFailed))?;
|
||||
if use_huffman {
|
||||
Ok(to_string(&Huffman::default().decode(self.slice(length)?)?)?)
|
||||
Ok(to_string(&decode_huffman(self.slice(length)?)?)?)
|
||||
} else {
|
||||
Ok(to_string(self.slice(length)?)?)
|
||||
}
|
||||
|
@ -189,34 +185,17 @@ impl IntReader {
|
|||
unreachable!();
|
||||
}
|
||||
|
||||
/// This function reads more bytes until the varint is decoded or until stream/buffer does not
|
||||
/// This function reads bytes until the varint is decoded or until stream/buffer does not
|
||||
/// have any more date.
|
||||
/// # Errors
|
||||
/// Possible errors are:
|
||||
/// 1) `IntegerOverflow`
|
||||
/// 2) Any `ReadByte`'s error
|
||||
/// It returns Some(value) if reading the varint is done or None if it needs more data.
|
||||
pub fn read<R: ReadByte>(&mut self, s: &mut R) -> Res<Option<u64>> {
|
||||
// If it is not finished yet read more data.
|
||||
// A varint may take only one byte, In that case already the first by has set state to done.
|
||||
if !self.done {
|
||||
self.read_more(s)?;
|
||||
}
|
||||
|
||||
if self.done {
|
||||
return Ok(Some(self.value));
|
||||
}
|
||||
Ok(None)
|
||||
}
|
||||
|
||||
fn read_more<R: ReadByte>(&mut self, s: &mut R) -> Res<()> {
|
||||
/// 1) `NeedMoreData` if the reader needs more data,
|
||||
/// 2) `IntegerOverflow`,
|
||||
/// 3) Any `ReadByte`'s error
|
||||
pub fn read<R: ReadByte>(&mut self, s: &mut R) -> Res<u64> {
|
||||
let mut b: u8;
|
||||
while !self.done {
|
||||
b = match s.read_byte() {
|
||||
Ok(b) => b,
|
||||
Err(Error::NoMoreData) => return Ok(()),
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
b = s.read_byte()?;
|
||||
|
||||
if (self.cnt == 63) && (b > 1 || (b == 1 && ((self.value >> 63) == 1))) {
|
||||
qerror!("Error decoding prefixed encoded int - IntegerOverflow");
|
||||
|
@ -231,7 +210,7 @@ impl IntReader {
|
|||
self.done = true;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
Ok(self.value)
|
||||
}
|
||||
}
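`IntReader::read` now drives the whole QPACK prefixed-integer decode and reports a short read as `Err(Error::NeedMoreData)` while keeping its internal state, so the caller can simply retry. A sketch mirroring the tests below: with a 0-bit prefix the first byte 0xFF is the all-ones prefix value (255) and the continuation byte 0x02 adds 2, giving 257.

let buf: &[u8] = &[0xFF, 0x02];
let mut reader = IntReader::new(buf[0], 0);
let mut recv = TestReceiver::default();
// The continuation byte has not been written yet, so the reader asks for more data.
assert_eq!(reader.read(&mut recv), Err(Error::NeedMoreData));
recv.write(&buf[1..]);
assert_eq!(reader.read(&mut recv), Ok(257));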
|
||||
|
||||
|
@ -282,45 +261,40 @@ impl LiteralReader {
|
|||
/// have any more data ready.
|
||||
/// # Errors
|
||||
/// Possible errors are:
|
||||
/// 1) `IntegerOverflow`
|
||||
/// 2) Any `ReadByte`'s error
|
||||
/// It returns Some(value) if reading the literal is done or None if it needs more data.
|
||||
pub fn read<T: ReadByte + Reader>(&mut self, s: &mut T) -> Res<Option<Vec<u8>>> {
|
||||
/// 1) `NeedMoreData` if the reader needs more data,
|
||||
/// 2) `IntegerOverflow`
|
||||
/// 3) Any `ReadByte`'s error
|
||||
/// It returns value if reading the literal is done or None if it needs more data.
|
||||
pub fn read<T: ReadByte + Reader>(&mut self, s: &mut T) -> Res<Vec<u8>> {
|
||||
loop {
|
||||
qdebug!("state = {:?}", self.state);
|
||||
match &mut self.state {
|
||||
LiteralReaderState::ReadHuffman => {
|
||||
let b = match s.read_byte() {
|
||||
Ok(b) => b,
|
||||
Err(Error::NoMoreData) => return Ok(None),
|
||||
Err(e) => return Err(e),
|
||||
};
|
||||
let b = s.read_byte()?;
|
||||
|
||||
self.use_huffman = (b & 0x80) != 0;
|
||||
self.state = LiteralReaderState::ReadLength {
|
||||
reader: IntReader::new(b, 1),
|
||||
};
|
||||
}
|
||||
LiteralReaderState::ReadLength { reader } => match reader.read(s)? {
|
||||
Some(v) => {
|
||||
self.literal
|
||||
.resize(v.try_into().or(Err(Error::Decoding))?, 0x0);
|
||||
self.state = LiteralReaderState::ReadLiteral { offset: 0 };
|
||||
}
|
||||
None => break Ok(None),
|
||||
},
|
||||
LiteralReaderState::ReadLength { reader } => {
|
||||
let v = reader.read(s)?;
|
||||
self.literal
|
||||
.resize(v.try_into().or(Err(Error::Decoding))?, 0x0);
|
||||
self.state = LiteralReaderState::ReadLiteral { offset: 0 };
|
||||
}
|
||||
LiteralReaderState::ReadLiteral { offset } => {
|
||||
let amount = s.read(&mut self.literal[*offset..])?;
|
||||
*offset += amount;
|
||||
if *offset == self.literal.len() {
|
||||
self.state = LiteralReaderState::Done;
|
||||
if self.use_huffman {
|
||||
break Ok(Some(Huffman::default().decode(&self.literal)?));
|
||||
break Ok(decode_huffman(&self.literal)?);
|
||||
} else {
|
||||
break Ok(Some(mem::replace(&mut self.literal, Vec::new())));
|
||||
break Ok(mem::replace(&mut self.literal, Vec::new()));
|
||||
}
|
||||
} else {
|
||||
break Ok(None);
|
||||
break Err(Error::NeedMoreData);
|
||||
}
|
||||
}
|
||||
LiteralReaderState::Done => {
|
||||
|
@ -334,11 +308,11 @@ impl LiteralReader {
|
|||
/// This is a helper function used only by `ReceiverBufferWrapper`, therefore it returns
|
||||
/// `DecompressionFailed` if any error happens.
|
||||
/// # Errors
|
||||
/// If an parsing error occurred, the function returns `DecompressionFailed`.
|
||||
/// If a parsing error occurs, the function returns `ToStringFailed`.
|
||||
pub fn to_string(v: &[u8]) -> Res<String> {
|
||||
match str::from_utf8(v) {
|
||||
Ok(s) => Ok(s.to_string()),
|
||||
Err(_) => Err(Error::DecompressionFailed),
|
||||
Err(_) => Err(Error::ToStringFailed),
|
||||
}
|
||||
}
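A tiny sketch of the renamed error path: valid UTF-8 becomes a `String`, anything else is now reported as `ToStringFailed` instead of `DecompressionFailed`.

assert_eq!(to_string(b"abc"), Ok(String::from("abc")));
// 0xFF is never valid UTF-8.
assert_eq!(to_string(&[0xff]), Err(Error::ToStringFailed));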
|
||||
|
||||
|
@ -362,7 +336,7 @@ pub(crate) mod test_receiver {
|
|||
|
||||
impl ReadByte for TestReceiver {
|
||||
fn read_byte(&mut self) -> Res<u8> {
|
||||
self.buf.pop_back().ok_or(Error::NoMoreData)
|
||||
self.buf.pop_back().ok_or(Error::NeedMoreData)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -374,7 +348,7 @@ pub(crate) mod test_receiver {
|
|||
buf.len()
|
||||
};
|
||||
for item in buf.iter_mut().take(len) {
|
||||
*item = self.buf.pop_back().ok_or(Error::NoMoreData)?;
|
||||
*item = self.buf.pop_back().ok_or(Error::NeedMoreData)?;
|
||||
}
|
||||
Ok(len)
|
||||
}
|
||||
|
@ -414,7 +388,7 @@ mod tests {
|
|||
let mut reader = IntReader::new(buf[0], *prefix_len);
|
||||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
test_receiver.write(&buf[1..]);
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(Some(*value)));
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(*value));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -426,7 +400,7 @@ mod tests {
|
|||
test_receiver.write(&buf[1..]);
|
||||
// add some more data
|
||||
test_receiver.write(&[0x0, 0x0, 0x0]);
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(Some(*value)));
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(*value));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -436,28 +410,28 @@ mod tests {
|
|||
let mut reader = IntReader::new(buf[0], *prefix_len);
|
||||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
|
||||
// data has not been received yet, reading IntReader will return Ok(None).
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(None));
|
||||
// data has not been received yet, reading IntReader will return Err(Error::NeedMoreData).
|
||||
assert_eq!(reader.read(&mut test_receiver), Err(Error::NeedMoreData));
|
||||
|
||||
// Write one byte.
|
||||
test_receiver.write(&buf[1..2]);
|
||||
// data has not been received yet, reading IntReader will return Ok(None).
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(None));
|
||||
// data has not been received yet, reading IntReader will return Err(Error::NeedMoreData).
|
||||
assert_eq!(reader.read(&mut test_receiver), Err(Error::NeedMoreData));
|
||||
|
||||
// Write one byte.
|
||||
test_receiver.write(&buf[2..]);
|
||||
// Now prefixed int is complete.
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(Some(*value)));
|
||||
assert_eq!(reader.read(&mut test_receiver), Ok(*value));
|
||||
}
|
||||
|
||||
type TestSetup = (&'static [u8], u8, Res<Option<u64>>);
|
||||
type TestSetup = (&'static [u8], u8, Res<u64>);
|
||||
const TEST_CASES_BIG_NUMBERS: [TestSetup; 3] = [
|
||||
(
|
||||
&[
|
||||
0xFF, 0x80, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x01,
|
||||
],
|
||||
0,
|
||||
Ok(Some(0xFFFF_FFFF_FFFF_FFFF)),
|
||||
Ok(0xFFFF_FFFF_FFFF_FFFF),
|
||||
),
|
||||
(
|
||||
&[
|
||||
|
@ -558,7 +532,7 @@ mod tests {
|
|||
let mut test_receiver: TestReceiver = TestReceiver::default();
|
||||
test_receiver.write(&buf[1..]);
|
||||
assert_eq!(
|
||||
to_string(&reader.read(&mut test_receiver).unwrap().unwrap()).unwrap(),
|
||||
to_string(&reader.read(&mut test_receiver).unwrap()).unwrap(),
|
||||
*value
|
||||
);
|
||||
}
|
||||
|
@ -569,7 +543,7 @@ mod tests {
|
|||
for (buf, prefix_len, value) in &TEST_CASES_NUMBERS {
|
||||
let mut buffer = ReceiverBufferWrapper::new(buf);
|
||||
let mut reader = IntReader::new(buffer.read_byte().unwrap(), *prefix_len);
|
||||
assert_eq!(reader.read(&mut buffer), Ok(Some(*value)));
|
||||
assert_eq!(reader.read(&mut buffer), Ok(*value));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -5,11 +5,13 @@
|
|||
// except according to those terms.
|
||||
|
||||
use crate::static_table::{StaticTableEntry, HEADER_STATIC_TABLE};
|
||||
use crate::{Error, QPackSide, Res};
|
||||
use crate::{Error, Res};
|
||||
use neqo_common::qtrace;
|
||||
use std::collections::VecDeque;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
pub const ADDITIONAL_TABLE_ENTRY_SIZE: usize = 32;
|
||||
|
||||
pub struct LookupResult {
|
||||
pub index: u64,
|
||||
pub static_table: bool,
|
||||
|
@ -17,11 +19,12 @@ pub struct LookupResult {
|
|||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct DynamicTableEntry {
|
||||
pub(crate) struct DynamicTableEntry {
|
||||
base: u64,
|
||||
name: Vec<u8>,
|
||||
value: Vec<u8>,
|
||||
/// Number of streams that refer this entry.
|
||||
/// Number of header blocks that refer this entry.
|
||||
/// This is only used by the encoder.
|
||||
refs: u64,
|
||||
}
|
||||
|
||||
|
@ -30,8 +33,8 @@ impl DynamicTableEntry {
|
|||
self.refs == 0 && self.base < first_not_acked
|
||||
}
|
||||
|
||||
pub fn size(&self) -> u64 {
|
||||
(self.name.len() + self.value.len() + 32) as u64
|
||||
pub fn size(&self) -> usize {
|
||||
self.name.len() + self.value.len() + ADDITIONAL_TABLE_ENTRY_SIZE
|
||||
}
|
||||
|
||||
pub fn add_ref(&mut self) {
|
||||
|
@ -57,17 +60,17 @@ impl DynamicTableEntry {
|
|||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct HeaderTable {
|
||||
qpack_side: QPackSide,
|
||||
pub(crate) struct HeaderTable {
|
||||
dynamic: VecDeque<DynamicTableEntry>,
|
||||
// The total capacity (in QPACK bytes) of the table. This is set by
|
||||
// configuration.
|
||||
/// The total capacity (in QPACK bytes) of the table. This is set by
|
||||
/// configuration.
|
||||
capacity: u64,
|
||||
// The amount of used capacity.
|
||||
/// The amount of used capacity.
|
||||
used: u64,
|
||||
// The total number of inserts thus far.
|
||||
/// The total number of inserts thus far.
|
||||
base: u64,
|
||||
// This is number of inserts that are acked. this correspond to index of the first not acked.
|
||||
/// This is the number of inserts that are acked. This corresponds to the index of the first not acked.
|
||||
/// This is only used by the encoder.
|
||||
acked_inserts_cnt: u64,
|
||||
}
|
||||
|
||||
|
@ -75,8 +78,8 @@ impl ::std::fmt::Display for HeaderTable {
|
|||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"HeaderTable for {} (base={} acked_inserts_cnt={} capacity={})",
|
||||
self.qpack_side, self.base, self.acked_inserts_cnt, self.capacity
|
||||
"HeaderTable for (base={} acked_inserts_cnt={} capacity={})",
|
||||
self.base, self.acked_inserts_cnt, self.capacity
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -84,36 +87,40 @@ impl ::std::fmt::Display for HeaderTable {
|
|||
impl HeaderTable {
|
||||
pub fn new(encoder: bool) -> Self {
|
||||
Self {
|
||||
qpack_side: if encoder {
|
||||
QPackSide::Encoder
|
||||
} else {
|
||||
QPackSide::Decoder
|
||||
},
|
||||
dynamic: VecDeque::new(),
|
||||
capacity: 0,
|
||||
used: 0,
|
||||
base: 0,
|
||||
acked_inserts_cnt: 0,
|
||||
acked_inserts_cnt: if encoder { 0 } else { u64::max_value() },
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns number of inserts.
|
||||
pub fn base(&self) -> u64 {
|
||||
self.base
|
||||
}
|
||||
|
||||
/// Returns capacity of the dynamic table
|
||||
pub fn capacity(&self) -> u64 {
|
||||
self.capacity
|
||||
}
|
||||
|
||||
/// Change the dynamic table capacity.
|
||||
/// ### Errors
|
||||
/// `ChangeCapacity` if table capacity cannot be reduced.
|
||||
/// The table cannot be reduced if there are entries that are referred to at the moment or their inserts are unacked.
|
||||
pub fn set_capacity(&mut self, cap: u64) -> Res<()> {
|
||||
qtrace!([self], "set capacity to {}", cap);
|
||||
if !self.evict_to(cap) {
|
||||
return Err(Error::Internal);
|
||||
return Err(Error::ChangeCapacity);
|
||||
}
|
||||
self.capacity = cap;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get a static entry with `index`.
|
||||
/// ### Errors
|
||||
/// `HeaderLookup` if the index does not exist in the static table.
|
||||
pub fn get_static(index: u64) -> Res<&'static StaticTableEntry> {
|
||||
let inx = usize::try_from(index).or(Err(Error::HeaderLookup))?;
|
||||
if inx > HEADER_STATIC_TABLE.len() {
|
||||
|
@ -124,8 +131,8 @@ impl HeaderTable {
|
|||
|
||||
fn get_dynamic_with_abs_index(&mut self, index: u64) -> Res<&mut DynamicTableEntry> {
|
||||
if self.base <= index {
|
||||
debug_assert!(false, "This is an iternal error");
|
||||
return Err(Error::Internal);
|
||||
debug_assert!(false, "This is an internal error");
|
||||
return Err(Error::HeaderLookup);
|
||||
}
|
||||
let inx = self.base - index - 1;
|
||||
let inx = usize::try_from(inx).or(Err(Error::HeaderLookup))?;
|
||||
|
@ -143,24 +150,26 @@ impl HeaderTable {
|
|||
Ok(&self.dynamic[inx])
|
||||
}
|
||||
|
||||
/// Get a entry in the dynamic table.
|
||||
/// ### Errors
|
||||
/// `HeaderLookup` if entry does not exist.
|
||||
pub fn get_dynamic(&self, index: u64, base: u64, post: bool) -> Res<&DynamicTableEntry> {
|
||||
if self.base < base {
|
||||
return Err(Error::HeaderLookup);
|
||||
}
|
||||
let inx: u64;
|
||||
let base_rel = self.base - base;
|
||||
if post {
|
||||
if base_rel <= index {
|
||||
let inx = if post {
|
||||
if self.base < (base + index + 1) {
|
||||
return Err(Error::HeaderLookup);
|
||||
}
|
||||
inx = base_rel - index - 1;
|
||||
self.base - (base + index + 1)
|
||||
} else {
|
||||
inx = base_rel + index;
|
||||
}
|
||||
if (self.base + index) < base {
|
||||
return Err(Error::HeaderLookup);
|
||||
}
|
||||
(self.base + index) - base
|
||||
};
|
||||
|
||||
self.get_dynamic_with_relative_index(inx)
|
||||
}
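A self-contained sketch of the index arithmetic that the rewritten `get_dynamic` performs; the function and variable names here are invented, only the formulas come from the code above. `table_base` is the table's total insert count, `header_base` is the Base signalled by the header block, and the result is the relative index into the `VecDeque`, newest entry first:

```rust
// Post-base references count forward from Base, ordinary references backwards.
fn relative_index(table_base: u64, header_base: u64, index: u64, post: bool) -> Option<u64> {
    if post {
        // inx = table_base - (header_base + index + 1); None models Err(HeaderLookup).
        table_base.checked_sub(header_base + index + 1)
    } else {
        // inx = (table_base + index) - header_base; None models Err(HeaderLookup).
        (table_base + index).checked_sub(header_base)
    }
}

fn main() {
    // With 10 inserts so far and Base = 8: post-base index 0 is the 9th insert,
    // which sits one slot from the front of the deque.
    assert_eq!(relative_index(10, 8, 0, true), Some(1));
    // An ordinary reference with index 0 points at the entry just below Base.
    assert_eq!(relative_index(10, 8, 0, false), Some(2));
}
```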
|
||||
|
||||
/// Remove a reference to a dynamic table entry.
|
||||
pub fn remove_ref(&mut self, index: u64) {
|
||||
qtrace!([self], "remove reference to entry {}", index);
|
||||
self.get_dynamic_with_abs_index(index)
|
||||
|
@ -168,6 +177,7 @@ impl HeaderTable {
|
|||
.remove_ref();
|
||||
}
|
||||
|
||||
/// Add a reference to a dynamic table entry.
|
||||
pub fn add_ref(&mut self, index: u64) {
|
||||
qtrace!([self], "add reference to entry {}", index);
|
||||
self.get_dynamic_with_abs_index(index)
|
||||
|
@ -175,6 +185,9 @@ impl HeaderTable {
|
|||
.add_ref();
|
||||
}
|
||||
|
||||
/// Look for a header pair.
|
||||
/// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) and `value_matches`
|
||||
/// (if the header value matches as well not only header name)
|
||||
pub fn lookup(&mut self, name: &[u8], value: &[u8], can_block: bool) -> Option<LookupResult> {
|
||||
qtrace!(
|
||||
[self],
|
||||
|
@ -229,27 +242,47 @@ impl HeaderTable {
|
|||
name_match
|
||||
}
|
||||
|
||||
pub fn evict_to(&mut self, reduce: u64) -> bool {
|
||||
fn evict_to(&mut self, reduce: u64) -> bool {
|
||||
self.evict_to_internal(reduce, false)
|
||||
}
|
||||
|
||||
fn test_evict_to(&mut self, reduce: u64) -> bool {
|
||||
self.evict_to_internal(reduce, true)
|
||||
}
|
||||
|
||||
pub fn evict_to_internal(&mut self, reduce: u64, only_check: bool) -> bool {
|
||||
qtrace!(
|
||||
[self],
|
||||
"reduce table to {}, currently used:{}",
|
||||
"reduce table to {}, currently used:{} only_check:{}",
|
||||
reduce,
|
||||
self.used
|
||||
self.used,
|
||||
only_check
|
||||
);
|
||||
while (!self.dynamic.is_empty()) && self.used > reduce {
|
||||
let mut used = self.used;
|
||||
while (!self.dynamic.is_empty()) && used > reduce {
|
||||
if let Some(e) = self.dynamic.back() {
|
||||
if let QPackSide::Encoder = self.qpack_side {
|
||||
if !e.can_reduce(self.acked_inserts_cnt) {
|
||||
return false;
|
||||
}
|
||||
if !e.can_reduce(self.acked_inserts_cnt) {
|
||||
return false;
|
||||
}
|
||||
used -= u64::try_from(e.size()).unwrap();
|
||||
if !only_check {
|
||||
self.used -= u64::try_from(e.size()).unwrap();
|
||||
self.dynamic.pop_back();
|
||||
}
|
||||
self.used -= e.size();
|
||||
self.dynamic.pop_back();
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
|
||||
pub fn insert_possible(&mut self, size: usize) -> bool {
|
||||
u64::try_from(size).unwrap() <= self.capacity
|
||||
&& self.test_evict_to(self.capacity - u64::try_from(size).unwrap())
|
||||
}
|
||||
|
||||
/// Insert a new entry.
|
||||
/// ### Errors
|
||||
/// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or
|
||||
/// other entry cannot be evicted.
|
||||
pub fn insert(&mut self, name: &[u8], value: &[u8]) -> Res<u64> {
|
||||
qtrace!([self], "insert name={:?} value={:?}", name, value);
|
||||
let entry = DynamicTableEntry {
|
||||
|
@ -258,25 +291,29 @@ impl HeaderTable {
|
|||
base: self.base,
|
||||
refs: 0,
|
||||
};
|
||||
if entry.size() > self.capacity || !self.evict_to(self.capacity - entry.size()) {
|
||||
match self.qpack_side {
|
||||
QPackSide::Encoder => return Err(Error::EncoderStream),
|
||||
QPackSide::Decoder => return Err(Error::DecoderStream),
|
||||
}
|
||||
if u64::try_from(entry.size()).unwrap() > self.capacity
|
||||
|| !self.evict_to(self.capacity - u64::try_from(entry.size()).unwrap())
|
||||
{
|
||||
return Err(Error::DynamicTableFull);
|
||||
}
|
||||
self.base += 1;
|
||||
self.used += entry.size();
|
||||
self.used += u64::try_from(entry.size()).unwrap();
|
||||
let index = entry.index();
|
||||
self.dynamic.push_front(entry);
|
||||
Ok(index)
|
||||
}
|
||||
|
||||
/// Insert a new entry with the name referred to by an index into the static or dynamic table.
|
||||
/// ### Errors
|
||||
/// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or
|
||||
/// other entry cannot be evicted.
|
||||
/// `HeaderLookup` if the index does not exist in the static/dynamic table.
|
||||
pub fn insert_with_name_ref(
|
||||
&mut self,
|
||||
name_static_table: bool,
|
||||
name_index: u64,
|
||||
value: &[u8],
|
||||
) -> Res<()> {
|
||||
) -> Res<u64> {
|
||||
qtrace!(
|
||||
[self],
|
||||
"insert with ref to index={} in {} value={:?}",
|
||||
|
@ -295,12 +332,16 @@ impl HeaderTable {
|
|||
.name()
|
||||
.to_vec()
|
||||
};
|
||||
self.insert(&name, value)?;
|
||||
Ok(())
|
||||
self.insert(&name, value)
|
||||
}
|
||||
|
||||
pub fn duplicate(&mut self, index: u64) -> Res<()> {
|
||||
qtrace!([self], "dumplicate entry={}", index);
|
||||
/// Duplicate an entry.
|
||||
/// ### Errors
|
||||
/// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or
|
||||
/// other entry cannot be evicted.
|
||||
/// `HeaderLookup` if the index does not exist in the static/dynamic table.
|
||||
pub fn duplicate(&mut self, index: u64) -> Res<u64> {
|
||||
qtrace!([self], "duplicate entry={}", index);
|
||||
// need to remember name and value because insert may delete the entry.
|
||||
let name: Vec<u8>;
|
||||
let value: Vec<u8>;
|
||||
|
@ -310,19 +351,22 @@ impl HeaderTable {
|
|||
value = entry.value().to_vec();
|
||||
qtrace!([self], "dumplicate name={:?} value={:?}", name, value);
|
||||
}
|
||||
self.insert(&name, &value)?;
|
||||
Ok(())
|
||||
self.insert(&name, &value)
|
||||
}
|
||||
|
||||
/// Increment number of acknowledge entries.
|
||||
/// ### Errors
|
||||
/// `IncrementAck` if ack is greater than actual number of inserts.
|
||||
pub fn increment_acked(&mut self, increment: u64) -> Res<()> {
|
||||
qtrace!([self], "increment acked by {}", increment);
|
||||
self.acked_inserts_cnt += increment;
|
||||
if self.base < self.acked_inserts_cnt {
|
||||
return Err(Error::Internal);
|
||||
return Err(Error::IncrementAck);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return number of acknowledge inserts.
|
||||
pub fn get_acked_inserts_cnt(&self) -> u64 {
|
||||
self.acked_inserts_cnt
|
||||
}
|
||||
|
|
|
@ -1 +1 @@
|
|||
{"files":{"Cargo.toml":"b828688e4d9b7b8a5c8eac8c8959f60e2260d8d52bae80b554ebc88792c2bdfe","TODO":"d759cb804b32fa9d96ea8d3574a3c4073da9fe6a0b02b708a0e22cce5a5b4a0f","src/cc.rs":"b4639a63f4c4112a42223f164e71bd230419bd2a0777cbdfc1852d2a32ba3f3d","src/cid.rs":"f2add30abfa0406c8dbda402f12d15dfb1549085bf2ed1a5e7b287922cc92e93","src/connection.rs":"c088ccb5123cd06070f039b41f446bb7902a4e824ebeccbfdd3759ac0b16792c","src/crypto.rs":"4147bb67d9967d47b84af1cadccb447762050c3d252255af7a5c3f3c20ecfde0","src/dump.rs":"d69ccb0e3b240823b886a791186afbac9f2e26d1f1f67a55dbf86f8cd3e6203e","src/events.rs":"deee6ef1112034582af770212bb70ed2cdcd2b1b8bb167c9118ef39bbf454451","src/flow_mgr.rs":"0b1c6e7587e411635723207ecface6c62d1243599dd017745c61eb94805b9886","src/frame.rs":"246d4d8a3fe877c1bd6b187f5ea80b8b461bd040d0ff474ecdb5e2f551b954e3","src/lib.rs":"1d6404e9fc11edc51ca58d227e0016ca79947b74420f71ff19030d2c356a42fe","src/packet.rs":"b36d68083a5c35c7c40f4ba319d22fa85e5e32c22d04acab63741bf8f05787e2","src/path.rs":"829f5fd788f21525bbc6698abf04651594cfa79380f0ab1c6152a5d391669e58","src/recovery.rs":"5c00435186a28279a9c07cceda5ad00e56a02f334332e8778aa91d06a3b93296","src/recv_stream.rs":"c961df90c50df0ba45d603d270bbccc17abcd6335f054a745f29d62d8d0c6688","src/send_stream.rs":"4d8b929139092216ea2e2388184ac97782cfa92991933ed0ef350bc55b3d1aef","src/server.rs":"6eb850380f17e12c3cc79cb94ab93149eb36afb5c4d26dd9d9c2629f58f7f03e","src/stats.rs":"a276bd9902939091578d3bcc95aa7dd0b32f5d29e528c12e7b93a0ab88474478","src/stream_id.rs":"b3158cf2c6072da79bf6e77a31f71f2f3b970429221142a9ff1dd6cd07df2442","src/tparams.rs":"ef0e8d42ebd51f9494198525060c215ab8fe723c569dc25186de0af558034b66","src/tracking.rs":"4c6690be7a6f83566ba5203e59ed50204e943179ad93ad627e25dd4ac8af63b9","tests/conn_vectors.rs":"b541537a360886f4b9d7e85105bd38faeba83f1ef85b1b1f9ae79891481348a5","tests/connection.rs":"a93985c199a9ef987106f4a20b35ebf803cdbbb855c07b1362b403eed7101ef8","tests/server.rs":"d516bf134a63377c98ff4ac2cca2d4cc6562f48ea262b10a23866d38c4c82af3"},"package":null}
|
||||
{"files":{"Cargo.toml":"0a1d4ce17ba9db0b2a0b55cded951ffb59a4e72298b1ac876adbfc799c07994a","TODO":"d759cb804b32fa9d96ea8d3574a3c4073da9fe6a0b02b708a0e22cce5a5b4a0f","src/cc.rs":"60d6de4376ba77801490a400a90078f93b5300aa24b16838b38a1bad805b784e","src/cid.rs":"54e0f9c330014d296450f80f51b7031eee674b250d4ce24193b6d785b1d27f4f","src/connection.rs":"410937048e364eccd993872950e80b3b65314a61c65e47cdcbbd03d7ffabcb73","src/crypto.rs":"d00318e169b9634ab241e1dad0ec83ecfb93f9944b44891c850108e73f81ccd2","src/dump.rs":"d69ccb0e3b240823b886a791186afbac9f2e26d1f1f67a55dbf86f8cd3e6203e","src/events.rs":"deee6ef1112034582af770212bb70ed2cdcd2b1b8bb167c9118ef39bbf454451","src/flow_mgr.rs":"0b1c6e7587e411635723207ecface6c62d1243599dd017745c61eb94805b9886","src/frame.rs":"5f08b061f8fb7c2cc809116c5638c64d72646d914c688648b39345b775bb0246","src/lib.rs":"70b1b384d538b0e14c1843077848d4a00619e1f8316c23cfb6ba7ec3cb02cee0","src/pace.rs":"2cbc141722d75c87e99eb4e35da1479c47d55d8945f6c252c221543720c283ae","src/packet.rs":"b621ff1521d223345ff54650d94184f54596089b5d34edcfa68b4aae64d6be52","src/path.rs":"140365a03b9e226c0b03a1c3dba6e5235829006168da055c78b69d26bd0c98c7","src/qlog.rs":"92fff8f49c817090a7dbe39060543c4c7a9e45863cf5a430abc2e124d9050d6f","src/recovery.rs":"93e58b57438803709a6468345124655fc3ccc96d13d6514c4fb3e702a15b6f56","src/recv_stream.rs":"0650949aa8baae9e20c57eb2283490b782c41e6c67e9c093fc358145149e386f","src/send_stream.rs":"cd834ebd4386a776d897b1a5e52cad839784e389da8e2543ba1fb57649225cbd","src/server.rs":"84340521f8d29d4e6f6de6af787ce0f4c640df871fad72364d18fb3721151dd9","src/stats.rs":"a276bd9902939091578d3bcc95aa7dd0b32f5d29e528c12e7b93a0ab88474478","src/stream_id.rs":"db11def2ed81ef7c456c7c47827121c0a9ff54ac85228e6db6c790db8944febe","src/tparams.rs":"3c419a9f0fb9cc7edf6ae5b63bab9ecb5abbca3663b4615b1df37447b425f610","src/tracking.rs":"965d0bffc4c6171833a1e3ccb3d7cc31242a0e9f3c5ff77f925aa3a6594b167f","tests/conn_vectors.rs":"3de474009e75e0d283792daae4cb0eac7a1be00a4a6cfab6132c775fbefb1363","tests/connection.rs":"a93985c199a9ef987106f4a20b35ebf803cdbbb855c07b1362b403eed7101ef8","tests/server.rs":"1daefe9fea17ab9f45c57b805d357538d115068e168dc4d6bfc46f71403a986f"},"package":null}
|
|
@@ -1,6 +1,6 @@
 [package]
 name = "neqo-transport"
-version = "0.2.4"
+version = "0.4.0"
 authors = ["EKR <ekr@rtfm.com>", "Andy Grover <agrover@mozilla.com>"]
 edition = "2018"
 license = "MIT/Apache-2.0"
@@ -11,6 +11,7 @@ neqo-common = { path = "../neqo-common" }
 lazy_static = "1.3.0"
 log = {version = "0.4.0", default-features = false}
 smallvec = "1.0.0"
+qlog = "0.2.0"
 
 [dev-dependencies]
 test-fixture = { path = "../test-fixture" }
|
||||
|
|
|
@ -10,6 +10,7 @@ use std::cmp::max;
|
|||
use std::fmt::{self, Display};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use crate::pace::Pacer;
|
||||
use crate::path::PATH_MTU_V6;
|
||||
use crate::tracking::SentPacket;
|
||||
use neqo_common::{const_max, const_min, qdebug, qinfo, qtrace};
|
||||
|
@ -21,6 +22,8 @@ const INITIAL_WINDOW: usize = const_min(
|
|||
const_max(2 * MAX_DATAGRAM_SIZE, 14720),
|
||||
);
|
||||
pub const MIN_CONG_WINDOW: usize = MAX_DATAGRAM_SIZE * 2;
|
||||
/// The number of packets we allow to burst from the pacer.
|
||||
pub(crate) const PACING_BURST_SIZE: usize = 2;
|
||||
const PERSISTENT_CONG_THRESH: u32 = 3;
|
||||
|
||||
#[derive(Debug)]
|
||||
|
@ -29,6 +32,7 @@ pub struct CongestionControl {
|
|||
bytes_in_flight: usize,
|
||||
congestion_recovery_start_time: Option<Instant>,
|
||||
ssthresh: usize,
|
||||
pacer: Option<Pacer>,
|
||||
}
|
||||
|
||||
impl Default for CongestionControl {
|
||||
|
@ -38,6 +42,7 @@ impl Default for CongestionControl {
|
|||
bytes_in_flight: 0,
|
||||
congestion_recovery_start_time: None,
|
||||
ssthresh: std::usize::MAX,
|
||||
pacer: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -47,8 +52,12 @@ impl Display for CongestionControl {
|
|||
write!(
|
||||
f,
|
||||
"CongCtrl {}/{} ssthresh {}",
|
||||
self.bytes_in_flight, self.congestion_window, self.ssthresh
|
||||
)
|
||||
self.bytes_in_flight, self.congestion_window, self.ssthresh,
|
||||
)?;
|
||||
if let Some(p) = &self.pacer {
|
||||
write!(f, " {}", p)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -74,11 +83,7 @@ impl CongestionControl {
|
|||
|
||||
// Multi-packet version of OnPacketAckedCC
|
||||
pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket]) {
|
||||
for pkt in acked_pkts
|
||||
.iter()
|
||||
.filter(|pkt| pkt.in_flight)
|
||||
.filter(|pkt| pkt.time_declared_lost.is_none())
|
||||
{
|
||||
for pkt in acked_pkts.iter().filter(|pkt| pkt.cc_outstanding()) {
|
||||
assert!(self.bytes_in_flight >= pkt.size);
|
||||
self.bytes_in_flight -= pkt.size;
|
||||
|
||||
|
@ -104,7 +109,7 @@ impl CongestionControl {
|
|||
pub fn on_packets_lost(
|
||||
&mut self,
|
||||
now: Instant,
|
||||
largest_acked_sent: Option<Instant>,
|
||||
prev_largest_acked_sent: Option<Instant>,
|
||||
pto: Duration,
|
||||
lost_packets: &[SentPacket],
|
||||
) {
|
||||
|
@ -112,7 +117,7 @@ impl CongestionControl {
|
|||
return;
|
||||
}
|
||||
|
||||
for pkt in lost_packets.iter().filter(|pkt| pkt.in_flight) {
|
||||
for pkt in lost_packets.iter().filter(|pkt| pkt.cc_in_flight()) {
|
||||
assert!(self.bytes_in_flight >= pkt.size);
|
||||
self.bytes_in_flight -= pkt.size;
|
||||
}
|
||||
|
@ -122,34 +127,35 @@ impl CongestionControl {
|
|||
let last_lost_pkt = lost_packets.last().unwrap();
|
||||
self.on_congestion_event(now, last_lost_pkt.time_sent);
|
||||
|
||||
let in_persistent_congestion = {
|
||||
let congestion_period = pto * PERSISTENT_CONG_THRESH;
|
||||
let congestion_period = pto * PERSISTENT_CONG_THRESH;
|
||||
|
||||
match largest_acked_sent {
|
||||
Some(las) => las < last_lost_pkt.time_sent - congestion_period,
|
||||
None => {
|
||||
// Nothing has ever been acked. Could still be PC.
|
||||
let first_lost_pkt_sent = lost_packets.first().unwrap().time_sent;
|
||||
last_lost_pkt.time_sent - first_lost_pkt_sent > congestion_period
|
||||
}
|
||||
// Simpler to ignore any acked pkts in between first and last lost pkts
|
||||
if let Some(first) = lost_packets
|
||||
.iter()
|
||||
.find(|p| Some(p.time_sent) > prev_largest_acked_sent)
|
||||
{
|
||||
if last_lost_pkt.time_sent.duration_since(first.time_sent) > congestion_period {
|
||||
self.congestion_window = MIN_CONG_WINDOW;
|
||||
qinfo!([self], "persistent congestion");
|
||||
}
|
||||
};
|
||||
if in_persistent_congestion {
|
||||
qinfo!([self], "persistent congestion");
|
||||
self.congestion_window = MIN_CONG_WINDOW;
|
||||
}
|
||||
}
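A hedged sketch of the persistent-congestion condition as restructured above; the constant value comes from the file, while the helper name is invented. The window collapses to `MIN_CONG_WINDOW` only when the span between the first and last lost packet, with no ack in between, exceeds `PERSISTENT_CONG_THRESH` PTOs.

```rust
use std::time::{Duration, Instant};

const PERSISTENT_CONG_THRESH: u32 = 3;

// True if the lost span is longer than the persistent-congestion period.
fn is_persistent_congestion(pto: Duration, first_lost_sent: Instant, last_lost_sent: Instant) -> bool {
    last_lost_sent.duration_since(first_lost_sent) > pto * PERSISTENT_CONG_THRESH
}

fn main() {
    let start = Instant::now();
    let pto = Duration::from_millis(100);
    // Losses spread over 400 ms exceed 3 * 100 ms, so this would trigger.
    assert!(is_persistent_congestion(pto, start, start + Duration::from_millis(400)));
}
```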
|
||||
|
||||
pub fn discard(&mut self, pkt: &SentPacket) {
|
||||
if pkt.in_flight && pkt.time_declared_lost.is_none() {
|
||||
if pkt.cc_outstanding() {
|
||||
assert!(self.bytes_in_flight >= pkt.size);
|
||||
self.bytes_in_flight -= pkt.size;
|
||||
qtrace!([self], "Ignore pkt with size {}", pkt.size);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn on_packet_sent(&mut self, pkt: &SentPacket) {
|
||||
if !pkt.in_flight {
|
||||
pub fn on_packet_sent(&mut self, pkt: &SentPacket, rtt: Duration) {
|
||||
self.pacer
|
||||
.as_mut()
|
||||
.unwrap()
|
||||
.spend(pkt.time_sent, rtt, self.congestion_window, pkt.size);
|
||||
|
||||
if !pkt.cc_in_flight() {
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -194,4 +200,27 @@ impl CongestionControl {
|
|||
//TODO(agrover): how do we get this info??
|
||||
false
|
||||
}
|
||||
|
||||
pub fn start_pacer(&mut self, now: Instant) {
|
||||
// Start the pacer with a small burst size.
|
||||
self.pacer = Some(Pacer::new(
|
||||
now,
|
||||
MAX_DATAGRAM_SIZE * PACING_BURST_SIZE,
|
||||
MAX_DATAGRAM_SIZE,
|
||||
));
|
||||
}
|
||||
|
||||
pub fn next_paced(&self, rtt: Duration) -> Option<Instant> {
|
||||
// Only pace if there are bytes in flight.
|
||||
if self.bytes_in_flight > 0 {
|
||||
Some(
|
||||
self.pacer
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.next(rtt, self.congestion_window),
|
||||
)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
// Encoding and decoding packets off the wire.
|
||||
|
||||
use neqo_common::{hex, matches, Decoder};
|
||||
use neqo_common::{hex, hex_with_len, matches, Decoder};
|
||||
use neqo_crypto::random;
|
||||
|
||||
use std::borrow::Borrow;
|
||||
|
@ -70,7 +70,7 @@ impl std::ops::Deref for ConnectionId {
|
|||
|
||||
impl ::std::fmt::Debug for ConnectionId {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "CID {}", hex(&self.cid))
|
||||
write!(f, "CID {}", hex_with_len(&self.cid))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -93,13 +93,13 @@ pub struct ConnectionIdRef<'a> {
|
|||
|
||||
impl<'a> ::std::fmt::Debug for ConnectionIdRef<'a> {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "CID {}", hex(&self.cid))
|
||||
write!(f, "CID {}", hex_with_len(&self.cid))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> ::std::fmt::Display for ConnectionIdRef<'a> {
|
||||
fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
|
||||
write!(f, "{}", hex(&self.cid))
|
||||
write!(f, "{}", hex_with_len(&self.cid))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
The diff between the files is not shown because of its large size.
|
@ -11,16 +11,15 @@ use std::ops::{Index, IndexMut, Range};
|
|||
use std::rc::Rc;
|
||||
use std::time::Instant;
|
||||
|
||||
use neqo_common::{hex, matches, qdebug, qerror, qinfo, qtrace};
|
||||
use neqo_common::{hex, matches, qdebug, qerror, qinfo, qtrace, Role};
|
||||
use neqo_crypto::aead::Aead;
|
||||
use neqo_crypto::hp::HpKey;
|
||||
use neqo_crypto::{
|
||||
hkdf, Agent, AntiReplay, Cipher, Epoch, RecordList, SymKey, TLS_AES_128_GCM_SHA256,
|
||||
TLS_AES_256_GCM_SHA384, TLS_EPOCH_APPLICATION_DATA, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL,
|
||||
TLS_EPOCH_ZERO_RTT, TLS_VERSION_1_3,
|
||||
hkdf, Agent, AntiReplay, Cipher, Epoch, HandshakeState, Record, RecordList, SymKey,
|
||||
TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CT_HANDSHAKE, TLS_EPOCH_APPLICATION_DATA,
|
||||
TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL, TLS_EPOCH_ZERO_RTT, TLS_VERSION_1_3,
|
||||
};
|
||||
|
||||
use crate::connection::Role;
|
||||
use crate::frame::Frame;
|
||||
use crate::packet::PacketNumber;
|
||||
use crate::recovery::RecoveryToken;
|
||||
|
@ -49,7 +48,7 @@ impl Crypto {
|
|||
agent.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?;
|
||||
agent.enable_ciphers(&[TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384])?;
|
||||
agent.set_alpn(protocols)?;
|
||||
agent.disable_end_of_early_data();
|
||||
agent.disable_end_of_early_data()?;
|
||||
match &mut agent {
|
||||
Agent::Client(c) => c.enable_0rtt()?,
|
||||
Agent::Server(s) => s.enable_0rtt(
|
||||
|
@ -66,6 +65,42 @@ impl Crypto {
|
|||
})
|
||||
}
|
||||
|
||||
pub fn handshake(
|
||||
&mut self,
|
||||
now: Instant,
|
||||
space: PNSpace,
|
||||
data: Option<&[u8]>,
|
||||
) -> Res<&HandshakeState> {
|
||||
let input = data.map(|d| {
|
||||
qtrace!("Handshake record received {:0x?} ", d);
|
||||
let epoch = match space {
|
||||
PNSpace::Initial => TLS_EPOCH_INITIAL,
|
||||
PNSpace::Handshake => TLS_EPOCH_HANDSHAKE,
|
||||
// Our epoch progresses forward, but the TLS epoch is fixed to 3.
|
||||
PNSpace::ApplicationData => TLS_EPOCH_APPLICATION_DATA,
|
||||
};
|
||||
Record {
|
||||
ct: TLS_CT_HANDSHAKE,
|
||||
epoch,
|
||||
data: d.to_vec(),
|
||||
}
|
||||
});
|
||||
|
||||
match self.tls.handshake_raw(now, input) {
|
||||
Ok(output) => {
|
||||
self.buffer_records(output)?;
|
||||
Ok(self.tls.state())
|
||||
}
|
||||
Err(e) => {
|
||||
qinfo!("Handshake failed");
|
||||
Err(match self.tls.alert() {
|
||||
Some(a) => Error::CryptoAlert(*a),
|
||||
_ => Error::CryptoError(e),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Enable 0-RTT and return `true` if it is enabled successfully.
|
||||
pub fn enable_0rtt(&mut self, role: Role) -> Res<bool> {
|
||||
let info = self.tls.preinfo()?;
|
||||
|
@ -155,12 +190,15 @@ impl Crypto {
|
|||
}
|
||||
|
||||
/// Buffer crypto records for sending.
|
||||
pub fn buffer_records(&mut self, records: RecordList) {
|
||||
pub fn buffer_records(&mut self, records: RecordList) -> Res<()> {
|
||||
for r in records {
|
||||
assert_eq!(r.ct, 22);
|
||||
if r.ct != TLS_CT_HANDSHAKE {
|
||||
return Err(Error::ProtocolViolation);
|
||||
}
|
||||
qtrace!([self], "Adding CRYPTO data {:?}", r);
|
||||
self.streams.send(PNSpace::from(r.epoch), &r.data);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn acked(&mut self, token: CryptoRecoveryToken) {
|
||||
|
@ -872,7 +910,7 @@ impl CryptoStreams {
|
|||
self.get(space).map_or(false, |cs| cs.rx.data_ready())
|
||||
}
|
||||
|
||||
pub fn read_to_end(&mut self, space: PNSpace, buf: &mut Vec<u8>) -> Res<u64> {
|
||||
pub fn read_to_end(&mut self, space: PNSpace, buf: &mut Vec<u8>) -> usize {
|
||||
self.get_mut(space).unwrap().rx.read_to_end(buf)
|
||||
}
|
||||
|
||||
|
|
|
@ -11,8 +11,7 @@ use neqo_common::{matches, qdebug, qtrace, Decoder, Encoder};
|
|||
use crate::cid::MAX_CONNECTION_ID_LEN;
|
||||
use crate::packet::PacketType;
|
||||
use crate::stream_id::{StreamId, StreamIndex};
|
||||
use crate::{AppError, TransportError};
|
||||
use crate::{ConnectionError, Error, Res};
|
||||
use crate::{AppError, ConnectionError, Error, Res, TransportError, ERROR_APPLICATION_CLOSE};
|
||||
|
||||
use std::cmp::{min, Ordering};
|
||||
use std::convert::TryFrom;
|
||||
|
@ -42,14 +41,23 @@ const FRAME_TYPE_NEW_CONNECTION_ID: FrameType = 0x18;
|
|||
const FRAME_TYPE_RETIRE_CONNECTION_ID: FrameType = 0x19;
|
||||
const FRAME_TYPE_PATH_CHALLENGE: FrameType = 0x1a;
|
||||
const FRAME_TYPE_PATH_RESPONSE: FrameType = 0x1b;
|
||||
const FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT: FrameType = 0x1c;
|
||||
const FRAME_TYPE_CONNECTION_CLOSE_APPLICATION: FrameType = 0x1d;
|
||||
pub const FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT: FrameType = 0x1c;
|
||||
pub const FRAME_TYPE_CONNECTION_CLOSE_APPLICATION: FrameType = 0x1d;
|
||||
const FRAME_TYPE_HANDSHAKE_DONE: FrameType = 0x1e;
|
||||
|
||||
const STREAM_FRAME_BIT_FIN: u64 = 0x01;
|
||||
const STREAM_FRAME_BIT_LEN: u64 = 0x02;
|
||||
const STREAM_FRAME_BIT_OFF: u64 = 0x04;
|
||||
|
||||
/// `FRAME_APPLICATION_CLOSE` is the default CONNECTION_CLOSE frame that
|
||||
/// is sent when an application error code needs to be sent in an
|
||||
/// Initial or Handshake packet.
|
||||
const FRAME_APPLICATION_CLOSE: &Frame = &Frame::ConnectionClose {
|
||||
error_code: CloseError::Transport(ERROR_APPLICATION_CLOSE),
|
||||
frame_type: 0,
|
||||
reason_phrase: Vec::new(),
|
||||
};
|
||||
|
||||
#[derive(PartialEq, Debug, Copy, Clone, PartialOrd, Eq, Ord, Hash)]
|
||||
/// Bi-Directional or Uni-Directional.
|
||||
pub enum StreamType {
|
||||
|
@ -95,7 +103,7 @@ impl CloseError {
|
|||
}
|
||||
}
|
||||
|
||||
fn code(&self) -> u64 {
|
||||
pub fn code(&self) -> u64 {
|
||||
match self {
|
||||
Self::Transport(c) | Self::Application(c) => *c,
|
||||
}
|
||||
|
@ -446,6 +454,19 @@ impl Frame {
|
|||
}
|
||||
}
|
||||
|
||||
/// Convert a CONNECTION_CLOSE into a nicer CONNECTION_CLOSE.
|
||||
pub fn sanitize_close(&self) -> &Self {
|
||||
if let Self::ConnectionClose { error_code, .. } = &self {
|
||||
if let CloseError::Application(_) = error_code {
|
||||
FRAME_APPLICATION_CLOSE
|
||||
} else {
|
||||
self
|
||||
}
|
||||
} else {
|
||||
panic!("Attempted to sanitize a non-close frame");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn ack_eliciting(&self) -> bool {
|
||||
!matches!(self, Self::Ack { .. } | Self::Padding | Self::ConnectionClose { .. })
|
||||
}
|
||||
|
@ -594,9 +615,13 @@ impl Frame {
|
|||
}),
|
||||
FRAME_TYPE_CRYPTO => {
|
||||
let o = dv!(dec);
|
||||
let data = d!(dec.decode_vvec());
|
||||
if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) {
|
||||
return Err(Error::FrameEncodingError);
|
||||
}
|
||||
Ok(Self::Crypto {
|
||||
offset: o,
|
||||
data: d!(dec.decode_vvec()).to_vec(), // TODO(mt) unnecessary copy
|
||||
data: data.to_vec(), // TODO(mt) unnecessary copy
|
||||
})
|
||||
}
|
||||
FRAME_TYPE_NEW_TOKEN => {
|
||||
|
@ -619,6 +644,9 @@ impl Frame {
|
|||
qtrace!("STREAM frame, with length");
|
||||
d!(dec.decode_vvec())
|
||||
};
|
||||
if o + u64::try_from(data.len()).unwrap() > ((1 << 62) - 1) {
|
||||
return Err(Error::FrameEncodingError);
|
||||
}
|
||||
Ok(Self::Stream {
|
||||
fin: (t & STREAM_FRAME_BIT_FIN) != 0,
|
||||
stream_id: s.into(),
|
||||
|
@ -634,11 +662,16 @@ impl Frame {
|
|||
stream_id: dv!(dec).into(),
|
||||
maximum_stream_data: dv!(dec),
|
||||
}),
|
||||
FRAME_TYPE_MAX_STREAMS_BIDI | FRAME_TYPE_MAX_STREAMS_UNIDI => Ok(Self::MaxStreams {
|
||||
stream_type: StreamType::from_type_bit(t),
|
||||
maximum_streams: StreamIndex::new(dv!(dec)),
|
||||
}),
|
||||
|
||||
FRAME_TYPE_MAX_STREAMS_BIDI | FRAME_TYPE_MAX_STREAMS_UNIDI => {
|
||||
let m = dv!(dec);
|
||||
if m > (1 << 60) {
|
||||
return Err(Error::StreamLimitError);
|
||||
}
|
||||
Ok(Self::MaxStreams {
|
||||
stream_type: StreamType::from_type_bit(t),
|
||||
maximum_streams: StreamIndex::new(m),
|
||||
})
|
||||
}
|
||||
FRAME_TYPE_DATA_BLOCKED => Ok(Self::DataBlocked {
|
||||
data_limit: dv!(dec),
|
||||
}),
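A minimal, self-contained restatement of the bound that the new CRYPTO and STREAM decoding checks above enforce (the helper name is illustrative): offset plus data length must not exceed the QUIC varint maximum of 2^62 - 1.

```rust
// Returns true when offset + len stays within the 62-bit varint range.
fn within_varint_range(offset: u64, len: usize) -> bool {
    offset
        .checked_add(len as u64)
        .map_or(false, |end| end <= (1 << 62) - 1)
}

fn main() {
    assert!(within_varint_range((1 << 62) - 2, 1));
    assert!(!within_varint_range((1 << 62) - 1, 1));
}
```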
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#![warn(clippy::use_self)]
|
||||
|
||||
use neqo_common::qinfo;
|
||||
use neqo_crypto;
|
||||
|
||||
mod cc;
|
||||
mod cid;
|
||||
|
@ -18,8 +17,10 @@ mod dump;
|
|||
mod events;
|
||||
mod flow_mgr;
|
||||
mod frame;
|
||||
mod pace;
|
||||
mod packet;
|
||||
mod path;
|
||||
mod qlog;
|
||||
mod recovery;
|
||||
mod recv_stream;
|
||||
mod send_stream;
|
||||
|
@ -29,19 +30,21 @@ mod stream_id;
|
|||
pub mod tparams;
|
||||
mod tracking;
|
||||
|
||||
pub use self::cid::ConnectionIdManager;
|
||||
pub use self::connection::{Connection, FixedConnectionIdManager, Output, Role, State};
|
||||
pub use self::cid::{ConnectionId, ConnectionIdManager};
|
||||
pub use self::connection::{Connection, FixedConnectionIdManager, Output, State, ZeroRttState};
|
||||
pub use self::events::{ConnectionEvent, ConnectionEvents};
|
||||
pub use self::frame::CloseError;
|
||||
pub use self::frame::StreamType;
|
||||
pub use self::stream_id::StreamId;
|
||||
|
||||
/// The supported version of the QUIC protocol.
|
||||
pub type Version = u32;
|
||||
pub const QUIC_VERSION: Version = 0xff00_0000 + 27;
|
||||
|
||||
const LOCAL_IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60); // 1 minute
|
||||
const LOCAL_IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30); // 30 second
|
||||
|
||||
type TransportError = u64;
|
||||
const ERROR_APPLICATION_CLOSE: TransportError = 12;
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, PartialOrd, Ord, Eq)]
|
||||
#[allow(clippy::pub_enum_variant_names)]
|
||||
|
@ -56,8 +59,10 @@ pub enum Error {
|
|||
FrameEncodingError,
|
||||
TransportParameterError,
|
||||
ProtocolViolation,
|
||||
InvalidMigration,
|
||||
InvalidToken,
|
||||
ApplicationError,
|
||||
CryptoError(neqo_crypto::Error),
|
||||
QlogError,
|
||||
CryptoAlert(u8),
|
||||
|
||||
// All internal errors from here.
|
||||
|
@ -69,6 +74,7 @@ pub enum Error {
|
|||
IdleTimeout,
|
||||
IntegerOverflow,
|
||||
InvalidInput,
|
||||
InvalidMigration,
|
||||
InvalidPacket,
|
||||
InvalidResumptionToken,
|
||||
InvalidRetry,
|
||||
|
@ -81,6 +87,7 @@ pub enum Error {
|
|||
NoMoreData,
|
||||
NotConnected,
|
||||
PacketNumberOverlap,
|
||||
PeerApplicationError(AppError),
|
||||
PeerError(TransportError),
|
||||
TooMuchData,
|
||||
UnexpectedMessage,
|
||||
|
@ -93,7 +100,10 @@ pub enum Error {
|
|||
impl Error {
|
||||
pub fn code(&self) -> TransportError {
|
||||
match self {
|
||||
Self::NoError | Self::IdleTimeout => 0,
|
||||
Self::NoError
|
||||
| Self::IdleTimeout
|
||||
| Self::PeerError(_)
|
||||
| Self::PeerApplicationError(_) => 0,
|
||||
Self::ServerBusy => 2,
|
||||
Self::FlowControlError => 3,
|
||||
Self::StreamLimitError => 4,
|
||||
|
@ -102,9 +112,9 @@ impl Error {
|
|||
Self::FrameEncodingError => 7,
|
||||
Self::TransportParameterError => 8,
|
||||
Self::ProtocolViolation => 10,
|
||||
Self::InvalidMigration => 12,
|
||||
Self::InvalidToken => 11,
|
||||
Self::ApplicationError => ERROR_APPLICATION_CLOSE,
|
||||
Self::CryptoAlert(a) => 0x100 + u64::from(*a),
|
||||
Self::PeerError(a) => *a,
|
||||
// All the rest are internal errors.
|
||||
_ => 1,
|
||||
}
|
||||
|
@ -118,6 +128,12 @@ impl From<neqo_crypto::Error> for Error {
|
|||
}
|
||||
}
|
||||
|
||||
impl From<::qlog::Error> for Error {
|
||||
fn from(_err: ::qlog::Error) -> Self {
|
||||
Self::QlogError
|
||||
}
|
||||
}
|
||||
|
||||
impl From<std::num::TryFromIntError> for Error {
|
||||
fn from(_: std::num::TryFromIntError) -> Self {
|
||||
Self::IntegerOverflow
|
||||
|
|
|
@ -0,0 +1,145 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
// Pacer
|
||||
#![deny(clippy::pedantic)]
|
||||
|
||||
use neqo_common::qtrace;
|
||||
|
||||
use std::cmp::min;
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt::{Debug, Display};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
/// This value determines how much faster the pacer operates than the
|
||||
/// congestion window.
|
||||
///
|
||||
/// A value of 1 would cause all packets to be spaced over the entire RTT,
|
||||
/// which is a little slow and might act as an additional restriction in
|
||||
/// the case the congestion controller increases the congestion window.
|
||||
/// This value spaces packets over half the congestion window, which matches
|
||||
/// our current congestion controller, which double the window every RTT.
|
||||
const PACER_SPEEDUP: usize = 2;
|
||||
|
||||
/// A pacer that uses a leaky bucket.
|
||||
pub struct Pacer {
|
||||
/// The last update time.
|
||||
t: Instant,
|
||||
/// The maximum capacity, or burst size, in bytes.
|
||||
m: usize,
|
||||
/// The current capacity, in bytes.
|
||||
c: usize,
|
||||
/// The packet size or minimum capacity for sending, in bytes.
|
||||
p: usize,
|
||||
}
|
||||
|
||||
impl Pacer {
|
||||
/// Create a new `Pacer`. This takes the current time and the
|
||||
/// initial congestion window.
|
||||
///
|
||||
/// The value of `m` is the maximum capacity. `m` primes the pacer
|
||||
/// with credit and determines the burst size. `m` must not exceed
|
||||
/// the initial congestion window, but it should probably be lower.
|
||||
///
|
||||
/// The value of `p` is the packet size, which determines the minimum
|
||||
/// credit needed before a packet is sent. This should be a substantial
|
||||
/// fraction of the maximum packet size, if not the packet size.
|
||||
pub fn new(now: Instant, m: usize, p: usize) -> Self {
|
||||
assert!(m >= p, "maximum capacity has to be at least one packet");
|
||||
Self { t: now, m, c: m, p }
|
||||
}
|
||||
|
||||
/// Determine when the next packet will be available based on the provided RTT
|
||||
/// and congestion window. This doesn't update state.
|
||||
/// This returns a time, which could be in the past (this object doesn't know what
|
||||
/// the current time is).
|
||||
pub fn next(&self, rtt: Duration, cwnd: usize) -> Instant {
|
||||
if self.c >= self.p {
|
||||
qtrace!([self], "next {}/{:?} no wait: {:?}", cwnd, rtt, self.t);
|
||||
self.t
|
||||
} else {
|
||||
// This is the inverse of the function in `spend`:
|
||||
// self.t + rtt * (self.p - self.c) / (PACER_SPEEDUP * cwnd)
|
||||
let r = rtt.as_nanos();
|
||||
let d = r.saturating_mul(u128::try_from(self.p - self.c).unwrap());
|
||||
let add = d / u128::try_from(cwnd * PACER_SPEEDUP).unwrap();
|
||||
let dt = u64::try_from(add).map(Duration::from_nanos).unwrap_or(rtt);
|
||||
qtrace!(
|
||||
[self],
|
||||
"next {}/{:?} wait {:?}: {:?}",
|
||||
cwnd,
|
||||
rtt,
|
||||
dt,
|
||||
self.t + dt
|
||||
);
|
||||
self.t + dt
|
||||
}
|
||||
}
|
||||
|
||||
/// Spend credit. This cannot fail; users of this API are expected to call
|
||||
/// next() to determine when to spend. This takes the current time (`now`),
|
||||
/// an estimate of the round trip time (`rtt`), the estimated congestion
|
||||
/// window (`cwnd`), and the number of bytes that were sent (`count`).
|
||||
pub fn spend(&mut self, now: Instant, rtt: Duration, cwnd: usize, count: usize) {
|
||||
qtrace!([self], "spend {} over {}, {:?}", count, cwnd, rtt);
|
||||
// Increase the capacity by:
|
||||
// `(now - self.t) * PACER_SPEEDUP * cwnd / rtt`
|
||||
// That is, the elapsed fraction of the RTT times rate that data is added.
|
||||
let incr = now
|
||||
.saturating_duration_since(self.t)
|
||||
.as_nanos()
|
||||
.saturating_mul(u128::try_from(cwnd * PACER_SPEEDUP).unwrap())
|
||||
.checked_div(rtt.as_nanos())
|
||||
.map(|i| usize::try_from(i).ok())
|
||||
.flatten()
|
||||
.unwrap_or(self.m);
|
||||
|
||||
// Add the capacity up to a limit of `self.m`, then subtract `count`.
|
||||
self.c = min(self.m, (self.c + incr).saturating_sub(count));
|
||||
self.t = now;
|
||||
}
|
||||
}
|
||||
|
||||
impl Display for Pacer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "Pacer {}/{}", self.c, self.p)
|
||||
}
|
||||
}
|
||||
|
||||
impl Debug for Pacer {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
|
||||
write!(f, "Pacer@{:?} {}/{}..{}", self.t, self.c, self.p, self.m)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(tests)]
|
||||
mod tests {
|
||||
use super::Pacer;
|
||||
use test_fixture::now;
|
||||
|
||||
const RTT: Duration = Duration::from_millis(1000);
|
||||
const PACKET: usize = 1000;
|
||||
const CWND: usize = PACKET * 10;
|
||||
|
||||
#[test]
|
||||
fn even() {
|
||||
let mut n = now();
|
||||
let p = Pacer::new(n, PACKET, PACKET);
|
||||
assert_eq!(p.next(RTT, CWND), None);
|
||||
p.spend(n, RTT, CWND, PACKET);
|
||||
assert_eq!(p.next(RTT, CWND), Some(n + (RTT / 10)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn backwards_in_time() {
|
||||
let mut n = now();
|
||||
let p = Pacer::new(n + RTT, PACKET, PACKET);
|
||||
assert_eq!(p.next(RTT, CWND), None);
|
||||
// Now spend some credit in the past using a time machine.
|
||||
p.spend(n, RTT, CWND, PACKET);
|
||||
assert_eq!(p.next(RTT, CWND), Some(n + (RTT / 10)));
|
||||
}
|
||||
}
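To make the arithmetic in `Pacer::spend` above easier to follow, here is a self-contained version of just the credit-increment computation; the function name is invented, but the constant and formula are taken from the file. The refill is the elapsed fraction of the RTT times `PACER_SPEEDUP * cwnd`, falling back to the full burst size `m` if the division cannot be performed.

```rust
use std::convert::TryFrom;
use std::time::Duration;

const PACER_SPEEDUP: usize = 2;

// Credit added since the last send; mirrors the `incr` expression in spend().
fn credit_increment(elapsed: Duration, rtt: Duration, cwnd: usize, m: usize) -> usize {
    elapsed
        .as_nanos()
        .saturating_mul(u128::try_from(cwnd * PACER_SPEEDUP).unwrap())
        .checked_div(rtt.as_nanos())
        .and_then(|i| usize::try_from(i).ok())
        .unwrap_or(m)
}

fn main() {
    // Half an RTT elapsed with a 10_000-byte window refills a full window:
    // 0.5 * 2 * 10_000 = 10_000 bytes of credit.
    let rtt = Duration::from_millis(100);
    assert_eq!(credit_increment(Duration::from_millis(50), rtt, 10_000, 2_464), 10_000);
}
```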
|
|
@ -10,7 +10,7 @@ use crate::crypto::{CryptoDxState, CryptoStates};
|
|||
use crate::tracking::PNSpace;
|
||||
use crate::{Error, Res, Version, QUIC_VERSION};
|
||||
|
||||
use neqo_common::{hex, qerror, qtrace, Decoder, Encoder};
|
||||
use neqo_common::{hex, hex_with_len, qerror, qtrace, Decoder, Encoder};
|
||||
use neqo_crypto::{aead::Aead, hkdf, random, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3};
|
||||
|
||||
use std::cell::RefCell;
|
||||
|
@ -622,8 +622,8 @@ impl fmt::Debug for PublicPacket<'_> {
|
|||
f,
|
||||
"{:?}: {} {}",
|
||||
self.packet_type(),
|
||||
hex(&self.data[..self.header_len]),
|
||||
hex(&self.data[self.header_len..])
|
||||
hex_with_len(&self.data[..self.header_len]),
|
||||
hex_with_len(&self.data[self.header_len..])
|
||||
)
|
||||
}
|
||||
}
|
||||
|
@ -721,7 +721,7 @@ mod tests {
|
|||
padded.extend_from_slice(EXTRA);
|
||||
let (packet, remainder) = PublicPacket::decode(&padded, &cid_mgr()).unwrap();
|
||||
assert_eq!(packet.packet_type(), PacketType::Initial);
|
||||
assert_eq!(&packet.dcid()[..], &[]);
|
||||
assert_eq!(&packet.dcid()[..], &[] as &[u8]);
|
||||
assert_eq!(&packet.scid()[..], SERVER_CID);
|
||||
assert!(packet.token().is_empty());
|
||||
assert_eq!(remainder, EXTRA);
|
||||
|
|
|
@ -94,4 +94,14 @@ impl Path {
|
|||
pub fn datagram<V: Into<Vec<u8>>>(&self, payload: V) -> Datagram {
|
||||
Datagram::new(self.local, self.remote, payload)
|
||||
}
|
||||
|
||||
/// Get local address as `SocketAddr`
|
||||
pub fn local_address(&self) -> &SocketAddr {
|
||||
&self.local
|
||||
}
|
||||
|
||||
/// Get remote address as `SocketAddr`
|
||||
pub fn remote_address(&self) -> &SocketAddr {
|
||||
&self.remote
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,294 @@
|
|||
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
||||
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
||||
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
||||
// option. This file may not be copied, modified, or distributed
|
||||
// except according to those terms.
|
||||
|
||||
// Functions that handle capturing QLOG traces.
|
||||
|
||||
use std::string::String;
|
||||
|
||||
use qlog::{self, event::Event, PacketHeader, QuicFrame};
|
||||
|
||||
use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder};
|
||||
|
||||
use crate::frame::{self, Frame};
|
||||
use crate::packet::{DecryptedPacket, PacketNumber, PacketType};
|
||||
use crate::path::Path;
|
||||
use crate::tparams::{self, TransportParametersHandler};
|
||||
use crate::{Res, QUIC_VERSION};
|
||||
|
||||
pub fn connection_tparams_set(
|
||||
qlog: &mut Option<NeqoQlog>,
|
||||
tph: &TransportParametersHandler,
|
||||
) -> Res<()> {
|
||||
if let Some(qlog) = qlog {
|
||||
let remote = tph.remote();
|
||||
let event = Event::transport_parameters_set(
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
if let Some(ocid) = remote.get_bytes(tparams::ORIGINAL_CONNECTION_ID) {
|
||||
// Cannot use packet::ConnectionId's Display trait implementation
|
||||
// because it does not include the 0x prefix.
|
||||
Some(hex(&ocid))
|
||||
} else {
|
||||
None
|
||||
},
|
||||
if let Some(srt) = remote.get_bytes(tparams::STATELESS_RESET_TOKEN) {
|
||||
Some(hex(&srt))
|
||||
} else {
|
||||
None
|
||||
},
|
||||
if remote.get_empty(tparams::DISABLE_MIGRATION).is_some() {
|
||||
Some(true)
|
||||
} else {
|
||||
None
|
||||
},
|
||||
Some(remote.get_integer(tparams::IDLE_TIMEOUT)),
|
||||
Some(remote.get_integer(tparams::MAX_PACKET_SIZE)),
|
||||
Some(remote.get_integer(tparams::ACK_DELAY_EXPONENT)),
|
||||
Some(remote.get_integer(tparams::MAX_ACK_DELAY)),
|
||||
// TODO(hawkinsw@obs.cr): We do not yet handle ACTIVE_CONNECTION_ID_LIMIT in tparams yet.
|
||||
None,
|
||||
Some(format!("{}", remote.get_integer(tparams::INITIAL_MAX_DATA))),
|
||||
Some(format!(
|
||||
"{}",
|
||||
remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_LOCAL)
|
||||
)),
|
||||
Some(format!(
|
||||
"{}",
|
||||
remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_BIDI_REMOTE)
|
||||
)),
|
||||
Some(format!(
|
||||
"{}",
|
||||
remote.get_integer(tparams::INITIAL_MAX_STREAM_DATA_UNI)
|
||||
)),
|
||||
Some(format!(
|
||||
"{}",
|
||||
remote.get_integer(tparams::INITIAL_MAX_STREAMS_BIDI)
|
||||
)),
|
||||
Some(format!(
|
||||
"{}",
|
||||
remote.get_integer(tparams::INITIAL_MAX_STREAMS_UNI)
|
||||
)),
|
||||
// TODO(hawkinsw@obs.cr): We do not yet handle PREFERRED_ADDRESS in tparams yet.
|
||||
None,
|
||||
);
|
||||
|
||||
qlog.stream().add_event(event)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn server_connection_started(qlog: &mut Option<NeqoQlog>, path: &Path) -> Res<()> {
|
||||
connection_started(qlog, path)
|
||||
}
|
||||
|
||||
pub fn client_connection_started(qlog: &mut Option<NeqoQlog>, path: &Path) -> Res<()> {
|
||||
connection_started(qlog, path)
|
||||
}
|
||||
|
||||
pub fn packet_sent(
|
||||
qlog: &mut Option<NeqoQlog>,
|
||||
pt: PacketType,
|
||||
pn: PacketNumber,
|
||||
body: &[u8],
|
||||
) -> Res<()> {
|
||||
if let Some(qlog) = qlog {
|
||||
let mut d = Decoder::from(body);
|
||||
|
||||
qlog.stream().add_event(Event::packet_sent_min(
|
||||
to_qlog_pkt_type(pt),
|
||||
PacketHeader::new(pn, None, None, None, None, None),
|
||||
Some(Vec::new()),
|
||||
))?;
|
||||
|
||||
while d.remaining() > 0 {
|
||||
match Frame::decode(&mut d) {
|
||||
Ok(f) => qlog.stream().add_frame(frame_to_qlogframe(&f), false)?,
|
||||
Err(_) => {
|
||||
qinfo!("qlog: invalid frame");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
qlog.stream().finish_frames()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn packet_received(qlog: &mut Option<NeqoQlog>, payload: &DecryptedPacket) -> Res<()> {
|
||||
if let Some(qlog) = qlog {
|
||||
let mut d = Decoder::from(&payload[..]);
|
||||
|
||||
qlog.stream().add_event(Event::packet_received(
|
||||
to_qlog_pkt_type(payload.packet_type()),
|
||||
PacketHeader::new(payload.pn(), None, None, None, None, None),
|
||||
Some(Vec::new()),
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
))?;
|
||||
|
||||
while d.remaining() > 0 {
|
||||
match Frame::decode(&mut d) {
|
||||
Ok(f) => qlog.stream().add_frame(frame_to_qlogframe(&f), false)?,
|
||||
Err(_) => {
|
||||
qinfo!("qlog: invalid frame");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
qlog.stream().finish_frames()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn connection_started(qlog: &mut Option<NeqoQlog>, path: &Path) -> Res<()> {
|
||||
if let Some(qlog) = qlog {
|
||||
qlog.stream().add_event(Event::connection_started(
|
||||
if path.local_address().ip().is_ipv4() {
|
||||
"ipv4".into()
|
||||
} else {
|
||||
"ipv6".into()
|
||||
},
|
||||
format!("{}", path.local_address().ip()),
|
||||
format!("{}", path.remote_address().ip()),
|
||||
Some("QUIC".into()),
|
||||
path.local_address().port().into(),
|
||||
path.remote_address().port().into(),
|
||||
Some(format!("{:x}", QUIC_VERSION)),
|
||||
Some(format!("{}", path.local_cid())),
|
||||
Some(format!("{}", path.remote_cid())),
|
||||
))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Helper functions
|
||||
|
||||
fn frame_to_qlogframe(frame: &Frame) -> QuicFrame {
|
||||
match frame {
|
||||
Frame::Padding => QuicFrame::padding(),
|
||||
Frame::Ping => QuicFrame::ping(),
|
||||
Frame::Ack { ack_delay, .. } => {
|
||||
QuicFrame::ack(Some(ack_delay.to_string()), None, None, None, None)
|
||||
}
|
||||
Frame::ResetStream {
|
||||
stream_id,
|
||||
application_error_code,
|
||||
final_size,
|
||||
} => QuicFrame::reset_stream(
|
||||
stream_id.as_u64().to_string(),
|
||||
*application_error_code,
|
||||
final_size.to_string(),
|
||||
),
|
||||
Frame::StopSending {
|
||||
stream_id,
|
||||
application_error_code,
|
||||
} => QuicFrame::stop_sending(stream_id.as_u64().to_string(), *application_error_code),
|
||||
Frame::Crypto { offset, data } => {
|
||||
QuicFrame::crypto(offset.to_string(), data.len().to_string())
|
||||
}
|
||||
Frame::NewToken { token } => QuicFrame::new_token(token.len().to_string(), hex(&token)),
|
||||
Frame::Stream {
|
||||
fin,
|
||||
stream_id,
|
||||
offset,
|
||||
data,
|
||||
..
|
||||
} => QuicFrame::stream(
|
||||
stream_id.as_u64().to_string(),
|
||||
offset.to_string(),
|
||||
data.len().to_string(),
|
||||
*fin,
|
||||
None,
|
||||
),
|
||||
Frame::MaxData { maximum_data } => QuicFrame::max_data(maximum_data.to_string()),
|
||||
Frame::MaxStreamData {
|
||||
stream_id,
|
||||
maximum_stream_data,
|
||||
} => QuicFrame::max_stream_data(
|
||||
stream_id.as_u64().to_string(),
|
||||
maximum_stream_data.to_string(),
|
||||
),
|
||||
Frame::MaxStreams {
|
||||
stream_type,
|
||||
maximum_streams,
|
||||
} => QuicFrame::max_streams(
|
||||
match stream_type {
|
||||
frame::StreamType::BiDi => qlog::StreamType::Bidirectional,
|
||||
frame::StreamType::UniDi => qlog::StreamType::Unidirectional,
|
||||
},
|
||||
maximum_streams.as_u64().to_string(),
|
||||
),
|
||||
Frame::DataBlocked { data_limit } => QuicFrame::data_blocked(data_limit.to_string()),
|
||||
Frame::StreamDataBlocked {
|
||||
stream_id,
|
||||
stream_data_limit,
|
||||
} => QuicFrame::stream_data_blocked(
|
||||
stream_id.as_u64().to_string(),
|
||||
stream_data_limit.to_string(),
|
||||
),
|
||||
Frame::StreamsBlocked {
|
||||
stream_type,
|
||||
stream_limit,
|
||||
} => QuicFrame::streams_blocked(
|
||||
match stream_type {
|
||||
frame::StreamType::BiDi => qlog::StreamType::Bidirectional,
|
||||
frame::StreamType::UniDi => qlog::StreamType::Unidirectional,
|
||||
},
|
||||
stream_limit.as_u64().to_string(),
|
||||
),
|
||||
Frame::NewConnectionId {
|
||||
sequence_number,
|
||||
retire_prior,
|
||||
connection_id,
|
||||
stateless_reset_token,
|
||||
} => QuicFrame::new_connection_id(
|
||||
sequence_number.to_string(),
|
||||
retire_prior.to_string(),
|
||||
connection_id.len() as u64,
|
||||
hex(&connection_id),
|
||||
hex(stateless_reset_token),
|
||||
),
|
||||
Frame::RetireConnectionId { sequence_number } => {
|
||||
QuicFrame::retire_connection_id(sequence_number.to_string())
|
||||
}
|
||||
Frame::PathChallenge { data } => QuicFrame::path_challenge(Some(hex(data))),
|
||||
Frame::PathResponse { data } => QuicFrame::path_response(Some(hex(data))),
|
||||
Frame::ConnectionClose {
|
||||
error_code,
|
||||
frame_type,
|
||||
reason_phrase,
|
||||
} => QuicFrame::connection_close(
|
||||
match error_code {
|
||||
frame::CloseError::Transport(_) => qlog::ErrorSpace::TransportError,
|
||||
frame::CloseError::Application(_) => qlog::ErrorSpace::ApplicationError,
|
||||
},
|
||||
error_code.code(),
|
||||
0,
|
||||
String::from_utf8_lossy(&reason_phrase).to_string(),
|
||||
Some(frame_type.to_string()),
|
||||
),
|
||||
Frame::HandshakeDone => QuicFrame::unknown(0x1e),
|
||||
}
|
||||
}
|
||||
|
||||
fn to_qlog_pkt_type(ptype: PacketType) -> qlog::PacketType {
|
||||
match ptype {
|
||||
PacketType::Initial => qlog::PacketType::Initial,
|
||||
PacketType::Handshake => qlog::PacketType::Handshake,
|
||||
PacketType::ZeroRtt => qlog::PacketType::ZeroRtt,
|
||||
PacketType::Short => qlog::PacketType::OneRtt,
|
||||
PacketType::Retry => qlog::PacketType::Retry,
|
||||
PacketType::VersionNegotiation => qlog::PacketType::VersionNegotiation,
|
||||
PacketType::OtherVersion => qlog::PacketType::Unknown,
|
||||
}
|
||||
}
|
The diff between the files is not shown because of its large size.
|
@ -10,7 +10,7 @@
|
|||
use std::cell::RefCell;
|
||||
use std::cmp::{max, min};
|
||||
use std::collections::BTreeMap;
|
||||
use std::convert::TryInto;
|
||||
use std::convert::TryFrom;
|
||||
use std::mem;
|
||||
use std::ops::Bound::{Included, Unbounded};
|
||||
use std::rc::Rc;
|
||||
|
@ -219,9 +219,9 @@ impl RxStreamOrderer {
|
|||
}
|
||||
|
||||
/// Copy received data (if any) into the buffer. Returns bytes copied.
|
||||
fn read(&mut self, buf: &mut [u8]) -> Res<u64> {
|
||||
fn read(&mut self, buf: &mut [u8]) -> usize {
|
||||
qtrace!("Reading {} bytes, {} available", buf.len(), self.buffered());
|
||||
let mut buf_remaining = buf.len() as usize;
|
||||
let mut buf_remaining = buf.len();
|
||||
let mut copied = 0;
|
||||
|
||||
for (&range_start, range_data) in &mut self.data_ranges {
|
||||
|
@ -230,7 +230,8 @@ impl RxStreamOrderer {
|
|||
|
||||
// Convert to offset into data vec and move past bytes we
|
||||
// already have
|
||||
let copy_offset = (max(range_start, self.retired) - range_start).try_into()?;
|
||||
let copy_offset =
|
||||
usize::try_from(max(range_start, self.retired) - range_start).unwrap();
|
||||
let copy_bytes = min(range_data.len() - copy_offset, buf_remaining);
|
||||
let copy_slc = &mut range_data[copy_offset..copy_offset + copy_bytes];
|
||||
buf[copied..copied + copy_bytes].copy_from_slice(copy_slc);
|
||||
|
@ -253,11 +254,11 @@ impl RxStreamOrderer {
|
|||
self.data_ranges.remove(&key);
|
||||
}
|
||||
|
||||
Ok(copied as u64)
|
||||
copied
|
||||
}
|
||||
|
||||
/// Extend the given Vector with any available data.
|
||||
pub fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Res<u64> {
|
||||
pub fn read_to_end(&mut self, buf: &mut Vec<u8>) -> usize {
|
||||
let orig_len = buf.len();
|
||||
buf.resize(orig_len + self.bytes_ready(), 0);
|
||||
self.read(&mut buf[orig_len..])
|
||||
|
@ -501,12 +502,12 @@ impl RecvStream {
|
|||
.map_or(false, RxStreamOrderer::data_ready)
|
||||
}
|
||||
|
||||
pub fn read(&mut self, buf: &mut [u8]) -> Res<(u64, bool)> {
|
||||
pub fn read(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> {
|
||||
let res = match &mut self.state {
|
||||
RecvStreamState::Recv { recv_buf, .. }
|
||||
| RecvStreamState::SizeKnown { recv_buf, .. } => Ok((recv_buf.read(buf)?, false)),
|
||||
| RecvStreamState::SizeKnown { recv_buf, .. } => Ok((recv_buf.read(buf), false)),
|
||||
RecvStreamState::DataRecvd { recv_buf } => {
|
||||
let bytes_read = recv_buf.read(buf)?;
|
||||
let bytes_read = recv_buf.read(buf);
|
||||
let fin_read = recv_buf.buffered() == 0;
|
||||
if fin_read {
|
||||
self.set_state(RecvStreamState::DataRead)
|
||||
|
@ -703,7 +704,10 @@ mod tests {
|
|||
s.inbound_stream_frame(false, 0, frame1).unwrap();
|
||||
s.maybe_send_flowc_update();
|
||||
assert_eq!(s.flow_mgr.borrow().peek(), None);
|
||||
assert_eq!(s.read(&mut buf).unwrap(), (RX_STREAM_DATA_WINDOW, false));
|
||||
assert_eq!(
|
||||
s.read(&mut buf).unwrap(),
|
||||
(RX_STREAM_DATA_WINDOW as usize, false)
|
||||
);
|
||||
assert_eq!(s.data_ready(), false);
|
||||
s.maybe_send_flowc_update();
|
||||
|
||||
|
@ -750,7 +754,7 @@ mod tests {
|
|||
assert_eq!(rx_ord.buffered(), 6);
|
||||
assert_eq!(rx_ord.retired(), 0);
|
||||
// read some so there's an offset into the first frame
|
||||
rx_ord.read(&mut buf[..2]).unwrap();
|
||||
rx_ord.read(&mut buf[..2]);
|
||||
assert_eq!(rx_ord.bytes_ready(), 4);
|
||||
assert_eq!(rx_ord.buffered(), 4);
|
||||
assert_eq!(rx_ord.retired(), 2);
|
||||
|
|
|
@ -571,11 +571,13 @@ impl SendStream {
|
|||
|
||||
/// Bytes sendable on stream. Constrained by stream credit available,
|
||||
/// connection credit available, and space in the tx buffer.
|
||||
pub fn avail(&self) -> u64 {
|
||||
pub fn avail(&self) -> usize {
|
||||
min(
|
||||
min(self.state.tx_avail(), self.credit_avail()),
|
||||
self.flow_mgr.borrow().conn_credit_avail(),
|
||||
)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub fn max_stream_data(&self) -> u64 {
|
||||
|
@ -608,6 +610,26 @@ impl SendStream {
|
|||
}
|
||||
|
||||
pub fn send(&mut self, buf: &[u8]) -> Res<usize> {
|
||||
self.send_internal(buf, false)
|
||||
}
|
||||
|
||||
pub fn send_atomic(&mut self, buf: &[u8]) -> Res<usize> {
|
||||
self.send_internal(buf, true)
|
||||
}
|
||||
|
||||
fn send_blocked(&mut self, len: u64) {
|
||||
if self.credit_avail() < len {
|
||||
self.flow_mgr
|
||||
.borrow_mut()
|
||||
.stream_data_blocked(self.stream_id, self.max_stream_data);
|
||||
}
|
||||
|
||||
if self.flow_mgr.borrow().conn_credit_avail() < len {
|
||||
self.flow_mgr.borrow_mut().data_blocked();
|
||||
}
|
||||
}
|
||||
|
||||
fn send_internal(&mut self, buf: &[u8], atomic: bool) -> Res<usize> {
|
||||
if buf.is_empty() {
|
||||
qerror!("zero-length send on stream {}", self.stream_id.as_u64());
|
||||
return Err(Error::InvalidInput);
|
||||
|
@ -623,13 +645,18 @@ impl SendStream {
|
|||
return Err(Error::FinalSizeError);
|
||||
}
|
||||
|
||||
let can_send_bytes = min(self.avail(), buf.len() as u64);
|
||||
|
||||
if can_send_bytes == 0 {
|
||||
let buf = if buf.is_empty() || (self.avail() == 0) {
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
let buf = &buf[..can_send_bytes.try_into()?];
|
||||
} else if self.avail() < buf.len() {
|
||||
self.send_blocked(buf.len() as u64);
|
||||
if atomic {
|
||||
return Ok(0);
|
||||
} else {
|
||||
&buf[..self.avail()]
|
||||
}
|
||||
} else {
|
||||
buf
|
||||
};
|
||||
|
||||
let sent = match &mut self.state {
|
||||
SendStreamState::Ready => unreachable!(),
|
||||
|
@ -1100,7 +1127,7 @@ mod tests {
|
|||
|
||||
// Unblocking both by a large amount will cause avail() to be limited by
|
||||
// tx buffer size.
|
||||
assert_eq!(s.avail(), u64::try_from(TxBuffer::BUFFER_SIZE - 4).unwrap());
|
||||
assert_eq!(s.avail(), TxBuffer::BUFFER_SIZE - 4);
|
||||
|
||||
assert_eq!(
|
||||
s.send(&[b'a'; TxBuffer::BUFFER_SIZE]).unwrap(),
|
||||
|
@@ -1232,4 +1259,64 @@ mod tests {
        assert!(matches!(&f4_token, Some(RecoveryToken::Stream(x)) if x.length == 10));
        assert!(matches!(&f4_token, Some(RecoveryToken::Stream(x)) if x.fin));
    }

    #[test]
    fn send_atomic() {
        let flow_mgr = Rc::new(RefCell::new(FlowMgr::default()));
        flow_mgr.borrow_mut().conn_increase_max_credit(5);
        let conn_events = ConnectionEvents::default();

        let mut s = SendStream::new(4.into(), 0, Rc::clone(&flow_mgr), conn_events);
        s.set_max_stream_data(2);

        // Stream is initially blocked (conn:5, stream:2)
        // and will not accept atomic write of 3 bytes.
        assert_eq!(s.send_atomic(b"abc").unwrap(), 0);

        // assert that STREAM_DATA_BLOCKED is sent.
        assert_eq!(
            flow_mgr.borrow_mut().next().unwrap(),
            Frame::StreamDataBlocked {
                stream_id: 4.into(),
                stream_data_limit: 0x2
            }
        );

        // assert non-atomic write works
        assert_eq!(s.send(b"abc").unwrap(), 2);
        // assert that STREAM_DATA_BLOCKED is sent.
        assert_eq!(
            flow_mgr.borrow_mut().next().unwrap(),
            Frame::StreamDataBlocked {
                stream_id: 4.into(),
                stream_data_limit: 0x2
            }
        );

        // increasing to (conn:5, stream:10)
        s.set_max_stream_data(10);
        // will not accept atomic write of 4 bytes.
        assert_eq!(s.send_atomic(b"abcd").unwrap(), 0);

        // assert that DATA_BLOCKED is sent.
        assert_eq!(
            flow_mgr.borrow_mut().next().unwrap(),
            Frame::DataBlocked { data_limit: 0x5 }
        );

        // assert non-atomic write works
        assert_eq!(s.send(b"abcd").unwrap(), 3);
        // assert that DATA_BLOCKED is sent.
        assert_eq!(
            flow_mgr.borrow_mut().next().unwrap(),
            Frame::DataBlocked { data_limit: 0x5 }
        );

        // increasing to (conn:15, stream:15)
        s.set_max_stream_data(15);
        flow_mgr.borrow_mut().conn_increase_max_credit(15);

        // assert that atomically writing 10 bytes works
        assert_eq!(s.send_atomic(b"abcdefghij").unwrap(), 10);
    }
}
@@ -7,7 +7,8 @@
// This file implements a server that can handle multiple connections.

use neqo_common::{
    hex, matches, qerror, qinfo, qtrace, qwarn, timer::Timer, Datagram, Decoder, Encoder,
    self as common, hex, matches, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn,
    timer::Timer, Datagram, Decoder, Encoder, Role,
};
use neqo_crypto::{
    constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3},
@@ -23,9 +24,11 @@ use crate::Res;
use std::cell::RefCell;
use std::collections::{HashMap, HashSet, VecDeque};
use std::convert::TryFrom;
use std::fs::OpenOptions;
use std::mem;
use std::net::{IpAddr, SocketAddr};
use std::ops::{Deref, DerefMut};
use std::path::PathBuf;
use std::rc::Rc;
use std::time::{Duration, Instant};
@@ -48,6 +51,7 @@ type ConnectionTableRef = Rc<RefCell<HashMap<ConnectionId, StateRef>>>;
#[derive(Debug)]
pub struct ServerConnectionState {
    c: Connection,
    active_attempt: Option<AttemptKey>,
    last_timer: Instant,
}
@@ -177,6 +181,16 @@ impl RetryToken {
    }
}

/// An `AttemptKey` is used to disambiguate connection attempts.
/// Multiple connection attempts with the same key won't produce multiple connections.
#[derive(Clone, Debug, Hash, PartialEq, Eq)]
struct AttemptKey {
    // Using the remote address is sufficient for disambiguation,
    // until we support multiple local socket addresses.
    remote_address: SocketAddr,
    odcid: ConnectionId,
}

pub struct Server {
    /// The names of certificates.
    certs: Vec<String>,
@@ -185,6 +199,10 @@ pub struct Server {
    anti_replay: AntiReplay,
    /// A connection ID manager.
    cid_manager: CidMgr,
    /// Active connection attempts, keyed by `AttemptKey`. Initial packets with
    /// the same key are routed to the connection that was first accepted.
    /// This is cleared out when the connection is closed or established.
    active_attempts: HashMap<AttemptKey, StateRef>,
    /// All connections, keyed by ConnectionId.
    connections: ConnectionTableRef,
    /// The connections that have new events.
@@ -196,6 +214,8 @@ pub struct Server {
    /// Whether a Retry packet will be sent in response to new
    /// Initial packets.
    retry: RetryToken,
    /// Directory to create qlog traces in
    qlog_dir: Option<PathBuf>,
}

impl Server {
@@ -218,14 +238,21 @@ impl Server {
            protocols: protocols.iter().map(|x| String::from(x.as_ref())).collect(),
            anti_replay,
            cid_manager,
            active_attempts: HashMap::default(),
            connections: Rc::default(),
            active: HashSet::default(),
            waiting: VecDeque::default(),
            timers: Timer::new(now, TIMER_GRANULARITY, TIMER_CAPACITY),
            retry: RetryToken::new(now)?,
            qlog_dir: None,
        })
    }

    /// Set or clear directory to create logs of connection events in QLOG format.
    pub fn set_qlog_dir(&mut self, dir: Option<PathBuf>) {
        self.qlog_dir = dir;
    }

    pub fn set_retry_required(&mut self, require_retry: bool) {
        self.retry.set_retry_required(require_retry);
    }
@@ -265,7 +292,16 @@ impl Server {
            qtrace!([self], "Connection active: {:?}", c);
            self.active.insert(ActiveConnectionRef { c: c.clone() });
        }

        if *c.borrow().state() > State::Handshaking {
            // Remove any active connection attempt now that this is no longer handshaking.
            if let Some(k) = c.borrow_mut().active_attempt.take() {
                self.active_attempts.remove(&k);
            }
        }

        if matches!(c.borrow().state(), State::Closed(_)) {
            c.borrow_mut().set_qlog(None);
            self.connections
                .borrow_mut()
                .retain(|_, v| !Rc::ptr_eq(v, &c));
@@ -275,7 +311,7 @@ impl Server {

    fn connection(&self, cid: &ConnectionIdRef) -> Option<StateRef> {
        if let Some(c) = self.connections.borrow().get(&cid[..]) {
            Some(c.clone())
            Some(Rc::clone(&c))
        } else {
            None
        }
@@ -289,10 +325,13 @@ impl Server {
        dgram: Datagram,
        now: Instant,
    ) -> Option<Datagram> {
        qdebug!([self], "Handle initial packet");
        match self.retry.validate(&token, dgram.source(), now) {
            RetryTokenResult::Invalid => None,
            RetryTokenResult::Pass => self.accept_connection(None, dgram, now),
            RetryTokenResult::Valid(dcid) => self.accept_connection(Some(dcid), dgram, now),
            RetryTokenResult::Pass => self.connection_attempt(dcid, None, dgram, now),
            RetryTokenResult::Valid(orig_dcid) => {
                self.connection_attempt(dcid, Some(orig_dcid), dgram, now)
            }
            RetryTokenResult::Validate => {
                qinfo!([self], "Send retry for {:?}", dcid);
@@ -316,13 +355,88 @@ impl Server {
        }
    }

    fn accept_connection(
    fn connection_attempt(
        &mut self,
        odcid: Option<ConnectionId>,
        dcid: ConnectionId,
        orig_dcid: Option<ConnectionId>,
        dgram: Datagram,
        now: Instant,
    ) -> Option<Datagram> {
        qinfo!([self], "Accept connection");
        let attempt_key = AttemptKey {
            remote_address: dgram.source(),
            odcid: orig_dcid.as_ref().unwrap_or(&dcid).clone(),
        };
        if let Some(c) = self.active_attempts.get(&attempt_key) {
            qdebug!(
                [self],
                "Handle Initial for existing connection attempt {:?}",
                attempt_key
            );
            let c = Rc::clone(c);
            self.process_connection(c, Some(dgram), now)
        } else {
            self.accept_connection(attempt_key, orig_dcid, dgram, now)
        }
    }

    fn create_qlog_trace(&self, attempt_key: &AttemptKey) -> Option<NeqoQlog> {
        if let Some(qlog_dir) = &self.qlog_dir {
            let mut qlog_path = qlog_dir.to_path_buf();

            // TODO(mt) - the original DCID is not really unique, which means that attackers
            // can cause us to overwrite our own logs. That's not ideal.
            qlog_path.push(format!("{}.qlog", attempt_key.odcid));

            match OpenOptions::new()
                .write(true)
                .create(true)
                .truncate(true)
                .open(&qlog_path)
            {
                Ok(f) => {
                    qinfo!("Qlog output to {}", qlog_path.display());

                    let streamer = ::qlog::QlogStreamer::new(
                        qlog::QLOG_VERSION.to_string(),
                        Some("Neqo server qlog".to_string()),
                        Some("Neqo server qlog".to_string()),
                        None,
                        std::time::Instant::now(),
                        common::qlog::new_trace(Role::Server),
                        Box::new(f),
                    );
                    let n_qlog = NeqoQlog::new(streamer, qlog_path);
                    match n_qlog {
                        Ok(nql) => Some(nql),
                        Err(e) => {
                            // Keep going but w/o qlogging
                            qerror!("NeqoQlog error: {}", e);
                            None
                        }
                    }
                }
                Err(e) => {
                    qerror!(
                        "Could not open file {} for qlog output: {}",
                        qlog_path.display(),
                        e
                    );
                    None
                }
            }
        } else {
            None
        }
    }

    fn accept_connection(
        &mut self,
        attempt_key: AttemptKey,
        orig_dcid: Option<ConnectionId>,
        dgram: Datagram,
        now: Instant,
    ) -> Option<Datagram> {
        qinfo!([self], "Accept connection {:?}", attempt_key);
        // The internal connection ID manager that we use is not used directly.
        // Instead, wrap it so that we can save connection IDs.
        let cid_mgr = Rc::new(RefCell::new(ServerConnectionIdManager {
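As the hunk above shows, when a qlog directory is configured the server writes one trace file per connection attempt, named after the original DCID. A tiny sketch of that path construction, using an illustrative helper name rather than the patch's `create_qlog_trace`:

```rust
use std::path::{Path, PathBuf};

// Illustrative: derive the per-connection qlog file path from a directory
// and a hex-formatted original DCID, as the server-side trace creation does.
fn qlog_path(dir: &Path, odcid_hex: &str) -> PathBuf {
    let mut p = dir.to_path_buf();
    p.push(format!("{}.qlog", odcid_hex));
    p
}

fn main() {
    let p = qlog_path(Path::new("/tmp/qlogs"), "0011aabb");
    assert_eq!(p, PathBuf::from("/tmp/qlogs/0011aabb.qlog"));
}
```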
@@ -330,18 +444,27 @@ impl Server {
            cid_manager: self.cid_manager.clone(),
            connections: self.connections.clone(),
        }));

        let sconn = Connection::new_server(
            &self.certs,
            &self.protocols,
            &self.anti_replay,
            cid_mgr.clone(),
        );

        if let Ok(mut c) = sconn {
            if let Some(odcid) = odcid {
            if let Some(odcid) = orig_dcid {
                c.original_connection_id(&odcid);
            }
            let c = Rc::new(RefCell::new(ServerConnectionState { c, last_timer: now }));
            c.set_qlog(self.create_qlog_trace(&attempt_key));
            let c = Rc::new(RefCell::new(ServerConnectionState {
                c,
                last_timer: now,
                active_attempt: Some(attempt_key.clone()),
            }));
            cid_mgr.borrow_mut().c = Some(c.clone());
            let previous_attempt = self.active_attempts.insert(attempt_key, c.clone());
            debug_assert!(previous_attempt.is_none());
            self.process_connection(c, Some(dgram), now)
        } else {
            qwarn!([self], "Unable to create connection");
@@ -378,16 +501,23 @@ impl Server {
            qtrace!([self], "Bogus packet: too short");
            return None;
        }
        if packet.packet_type() == PacketType::OtherVersion {
            let vn = PacketBuilder::version_negotiation(packet.scid(), packet.dcid());
            return Some(Datagram::new(dgram.destination(), dgram.source(), vn));
        match packet.packet_type() {
            PacketType::Initial => {
                // Copy values from `packet` because they are currently still borrowing from `dgram`.
                let dcid = ConnectionId::from(packet.dcid());
                let scid = ConnectionId::from(packet.scid());
                let token = packet.token().to_vec();
                self.handle_initial(dcid, scid, token, dgram, now)
            }
            PacketType::OtherVersion => {
                let vn = PacketBuilder::version_negotiation(packet.scid(), packet.dcid());
                Some(Datagram::new(dgram.destination(), dgram.source(), vn))
            }
            _ => {
                qtrace!([self], "Not an initial packet");
                None
            }
        }

        // Copy values from `packet` because they are currently still borrowing from `dgram`.
        let dcid = ConnectionId::from(packet.dcid());
        let scid = ConnectionId::from(packet.scid());
        let token = packet.token().to_vec();
        self.handle_initial(dcid, scid, token, dgram, now)
    }

    /// Iterate through the pending connections looking for any that might want
@@ -467,7 +597,7 @@ impl ActiveConnectionRef {
    }

    pub fn connection(&self) -> StateRef {
        self.c.clone()
        Rc::clone(&self.c)
    }
}
@@ -8,7 +8,9 @@

use std::ops::AddAssign;

use crate::connection::{Role, LOCAL_STREAM_LIMIT_BIDI, LOCAL_STREAM_LIMIT_UNI};
use neqo_common::Role;

use crate::connection::{LOCAL_STREAM_LIMIT_BIDI, LOCAL_STREAM_LIMIT_UNI};
use crate::frame::StreamType;

pub struct StreamIndexes {
@@ -92,13 +92,16 @@ impl TransportParameter {
            | INITIAL_MAX_STREAM_DATA_BIDI_LOCAL
            | INITIAL_MAX_STREAM_DATA_BIDI_REMOTE
            | INITIAL_MAX_STREAM_DATA_UNI
            | INITIAL_MAX_STREAMS_BIDI
            | INITIAL_MAX_STREAMS_UNI
            | MAX_ACK_DELAY => match d.decode_varint() {
                Some(v) => Self::Integer(v),
                None => return Err(Error::TransportParameterError),
            },

            INITIAL_MAX_STREAMS_BIDI | INITIAL_MAX_STREAMS_UNI => match d.decode_varint() {
                Some(v) if v <= (1 << 60) => Self::Integer(v),
                _ => return Err(Error::StreamLimitError),
            },

            MAX_PACKET_SIZE => match d.decode_varint() {
                Some(v) if v >= 1200 => Self::Integer(v),
                _ => return Err(Error::TransportParameterError),
@@ -233,6 +236,18 @@ impl TransportParameters {
        }
    }

    pub fn get_empty(&self, tipe: TransportParameterId) -> Option<TransportParameter> {
        let default = match tipe {
            DISABLE_MIGRATION => None,
            _ => panic!("Transport parameter not known or not an Empty"),
        };
        match self.params.get(&tipe) {
            None => default,
            Some(TransportParameter::Empty) => Some(TransportParameter::Empty),
            _ => panic!("Internal error"),
        }
    }

    /// Return true if the remembered transport parameters are OK for 0-RTT.
    /// Generally this means that any value that is currently in effect is greater than
    /// or equal to the promised value.
|
@ -6,19 +6,21 @@
|
|||
|
||||
// Tracking of received packets and generating acks thereof.
|
||||
|
||||
use std::cmp::min;
|
||||
#![deny(clippy::pedantic)]
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::convert::TryInto;
|
||||
use std::ops::{Index, IndexMut};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use neqo_common::{qdebug, qinfo, qtrace, qwarn};
|
||||
use neqo_crypto::{Epoch, TLS_EPOCH_APPLICATION_DATA, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL};
|
||||
use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL};
|
||||
|
||||
use crate::frame::{AckRange, Frame};
|
||||
use crate::packet::{PacketNumber, PacketType};
|
||||
use crate::recovery::RecoveryToken;
|
||||
|
||||
use smallvec::{smallvec, SmallVec};
|
||||
|
||||
// TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574
|
||||
#[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)]
|
||||
pub enum PNSpace {
|
||||
|
@@ -62,15 +64,15 @@ impl From<PacketType> for PNSpace {

#[derive(Debug, Clone)]
pub struct SentPacket {
    pub ack_eliciting: bool,
    ack_eliciting: bool,
    pub time_sent: Instant,
    pub tokens: Vec<RecoveryToken>,

    pub time_declared_lost: Option<Instant>,
    time_declared_lost: Option<Instant>,
    /// After a PTO, the packet has been released.
    pto: bool,

    pub in_flight: bool,
    in_flight: bool,
    pub size: usize,
}
@@ -93,10 +95,54 @@ impl SentPacket {
        }
    }

    /// Returns `true` if the packet will elicit an ACK.
    pub fn ack_eliciting(&self) -> bool {
        self.ack_eliciting
    }

    /// Returns `true` if the packet requires congestion control accounting.
    /// The specification uses the term "in flight" for this.
    pub fn cc_in_flight(&self) -> bool {
        self.in_flight
    }

    /// Whether the packet has been declared lost.
    pub fn lost(&self) -> bool {
        self.time_declared_lost.is_some()
    }

    /// Whether accounting for the loss or acknowledgement in the
    /// congestion controller is pending.
    /// Returns `true` if the packet counts as being "in flight",
    /// and has not previously been declared lost.
    pub fn cc_outstanding(&self) -> bool {
        self.cc_in_flight() && !self.lost()
    }

    /// Declare the packet as lost. Returns `true` if this is the first time.
    pub fn declare_lost(&mut self, now: Instant) -> bool {
        if self.lost() {
            false
        } else {
            self.time_declared_lost = Some(now);
            true
        }
    }

    /// Ask whether this tracked packet has been declared lost for long enough
    /// that it can be expired and no longer tracked.
    pub fn expired(&self, now: Instant, expiration_period: Duration) -> bool {
        if let Some(loss_time) = self.time_declared_lost {
            (loss_time + expiration_period) <= now
        } else {
            false
        }
    }

    /// On PTO, we need to get the recovery tokens so that we can ensure that
    /// the frames we sent can be sent again in the PTO packet(s). Do that just once.
    pub fn pto(&mut self) -> bool {
        if self.pto {
        if self.pto || self.lost() {
            false
        } else {
            self.pto = true;
@@ -105,17 +151,6 @@ impl SentPacket {
    }
}

impl Into<Epoch> for PNSpace {
    fn into(self) -> Epoch {
        match self {
            Self::Initial => TLS_EPOCH_INITIAL,
            Self::Handshake => TLS_EPOCH_HANDSHAKE,
            // Our epoch progresses forward, but the TLS epoch is fixed to 3.
            Self::ApplicationData => TLS_EPOCH_APPLICATION_DATA,
        }
    }
}

impl std::fmt::Display for PNSpace {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.write_str(match self {
@@ -188,7 +223,7 @@ impl PacketRange {
    }

    /// When a packet containing the range `other` is acknowledged,
    /// clear the ack_needed attribute on this.
    /// clear the `ack_needed` attribute on this.
    /// Requires that other is equal to this, or a larger range.
    pub fn acknowledged(&mut self, other: &Self) {
        if (other.smallest <= self.smallest) && (other.largest >= self.largest) {
@@ -206,6 +241,7 @@ impl ::std::fmt::Display for PacketRange {

/// The ACK delay we use.
pub const ACK_DELAY: Duration = Duration::from_millis(20); // 20ms
pub const MAX_UNACKED_PKTS: u64 = 1;
const MAX_TRACKED_RANGES: usize = 32;
const MAX_ACKS_PER_FRAME: usize = 32;
@@ -228,10 +264,11 @@ pub struct RecvdPackets {
    largest_pn_time: Option<Instant>,
    // The time that we should be sending an ACK.
    ack_time: Option<Instant>,
    pkts_since_last_ack: u64,
}

impl RecvdPackets {
    /// Make a new RecvdPackets for the indicated packet number space.
    /// Make a new `RecvdPackets` for the indicated packet number space.
    pub fn new(space: PNSpace) -> Self {
        Self {
            space,
@@ -239,6 +276,7 @@ impl RecvdPackets {
            min_tracked: 0,
            largest_pn_time: None,
            ack_time: None,
            pkts_since_last_ack: 0,
        }
    }
@@ -302,16 +340,26 @@ impl RecvdPackets {
        }

        if ack_eliciting {
            self.pkts_since_last_ack += 1;

            // Send ACK right away if out-of-order
            // On the first in-order ack-eliciting packet since sending an ACK,
            // set a delay. On the second, remove that delay.
            // set a delay.
            // Count packets until we exceed MAX_UNACKED_PKTS, then remove the
            // delay.
            if pn != next_in_order_pn {
                self.ack_time = Some(now);
            } else if self.ack_time.is_none() && self.space == PNSpace::ApplicationData {
                self.ack_time = Some(now + ACK_DELAY);
            } else if self.space == PNSpace::ApplicationData {
                match &mut self.pkts_since_last_ack {
                    0 => unreachable!(),
                    1 => self.ack_time = Some(now + ACK_DELAY),
                    x if *x > MAX_UNACKED_PKTS => self.ack_time = Some(now),
                    _ => debug_assert!(self.ack_time.is_some()),
                }
            } else {
                self.ack_time = Some(now);
            }
            qdebug!([self], "Set ACK timer to {:?}", self.ack_time);
        }
    }
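The rewritten branch above counts ack-eliciting packets and only delays the ACK while at most MAX_UNACKED_PKTS in-order packets are outstanding; an out-of-order arrival or exceeding that count forces an immediate ACK. A standalone sketch of that decision for the application-data space; the constants match the diff above, but the function name is illustrative and the "keep the existing timer" case is folded into the delayed branch:

```rust
use std::time::{Duration, Instant};

const ACK_DELAY: Duration = Duration::from_millis(20);
const MAX_UNACKED_PKTS: u64 = 1;

// Illustrative: when should the ACK timer fire for an ack-eliciting packet?
fn ack_timer(now: Instant, in_order: bool, pkts_since_last_ack: u64) -> Instant {
    if !in_order || pkts_since_last_ack > MAX_UNACKED_PKTS {
        now // out-of-order, or too many unacked packets: ACK immediately
    } else {
        now + ACK_DELAY // first in-order packet since the last ACK: delay
    }
}

fn main() {
    let now = Instant::now();
    assert_eq!(ack_timer(now, true, 1), now + ACK_DELAY);
    assert_eq!(ack_timer(now, true, 2), now);
    assert_eq!(ack_timer(now, false, 1), now);
}
```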
@@ -343,33 +391,8 @@ impl RecvdPackets {
                cur.acknowledged(&ack);
            }
        }
    }
}

impl ::std::fmt::Display for RecvdPackets {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        write!(f, "Recvd-{}", self.space)
    }
}

#[derive(Debug)]
pub struct AckTracker {
    spaces: [RecvdPackets; 3],
}

impl AckTracker {
    pub fn ack_time(&self) -> Option<Instant> {
        let mut iter = self.spaces.iter().filter_map(RecvdPackets::ack_time);
        match iter.next() {
            Some(v) => Some(iter.fold(v, min)),
            _ => None,
        }
    }

    pub fn acked(&mut self, token: &AckToken) {
        self.spaces[token.space as usize].acknowledged(&token.ranges);
    }

    /// Generate an ACK frame.
    /// Generate an ACK frame for this packet number space.
    ///
    /// Unlike other frame generators this doesn't modify the underlying instance
    /// to track what has been sent. This only clears the delayed ACK timer.
@@ -379,21 +402,15 @@ impl AckTracker {
    ///
    /// We don't send ranges that have been acknowledged, but they still need
    /// to be tracked so that duplicates can be detected.
    pub(crate) fn get_frame(
        &mut self,
        now: Instant,
        pn_space: PNSpace,
    ) -> Option<(Frame, Option<RecoveryToken>)> {
        let space = &mut self[pn_space];

    fn get_frame(&mut self, now: Instant) -> Option<(Frame, Option<RecoveryToken>)> {
        // Check that we aren't delaying ACKs.
        if !space.ack_now(now) {
        if !self.ack_now(now) {
            return None;
        }

        // Limit the number of ACK ranges we send so that we'll always
        // have space for data in packets.
        let ranges: Vec<PacketRange> = space
        let ranges: Vec<PacketRange> = self
            .ranges
            .iter()
            .filter(|r| r.ack_needed())
@@ -420,9 +437,10 @@ impl AckTracker {
        }

        // We've sent an ACK, reset the timer.
        space.ack_time = None;
        self.ack_time = None;
        self.pkts_since_last_ack = 0;

        let ack_delay = now.duration_since(space.largest_pn_time.unwrap());
        let ack_delay = now.duration_since(self.largest_pn_time.unwrap());
        // We use the default exponent so
        // ack_delay is in multiples of 8 microseconds.
        if let Ok(delay) = (ack_delay.as_micros() / 8).try_into() {
@@ -432,13 +450,11 @@ impl AckTracker {
                first_ack_range: first.len() - 1,
                ack_ranges,
            };
            Some((
                ack,
                Some(RecoveryToken::Ack(AckToken {
                    space: pn_space,
                    ranges,
                })),
            ))
            let token = RecoveryToken::Ack(AckToken {
                space: self.space,
                ranges,
            });
            Some((ack, Some(token)))
        } else {
            qwarn!(
                "ack_delay.as_micros() did not fit a u64 {:?}",
@@ -449,35 +465,94 @@ impl AckTracker {
        }
    }
}

impl ::std::fmt::Display for RecvdPackets {
    fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
        write!(f, "Recvd-{}", self.space)
    }
}

#[derive(Debug)]
pub struct AckTracker {
    /// This stores information about received packets in *reverse* order
    /// by spaces. Why reverse? Because we ultimately only want to keep
    /// `ApplicationData` and this allows us to drop other spaces easily.
    spaces: SmallVec<[RecvdPackets; 1]>,
}

impl AckTracker {
    pub fn drop_space(&mut self, space: PNSpace) {
        let sp = match space {
            PNSpace::Initial => self.spaces.pop(),
            PNSpace::Handshake => {
                let sp = self.spaces.pop();
                self.spaces.shrink_to_fit();
                sp
            }
            _ => panic!("discarding application space"),
        };
        assert_eq!(sp.unwrap().space, space, "dropping spaces out of order");
    }

    pub fn get_mut(&mut self, space: PNSpace) -> Option<&mut RecvdPackets> {
        self.spaces.get_mut(match space {
            PNSpace::ApplicationData => 0,
            PNSpace::Handshake => 1,
            PNSpace::Initial => 2,
        })
    }

    /// Determine the earliest time that an ACK might be needed.
    pub fn ack_time(&self, now: Instant) -> Option<Instant> {
        if self.spaces.len() == 1 {
            self.spaces[0].ack_time()
        } else {
            // Ignore any time that is in the past relative to `now`.
            // That is something of a hack, but there are cases where we can't send ACK
            // frames for all spaces, which can mean that one space is stuck in the past.
            // That isn't a problem because we guarantee that earlier spaces will always
            // be able to send ACK frames.
            self.spaces
                .iter()
                .filter_map(|recvd| recvd.ack_time().filter(|t| *t > now))
                .min()
        }
    }

    pub fn acked(&mut self, token: &AckToken) {
        if let Some(space) = self.get_mut(token.space) {
            space.acknowledged(&token.ranges);
        }
    }

    pub(crate) fn get_frame(
        &mut self,
        now: Instant,
        pn_space: PNSpace,
    ) -> Option<(Frame, Option<RecoveryToken>)> {
        self.get_mut(pn_space)
            .map(|space| space.get_frame(now))
            .flatten()
    }
}

impl Default for AckTracker {
    fn default() -> Self {
        Self {
            spaces: [
                RecvdPackets::new(PNSpace::Initial),
                RecvdPackets::new(PNSpace::Handshake),
            spaces: smallvec![
                RecvdPackets::new(PNSpace::ApplicationData),
                RecvdPackets::new(PNSpace::Handshake),
                RecvdPackets::new(PNSpace::Initial),
            ],
        }
    }
}

impl Index<PNSpace> for AckTracker {
    type Output = RecvdPackets;

    fn index(&self, space: PNSpace) -> &Self::Output {
        &self.spaces[space as usize]
    }
}

impl IndexMut<PNSpace> for AckTracker {
    fn index_mut(&mut self, space: PNSpace) -> &mut Self::Output {
        &mut self.spaces[space as usize]
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use super::{
        AckTracker, Duration, Instant, PNSpace, RecoveryToken, RecvdPackets, ACK_DELAY,
        MAX_TRACKED_RANGES, MAX_UNACKED_PKTS,
    };
    use lazy_static::lazy_static;
    use std::collections::HashSet;
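Because the new `AckTracker` stores its spaces in reverse order (`ApplicationData` first, `Initial` last), dropping the Initial and then the Handshake space is just two pops off the end of the vector, and the index mapping is the one `get_mut` uses above. A minimal layout sketch with illustrative types only, not the tracker itself:

```rust
// Illustrative: the reverse-order layout, so Initial and Handshake can be
// dropped by popping from the end while ApplicationData stays at index 0.
#[derive(Debug, PartialEq)]
enum PnSpace {
    Initial,
    Handshake,
    ApplicationData,
}

fn index_of(space: &PnSpace) -> usize {
    match space {
        PnSpace::ApplicationData => 0,
        PnSpace::Handshake => 1,
        PnSpace::Initial => 2,
    }
}

fn main() {
    let mut spaces = vec![
        PnSpace::ApplicationData,
        PnSpace::Handshake,
        PnSpace::Initial,
    ];
    // Dropping Initial, then Handshake, leaves only ApplicationData at index 0.
    assert_eq!(spaces.pop(), Some(PnSpace::Initial));
    assert_eq!(spaces.pop(), Some(PnSpace::Handshake));
    assert_eq!(index_of(&spaces[0]), 0);
}
```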
@@ -511,7 +586,7 @@ mod tests {

        // Check that the ranges include the right values.
        let mut in_ranges = HashSet::new();
        for range in rp.ranges.iter() {
        for range in &rp.ranges {
            for included in range.smallest..=range.largest {
                in_ranges.insert(included);
            }
@@ -564,14 +639,16 @@ mod tests {
        assert!(rp.ack_time().is_none());
        assert!(!rp.ack_now(*NOW));

        // One packet won't cause an ACK to be needed.
        rp.set_received(*NOW, 0, true);
        assert_eq!(Some(*NOW + ACK_DELAY), rp.ack_time());
        assert!(!rp.ack_now(*NOW));
        assert!(rp.ack_now(*NOW + ACK_DELAY));
        // Some packets won't cause an ACK to be needed.
        for num in 0..MAX_UNACKED_PKTS {
            rp.set_received(*NOW, num, true);
            assert_eq!(Some(*NOW + ACK_DELAY), rp.ack_time());
            assert!(!rp.ack_now(*NOW));
            assert!(rp.ack_now(*NOW + ACK_DELAY));
        }

        // A second packet will move the ACK time to now.
        rp.set_received(*NOW, 1, true);
        // Exceeding MAX_UNACKED_PKTS will move the ACK time to now.
        rp.set_received(*NOW, MAX_UNACKED_PKTS, true);
        assert_eq!(Some(*NOW), rp.ack_time());
        assert!(rp.ack_now(*NOW));
    }
@@ -612,16 +689,92 @@ mod tests {
    fn aggregate_ack_time() {
        let mut tracker = AckTracker::default();
        // This packet won't trigger an ACK.
        tracker[PNSpace::Handshake].set_received(*NOW, 0, false);
        assert_eq!(None, tracker.ack_time());
        tracker
            .get_mut(PNSpace::Handshake)
            .unwrap()
            .set_received(*NOW, 0, false);
        assert_eq!(None, tracker.ack_time(*NOW));

        // This should be delayed.
        tracker[PNSpace::ApplicationData].set_received(*NOW, 0, true);
        assert_eq!(Some(*NOW + ACK_DELAY), tracker.ack_time());
        tracker
            .get_mut(PNSpace::ApplicationData)
            .unwrap()
            .set_received(*NOW, 0, true);
        assert_eq!(Some(*NOW + ACK_DELAY), tracker.ack_time(*NOW));

        // This should move the time forward.
        let later = *NOW + ACK_DELAY.checked_div(2).unwrap();
        tracker[PNSpace::Initial].set_received(later, 0, true);
        assert_eq!(Some(later), tracker.ack_time());
        tracker
            .get_mut(PNSpace::Initial)
            .unwrap()
            .set_received(later, 0, true);
        assert_eq!(Some(later), tracker.ack_time(*NOW));
    }

    #[test]
    #[should_panic(expected = "discarding application space")]
    fn drop_app() {
        let mut tracker = AckTracker::default();
        tracker.drop_space(PNSpace::ApplicationData);
    }

    #[test]
    #[should_panic(expected = "dropping spaces out of order")]
    fn drop_out_of_order() {
        let mut tracker = AckTracker::default();
        tracker.drop_space(PNSpace::Handshake);
    }

    #[test]
    fn drop_spaces() {
        let mut tracker = AckTracker::default();
        tracker
            .get_mut(PNSpace::Initial)
            .unwrap()
            .set_received(*NOW, 0, true);
        // The reference time for `ack_time` has to be in the past or we filter out the timer.
        assert!(tracker.ack_time(*NOW - Duration::from_millis(1)).is_some());
        let (_ack, token) = tracker.get_frame(*NOW, PNSpace::Initial).unwrap();
        assert!(token.is_some());

        // Mark another packet as received so we have cause to send another ACK in that space.
        tracker
            .get_mut(PNSpace::Initial)
            .unwrap()
            .set_received(*NOW, 1, true);
        assert!(tracker.ack_time(*NOW - Duration::from_millis(1)).is_some());

        // Now drop that space.
        tracker.drop_space(PNSpace::Initial);

        assert!(tracker.get_mut(PNSpace::Initial).is_none());
        assert!(tracker.ack_time(*NOW - Duration::from_millis(1)).is_none());
        assert!(tracker.get_frame(*NOW, PNSpace::Initial).is_none());
        if let RecoveryToken::Ack(tok) = token.as_ref().unwrap() {
            tracker.acked(tok); // Should be a noop.
        } else {
            panic!("not an ACK token");
        }
    }

    #[test]
    fn ack_time_elapsed() {
        let mut tracker = AckTracker::default();

        // While we have multiple PN spaces, we ignore ACK timers from the past.
        // Send out of order to cause the delayed ack timer to be set to `*NOW`.
        tracker
            .get_mut(PNSpace::ApplicationData)
            .unwrap()
            .set_received(*NOW, 3, true);
        assert!(tracker.ack_time(*NOW + Duration::from_millis(1)).is_none());

        // When we are reduced to one space, that filter is off.
        tracker.drop_space(PNSpace::Initial);
        tracker.drop_space(PNSpace::Handshake);
        assert_eq!(
            tracker.ack_time(*NOW + Duration::from_millis(1)),
            Some(*NOW)
        );
    }
}