From 8169002f3d1217690d67e3d1ecaea9215856d09c Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 25 Jan 2024 09:43:49 +1100 Subject: [PATCH 01/41] Create a security policy (#1573) * Create a security policy A simple document describing how we handle advisories is probably long overdue. * Review feedback Co-authored-by: Lars Eggert * spacing --------- Co-authored-by: Lars Eggert --- SECURITY.md | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..5b70d7ba3b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,31 @@ +# Security Policy + +This document describes how security vulnerabilities in this project should be reported. + +## Supported Versions + +Support for neqo is based on the Firefox version in which it has landed. +Versions of neqo in [current versions of Firefox](https://whattrainisitnow.com/calendar/) are actively supported. + +The version of neqo that is active can be found in the Firefox repositories: + +- [release](https://hg.mozilla.org/mozilla-unified/file/release/third_party/rust/neqo-transport/Cargo.toml), +- [beta](https://hg.mozilla.org/mozilla-unified/file/beta/third_party/rust/neqo-transport/Cargo.toml), and +- [trunk/central](https://hg.mozilla.org/mozilla-unified/file/central/third_party/rust/neqo-transport/Cargo.toml), +- [ESR 115](https://hg.mozilla.org/mozilla-unified/file/esr115/third_party/rust/neqo-transport/Cargo.toml). + +The listed version in these files corresponds to [tags](https://github.com/mozilla/neqo/tags) on this repository. +Releases do not always correspond to a branch. + +We welcome reports of security vulnerabilities in any of these released versions or the latest code on the `main` branch. 
+ +## Reporting a Vulnerability + +To report a security problem with neqo, create a bug in Mozilla's Bugzilla instance in the [Core :: Networking](https://bugzilla.mozilla.org/enter_bug.cgi?product=Core&component=Networking) component. + +**IMPORTANT: For security issues, please make sure that you check the box labelled "Many users could be harmed by this security problem".** +We advise that you check this option for anything that involves anything security-relevant, including memory safety, crashes, race conditions, and handling of confidential information. + +Review Mozilla's [guides on bug reporting](https://bugzilla.mozilla.org/page.cgi?id=bug-writing.html) before you open a bug. + +Mozilla operates a [bug bounty program](https://www.mozilla.org/en-US/security/bug-bounty/), for which this project is eligible. From 4e7d9031435f428e500e7217beb137bc6475bd91 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 25 Jan 2024 00:46:25 +0200 Subject: [PATCH 02/41] Remove docker/Dockerfile, which is unused (#1579) We don't use a docker image for CI anymore --- docker/Dockerfile | 64 ----------------------------------------------- 1 file changed, 64 deletions(-) delete mode 100644 docker/Dockerfile diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index e3a7dfbadf..0000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,64 +0,0 @@ -# This image is used for running CI tests. -# The image is not built unless the `docker` branch is updated. 
-# Push to `docker` to trigger a build: -# $ git push origin main:docker - -FROM ubuntu:20.04 -LABEL maintainer="Martin Thomson " - -RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates \ - coreutils \ - curl \ - git \ - make \ - mercurial \ - ssh \ - build-essential \ - clang \ - llvm \ - libclang-dev \ - lld \ - gyp \ - ninja-build \ - pkg-config \ - python-is-python3 \ - python3 \ - python3-pip \ - sudo \ - zlib1g-dev \ - && apt-get autoremove -y && apt-get clean -y \ - && rm -rf /var/lib/apt/lists/* - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.51.0 - -RUN set -eux; \ - curl -sSLf "https://static.rust-lang.org/rustup/archive/1.20.2/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo 'e68f193542c68ce83c449809d2cad262cc2bbb99640eb47c58fc1dc58cc30add *rustup-init' | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y -q --no-modify-path --profile minimal --component rustfmt --component clippy --default-toolchain "$RUST_VERSION"; \ - rm -f rustup-init; \ - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" - -ENV USER neqo -ENV LOGNAME $USER -ENV HOSTNAME $USER -ENV HOME /home/$USER -ENV SHELL /bin/bash - -RUN useradd -d "$HOME" -s "$SHELL" -m "$USER" -RUN echo "$USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers -WORKDIR $HOME -USER $USER - -ENV NSS_DIR=$HOME/nss \ - NSPR_DIR=$HOME/nspr \ - LD_LIBRARY_PATH=$HOME/dist/Debug/lib \ - RUSTFLAGS="-C link-arg=-fuse-ld=lld" - -RUN set -eux; \ - hg clone -u c7a1c91cd9be https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ - hg clone -u NSPR_4_25_RTM https://hg.mozilla.org/projects/nspr "$NSPR_DIR" From b164ab01aa7a20b957f62f28485b1cd33959289f Mon Sep 17 00:00:00 2001 From: Kershaw Date: Thu, 25 Jan 2024 10:04:39 +0100 Subject: [PATCH 03/41] add support to log rtt (#1522) * Log RttEstimate * address comment * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert * Update 
neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/sender.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/sender.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/mod.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert * Update neqo-transport/src/cc/classic_cc.rs Co-authored-by: Lars Eggert --------- Co-authored-by: Lars Eggert --- neqo-transport/src/cc/classic_cc.rs | 22 +++++++++++++--------- neqo-transport/src/cc/mod.rs | 4 ++-- neqo-transport/src/cc/tests/cubic.rs | 4 +++- neqo-transport/src/cc/tests/new_reno.rs | 21 ++++++++++++++------- neqo-transport/src/path.rs | 3 +-- neqo-transport/src/rtt.rs | 12 ++++++++++++ neqo-transport/src/sender.rs | 10 ++++++++-- 7 files changed, 53 insertions(+), 23 deletions(-) diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index 000d9bf4d5..c1d8fd08a6 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -19,6 +19,7 @@ use crate::{ cc::MAX_DATAGRAM_SIZE, packet::PacketNumber, qlog::{self, QlogMetric}, + rtt::RttEstimate, sender::PACING_BURST_SIZE, tracking::SentPacket, }; @@ -161,17 +162,18 @@ impl CongestionControl for ClassicCongestionControl { } // Multi-packet version of OnPacketAckedCC - fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { + fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], rtt_est: &RttEstimate, now: Instant) { let mut is_app_limited = true; let mut new_acked = 0; for pkt in acked_pkts { qinfo!( - "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}", + "packet_acked this={:p}, pn={}, ps={}, ignored={}, lost={}, rtt_est={:?}", self, pkt.pn, pkt.size, i32::from(!pkt.cc_outstanding()), - i32::from(pkt.lost()) + i32::from(pkt.lost()), + rtt_est, ); if !pkt.cc_outstanding() { continue; @@ -222,7 +224,7 @@ impl CongestionControl for ClassicCongestionControl { let 
bytes_for_increase = self.cc_algorithm.bytes_for_cwnd_increase( self.congestion_window, new_acked, - min_rtt, + rtt_est.minimum(), now, ); debug_assert!(bytes_for_increase > 0); @@ -546,6 +548,7 @@ mod tests { CongestionControl, CongestionControlAlgorithm, CWND_INITIAL_PKTS, MAX_DATAGRAM_SIZE, }, packet::{PacketNumber, PacketType}, + rtt::RttEstimate, tracking::SentPacket, }; use neqo_common::qinfo; @@ -557,6 +560,7 @@ mod tests { const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); + const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); const ZERO: Duration = Duration::from_secs(0); const EPSILON: Duration = Duration::from_nanos(1); const GAP: Duration = Duration::from_secs(1); @@ -1025,7 +1029,7 @@ mod tests { } assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); now += RTT; - cc.on_packets_acked(&pkts, RTT, now); + cc.on_packets_acked(&pkts, &RTT_ESTIMATE, now); assert_eq!(cc.bytes_in_flight(), 0); assert_eq!(cc.acked_bytes, 0); assert_eq!(cwnd, cc.congestion_window); // CWND doesn't grow because we're app limited @@ -1054,7 +1058,7 @@ mod tests { now += RTT; // Check if congestion window gets increased for all packets currently in flight for (i, pkt) in pkts.into_iter().enumerate() { - cc.on_packets_acked(&[pkt], RTT, now); + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), @@ -1101,7 +1105,7 @@ mod tests { ); cc.on_packet_sent(&p_not_lost); now += RTT; - cc.on_packets_acked(&[p_not_lost], RTT, now); + cc.on_packets_acked(&[p_not_lost], &RTT_ESTIMATE, now); cwnd_is_halved(&cc); // cc is app limited therefore cwnd in not increased. 
assert_eq!(cc.acked_bytes, 0); @@ -1129,7 +1133,7 @@ mod tests { assert_eq!(cc.bytes_in_flight(), packet_burst_size * MAX_DATAGRAM_SIZE); now += RTT; for (i, pkt) in pkts.into_iter().enumerate() { - cc.on_packets_acked(&[pkt], RTT, now); + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), @@ -1164,7 +1168,7 @@ mod tests { let mut last_acked_bytes = 0; // Check if congestion window gets increased for all packets currently in flight for (i, pkt) in pkts.into_iter().enumerate() { - cc.on_packets_acked(&[pkt], RTT, now); + cc.on_packets_acked(&[pkt], &RTT_ESTIMATE, now); assert_eq!( cc.bytes_in_flight(), diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 675168367a..0321ab1de5 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -7,7 +7,7 @@ // Congestion control #![deny(clippy::pedantic)] -use crate::{path::PATH_MTU_V6, tracking::SentPacket, Error}; +use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; use neqo_common::qlog::NeqoQlog; use std::{ @@ -42,7 +42,7 @@ pub trait CongestionControl: Display + Debug { #[must_use] fn cwnd_avail(&self) -> usize; - fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant); + fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], rtt_est: &RttEstimate, now: Instant); /// Returns true if the congestion window was reduced. 
fn on_packets_lost( diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index 1229e6307f..b24f1fc118 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -17,6 +17,7 @@ use crate::{ CongestionControl, MAX_DATAGRAM_SIZE, MAX_DATAGRAM_SIZE_F64, }, packet::PacketType, + rtt::RttEstimate, tracking::SentPacket, }; use std::{ @@ -27,6 +28,7 @@ use std::{ use test_fixture::now; const RTT: Duration = Duration::from_millis(100); +const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(100)); const CWND_INITIAL_F64: f64 = 10.0 * MAX_DATAGRAM_SIZE_F64; const CWND_INITIAL_10_F64: f64 = 10.0 * CWND_INITIAL_F64; const CWND_INITIAL_10: usize = 10 * CWND_INITIAL; @@ -59,7 +61,7 @@ fn ack_packet(cc: &mut ClassicCongestionControl, pn: u64, now: Instant) { Vec::new(), // tokens MAX_DATAGRAM_SIZE, // size ); - cc.on_packets_acked(&[acked], RTT, now); + cc.on_packets_acked(&[acked], &RTT_ESTIMATE, now); } fn packet_lost(cc: &mut ClassicCongestionControl, pn: u64) { diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index 0e4322c08c..f86e87b953 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -7,15 +7,22 @@ // Congestion control #![deny(clippy::pedantic)] -use crate::cc::new_reno::NewReno; -use crate::cc::{ClassicCongestionControl, CongestionControl, CWND_INITIAL, MAX_DATAGRAM_SIZE}; -use crate::packet::PacketType; -use crate::tracking::SentPacket; +use crate::{ + cc::{ + new_reno::NewReno, ClassicCongestionControl, CongestionControl, CWND_INITIAL, + MAX_DATAGRAM_SIZE, + }, + packet::PacketType, + rtt::RttEstimate, + tracking::SentPacket, +}; + use std::time::Duration; use test_fixture::now; const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); +const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); fn 
cwnd_is_default(cc: &ClassicCongestionControl) { assert_eq!(cc.cwnd(), CWND_INITIAL); @@ -117,7 +124,7 @@ fn issue_876() { assert_eq!(cc.bytes_in_flight(), 6 * MAX_DATAGRAM_SIZE - 5); // and ack it. cwnd increases slightly - cc.on_packets_acked(&sent_packets[6..], RTT, time_now); + cc.on_packets_acked(&sent_packets[6..], &RTT_ESTIMATE, time_now); assert_eq!(cc.acked_bytes(), sent_packets[6].size); cwnd_is_halved(&cc); assert_eq!(cc.bytes_in_flight(), 5 * MAX_DATAGRAM_SIZE - 2); @@ -181,7 +188,7 @@ fn issue_1465() { // the acked packets before on_packet_sent were the cause of // https://github.com/mozilla/neqo/pull/1465 - cc.on_packets_acked(&[p2], RTT, now); + cc.on_packets_acked(&[p2], &RTT_ESTIMATE, now); assert_eq!(cc.bytes_in_flight(), 0); @@ -189,7 +196,7 @@ fn issue_1465() { let p4 = send_next(&mut cc, now); cc.on_packet_sent(&p4); now += RTT; - cc.on_packets_acked(&[p4], RTT, now); + cc.on_packets_acked(&[p4], &RTT_ESTIMATE, now); // do the same as in the first rtt but now the bug appears let p5 = send_next(&mut cc, now); diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 2b357e0bb1..4430bb2bdb 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -973,8 +973,7 @@ impl Path { /// Record packets as acknowledged with the sender. pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], now: Instant) { debug_assert!(self.is_primary()); - self.sender - .on_packets_acked(acked_pkts, self.rtt.minimum(), now); + self.sender.on_packets_acked(acked_pkts, &self.rtt, now); } /// Record packets as lost with the sender. 
diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index 3d6d0e70f8..a5ceb37da2 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -47,6 +47,18 @@ impl RttEstimate { self.rttvar = rtt / 2; } + #[cfg(test)] + pub const fn from_duration(rtt: Duration) -> Self { + Self { + first_sample_time: None, + latest_rtt: rtt, + smoothed_rtt: rtt, + rttvar: Duration::from_millis(0), + min_rtt: rtt, + ack_delay: PeerAckDelay::Fixed(Duration::from_millis(25)), + } + } + pub fn set_initial(&mut self, rtt: Duration) { qtrace!("initial RTT={:?}", rtt); if rtt >= GRANULARITY { diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 3d8302369c..0c1e66ff9a 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -12,6 +12,7 @@ use crate::cc::{ ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno, }; use crate::pace::Pacer; +use crate::rtt::RttEstimate; use crate::tracking::SentPacket; use neqo_common::qlog::NeqoQlog; @@ -68,8 +69,13 @@ impl PacketSender { self.cc.cwnd_avail() } - pub fn on_packets_acked(&mut self, acked_pkts: &[SentPacket], min_rtt: Duration, now: Instant) { - self.cc.on_packets_acked(acked_pkts, min_rtt, now); + pub fn on_packets_acked( + &mut self, + acked_pkts: &[SentPacket], + rtt_est: &RttEstimate, + now: Instant, + ) { + self.cc.on_packets_acked(acked_pkts, rtt_est, now); } /// Called when packets are lost. Returns true if the congestion window was reduced. From b51cad763f319698ae4c0a8210e2f2bc3b1f2698 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 25 Jan 2024 20:32:56 +1100 Subject: [PATCH 04/41] Simplify timeouts (#1583) * Simplify timeouts We use `Option` but then always use this with `.or_else(|| Some(Duration::new(0, 0)))`. We don't need the option. 
* Simplify timeouts No need to wrap the timeout in Option when we always use it with `.or_else(|| Some(...))` --- neqo-client/src/main.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index a61a3ced88..1a038ddba3 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -405,12 +405,9 @@ fn process_loop( ) -> Res { let buf = &mut [0u8; 2048]; let mut events = Events::with_capacity(1024); - let mut timeout: Option = None; + let mut timeout = Duration::new(0, 0); loop { - poll.poll( - &mut events, - timeout.or_else(|| Some(Duration::from_millis(0))), - )?; + poll.poll(&mut events, Some(timeout))?; let mut datagrams: Vec = Vec::new(); 'read: loop { @@ -465,7 +462,7 @@ fn process_loop( } } Output::Callback(new_timeout) => { - timeout = Some(new_timeout); + timeout = new_timeout; break 'write; } Output::None => { @@ -1347,12 +1344,9 @@ mod old { ) -> Res { let buf = &mut [0u8; 2048]; let mut events = Events::with_capacity(1024); - let mut timeout: Option = None; + let mut timeout = Duration::new(0, 0); loop { - poll.poll( - &mut events, - timeout.or_else(|| Some(Duration::from_millis(0))), - )?; + poll.poll(&mut events, Some(timeout))?; 'read: loop { match socket.recv_from(&mut buf[..]) { @@ -1403,7 +1397,7 @@ mod old { } } Output::Callback(new_timeout) => { - timeout = Some(new_timeout); + timeout = new_timeout; break 'write; } Output::None => { From 29926165d593715b6caa6abc727d82f9fcb4e36c Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Thu, 25 Jan 2024 20:37:48 +1100 Subject: [PATCH 05/41] Lars owns this now (#1584) --- .github/CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bf1ad1efba..42caa3deee 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,4 +1,4 @@ -* @KershawChang @martinthomson +* @KershawChang @martinthomson @larseggert /docker/ @martinthomson /hooks/ @martinthomson 
/neqo-crypto/ @martinthomson From f4f6cc3c002c650af4d6d572063901fe0071872d Mon Sep 17 00:00:00 2001 From: jesup Date: Thu, 25 Jan 2024 11:30:54 -0500 Subject: [PATCH 06/41] Improve coalesce_acked_from_zero perf by avoiding an extra vector (#1585) --- neqo-transport/src/send_stream.rs | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index ed227b2a31..e171dfab83 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -276,8 +276,6 @@ impl RangeTracker { .map(|(len, _)| *len); if let Some(len_from_zero) = acked_range_from_zero { - let mut to_remove = SmallVec::<[_; 8]>::new(); - let mut new_len_from_zero = len_from_zero; // See if there's another Acked range entry contiguous to this one @@ -286,17 +284,14 @@ impl RangeTracker { .get(&new_len_from_zero) .filter(|(_, state)| *state == RangeState::Acked) { - to_remove.push(new_len_from_zero); + let to_remove = new_len_from_zero; new_len_from_zero += *next_len; + self.used.remove(&to_remove); } if len_from_zero != new_len_from_zero { self.used.get_mut(&0).expect("must be there").0 = new_len_from_zero; } - - for val in to_remove { - self.used.remove(&val); - } } } From 04def1eaceec5c3227edcd59c73cea83b607e906 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 26 Jan 2024 00:51:40 +0100 Subject: [PATCH 07/41] log SendProfile when nothing to send (#1586) --- neqo-transport/src/connection/mod.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index f3519f7daa..49e4ec43aa 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2260,6 +2260,7 @@ impl Connection { } if encoder.is_empty() { + qinfo!("TX blocked, profile={:?} ", profile); Ok(SendOption::No(profile.paced())) } else { // Perform additional padding for Initial packets as necessary. 
From 34c062b3fed13356eb57d316bdac019debf7af8d Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 26 Jan 2024 18:02:36 +0200 Subject: [PATCH 08/41] chore: Fix a beta toolchain clippy warning that pops up in CI (#1591) --- neqo-transport/src/pace.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index 6b86575eb5..f1cec80ac6 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -82,7 +82,7 @@ impl Pacer { } /// Spend credit. This cannot fail; users of this API are expected to call - /// next() to determine when to spend. This takes the current time (`now`), + /// `next()` to determine when to spend. This takes the current time (`now`), /// an estimate of the round trip time (`rtt`), the estimated congestion /// window (`cwnd`), and the number of bytes that were sent (`count`). pub fn spend(&mut self, now: Instant, rtt: Duration, cwnd: usize, count: usize) { From 5496be25e7c4a651bf77a2cf77d1b61f213cbba0 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 26 Jan 2024 18:03:12 +0200 Subject: [PATCH 09/41] ci: Use lld to link (#1590) * ci: Use lld to link To make things a bit faster * Add the various lld packages * Add lld to PATH on MacOS * Also log level during transfer to `warn`, to make it less noisy --- .github/workflows/check.yml | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 685d36360d..4df5f3c2a1 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -9,6 +9,7 @@ on: env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 + RUSTFLAGS: -C link-arg=-fuse-ld=lld jobs: check: @@ -35,7 +36,7 @@ jobs: env: DEBIAN_FRONTEND: noninteractive run: | - sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build + sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld curl -L --proto '=https' --tlsv1.2 -sSf 
https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash # In addition to installing dependencies, first make sure System Integrity Protection (SIP) @@ -47,7 +48,8 @@ jobs: if: runner.os == 'MacOS' run: | csrutil status | grep disabled - brew install ninja mercurial cargo-binstall + brew install ninja mercurial cargo-binstall llvm + echo "/usr/local/opt/llvm/bin" >> "$GITHUB_PATH" # python3 -m pip install gyp-next # Above does not work, since pypi only has gyp 0.15.0, which is too old # for the homebrew python3. Install from source instead. @@ -65,7 +67,7 @@ jobs: run: | echo "C:\\msys64\\usr\\bin" >> "$GITHUB_PATH" echo "C:\\msys64\\mingw64\\bin" >> "$GITHUB_PATH" - /c/msys64/usr/bin/pacman -S --noconfirm nsinstall + /c/msys64/usr/bin/pacman -S --noconfirm nsinstall lld python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" @@ -123,7 +125,7 @@ jobs: env: HOST: localhost SIZE: 54321 - RUST_LOG: info + RUST_LOG: warn - name: Check formatting run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check From 37f121de3d21f50a84f2061bf1489fc3171c5261 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 29 Jan 2024 00:49:18 +0200 Subject: [PATCH 10/41] chore: Remove some outdated files (#1588) The .gitignore files serve no purpose, and if there is anything in the TODO files left to be done, those things should become GitHub issues. 
--- neqo-crypto/.gitignore | 6 ------ neqo-crypto/TODO | 4 ---- neqo-transport/.gitignore | 3 --- neqo-transport/TODO | 9 --------- 4 files changed, 22 deletions(-) delete mode 100644 neqo-crypto/.gitignore delete mode 100644 neqo-crypto/TODO delete mode 100644 neqo-transport/.gitignore delete mode 100755 neqo-transport/TODO diff --git a/neqo-crypto/.gitignore b/neqo-crypto/.gitignore deleted file mode 100644 index 0136220822..0000000000 --- a/neqo-crypto/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -Cargo.lock -/target -**/*.rs.bk -/nss -/nspr -/dist diff --git a/neqo-crypto/TODO b/neqo-crypto/TODO deleted file mode 100644 index b0552ea10f..0000000000 --- a/neqo-crypto/TODO +++ /dev/null @@ -1,4 +0,0 @@ -early data - API in place for inspection, but depends on resumption -handle panics more gracefully for extension handlers -client certificates -read/write - probably never \ No newline at end of file diff --git a/neqo-transport/.gitignore b/neqo-transport/.gitignore deleted file mode 100644 index aa085cd807..0000000000 --- a/neqo-transport/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -Cargo.lock -/target -**/*.rs.bk diff --git a/neqo-transport/TODO b/neqo-transport/TODO deleted file mode 100755 index 151dbd1753..0000000000 --- a/neqo-transport/TODO +++ /dev/null @@ -1,9 +0,0 @@ -Use stream events in h3 // grover or dragana? 
-harmonize our rust usage: - - use foo::* or use foo::{bar, baz} and ordering/grouping - - remove extern crate - - sort #[derive()] args -cleanup public API -write docs for public API -write docs for everything else -CI From fca21791a2a0f32671959b08aec9b50c532c9980 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 29 Jan 2024 10:29:51 +0200 Subject: [PATCH 11/41] chore: Make the qns image multiarch & shrink the build context further (#1589) * chore: Make the qns image multiarch, and shrink the build context further * Remove some more things * Update .dockerignore Co-authored-by: Max Inden * Update qns/Dockerfile Co-authored-by: Martin Thomson * Update qns/Dockerfile * Update .dockerignore --------- Co-authored-by: Max Inden Co-authored-by: Martin Thomson --- .dockerignore | 12 +++++++++--- qns/Dockerfile | 17 ++++++----------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/.dockerignore b/.dockerignore index 8012c0d3ae..cc95fda49e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,3 +1,9 @@ -nss -nspr -target +# Ignore everything: +* +# Except for the following: +!**/*.toml +!**/*.rs +!**/*.h +!**/*.hpp +!qns +!Cargo.lock diff --git a/qns/Dockerfile b/qns/Dockerfile index 051cf5b8a5..f3a45c23a5 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -1,9 +1,9 @@ FROM martenseemann/quic-network-simulator-endpoint:latest AS buildimage RUN apt-get update && apt-get install -y --no-install-recommends \ - ca-certificates coreutils curl git make mercurial ssh \ - build-essential clang llvm libclang-dev lld \ - gyp ninja-build pkg-config zlib1g-dev python \ + curl git mercurial \ + build-essential libclang-dev lld \ + gyp ninja-build zlib1g-dev python \ && apt-get autoremove -y && apt-get clean -y \ && rm -rf /var/lib/apt/lists/* @@ -13,20 +13,15 @@ ENV RUSTUP_HOME=/usr/local/rustup \ CARGO_HOME=/usr/local/cargo \ PATH=/usr/local/cargo/bin:$PATH -RUN set -eux; \ - curl -sSLf 
"https://static.rust-lang.org/rustup/archive/1.26.0/x86_64-unknown-linux-gnu/rustup-init" -o rustup-init; \ - echo '0b2f6c8f85a3d02fde2efc0ced4657869d73fccfce59defb4e8d29233116e6db *rustup-init' | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y -q --no-modify-path --profile minimal --default-toolchain "$RUST_VERSION"; \ - rm -f rustup-init; \ - chmod -R a+w "$RUSTUP_HOME" "$CARGO_HOME" +RUN curl https://sh.rustup.rs -sSf | \ + sh -s -- -y -q --no-modify-path --profile minimal --default-toolchain $RUST_VERSION ENV NSS_DIR=/nss \ NSPR_DIR=/nspr \ LD_LIBRARY_PATH=/dist/Release/lib RUN set -eux; \ - hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ + git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR"; \ hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o From 2bbbfed6546f53693590432492bb8953fea95898 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 30 Jan 2024 09:38:50 +0200 Subject: [PATCH 12/41] fix: Don't send superfluous PING-only Initial packets during handshake (#1598) * fix: Don't send superfluous PING-only Initial packets during handshake This limits the Initial packet number space to sending one packet when a PTO fires (other packet number spaces will continue to send two.) This stops PING-only Initial packets during handshake. Some tests based in the assumption that those PINGs would be sent. Fix those, too. I'd appreciate if someone could esp. double-check the test modifications, esp. to the `idle_caching` test, which is gnarly. Fixes #744 * Rework logic Only limits the PTO packet count to one if we have not received any packet from the peer yet. 
--- .../src/connection/tests/recovery.rs | 11 +++-- neqo-transport/src/recovery.rs | 41 ++++++++++++++----- neqo-transport/tests/retry.rs | 6 +-- 3 files changed, 38 insertions(+), 20 deletions(-) diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 073f1ca156..421dded3d4 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -13,7 +13,9 @@ use super::{ use crate::{ cc::CWND_MIN, path::PATH_MTU_V6, - recovery::{FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MIN_OUTSTANDING_UNACK, PTO_PACKET_COUNT}, + recovery::{ + FAST_PTO_SCALE, MAX_OUTSTANDING_UNACK, MAX_PTO_PACKET_COUNT, MIN_OUTSTANDING_UNACK, + }, rtt::GRANULARITY, stats::MAX_PTO_COUNTS, tparams::TransportParameter, @@ -173,10 +175,6 @@ fn pto_initial() { assert!(pkt2.is_some()); assert_eq!(pkt2.unwrap().len(), PATH_MTU_V6); - let pkt3 = client.process(None, now).dgram(); - assert!(pkt3.is_some()); - assert_eq!(pkt3.unwrap().len(), PATH_MTU_V6); - let delay = client.process(None, now).callback(); // PTO has doubled. assert_eq!(delay, INITIAL_PTO * 2); @@ -468,7 +466,8 @@ fn ack_after_pto() { // Jump forward to the PTO and drain the PTO packets. now += AT_LEAST_PTO; - for _ in 0..PTO_PACKET_COUNT { + // We can use MAX_PTO_PACKET_COUNT, because we know the handshake is over. + for _ in 0..MAX_PTO_PACKET_COUNT { let dgram = client.process(None, now).dgram(); assert!(dgram.is_some()); } diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 23c296949d..8318c66c06 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -40,9 +40,9 @@ pub(crate) const PACKET_THRESHOLD: u64 = 3; /// `ACK_ONLY_SIZE_LIMIT` is the minimum size of the congestion window. /// If the congestion window is this small, we will only send ACK frames. pub(crate) const ACK_ONLY_SIZE_LIMIT: usize = 256; -/// The number of packets we send on a PTO. 
-/// And the number to declare lost when the PTO timer is hit. -pub const PTO_PACKET_COUNT: usize = 2; +/// The maximum number of packets we send on a PTO. +/// And the maximum number to declare lost when the PTO timer is hit. +pub const MAX_PTO_PACKET_COUNT: usize = 2; /// The preferred limit on the number of packets that are tracked. /// If we exceed this number, we start sending `PING` frames sooner to /// force the peer to acknowledge some of them. @@ -520,21 +520,34 @@ struct PtoState { } impl PtoState { - pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet) -> Self { + /// The number of packets we send on a PTO. + /// And the number to declare lost when the PTO timer is hit. + fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize { + if space == PacketNumberSpace::Initial && rx_count == 0 { + // For the Initial space, we only send one packet on PTO if we have not received any packets + // from the peer yet. This avoids sending useless PING-only packets when the Client Initial + // is deemed lost. 
+ 1 + } else { + MAX_PTO_PACKET_COUNT + } + } + + pub fn new(space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) -> Self { debug_assert!(probe[space]); Self { space, count: 1, - packets: PTO_PACKET_COUNT, + packets: Self::pto_packet_count(space, rx_count), probe, } } - pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet) { + pub fn pto(&mut self, space: PacketNumberSpace, probe: PacketNumberSpaceSet, rx_count: usize) { debug_assert!(probe[space]); self.space = space; self.count += 1; - self.packets = PTO_PACKET_COUNT; + self.packets = Self::pto_packet_count(space, rx_count); self.probe = probe; } @@ -877,10 +890,11 @@ impl LossRecovery { } fn fire_pto(&mut self, pn_space: PacketNumberSpace, allow_probes: PacketNumberSpaceSet) { + let rx_count = self.stats.borrow().packets_rx; if let Some(st) = &mut self.pto_state { - st.pto(pn_space, allow_probes); + st.pto(pn_space, allow_probes, rx_count); } else { - self.pto_state = Some(PtoState::new(pn_space, allow_probes)); + self.pto_state = Some(PtoState::new(pn_space, allow_probes, rx_count)); } self.pto_state @@ -910,7 +924,14 @@ impl LossRecovery { if t <= now { qdebug!([self], "PTO timer fired for {}", pn_space); let space = self.spaces.get_mut(*pn_space).unwrap(); - lost.extend(space.pto_packets(PTO_PACKET_COUNT).cloned()); + lost.extend( + space + .pto_packets(PtoState::pto_packet_count( + *pn_space, + self.stats.borrow().packets_rx, + )) + .cloned(), + ); pto_space = pto_space.or(Some(*pn_space)); } diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index 3fffcba3da..eb20b8144a 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -326,10 +326,8 @@ fn retry_after_pto() { // Let PTO fire on the client and then let it exhaust its PTO packets. 
now += Duration::from_secs(1); - let pto1 = client.process(None, now).dgram(); - assert!(pto1.unwrap().len() >= 1200); - let pto2 = client.process(None, now).dgram(); - assert!(pto2.unwrap().len() >= 1200); + let pto = client.process(None, now).dgram(); + assert!(pto.unwrap().len() >= 1200); let cb = client.process(None, now).callback(); assert_ne!(cb, Duration::new(0, 0)); From 9192b69ba10d7deb69445b2b6f64efb9196c4aed Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 30 Jan 2024 19:05:59 +1100 Subject: [PATCH 13/41] Don't use lld on windows (#1600) * Don't use lld on windows * Remove merge markers * Maybe without the quoting * Install bench * Trial and error, mostly error * OK, try this now * Try lld.exe --------- Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 7 ++++++- neqo-transport/Cargo.toml | 1 + 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 4df5f3c2a1..7ec34331e0 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -9,7 +9,6 @@ on: env: CARGO_TERM_COLOR: always RUST_BACKTRACE: 1 - RUSTFLAGS: -C link-arg=-fuse-ld=lld jobs: check: @@ -38,6 +37,7 @@ jobs: run: | sudo apt-get install -y --no-install-recommends gyp mercurial ninja-build lld curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" # In addition to installing dependencies, first make sure System Integrity Protection (SIP) # is disabled on this MacOS runner. 
This is needed to allow the NSS libraries to be loaded @@ -56,6 +56,7 @@ jobs: python3 -m pip install git+https://github.com/nodejs/gyp-next python3 -m pip install packaging echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld" >> "$GITHUB_ENV" - name: Install dependencies (Windows) if: runner.os == 'Windows' @@ -70,6 +71,7 @@ jobs: /c/msys64/usr/bin/pacman -S --noconfirm nsinstall lld python3 -m pip install git+https://github.com/nodejs/gyp-next echo "$(python3 -m site --user-base)/bin" >> "$GITHUB_PATH" + echo "RUSTFLAGS=-C link-arg=-fuse-ld=lld.exe" >> "$GITHUB_ENV" - name: Set up MSVC build environment (Windows) if: runner.os == 'Windows' @@ -114,6 +116,9 @@ jobs: - name: Run tests and determine coverage run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info + - name: Benches should at least build + run: cargo +${{ matrix.rust-toolchain }} build --features bench --benches + - name: Run client/server transfer run: | cargo +${{ matrix.rust-toolchain }} build --bin neqo-client --bin neqo-server diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 3263991be9..8b6130197f 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -19,5 +19,6 @@ smallvec = "1.11.1" test-fixture = { path = "../test-fixture" } [features] +bench = [] deny-warnings = [] fuzzing = ["neqo-crypto/fuzzing"] From 8b507d17312da4bae72b69c0d8844f1d3de629c0 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Wed, 31 Jan 2024 19:45:31 +1100 Subject: [PATCH 14/41] Cap the PTO count when working out the period (#1605) The previous code would overflow and produce a 0ns timeout when the PTO count got large (well over 24). This is because the `fast_pto` value tends to have trailing zero bits and shift values between 32-(number of zeros) and 31 (inclusive) would still shift left. This code instead stops increasing the PTO once it hits 24. 
That should be enough for most cases. Closes #1602. --- neqo-transport/src/recovery.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 8318c66c06..3e4e9134bb 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -848,11 +848,7 @@ impl LossRecovery { // where F = fast_pto / FAST_PTO_SCALE (== 1 by default) let pto_count = pto_state.map_or(0, |p| u32::try_from(p.count).unwrap_or(0)); rtt.pto(pn_space) - .checked_mul( - u32::from(fast_pto) - .checked_shl(pto_count) - .unwrap_or(u32::MAX), - ) + .checked_mul(u32::from(fast_pto) << min(pto_count, u32::BITS - u8::BITS)) .map_or(Duration::from_secs(3600), |p| p / u32::from(FAST_PTO_SCALE)) } From e43b9a72bde926649e2e4377b186ce5653bb2069 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 31 Jan 2024 12:12:19 +0200 Subject: [PATCH 15/41] ci: Clone NSS from hg and not the GitHub mirror (#1606) * ci: Clone NSS from hg and not the GitHub mirror Because the latter can be outdated. This is why #1081 is failing CI. * Also make this change in the qns Dockerfile --- .github/workflows/check.yml | 3 ++- qns/Dockerfile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 7ec34331e0..ec541066bb 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -87,10 +87,11 @@ jobs: # version of NSS. Ubuntu 20.04 only has 3.49, which is far too old. # (neqo-crypto/build.rs would also need to query pkg-config to get the # right build flags rather than building NSS.) + # Clone from the main hg repo, because the GitHub mirror can be out of date. 
- name: Fetch NSS and NSPR run: | hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" - git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR" + hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR" echo "NSS_DIR=$NSS_DIR" >> "$GITHUB_ENV" echo "NSPR_DIR=$NSPR_DIR" >> "$GITHUB_ENV" env: diff --git a/qns/Dockerfile b/qns/Dockerfile index f3a45c23a5..ff9cc7c8f9 100644 --- a/qns/Dockerfile +++ b/qns/Dockerfile @@ -21,7 +21,7 @@ ENV NSS_DIR=/nss \ LD_LIBRARY_PATH=/dist/Release/lib RUN set -eux; \ - git clone --depth=1 https://github.com/nss-dev/nss "$NSS_DIR"; \ + hg clone https://hg.mozilla.org/projects/nss "$NSS_DIR"; \ hg clone https://hg.mozilla.org/projects/nspr "$NSPR_DIR" RUN "$NSS_DIR"/build.sh --static -Ddisable_tests=1 -o From fbc5d62697c968dec968941d4c495f4fa6869c9d Mon Sep 17 00:00:00 2001 From: John Schanck Date: Wed, 31 Jan 2024 02:52:11 -0800 Subject: [PATCH 16/41] Add key exchange group configuration knobs (#1599) * Add key exchange group configuration knobs * Change default key exchange group configuration * Add key exchange group config tests * Set minimum NSS version to 3.97 --------- Co-authored-by: Lars Eggert --- neqo-crypto/bindings/bindings.toml | 1 + neqo-crypto/src/agent.rs | 10 +++++ neqo-crypto/src/constants.rs | 1 + neqo-crypto/src/lib.rs | 2 +- neqo-crypto/tests/agent.rs | 65 +++++++++++++++++++++++++++- neqo-transport/src/connection/mod.rs | 22 +++++++++- neqo-transport/src/crypto.rs | 8 ++++ 7 files changed, 106 insertions(+), 3 deletions(-) diff --git a/neqo-crypto/bindings/bindings.toml b/neqo-crypto/bindings/bindings.toml index 7c35a0a224..3e5c1fdf7d 100644 --- a/neqo-crypto/bindings/bindings.toml +++ b/neqo-crypto/bindings/bindings.toml @@ -49,6 +49,7 @@ functions = [ "SSL_PeerSignedCertTimestamps", "SSL_PeerStapledOCSPResponses", "SSL_ResetHandshake", + "SSL_SendAdditionalKeyShares", "SSL_SetNextProtoNego", "SSL_SetURL", "SSL_VersionRangeSet", diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 
3612fec7e3..3868c525bc 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -461,6 +461,16 @@ impl SecretAgent { }) } + /// Set the number of additional key shares that will be sent in the client hello + /// + /// # Errors + /// If the underlying API fails (which shouldn't happen). + pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { + secstatus_to_res(unsafe { + ssl::SSL_SendAdditionalKeyShares(self.fd, c_uint::try_from(count)?) + }) + } + /// Set TLS options. /// /// # Errors diff --git a/neqo-crypto/src/constants.rs b/neqo-crypto/src/constants.rs index 21e1a5aceb..76db972290 100644 --- a/neqo-crypto/src/constants.rs +++ b/neqo-crypto/src/constants.rs @@ -62,6 +62,7 @@ remap_enum! { TLS_GRP_EC_SECP384R1 = ssl_grp_ec_secp384r1, TLS_GRP_EC_SECP521R1 = ssl_grp_ec_secp521r1, TLS_GRP_EC_X25519 = ssl_grp_ec_curve25519, + TLS_GRP_KEM_XYBER768D00 = ssl_grp_kem_xyber768d00, } } diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 332e58a033..2533c727e7 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -74,7 +74,7 @@ use std::{ ptr::null, }; -const MINIMUM_NSS_VERSION: &str = "3.74"; +const MINIMUM_NSS_VERSION: &str = "3.97"; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] #[allow(clippy::upper_case_acronyms)] diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index 82e105fd1a..27017f0a4e 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -4,7 +4,7 @@ use neqo_crypto::{ generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker, TLS_AES_128_GCM_SHA256, - TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_VERSION_1_3, + TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; use std::boxed::Box; @@ -155,6 +155,48 @@ fn chacha_client() { ); } +#[test] +fn server_prefers_first_client_share() { + fixture_init(); + let mut client 
= Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(1) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_X25519); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_X25519); +} + +#[test] +fn server_prefers_second_client_share() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(1) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); +} + #[test] fn p256_server() { fixture_init(); @@ -170,6 +212,27 @@ fn p256_server() { assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); } +#[test] +fn p256_server_hrr() { + fixture_init(); + let mut client = Client::new("server.example", true).expect("should create client"); + let mut server = Server::new(&["key"]).expect("should create server"); + server + .set_groups(&[TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .set_groups(&[TLS_GRP_EC_X25519, TLS_GRP_EC_SECP256R1]) + .expect("groups set"); + client + .send_additional_key_shares(0) + .expect("should set additional key share count"); + + connect(&mut client, &mut server); + + 
assert_eq!(client.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); + assert_eq!(server.info().unwrap().key_exchange(), TLS_GRP_EC_SECP256R1); +} + #[test] fn alpn() { fixture_init(); diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 49e4ec43aa..7e8c1d4737 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -42,7 +42,7 @@ use neqo_common::{ qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, }; use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, + agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, Server, ZeroRttChecker, }; @@ -545,6 +545,26 @@ impl Connection { Ok(()) } + /// Enable a set of key exchange groups. + pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { + if self.state != State::Init { + qerror!([self], "Cannot enable groups in state {:?}", self.state); + return Err(Error::ConnectionState); + } + self.crypto.tls.set_groups(groups)?; + Ok(()) + } + + /// Set the number of additional key shares to send in the client hello. 
+ pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { + if self.state != State::Init { + qerror!([self], "Cannot enable groups in state {:?}", self.state); + return Err(Error::ConnectionState); + } + self.crypto.tls.send_additional_key_shares(count)?; + Ok(()) + } + fn make_resumption_token(&mut self) -> ResumptionToken { debug_assert_eq!(self.role, Role::Client); debug_assert!(self.crypto.has_resumption_token()); diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 803c049de5..898eb44372 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -22,6 +22,7 @@ use neqo_crypto::{ PrivateKey, PublicKey, Record, RecordList, ResumptionToken, SymKey, ZeroRttChecker, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, TLS_CT_HANDSHAKE, TLS_EPOCH_APPLICATION_DATA, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL, TLS_EPOCH_ZERO_RTT, + TLS_GRP_EC_SECP256R1, TLS_GRP_EC_SECP384R1, TLS_GRP_EC_SECP521R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; @@ -78,6 +79,13 @@ impl Crypto { TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, ])?; + agent.set_groups(&[ + TLS_GRP_EC_X25519, + TLS_GRP_EC_SECP256R1, + TLS_GRP_EC_SECP384R1, + TLS_GRP_EC_SECP521R1, + ])?; + agent.send_additional_key_shares(1)?; agent.set_alpn(&protocols)?; agent.disable_end_of_early_data()?; // Always enable 0-RTT on the client, but the server needs From 5d54da7e22f187e4d60daad486830f9d25bb4d83 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Wed, 31 Jan 2024 23:51:15 +0200 Subject: [PATCH 17/41] Do proper congestion control before the first ACK has come in (#1603) * Properly adjust CC state when PTOs fire before ACKs are received * Fix test * Fix test * Fix another test * Progress on the tests * Fix another test * Fix more tests * Simplify assert_idle * Try and fix sim. Doesn't work yet. 
* Undo * Fix * Update neqo-transport/src/recovery.rs Co-authored-by: Martin Thomson * Make RTT in `idle_timeout_crazy_rtt` less crazy, so #1605 works * Address code review comments * Don't need to look at `tls_info`. --------- Co-authored-by: Martin Thomson --- neqo-common/src/lib.rs | 4 +- neqo-transport/Cargo.toml | 1 + neqo-transport/src/connection/tests/cc.rs | 6 +- .../src/connection/tests/handshake.rs | 3 +- neqo-transport/src/connection/tests/mod.rs | 92 +++++++++++-------- .../src/connection/tests/recovery.rs | 38 ++++---- neqo-transport/src/path.rs | 5 +- neqo-transport/src/recovery.rs | 12 ++- neqo-transport/src/stats.rs | 2 + neqo-transport/tests/network.rs | 4 +- 10 files changed, 96 insertions(+), 71 deletions(-) diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index 202f39e0fb..d31f47c664 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -28,6 +28,8 @@ pub use self::tos::{IpTos, IpTosDscp, IpTosEcn}; use std::fmt::Write; +use enum_map::Enum; + #[must_use] pub fn hex(buf: impl AsRef<[u8]>) -> String { let mut ret = String::with_capacity(buf.as_ref().len() * 2); @@ -77,7 +79,7 @@ pub const fn const_min(a: usize, b: usize) -> usize { [a, b][(a >= b) as usize] } -#[derive(Debug, PartialEq, Eq, Copy, Clone)] +#[derive(Debug, PartialEq, Eq, Copy, Clone, Enum)] /// Client or Server. 
pub enum Role { Client, diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 8b6130197f..ac2bdaa85d 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -16,6 +16,7 @@ qlog = "0.11.0" smallvec = "1.11.1" [dev-dependencies] +enum-map = "2.7" test-fixture = { path = "../test-fixture" } [features] diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index f974fd94a0..6c70e424ea 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -7,8 +7,8 @@ use super::super::Output; use super::{ ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, default_client, - default_server, fill_cwnd, induce_persistent_congestion, send_something, DEFAULT_RTT, - FORCE_IDLE_CLIENT_1RTT_PACKETS, POST_HANDSHAKE_CWND, + default_server, fill_cwnd, induce_persistent_congestion, send_something, + CLIENT_HANDSHAKE_1RTT_PACKETS, DEFAULT_RTT, POST_HANDSHAKE_CWND, }; use crate::cc::MAX_DATAGRAM_SIZE; use crate::packet::PacketNumber; @@ -54,7 +54,7 @@ fn cc_slow_start_to_cong_avoidance_recovery_period() { // We have already sent packets in `connect_rtt_idle`, // so include a fudge factor. 
let flight1_largest = - PacketNumber::try_from(c_tx_dgrams.len() + FORCE_IDLE_CLIENT_1RTT_PACKETS).unwrap(); + PacketNumber::try_from(c_tx_dgrams.len() + CLIENT_HANDSHAKE_1RTT_PACKETS).unwrap(); // Server: Receive and generate ack now += DEFAULT_RTT / 2; diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 55cd10b667..33aff5d528 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -30,6 +30,7 @@ use std::mem; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::rc::Rc; use std::time::Duration; +use test_fixture::assertions::assert_coalesced_0rtt; use test_fixture::{self, addr, assertions, datagram, fixture_init, now, split_datagram}; const ECH_CONFIG_ID: u8 = 7; @@ -380,10 +381,10 @@ fn reorder_05rtt_with_0rtt() { // Now PTO at the client and cause the server to re-send handshake packets. now += AT_LEAST_PTO; let c3 = client.process(None, now).dgram(); + assert_coalesced_0rtt(c3.as_ref().unwrap()); now += RTT / 2; let s3 = server.process(c3.as_ref(), now).dgram().unwrap(); - assertions::assert_no_1rtt(&s3[..]); // The client should be able to process the 0.5 RTT now. // This should contain an ACK, so we are processing an ACK from the past. 
diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index b722feff78..ab520c3198 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -12,6 +12,8 @@ use crate::{ cc::{CWND_INITIAL_PKTS, CWND_MIN}, cid::ConnectionIdRef, events::ConnectionEvent, + frame::FRAME_TYPE_PING, + packet::PacketBuilder, path::PATH_MTU_V6, recovery::ACK_ONLY_SIZE_LIMIT, stats::{FrameStats, Stats, MAX_PTO_COUNTS}, @@ -32,6 +34,8 @@ use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; +use enum_map::enum_map; + // All the tests. mod ackrate; mod cc; @@ -53,7 +57,7 @@ const DEFAULT_RTT: Duration = Duration::from_millis(100); const AT_LEAST_PTO: Duration = Duration::from_secs(1); const DEFAULT_STREAM_DATA: &[u8] = b"message"; /// The number of 1-RTT packets sent in `force_idle` by a client. -const FORCE_IDLE_CLIENT_1RTT_PACKETS: usize = 3; +const CLIENT_HANDSHAKE_1RTT_PACKETS: usize = 1; /// WARNING! In this module, this version of the generator needs to be used. /// This copies the implementation from @@ -151,6 +155,25 @@ pub fn maybe_authenticate(conn: &mut Connection) -> bool { false } +/// Compute the RTT variance after `n` ACKs or other RTT updates. +pub fn rttvar_after_n_updates(n: usize, rtt: Duration) -> Duration { + assert!(n > 0); + let mut rttvar = rtt / 2; + for _ in 1..n { + rttvar = rttvar * 3 / 4; + } + rttvar +} + +/// This inserts a PING frame into packets. +struct PingWriter {} + +impl crate::connection::test_internal::FrameWriter for PingWriter { + fn write_frames(&mut self, builder: &mut PacketBuilder) { + builder.encode_varint(FRAME_TYPE_PING); + } +} + /// Drive the handshake between the client and server. 
fn handshake( client: &mut Connection, @@ -170,10 +193,28 @@ fn handshake( ) }; + let mut did_ping = enum_map! {_ => false}; while !is_done(a) { _ = maybe_authenticate(a); let had_input = input.is_some(); + // Insert a PING frame into the first application data packet an endpoint sends, + // in order to force the peer to ACK it. For the server, this is depending on the + // client's connection state, which is accessible during the tests. + // + // We're doing this to prevent packet loss from delaying ACKs, which would cause + // cwnd to shrink, and also to prevent the delayed ACK timer from being armed after + // the handshake, which is not something the tests are written to account for. + let should_ping = !did_ping[a.role()] + && (a.role() == Role::Client && *a.state() == State::Connected + || (a.role() == Role::Server && *b.state() == State::Connected)); + if should_ping { + a.test_frame_writer = Some(Box::new(PingWriter {})); + } let output = a.process(input.as_ref(), now).dgram(); + if should_ping { + a.test_frame_writer = None; + did_ping[a.role()] = true; + } assert!(had_input || output.is_some()); input = output; qtrace!("handshake: t += {:?}", rtt / 2); @@ -205,9 +246,9 @@ fn connect_with_rtt( ) -> Instant { fn check_rtt(stats: &Stats, rtt: Duration) { assert_eq!(stats.rtt, rtt); - // Confirmation takes 2 round trips, - // so rttvar is reduced by 1/4 (from rtt/2). - assert_eq!(stats.rttvar, rtt * 3 / 8); + // Validate that rttvar has been computed correctly based on the number of RTT updates. + let n = stats.frame_rx.ack + usize::from(stats.rtt_init_guess); + assert_eq!(stats.rttvar, rttvar_after_n_updates(n, rtt)); } let now = handshake(client, server, now, rtt); assert_eq!(*client.state(), State::Confirmed); @@ -247,51 +288,26 @@ fn exchange_ticket( get_tokens(client).pop().expect("should have token") } -/// Getting the client and server to reach an idle state is surprisingly hard. 
-/// The server sends `HANDSHAKE_DONE` at the end of the handshake, and the client -/// doesn't immediately acknowledge it. Reordering packets does the trick. -fn force_idle( - client: &mut Connection, - server: &mut Connection, - rtt: Duration, - mut now: Instant, -) -> Instant { - // The client has sent NEW_CONNECTION_ID, so ensure that the server generates - // an acknowledgment by sending some reordered packets. - qtrace!("force_idle: send reordered client packets"); - let c1 = send_something(client, now); - let c2 = send_something(client, now); - now += rtt / 2; - server.process_input(&c2, now); - server.process_input(&c1, now); - - // Now do the same for the server. (The ACK is in the first one.) - qtrace!("force_idle: send reordered server packets"); - let s1 = send_something(server, now); - let s2 = send_something(server, now); - now += rtt / 2; - // Delivering s2 first at the client causes it to want to ACK. - client.process_input(&s2, now); - // Delivering s1 should not have the client change its mind about the ACK. - let ack = client.process(Some(&s1), now); - assert!(ack.as_dgram_ref().is_some()); +/// The `handshake` method inserts PING frames into the first application data packets, +/// which forces each peer to ACK them. As a side effect, that causes both sides of the +/// connection to be idle afterwards. This method simply verifies that this is the case. +fn assert_idle(client: &mut Connection, server: &mut Connection, rtt: Duration, now: Instant) { let idle_timeout = min( client.conn_params.get_idle_timeout(), server.conn_params.get_idle_timeout(), ); - assert_eq!(client.process_output(now), Output::Callback(idle_timeout)); - now += rtt / 2; + // Client started its idle period half an RTT before now. 
assert_eq!( - server.process(ack.as_dgram_ref(), now), - Output::Callback(idle_timeout) + client.process_output(now), + Output::Callback(idle_timeout - rtt / 2) ); - now + assert_eq!(server.process_output(now), Output::Callback(idle_timeout)); } /// Connect with an RTT and then force both peers to be idle. fn connect_rtt_idle(client: &mut Connection, server: &mut Connection, rtt: Duration) -> Instant { let now = connect_with_rtt(client, server, now(), rtt); - let now = force_idle(client, server, rtt, now); + assert_idle(client, server, rtt, now); // Drain events from both as well. _ = client.events().count(); _ = server.events().count(); diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 421dded3d4..87b2b37839 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -29,7 +29,10 @@ use std::{ mem, time::{Duration, Instant}, }; -use test_fixture::{self, now, split_datagram}; +use test_fixture::{ + assertions::{assert_handshake, assert_initial}, + now, split_datagram, +}; #[test] fn pto_works_basic() { @@ -210,14 +213,17 @@ fn pto_handshake_complete() { let mut server = default_server(); let pkt = client.process(None, now).dgram(); + assert_initial(pkt.as_ref().unwrap(), false); let cb = client.process(None, now).callback(); assert_eq!(cb, Duration::from_millis(300)); now += HALF_RTT; let pkt = server.process(pkt.as_ref(), now).dgram(); + assert_initial(pkt.as_ref().unwrap(), false); now += HALF_RTT; let pkt = client.process(pkt.as_ref(), now).dgram(); + assert_handshake(pkt.as_ref().unwrap()); let cb = client.process(None, now).callback(); // The client now has a single RTT estimate (20ms), so @@ -233,7 +239,7 @@ fn pto_handshake_complete() { qdebug!("---- client: SH..FIN -> FIN"); let pkt1 = client.process(None, now).dgram(); - assert!(pkt1.is_some()); + assert_handshake(pkt1.as_ref().unwrap()); assert_eq!(*client.state(), 
State::Connected); let cb = client.process(None, now).callback(); @@ -247,6 +253,7 @@ fn pto_handshake_complete() { qdebug!("---- client: PTO"); now += HALF_RTT * 6; let pkt2 = client.process(None, now).dgram(); + assert_handshake(pkt2.as_ref().unwrap()); pto_counts[0] = 1; assert_eq!(client.stats.borrow().pto_counts, pto_counts); @@ -257,7 +264,10 @@ fn pto_handshake_complete() { let stream_id = client.stream_create(StreamType::UniDi).unwrap(); client.stream_close_send(stream_id).unwrap(); let pkt3 = client.process(None, now).dgram(); + assert_handshake(pkt3.as_ref().unwrap()); let (pkt3_hs, pkt3_1rtt) = split_datagram(&pkt3.unwrap()); + assert_handshake(&pkt3_hs); + assert!(pkt3_1rtt.is_some()); // PTO has been doubled. let cb = client.process(None, now).callback(); @@ -283,16 +293,21 @@ fn pto_handshake_complete() { // Check that the other packets (pkt2, pkt3) are Handshake packets. // The server discarded the Handshake keys already, therefore they are dropped. // Note that these don't include 1-RTT packets, because 1-RTT isn't sent on PTO. 
+ let (pkt2_hs, pkt2_1rtt) = split_datagram(&pkt2.unwrap()); + assert_handshake(&pkt2_hs); + assert!(pkt2_1rtt.is_some()); let dropped_before1 = server.stats().dropped_rx; let server_frames = server.stats().frame_rx.all; - server.process_input(&pkt2.unwrap(), now); + server.process_input(&pkt2_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before1); assert_eq!(server.stats().frame_rx.all, server_frames); + server.process_input(&pkt2_1rtt.unwrap(), now); + let server_frames2 = server.stats().frame_rx.all; let dropped_before2 = server.stats().dropped_rx; server.process_input(&pkt3_hs, now); assert_eq!(1, server.stats().dropped_rx - dropped_before2); - assert_eq!(server.stats().frame_rx.all, server_frames); + assert_eq!(server.stats().frame_rx.all, server_frames2); now += HALF_RTT; @@ -307,13 +322,6 @@ fn pto_handshake_complete() { now += cb; let out = client.process(None, now).dgram(); assert!(out.is_some()); - let cb = client.process(None, now).callback(); - // The handshake keys are discarded, but now we're back to the idle timeout. - // We don't send another PING because the handshake space is done and there - // is nothing to probe for. - - let idle_timeout = ConnectionParameters::default().get_idle_timeout(); - assert_eq!(cb, idle_timeout - expected_ack_delay); } /// Test that PTO in the Handshake space contains the right frames. @@ -616,14 +624,6 @@ fn loss_time_past_largest_acked() { let lr_time = client.process(None, now).callback(); assert_ne!(lr_time, Duration::from_secs(0)); assert!(lr_time < (RTT / 2)); - - // Skipping forward by the loss recovery timer should cause the client to - // mark packets as lost and retransmit, after which we should be on the PTO - // timer. - now += lr_time; - let delay = client.process(None, now).callback(); - assert_ne!(delay, Duration::from_secs(0)); - assert!(delay > lr_time); } /// `sender` sends a little, `receiver` acknowledges it. 
diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 4430bb2bdb..2ab90c169c 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -28,7 +28,7 @@ use crate::{ sender::PacketSender, stats::FrameStats, tracking::{PacketNumberSpace, SentPacket}, - Error, Res, + Error, Res, Stats, }; use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; @@ -946,7 +946,7 @@ impl Path { } /// Discard a packet that previously might have been in-flight. - pub fn discard_packet(&mut self, sent: &SentPacket, now: Instant) { + pub fn discard_packet(&mut self, sent: &SentPacket, now: Instant, stats: &mut Stats) { if self.rtt.first_sample_time().is_none() { // When discarding a packet there might not be a good RTT estimate. // But discards only occur after receiving something, so that means @@ -958,6 +958,7 @@ impl Path { "discarding a packet without an RTT estimate; guessing RTT={:?}", now - sent.time_sent ); + stats.rtt_init_guess = true; self.rtt.update( &mut self.qlog, now - sent.time_sent, diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index 3e4e9134bb..a640b75371 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -412,7 +412,7 @@ impl LossRecoverySpace { .sent_packets .iter_mut() // BTreeMap iterates in order of ascending PN - .take_while(|(&k, _)| Some(k) < largest_acked) + .take_while(|(&k, _)| k < largest_acked.unwrap_or(PacketNumber::MAX)) { // Packets sent before now - loss_delay are deemed lost. if packet.time_sent + loss_delay <= now { @@ -430,7 +430,9 @@ impl LossRecoverySpace { largest_acked ); } else { - self.first_ooo_time = Some(packet.time_sent); + if largest_acked.is_some() { + self.first_ooo_time = Some(packet.time_sent); + } // No more packets can be declared lost after this one. 
break; }; @@ -622,7 +624,7 @@ impl LossRecovery { .collect::>(); let mut path = primary_path.borrow_mut(); for p in &mut dropped { - path.discard_packet(p, now); + path.discard_packet(p, now, &mut self.stats.borrow_mut()); } dropped } @@ -762,7 +764,7 @@ impl LossRecovery { .collect::>(); let mut path = primary_path.borrow_mut(); for p in &mut dropped { - path.discard_packet(p, now); + path.discard_packet(p, now, &mut self.stats.borrow_mut()); } dropped } @@ -795,7 +797,7 @@ impl LossRecovery { qdebug!([self], "Reset loss recovery state for {}", space); let mut path = primary_path.borrow_mut(); for p in self.spaces.drop_space(space) { - path.discard_packet(&p, now); + path.discard_packet(&p, now, &mut self.stats.borrow_mut()); } // We just made progress, so discard PTO count. diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index 9428b61949..9e956eb02e 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -141,6 +141,8 @@ pub struct Stats { pub rtt: Duration, /// The current, estimated round-trip time variation on the primary path. pub rttvar: Duration, + /// Whether the first RTT sample was guessed from a discarded packet. + pub rtt_init_guess: bool, /// Count PTOs. Single PTOs, 2 PTOs in a row, 3 PTOs in row, etc. are counted /// separately. 
diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 3f9d2240a0..e2389090a7 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -67,7 +67,7 @@ simulate!( ))) ] ), - Delay::new(weeks(150)..weeks(150)), + Delay::new(weeks(15)..weeks(15)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), @@ -78,7 +78,7 @@ simulate!( ))) ] ), - Delay::new(weeks(100)..weeks(100)), + Delay::new(weeks(10)..weeks(10)), Drop::percentage(10), ], ); From d0ae17fd20b82af60e12d79842edeaf088689e48 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Wed, 31 Jan 2024 23:19:30 +0100 Subject: [PATCH 18/41] Merge pull request from GHSA-5m9j-vr32-g7j5 * fix(transport): bound ACK range count in ACK frame * Update encode_varint comments Co-authored-by: Martin Thomson * Move constant adjacent to code * Mention Ethernet MTU * Fix spacing --------- Co-authored-by: Martin Thomson --- neqo-transport/src/frame.rs | 31 ++++++++++++++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 7eeba507bc..8081baef6c 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -387,6 +387,17 @@ impl<'a> Frame<'a> { } pub fn decode(dec: &mut Decoder<'a>) -> Res { + /// Maximum ACK Range Count in ACK Frame + /// + /// Given a max UDP datagram size of 64k bytes and a minimum ACK Range size of 2 + /// bytes (2 QUIC varints), a single datagram can at most contain 32k ACK + /// Ranges. + /// + /// Note that the maximum (jumbogram) Ethernet MTU of 9216 or on the + /// Internet the regular Ethernet MTU of 1518 are more realistically to + /// be the limiting factor. Though for simplicity the higher limit is chosen. 
+ const MAX_ACK_RANGE_COUNT: u64 = 32 * 1024; + fn d(v: Option) -> Res { v.ok_or(Error::NoMoreData) } @@ -410,7 +421,13 @@ impl<'a> Frame<'a> { FRAME_TYPE_ACK | FRAME_TYPE_ACK_ECN => { let la = dv(dec)?; let ad = dv(dec)?; - let nr = dv(dec)?; + let nr = dv(dec).and_then(|nr| { + if nr < MAX_ACK_RANGE_COUNT { + Ok(nr) + } else { + Err(Error::TooMuchData) + } + })?; let fa = dv(dec)?; let mut arr: Vec = Vec::with_capacity(nr as usize); for _ in 0..nr { @@ -943,4 +960,16 @@ mod tests { }; just_dec(&f, "403103010203"); } + + #[test] + fn frame_decode_enforces_bound_on_ack_range() { + let mut e = Encoder::new(); + + e.encode_varint(FRAME_TYPE_ACK); + e.encode_varint(0u64); // largest acknowledged + e.encode_varint(0u64); // ACK delay + e.encode_varint(u32::MAX); // ACK range count = huge, but maybe available for allocation + + assert_eq!(Err(Error::TooMuchData), Frame::decode(&mut e.as_decoder())); + } } From f85f10049a59ee45f7185a06f31ac2b9d8bc5763 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Thu, 1 Feb 2024 09:10:13 +0100 Subject: [PATCH 19/41] neqo v0.6.9 (#1608) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index ca11186f95..fba2110d6d 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.8" +version = "0.6.9" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index f6fd952a18..b7136aaa60 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.8" +version = "0.6.9" authors = ["Bobby Holley "] edition = "2018" 
rust-version = "1.70.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index c7cad21c87..c5909ac5e5 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.8" +version = "0.6.9" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 8dafbe8b40..9956cef05c 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.8" +version = "0.6.9" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index a197aa2203..8b298167f2 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.8" +version = "0.6.9" authors = ["EKR "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index c07a7fcec0..31a1bf28e6 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.8" +version = "0.6.9" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 888df43163..b3f8aae462 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.8" +version = "0.6.9" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index ac2bdaa85d..ae33822018 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.8" +version = "0.6.9" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.70.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index f142c9a2f4..2c163fbb07 
100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.8" +version = "0.6.9" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" From ca7d2271773a572730451112ff2cef54c19abb45 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 1 Feb 2024 14:49:37 +0200 Subject: [PATCH 20/41] chore: Add a `.rustfmt.toml` and reformat (#1607) * chore: Add a `.rustfmt.toml` and reformat Some of the settings in `.rustfmt.toml` require the nightly toolchain, i.e, you need to `cargo +nightly fmt` to have them be used. For VS Code, you can make is use the nightly `rustfmt` by default by adding ``` "rust-analyzer.rustfmt.extraArgs": [ "+nightly" ], ``` to your `settings.json` file. Fixes #1417 * Normalize comment headings * Spacing fixes --- .rustfmt.toml | 7 + neqo-client/src/main.rs | 41 ++- neqo-common/src/codec.rs | 22 ++ neqo-common/src/datagram.rs | 3 +- neqo-common/src/event.rs | 3 +- neqo-common/src/hrtime.rs | 3 +- neqo-common/src/incrdecoder.rs | 6 + neqo-common/src/lib.rs | 16 +- neqo-common/src/log.rs | 5 +- neqo-common/src/qlog.rs | 1 + neqo-common/src/timer.rs | 23 +- neqo-crypto/build.rs | 12 +- neqo-crypto/src/aead.rs | 19 +- neqo-crypto/src/aead_fuzzing.rs | 15 +- neqo-crypto/src/agent.rs | 67 ++-- neqo-crypto/src/agentio.rs | 29 +- neqo-crypto/src/cert.rs | 22 +- neqo-crypto/src/ech.rs | 21 +- neqo-crypto/src/err.rs | 14 +- neqo-crypto/src/ext.rs | 17 +- neqo-crypto/src/hkdf.rs | 16 +- neqo-crypto/src/hp.rs | 23 +- neqo-crypto/src/lib.rs | 30 +- neqo-crypto/src/p11.rs | 20 +- neqo-crypto/src/replay.rs | 12 +- neqo-crypto/src/secrets.rs | 6 +- neqo-crypto/src/selfencrypt.rs | 20 +- neqo-crypto/src/ssl.rs | 8 +- neqo-crypto/src/time.rs | 19 +- neqo-crypto/tests/aead.rs | 7 +- neqo-crypto/tests/agent.rs | 7 +- neqo-crypto/tests/ext.rs | 12 +- neqo-crypto/tests/handshake.rs | 4 +- neqo-crypto/tests/hkdf.rs | 10 +- neqo-crypto/tests/hp.rs | 3 +- neqo-crypto/tests/selfencrypt.rs | 8 
+- neqo-http3/src/buffered_send_stream.rs | 7 +- neqo-http3/src/client_events.rs | 17 +- neqo-http3/src/conn_params.rs | 5 +- neqo-http3/src/connection.rs | 119 ++++--- neqo-http3/src/connection_client.rs | 304 ++++++++++++------ neqo-http3/src/connection_server.rs | 57 ++-- neqo-http3/src/control_stream_local.rs | 11 +- neqo-http3/src/control_stream_remote.rs | 10 +- .../src/features/extended_connect/mod.rs | 14 +- .../tests/webtransport/datagrams.rs | 14 +- .../tests/webtransport/mod.rs | 18 +- .../tests/webtransport/negotiation.rs | 12 +- .../tests/webtransport/sessions.rs | 24 +- .../tests/webtransport/streams.rs | 22 +- .../extended_connect/webtransport_session.rs | 26 +- .../extended_connect/webtransport_streams.rs | 9 +- neqo-http3/src/features/mod.rs | 15 +- neqo-http3/src/frames/hframe.rs | 7 +- neqo-http3/src/frames/reader.rs | 20 +- neqo-http3/src/frames/tests/hframe.rs | 7 +- neqo-http3/src/frames/tests/mod.rs | 10 +- neqo-http3/src/frames/tests/reader.rs | 11 +- neqo-http3/src/frames/wtframe.rs | 6 +- neqo-http3/src/headers_checks.rs | 15 +- neqo-http3/src/lib.rs | 48 ++- neqo-http3/src/priority.rs | 19 +- neqo-http3/src/push_controller.rs | 73 +++-- neqo-http3/src/qlog.rs | 5 +- neqo-http3/src/qpack_decoder_receiver.rs | 7 +- neqo-http3/src/qpack_encoder_receiver.rs | 7 +- neqo-http3/src/recv_message.rs | 25 +- neqo-http3/src/request_target.rs | 3 + neqo-http3/src/send_message.rs | 24 +- neqo-http3/src/server.rs | 46 +-- neqo-http3/src/server_connection_events.rs | 13 +- neqo-http3/src/server_events.rs | 56 +++- neqo-http3/src/settings.rs | 7 +- neqo-http3/src/stream_type_reader.rs | 63 ++-- neqo-http3/tests/httpconn.rs | 11 +- neqo-http3/tests/priority.rs | 5 +- neqo-http3/tests/webtransport.rs | 4 +- neqo-interop/src/main.rs | 19 +- neqo-qpack/src/decoder.rs | 48 ++- neqo-qpack/src/decoder_instructions.rs | 31 +- neqo-qpack/src/encoder.rs | 139 +++++--- neqo-qpack/src/encoder_instructions.rs | 32 +- neqo-qpack/src/header_block.rs | 12 +- 
neqo-qpack/src/huffman.rs | 17 +- neqo-qpack/src/huffman_decode_helper.rs | 6 +- neqo-qpack/src/lib.rs | 9 +- neqo-qpack/src/prefix.rs | 12 +- neqo-qpack/src/qlog.rs | 6 +- neqo-qpack/src/qpack_send_buf.rs | 8 +- neqo-qpack/src/reader.rs | 35 +- neqo-qpack/src/table.rs | 58 ++-- neqo-server/src/main.rs | 7 +- neqo-server/src/old_https.rs | 12 +- neqo-transport/src/ackrate.rs | 14 +- neqo-transport/src/addr_valid.rs | 24 +- neqo-transport/src/cc/classic_cc.rs | 15 +- neqo-transport/src/cc/cubic.rs | 22 +- neqo-transport/src/cc/mod.rs | 7 +- neqo-transport/src/cc/new_reno.rs | 6 +- neqo-transport/src/cc/tests/cubic.rs | 47 +-- neqo-transport/src/cc/tests/new_reno.rs | 11 +- neqo-transport/src/cid.rs | 26 +- neqo-transport/src/connection/idle.rs | 6 +- neqo-transport/src/connection/mod.rs | 91 ++++-- neqo-transport/src/connection/params.rs | 39 ++- neqo-transport/src/connection/saved.rs | 6 +- neqo-transport/src/connection/state.rs | 27 +- .../src/connection/tests/ackrate.rs | 7 +- neqo-transport/src/connection/tests/cc.rs | 28 +- neqo-transport/src/connection/tests/close.rs | 15 +- .../src/connection/tests/datagram.rs | 18 +- .../src/connection/tests/fuzzing.rs | 5 +- .../src/connection/tests/handshake.rs | 47 +-- neqo-transport/src/connection/tests/idle.rs | 26 +- neqo-transport/src/connection/tests/keys.rs | 23 +- .../src/connection/tests/migration.rs | 27 +- neqo-transport/src/connection/tests/mod.rs | 31 +- .../src/connection/tests/priority.rs | 9 +- .../src/connection/tests/recovery.rs | 23 +- .../src/connection/tests/resumption.rs | 16 +- neqo-transport/src/connection/tests/stream.rs | 10 +- neqo-transport/src/connection/tests/vn.rs | 24 +- .../src/connection/tests/zerortt.rs | 16 +- neqo-transport/src/crypto.rs | 5 +- neqo-transport/src/dump.rs | 13 +- neqo-transport/src/events.rs | 17 +- neqo-transport/src/fc.rs | 18 +- neqo-transport/src/frame.rs | 18 +- neqo-transport/src/lib.rs | 7 +- neqo-transport/src/pace.rs | 16 +- neqo-transport/src/packet/mod.rs | 32 
+- neqo-transport/src/packet/retry.rs | 5 +- neqo-transport/src/path.rs | 11 +- neqo-transport/src/qlog.rs | 3 +- neqo-transport/src/quic_datagrams.rs | 19 +- neqo-transport/src/recovery.rs | 31 +- neqo-transport/src/recv_stream.rs | 11 +- neqo-transport/src/rtt.rs | 20 +- neqo-transport/src/send_stream.rs | 22 +- neqo-transport/src/sender.rs | 17 +- neqo-transport/src/server.rs | 37 +-- neqo-transport/src/stats.rs | 7 +- neqo-transport/src/stream_id.rs | 3 +- neqo-transport/src/streams.rs | 14 +- neqo-transport/src/tparams.rs | 24 +- neqo-transport/src/tracking.rs | 11 +- neqo-transport/src/version.rs | 6 +- neqo-transport/tests/common/mod.rs | 8 +- neqo-transport/tests/conn_vectors.rs | 5 +- neqo-transport/tests/connection.rs | 3 +- neqo-transport/tests/network.rs | 4 +- neqo-transport/tests/retry.rs | 11 +- neqo-transport/tests/server.rs | 7 +- neqo-transport/tests/sim/connection.rs | 14 +- neqo-transport/tests/sim/delay.rs | 16 +- neqo-transport/tests/sim/drop.rs | 10 +- neqo-transport/tests/sim/mod.rs | 20 +- neqo-transport/tests/sim/rng.rs | 4 +- neqo-transport/tests/sim/taildrop.rs | 16 +- test-fixture/src/assertions.rs | 28 +- test-fixture/src/lib.rs | 48 ++- 161 files changed, 2060 insertions(+), 1345 deletions(-) create mode 100644 .rustfmt.toml diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000000..482732e6a5 --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,7 @@ +comment_width=100 +wrap_comments=true + +imports_granularity="Crate" +group_imports="StdExternalCrate" + +format_code_in_doc_comments=true diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 1a038ddba3..3db90aac10 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -7,25 +7,6 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use common::IpTos; -use qlog::{events::EventImportance, streamer::QlogStreamer}; - -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; - -use neqo_common::{self as 
common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; -use neqo_crypto::{ - constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, - init, AuthenticationStatus, Cipher, ResumptionToken, -}; -use neqo_http3::{ - self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, - Priority, -}; -use neqo_transport::{ - CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, - EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, -}; - use std::{ cell::RefCell, collections::{HashMap, VecDeque}, @@ -41,6 +22,22 @@ use std::{ time::{Duration, Instant}, }; +use common::IpTos; +use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; +use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, + init, AuthenticationStatus, Cipher, ResumptionToken, +}; +use neqo_http3::{ + self, Error, Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Output, + Priority, +}; +use neqo_transport::{ + CongestionControlAlgorithm, Connection, ConnectionId, ConnectionParameters, + EmptyConnectionIdGenerator, Error as TransportError, StreamId, StreamType, Version, +}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; use structopt::StructOpt; use url::{Origin, Url}; @@ -1140,9 +1137,6 @@ mod old { time::{Duration, Instant}, }; - use url::Url; - - use super::{qlog_new, KeyUpdateState, Res}; use mio::{Events, Poll}; use neqo_common::{event::Provider, Datagram, IpTos}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; @@ -1150,8 +1144,9 @@ mod old { Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, StreamType, }; + use url::Url; - use super::{emit_datagram, get_output_file, Args}; + use super::{emit_datagram, get_output_file, qlog_new, Args, KeyUpdateState, 
Res}; struct HandlerOld<'b> { streams: HashMap>, diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 99ba9ec52a..57ff13f39f 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -34,7 +34,9 @@ impl<'a> Decoder<'a> { } /// Skip n bytes. + /// /// # Panics + /// /// If the remaining quantity is less than `n`. pub fn skip(&mut self, n: usize) { assert!(self.remaining() >= n, "insufficient data"); @@ -90,7 +92,9 @@ impl<'a> Decoder<'a> { } /// Decodes an unsigned integer of length 1..=8. + /// /// # Panics + /// /// This panics if `n` is not in the range `1..=8`. pub fn decode_uint(&mut self, n: usize) -> Option { assert!(n > 0 && n <= 8); @@ -198,7 +202,9 @@ pub struct Encoder { impl Encoder { /// Static helper function for previewing the results of encoding without doing it. + /// /// # Panics + /// /// When `v` is too large. #[must_use] pub const fn varint_len(v: u64) -> usize { @@ -212,7 +218,9 @@ impl Encoder { } /// Static helper to determine how long a varint-prefixed array encodes to. + /// /// # Panics + /// /// When `len` doesn't fit in a `u64`. #[must_use] pub fn vvec_len(len: usize) -> usize { @@ -261,7 +269,9 @@ impl Encoder { } /// Don't use this except in testing. + /// /// # Panics + /// /// When `s` contains non-hex values or an odd number of values. #[must_use] pub fn from_hex(s: impl AsRef) -> Self { @@ -291,7 +301,9 @@ impl Encoder { } /// Encode an integer of any size up to u64. + /// /// # Panics + /// /// When `n` is outside the range `1..=8`. #[allow(clippy::cast_possible_truncation)] pub fn encode_uint>(&mut self, n: usize, v: T) -> &mut Self { @@ -304,7 +316,9 @@ impl Encoder { } /// Encode a QUIC varint. + /// /// # Panics + /// /// When `v >= 1<<62`. pub fn encode_varint>(&mut self, v: T) -> &mut Self { let v = v.into(); @@ -319,7 +333,9 @@ impl Encoder { } /// Encode a vector in TLS style. + /// /// # Panics + /// /// When `v` is longer than 2^64. 
pub fn encode_vec(&mut self, n: usize, v: &[u8]) -> &mut Self { self.encode_uint(n, u64::try_from(v.as_ref().len()).unwrap()) @@ -327,7 +343,9 @@ impl Encoder { } /// Encode a vector in TLS style using a closure for the contents. + /// /// # Panics + /// /// When `f()` returns a length larger than `2^8n`. #[allow(clippy::cast_possible_truncation)] pub fn encode_vec_with(&mut self, n: usize, f: F) -> &mut Self { @@ -343,7 +361,9 @@ impl Encoder { } /// Encode a vector with a varint length. + /// /// # Panics + /// /// When `v` is longer than 2^64. pub fn encode_vvec(&mut self, v: &[u8]) -> &mut Self { self.encode_varint(u64::try_from(v.as_ref().len()).unwrap()) @@ -351,7 +371,9 @@ impl Encoder { } /// Encode a vector with a varint length using a closure. + /// /// # Panics + /// /// When `f()` writes more than 2^62 bytes. #[allow(clippy::cast_possible_truncation)] pub fn encode_vvec_with(&mut self, f: F) -> &mut Self { diff --git a/neqo-common/src/datagram.rs b/neqo-common/src/datagram.rs index cdd61753a3..1729c8ed8d 100644 --- a/neqo-common/src/datagram.rs +++ b/neqo-common/src/datagram.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::net::SocketAddr; -use std::ops::Deref; +use std::{net::SocketAddr, ops::Deref}; use crate::{hex_with_len, IpTos}; diff --git a/neqo-common/src/event.rs b/neqo-common/src/event.rs index 8598383e76..26052b7571 100644 --- a/neqo-common/src/event.rs +++ b/neqo-common/src/event.rs @@ -4,8 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::iter::Iterator; -use std::marker::PhantomData; +use std::{iter::Iterator, marker::PhantomData}; /// An event provider is able to generate a stream of events. 
pub trait Provider { diff --git a/neqo-common/src/hrtime.rs b/neqo-common/src/hrtime.rs index 1187e39a5a..62d2567d42 100644 --- a/neqo-common/src/hrtime.rs +++ b/neqo-common/src/hrtime.rs @@ -379,12 +379,13 @@ impl Drop for Time { not(all(any(target_os = "macos", target_os = "windows"), feature = "ci")) ))] mod test { - use super::Time; use std::{ thread::{sleep, spawn}, time::{Duration, Instant}, }; + use super::Time; + const ONE: Duration = Duration::from_millis(1); const ONE_AND_A_BIT: Duration = Duration::from_micros(1500); /// A limit for when high resolution timers are disabled. diff --git a/neqo-common/src/incrdecoder.rs b/neqo-common/src/incrdecoder.rs index e78a90f786..8468102cb6 100644 --- a/neqo-common/src/incrdecoder.rs +++ b/neqo-common/src/incrdecoder.rs @@ -21,7 +21,9 @@ impl IncrementalDecoderUint { } /// Consume some data. + /// /// # Panics + /// /// Never, but this is not something the compiler can tell. pub fn consume(&mut self, dv: &mut Decoder) -> Option { if let Some(r) = &mut self.remaining { @@ -87,7 +89,9 @@ impl IncrementalDecoderBuffer { } /// Consume some bytes from the decoder. + /// /// # Panics + /// /// Never; but rust doesn't know that. pub fn consume(&mut self, dv: &mut Decoder) -> Option> { let amount = min(self.remaining, dv.remaining()); @@ -109,7 +113,9 @@ pub struct IncrementalDecoderIgnore { impl IncrementalDecoderIgnore { /// Make a new ignoring decoder. + /// /// # Panics + /// /// If the amount to ignore is zero. 
#[must_use] pub fn new(n: usize) -> Self { diff --git a/neqo-common/src/lib.rs b/neqo-common/src/lib.rs index d31f47c664..853b05705b 100644 --- a/neqo-common/src/lib.rs +++ b/neqo-common/src/lib.rs @@ -18,18 +18,18 @@ pub mod qlog; pub mod timer; pub mod tos; -pub use self::codec::{Decoder, Encoder}; -pub use self::datagram::Datagram; -pub use self::header::Header; -pub use self::incrdecoder::{ - IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, -}; -pub use self::tos::{IpTos, IpTosDscp, IpTosEcn}; - use std::fmt::Write; use enum_map::Enum; +pub use self::{ + codec::{Decoder, Encoder}, + datagram::Datagram, + header::Header, + incrdecoder::{IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint}, + tos::{IpTos, IpTosDscp, IpTosEcn}, +}; + #[must_use] pub fn hex(buf: impl AsRef<[u8]>) -> String { let mut ret = String::with_capacity(buf.as_ref().len() * 2); diff --git a/neqo-common/src/log.rs b/neqo-common/src/log.rs index e376765523..d9c30b98b1 100644 --- a/neqo-common/src/log.rs +++ b/neqo-common/src/log.rs @@ -6,11 +6,10 @@ #![allow(clippy::module_name_repetitions)] +use std::{io::Write, sync::Once, time::Instant}; + use env_logger::Builder; use lazy_static::lazy_static; -use std::io::Write; -use std::sync::Once; -use std::time::Instant; #[macro_export] macro_rules! do_log { diff --git a/neqo-common/src/qlog.rs b/neqo-common/src/qlog.rs index 5ff74750b0..3da8350990 100644 --- a/neqo-common/src/qlog.rs +++ b/neqo-common/src/qlog.rs @@ -31,6 +31,7 @@ pub struct NeqoQlogShared { impl NeqoQlog { /// Create an enabled `NeqoQlog` configuration. + /// /// # Errors /// /// Will return `qlog::Error` if cannot write to the new log. diff --git a/neqo-common/src/timer.rs b/neqo-common/src/timer.rs index 24cb0abdbc..e8532af442 100644 --- a/neqo-common/src/timer.rs +++ b/neqo-common/src/timer.rs @@ -4,9 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::convert::TryFrom; -use std::mem; -use std::time::{Duration, Instant}; +use std::{ + convert::TryFrom, + mem, + time::{Duration, Instant}, +}; /// Internal structure for a timer item. struct TimerItem { @@ -21,10 +23,10 @@ impl TimerItem { } /// A timer queue. -/// This uses a classic timer wheel arrangement, with some characteristics that might be considered peculiar. -/// Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut points). -/// Time is relative, the wheel has an origin time and it is unable to represent times that are more than -/// `granularity * capacity` past that time. +/// This uses a classic timer wheel arrangement, with some characteristics that might be considered +/// peculiar. Each slot in the wheel is sorted (complexity O(N) insertions, but O(logN) to find cut +/// points). Time is relative, the wheel has an origin time and it is unable to represent times that +/// are more than `granularity * capacity` past that time. pub struct Timer { items: Vec>>, now: Instant, @@ -34,7 +36,9 @@ pub struct Timer { impl Timer { /// Construct a new wheel at the given granularity, starting at the given time. + /// /// # Panics + /// /// When `capacity` is too large to fit in `u32` or `granularity` is zero. pub fn new(now: Instant, granularity: Duration, capacity: usize) -> Self { assert!(u32::try_from(capacity).is_ok()); @@ -109,7 +113,9 @@ impl Timer { } /// Asserts if the time given is in the past or too far in the future. + /// /// # Panics + /// /// When `time` is in the past relative to previous calls. pub fn add(&mut self, time: Instant, item: T) { assert!(time >= self.now); @@ -241,9 +247,10 @@ impl Timer { #[cfg(test)] mod test { - use super::{Duration, Instant, Timer}; use lazy_static::lazy_static; + use super::{Duration, Instant, Timer}; + lazy_static! 
{ static ref NOW: Instant = Instant::now(); } diff --git a/neqo-crypto/build.rs b/neqo-crypto/build.rs index c462b3db19..a63c34dedb 100644 --- a/neqo-crypto/build.rs +++ b/neqo-crypto/build.rs @@ -7,13 +7,15 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +use std::{ + collections::HashMap, + env, fs, + path::{Path, PathBuf}, + process::Command, +}; + use bindgen::Builder; use serde_derive::Deserialize; -use std::collections::HashMap; -use std::env; -use std::fs; -use std::path::{Path, PathBuf}; -use std::process::Command; const BINDINGS_DIR: &str = "bindings"; const BINDINGS_CONFIG: &str = "bindings.toml"; diff --git a/neqo-crypto/src/aead.rs b/neqo-crypto/src/aead.rs index 41cdf66469..a2f009a403 100644 --- a/neqo-crypto/src/aead.rs +++ b/neqo-crypto/src/aead.rs @@ -4,6 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + convert::{TryFrom, TryInto}, + fmt, + ops::{Deref, DerefMut}, + os::raw::{c_char, c_uint}, + ptr::null_mut, +}; + use crate::{ constants::{Cipher, Version}, err::Res, @@ -13,14 +21,6 @@ use crate::{ ssl::{self, PRUint16, PRUint64, PRUint8, SSLAeadContext}, }; -use std::{ - convert::{TryFrom, TryInto}, - fmt, - ops::{Deref, DerefMut}, - os::raw::{c_char, c_uint}, - ptr::null_mut, -}; - experimental_api!(SSL_MakeAead( version: PRUint16, cipher: PRUint16, @@ -62,6 +62,7 @@ impl RealAead { /// Create a new AEAD based on the indicated TLS version and cipher suite. /// /// # Errors + /// /// Returns `Error` when the supporting NSS functions fail. pub fn new( _fuzzing: bool, @@ -107,6 +108,7 @@ impl RealAead { /// the value provided in `Aead::expansion`. /// /// # Errors + /// /// If the input can't be protected or any input is too large for NSS. pub fn encrypt<'a>( &self, @@ -139,6 +141,7 @@ impl RealAead { /// the final result will be shorter. /// /// # Errors + /// /// If the input isn't authenticated or any input is too large for NSS. 
pub fn decrypt<'a>( &self, diff --git a/neqo-crypto/src/aead_fuzzing.rs b/neqo-crypto/src/aead_fuzzing.rs index 4293d2bc70..4e5a6de07f 100644 --- a/neqo-crypto/src/aead_fuzzing.rs +++ b/neqo-crypto/src/aead_fuzzing.rs @@ -4,12 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{Cipher, Version}; -use crate::err::{sec::SEC_ERROR_BAD_DATA, Error, Res}; -use crate::p11::SymKey; -use crate::RealAead; use std::fmt; +use crate::{ + constants::{Cipher, Version}, + err::{sec::SEC_ERROR_BAD_DATA, Error, Res}, + p11::SymKey, + RealAead, +}; + pub const FIXED_TAG_FUZZING: &[u8] = &[0x0a; 16]; pub struct FuzzingAead { @@ -76,8 +79,8 @@ impl FuzzingAead { let len_encrypted = input.len() - FIXED_TAG_FUZZING.len(); // Check that: // 1) expansion is all zeros and - // 2) if the encrypted data is also supplied that at least some values - // are no zero (otherwise padding will be interpreted as a valid packet) + // 2) if the encrypted data is also supplied that at least some values are no zero + // (otherwise padding will be interpreted as a valid packet) if &input[len_encrypted..] == FIXED_TAG_FUZZING && (len_encrypted == 0 || input[..len_encrypted].iter().any(|x| *x != 0x0)) { diff --git a/neqo-crypto/src/agent.rs b/neqo-crypto/src/agent.rs index 3868c525bc..cd0bb4cb12 100644 --- a/neqo-crypto/src/agent.rs +++ b/neqo-crypto/src/agent.rs @@ -4,6 +4,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + cell::RefCell, + convert::TryFrom, + ffi::{CStr, CString}, + mem::{self, MaybeUninit}, + ops::{Deref, DerefMut}, + os::raw::{c_uint, c_void}, + pin::Pin, + ptr::{null, null_mut}, + rc::Rc, + time::Instant, +}; + +use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; + pub use crate::{ agentio::{as_c_void, Record, RecordList}, cert::CertificateInfo, @@ -25,19 +40,6 @@ use crate::{ ssl::{self, PRBool}, time::{Time, TimeHolder}, }; -use neqo_common::{hex_snip_middle, hex_with_len, qdebug, qinfo, qtrace, qwarn}; -use std::{ - cell::RefCell, - convert::TryFrom, - ffi::{CStr, CString}, - mem::{self, MaybeUninit}, - ops::{Deref, DerefMut}, - os::raw::{c_uint, c_void}, - pin::Pin, - ptr::{null, null_mut}, - rc::Rc, - time::Instant, -}; /// The maximum number of tickets to remember for a given connection. const MAX_TICKETS: usize = 4; @@ -157,6 +159,7 @@ impl SecretAgentPreInfo { } /// # Panics + /// /// If `usize` is less than 32 bits and the value is too large. #[must_use] pub fn max_early_data(&self) -> usize { @@ -183,6 +186,7 @@ impl SecretAgentPreInfo { /// which contains a valid ECH configuration. /// /// # Errors + /// /// When the public name is not valid UTF-8. (Note: names should be ASCII.) pub fn ech_public_name(&self) -> Res> { if self.info.valuesSet & ssl::ssl_preinfo_ech == 0 || self.info.echPublicName.is_null() { @@ -395,6 +399,7 @@ impl SecretAgent { /// Default configuration. /// /// # Errors + /// /// If `set_version_range` fails. fn configure(&mut self, grease: bool) -> Res<()> { self.set_version_range(TLS_VERSION_1_3, TLS_VERSION_1_3)?; @@ -411,6 +416,7 @@ impl SecretAgent { /// Set the versions that are supported. /// /// # Errors + /// /// If the range of versions isn't supported. pub fn set_version_range(&mut self, min: Version, max: Version) -> Res<()> { let range = ssl::SSLVersionRange { min, max }; @@ -420,6 +426,7 @@ impl SecretAgent { /// Enable a set of ciphers. 
Note that the order of these is not respected. /// /// # Errors + /// /// If NSS can't enable or disable ciphers. pub fn set_ciphers(&mut self, ciphers: &[Cipher]) -> Res<()> { if self.state != HandshakeState::New { @@ -447,6 +454,7 @@ impl SecretAgent { /// Set key exchange groups. /// /// # Errors + /// /// If the underlying API fails (which shouldn't happen). pub fn set_groups(&mut self, groups: &[Group]) -> Res<()> { // SSLNamedGroup is a different size to Group, so copy one by one. @@ -464,6 +472,7 @@ impl SecretAgent { /// Set the number of additional key shares that will be sent in the client hello /// /// # Errors + /// /// If the underlying API fails (which shouldn't happen). pub fn send_additional_key_shares(&mut self, count: usize) -> Res<()> { secstatus_to_res(unsafe { @@ -474,6 +483,7 @@ impl SecretAgent { /// Set TLS options. /// /// # Errors + /// /// Returns an error if the option or option value is invalid; i.e., never. pub fn set_option(&mut self, opt: ssl::Opt, value: bool) -> Res<()> { opt.set(self.fd, value) @@ -482,6 +492,7 @@ impl SecretAgent { /// Enable 0-RTT. /// /// # Errors + /// /// See `set_option`. pub fn enable_0rtt(&mut self) -> Res<()> { self.set_option(ssl::Opt::EarlyData, true) @@ -490,6 +501,7 @@ impl SecretAgent { /// Disable the `EndOfEarlyData` message. /// /// # Errors + /// /// See `set_option`. pub fn disable_end_of_early_data(&mut self) -> Res<()> { self.set_option(ssl::Opt::SuppressEndOfEarlyData, true) @@ -503,8 +515,11 @@ impl SecretAgent { /// 255 octets in length. /// /// # Errors + /// /// This should always panic rather than return an error. + /// /// # Panics + /// /// If any of the provided `protocols` are more than 255 bytes long. /// /// [RFC7301]: https://datatracker.ietf.org/doc/html/rfc7301 @@ -549,11 +564,12 @@ impl SecretAgent { /// Install an extension handler. /// - /// This can be called multiple times with different values for `ext`. 
The handler is provided as - /// `Rc>` so that the caller is able to hold a reference to the handler and later - /// access any state that it accumulates. + /// This can be called multiple times with different values for `ext`. The handler is provided + /// as `Rc>` so that the caller is able to hold a reference to the handler + /// and later access any state that it accumulates. /// /// # Errors + /// /// When the extension handler can't be successfully installed. pub fn extension_handler( &mut self, @@ -597,6 +613,7 @@ impl SecretAgent { /// Calling this function collects all the relevant information. /// /// # Errors + /// /// When the underlying socket functions fail. pub fn preinfo(&self) -> Res { SecretAgentPreInfo::new(self.fd) @@ -615,7 +632,9 @@ impl SecretAgent { } /// Call this function to mark the peer as authenticated. + /// /// # Panics + /// /// If the handshake doesn't need to be authenticated. pub fn authenticated(&mut self, status: AuthenticationStatus) { assert!(self.state.authentication_needed()); @@ -664,6 +683,7 @@ impl SecretAgent { /// function if you want to proceed, because this will mark the certificate as OK. /// /// # Errors + /// /// When the handshake fails this returns an error. pub fn handshake(&mut self, now: Instant, input: &[u8]) -> Res> { self.now.set(now)?; @@ -700,6 +720,7 @@ impl SecretAgent { /// If you send data from multiple epochs, you might end up being sad. /// /// # Errors + /// /// When the handshake fails this returns an error. pub fn handshake_raw(&mut self, now: Instant, input: Option) -> Res { self.now.set(now)?; @@ -727,6 +748,7 @@ impl SecretAgent { } /// # Panics + /// /// If setup fails. #[allow(unknown_lints, clippy::branches_sharing_code)] pub fn close(&mut self) { @@ -832,6 +854,7 @@ impl Client { /// Create a new client agent. /// /// # Errors + /// /// Errors returned if the socket can't be created or configured. 
pub fn new(server_name: impl Into, grease: bool) -> Res { let server_name = server_name.into(); @@ -921,6 +944,7 @@ impl Client { /// Enable resumption, using a token previously provided. /// /// # Errors + /// /// Error returned when the resumption token is invalid or /// the socket is not able to use the value. pub fn enable_resumption(&mut self, token: impl AsRef<[u8]>) -> Res<()> { @@ -944,6 +968,7 @@ impl Client { /// ECH greasing. When that is done, there is no need to look for `EchRetry` /// /// # Errors + /// /// Error returned when the configuration is invalid. pub fn enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { let config = ech_config_list.as_ref(); @@ -996,7 +1021,8 @@ pub enum ZeroRttCheckResult { Fail, } -/// A `ZeroRttChecker` is used by the agent to validate the application token (as provided by `send_ticket`) +/// A `ZeroRttChecker` is used by the agent to validate the application token (as provided by +/// `send_ticket`) pub trait ZeroRttChecker: std::fmt::Debug + std::marker::Unpin { fn check(&self, token: &[u8]) -> ZeroRttCheckResult; } @@ -1037,6 +1063,7 @@ impl Server { /// Create a new server agent. /// /// # Errors + /// /// Errors returned when NSS fails. pub fn new(certificates: &[impl AsRef]) -> Res { let mut agent = SecretAgent::new()?; @@ -1090,7 +1117,8 @@ impl Server { ssl::SSLHelloRetryRequestAction::ssl_hello_retry_reject_0rtt } ZeroRttCheckResult::HelloRetryRequest(tok) => { - // Don't bother propagating errors from this, because it should be caught in testing. + // Don't bother propagating errors from this, because it should be caught in + // testing. assert!(tok.len() <= usize::try_from(retry_token_max).unwrap()); let slc = std::slice::from_raw_parts_mut(retry_token, tok.len()); slc.copy_from_slice(&tok); @@ -1104,6 +1132,7 @@ impl Server { /// via the Deref implementation on Server. /// /// # Errors + /// /// Returns an error if the underlying NSS functions fail. 
pub fn enable_0rtt( &mut self, @@ -1131,6 +1160,7 @@ impl Server { /// The records that are sent are captured and returned. /// /// # Errors + /// /// If NSS is unable to send a ticket, or if this agent is incorrectly configured. pub fn send_ticket(&mut self, now: Instant, extra: &[u8]) -> Res { self.agent.now.set(now)?; @@ -1146,6 +1176,7 @@ impl Server { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Fails when NSS cannot create a key pair. pub fn enable_ech( &mut self, diff --git a/neqo-crypto/src/agentio.rs b/neqo-crypto/src/agentio.rs index 1d39b2398a..2bcc540530 100644 --- a/neqo-crypto/src/agentio.rs +++ b/neqo-crypto/src/agentio.rs @@ -4,21 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{ContentType, Epoch}; -use crate::err::{nspr, Error, PR_SetError, Res}; -use crate::prio; -use crate::ssl; +use std::{ + cmp::min, + convert::{TryFrom, TryInto}, + fmt, mem, + ops::Deref, + os::raw::{c_uint, c_void}, + pin::Pin, + ptr::{null, null_mut}, + vec::Vec, +}; use neqo_common::{hex, hex_with_len, qtrace}; -use std::cmp::min; -use std::convert::{TryFrom, TryInto}; -use std::fmt; -use std::mem; -use std::ops::Deref; -use std::os::raw::{c_uint, c_void}; -use std::pin::Pin; -use std::ptr::{null, null_mut}; -use std::vec::Vec; + +use crate::{ + constants::{ContentType, Epoch}, + err::{nspr, Error, PR_SetError, Res}, + prio, ssl, +}; // Alias common types. type PrFd = *mut prio::PRFileDesc; diff --git a/neqo-crypto/src/cert.rs b/neqo-crypto/src/cert.rs index 14d91843d3..64e63ec71a 100644 --- a/neqo-crypto/src/cert.rs +++ b/neqo-crypto/src/cert.rs @@ -4,18 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::err::secstatus_to_res; -use crate::p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}; -use crate::ssl::{ - PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, - SSL_PeerStapledOCSPResponses, +use std::{ + convert::TryFrom, + ptr::{addr_of, NonNull}, + slice, }; -use neqo_common::qerror; -use std::convert::TryFrom; -use std::ptr::{addr_of, NonNull}; +use neqo_common::qerror; -use std::slice; +use crate::{ + err::secstatus_to_res, + p11::{CERTCertListNode, CERT_GetCertificateDer, CertList, Item, SECItem, SECItemArray}, + ssl::{ + PRFileDesc, SSL_PeerCertificateChain, SSL_PeerSignedCertTimestamps, + SSL_PeerStapledOCSPResponses, + }, +}; pub struct CertificateInfo { certs: CertList, diff --git a/neqo-crypto/src/ech.rs b/neqo-crypto/src/ech.rs index c4b33b0bee..1f54c4592e 100644 --- a/neqo-crypto/src/ech.rs +++ b/neqo-crypto/src/ech.rs @@ -4,6 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + convert::TryFrom, + ffi::CString, + os::raw::{c_char, c_uint}, + ptr::{addr_of_mut, null_mut}, +}; + +use neqo_common::qtrace; + use crate::{ err::{ssl::SSL_ERROR_ECH_RETRY_WITH_ECH, Error, Res}, experimental_api, @@ -13,14 +22,6 @@ use crate::{ }, ssl::{PRBool, PRFileDesc}, }; -use neqo_common::qtrace; -use std::{ - convert::TryFrom, - ffi::CString, - os::raw::{c_char, c_uint}, - ptr::{addr_of_mut, null_mut}, -}; - pub use crate::{ p11::{HpkeAeadId as AeadId, HpkeKdfId as KdfId, HpkeKemId as KemId}, ssl::HpkeSymmetricSuite as SymmetricSuite, @@ -89,8 +90,11 @@ pub fn convert_ech_error(fd: *mut PRFileDesc, err: Error) -> Error { /// Generate a key pair for encrypted client hello (ECH). /// /// # Errors +/// /// When NSS fails to generate a key pair or when the KEM is not supported. +/// /// # Panics +/// /// When underlying types aren't large enough to hold keys. So never. 
pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { let slot = Slot::internal()?; @@ -153,6 +157,7 @@ pub fn generate_keys() -> Res<(PrivateKey, PublicKey)> { /// Encode a configuration for encrypted client hello (ECH). /// /// # Errors +/// /// When NSS fails to generate a valid configuration encoding (i.e., unlikely). pub fn encode_config(config: u8, public_name: &str, pk: &PublicKey) -> Res> { // A sensible fixed value for the maximum length of a name. diff --git a/neqo-crypto/src/err.rs b/neqo-crypto/src/err.rs index fae81f9cb9..187303d2a9 100644 --- a/neqo-crypto/src/err.rs +++ b/neqo-crypto/src/err.rs @@ -7,8 +7,7 @@ #![allow(dead_code)] #![allow(clippy::upper_case_acronyms)] -use std::os::raw::c_char; -use std::str::Utf8Error; +use std::{os::raw::c_char, str::Utf8Error}; use crate::ssl::{SECStatus, SECSuccess}; @@ -19,9 +18,7 @@ mod codes { include!(concat!(env!("OUT_DIR"), "/nss_sslerr.rs")); include!(concat!(env!("OUT_DIR"), "/mozpkix.rs")); } -pub use codes::mozilla_pkix_ErrorCode as mozpkix; -pub use codes::SECErrorCodes as sec; -pub use codes::SSLErrorCodes as ssl; +pub use codes::{mozilla_pkix_ErrorCode as mozpkix, SECErrorCodes as sec, SSLErrorCodes as ssl}; pub mod nspr { include!(concat!(env!("OUT_DIR"), "/nspr_err.rs")); } @@ -137,10 +134,13 @@ pub fn is_blocked(result: &Res<()>) -> bool { #[cfg(test)] mod tests { - use crate::err::{self, is_blocked, secstatus_to_res, Error, PRErrorCode, PR_SetError}; - use crate::ssl::{SECFailure, SECSuccess}; use test_fixture::fixture_init; + use crate::{ + err::{self, is_blocked, secstatus_to_res, Error, PRErrorCode, PR_SetError}, + ssl::{SECFailure, SECSuccess}, + }; + fn set_error_code(code: PRErrorCode) { // This code doesn't work without initializing NSS first. fixture_init(); diff --git a/neqo-crypto/src/ext.rs b/neqo-crypto/src/ext.rs index 010b9f120e..310e87a1b7 100644 --- a/neqo-crypto/src/ext.rs +++ b/neqo-crypto/src/ext.rs @@ -4,6 +4,14 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + convert::TryFrom, + os::raw::{c_uint, c_void}, + pin::Pin, + rc::Rc, +}; + use crate::{ agentio::as_c_void, constants::{Extension, HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, @@ -13,13 +21,6 @@ use crate::{ SSLExtensionHandler, SSLExtensionWriter, SSLHandshakeType, }, }; -use std::{ - cell::RefCell, - convert::TryFrom, - os::raw::{c_uint, c_void}, - pin::Pin, - rc::Rc, -}; experimental_api!(SSL_InstallExtensionHooks( fd: *mut PRFileDesc, @@ -121,11 +122,13 @@ impl ExtensionTracker { /// Use the provided handler to manage an extension. This is quite unsafe. /// /// # Safety + /// /// The holder of this `ExtensionTracker` needs to ensure that it lives at /// least as long as the file descriptor, as NSS provides no way to remove /// an extension handler once it is configured. /// /// # Errors + /// /// If the underlying NSS API fails to register a handler. pub unsafe fn new( fd: *mut PRFileDesc, diff --git a/neqo-crypto/src/hkdf.rs b/neqo-crypto/src/hkdf.rs index 44df30ecfd..e3cf77418c 100644 --- a/neqo-crypto/src/hkdf.rs +++ b/neqo-crypto/src/hkdf.rs @@ -4,6 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + convert::TryFrom, + os::raw::{c_char, c_uint}, + ptr::null_mut, +}; + use crate::{ constants::{ Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, @@ -16,12 +22,6 @@ use crate::{ }, }; -use std::{ - convert::TryFrom, - os::raw::{c_char, c_uint}, - ptr::null_mut, -}; - experimental_api!(SSL_HkdfExtract( version: Version, cipher: Cipher, @@ -54,6 +54,7 @@ fn key_size(version: Version, cipher: Cipher) -> Res { /// Generate a random key of the right size for the given suite. /// /// # Errors +/// /// Only if NSS fails. 
pub fn generate_key(version: Version, cipher: Cipher) -> Res { import_key(version, &random(key_size(version, cipher)?)) @@ -62,6 +63,7 @@ pub fn generate_key(version: Version, cipher: Cipher) -> Res { /// Import a symmetric key for use with HKDF. /// /// # Errors +/// /// Errors returned if the key buffer is an incompatible size or the NSS functions fail. pub fn import_key(version: Version, buf: &[u8]) -> Res { if version != TLS_VERSION_1_3 { @@ -85,6 +87,7 @@ pub fn import_key(version: Version, buf: &[u8]) -> Res { /// Extract a PRK from the given salt and IKM using the algorithm defined in RFC 5869. /// /// # Errors +/// /// Errors returned if inputs are too large or the NSS functions fail. pub fn extract( version: Version, @@ -104,6 +107,7 @@ pub fn extract( /// Expand a PRK using the HKDF-Expand-Label function defined in RFC 8446. /// /// # Errors +/// /// Errors returned if inputs are too large or the NSS functions fail. pub fn expand_label( version: Version, diff --git a/neqo-crypto/src/hp.rs b/neqo-crypto/src/hp.rs index 2409521903..2479eff8f5 100644 --- a/neqo-crypto/src/hp.rs +++ b/neqo-crypto/src/hp.rs @@ -4,6 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cell::RefCell, + convert::TryFrom, + fmt::{self, Debug}, + os::raw::{c_char, c_int, c_uint}, + ptr::{addr_of_mut, null, null_mut}, + rc::Rc, +}; + use crate::{ constants::{ Cipher, Version, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, @@ -16,14 +25,6 @@ use crate::{ CK_CHACHA20_PARAMS, CK_MECHANISM_TYPE, }, }; -use std::{ - cell::RefCell, - convert::TryFrom, - fmt::{self, Debug}, - os::raw::{c_char, c_int, c_uint}, - ptr::{addr_of_mut, null, null_mut}, - rc::Rc, -}; experimental_api!(SSL_HkdfExpandLabelWithMech( version: Version, @@ -62,8 +63,11 @@ impl HpKey { /// QUIC-specific API for extracting a header-protection key. /// /// # Errors + /// /// Errors if HKDF fails or if the label is too long to fit in a `c_uint`. 
+ /// /// # Panics + /// /// When `cipher` is not known to this code. #[allow(clippy::cast_sign_loss)] // Cast for PK11_GetBlockSize is safe. pub fn extract(version: Version, cipher: Cipher, prk: &SymKey, label: &str) -> Res { @@ -141,9 +145,12 @@ impl HpKey { /// Generate a header protection mask for QUIC. /// /// # Errors + /// /// An error is returned if the NSS functions fail; a sample of the /// wrong size is the obvious cause. + /// /// # Panics + /// /// When the mechanism for our key is not supported. pub fn mask(&self, sample: &[u8]) -> Res> { let mut output = vec![0_u8; self.block_size()]; diff --git a/neqo-crypto/src/lib.rs b/neqo-crypto/src/lib.rs index 2533c727e7..05424ee1f3 100644 --- a/neqo-crypto/src/lib.rs +++ b/neqo-crypto/src/lib.rs @@ -37,15 +37,19 @@ pub mod selfencrypt; mod ssl; mod time; +use std::{ + ffi::CString, + path::{Path, PathBuf}, + ptr::null, +}; + #[cfg(not(feature = "fuzzing"))] pub use self::aead::RealAead as Aead; - -#[cfg(feature = "fuzzing")] -pub use self::aead_fuzzing::FuzzingAead as Aead; - #[cfg(feature = "fuzzing")] pub use self::aead::RealAead; - +#[cfg(feature = "fuzzing")] +pub use self::aead_fuzzing::FuzzingAead as Aead; +use self::once::OnceResult; pub use self::{ agent::{ Agent, AllowZeroRtt, Client, HandshakeState, Record, RecordList, ResumptionToken, @@ -66,14 +70,6 @@ pub use self::{ ssl::Opt, }; -use self::once::OnceResult; - -use std::{ - ffi::CString, - path::{Path, PathBuf}, - ptr::null, -}; - const MINIMUM_NSS_VERSION: &str = "3.97"; #[allow(non_upper_case_globals, clippy::redundant_static_lifetimes)] @@ -119,8 +115,11 @@ fn version_check() { ); } -/// Initialize NSS. This only executes the initialization routines once, so if there is any chance that +/// Initialize NSS. This only executes the initialization routines once, so if there is any chance +/// that +/// /// # Panics +/// /// When NSS initialization fails. pub fn init() { // Set time zero. 
@@ -153,7 +152,9 @@ fn enable_ssl_trace() { } /// Initialize with a database. +/// /// # Panics +/// /// If NSS cannot be initialized. pub fn init_db>(dir: P) { time::init(); @@ -196,6 +197,7 @@ pub fn init_db>(dir: P) { } /// # Panics +/// /// If NSS isn't initialized. pub fn assert_initialized() { unsafe { diff --git a/neqo-crypto/src/p11.rs b/neqo-crypto/src/p11.rs index ebd641c17e..508d240062 100644 --- a/neqo-crypto/src/p11.rs +++ b/neqo-crypto/src/p11.rs @@ -9,8 +9,6 @@ #![allow(non_camel_case_types)] #![allow(non_snake_case)] -use crate::err::{secstatus_to_res, Error, Res}; -use neqo_common::hex_with_len; use std::{ convert::TryFrom, mem, @@ -19,6 +17,10 @@ use std::{ ptr::null_mut, }; +use neqo_common::hex_with_len; + +use crate::err::{secstatus_to_res, Error, Res}; + #[allow(clippy::upper_case_acronyms)] #[allow(clippy::unreadable_literal)] #[allow(unknown_lints, clippy::borrow_as_ptr)] @@ -39,6 +41,7 @@ macro_rules! scoped_ptr { /// Create a new instance of `$scoped` from a pointer. /// /// # Errors + /// /// When passed a null pointer generates an error. pub fn from_ptr(ptr: *mut $target) -> Result { if ptr.is_null() { @@ -80,8 +83,11 @@ impl PublicKey { /// Get the HPKE serialization of the public key. /// /// # Errors + /// /// When the key cannot be exported, which can be because the type is not supported. + /// /// # Panics + /// /// When keys are too large to fit in `c_uint/usize`. So only on programming error. pub fn key_data(&self) -> Res> { let mut buf = vec![0; 100]; @@ -124,9 +130,12 @@ impl PrivateKey { /// Get the bits of the private key. /// /// # Errors + /// /// When the key cannot be exported, which can be because the type is not supported /// or because the key data cannot be extracted from the PKCS#11 module. + /// /// # Panics + /// /// When the values are too large to fit. So never. pub fn key_data(&self) -> Res> { let mut key_item = Item::make_empty(); @@ -188,6 +197,7 @@ impl SymKey { /// You really don't want to use this. 
/// /// # Errors + /// /// Internal errors in case of failures in NSS. pub fn as_bytes(&self) -> Res<&[u8]> { secstatus_to_res(unsafe { PK11_ExtractKeyValue(self.ptr) })?; @@ -269,6 +279,7 @@ impl Item { /// content that is referenced there. /// /// # Safety + /// /// This dereferences two pointers. It doesn't get much less safe. pub unsafe fn into_vec(self) -> Vec { let b = self.ptr.as_ref().unwrap(); @@ -280,7 +291,9 @@ impl Item { } /// Generate a randomized buffer. +/// /// # Panics +/// /// When `size` is too large or NSS fails. #[must_use] pub fn random(size: usize) -> Vec { @@ -294,9 +307,10 @@ pub fn random(size: usize) -> Vec { #[cfg(test)] mod test { - use super::random; use test_fixture::fixture_init; + use super::random; + #[test] fn randomness() { fixture_init(); diff --git a/neqo-crypto/src/replay.rs b/neqo-crypto/src/replay.rs index 8f35ed6401..d4d3677f5c 100644 --- a/neqo-crypto/src/replay.rs +++ b/neqo-crypto/src/replay.rs @@ -4,11 +4,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{ - err::Res, - ssl::PRFileDesc, - time::{Interval, PRTime, Time}, -}; use std::{ convert::{TryFrom, TryInto}, ops::{Deref, DerefMut}, @@ -17,6 +12,12 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + err::Res, + ssl::PRFileDesc, + time::{Interval, PRTime, Time}, +}; + // This is an opaque struct in NSS. #[allow(clippy::upper_case_acronyms)] #[allow(clippy::empty_enum)] @@ -55,6 +56,7 @@ impl AntiReplay { /// See the documentation in NSS for advice on how to set these values. /// /// # Errors + /// /// Returns an error if `now` is in the past relative to our baseline or /// NSS is unable to generate an anti-replay context. pub fn new(now: Instant, window: Duration, k: usize, bits: usize) -> Res { diff --git a/neqo-crypto/src/secrets.rs b/neqo-crypto/src/secrets.rs index 7fff5d4f68..75677636b6 100644 --- a/neqo-crypto/src/secrets.rs +++ b/neqo-crypto/src/secrets.rs @@ -4,6 +4,10 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +use std::{os::raw::c_void, pin::Pin}; + +use neqo_common::qdebug; + use crate::{ agentio::as_c_void, constants::Epoch, @@ -11,8 +15,6 @@ use crate::{ p11::{PK11SymKey, PK11_ReferenceSymKey, SymKey}, ssl::{PRFileDesc, SSLSecretCallback, SSLSecretDirection}, }; -use neqo_common::qdebug; -use std::{os::raw::c_void, pin::Pin}; experimental_api!(SSL_SecretCallback( fd: *mut PRFileDesc, diff --git a/neqo-crypto/src/selfencrypt.rs b/neqo-crypto/src/selfencrypt.rs index 62d7057435..b8a63153fd 100644 --- a/neqo-crypto/src/selfencrypt.rs +++ b/neqo-crypto/src/selfencrypt.rs @@ -4,14 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::constants::{Cipher, Version}; -use crate::err::{Error, Res}; -use crate::p11::{random, SymKey}; -use crate::{hkdf, Aead}; +use std::mem; use neqo_common::{hex, qinfo, qtrace, Encoder}; -use std::mem; +use crate::{ + constants::{Cipher, Version}, + err::{Error, Res}, + hkdf, + p11::{random, SymKey}, + Aead, +}; #[derive(Debug)] pub struct SelfEncrypt { @@ -27,6 +30,7 @@ impl SelfEncrypt { const SALT_LENGTH: usize = 16; /// # Errors + /// /// Failure to generate a new HKDF key using NSS results in an error. pub fn new(version: Version, cipher: Cipher) -> Res { let key = hkdf::generate_key(version, cipher)?; @@ -46,9 +50,11 @@ impl SelfEncrypt { Aead::new(false, self.version, self.cipher, &secret, "neqo self") } - /// Rotate keys. This causes any previous key that is being held to be replaced by the current key. + /// Rotate keys. This causes any previous key that is being held to be replaced by the current + /// key. /// /// # Errors + /// /// Failure to generate a new HKDF key using NSS results in an error. 
pub fn rotate(&mut self) -> Res<()> { let new_key = hkdf::generate_key(self.version, self.cipher)?; @@ -65,6 +71,7 @@ impl SelfEncrypt { /// caller is responsible for carrying the AAD as appropriate. /// /// # Errors + /// /// Failure to protect using NSS AEAD APIs produces an error. pub fn seal(&self, aad: &[u8], plaintext: &[u8]) -> Res> { // Format is: @@ -117,6 +124,7 @@ impl SelfEncrypt { /// Open the protected `ciphertext`. /// /// # Errors + /// /// Returns an error when the self-encrypted object is invalid; /// when the keys have been rotated; or when NSS fails. #[allow(clippy::similar_names)] // aad is similar to aead diff --git a/neqo-crypto/src/ssl.rs b/neqo-crypto/src/ssl.rs index 08776f34ba..8aaacffae6 100644 --- a/neqo-crypto/src/ssl.rs +++ b/neqo-crypto/src/ssl.rs @@ -15,11 +15,13 @@ clippy::borrow_as_ptr )] -use crate::constants::Epoch; -use crate::err::{secstatus_to_res, Res}; - use std::os::raw::{c_uint, c_void}; +use crate::{ + constants::Epoch, + err::{secstatus_to_res, Res}, +}; + include!(concat!(env!("OUT_DIR"), "/nss_ssl.rs")); mod SSLOption { include!(concat!(env!("OUT_DIR"), "/nss_sslopt.rs")); diff --git a/neqo-crypto/src/time.rs b/neqo-crypto/src/time.rs index 981ac6f420..84dbfdb4a5 100644 --- a/neqo-crypto/src/time.rs +++ b/neqo-crypto/src/time.rs @@ -6,13 +6,6 @@ #![allow(clippy::upper_case_acronyms)] -use crate::{ - agentio::as_c_void, - err::{Error, Res}, - once::OnceResult, - ssl::{PRFileDesc, SSLTimeFunc}, -}; - use std::{ boxed::Box, convert::{TryFrom, TryInto}, @@ -22,6 +15,13 @@ use std::{ time::{Duration, Instant}, }; +use crate::{ + agentio::as_c_void, + err::{Error, Res}, + once::OnceResult, + ssl::{PRFileDesc, SSLTimeFunc}, +}; + include!(concat!(env!("OUT_DIR"), "/nspr_time.rs")); experimental_api!(SSL_SetTimeFunc( @@ -207,13 +207,14 @@ impl Default for TimeHolder { #[cfg(test)] mod test { - use super::{get_base, init, Interval, PRTime, Time}; - use crate::err::Res; use std::{ convert::{TryFrom, TryInto}, time::{Duration, 
Instant}, }; + use super::{get_base, init, Interval, PRTime, Time}; + use crate::err::Res; + #[test] fn convert_stable() { init(); diff --git a/neqo-crypto/tests/aead.rs b/neqo-crypto/tests/aead.rs index b9721e3d64..0ee1e66c38 100644 --- a/neqo-crypto/tests/aead.rs +++ b/neqo-crypto/tests/aead.rs @@ -2,9 +2,10 @@ #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] -use neqo_crypto::constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use neqo_crypto::hkdf; -use neqo_crypto::Aead; +use neqo_crypto::{ + constants::{Cipher, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, + hkdf, Aead, +}; use test_fixture::fixture_init; const AAD: &[u8] = &[ diff --git a/neqo-crypto/tests/agent.rs b/neqo-crypto/tests/agent.rs index 27017f0a4e..c2c83c467c 100644 --- a/neqo-crypto/tests/agent.rs +++ b/neqo-crypto/tests/agent.rs @@ -1,20 +1,21 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +use std::boxed::Box; + use neqo_crypto::{ generate_ech_keys, AuthenticationStatus, Client, Error, HandshakeState, SecretAgentPreInfo, Server, ZeroRttCheckResult, ZeroRttChecker, TLS_AES_128_GCM_SHA256, TLS_CHACHA20_POLY1305_SHA256, TLS_GRP_EC_SECP256R1, TLS_GRP_EC_X25519, TLS_VERSION_1_3, }; -use std::boxed::Box; - mod handshake; +use test_fixture::{fixture_init, now}; + use crate::handshake::{ connect, connect_fail, forward_records, resumption_setup, PermissiveZeroRttChecker, Resumption, ZERO_RTT_TOKEN_DATA, }; -use test_fixture::{fixture_init, now}; #[test] fn make_client() { diff --git a/neqo-crypto/tests/ext.rs b/neqo-crypto/tests/ext.rs index 02d78603b6..9ae81133f5 100644 --- a/neqo-crypto/tests/ext.rs +++ b/neqo-crypto/tests/ext.rs @@ -1,11 +1,13 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -use neqo_crypto::constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}; -use neqo_crypto::ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}; -use neqo_crypto::{Client, 
Server}; -use std::cell::RefCell; -use std::rc::Rc; +use std::{cell::RefCell, rc::Rc}; + +use neqo_crypto::{ + constants::{HandshakeMessage, TLS_HS_CLIENT_HELLO, TLS_HS_ENCRYPTED_EXTENSIONS}, + ext::{ExtensionHandler, ExtensionHandlerResult, ExtensionWriterResult}, + Client, Server, +}; use test_fixture::fixture_init; mod handshake; diff --git a/neqo-crypto/tests/handshake.rs b/neqo-crypto/tests/handshake.rs index 779ec5ac22..b2d8b9cc34 100644 --- a/neqo-crypto/tests/handshake.rs +++ b/neqo-crypto/tests/handshake.rs @@ -1,12 +1,12 @@ #![allow(dead_code)] +use std::{mem, time::Instant}; + use neqo_common::qinfo; use neqo_crypto::{ AntiReplay, AuthenticationStatus, Client, HandshakeState, RecordList, Res, ResumptionToken, SecretAgent, Server, ZeroRttCheckResult, ZeroRttChecker, }; -use std::mem; -use std::time::Instant; use test_fixture::{anti_replay, fixture_init, now}; /// Consume records until the handshake state changes. diff --git a/neqo-crypto/tests/hkdf.rs b/neqo-crypto/tests/hkdf.rs index 10a66f10b7..b4dde482f8 100644 --- a/neqo-crypto/tests/hkdf.rs +++ b/neqo-crypto/tests/hkdf.rs @@ -1,11 +1,13 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -use neqo_crypto::constants::{ - Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, - TLS_VERSION_1_3, +use neqo_crypto::{ + constants::{ + Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, + TLS_VERSION_1_3, + }, + hkdf, SymKey, }; -use neqo_crypto::{hkdf, SymKey}; use test_fixture::fixture_init; const SALT: &[u8] = &[ diff --git a/neqo-crypto/tests/hp.rs b/neqo-crypto/tests/hp.rs index 2e0aea6b8a..43b96869d8 100644 --- a/neqo-crypto/tests/hp.rs +++ b/neqo-crypto/tests/hp.rs @@ -1,6 +1,8 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] +use std::mem; + use neqo_crypto::{ constants::{ Cipher, TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256, @@ -9,7 
+11,6 @@ use neqo_crypto::{ hkdf, hp::HpKey, }; -use std::mem; use test_fixture::fixture_init; fn make_hp(cipher: Cipher) -> HpKey { diff --git a/neqo-crypto/tests/selfencrypt.rs b/neqo-crypto/tests/selfencrypt.rs index 5828f09392..fd9d4ea1ea 100644 --- a/neqo-crypto/tests/selfencrypt.rs +++ b/neqo-crypto/tests/selfencrypt.rs @@ -2,8 +2,12 @@ #![warn(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] -use neqo_crypto::constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use neqo_crypto::{init, selfencrypt::SelfEncrypt, Error}; +use neqo_crypto::{ + constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, + init, + selfencrypt::SelfEncrypt, + Error, +}; #[test] fn se_create() { diff --git a/neqo-http3/src/buffered_send_stream.rs b/neqo-http3/src/buffered_send_stream.rs index 2a7d01bb74..4f6761fa80 100644 --- a/neqo-http3/src/buffered_send_stream.rs +++ b/neqo-http3/src/buffered_send_stream.rs @@ -4,10 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::Res; use neqo_common::qtrace; use neqo_transport::{Connection, StreamId}; +use crate::Res; + #[derive(Debug, PartialEq, Eq)] pub enum BufferedStream { Uninitialized, @@ -36,6 +37,7 @@ impl BufferedStream { } /// # Panics + /// /// If the `BufferedStream` is initialized more than one it will panic. pub fn init(&mut self, stream_id: StreamId) { debug_assert!(&Self::Uninitialized == self); @@ -46,6 +48,7 @@ impl BufferedStream { } /// # Panics + /// /// This functon cannot be called before the `BufferedStream` is initialized. pub fn buffer(&mut self, to_buf: &[u8]) { if let Self::Initialized { buf, .. } = self { @@ -56,6 +59,7 @@ impl BufferedStream { } /// # Errors + /// /// Returns `neqo_transport` errors. pub fn send_buffer(&mut self, conn: &mut Connection) -> Res { let label = ::neqo_common::log_subject!(::log::Level::Debug, self); @@ -76,6 +80,7 @@ impl BufferedStream { } /// # Errors + /// /// Returns `neqo_transport` errors. 
pub fn send_atomic(&mut self, conn: &mut Connection, to_send: &[u8]) -> Res { // First try to send anything that is in the buffer. diff --git a/neqo-http3/src/client_events.rs b/neqo-http3/src/client_events.rs index f21ec5929e..4b2ebc6c30 100644 --- a/neqo-http3/src/client_events.rs +++ b/neqo-http3/src/client_events.rs @@ -6,19 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use crate::connection::Http3State; -use crate::settings::HSettingType; -use crate::{ - features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, - CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents, -}; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + use neqo_common::{event::Provider as EventProvider, Header}; use neqo_crypto::ResumptionToken; use neqo_transport::{AppError, StreamId, StreamType}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; +use crate::{ + connection::Http3State, + features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, + settings::HSettingType, + CloseType, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, SendStreamEvents, +}; #[derive(Debug, PartialEq, Eq, Clone)] pub enum WebTransportEvent { diff --git a/neqo-http3/src/conn_params.rs b/neqo-http3/src/conn_params.rs index 1ba2a601ad..23a5d2cc67 100644 --- a/neqo-http3/src/conn_params.rs +++ b/neqo-http3/src/conn_params.rs @@ -4,9 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::cmp::min; + use neqo_qpack::QpackSettings; use neqo_transport::ConnectionParameters; -use std::cmp::min; const QPACK_MAX_TABLE_SIZE_DEFAULT: u64 = 65536; const QPACK_TABLE_SIZE_LIMIT: u64 = (1 << 30) - 1; @@ -53,6 +54,7 @@ impl Http3Parameters { } /// # Panics + /// /// The table size must be smaller than 1 << 30 by the spec. 
#[must_use] pub fn max_table_size_encoder(mut self, mut max_table: u64) -> Self { @@ -68,6 +70,7 @@ impl Http3Parameters { } /// # Panics + /// /// The table size must be smaller than 1 << 30 by the spec. #[must_use] pub fn max_table_size_decoder(mut self, mut max_table: u64) -> Self { diff --git a/neqo-http3/src/connection.rs b/neqo-http3/src/connection.rs index f2d0f28806..bb2b5a6ce0 100644 --- a/neqo-http3/src/connection.rs +++ b/neqo-http3/src/connection.rs @@ -6,41 +6,43 @@ #![allow(clippy::module_name_repetitions)] -use crate::control_stream_local::ControlStreamLocal; -use crate::control_stream_remote::ControlStreamRemote; -use crate::features::extended_connect::{ - webtransport_session::WebTransportSession, - webtransport_streams::{WebTransportRecvStream, WebTransportSendStream}, - ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType, -}; -use crate::frames::HFrame; -use crate::push_controller::PushController; -use crate::qpack_decoder_receiver::DecoderRecvStream; -use crate::qpack_encoder_receiver::EncoderRecvStream; -use crate::recv_message::{RecvMessage, RecvMessageInfo}; -use crate::request_target::{AsRequestTarget, RequestTarget}; -use crate::send_message::SendMessage; -use crate::settings::{HSettingType, HSettings, HttpZeroRttChecker}; -use crate::stream_type_reader::NewStreamHeadReader; -use crate::{ - client_events::Http3ClientEvents, CloseType, Http3Parameters, Http3StreamType, - HttpRecvStreamEvents, NewStreamType, Priority, PriorityHandler, ReceiveOutput, RecvStream, - RecvStreamEvents, SendStream, SendStreamEvents, +use std::{ + cell::RefCell, + collections::{BTreeSet, HashMap}, + fmt::Debug, + mem, + rc::Rc, }; + use neqo_common::{qdebug, qerror, qinfo, qtrace, qwarn, Decoder, Header, MessageType, Role}; -use neqo_qpack::decoder::QPackDecoder; -use neqo_qpack::encoder::QPackEncoder; +use neqo_qpack::{decoder::QPackDecoder, encoder::QPackEncoder}; use neqo_transport::{ streams::SendOrder, AppError, Connection, ConnectionError, 
DatagramTracking, State, StreamId, StreamType, ZeroRttState, }; -use std::cell::RefCell; -use std::collections::{BTreeSet, HashMap}; -use std::fmt::Debug; -use std::mem; -use std::rc::Rc; -use crate::{Error, Res}; +use crate::{ + client_events::Http3ClientEvents, + control_stream_local::ControlStreamLocal, + control_stream_remote::ControlStreamRemote, + features::extended_connect::{ + webtransport_session::WebTransportSession, + webtransport_streams::{WebTransportRecvStream, WebTransportSendStream}, + ExtendedConnectEvents, ExtendedConnectFeature, ExtendedConnectType, + }, + frames::HFrame, + push_controller::PushController, + qpack_decoder_receiver::DecoderRecvStream, + qpack_encoder_receiver::EncoderRecvStream, + recv_message::{RecvMessage, RecvMessageInfo}, + request_target::{AsRequestTarget, RequestTarget}, + send_message::SendMessage, + settings::{HSettingType, HSettings, HttpZeroRttChecker}, + stream_type_reader::NewStreamHeadReader, + CloseType, Error, Http3Parameters, Http3StreamType, HttpRecvStreamEvents, NewStreamType, + Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, + SendStreamEvents, +}; pub(crate) struct RequestDescription<'b, 't, T> where @@ -79,8 +81,8 @@ enum Http3RemoteSettingsState { /// - `ZeroRtt`: 0-RTT has been enabled and is active /// - Connected /// - GoingAway(StreamId): The connection has received a `GOAWAY` frame -/// - Closing(ConnectionError): The connection is closed. The closing has been initiated by this -/// end of the connection, e.g., the `CONNECTION_CLOSE` frame has been sent. In this state, the +/// - Closing(ConnectionError): The connection is closed. The closing has been initiated by this end +/// of the connection, e.g., the `CONNECTION_CLOSE` frame has been sent. In this state, the /// connection waits a certain amount of time to retransmit the `CONNECTION_CLOSE` frame if /// needed. 
/// - Closed(ConnectionError): This is the final close state: closing has been initialized by the @@ -384,7 +386,8 @@ impl Http3Connection { Ok(()) } - /// Inform a `HttpConnection` that a stream has data to send and that `send` should be called for the stream. + /// Inform a `HttpConnection` that a stream has data to send and that `send` should be called + /// for the stream. pub fn stream_has_pending_data(&mut self, stream_id: StreamId) { self.streams_with_pending_data.insert(stream_id); } @@ -502,8 +505,8 @@ impl Http3Connection { /// stream and unidi stream that are still do not have a type. /// The function cannot handle: /// 1) a `Push(_)`, `Htttp` or `WebTransportStream(_)` stream - /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` - /// must be handled by `Http3Client`/`Server`. + /// 2) frames `MaxPushId`, `PriorityUpdateRequest`, `PriorityUpdateRequestPush` or `Goaway` must + /// be handled by `Http3Client`/`Server`. /// The function returns `ReceiveOutput`. pub fn handle_stream_readable( &mut self, @@ -579,8 +582,8 @@ impl Http3Connection { Ok(()) } - /// This is called when `neqo_transport::Connection` state has been change to take proper actions in - /// the HTTP3 layer. + /// This is called when `neqo_transport::Connection` state has been change to take proper + /// actions in the HTTP3 layer. pub fn handle_state_change(&mut self, conn: &mut Connection, state: &State) -> Res { qdebug!([self], "Handle state change {:?}", state); match state { @@ -626,7 +629,8 @@ impl Http3Connection { } } - /// This is called when 0RTT has been reset to clear `send_streams`, `recv_streams` and settings. + /// This is called when 0RTT has been reset to clear `send_streams`, `recv_streams` and + /// settings. 
pub fn handle_zero_rtt_rejected(&mut self) -> Res<()> { if self.state == Http3State::ZeroRtt { self.state = Http3State::Initializing; @@ -774,16 +778,16 @@ impl Http3Connection { /// This function will not handle the output of the function completely, but only /// handle the indication that a stream is closed. There are 2 cases: /// - an error occurred or - /// - the stream is done, i.e. the second value in `output` tuple is true if - /// the stream is done and can be removed from the `recv_streams` + /// - the stream is done, i.e. the second value in `output` tuple is true if the stream is done + /// and can be removed from the `recv_streams` /// How it is handling `output`: /// - if the stream is done, it removes the stream from `recv_streams` /// - if the stream is not done and there is no error, return `output` and the caller will /// handle it. /// - in case of an error: - /// - if it is only a stream error and the stream is not critical, send `STOP_SENDING` - /// frame, remove the stream from `recv_streams` and inform the listener that the stream - /// has been reset. + /// - if it is only a stream error and the stream is not critical, send `STOP_SENDING` frame, + /// remove the stream from `recv_streams` and inform the listener that the stream has been + /// reset. /// - otherwise this is a connection error. In this case, propagate the error to the caller /// that will handle it properly. fn handle_stream_manipulation_output( @@ -861,7 +865,8 @@ impl Http3Connection { } fn create_bidi_transport_stream(&self, conn: &mut Connection) -> Res { - // Requests cannot be created when a connection is in states: Initializing, GoingAway, Closing and Closed. + // Requests cannot be created when a connection is in states: Initializing, GoingAway, + // Closing and Closed. match self.state() { Http3State::GoingAway(..) | Http3State::Closing(..) | Http3State::Closed(..) 
=> { return Err(Error::AlreadyClosed) @@ -927,8 +932,9 @@ impl Http3Connection { )), ); - // Call immediately send so that at least headers get sent. This will make Firefox faster, since - // it can send request body immediately in most cases and does not need to do a complete process loop. + // Call immediately send so that at least headers get sent. This will make Firefox faster, + // since it can send request body immediately in most cases and does not need to do + // a complete process loop. self.send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)? @@ -936,11 +942,13 @@ impl Http3Connection { Ok(()) } - /// Stream data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Stream data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happens while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happens while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, conn: &mut Connection, @@ -1004,7 +1012,9 @@ impl Http3Connection { } /// Set the stream `SendOrder`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if the stream id doesn't exist pub fn stream_set_sendorder( conn: &mut Connection, @@ -1018,7 +1028,9 @@ impl Http3Connection { /// Set the stream Fairness. Fair streams will share bandwidth with other /// streams of the same sendOrder group (or the unordered group). Unfair streams /// will give bandwidth preferentially to the lowest streamId with data to send. 
+ /// /// # Errors + /// /// Returns `InvalidStreamId` if the stream id doesn't exist pub fn stream_set_fairness( conn: &mut Connection, @@ -1088,8 +1100,8 @@ impl Http3Connection { .send_streams .get_mut(&stream_id) .ok_or(Error::InvalidStreamId)?; - // The following function may return InvalidStreamId from the transport layer if the stream has been closed - // already. It is ok to ignore it here. + // The following function may return InvalidStreamId from the transport layer if the stream + // has been closed already. It is ok to ignore it here. mem::drop(send_stream.close(conn)); if send_stream.done() { self.remove_send_stream(stream_id, conn); @@ -1184,7 +1196,8 @@ impl Http3Connection { .is_ok() { mem::drop(self.stream_close_send(conn, stream_id)); - // TODO issue 1294: add a timer to clean up the recv_stream if the peer does not do that in a short time. + // TODO issue 1294: add a timer to clean up the recv_stream if the peer does not + // do that in a short time. self.streams_with_pending_data.insert(stream_id); } else { self.cancel_fetch(stream_id, Error::HttpRequestRejected.code(), conn)?; @@ -1571,8 +1584,8 @@ impl Http3Connection { for id in recv { qtrace!("Remove the extended connect sub receiver stream {}", id); - // Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would have - // the same effect. + // Use CloseType::ResetRemote so that an event will be sent. CloseType::LocalError would + // have the same effect. if let Some(mut s) = self.recv_streams.remove(&id) { mem::drop(s.reset(CloseType::ResetRemote(Error::HttpRequestCancelled.code()))); } diff --git a/neqo-http3/src/connection_client.rs b/neqo-http3/src/connection_client.rs index 0be8acaa04..5cc0541c0c 100644 --- a/neqo-http3/src/connection_client.rs +++ b/neqo-http3/src/connection_client.rs @@ -4,16 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{ - client_events::{Http3ClientEvent, Http3ClientEvents}, - connection::{Http3Connection, Http3State, RequestDescription}, - frames::HFrame, - push_controller::{PushController, RecvPushEvents}, - recv_message::{RecvMessage, RecvMessageInfo}, - request_target::AsRequestTarget, - settings::HSettings, - Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, ReceiveOutput, +use std::{ + cell::RefCell, + convert::TryFrom, + fmt::{Debug, Display}, + mem, + net::SocketAddr, + rc::Rc, + time::Instant, }; + use neqo_common::{ event::Provider as EventProvider, hex, hex_with_len, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Decoder, Encoder, Header, MessageType, Role, @@ -25,20 +25,21 @@ use neqo_transport::{ DatagramTracking, Output, RecvStreamStats, SendStreamStats, Stats as TransportStats, StreamId, StreamType, Version, ZeroRttState, }; -use std::{ - cell::RefCell, - convert::TryFrom, - fmt::{Debug, Display}, - mem, - net::SocketAddr, - rc::Rc, - time::Instant, -}; -use crate::{Error, Res}; +use crate::{ + client_events::{Http3ClientEvent, Http3ClientEvents}, + connection::{Http3Connection, Http3State, RequestDescription}, + frames::HFrame, + push_controller::{PushController, RecvPushEvents}, + recv_message::{RecvMessage, RecvMessageInfo}, + request_target::AsRequestTarget, + settings::HSettings, + Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, + ReceiveOutput, Res, +}; -// This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal a given id. -// Only the same type (bidirectional or unidirectionsl) streams are filtered. +// This is used for filtering send_streams and recv_Streams with a stream_ids greater than or equal +// a given id. Only the same type (bidirectional or unidirectionsl) streams are filtered. 
fn id_gte(base: StreamId) -> impl FnMut((&StreamId, &U)) -> Option + 'static where U: ?Sized, @@ -161,7 +162,7 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// } -///``` +/// ``` /// /// ### Creating a `WebTransport` session /// @@ -198,8 +199,7 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// } -/// -///``` +/// ``` /// /// ### `WebTransport`: create a stream, send and receive data on the stream /// @@ -287,7 +287,6 @@ fn alpn_from_quic_version(version: Version) -> &'static str { /// } /// } /// ``` -/// pub struct Http3Client { conn: Connection, base_handler: Http3Connection, @@ -303,8 +302,9 @@ impl Display for Http3Client { impl Http3Client { /// # Errors - /// Making a `neqo-transport::connection` may produce an error. This can only be a crypto error if - /// the crypto context can't be created or configured. + /// + /// Making a `neqo-transport::connection` may produce an error. This can only be a crypto error + /// if the crypto context can't be created or configured. pub fn new( server_name: impl Into, cid_manager: Rc>, @@ -391,6 +391,7 @@ impl Http3Client { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Fails when the configuration provided is bad. pub fn enable_ech(&mut self, ech_config_list: impl AsRef<[u8]>) -> Res<()> { self.conn.client_enable_ech(ech_config_list)?; @@ -399,7 +400,9 @@ impl Http3Client { /// Get the connection id, which is useful for disambiguating connections to /// the same origin. + /// /// # Panics + /// /// Never, because clients always have this field. #[must_use] pub fn connection_id(&self) -> &ConnectionId { @@ -433,14 +436,18 @@ impl Http3Client { .and_then(|t| self.encode_resumption_token(&t)) } - /// This may be call if an application has a resumption token. This must be called before connection starts. + /// This may be call if an application has a resumption token. This must be called before + /// connection starts. 
/// /// The resumption token also contains encoded HTTP/3 settings. The settings will be decoded /// and used until the setting are received from the server. /// /// # Errors + /// /// An error is return if token cannot be decoded or a connection is is a wrong state. + /// /// # Panics + /// /// On closing if the base handler can't handle it (debug only). pub fn enable_resumption(&mut self, now: Instant, token: impl AsRef<[u8]>) -> Res<()> { if self.base_handler.state != Http3State::Initializing { @@ -499,7 +506,9 @@ impl Http3Client { } /// Attempt to force a key update. + /// /// # Errors + /// /// If the connection isn't confirmed, or there is an outstanding key update, this /// returns `Err(Error::TransportError(neqo_transport::Error::KeyUpdateBlocked))`. pub fn initiate_key_update(&mut self) -> Res<()> { @@ -512,9 +521,13 @@ impl Http3Client { /// The function fetches a resource using `method`, `target` and `headers`. A response body /// may be added by calling `send_data`. `stream_close_send` must be sent to finish the request /// even if request data are not sent. + /// /// # Errors + /// /// If a new stream cannot be created an error will be return. + /// /// # Panics + /// /// `SendMessage` implements `http_stream` so it will not panic. pub fn fetch<'x, 't: 'x, T>( &mut self, @@ -550,7 +563,9 @@ impl Http3Client { /// Send an [`PRIORITY_UPDATE`-frame][1] on next `Http3Client::process_output()` call. /// Returns if the priority got changed. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist /// /// [1]: https://datatracker.ietf.org/doc/html/draft-kazuho-httpbis-priority-04#section-5.2 @@ -560,7 +575,9 @@ impl Http3Client { /// An application may cancel a stream(request). /// Both sides, the receiviing and sending side, sending and receiving side, will be closed. + /// /// # Errors + /// /// An error will be return if a stream does not exist. 
pub fn cancel_fetch(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "reset_stream {} error={}.", stream_id, error); @@ -569,7 +586,9 @@ impl Http3Client { } /// This is call when application is done sending a request. + /// /// # Errors + /// /// An error will be return if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId) -> Res<()> { qinfo!([self], "Close sending side stream={}.", stream_id); @@ -578,6 +597,7 @@ impl Http3Client { } /// # Errors + /// /// An error will be return if a stream does not exist. pub fn stream_reset_send(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "stream_reset_send {} error={}.", stream_id, error); @@ -586,6 +606,7 @@ impl Http3Client { } /// # Errors + /// /// An error will be return if a stream does not exist. pub fn stream_stop_sending(&mut self, stream_id: StreamId, error: AppError) -> Res<()> { qinfo!([self], "stream_stop_sending {} error={}.", stream_id, error); @@ -598,11 +619,13 @@ impl Http3Client { /// headers are supplied through the `fetch` function. /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. 
pub fn send_data(&mut self, stream_id: StreamId, buf: &[u8]) -> Res { qinfo!( [self], @@ -617,11 +640,13 @@ impl Http3Client { .send_data(&mut self.conn, buf) } - /// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Response data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happen while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happen while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, now: Instant, @@ -641,7 +666,9 @@ impl Http3Client { // API: Push streams /// Cancel a push + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn cancel_push(&mut self, push_id: u64) -> Res<()> { self.push_handler @@ -651,9 +678,11 @@ impl Http3Client { /// Push response data are read directly into a buffer supplied as a parameter of this function /// to avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist(`InvalidStreamId`) or an error has happened while - /// reading a stream, e.g. early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist(`InvalidStreamId`) or an error has happened + /// while reading a stream, e.g. early close, protocol error, etc. pub fn push_read_data( &mut self, now: Instant, @@ -670,8 +699,9 @@ impl Http3Client { } // API WebTransport - + // /// # Errors + /// /// If `WebTransport` cannot be created, e.g. the `WebTransport` support is /// not negotiated or the HTTP/3 connection is closed. 
pub fn webtransport_create_session<'x, 't: 'x, T>( @@ -699,11 +729,14 @@ impl Http3Client { } /// Close `WebTransport` cleanly + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub fn webtransport_close_session( &mut self, session_id: StreamId, @@ -715,6 +748,7 @@ impl Http3Client { } /// # Errors + /// /// This may return an error if the particular session does not exist /// or the connection is not in the active state. pub fn webtransport_create_stream( @@ -732,7 +766,9 @@ impl Http3Client { } /// Send `WebTransport` datagram. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. @@ -749,10 +785,14 @@ impl Http3Client { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the - /// packet number, ack frames, etc. + /// packet number, ack frames, etc. + /// /// # Errors + /// /// The function returns `NotAvailable` if datagrams are not enabled. + /// /// # Panics + /// /// This cannot panic. The max varint length is 8. pub fn webtransport_max_datagram_size(&self, session_id: StreamId) -> Res { Ok(self.conn.max_datagram_size()? 
@@ -760,9 +800,13 @@ impl Http3Client { } /// Sets the `SendOrder` for a given stream + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. + /// /// # Panics + /// /// This cannot panic. pub fn webtransport_set_sendorder( &mut self, @@ -773,16 +817,22 @@ impl Http3Client { } /// Sets the `Fairness` for a given stream + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. + /// /// # Panics + /// /// This cannot panic. pub fn webtransport_set_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { Http3Connection::stream_set_fairness(&mut self.conn, stream_id, fairness) } /// Returns the current `SendStreamStats` of a `WebTransportSendStream`. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn webtransport_send_stream_stats(&mut self, stream_id: StreamId) -> Res { self.base_handler @@ -793,7 +843,9 @@ impl Http3Client { } /// Returns the current `RecvStreamStats` of a `WebTransportRecvStream`. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. pub fn webtransport_recv_stream_stats(&mut self, stream_id: StreamId) -> Res { self.base_handler @@ -883,7 +935,8 @@ impl Http3Client { /// /// `process_output` can return: /// - a [`Output::Datagram(Datagram)`][1]: data that should be sent as a UDP payload, - /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be called at least after the time expires, + /// - a [`Output::Callback(Duration)`][1]: the duration of a timer. `process_output` should be + /// called at least after the time expires, /// - [`Output::None`][1]: this is returned when `Nttp3Client` is done and can be destroyed. /// /// The application should call this function repeatedly until a timer value or None is @@ -938,14 +991,14 @@ impl Http3Client { } } - /// This function checks [`ConnectionEvent`][2]s emitted by the QUIC layer, e.g. 
connection change - /// state events, new incoming stream data is available, a stream is was reset, etc. The HTTP/3 - /// layer needs to handle these events. Most of the events are handled by + /// This function checks [`ConnectionEvent`][2]s emitted by the QUIC layer, e.g. connection + /// change state events, new incoming stream data is available, a stream is was reset, etc. + /// The HTTP/3 layer needs to handle these events. Most of the events are handled by /// [`Http3Connection`][1] by calling appropriate functions, e.g. `handle_state_change`, /// `handle_stream_reset`, etc. [`Http3Connection`][1] handle functionalities that are common /// for the client and server side. Some of the functionalities are specific to the client and - /// they are handled by `Http3Client`. For example, [`ConnectionEvent::RecvStreamReadable`][3] event - /// is handled by `Http3Client::handle_stream_readable`. The function calls + /// they are handled by `Http3Client`. For example, [`ConnectionEvent::RecvStreamReadable`][3] + /// event is handled by `Http3Client::handle_stream_readable`. The function calls /// `Http3Connection::handle_stream_readable` and then hands the return value as appropriate /// for the client-side. /// @@ -958,11 +1011,11 @@ impl Http3Client { qdebug!([self], "check_connection_events - event {:?}.", e); match e { ConnectionEvent::NewStream { stream_id } => { - // During this event we only add a new stream to the Http3Connection stream list, - // with NewStreamHeadReader stream handler. + // During this event we only add a new stream to the Http3Connection stream + // list, with NewStreamHeadReader stream handler. // This function will not read from the stream and try to decode the stream. - // RecvStreamReadable will be emitted after this event and reading, i.e. decoding - // of a stream will happen during that event. + // RecvStreamReadable will be emitted after this event and reading, i.e. + // decoding of a stream will happen during that event. 
self.base_handler.add_new_stream(stream_id); } ConnectionEvent::SendStreamWritable { stream_id } => { @@ -1036,12 +1089,12 @@ impl Http3Client { /// - `ReceiveOutput::NewStream(NewStreamType::WebTransportStream(_))` - because /// `Http3ClientEvents`is needed and events handler is specific to the client. /// - `ReceiveOutput::ControlFrames(control_frames)` - some control frame handling differs - /// between the client and the server: + /// between the client and the server: /// - `HFrame::CancelPush` - only the client-side may receive it, /// - `HFrame::MaxPushId { .. }`, `HFrame::PriorityUpdateRequest { .. } ` and - /// `HFrame::PriorityUpdatePush` can only be receive on the server side, + /// `HFrame::PriorityUpdatePush` can only be receive on the server side, /// - `HFrame::Goaway { stream_id }` needs specific handling by the client by the protocol - /// specification. + /// specification. /// /// [1]: https://github.com/mozilla/neqo/blob/main/neqo-http3/src/connection.rs fn handle_stream_readable(&mut self, stream_id: StreamId) -> Res<()> { @@ -1194,7 +1247,9 @@ impl Http3Client { } /// Increases `max_stream_data` for a `stream_id`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. 
pub fn set_stream_max_data(&mut self, stream_id: StreamId, max_data: u64) -> Res<()> { @@ -1241,16 +1296,8 @@ impl EventProvider for Http3Client { #[cfg(test)] mod tests { - use super::{ - AuthenticationStatus, Connection, Error, HSettings, Header, Http3Client, Http3ClientEvent, - Http3Parameters, Http3State, Rc, RefCell, - }; - use crate::{ - frames::{HFrame, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES}, - qpack_encoder_receiver::EncoderRecvStream, - settings::{HSetting, HSettingType, H3_RESERVED_SETTINGS}, - Http3Server, Priority, RecvStream, - }; + use std::{convert::TryFrom, mem, time::Duration}; + use neqo_common::{event::Provider, qtrace, Datagram, Decoder, Encoder}; use neqo_crypto::{AllowZeroRtt, AntiReplay, ResumptionToken}; use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; @@ -1258,12 +1305,22 @@ mod tests { ConnectionError, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, Version, RECV_BUFFER_SIZE, SEND_BUFFER_SIZE, }; - use std::{convert::TryFrom, mem, time::Duration}; use test_fixture::{ addr, anti_replay, default_server_h3, fixture_init, new_server, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, }; + use super::{ + AuthenticationStatus, Connection, Error, HSettings, Header, Http3Client, Http3ClientEvent, + Http3Parameters, Http3State, Rc, RefCell, + }; + use crate::{ + frames::{HFrame, H3_FRAME_TYPE_SETTINGS, H3_RESERVED_FRAME_TYPES}, + qpack_encoder_receiver::EncoderRecvStream, + settings::{HSetting, HSettingType, H3_RESERVED_SETTINGS}, + Http3Server, Priority, RecvStream, + }; + fn assert_closed(client: &Http3Client, expected: &Error) { match client.state() { Http3State::Closing(err) | Http3State::Closed(err) => { @@ -1710,8 +1767,8 @@ mod tests { 0x43, 0xd3, 0xc1, ]; - // For fetch request fetch("GET", "https", "something.com", "/", &[(String::from("myheaders", "myvalue"))]) - // the following request header frame will be sent: + // For fetch request fetch("GET", "https", 
"something.com", "/", &[(String::from("myheaders", + // "myvalue"))]) the following request header frame will be sent: const EXPECTED_REQUEST_HEADER_FRAME_VERSION2: &[u8] = &[ 0x01, 0x11, 0x02, 0x80, 0xd1, 0xd7, 0x50, 0x89, 0x41, 0xe9, 0x2a, 0x67, 0x35, 0x53, 0x2e, 0x43, 0xd3, 0xc1, 0x10, @@ -1719,8 +1776,8 @@ mod tests { const HTTP_HEADER_FRAME_0: &[u8] = &[0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30]; - // The response header from HTTP_HEADER_FRAME (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30) are - // decoded into: + // The response header from HTTP_HEADER_FRAME (0x01, 0x06, 0x00, 0x00, 0xd9, 0x54, 0x01, 0x30) + // are decoded into: fn check_response_header_0(header: &[Header]) { let expected_response_header_0 = &[ Header::new(":status", "200"), @@ -2487,7 +2544,8 @@ mod tests { #[test] fn fetch_basic() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(true); // send response - 200 Content-Length: 7 @@ -2627,7 +2685,8 @@ mod tests { // Send a request with the request body. #[test] fn fetch_with_data() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2669,9 +2728,11 @@ mod tests { read_response(&mut client, &mut server.conn, request_stream_id); } - // send a request with request body containing request_body. We expect to receive expected_data_frame_header. + // send a request with request body containing request_body. We expect to receive + // expected_data_frame_header. 
fn fetch_with_data_length_xbytes(request_body: &[u8], expected_data_frame_header: &[u8]) { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2757,7 +2818,8 @@ mod tests { expected_second_data_frame_header: &[u8], expected_second_data_frame: &[u8], ) { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Get DataWritable for the request stream so that we can write the request body. @@ -2783,7 +2845,7 @@ mod tests { out = client.process(out.as_dgram_ref(), now()); } - // check received frames and send a response. + // Check received frames and send a response. while let Some(e) = server.conn.next_event() { if let ConnectionEvent::RecvStreamReadable { stream_id } = e { if stream_id == request_stream_id { @@ -2872,7 +2934,8 @@ mod tests { } // Send 2 frames. For the second one we can only send 16383 bytes. - // After the first frame there is exactly 16383+4 bytes left in the send buffer, but we can only send 16383 bytes. + // After the first frame there is exactly 16383+4 bytes left in the send buffer, but we can only + // send 16383 bytes. #[test] fn fetch_two_data_frame_second_16383bytes_place_for_16387() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16410); @@ -2880,7 +2943,8 @@ mod tests { } // Send 2 frames. For the second one we can only send 16383 bytes. - // After the first frame there is exactly 16383+5 bytes left in the send buffer, but we can only send 16383 bytes. 
+ // After the first frame there is exactly 16383+5 bytes left in the send buffer, but we can only + // send 16383 bytes. #[test] fn fetch_two_data_frame_second_16383bytes_place_for_16388() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16411); @@ -2888,7 +2952,8 @@ mod tests { } // Send 2 frames. For the second one we can send 16384 bytes. - // After the first frame there is exactly 16384+5 bytes left in the send buffer, but we can send 16384 bytes. + // After the first frame there is exactly 16384+5 bytes left in the send buffer, but we can send + // 16384 bytes. #[test] fn fetch_two_data_frame_second_16384bytes_place_for_16389() { let (buf, hdr) = alloc_buffer(SEND_BUFFER_SIZE - 16412); @@ -2898,7 +2963,8 @@ mod tests { // Test receiving STOP_SENDING with the HttpNoError error code. #[test] fn test_stop_sending_early_response() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with early_response. @@ -2975,7 +3041,8 @@ mod tests { // Server sends stop sending and reset. #[test] fn test_stop_sending_other_error_with_reset() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with RequestRejected. @@ -3038,7 +3105,8 @@ mod tests { // Server sends stop sending with RequestRejected, but it does not send reset. #[test] fn test_stop_sending_other_error_wo_reset() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. 
Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Stop sending with RequestRejected. @@ -3085,7 +3153,8 @@ mod tests { // in client.events. The events will be removed. #[test] fn test_stop_sending_and_reset_other_error_with_events() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // send response - 200 Content-Length: 3 @@ -3158,7 +3227,8 @@ mod tests { // The events will be removed. #[test] fn test_stop_sending_other_error_with_events() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // send response - 200 Content-Length: 3 @@ -3221,7 +3291,8 @@ mod tests { // Server sends a reset. We will close sending side as well. #[test] fn test_reset_wo_stop_sending() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(false); // Send a reset. @@ -3958,7 +4029,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. 
let encoder_inst_pkt = server.conn.process(None, now()); // Send response @@ -4026,7 +4098,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); let mut d = Encoder::default(); @@ -4790,7 +4863,8 @@ mod tests { #[test] fn no_data_ready_events_after_fin() { - // Connect exchange headers and send a request. Also check if the correct header frame has been sent. + // Connect exchange headers and send a request. Also check if the correct header frame has + // been sent. let (mut client, mut server, request_stream_id) = connect_and_send_request(true); // send response - 200 Content-Length: 7 @@ -5003,7 +5077,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); } @@ -5094,7 +5169,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.cancel_push(1), Err(Error::InvalidStreamId)); } @@ -5444,7 +5520,7 @@ mod tests { assert!(!client.events().any(push_event)); } - // Test that max_push_id is enforced when a push promise frame is received. + // Test that max_push_id is enforced when a push promise frame is received. 
#[test] fn exceed_max_push_id_promise() { // Connect and send a request @@ -5608,7 +5684,8 @@ mod tests { ))); } - // Test CANCEL_PUSH frame: after cancel push any new PUSH_PROMISE or push stream will be ignored. + // Test CANCEL_PUSH frame: after cancel push any new PUSH_PROMISE or push stream will be + // ignored. #[test] fn cancel_push_ignore_promise() { // Connect and send a request @@ -5624,7 +5701,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5653,7 +5731,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5681,7 +5760,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5694,7 +5774,8 @@ mod tests { assert_eq!(client.state(), Http3State::Connected); } - // Test a push stream reset after a new PUSH_PROMISE or/and push stream. The events will be ignored. + // Test a push stream reset after a new PUSH_PROMISE or/and push stream. 
The events will be + // ignored. #[test] fn cancel_push_stream_after_push_promise_and_push_stream() { // Connect and send a request @@ -5715,7 +5796,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); @@ -5743,7 +5825,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); @@ -5762,13 +5845,15 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); assert_eq!(client.state(), Http3State::Connected); } - // Test that push_promise and push data events will be removed after application calls cancel_push. + // Test that push_promise and push data events will be removed after application calls + // cancel_push. #[test] fn app_cancel_push_after_push_promise_and_push_stream() { // Connect and send a request @@ -5785,7 +5870,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. 
+ // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5817,7 +5903,8 @@ mod tests { // Assert that we do not have any push event. assert!(!check_push_events(&mut client)); - // Check that the push has been closed, e.g. calling cancel_push should return InvalidStreamId. + // Check that the push has been closed, e.g. calling cancel_push should return + // InvalidStreamId. assert_eq!(client.cancel_push(0), Err(Error::InvalidStreamId)); // Check that the push has been canceled by the client. @@ -5899,7 +5986,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()).dgram(); assert!(encoder_inst_pkt.is_some()); @@ -7106,11 +7194,12 @@ mod tests { let out = server.conn.process(out.as_dgram_ref(), now()); // the server increased the max_data during the second read if that isn't the case - // in the future and therefore this asserts fails, the request data on stream 0 could be read - // to cause a max_update frame + // in the future and therefore this asserts fails, the request data on stream 0 could be + // read to cause a max_update frame assert_eq!(md_before + 1, server.conn.stats().frame_tx.max_data); - // make sure that the server didn't receive a priority_update on client control stream (stream_id 2) yet + // make sure that the server didn't receive a priority_update on client control stream + // (stream_id 2) yet let mut buf = [0; 32]; assert_eq!( server.conn.stream_recv(StreamId::new(2), &mut buf), @@ -7149,7 +7238,8 @@ mod tests { header_block: encoded_headers.to_vec(), }; - // Send the encoder instructions, but delay them so that 
the stream is blocked on decoding headers. + // Send the encoder instructions, but delay them so that the stream is blocked on decoding + // headers. let encoder_inst_pkt = server.conn.process(None, now()); // Send response diff --git a/neqo-http3/src/connection_server.rs b/neqo-http3/src/connection_server.rs index c8cab52dd0..097209a226 100644 --- a/neqo-http3/src/connection_server.rs +++ b/neqo-http3/src/connection_server.rs @@ -4,21 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::connection::{Http3Connection, Http3State, WebTransportSessionAcceptAction}; -use crate::frames::HFrame; -use crate::recv_message::{RecvMessage, RecvMessageInfo}; -use crate::send_message::SendMessage; -use crate::server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents}; -use crate::{ - Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, - ReceiveOutput, Res, -}; +use std::{rc::Rc, time::Instant}; + use neqo_common::{event::Provider, qdebug, qinfo, qtrace, Header, MessageType, Role}; use neqo_transport::{ AppError, Connection, ConnectionEvent, DatagramTracking, StreamId, StreamType, }; -use std::rc::Rc; -use std::time::Instant; + +use crate::{ + connection::{Http3Connection, Http3State, WebTransportSessionAcceptAction}, + frames::HFrame, + recv_message::{RecvMessage, RecvMessageInfo}, + send_message::SendMessage, + server_connection_events::{Http3ServerConnEvent, Http3ServerConnEvents}, + Error, Http3Parameters, Http3StreamType, NewStreamType, Priority, PriorityHandler, + ReceiveOutput, Res, +}; #[derive(Debug)] pub struct Http3ServerHandler { @@ -48,12 +49,15 @@ impl Http3ServerHandler { } /// Supply a response for a request. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. 
- /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. pub(crate) fn send_data( &mut self, stream_id: StreamId, @@ -89,7 +93,9 @@ impl Http3ServerHandler { } /// This is called when application is done sending a request. + /// /// # Errors + /// /// An error will be returned if stream does not exist. pub fn stream_close_send(&mut self, stream_id: StreamId, conn: &mut Connection) -> Res<()> { qinfo!([self], "Close sending side stream={}.", stream_id); @@ -101,7 +107,9 @@ impl Http3ServerHandler { /// An application may reset a stream(request). /// Both sides, sending and receiving side, will be closed. + /// /// # Errors + /// /// An error will be return if a stream does not exist. pub fn cancel_fetch( &mut self, @@ -154,11 +162,14 @@ impl Http3ServerHandler { } /// Close `WebTransport` cleanly + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist, - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) - /// `InvalidInput` if an empty buffer has been supplied. + /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) `InvalidInput` if an empty buffer has been + /// supplied. 
pub fn webtransport_close_session( &mut self, conn: &mut Connection, @@ -354,7 +365,7 @@ impl Http3ServerHandler { } HFrame::PriorityUpdatePush { element_id, priority } => { // TODO: check if the element_id references a promised push stream or - // is greater than the maximum Push ID. + // is greater than the maximum Push ID. self.events.priority_update(StreamId::from(element_id), priority); Ok(()) } @@ -383,11 +394,13 @@ impl Http3ServerHandler { } } - /// Response data are read directly into a buffer supplied as a parameter of this function to avoid copying - /// data. + /// Response data are read directly into a buffer supplied as a parameter of this function to + /// avoid copying data. + /// /// # Errors - /// It returns an error if a stream does not exist or an error happen while reading a stream, e.g. - /// early close, protocol error, etc. + /// + /// It returns an error if a stream does not exist or an error happen while reading a stream, + /// e.g. early close, protocol error, etc. pub fn read_data( &mut self, conn: &mut Connection, diff --git a/neqo-http3/src/control_stream_local.rs b/neqo-http3/src/control_stream_local.rs index e6d63c3502..62676ee391 100644 --- a/neqo-http3/src/control_stream_local.rs +++ b/neqo-http3/src/control_stream_local.rs @@ -4,12 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::HFrame; -use crate::{BufferedStream, Http3StreamType, RecvStream, Res}; +use std::{ + collections::{HashMap, VecDeque}, + convert::TryFrom, +}; + use neqo_common::{qtrace, Encoder}; use neqo_transport::{Connection, StreamId, StreamType}; -use std::collections::{HashMap, VecDeque}; -use std::convert::TryFrom; + +use crate::{frames::HFrame, BufferedStream, Http3StreamType, RecvStream, Res}; pub const HTTP3_UNI_STREAM_TYPE_CONTROL: u64 = 0x0; diff --git a/neqo-http3/src/control_stream_remote.rs b/neqo-http3/src/control_stream_remote.rs index 7b42ed2b11..aef4b4c0a4 100644 --- a/neqo-http3/src/control_stream_remote.rs +++ b/neqo-http3/src/control_stream_remote.rs @@ -4,12 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper}; -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; use neqo_common::qdebug; use neqo_transport::{Connection, StreamId}; -/// The remote control stream is responsible only for reading frames. The frames are handled by `Http3Connection`. +use crate::{ + frames::{FrameReader, HFrame, StreamReaderConnectionWrapper}, + CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream, +}; + +/// The remote control stream is responsible only for reading frames. The frames are handled by +/// `Http3Connection`. 
#[derive(Debug)] pub(crate) struct ControlStreamRemote { stream_id: StreamId, diff --git a/neqo-http3/src/features/extended_connect/mod.rs b/neqo-http3/src/features/extended_connect/mod.rs index 6be92dabba..77655833f7 100644 --- a/neqo-http3/src/features/extended_connect/mod.rs +++ b/neqo-http3/src/features/extended_connect/mod.rs @@ -9,15 +9,19 @@ pub(crate) mod webtransport_session; pub(crate) mod webtransport_streams; -use crate::client_events::Http3ClientEvents; -use crate::features::NegotiationState; -use crate::settings::{HSettingType, HSettings}; -use crate::{CloseType, Http3StreamInfo, Http3StreamType}; +use std::fmt::Debug; + use neqo_common::Header; use neqo_transport::{AppError, StreamId}; -use std::fmt::Debug; pub(crate) use webtransport_session::WebTransportSession; +use crate::{ + client_events::Http3ClientEvents, + features::NegotiationState, + settings::{HSettingType, HSettings}, + CloseType, Http3StreamInfo, Http3StreamType, +}; + #[derive(Debug, PartialEq, Eq, Clone)] pub enum SessionCloseReason { Error(AppError), diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs index 1b9511b255..1c58596dd3 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/datagrams.rs @@ -4,13 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::{ - wt_default_parameters, WtTest, DATAGRAM_SIZE, -}; -use crate::{Error, Http3Parameters, WebTransportRequest}; +use std::convert::TryFrom; + use neqo_common::Encoder; use neqo_transport::Error as TransportError; -use std::convert::TryFrom; + +use crate::{ + features::extended_connect::tests::webtransport::{ + wt_default_parameters, WtTest, DATAGRAM_SIZE, + }, + Error, Http3Parameters, WebTransportRequest, +}; const DGRAM: &[u8] = &[0, 100]; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs index fcdcff0fe1..51dc47e4c1 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/mod.rs @@ -8,7 +8,15 @@ mod datagrams; mod negotiation; mod sessions; mod streams; +use std::{cell::RefCell, rc::Rc, time::Duration}; + use neqo_common::event::Provider; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{ConnectionParameters, StreamId, StreamType}; +use test_fixture::{ + addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, + DEFAULT_KEYS, DEFAULT_SERVER_NAME, +}; use crate::{ features::extended_connect::SessionCloseReason, Error, Header, Http3Client, Http3ClientEvent, @@ -16,16 +24,6 @@ use crate::{ RecvStreamStats, SendStreamStats, WebTransportEvent, WebTransportRequest, WebTransportServerEvent, WebTransportSessionAcceptAction, }; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{ConnectionParameters, StreamId, StreamType}; -use std::cell::RefCell; -use std::rc::Rc; -use std::time::Duration; - -use test_fixture::{ - addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, - DEFAULT_KEYS, DEFAULT_SERVER_NAME, -}; const DATAGRAM_SIZE: u64 = 1200; diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs 
b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs index e838646ab2..27f669861d 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/negotiation.rs @@ -4,17 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::time::Duration; + +use neqo_common::{event::Provider, Encoder}; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{Connection, ConnectionError, StreamType}; +use test_fixture::{default_server_h3, now}; + use super::{connect, default_http3_client, default_http3_server, exchange_packets}; use crate::{ settings::{HSetting, HSettingType, HSettings}, Error, HFrame, Http3Client, Http3ClientEvent, Http3Parameters, Http3Server, Http3State, WebTransportEvent, }; -use neqo_common::{event::Provider, Encoder}; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{Connection, ConnectionError, StreamType}; -use std::time::Duration; -use test_fixture::{default_server_h3, now}; fn check_wt_event(client: &mut Http3Client, wt_enable_client: bool, wt_enable_server: bool) { let wt_event = client.events().find_map(|e| { diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs index 06d9318b87..5f929d0e4b 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/sessions.rs @@ -4,19 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::{ - default_http3_client, default_http3_server, wt_default_parameters, WtTest, -}; -use crate::{ - features::extended_connect::SessionCloseReason, frames::WebTransportFrame, Error, Header, - Http3ClientEvent, Http3OrWebTransportStream, Http3Server, Http3ServerEvent, Http3State, - Priority, WebTransportEvent, WebTransportServerEvent, WebTransportSessionAcceptAction, -}; +use std::mem; + use neqo_common::{event::Provider, Encoder}; use neqo_transport::StreamType; -use std::mem; use test_fixture::now; +use crate::{ + features::extended_connect::{ + tests::webtransport::{ + default_http3_client, default_http3_server, wt_default_parameters, WtTest, + }, + SessionCloseReason, + }, + frames::WebTransportFrame, + Error, Header, Http3ClientEvent, Http3OrWebTransportStream, Http3Server, Http3ServerEvent, + Http3State, Priority, WebTransportEvent, WebTransportServerEvent, + WebTransportSessionAcceptAction, +}; + #[test] fn wt_session() { let mut wt = WtTest::new(); diff --git a/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs b/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs index a50c45d518..b898dbb31e 100644 --- a/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs +++ b/neqo-http3/src/features/extended_connect/tests/webtransport/streams.rs @@ -4,11 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::features::extended_connect::tests::webtransport::WtTest; -use crate::{features::extended_connect::SessionCloseReason, Error}; -use neqo_transport::StreamType; use std::mem; +use neqo_transport::StreamType; + +use crate::{ + features::extended_connect::{tests::webtransport::WtTest, SessionCloseReason}, + Error, +}; + #[test] fn wt_client_stream_uni() { const BUF_CLIENT: &[u8] = &[0; 10]; @@ -287,13 +291,17 @@ fn wt_server_stream_bidi_stop_sending() { // 1) Both sides of a bidirectional client stream are opened. // 2) A client unidirectional stream is opened. // 3) A client unidirectional stream has been closed and both sides consumed the closing info. -// 4) A client unidirectional stream has been closed, but only the server has consumed the closing info. -// 5) A client unidirectional stream has been closed, but only the client has consum the closing info. +// 4) A client unidirectional stream has been closed, but only the server has consumed the closing +// info. +// 5) A client unidirectional stream has been closed, but only the client has consumed the closing +// info. // 6) Both sides of a bidirectional server stream are opened. // 7) A server unidirectional stream is opened. // 8) A server unidirectional stream has been closed and both sides consumed the closing info. -// 9) A server unidirectional stream has been closed, but only the server has consumed the closing info. -// 10) A server unidirectional stream has been closed, but only the client has consumed the closing info. +// 9) A server unidirectional stream has been closed, but only the server has consumed the closing +// info. +// 10) A server unidirectional stream has been closed, but only the client has consumed the closing +// info. // 11) Both sides of a bidirectional stream have been closed and consumed by both sides. // 12) Both sides of a bidirectional stream have been closed, but not consumed by both sides. 
// 13) Multiples open streams diff --git a/neqo-http3/src/features/extended_connect/webtransport_session.rs b/neqo-http3/src/features/extended_connect/webtransport_session.rs index c446fd3843..adbdf07e11 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_session.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_session.rs @@ -6,6 +6,12 @@ #![allow(clippy::module_name_repetitions)] +use std::{any::Any, cell::RefCell, collections::BTreeSet, mem, rc::Rc}; + +use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; +use neqo_qpack::{QPackDecoder, QPackEncoder}; +use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId}; + use super::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}; use crate::{ frames::{FrameReader, StreamReaderRecvStreamWrapper, WebTransportFrame}, @@ -15,14 +21,6 @@ use crate::{ HttpRecvStreamEvents, Priority, PriorityHandler, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::{qtrace, Encoder, Header, MessageType, Role}; -use neqo_qpack::{QPackDecoder, QPackEncoder}; -use neqo_transport::{streams::SendOrder, Connection, DatagramTracking, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::collections::BTreeSet; -use std::mem; -use std::rc::Rc; #[derive(Debug, PartialEq)] enum SessionState { @@ -100,6 +98,7 @@ impl WebTransportSession { } /// # Panics + /// /// This function is only called with `RecvStream` and `SendStream` that also implement /// the http specific functions and `http_stream()` will never return `None`. #[must_use] @@ -134,8 +133,11 @@ impl WebTransportSession { } /// # Errors + /// /// The function can only fail if supplied headers are not valid http headers. + /// /// # Panics + /// /// `control_stream_send` implements the http specific functions and `http_stream()` /// will never return `None`. 
pub fn send_request(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()> { @@ -220,6 +222,7 @@ impl WebTransportSession { } /// # Panics + /// /// This cannot panic because headers are checked before this function called. pub fn maybe_check_headers(&mut self) { if SessionState::Negotiating != self.state { @@ -335,6 +338,7 @@ impl WebTransportSession { } /// # Errors + /// /// It may return an error if the frame is not correctly decoded. pub fn read_control_stream(&mut self, conn: &mut Connection) -> Res<()> { let (f, fin) = self @@ -373,8 +377,9 @@ impl WebTransportSession { } /// # Errors - /// Return an error if the stream was closed on the transport layer, but that information is not yet - /// consumed on the http/3 layer. + /// + /// Return an error if the stream was closed on the transport layer, but that information is not + /// yet consumed on the http/3 layer. pub fn close_session(&mut self, conn: &mut Connection, error: u32, message: &str) -> Res<()> { self.state = SessionState::Done; let close_frame = WebTransportFrame::CloseSession { @@ -399,6 +404,7 @@ impl WebTransportSession { } /// # Errors + /// /// Returns an error if the datagram exceeds the remote datagram size limit. pub fn send_datagram( &self, diff --git a/neqo-http3/src/features/extended_connect/webtransport_streams.rs b/neqo-http3/src/features/extended_connect/webtransport_streams.rs index ca918dce9e..84dcd20618 100644 --- a/neqo-http3/src/features/extended_connect/webtransport_streams.rs +++ b/neqo-http3/src/features/extended_connect/webtransport_streams.rs @@ -4,15 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + +use neqo_common::Encoder; +use neqo_transport::{Connection, RecvStreamStats, SendStreamStats, StreamId}; + use super::WebTransportSession; use crate::{ CloseType, Http3StreamInfo, Http3StreamType, ReceiveOutput, RecvStream, RecvStreamEvents, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::Encoder; -use neqo_transport::{Connection, RecvStreamStats, SendStreamStats, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; pub const WEBTRANSPORT_UNI_STREAM: u64 = 0x54; pub const WEBTRANSPORT_STREAM: u64 = 0x41; diff --git a/neqo-http3/src/features/mod.rs b/neqo-http3/src/features/mod.rs index 0e045ed80b..34e21f50ac 100644 --- a/neqo-http3/src/features/mod.rs +++ b/neqo-http3/src/features/mod.rs @@ -4,23 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{fmt::Debug, mem}; + +use neqo_common::qtrace; + use crate::{ client_events::Http3ClientEvents, settings::{HSettingType, HSettings}, }; -use neqo_common::qtrace; -use std::fmt::Debug; -use std::mem; pub mod extended_connect; /// States: /// - `Disable` - it is not turned on for this connection. -/// - `Negotiating` - the feature is enabled locally, but settings from the peer -/// have not been received yet. +/// - `Negotiating` - the feature is enabled locally, but settings from the peer have not been +/// received yet. /// - `Negotiated` - the settings have been received and both sides support the feature. -/// - `NegotiationFailed` - the settings have been received and the peer does not -/// support the feature. +/// - `NegotiationFailed` - the settings have been received and the peer does not support the +/// feature. #[derive(Debug)] pub enum NegotiationState { Disabled, diff --git a/neqo-http3/src/frames/hframe.rs b/neqo-http3/src/frames/hframe.rs index 28ce7608f9..83e69ba894 100644 --- a/neqo-http3/src/frames/hframe.rs +++ b/neqo-http3/src/frames/hframe.rs @@ -4,12 +4,13 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res}; +use std::{fmt::Debug, io::Write}; + use neqo_common::{Decoder, Encoder}; use neqo_crypto::random; use neqo_transport::StreamId; -use std::fmt::Debug; -use std::io::Write; + +use crate::{frames::reader::FrameDecoder, settings::HSettings, Error, Priority, Res}; pub(crate) type HFrameType = u64; diff --git a/neqo-http3/src/frames/reader.rs b/neqo-http3/src/frames/reader.rs index 9d81f2c1c1..5017c666a4 100644 --- a/neqo-http3/src/frames/reader.rs +++ b/neqo-http3/src/frames/reader.rs @@ -6,34 +6,39 @@ #![allow(clippy::module_name_repetitions)] -use crate::{Error, RecvStream, Res}; +use std::{convert::TryFrom, fmt::Debug}; + use neqo_common::{ hex_with_len, qtrace, Decoder, IncrementalDecoderBuffer, IncrementalDecoderIgnore, IncrementalDecoderUint, }; use neqo_transport::{Connection, StreamId}; -use std::convert::TryFrom; -use std::fmt::Debug; + +use crate::{Error, RecvStream, Res}; const MAX_READ_SIZE: usize = 4096; pub(crate) trait FrameDecoder { fn is_known_type(frame_type: u64) -> bool; /// # Errors + /// /// Returns `HttpFrameUnexpected` if frames is not alowed, i.e. is a `H3_RESERVED_FRAME_TYPES`. fn frame_type_allowed(_frame_type: u64) -> Res<()> { Ok(()) } + /// # Errors + /// /// If a frame cannot be properly decoded. fn decode(frame_type: u64, frame_len: u64, data: Option<&[u8]>) -> Res>; } pub(crate) trait StreamReader { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. - /// Return an error if the stream was closed on the transport layer, but that information is not yet - /// consumed on the http/3 layer. + /// Return an error if the stream was closed on the transport layer, but that information is not + /// yet consumed on the http/3 layer. 
fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)>; } @@ -50,6 +55,7 @@ impl<'a> StreamReaderConnectionWrapper<'a> { impl<'a> StreamReader for StreamReaderConnectionWrapper<'a> { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let res = self.conn.stream_recv(self.stream_id, buf)?; @@ -70,6 +76,7 @@ impl<'a> StreamReaderRecvStreamWrapper<'a> { impl<'a> StreamReader for StreamReaderRecvStreamWrapper<'a> { /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn read_data(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { self.recv_stream.read_data(self.conn, buf) @@ -146,7 +153,9 @@ impl FrameReader { } /// returns true if quic stream was closed. + /// /// # Errors + /// /// May return `HttpFrame` if a frame cannot be decoded. /// and `TransportStreamDoesNotExist` if `stream_recv` fails. pub fn receive>( @@ -186,6 +195,7 @@ impl FrameReader { } /// # Errors + /// /// May return `HttpFrame` if a frame cannot be decoded. fn consume>(&mut self, mut input: Decoder) -> Res> { match &mut self.state { diff --git a/neqo-http3/src/frames/tests/hframe.rs b/neqo-http3/src/frames/tests/hframe.rs index 54b7c94c8e..3da7e7fc36 100644 --- a/neqo-http3/src/frames/tests/hframe.rs +++ b/neqo-http3/src/frames/tests/hframe.rs @@ -4,15 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use neqo_common::{Decoder, Encoder}; +use neqo_transport::StreamId; +use test_fixture::fixture_init; + use super::enc_dec_hframe; use crate::{ frames::HFrame, settings::{HSetting, HSettingType, HSettings}, Priority, }; -use neqo_common::{Decoder, Encoder}; -use neqo_transport::StreamId; -use test_fixture::fixture_init; #[test] fn test_data_frame() { diff --git a/neqo-http3/src/frames/tests/mod.rs b/neqo-http3/src/frames/tests/mod.rs index 086af90300..33eea5497a 100644 --- a/neqo-http3/src/frames/tests/mod.rs +++ b/neqo-http3/src/frames/tests/mod.rs @@ -4,15 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::{ - reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, -}; +use std::mem; + use neqo_common::Encoder; use neqo_crypto::AuthenticationStatus; use neqo_transport::StreamType; -use std::mem; use test_fixture::{default_client, default_server, now}; +use crate::frames::{ + reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, +}; + #[allow(clippy::many_single_char_names)] pub(crate) fn enc_dec>(d: &Encoder, st: &str, remaining: usize) -> T { // For data, headers and push_promise we do not read all bytes from the buffer diff --git a/neqo-http3/src/frames/tests/reader.rs b/neqo-http3/src/frames/tests/reader.rs index 8923a0994b..fed1477ba4 100644 --- a/neqo-http3/src/frames/tests/reader.rs +++ b/neqo-http3/src/frames/tests/reader.rs @@ -4,6 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{fmt::Debug, mem}; + +use neqo_common::Encoder; +use neqo_transport::{Connection, StreamId, StreamType}; +use test_fixture::{connect, now}; + use crate::{ frames::{ reader::FrameDecoder, FrameReader, HFrame, StreamReaderConnectionWrapper, WebTransportFrame, @@ -11,11 +17,6 @@ use crate::{ settings::{HSetting, HSettingType, HSettings}, Error, }; -use neqo_common::Encoder; -use neqo_transport::{Connection, StreamId, StreamType}; -use std::fmt::Debug; -use std::mem; -use test_fixture::{connect, now}; struct FrameReaderTest { pub fr: FrameReader, diff --git a/neqo-http3/src/frames/wtframe.rs b/neqo-http3/src/frames/wtframe.rs index b5f76161c5..deb7a026a0 100644 --- a/neqo-http3/src/frames/wtframe.rs +++ b/neqo-http3/src/frames/wtframe.rs @@ -4,10 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{frames::reader::FrameDecoder, Error, Res}; -use neqo_common::{Decoder, Encoder}; use std::convert::TryFrom; +use neqo_common::{Decoder, Encoder}; + +use crate::{frames::reader::FrameDecoder, Error, Res}; + pub(crate) type WebTransportFrameType = u64; const WT_FRAME_CLOSE_SESSION: WebTransportFrameType = 0x2843; diff --git a/neqo-http3/src/headers_checks.rs b/neqo-http3/src/headers_checks.rs index 7d679409ad..9bf661c8fe 100644 --- a/neqo-http3/src/headers_checks.rs +++ b/neqo-http3/src/headers_checks.rs @@ -6,10 +6,12 @@ #![allow(clippy::unused_unit)] // see https://github.com/Lymia/enumset/issues/44 -use crate::{Error, MessageType, Res}; +use std::convert::TryFrom; + use enumset::{enum_set, EnumSet, EnumSetType}; use neqo_common::Header; -use std::convert::TryFrom; + +use crate::{Error, MessageType, Res}; #[derive(EnumSetType, Debug)] enum PseudoHeaderState { @@ -45,7 +47,9 @@ impl TryFrom<(MessageType, &str)> for PseudoHeaderState { } /// Check whether the response is informational(1xx). 
+/// /// # Errors +/// /// Returns an error if response headers do not contain /// a status header or if the value of the header is 101 or cannot be parsed. pub fn is_interim(headers: &[Header]) -> Res { @@ -89,7 +93,9 @@ fn track_pseudo( /// Checks if request/response headers are well formed, i.e. contain /// allowed pseudo headers and in a right order, etc. +/// /// # Errors +/// /// Returns an error if headers are not well formed. pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> { let mut method_value: Option<&str> = None; @@ -155,7 +161,9 @@ pub fn headers_valid(headers: &[Header], message_type: MessageType) -> Res<()> { /// Checks if trailers are well formed, i.e. pseudo headers are not /// allowed in trailers. +/// /// # Errors +/// /// Returns an error if trailers are not well formed. pub fn trailers_valid(headers: &[Header]) -> Res<()> { for header in headers { @@ -168,9 +176,10 @@ pub fn trailers_valid(headers: &[Header]) -> Res<()> { #[cfg(test)] mod tests { + use neqo_common::Header; + use super::headers_valid; use crate::MessageType; - use neqo_common::Header; fn create_connect_headers() -> Vec
{ vec![ diff --git a/neqo-http3/src/lib.rs b/neqo-http3/src/lib.rs index e0dc4c3c93..635707ca7c 100644 --- a/neqo-http3/src/lib.rs +++ b/neqo-http3/src/lib.rs @@ -160,14 +160,8 @@ mod server_events; mod settings; mod stream_type_reader; -use neqo_qpack::Error as QpackError; -pub use neqo_transport::{streams::SendOrder, Output, StreamId}; -use neqo_transport::{ - AppError, Connection, Error as TransportError, RecvStreamStats, SendStreamStats, -}; -use std::fmt::Debug; +use std::{any::Any, cell::RefCell, fmt::Debug, rc::Rc}; -use crate::priority::PriorityHandler; use buffered_send_stream::BufferedStream; pub use client_events::{Http3ClientEvent, WebTransportEvent}; pub use conn_params::Http3Parameters; @@ -177,23 +171,28 @@ use features::extended_connect::WebTransportSession; use frames::HFrame; pub use neqo_common::Header; use neqo_common::MessageType; +use neqo_qpack::Error as QpackError; +pub use neqo_transport::{streams::SendOrder, Output, StreamId}; +use neqo_transport::{ + AppError, Connection, Error as TransportError, RecvStreamStats, SendStreamStats, +}; pub use priority::Priority; pub use server::Http3Server; pub use server_events::{ Http3OrWebTransportStream, Http3ServerEvent, WebTransportRequest, WebTransportServerEvent, }; -use std::any::Any; -use std::cell::RefCell; -use std::rc::Rc; use stream_type_reader::NewStreamType; +use crate::priority::PriorityHandler; + type Res = Result; #[derive(Clone, Debug, PartialEq, Eq)] pub enum Error { HttpNoError, HttpGeneralProtocol, - HttpGeneralProtocolStream, //this is the same as the above but it should only close a stream not a connection. + HttpGeneralProtocolStream, /* this is the same as the above but it should only close a + * stream not a connection. */ // When using this error, you need to provide a value that is unique, which // will allow the specific error to be identified. This will be validated in CI. 
HttpInternal(u16), @@ -288,6 +287,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_send_errors(err: &Error) -> Self { @@ -304,6 +304,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_create_errors(err: &TransportError) -> Self { @@ -318,6 +319,7 @@ impl Error { } /// # Panics + /// /// On unexpected errors, in debug mode. #[must_use] pub fn map_stream_recv_errors(err: &Error) -> Self { @@ -345,8 +347,11 @@ impl Error { } /// # Errors - /// Any error is mapped to the indicated type. + /// + /// Any error is mapped to the indicated type. + /// /// # Panics + /// /// On internal errors, in debug mode. fn map_error(r: Result>, err: Self) -> Result { r.map_err(|e| { @@ -450,16 +455,23 @@ trait RecvStream: Stream { /// The stream reads data from the corresponding quic stream and returns `ReceiveOutput`. /// The function also returns true as the second parameter if the stream is done and /// could be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn receive(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>; + /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, etc. fn reset(&mut self, close_type: CloseType) -> Res<()>; + /// The function allows an app to read directly from the quic stream. The function /// returns the number of bytes written into `buf` and true/false if the stream is /// completely done and can be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. 
fn read_data(&mut self, _conn: &mut Connection, _buf: &mut [u8]) -> Res<(usize, bool)> { Err(Error::InvalidStreamId) @@ -483,7 +495,9 @@ trait HttpRecvStream: RecvStream { /// This function is similar to the receive function and has the same output, i.e. /// a `ReceiveOutput` enum and bool. The bool is true if the stream is completely done /// and can be forgotten, i.e. removed from all records. + /// /// # Errors + /// /// An error may happen while reading a stream, e.g. early close, protocol error, etc. fn header_unblocked(&mut self, conn: &mut Connection) -> Res<(ReceiveOutput, bool)>; @@ -552,6 +566,7 @@ trait HttpRecvStreamEvents: RecvStreamEvents { trait SendStream: Stream { /// # Errors + /// /// Error my occur during sending data, e.g. protocol error, etc. fn send(&mut self, conn: &mut Connection) -> Res<()>; fn has_data_to_send(&self) -> bool; @@ -559,14 +574,19 @@ trait SendStream: Stream { fn done(&self) -> bool; fn set_sendorder(&mut self, conn: &mut Connection, sendorder: Option) -> Res<()>; fn set_fairness(&mut self, conn: &mut Connection, fairness: bool) -> Res<()>; + /// # Errors + /// /// Error my occur during sending data, e.g. protocol error, etc. fn send_data(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res; /// # Errors + /// /// It may happen that the transport stream is already close. This is unlikely. fn close(&mut self, conn: &mut Connection) -> Res<()>; + /// # Errors + /// /// It may happen that the transport stream is already close. This is unlikely. fn close_with_message( &mut self, @@ -576,6 +596,7 @@ trait SendStream: Stream { ) -> Res<()> { Err(Error::InvalidStreamId) } + /// This function is called when sending side is closed abruptly by the peer or /// the application. fn handle_stop_sending(&mut self, close_type: CloseType); @@ -584,6 +605,7 @@ trait SendStream: Stream { } /// # Errors + /// /// It may happen that the transport stream is already close. This is unlikely. 
fn send_data_atomic(&mut self, _conn: &mut Connection, _buf: &[u8]) -> Res<()> { Err(Error::InvalidStreamId) @@ -599,7 +621,9 @@ trait HttpSendStream: SendStream { /// This function is used to supply headers to a http message. The /// function is used for request headers, response headers, 1xx response and /// trailers. + /// /// # Errors + /// /// This can also return an error if the underlying stream is closed. fn send_headers(&mut self, headers: &[Header], conn: &mut Connection) -> Res<()>; fn set_new_listener(&mut self, _conn_events: Box) {} diff --git a/neqo-http3/src/priority.rs b/neqo-http3/src/priority.rs index 6a391de578..f2651d3bb5 100644 --- a/neqo-http3/src/priority.rs +++ b/neqo-http3/src/priority.rs @@ -1,8 +1,9 @@ -use crate::{frames::HFrame, Error, Header, Res}; +use std::{convert::TryFrom, fmt}; + use neqo_transport::StreamId; use sfv::{BareItem, Item, ListEntry, Parser}; -use std::convert::TryFrom; -use std::fmt; + +use crate::{frames::HFrame, Error, Header, Res}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub struct Priority { @@ -21,6 +22,7 @@ impl Default for Priority { impl Priority { /// # Panics + /// /// If an invalid urgency (>7 is given) #[must_use] pub fn new(urgency: u8, incremental: bool) -> Priority { @@ -44,9 +46,13 @@ impl Priority { } /// Constructs a priority from raw bytes (either a field value of frame content). + /// /// # Errors + /// /// When the contained syntax is invalid. + /// /// # Panics + /// /// Never, but the compiler is not smart enough to work that out. 
pub fn from_bytes(bytes: &[u8]) -> Res { let dict = Parser::parse_dictionary(bytes).map_err(|_| Error::HttpFrame)?; @@ -149,10 +155,10 @@ impl PriorityHandler { #[cfg(test)] mod test { - use crate::priority::PriorityHandler; - use crate::{HFrame, Priority}; use neqo_transport::StreamId; + use crate::{priority::PriorityHandler, HFrame, Priority}; + #[test] fn priority_updates_ignore_same() { let mut p = PriorityHandler::new(false, Priority::new(5, false)); @@ -183,7 +189,8 @@ mod test { let mut p = PriorityHandler::new(false, Priority::new(5, false)); assert!(p.maybe_update_priority(Priority::new(6, false))); assert!(p.maybe_update_priority(Priority::new(7, false))); - // updating two times with a different priority -> the last priority update should be in the next frame + // updating two times with a different priority -> the last priority update should be in the + // next frame let expected = HFrame::PriorityUpdateRequest { element_id: 4, priority: Priority::new(7, false), diff --git a/neqo-http3/src/push_controller.rs b/neqo-http3/src/push_controller.rs index 62171039e3..c4591991ae 100644 --- a/neqo-http3/src/push_controller.rs +++ b/neqo-http3/src/push_controller.rs @@ -3,28 +3,33 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::client_events::{Http3ClientEvent, Http3ClientEvents}; -use crate::connection::Http3Connection; -use crate::frames::HFrame; -use crate::{CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res}; +use std::{ + cell::RefCell, + collections::VecDeque, + convert::TryFrom, + fmt::{Debug, Display}, + mem, + rc::Rc, + slice::SliceIndex, +}; + use neqo_common::{qerror, qinfo, qtrace, Header}; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::fmt::Display; -use std::mem; -use std::rc::Rc; -use std::slice::SliceIndex; + +use crate::{ + client_events::{Http3ClientEvent, Http3ClientEvents}, + connection::Http3Connection, + frames::HFrame, + CloseType, Error, Http3StreamInfo, HttpRecvStreamEvents, RecvStreamEvents, Res, +}; /// `PushStates`: -/// `Init`: there is no push stream nor a push promise. This state is only used to keep track of opened and closed -/// push streams. +/// `Init`: there is no push stream nor a push promise. This state is only used to keep track of +/// opened and closed push streams. /// `PushPromise`: the push has only ever receive a pushpromise frame -/// `OnlyPushStream`: there is only a push stream. All push stream events, i.e. `PushHeaderReady` and -/// `PushDataReadable` will be delayed until a push promise is received (they are kept in -/// `events`). +/// `OnlyPushStream`: there is only a push stream. All push stream events, i.e. `PushHeaderReady` +/// and `PushDataReadable` will be delayed until a push promise is received +/// (they are kept in `events`). /// `Active`: there is a push steam and at least one push promise frame. /// `Close`: the push stream has been closed or reset already. #[derive(Debug, PartialEq, Clone)] @@ -122,21 +127,22 @@ impl ActivePushStreams { /// `PushController` keeps information about push stream states. 
/// -/// A `PushStream` calls `add_new_push_stream` that may change the push state from Init to `OnlyPushStream` or from -/// `PushPromise` to `Active`. If a stream has already been closed `add_new_push_stream` returns false (the `PushStream` -/// will close the transport stream). +/// A `PushStream` calls `add_new_push_stream` that may change the push state from Init to +/// `OnlyPushStream` or from `PushPromise` to `Active`. If a stream has already been closed +/// `add_new_push_stream` returns false (the `PushStream` will close the transport stream). /// A `PushStream` calls `push_stream_reset` if the transport stream has been canceled. /// When a push stream is done it calls `close`. /// /// The `PushController` handles: -/// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from `OnlyPushStream` to -/// `Active`. Frames for a closed steams are ignored. -/// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` or `Active`, any -/// posted events will be removed and a `PushCanceled` event will be posted. If a push is in -/// state `OnlyPushStream` or `Active` the transport stream and the `PushStream` will be closed. -/// The frame will be ignored for already closed pushes. -/// Application calling cancel: the actions are similar to the `CANCEL_PUSH` frame. The difference is that -/// `PushCanceled` will not be posted and a `CANCEL_PUSH` frame may be sent. +/// `PUSH_PROMISE` frame: frames may change the push state from Init to `PushPromise` and from +/// `OnlyPushStream` to `Active`. Frames for a closed steams are ignored. +/// `CANCEL_PUSH` frame: (`handle_cancel_push` will be called). If a push is in state `PushPromise` +/// or `Active`, any posted events will be removed and a `PushCanceled` event +/// will be posted. If a push is in state `OnlyPushStream` or `Active` the +/// transport stream and the `PushStream` will be closed. 
The frame will be +/// ignored for already closed pushes. Application calling cancel: the actions are similar to the +/// `CANCEL_PUSH` frame. The difference is that `PushCanceled` will not +/// be posted and a `CANCEL_PUSH` frame may be sent. #[derive(Debug)] pub(crate) struct PushController { max_concurent_push: u64, @@ -145,8 +151,8 @@ pub(crate) struct PushController { // We keep a stream until the stream has been closed. push_streams: ActivePushStreams, // The keeps the next consecutive push_id that should be open. - // All push_id < next_push_id_to_open are in the push_stream lists. If they are not in the list they have - // been already closed. + // All push_id < next_push_id_to_open are in the push_stream lists. If they are not in the list + // they have been already closed. conn_events: Http3ClientEvents, } @@ -169,7 +175,9 @@ impl Display for PushController { impl PushController { /// A new `push_promise` has been received. + /// /// # Errors + /// /// `HttpId` if `push_id` greater than it is allowed has been received. pub fn new_push_promise( &mut self, @@ -338,8 +346,9 @@ impl PushController { match self.push_streams.get(push_id) { None => { qtrace!("Push has already been closed."); - // If we have some events for the push_id in the event queue, the caller still does not - // not know that the push has been closed. Otherwise return InvalidStreamId. + // If we have some events for the push_id in the event queue, the caller still does + // not not know that the push has been closed. Otherwise return + // InvalidStreamId. 
if self.conn_events.has_push(push_id) { self.conn_events.remove_events_for_push_id(push_id); Ok(()) diff --git a/neqo-http3/src/qlog.rs b/neqo-http3/src/qlog.rs index 84c13dad43..c3a13fd19f 100644 --- a/neqo-http3/src/qlog.rs +++ b/neqo-http3/src/qlog.rs @@ -8,14 +8,13 @@ use std::convert::TryFrom; +use neqo_common::qlog::NeqoQlog; +use neqo_transport::StreamId; use qlog::{ self, events::{DataRecipient, EventData}, }; -use neqo_common::qlog::NeqoQlog; -use neqo_transport::StreamId; - pub fn h3_data_moved_up(qlog: &mut NeqoQlog, stream_id: StreamId, amount: usize) { qlog.add_event_data(|| { let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved { diff --git a/neqo-http3/src/qpack_decoder_receiver.rs b/neqo-http3/src/qpack_decoder_receiver.rs index 3cdfdf74cd..46b9ca590b 100644 --- a/neqo-http3/src/qpack_decoder_receiver.rs +++ b/neqo-http3/src/qpack_decoder_receiver.rs @@ -4,11 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; +use std::{cell::RefCell, rc::Rc}; + use neqo_qpack::QPackDecoder; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; + +use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; #[derive(Debug)] pub(crate) struct DecoderRecvStream { diff --git a/neqo-http3/src/qpack_encoder_receiver.rs b/neqo-http3/src/qpack_encoder_receiver.rs index efe234173f..76c779bcf2 100644 --- a/neqo-http3/src/qpack_encoder_receiver.rs +++ b/neqo-http3/src/qpack_encoder_receiver.rs @@ -4,11 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; +use std::{cell::RefCell, rc::Rc}; + use neqo_qpack::QPackEncoder; use neqo_transport::{Connection, StreamId}; -use std::cell::RefCell; -use std::rc::Rc; + +use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; #[derive(Debug)] pub(crate) struct EncoderRecvStream { diff --git a/neqo-http3/src/recv_message.rs b/neqo-http3/src/recv_message.rs index dd27c51337..36e8f65b19 100644 --- a/neqo-http3/src/recv_message.rs +++ b/neqo-http3/src/recv_message.rs @@ -4,24 +4,22 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS}; -use crate::push_controller::PushController; +use std::{ + any::Any, cell::RefCell, cmp::min, collections::VecDeque, convert::TryFrom, fmt::Debug, rc::Rc, +}; + +use neqo_common::{qdebug, qinfo, qtrace, Header}; +use neqo_qpack::decoder::QPackDecoder; +use neqo_transport::{Connection, StreamId}; + use crate::{ + frames::{FrameReader, HFrame, StreamReaderConnectionWrapper, H3_FRAME_TYPE_HEADERS}, headers_checks::{headers_valid, is_interim}, priority::PriorityHandler, + push_controller::PushController, qlog, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpRecvStream, HttpRecvStreamEvents, MessageType, Priority, ReceiveOutput, RecvStream, Res, Stream, }; -use neqo_common::{qdebug, qinfo, qtrace, Header}; -use neqo_qpack::decoder::QPackDecoder; -use neqo_transport::{Connection, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::cmp::min; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::rc::Rc; #[allow(clippy::module_name_repetitions)] pub(crate) struct RecvMessageInfo { @@ -348,7 +346,8 @@ impl RecvMessage { panic!("Stream readable after being closed!"); } RecvMessageState::ExtendedConnect => { - // Ignore read event, this 
request is waiting to be picked up by a new WebTransportSession + // Ignore read event, this request is waiting to be picked up by a new + // WebTransportSession break Ok(()); } }; diff --git a/neqo-http3/src/request_target.rs b/neqo-http3/src/request_target.rs index a58445b5d7..28bc22ac2d 100644 --- a/neqo-http3/src/request_target.rs +++ b/neqo-http3/src/request_target.rs @@ -7,6 +7,7 @@ #![allow(clippy::module_name_repetitions)] use std::fmt::{Debug, Formatter}; + use url::{ParseError, Url}; pub trait RequestTarget: Debug { @@ -58,7 +59,9 @@ pub trait AsRequestTarget<'x> { type Target: RequestTarget; type Error; /// Produce a `RequestTarget` that refers to `self`. + /// /// # Errors + /// /// This method can generate an error of type `Self::Error` /// if the conversion is unsuccessful. fn as_request_target(&'x self) -> Result; diff --git a/neqo-http3/src/send_message.rs b/neqo-http3/src/send_message.rs index 531f804937..96156938a0 100644 --- a/neqo-http3/src/send_message.rs +++ b/neqo-http3/src/send_message.rs @@ -4,22 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::frames::HFrame; +use std::{any::Any, cell::RefCell, cmp::min, fmt::Debug, rc::Rc}; + +use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; +use neqo_qpack::encoder::QPackEncoder; +use neqo_transport::{streams::SendOrder, Connection, StreamId}; + use crate::{ + frames::HFrame, headers_checks::{headers_valid, is_interim, trailers_valid}, qlog, BufferedStream, CloseType, Error, Http3StreamInfo, Http3StreamType, HttpSendStream, Res, SendStream, SendStreamEvents, Stream, }; -use neqo_common::{qdebug, qinfo, qtrace, Encoder, Header, MessageType}; -use neqo_qpack::encoder::QPackEncoder; -use neqo_transport::{streams::SendOrder, Connection, StreamId}; -use std::any::Any; -use std::cell::RefCell; -use std::cmp::min; -use std::fmt::Debug; -use std::rc::Rc; - const MAX_DATA_HEADER_SIZE_2: usize = (1 << 6) - 1; // Maximal amount of data with DATA frame header size 2 const MAX_DATA_HEADER_SIZE_2_LIMIT: usize = MAX_DATA_HEADER_SIZE_2 + 3; // 63 + 3 (size of the next buffer data frame header) const MAX_DATA_HEADER_SIZE_3: usize = (1 << 14) - 1; // Maximal amount of data with DATA frame header size 3 @@ -134,6 +131,7 @@ impl SendMessage { } /// # Errors + /// /// `ClosedCriticalStream` if the encoder stream is closed. /// `InternalError` if an unexpected error occurred. fn encode( @@ -236,11 +234,13 @@ impl SendStream for SendMessage { } /// # Errors + /// /// `InternalError` if an unexpected error occurred. /// `InvalidStreamId` if the stream does not exist, /// `AlreadyClosed` if the stream has already been closed. - /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if `process_output` - /// has not been called when needed, and HTTP3 layer has not picked up the info that the stream has been closed.) 
+ /// `TransportStreamDoesNotExist` if the transport stream does not exist (this may happen if + /// `process_output` has not been called when needed, and HTTP3 layer has not picked up the + /// info that the stream has been closed.) fn send(&mut self, conn: &mut Connection) -> Res<()> { let sent = Error::map_error(self.stream.send_buffer(conn), Error::HttpInternal(5))?; qlog::h3_data_moved_down(conn.qlog_mut(), self.stream_id(), sent); diff --git a/neqo-http3/src/server.rs b/neqo-http3/src/server.rs index c432039972..b29f715451 100644 --- a/neqo-http3/src/server.rs +++ b/neqo-http3/src/server.rs @@ -6,6 +6,21 @@ #![allow(clippy::module_name_repetitions)] +use std::{ + cell::{RefCell, RefMut}, + collections::HashMap, + path::PathBuf, + rc::Rc, + time::Instant, +}; + +use neqo_common::{qtrace, Datagram}; +use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker}; +use neqo_transport::{ + server::{ActiveConnectionRef, Server, ValidateAddress}, + ConnectionIdGenerator, Output, +}; + use crate::{ connection::Http3State, connection_server::Http3ServerHandler, @@ -16,19 +31,6 @@ use crate::{ settings::HttpZeroRttChecker, Http3Parameters, Http3StreamInfo, Res, }; -use neqo_common::{qtrace, Datagram}; -use neqo_crypto::{AntiReplay, Cipher, PrivateKey, PublicKey, ZeroRttChecker}; -use neqo_transport::{ - server::{ActiveConnectionRef, Server, ValidateAddress}, - ConnectionIdGenerator, Output, -}; -use std::{ - cell::{RefCell, RefMut}, - collections::HashMap, - path::PathBuf, - rc::Rc, - time::Instant, -}; type HandlerRef = Rc>; @@ -49,6 +51,7 @@ impl ::std::fmt::Display for Http3Server { impl Http3Server { /// # Errors + /// /// Making a `neqo_transport::Server` may produce an error. This can only be a crypto error if /// the socket can't be created or configured. pub fn new( @@ -92,6 +95,7 @@ impl Http3Server { /// Enable encrypted client hello (ECH). /// /// # Errors + /// /// Only when NSS can't serialize a configuration. 
pub fn enable_ech( &mut self, @@ -309,24 +313,26 @@ fn prepare_data( #[cfg(test)] mod tests { - use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell}; - use crate::{Error, HFrame, Header, Http3Parameters, Priority}; + use std::{ + collections::HashMap, + mem, + ops::{Deref, DerefMut}, + }; + use neqo_common::{event::Provider, Encoder}; use neqo_crypto::{AuthenticationStatus, ZeroRttCheckResult, ZeroRttChecker}; use neqo_qpack::{encoder::QPackEncoder, QpackSettings}; use neqo_transport::{ Connection, ConnectionError, ConnectionEvent, State, StreamId, StreamType, ZeroRttState, }; - use std::{ - collections::HashMap, - mem, - ops::{Deref, DerefMut}, - }; use test_fixture::{ anti_replay, default_client, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN, DEFAULT_KEYS, }; + use super::{Http3Server, Http3ServerEvent, Http3State, Rc, RefCell}; + use crate::{Error, HFrame, Header, Http3Parameters, Priority}; + const DEFAULT_SETTINGS: QpackSettings = QpackSettings { max_table_size_encoder: 100, max_table_size_decoder: 100, diff --git a/neqo-http3/src/server_connection_events.rs b/neqo-http3/src/server_connection_events.rs index f56288e204..cbc8e6d56e 100644 --- a/neqo-http3/src/server_connection_events.rs +++ b/neqo-http3/src/server_connection_events.rs @@ -4,17 +4,16 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::connection::Http3State; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + +use neqo_common::Header; +use neqo_transport::{AppError, StreamId}; + use crate::{ + connection::Http3State, features::extended_connect::{ExtendedConnectEvents, ExtendedConnectType, SessionCloseReason}, CloseType, Http3StreamInfo, HttpRecvStreamEvents, Priority, RecvStreamEvents, SendStreamEvents, }; -use neqo_common::Header; -use neqo_transport::AppError; -use neqo_transport::StreamId; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; #[derive(Debug, PartialEq, Eq, Clone)] pub(crate) enum Http3ServerConnEvent { diff --git a/neqo-http3/src/server_events.rs b/neqo-http3/src/server_events.rs index e0cc84ed4c..4be48363df 100644 --- a/neqo-http3/src/server_events.rs +++ b/neqo-http3/src/server_events.rs @@ -6,20 +6,25 @@ #![allow(clippy::module_name_repetitions)] -use crate::connection::{Http3State, WebTransportSessionAcceptAction}; -use crate::connection_server::Http3ServerHandler; -use crate::{ - features::extended_connect::SessionCloseReason, Http3StreamInfo, Http3StreamType, Priority, Res, +use std::{ + cell::RefCell, + collections::VecDeque, + convert::TryFrom, + ops::{Deref, DerefMut}, + rc::Rc, }; + use neqo_common::{qdebug, qinfo, Encoder, Header}; -use neqo_transport::server::ActiveConnectionRef; -use neqo_transport::{AppError, Connection, DatagramTracking, StreamId, StreamType}; +use neqo_transport::{ + server::ActiveConnectionRef, AppError, Connection, DatagramTracking, StreamId, StreamType, +}; -use std::cell::RefCell; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::ops::{Deref, DerefMut}; -use std::rc::Rc; +use crate::{ + connection::{Http3State, WebTransportSessionAcceptAction}, + connection_server::Http3ServerHandler, + features::extended_connect::SessionCloseReason, + Http3StreamInfo, Http3StreamType, Priority, Res, +}; #[derive(Debug, Clone)] pub struct StreamHandler { @@ -57,7 +62,9 @@ impl StreamHandler { } 
/// Supply a response header to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_headers(&mut self, headers: &[Header]) -> Res<()> { self.handler.borrow_mut().send_headers( @@ -68,7 +75,9 @@ impl StreamHandler { } /// Supply response data to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_data(&mut self, buf: &[u8]) -> Res { self.handler @@ -77,7 +86,9 @@ impl StreamHandler { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { self.handler @@ -86,7 +97,9 @@ impl StreamHandler { } /// Request a peer to stop sending a stream. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_stop_sending(&mut self, app_error: AppError) -> Res<()> { qdebug!( @@ -103,7 +116,9 @@ impl StreamHandler { } /// Reset sending side of a stream. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_reset_send(&mut self, app_error: AppError) -> Res<()> { qdebug!( @@ -120,7 +135,9 @@ impl StreamHandler { } /// Reset a stream/request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore pub fn cancel_fetch(&mut self, app_error: AppError) -> Res<()> { qdebug!([self], "reset error:{}.", app_error); @@ -159,14 +176,18 @@ impl Http3OrWebTransportStream { } /// Supply a response header to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn send_headers(&mut self, headers: &[Header]) -> Res<()> { self.stream_handler.send_headers(headers) } /// Supply response data to a request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. 
pub fn send_data(&mut self, data: &[u8]) -> Res { qinfo!([self], "Set new response."); @@ -174,7 +195,9 @@ impl Http3OrWebTransportStream { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn stream_close_send(&mut self) -> Res<()> { qinfo!([self], "Set new response."); @@ -243,7 +266,9 @@ impl WebTransportRequest { } /// Respond to a `WebTransport` session request. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn response(&mut self, accept: &WebTransportSessionAcceptAction) -> Res<()> { qinfo!([self], "Set a response for a WebTransport session."); @@ -258,6 +283,7 @@ impl WebTransportRequest { } /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// Also return an error if the stream was closed on the transport layer, /// but that information is not yet consumed on the http/3 layer. @@ -279,7 +305,9 @@ impl WebTransportRequest { } /// Close sending side. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. pub fn create_stream(&mut self, stream_type: StreamType) -> Res { let session_id = self.stream_handler.stream_id(); @@ -301,7 +329,9 @@ impl WebTransportRequest { } /// Send `WebTransport` datagram. + /// /// # Errors + /// /// It may return `InvalidStreamId` if a stream does not exist anymore. /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. @@ -326,9 +356,13 @@ impl WebTransportRequest { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. + /// /// # Errors + /// /// The function returns `NotAvailable` if datagrams are not enabled. + /// /// # Panics + /// /// This cannot panic. The max varint length is 8. 
pub fn max_datagram_size(&self) -> Res { let max_size = self.stream_handler.conn.borrow().max_datagram_size()?; diff --git a/neqo-http3/src/settings.rs b/neqo-http3/src/settings.rs index 1e952dae6d..9cd4b994b7 100644 --- a/neqo-http3/src/settings.rs +++ b/neqo-http3/src/settings.rs @@ -6,10 +6,12 @@ #![allow(clippy::module_name_repetitions)] -use crate::{Error, Http3Parameters, Res}; +use std::ops::Deref; + use neqo_common::{Decoder, Encoder}; use neqo_crypto::{ZeroRttCheckResult, ZeroRttChecker}; -use std::ops::Deref; + +use crate::{Error, Http3Parameters, Res}; type SettingsType = u64; @@ -120,6 +122,7 @@ impl HSettings { } /// # Errors + /// /// Returns an error if settings types are reserved of settings value are not permitted. pub fn decode_frame_contents(&mut self, dec: &mut Decoder) -> Res<()> { while dec.remaining() > 0 { diff --git a/neqo-http3/src/stream_type_reader.rs b/neqo-http3/src/stream_type_reader.rs index 775d8dc233..f36181d3b1 100644 --- a/neqo-http3/src/stream_type_reader.rs +++ b/neqo-http3/src/stream_type_reader.rs @@ -6,14 +6,15 @@ #![allow(clippy::module_name_repetitions)] -use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL; -use crate::frames::H3_FRAME_TYPE_HEADERS; -use crate::{CloseType, Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream}; use neqo_common::{qtrace, Decoder, IncrementalDecoderUint, Role}; -use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER; -use neqo_qpack::encoder::QPACK_UNI_STREAM_TYPE_ENCODER; +use neqo_qpack::{decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STREAM_TYPE_ENCODER}; use neqo_transport::{Connection, StreamId, StreamType}; +use crate::{ + control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, CloseType, + Error, Http3StreamType, ReceiveOutput, RecvStream, Res, Stream, +}; + pub(crate) const HTTP3_UNI_STREAM_TYPE_PUSH: u64 = 0x1; pub(crate) const WEBTRANSPORT_UNI_STREAM: u64 = 0x54; pub(crate) const WEBTRANSPORT_STREAM: u64 = 0x41; @@ -33,7 
+34,9 @@ impl NewStreamType { /// Get the final `NewStreamType` from a stream type. All streams, except Push stream, /// are identified by the type only. This function will return None for the Push stream /// because it needs the ID besides the type. - /// # Error + /// + /// # Errors + /// /// Push streams received by the server are not allowed and this function will return /// `HttpStreamCreation` error. fn final_stream_type( @@ -67,12 +70,11 @@ impl NewStreamType { /// `NewStreamHeadReader` reads the head of an unidirectional stream to identify the stream. /// There are 2 type of streams: -/// - streams identified by the single type (varint encoded). Most streams belong to -/// this category. The `NewStreamHeadReader` will switch from `ReadType`to `Done` state. -/// - streams identified by the type and the ID (both varint encoded). For example, a -/// push stream is identified by the type and `PushId`. After reading the type in -/// the `ReadType` state, `NewStreamHeadReader` changes to `ReadId` state and from there -/// to `Done` state +/// - streams identified by the single type (varint encoded). Most streams belong to this category. +/// The `NewStreamHeadReader` will switch from `ReadType`to `Done` state. +/// - streams identified by the type and the ID (both varint encoded). For example, a push stream +/// is identified by the type and `PushId`. After reading the type in the `ReadType` state, +/// `NewStreamHeadReader` changes to `ReadId` state and from there to `Done` state #[derive(Debug)] pub(crate) enum NewStreamHeadReader { ReadType { @@ -140,12 +142,12 @@ impl NewStreamHeadReader { role, stream_id, .. } => { // final_stream_type may return: - // - an error if a stream type is not allowed for the role, e.g. Push - // stream received at the server. + // - an error if a stream type is not allowed for the role, e.g. Push stream + // received at the server. 
// - a final type if a stream is only identify by the type // - None - if a stream is not identified by the type only, but it needs - // additional data from the header to produce the final type, e.g. - // a push stream needs pushId as well. + // additional data from the header to produce the final type, e.g. a push + // stream needs pushId as well. let final_type = NewStreamType::final_stream_type(output, stream_id.stream_type(), *role); match (&final_type, fin) { @@ -234,20 +236,23 @@ impl RecvStream for NewStreamHeadReader { #[cfg(test)] mod tests { - use super::{ - NewStreamHeadReader, HTTP3_UNI_STREAM_TYPE_PUSH, WEBTRANSPORT_STREAM, - WEBTRANSPORT_UNI_STREAM, + use std::mem; + + use neqo_common::{Encoder, Role}; + use neqo_qpack::{ + decoder::QPACK_UNI_STREAM_TYPE_DECODER, encoder::QPACK_UNI_STREAM_TYPE_ENCODER, }; use neqo_transport::{Connection, StreamId, StreamType}; - use std::mem; use test_fixture::{connect, now}; - use crate::control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL; - use crate::frames::H3_FRAME_TYPE_HEADERS; - use crate::{CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res}; - use neqo_common::{Encoder, Role}; - use neqo_qpack::decoder::QPACK_UNI_STREAM_TYPE_DECODER; - use neqo_qpack::encoder::QPACK_UNI_STREAM_TYPE_ENCODER; + use super::{ + NewStreamHeadReader, HTTP3_UNI_STREAM_TYPE_PUSH, WEBTRANSPORT_STREAM, + WEBTRANSPORT_UNI_STREAM, + }; + use crate::{ + control_stream_local::HTTP3_UNI_STREAM_TYPE_CONTROL, frames::H3_FRAME_TYPE_HEADERS, + CloseType, Error, NewStreamType, ReceiveOutput, RecvStream, Res, + }; struct Test { conn_c: Connection, @@ -397,7 +402,8 @@ mod tests { let mut t = Test::new(StreamType::UniDi, Role::Server); t.decode( - &[H3_FRAME_TYPE_HEADERS], // this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which is not aallowed on the server side. + &[H3_FRAME_TYPE_HEADERS], /* this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH which + * is not aallowed on the server side. 
*/ false, &Err(Error::HttpStreamCreation), true, @@ -413,7 +419,8 @@ mod tests { let mut t = Test::new(StreamType::UniDi, Role::Client); t.decode( - &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], // this is the same as a HTTP3_UNI_STREAM_TYPE_PUSH + &[H3_FRAME_TYPE_HEADERS, 0xaaaa_aaaa], /* this is the same as a + * HTTP3_UNI_STREAM_TYPE_PUSH */ false, &Ok(( ReceiveOutput::NewStream(NewStreamType::Push(0xaaaa_aaaa)), diff --git a/neqo-http3/tests/httpconn.rs b/neqo-http3/tests/httpconn.rs index fc49851e5b..a0b2bcdb80 100644 --- a/neqo-http3/tests/httpconn.rs +++ b/neqo-http3/tests/httpconn.rs @@ -6,6 +6,11 @@ #![allow(unused_assignments)] +use std::{ + mem, + time::{Duration, Instant}, +}; + use neqo_common::{event::Provider, qtrace, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_http3::{ @@ -13,8 +18,6 @@ use neqo_http3::{ Http3ServerEvent, Http3State, Priority, }; use neqo_transport::{ConnectionError, ConnectionParameters, Error, Output, StreamType}; -use std::mem; -use std::time::{Duration, Instant}; use test_fixture::*; const RESPONSE_DATA: &[u8] = &[0x61, 0x62, 0x63]; @@ -96,7 +99,7 @@ fn connect_peers(hconn_c: &mut Http3Client, hconn_s: &mut Http3Server) -> Option let out = hconn_c.process(None, now()); // Initial let out = hconn_s.process(out.as_dgram_ref(), now()); // Initial + Handshake let out = hconn_c.process(out.as_dgram_ref(), now()); // ACK - mem::drop(hconn_s.process(out.as_dgram_ref(), now())); //consume ACK + mem::drop(hconn_s.process(out.as_dgram_ref(), now())); // consume ACK let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); hconn_c.authenticated(AuthenticationStatus::Ok, now()); @@ -126,7 +129,7 @@ fn connect_peers_with_network_propagation_delay( now += net_delay; let out = hconn_c.process(out.as_dgram_ref(), now); // ACK now += net_delay; - let out = hconn_s.process(out.as_dgram_ref(), now); //consume ACK + let out = 
hconn_s.process(out.as_dgram_ref(), now); // consume ACK assert!(out.dgram().is_none()); let authentication_needed = |e| matches!(e, Http3ClientEvent::AuthenticationNeeded); assert!(hconn_c.events().any(authentication_needed)); diff --git a/neqo-http3/tests/priority.rs b/neqo-http3/tests/priority.rs index 4ecd2e7a40..cdec161058 100644 --- a/neqo-http3/tests/priority.rs +++ b/neqo-http3/tests/priority.rs @@ -4,14 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use neqo_common::event::Provider; +use std::time::Instant; +use neqo_common::event::Provider; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ Header, Http3Client, Http3ClientEvent, Http3Server, Http3ServerEvent, Http3State, Priority, }; - -use std::time::Instant; use test_fixture::*; fn exchange_packets(client: &mut Http3Client, server: &mut Http3Server) { diff --git a/neqo-http3/tests/webtransport.rs b/neqo-http3/tests/webtransport.rs index fb82350dd3..4e943d86cb 100644 --- a/neqo-http3/tests/webtransport.rs +++ b/neqo-http3/tests/webtransport.rs @@ -4,6 +4,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, rc::Rc}; + use neqo_common::{event::Provider, Header}; use neqo_crypto::AuthenticationStatus; use neqo_http3::{ @@ -12,8 +14,6 @@ use neqo_http3::{ WebTransportSessionAcceptAction, }; use neqo_transport::{StreamId, StreamType}; -use std::cell::RefCell; -use std::rc::Rc; use test_fixture::{ addr, anti_replay, fixture_init, now, CountingConnectionIdGenerator, DEFAULT_ALPN_H3, DEFAULT_KEYS, DEFAULT_SERVER_NAME, diff --git a/neqo-interop/src/main.rs b/neqo-interop/src/main.rs index 254b953f22..b1dae43c9c 100644 --- a/neqo-interop/src/main.rs +++ b/neqo-interop/src/main.rs @@ -7,14 +7,6 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use neqo_common::{event::Provider, hex, Datagram, IpTos}; -use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; -use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; -use neqo_transport::{ - Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, - Error, Output, State, StreamId, StreamType, -}; - use std::{ cell::RefCell, cmp::min, @@ -31,6 +23,14 @@ use std::{ thread, time::{Duration, Instant}, }; + +use neqo_common::{event::Provider, hex, Datagram, IpTos}; +use neqo_crypto::{init, AuthenticationStatus, ResumptionToken}; +use neqo_http3::{Header, Http3Client, Http3ClientEvent, Http3Parameters, Http3State, Priority}; +use neqo_transport::{ + Connection, ConnectionError, ConnectionEvent, ConnectionParameters, EmptyConnectionIdGenerator, + Error, Output, State, StreamId, StreamType, +}; use structopt::StructOpt; #[derive(Debug, StructOpt, Clone)] @@ -560,7 +560,8 @@ fn test_h3(nctx: &NetworkCtx, peer: &Peer, client: Connection, test: &Test) -> R } if *test == Test::D { - // Send another request, when the first one was send we probably did not have the peer's qpack parameter. + // Send another request, when the first one was send we probably did not have the peer's + // qpack parameter. 
let client_stream_id = hc .h3 .fetch( diff --git a/neqo-qpack/src/decoder.rs b/neqo-qpack/src/decoder.rs index 5b3b93dcee..2119db0256 100644 --- a/neqo-qpack/src/decoder.rs +++ b/neqo-qpack/src/decoder.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::convert::TryFrom; + +use neqo_common::{qdebug, Header}; +use neqo_transport::{Connection, StreamId}; + use crate::{ decoder_instructions::DecoderInstruction, encoder_instructions::{DecodedEncoderInstruction, EncoderInstructionReader}, @@ -14,9 +19,6 @@ use crate::{ table::HeaderTable, Error, QpackSettings, Res, }; -use neqo_common::{qdebug, Header}; -use neqo_transport::{Connection, StreamId}; -use std::convert::TryFrom; pub const QPACK_UNI_STREAM_TYPE_DECODER: u64 = 0x3; @@ -30,12 +32,13 @@ pub struct QPackDecoder { local_stream_id: Option, max_table_size: u64, max_blocked_streams: usize, - blocked_streams: Vec<(StreamId, u64)>, //stream_id and requested inserts count. + blocked_streams: Vec<(StreamId, u64)>, // stream_id and requested inserts count. stats: Stats, } impl QPackDecoder { /// # Panics + /// /// If settings include invalid values. #[must_use] pub fn new(qpack_settings: &QpackSettings) -> Self { @@ -67,6 +70,7 @@ impl QPackDecoder { } /// # Panics + /// /// If the number of blocked streams is too large. #[must_use] pub fn get_blocked_streams(&self) -> u16 { @@ -74,7 +78,9 @@ impl QPackDecoder { } /// returns a list of unblocked streams + /// /// # Errors + /// /// May return: `ClosedCriticalStream` if stream has been closed or `EncoderStream` /// in case of any other transport error. pub fn receive(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res> { @@ -164,8 +170,11 @@ impl QPackDecoder { } /// # Errors + /// /// May return an error in case of any transport error. TODO: define transport errors. + /// /// # Panics + /// /// Never, but rust doesn't know that. 
#[allow(clippy::map_err_ignore)] pub fn send(&mut self, conn: &mut Connection) -> Res<()> { @@ -186,6 +195,7 @@ impl QPackDecoder { } /// # Errors + /// /// May return `DecompressionFailed` if header block is incorrect or incomplete. pub fn refers_dynamic_table(&self, buf: &[u8]) -> Res { HeaderDecoder::new(buf).refers_dynamic_table(self.max_entries, self.table.base()) @@ -193,9 +203,13 @@ impl QPackDecoder { /// This function returns None if the stream is blocked waiting for table insertions. /// 'buf' must contain the complete header block. + /// /// # Errors + /// /// May return `DecompressionFailed` if header block is incorrect or incomplete. + /// /// # Panics + /// /// When there is a programming error. pub fn decode_header_block( &mut self, @@ -236,6 +250,7 @@ impl QPackDecoder { } /// # Panics + /// /// When a stream has already been added. pub fn add_send_stream(&mut self, stream_id: StreamId) { assert!( @@ -272,13 +287,15 @@ fn map_error(err: &Error) -> Error { #[cfg(test)] mod tests { - use super::{Connection, Error, QPackDecoder, Res}; - use crate::QpackSettings; + use std::{convert::TryFrom, mem}; + use neqo_common::Header; use neqo_transport::{StreamId, StreamType}; - use std::{convert::TryFrom, mem}; use test_fixture::now; + use super::{Connection, Error, QPackDecoder, Res}; + use crate::QpackSettings; + const STREAM_0: StreamId = StreamId::new(0); struct TestDecoder { @@ -434,7 +451,8 @@ mod tests { ); } - // this test tests header decoding, the header acks command and the insert count increment command. + // this test tests header decoding, the header acks command and the insert count increment + // command. #[test] fn test_duplicate() { let mut decoder = connect(); @@ -467,8 +485,8 @@ mod tests { fn test_encode_incr_encode_header_ack_some() { // 1. Decoder receives an instruction (header and value both as literal) // 2. Decoder process the instruction and sends an increment instruction. - // 3. 
Decoder receives another two instruction (header and value both as literal) and - // a header block. + // 3. Decoder receives another two instruction (header and value both as literal) and a + // header block. // 4. Now it sends only a header ack and an increment instruction with increment==1. let headers = vec![ Header::new("my-headera", "my-valuea"), @@ -504,8 +522,8 @@ mod tests { fn test_encode_incr_encode_header_ack_all() { // 1. Decoder receives an instruction (header and value both as literal) // 2. Decoder process the instruction and sends an increment instruction. - // 3. Decoder receives another instruction (header and value both as literal) and - // a header block. + // 3. Decoder receives another instruction (header and value both as literal) and a header + // block. // 4. Now it sends only a header ack. let headers = vec![ Header::new("my-headera", "my-valuea"), @@ -604,7 +622,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -683,7 +702,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], diff --git a/neqo-qpack/src/decoder_instructions.rs b/neqo-qpack/src/decoder_instructions.rs index eb8a331f3a..029cd61db6 100644 --- a/neqo-qpack/src/decoder_instructions.rs +++ b/neqo-qpack/src/decoder_instructions.rs @@ -4,15 +4,17 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::prefix::{ - DECODER_HEADER_ACK, DECODER_INSERT_COUNT_INCREMENT, DECODER_STREAM_CANCELLATION, -}; -use crate::qpack_send_buf::QpackData; -use crate::reader::{IntReader, ReadByte}; -use crate::Res; +use std::mem; + use neqo_common::{qdebug, qtrace}; use neqo_transport::StreamId; -use std::mem; + +use crate::{ + prefix::{DECODER_HEADER_ACK, DECODER_INSERT_COUNT_INCREMENT, DECODER_STREAM_CANCELLATION}, + qpack_send_buf::QpackData, + reader::{IntReader, ReadByte}, + Res, +}; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum DecoderInstruction { @@ -81,10 +83,11 @@ impl DecoderInstructionReader { } } - /// ### Errors - /// 1) `NeedMoreData` if the reader needs more data - /// 2) `ClosedCriticalStream` - /// 3) other errors will be translated to `DecoderStream` by the caller of this function. + /// # Errors + /// + /// 1) `NeedMoreData` if the reader needs more data + /// 2) `ClosedCriticalStream` + /// 3) other errors will be translated to `DecoderStream` by the caller of this function. pub fn read_instructions(&mut self, recv: &mut R) -> Res { qdebug!([self], "read a new instraction"); loop { @@ -137,11 +140,11 @@ impl DecoderInstructionReader { #[cfg(test)] mod test { - use super::{DecoderInstruction, DecoderInstructionReader, QpackData}; - use crate::reader::test_receiver::TestReceiver; - use crate::Error; use neqo_transport::StreamId; + use super::{DecoderInstruction, DecoderInstructionReader, QpackData}; + use crate::{reader::test_receiver::TestReceiver, Error}; + fn test_encoding_decoding(instruction: DecoderInstruction) { let mut buf = QpackData::default(); instruction.marshal(&mut buf); diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index 9893229dbc..f53cf51d85 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -4,19 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::decoder_instructions::{DecoderInstruction, DecoderInstructionReader}; -use crate::encoder_instructions::EncoderInstruction; -use crate::header_block::HeaderEncoder; -use crate::qlog; -use crate::qpack_send_buf::QpackData; -use crate::reader::ReceiverConnWrapper; -use crate::stats::Stats; -use crate::table::{HeaderTable, LookupResult, ADDITIONAL_TABLE_ENTRY_SIZE}; -use crate::{Error, QpackSettings, Res}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + convert::TryFrom, +}; + use neqo_common::{qdebug, qerror, qlog::NeqoQlog, qtrace, Header}; use neqo_transport::{Connection, Error as TransportError, StreamId}; -use std::collections::{HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; + +use crate::{ + decoder_instructions::{DecoderInstruction, DecoderInstructionReader}, + encoder_instructions::EncoderInstruction, + header_block::HeaderEncoder, + qlog, + qpack_send_buf::QpackData, + reader::ReceiverConnWrapper, + stats::Stats, + table::{HeaderTable, LookupResult, ADDITIONAL_TABLE_ENTRY_SIZE}, + Error, QpackSettings, Res, +}; pub const QPACK_UNI_STREAM_TYPE_ENCODER: u64 = 0x2; @@ -45,9 +51,9 @@ pub struct QPackEncoder { local_stream: LocalStreamState, max_blocked_streams: u16, // Remember header blocks that are referring to dynamic table. - // There can be multiple header blocks in one stream, headers, trailer, push stream request, etc. - // This HashMap maps a stream ID to a list of header blocks. Each header block is a list of - // referenced dynamic table entries. + // There can be multiple header blocks in one stream, headers, trailer, push stream request, + // etc. This HashMap maps a stream ID to a list of header blocks. Each header block is a + // list of referenced dynamic table entries. unacked_header_blocks: HashMap>>, blocked_stream_cnt: u16, use_huffman: bool, @@ -75,7 +81,9 @@ impl QPackEncoder { /// This function is use for setting encoders table max capacity. 
The value is received as /// a `SETTINGS_QPACK_MAX_TABLE_CAPACITY` setting parameter. + /// /// # Errors + /// /// `EncoderStream` if value is too big. /// `ChangeCapacity` if table capacity cannot be reduced. pub fn set_max_capacity(&mut self, cap: u64) -> Res<()> { @@ -103,7 +111,9 @@ impl QPackEncoder { /// This function is use for setting encoders max blocked streams. The value is received as /// a `SETTINGS_QPACK_BLOCKED_STREAMS` setting parameter. + /// /// # Errors + /// /// `EncoderStream` if value is too big. pub fn set_max_blocked_streams(&mut self, blocked_streams: u64) -> Res<()> { self.max_blocked_streams = u16::try_from(blocked_streams).or(Err(Error::EncoderStream))?; @@ -111,7 +121,9 @@ impl QPackEncoder { } /// Reads decoder instructions. + /// /// # Errors + /// /// May return: `ClosedCriticalStream` if stream has been closed or `DecoderStream` /// in case of any other transport error. pub fn receive(&mut self, conn: &mut Connection, stream_id: StreamId) -> Res<()> { @@ -221,14 +233,20 @@ impl QPackEncoder { } } - /// Inserts a new entry into a table and sends the corresponding instruction to a peer. An entry is added only - /// if it is possible to send the corresponding instruction immediately, i.e. the encoder stream is not - /// blocked by the flow control (or stream internal buffer(this is very unlikely)). - /// ### Errors + /// Inserts a new entry into a table and sends the corresponding instruction to a peer. An entry + /// is added only if it is possible to send the corresponding instruction immediately, i.e. + /// the encoder stream is not blocked by the flow control (or stream internal buffer(this is + /// very unlikely)). + /// + /// # Errors + /// /// `EncoderStreamBlocked` if the encoder stream is blocked by the flow control. /// `DynamicTableFull` if the dynamic table does not have enough space for the entry. - /// The function can return transport errors: `InvalidStreamId`, `InvalidInput` and `FinalSizeError`. 
+ /// The function can return transport errors: `InvalidStreamId`, `InvalidInput` and + /// `FinalSizeError`. + /// /// # Panics + /// /// When the insertion fails (it should not). pub fn send_and_insert( &mut self, @@ -279,7 +297,8 @@ impl QPackEncoder { stream_id: StreamId, ) -> Res<()> { if let Some(cap) = self.next_capacity { - // Check if it is possible to reduce the capacity, e.g. if enough space can be make free for the reduction. + // Check if it is possible to reduce the capacity, e.g. if enough space can be make free + // for the reduction. if cap < self.table.capacity() && !self.table.can_evict_to(cap) { return Err(Error::DynamicTableFull); } @@ -302,7 +321,9 @@ impl QPackEncoder { } /// Sends any qpack encoder instructions. + /// /// # Errors + /// /// returns `EncoderStream` in case of an error. pub fn send_encoder_updates(&mut self, conn: &mut Connection) -> Res<()> { match self.local_stream { @@ -338,10 +359,14 @@ impl QPackEncoder { } /// Encodes headers + /// /// # Errors + /// /// `ClosedCriticalStream` if the encoder stream is closed. /// `InternalError` if an unexpected error occurred. + /// /// # Panics + /// /// If there is a programming error. pub fn encode_header_block( &mut self, @@ -358,11 +383,9 @@ impl QPackEncoder { // to write to the encoder stream AND if it can't uses // literal instructions. // The errors can be: - // 1) `EncoderStreamBlocked` - this is an error that - // can occur. + // 1) `EncoderStreamBlocked` - this is an error that can occur. // 2) `InternalError` - this is unexpected error. - // 3) `ClosedCriticalStream` - this is error that should - // close the HTTP/3 session. + // 3) `ClosedCriticalStream` - this is error that should close the HTTP/3 session. // The last 2 errors are ignored here and will be picked up // by the main loop. 
encoder_blocked = true; @@ -406,8 +429,9 @@ impl QPackEncoder { self.table.add_ref(index); } } else if can_block && !encoder_blocked { - // Insert using an InsertWithNameLiteral instruction. This entry name does not match any name in the - // tables therefore we cannot use any other instruction. + // Insert using an InsertWithNameLiteral instruction. This entry name does not match + // any name in the tables therefore we cannot use any other + // instruction. if let Ok(index) = self.send_and_insert(conn, &name, &value) { encoded_h.encode_indexed_dynamic(index); ref_entries.insert(index); @@ -417,16 +441,15 @@ impl QPackEncoder { // to write to the encoder stream AND if it can't uses // literal instructions. // The errors can be: - // 1) `EncoderStreamBlocked` - this is an error that - // can occur. - // 2) `DynamicTableFull` - this is an error that - // can occur. + // 1) `EncoderStreamBlocked` - this is an error that can occur. + // 2) `DynamicTableFull` - this is an error that can occur. // 3) `InternalError` - this is unexpected error. - // 4) `ClosedCriticalStream` - this is error that should - // close the HTTP/3 session. + // 4) `ClosedCriticalStream` - this is error that should close the HTTP/3 + // session. // The last 2 errors are ignored here and will be picked up // by the main loop. - // As soon as one of the instructions cannot be written or the table is full, do not try again. + // As soon as one of the instructions cannot be written or the table is full, do + // not try again. encoder_blocked = true; encoded_h.encode_literal_with_name_literal(&name, &value); } @@ -458,7 +481,9 @@ impl QPackEncoder { } /// Encoder stream has been created. Add the stream id. + /// /// # Panics + /// /// If a stream has already been added. 
pub fn add_send_stream(&mut self, stream_id: StreamId) { if self.local_stream == LocalStreamState::NoStream { @@ -512,12 +537,14 @@ fn map_stream_send_atomic_error(err: &TransportError) -> Error { #[cfg(test)] mod tests { - use super::{Connection, Error, Header, QPackEncoder, Res}; - use crate::QpackSettings; - use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use std::mem; + + use neqo_transport::{ConnectionParameters, StreamId, StreamType}; use test_fixture::{default_client, default_server, handshake, new_server, now, DEFAULT_ALPN}; + use super::{Connection, Error, Header, QPackEncoder, Res}; + use crate::QpackSettings; + struct TestEncoder { encoder: QPackEncoder, send_stream_id: StreamId, @@ -529,7 +556,8 @@ mod tests { impl TestEncoder { pub fn change_capacity(&mut self, capacity: u64) -> Res<()> { self.encoder.set_max_capacity(capacity).unwrap(); - // We will try to really change the table only when we send the change capacity instruction. + // We will try to really change the table only when we send the change capacity + // instruction. 
self.encoder.send_encoder_updates(&mut self.conn) } @@ -722,7 +750,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -796,7 +825,8 @@ mod tests { ], encoder_inst: &[], }, - // test adding a new header and encode_post_base_index, also test fix_header_block_prefix + // test adding a new header and encode_post_base_index, also test + // fix_header_block_prefix TestElement { headers: vec![Header::new("my-header", "my-value")], header_block: &[0x02, 0x80, 0x10], @@ -870,7 +900,8 @@ mod tests { assert!(res.is_ok()); encoder.send_instructions(HEADER_CONTENT_LENGTH_VALUE_1_NAME_LITERAL); - // insert "content-length: 12345 which will fail because the ntry in the table cannot be evicted. + // insert "content-length: 12345 which will fail because the ntry in the table cannot be + // evicted. let res = encoder .encoder @@ -921,7 +952,8 @@ mod tests { assert_eq!(&buf[..], ENCODE_INDEXED_REF_DYNAMIC); encoder.send_instructions(&[]); - // insert "content-length: 12345 which will fail because the entry in the table cannot be evicted + // insert "content-length: 12345 which will fail because the entry in the table cannot be + // evicted let res = encoder .encoder @@ -1004,8 +1036,8 @@ mod tests { encoder.send_instructions(&[]); - // The next one will not use the dynamic entry because it is exceeding the max_blocked_streams - // limit. + // The next one will not use the dynamic entry because it is exceeding the + // max_blocked_streams limit. 
let buf = encoder.encoder.encode_header_block( &mut encoder.conn, &[Header::new("content-length", "1234")], @@ -1099,7 +1131,8 @@ mod tests { assert_eq!(encoder.encoder.blocked_stream_cnt(), 1); - // The next one will not create a new entry because the encoder is on max_blocked_streams limit. + // The next one will not create a new entry because the encoder is on max_blocked_streams + // limit. let buf = encoder.encoder.encode_header_block( &mut encoder.conn, &[Header::new("name2", "value2")], @@ -1274,8 +1307,8 @@ mod tests { assert_eq!(encoder.encoder.blocked_stream_cnt(), 2); // receive a stream cancel for the first stream. - // This will remove the first stream as blocking but it will not mark the instruction as acked. - // and the second steam will still be blocking. + // This will remove the first stream as blocking but it will not mark the instruction as + // acked. and the second steam will still be blocking. recv_instruction(&mut encoder, STREAM_CANCELED_ID_1); // The stream is not blocking anymore because header ack also acks the instruction. @@ -1507,9 +1540,10 @@ mod tests { assert!(encoder.encoder.set_max_capacity(1000).is_ok()); encoder.send_instructions(CAP_INSTRUCTION_1000); - // Encode a header block with 2 headers. The first header will be added to the dynamic table. - // The second will not be added to the dynamic table, because the corresponding instruction - // cannot be written immediately due to the flow control limit. + // Encode a header block with 2 headers. The first header will be added to the dynamic + // table. The second will not be added to the dynamic table, because the + // corresponding instruction cannot be written immediately due to the flow control + // limit. let buf1 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1524,7 +1558,8 @@ mod tests { // Assert that the second header is encoded as a literal with a name literal assert_eq!(buf1[3] & 0xf0, 0x20); - // Try to encode another header block. 
Here both headers will be encoded as a literal with a name literal + // Try to encode another header block. Here both headers will be encoded as a literal with a + // name literal let buf2 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ @@ -1542,8 +1577,8 @@ mod tests { let out = encoder.peer_conn.process(None, now()); mem::drop(encoder.conn.process(out.as_dgram_ref(), now())); - // Try writing a new header block. Now, headers will be added to the dynamic table again, because - // instructions can be sent. + // Try writing a new header block. Now, headers will be added to the dynamic table again, + // because instructions can be sent. let buf3 = encoder.encoder.encode_header_block( &mut encoder.conn, &[ diff --git a/neqo-qpack/src/encoder_instructions.rs b/neqo-qpack/src/encoder_instructions.rs index 93be06bf7f..5564af969e 100644 --- a/neqo-qpack/src/encoder_instructions.rs +++ b/neqo-qpack/src/encoder_instructions.rs @@ -4,16 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::prefix::{ - ENCODER_CAPACITY, ENCODER_DUPLICATE, ENCODER_INSERT_WITH_NAME_LITERAL, - ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, ENCODER_INSERT_WITH_NAME_REF_STATIC, NO_PREFIX, -}; -use crate::qpack_send_buf::QpackData; -use crate::reader::{IntReader, LiteralReader, ReadByte, Reader}; -use crate::Res; -use neqo_common::{qdebug, qtrace}; use std::mem; +use neqo_common::{qdebug, qtrace}; + +use crate::{ + prefix::{ + ENCODER_CAPACITY, ENCODER_DUPLICATE, ENCODER_INSERT_WITH_NAME_LITERAL, + ENCODER_INSERT_WITH_NAME_REF_DYNAMIC, ENCODER_INSERT_WITH_NAME_REF_STATIC, NO_PREFIX, + }, + qpack_send_buf::QpackData, + reader::{IntReader, LiteralReader, ReadByte, Reader}, + Res, +}; + // The encoder only uses InsertWithNameLiteral, therefore clippy is complaining about dead_code. // We may decide to use othe instruction in the future. // All instructions are used for testing, therefore they are defined. 
@@ -183,10 +187,11 @@ impl EncoderInstructionReader { Ok(()) } - /// ### Errors - /// 1) `NeedMoreData` if the reader needs more data - /// 2) `ClosedCriticalStream` - /// 3) other errors will be translated to `EncoderStream` by the caller of this function. + /// # Errors + /// + /// 1) `NeedMoreData` if the reader needs more data + /// 2) `ClosedCriticalStream` + /// 3) other errors will be translated to `EncoderStream` by the caller of this function. pub fn read_instructions( &mut self, recv: &mut T, @@ -265,8 +270,7 @@ impl EncoderInstructionReader { mod test { use super::{EncoderInstruction, EncoderInstructionReader, QpackData}; - use crate::reader::test_receiver::TestReceiver; - use crate::Error; + use crate::{reader::test_receiver::TestReceiver, Error}; fn test_encoding_decoding(instruction: &EncoderInstruction, use_huffman: bool) { let mut buf = QpackData::default(); diff --git a/neqo-qpack/src/header_block.rs b/neqo-qpack/src/header_block.rs index 3b37db120e..2e15bdf1fe 100644 --- a/neqo-qpack/src/header_block.rs +++ b/neqo-qpack/src/header_block.rs @@ -4,6 +4,13 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + mem, + ops::{Deref, Div}, +}; + +use neqo_common::{qtrace, Header}; + use crate::{ prefix::{ BASE_PREFIX_NEGATIVE, BASE_PREFIX_POSITIVE, HEADER_FIELD_INDEX_DYNAMIC, @@ -17,11 +24,6 @@ use crate::{ table::HeaderTable, Error, Res, }; -use neqo_common::{qtrace, Header}; -use std::{ - mem, - ops::{Deref, Div}, -}; #[derive(Default, Debug, PartialEq)] pub struct HeaderEncoder { diff --git a/neqo-qpack/src/huffman.rs b/neqo-qpack/src/huffman.rs index 31657ca826..283a501b32 100644 --- a/neqo-qpack/src/huffman.rs +++ b/neqo-qpack/src/huffman.rs @@ -4,11 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT}; -use crate::huffman_table::HUFFMAN_TABLE; -use crate::{Error, Res}; use std::convert::TryFrom; +use crate::{ + huffman_decode_helper::{HuffmanDecoderNode, HUFFMAN_DECODE_ROOT}, + huffman_table::HUFFMAN_TABLE, + Error, Res, +}; + struct BitReader<'a> { input: &'a [u8], offset: usize, @@ -65,9 +68,14 @@ impl<'a> BitReader<'a> { } /// Decodes huffman encoded input. +/// /// # Errors -/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct huffman-encoded array of bits. +/// +/// This function may return `HuffmanDecompressionFailed` if `input` is not a correct +/// huffman-encoded array of bits. +/// /// # Panics +/// /// Never, but rust can't know that. pub fn decode_huffman(input: &[u8]) -> Res> { let mut reader = BitReader::new(input); @@ -109,6 +117,7 @@ fn decode_character(reader: &mut BitReader) -> Res> { } /// # Panics +/// /// Never, but rust doesn't know that. #[must_use] pub fn encode_huffman(input: &[u8]) -> Vec { diff --git a/neqo-qpack/src/huffman_decode_helper.rs b/neqo-qpack/src/huffman_decode_helper.rs index 7589ebd11a..122226dd1f 100644 --- a/neqo-qpack/src/huffman_decode_helper.rs +++ b/neqo-qpack/src/huffman_decode_helper.rs @@ -4,10 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::huffman_table::HUFFMAN_TABLE; -use lazy_static::lazy_static; use std::convert::TryFrom; +use lazy_static::lazy_static; + +use crate::huffman_table::HUFFMAN_TABLE; + pub struct HuffmanDecoderNode { pub next: [Option>; 2], pub value: Option, diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index 86ccb11ff8..3f9c7b81f7 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -6,7 +6,8 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and structs. 
+// This is because of Encoder and Decoder structs. TODO: think about a better namings for crate and +// structs. #![allow(clippy::module_name_repetitions)] pub mod decoder; @@ -47,7 +48,8 @@ pub enum Error { InternalError(u16), // These are internal errors, they will be transformed into one of the above. - NeedMoreData, // Return when an input stream does not have more data that a decoder needs.(It does not mean that a stream is closed.) + NeedMoreData, /* Return when an input stream does not have more data that a decoder + * needs.(It does not mean that a stream is closed.) */ HeaderLookup, HuffmanDecompressionFailed, BadUtf8, @@ -78,7 +80,8 @@ impl Error { } /// # Errors - /// Any error is mapped to the indicated type. + /// + /// Any error is mapped to the indicated type. fn map_error(r: Result, err: Self) -> Result { r.map_err(|e| { if matches!(e, Self::ClosedCriticalStream) { diff --git a/neqo-qpack/src/prefix.rs b/neqo-qpack/src/prefix.rs index 5019dd7d6d..0085de0df9 100644 --- a/neqo-qpack/src/prefix.rs +++ b/neqo-qpack/src/prefix.rs @@ -16,9 +16,10 @@ pub struct Prefix { impl Prefix { pub fn new(prefix: u8, len: u8) -> Self { // len should never be larger than 7. - // Most of Prefixes are instantiated as consts bellow. The only place where this construcrtor is used - // is in tests and when literals are encoded and the Huffman bit is added to one of the consts bellow. - // create_prefix guaranty that all const have len < 7 so we can safely assert that len is <=7. + // Most of Prefixes are instantiated as consts bellow. The only place where this + // construcrtor is used is in tests and when literals are encoded and the Huffman + // bit is added to one of the consts bellow. create_prefix guaranty that all const + // have len < 7 so we can safely assert that len is <=7. 
assert!(len <= 7); assert!((len == 0) || (prefix & ((1 << (8 - len)) - 1) == 0)); Self { @@ -110,7 +111,7 @@ create_prefix!(ENCODER_INSERT_WITH_NAME_LITERAL, 0x40, 2); create_prefix!(ENCODER_DUPLICATE, 0x00, 3); //===================================================================== -//Header block encoding prefixes +// Header block encoding prefixes //===================================================================== create_prefix!(BASE_PREFIX_POSITIVE, 0x00, 1); @@ -137,5 +138,6 @@ create_prefix!(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC, 0x40, 4, 0xD0); create_prefix!(HEADER_FIELD_LITERAL_NAME_REF_DYNAMIC_POST, 0x00, 5, 0xF0); // | 0 | 0 | 1 | N | H | Index(3+) | -// N is ignored and H is not relevant for decoding this prefix, therefore the mask is 1110 0000 = 0xE0 +// N is ignored and H is not relevant for decoding this prefix, therefore the mask is 1110 0000 = +// 0xE0 create_prefix!(HEADER_FIELD_LITERAL_NAME_LITERAL, 0x20, 4, 0xE0); diff --git a/neqo-qpack/src/qlog.rs b/neqo-qpack/src/qlog.rs index c6ae6b5d0f..8d48efb0aa 100644 --- a/neqo-qpack/src/qlog.rs +++ b/neqo-qpack/src/qlog.rs @@ -6,11 +6,9 @@ // Functions that handle capturing QLOG traces. -use neqo_common::hex; -use neqo_common::qlog::NeqoQlog; +use neqo_common::{hex, qlog::NeqoQlog}; use qlog::events::{ - qpack::QpackInstructionTypeName, - qpack::{QPackInstruction, QpackInstructionParsed}, + qpack::{QPackInstruction, QpackInstructionParsed, QpackInstructionTypeName}, EventData, RawInfo, }; diff --git a/neqo-qpack/src/qpack_send_buf.rs b/neqo-qpack/src/qpack_send_buf.rs index 4fbdbf12bd..a443859081 100644 --- a/neqo-qpack/src/qpack_send_buf.rs +++ b/neqo-qpack/src/qpack_send_buf.rs @@ -4,11 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::huffman::encode_huffman; -use crate::prefix::Prefix; +use std::{convert::TryFrom, ops::Deref}; + use neqo_common::Encoder; -use std::convert::TryFrom; -use std::ops::Deref; + +use crate::{huffman::encode_huffman, prefix::Prefix}; #[derive(Default, Debug, PartialEq)] pub(crate) struct QpackData { diff --git a/neqo-qpack/src/reader.rs b/neqo-qpack/src/reader.rs index f47471005d..ff9c42b246 100644 --- a/neqo-qpack/src/reader.rs +++ b/neqo-qpack/src/reader.rs @@ -4,22 +4,26 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{huffman::decode_huffman, prefix::Prefix, Error, Res}; +use std::{convert::TryInto, mem, str}; + use neqo_common::{qdebug, qerror}; use neqo_transport::{Connection, StreamId}; -use std::{convert::TryInto, mem, str}; + +use crate::{huffman::decode_huffman, prefix::Prefix, Error, Res}; pub trait ReadByte { /// # Errors - /// Return error occurred while reading a byte. - /// The exact error depends on trait implementation. + /// + /// Return error occurred while reading a byte. + /// The exact error depends on trait implementation. fn read_byte(&mut self) -> Res; } pub trait Reader { /// # Errors - /// Return error occurred while reading date into a buffer. - /// The exact error depends on trait implementation. + /// + /// Return error occurred while reading date into a buffer. + /// The exact error depends on trait implementation. fn read(&mut self, buf: &mut [u8]) -> Res; } @@ -154,7 +158,9 @@ pub struct IntReader { impl IntReader { /// `IntReader` is created by suppling the first byte anf prefix length. /// A varint may take only one byte, In that case already the first by has set state to done. + /// /// # Panics + /// /// When `prefix_len` is 8 or larger. #[must_use] pub fn new(first_byte: u8, prefix_len: u8) -> Self { @@ -174,6 +180,7 @@ impl IntReader { } /// # Panics + /// /// Never, but rust doesn't know that. 
#[must_use] pub fn make(first_byte: u8, prefixes: &[Prefix]) -> Self { @@ -187,7 +194,9 @@ impl IntReader { /// This function reads bytes until the varint is decoded or until stream/buffer does not /// have any more date. + /// /// # Errors + /// /// Possible errors are: /// 1) `NeedMoreData` if the reader needs more data, /// 2) `IntegerOverflow`, @@ -245,7 +254,9 @@ impl LiteralReader { /// Creates `LiteralReader` with the first byte. This constructor is always used /// when a litreral has a prefix. /// For literals without a prefix please use the default constructor. + /// /// # Panics + /// /// If `prefix_len` is 8 or more. #[must_use] pub fn new_with_first_byte(first_byte: u8, prefix_len: u8) -> Self { @@ -261,13 +272,17 @@ impl LiteralReader { /// This function reads bytes until the literal is decoded or until stream/buffer does not /// have any more date ready. + /// /// # Errors + /// /// Possible errors are: /// 1) `NeedMoreData` if the reader needs more data, /// 2) `IntegerOverflow` /// 3) Any `ReadByte`'s error /// It returns value if reading the literal is done or None if it needs more data. + /// /// # Panics + /// /// When this object is complete. pub fn read(&mut self, s: &mut T) -> Res> { loop { @@ -309,7 +324,9 @@ impl LiteralReader { /// This is a helper function used only by `ReceiverBufferWrapper`, therefore it returns /// `DecompressionFailed` if any error happens. +/// /// # Errors +/// /// If an parsing error occurred, the function returns `BadUtf8`. 
pub fn parse_utf8(v: &[u8]) -> Res<&str> { str::from_utf8(v).map_err(|_| Error::BadUtf8) @@ -318,9 +335,10 @@ pub fn parse_utf8(v: &[u8]) -> Res<&str> { #[cfg(test)] pub(crate) mod test_receiver { - use super::{Error, ReadByte, Reader, Res}; use std::collections::VecDeque; + use super::{Error, ReadByte, Reader, Res}; + #[derive(Default)] pub struct TestReceiver { buf: VecDeque, @@ -358,11 +376,12 @@ pub(crate) mod test_receiver { #[cfg(test)] mod tests { + use test_receiver::TestReceiver; + use super::{ parse_utf8, str, test_receiver, Error, IntReader, LiteralReader, ReadByte, ReceiverBufferWrapper, Res, }; - use test_receiver::TestReceiver; const TEST_CASES_NUMBERS: [(&[u8], u8, u64); 7] = [ (&[0xEA], 3, 10), diff --git a/neqo-qpack/src/table.rs b/neqo-qpack/src/table.rs index cc9844ee27..7ce8572542 100644 --- a/neqo-qpack/src/table.rs +++ b/neqo-qpack/src/table.rs @@ -4,11 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::static_table::{StaticTableEntry, HEADER_STATIC_TABLE}; -use crate::{Error, Res}; +use std::{collections::VecDeque, convert::TryFrom}; + use neqo_common::qtrace; -use std::collections::VecDeque; -use std::convert::TryFrom; + +use crate::{ + static_table::{StaticTableEntry, HEADER_STATIC_TABLE}, + Error, Res, +}; pub const ADDITIONAL_TABLE_ENTRY_SIZE: usize = 32; @@ -106,9 +109,12 @@ impl HeaderTable { } /// Change the dynamic table capacity. - /// ### Errors + /// + /// # Errors + /// /// `ChangeCapacity` if table capacity cannot be reduced. - /// The table cannot be reduce if there are entries that are referred at the moment or their inserts are unacked. + /// The table cannot be reduce if there are entries that are referred at the moment or their + /// inserts are unacked. pub fn set_capacity(&mut self, cap: u64) -> Res<()> { qtrace!([self], "set capacity to {}", cap); if !self.evict_to(cap) { @@ -119,7 +125,9 @@ impl HeaderTable { } /// Get a static entry with `index`. 
- /// ### Errors + /// + /// # Errors + /// /// `HeaderLookup` if the index does not exist in the static table. pub fn get_static(index: u64) -> Res<&'static StaticTableEntry> { let inx = usize::try_from(index).or(Err(Error::HeaderLookup))?; @@ -151,7 +159,9 @@ impl HeaderTable { } /// Get a entry in the dynamic table. - /// ### Errors + /// + /// # Errors + /// /// `HeaderLookup` if entry does not exist. pub fn get_dynamic(&self, index: u64, base: u64, post: bool) -> Res<&DynamicTableEntry> { let inx = if post { @@ -186,8 +196,8 @@ impl HeaderTable { } /// Look for a header pair. - /// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) and `value_matches` - /// (if the header value matches as well not only header name) + /// The function returns `LookupResult`: `index`, `static_table` (if it is a static table entry) + /// and `value_matches` (if the header value matches as well not only header name) pub fn lookup(&mut self, name: &[u8], value: &[u8], can_block: bool) -> Option { qtrace!( [self], @@ -280,9 +290,11 @@ impl HeaderTable { } /// Insert a new entry. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. pub fn insert(&mut self, name: &[u8], value: &[u8]) -> Res { qtrace!([self], "insert name={:?} value={:?}", name, value); let entry = DynamicTableEntry { @@ -304,9 +316,11 @@ impl HeaderTable { } /// Insert a new entry with the name refer to by a index to static or dynamic table. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. 
+ /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. /// `HeaderLookup` if the index dos not exits in the static/dynamic table. pub fn insert_with_name_ref( &mut self, @@ -336,9 +350,11 @@ impl HeaderTable { } /// Duplicate an entry. - /// ### Errors - /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough space and/or - /// other entry cannot be evicted. + /// + /// # Errors + /// + /// `DynamicTableFull` if an entry cannot be added to the table because there is not enough + /// space and/or other entry cannot be evicted. /// `HeaderLookup` if the index dos not exits in the static/dynamic table. pub fn duplicate(&mut self, index: u64) -> Res { qtrace!([self], "duplicate entry={}", index); @@ -355,7 +371,9 @@ impl HeaderTable { } /// Increment number of acknowledge entries. - /// ### Errors + /// + /// # Errors + /// /// `IncrementAck` if ack is greater than actual number of inserts. 
pub fn increment_acked(&mut self, increment: u64) -> Res<()> { qtrace!([self], "increment acked by {}", increment); diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index cabae35232..590e0d55db 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -27,9 +27,6 @@ use std::{ use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; use mio_extras::timer::{Builder, Timeout, Timer}; -use neqo_transport::ConnectionIdGenerator; -use structopt::StructOpt; - use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -40,8 +37,10 @@ use neqo_http3::{ }; use neqo_transport::{ server::ValidateAddress, tparams::PreferredAddress, CongestionControlAlgorithm, - ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, Version, + ConnectionIdGenerator, ConnectionParameters, Output, RandomConnectionIdGenerator, StreamType, + Version, }; +use structopt::StructOpt; use crate::old_https::Http09Server; diff --git a/neqo-server/src/old_https.rs b/neqo-server/src/old_https.rs index 61ebd53258..f254446695 100644 --- a/neqo-server/src/old_https.rs +++ b/neqo-server/src/old_https.rs @@ -7,14 +7,9 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::use_self)] -use std::cell::RefCell; -use std::collections::HashMap; -use std::fmt::Display; -use std::path::PathBuf; -use std::rc::Rc; -use std::time::Instant; - -use regex::Regex; +use std::{ + cell::RefCell, collections::HashMap, fmt::Display, path::PathBuf, rc::Rc, time::Instant, +}; use neqo_common::{event::Provider, hex, qdebug, Datagram}; use neqo_crypto::{generate_ech_keys, random, AllowZeroRtt, AntiReplay, Cipher}; @@ -23,6 +18,7 @@ use neqo_transport::{ server::{ActiveConnectionRef, Server, ValidateAddress}, ConnectionEvent, ConnectionIdGenerator, ConnectionParameters, Output, State, StreamId, }; +use regex::Regex; use super::{qns_read_response, 
Args, HttpServer}; diff --git a/neqo-transport/src/ackrate.rs b/neqo-transport/src/ackrate.rs index 6c4ae44f86..cf68f9021f 100644 --- a/neqo-transport/src/ackrate.rs +++ b/neqo-transport/src/ackrate.rs @@ -7,16 +7,14 @@ // Management of the peer's ack rate. #![deny(clippy::pedantic)] -use crate::connection::params::ACK_RATIO_SCALE; -use crate::frame::FRAME_TYPE_ACK_FREQUENCY; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; +use std::{cmp::max, convert::TryFrom, time::Duration}; use neqo_common::qtrace; -use std::cmp::max; -use std::convert::TryFrom; -use std::time::Duration; + +use crate::{ + connection::params::ACK_RATIO_SCALE, frame::FRAME_TYPE_ACK_FREQUENCY, packet::PacketBuilder, + recovery::RecoveryToken, stats::FrameStats, +}; #[derive(Debug, Clone)] pub struct AckRate { diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index fcb8106742..9105c89a54 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -6,22 +6,23 @@ // This file implements functions necessary for address validation. +use std::{ + convert::TryFrom, + net::{IpAddr, SocketAddr}, + time::{Duration, Instant}, +}; + use neqo_common::{qinfo, qtrace, Decoder, Encoder, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, selfencrypt::SelfEncrypt, }; - -use crate::cid::ConnectionId; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::{Error, Res}; - use smallvec::SmallVec; -use std::convert::TryFrom; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; + +use crate::{ + cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Error, + Res, +}; /// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens. 
const TOKEN_IDENTIFIER_RETRY: &[u8] = &[0x52, 0x65, 0x74, 0x72, 0x79]; @@ -460,9 +461,10 @@ impl NewTokenSender { #[cfg(test)] mod tests { - use super::NewTokenState; use neqo_common::Role; + use super::NewTokenState; + const ONE: &[u8] = &[1, 2, 3]; const TWO: &[u8] = &[4, 5]; diff --git a/neqo-transport/src/cc/classic_cc.rs b/neqo-transport/src/cc/classic_cc.rs index c1d8fd08a6..6f4a01d795 100644 --- a/neqo-transport/src/cc/classic_cc.rs +++ b/neqo-transport/src/cc/classic_cc.rs @@ -14,7 +14,6 @@ use std::{ }; use super::CongestionControl; - use crate::{ cc::MAX_DATAGRAM_SIZE, packet::PacketNumber, @@ -537,6 +536,14 @@ impl ClassicCongestionControl { #[cfg(test)] mod tests { + use std::{ + convert::TryFrom, + time::{Duration, Instant}, + }; + + use neqo_common::qinfo; + use test_fixture::now; + use super::{ ClassicCongestionControl, WindowAdjustment, CWND_INITIAL, CWND_MIN, PERSISTENT_CONG_THRESH, }; @@ -551,12 +558,6 @@ mod tests { rtt::RttEstimate, tracking::SentPacket, }; - use neqo_common::qinfo; - use std::{ - convert::TryFrom, - time::{Duration, Instant}, - }; - use test_fixture::now; const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); diff --git a/neqo-transport/src/cc/cubic.rs b/neqo-transport/src/cc/cubic.rs index 1a2928cdf7..c04a29b443 100644 --- a/neqo-transport/src/cc/cubic.rs +++ b/neqo-transport/src/cc/cubic.rs @@ -6,12 +6,15 @@ #![deny(clippy::pedantic)] -use std::fmt::{self, Display}; -use std::time::{Duration, Instant}; +use std::{ + convert::TryFrom, + fmt::{self, Display}, + time::{Duration, Instant}, +}; -use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64}; use neqo_common::qtrace; -use std::convert::TryFrom; + +use crate::cc::{classic_cc::WindowAdjustment, MAX_DATAGRAM_SIZE_F64}; // CUBIC congestion control @@ -163,8 +166,8 @@ impl WindowAdjustment for Cubic { // of `MAX_DATAGRAM_SIZE` to match the increase of `target - cwnd / cwnd` as defined // in the specification (Sections 
4.4 and 4.5). // The amount of data required therefore reduces asymptotically as the target increases. - // If the target is not significantly higher than the congestion window, require a very large - // amount of acknowledged data (effectively block increases). + // If the target is not significantly higher than the congestion window, require a very + // large amount of acknowledged data (effectively block increases). let mut acked_to_increase = MAX_DATAGRAM_SIZE_F64 * curr_cwnd_f64 / (target_cwnd - curr_cwnd_f64).max(1.0); @@ -178,9 +181,10 @@ impl WindowAdjustment for Cubic { fn reduce_cwnd(&mut self, curr_cwnd: usize, acked_bytes: usize) -> (usize, usize) { let curr_cwnd_f64 = convert_to_f64(curr_cwnd); // Fast Convergence - // If congestion event occurs before the maximum congestion window before the last congestion event, - // we reduce the the maximum congestion window and thereby W_max. - // check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because with cwnd in bytes, cwnd may be slightly off. + // If congestion event occurs before the maximum congestion window before the last + // congestion event, we reduce the the maximum congestion window and thereby W_max. + // check cwnd + MAX_DATAGRAM_SIZE instead of cwnd because with cwnd in bytes, cwnd may be + // slightly off. 
self.last_max_cwnd = if curr_cwnd_f64 + MAX_DATAGRAM_SIZE_F64 < self.last_max_cwnd { curr_cwnd_f64 * CUBIC_FAST_CONVERGENCE } else { diff --git a/neqo-transport/src/cc/mod.rs b/neqo-transport/src/cc/mod.rs index 0321ab1de5..a1a43bd157 100644 --- a/neqo-transport/src/cc/mod.rs +++ b/neqo-transport/src/cc/mod.rs @@ -7,15 +7,16 @@ // Congestion control #![deny(clippy::pedantic)] -use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; -use neqo_common::qlog::NeqoQlog; - use std::{ fmt::{Debug, Display}, str::FromStr, time::{Duration, Instant}, }; +use neqo_common::qlog::NeqoQlog; + +use crate::{path::PATH_MTU_V6, rtt::RttEstimate, tracking::SentPacket, Error}; + mod classic_cc; mod cubic; mod new_reno; diff --git a/neqo-transport/src/cc/new_reno.rs b/neqo-transport/src/cc/new_reno.rs index d34cdfbab9..e51b3d6cc0 100644 --- a/neqo-transport/src/cc/new_reno.rs +++ b/neqo-transport/src/cc/new_reno.rs @@ -7,10 +7,12 @@ // Congestion control #![deny(clippy::pedantic)] -use std::fmt::{self, Display}; +use std::{ + fmt::{self, Display}, + time::{Duration, Instant}, +}; use crate::cc::classic_cc::WindowAdjustment; -use std::time::{Duration, Instant}; #[derive(Debug, Default)] pub struct NewReno {} diff --git a/neqo-transport/src/cc/tests/cubic.rs b/neqo-transport/src/cc/tests/cubic.rs index b24f1fc118..0c82e47817 100644 --- a/neqo-transport/src/cc/tests/cubic.rs +++ b/neqo-transport/src/cc/tests/cubic.rs @@ -7,6 +7,14 @@ #![allow(clippy::cast_possible_truncation)] #![allow(clippy::cast_sign_loss)] +use std::{ + convert::TryFrom, + ops::Sub, + time::{Duration, Instant}, +}; + +use test_fixture::now; + use crate::{ cc::{ classic_cc::{ClassicCongestionControl, CWND_INITIAL}, @@ -20,12 +28,6 @@ use crate::{ rtt::RttEstimate, tracking::SentPacket, }; -use std::{ - convert::TryFrom, - ops::Sub, - time::{Duration, Instant}, -}; -use test_fixture::now; const RTT: Duration = Duration::from_millis(100); const RTT_ESTIMATE: RttEstimate = 
RttEstimate::from_duration(Duration::from_millis(100)); @@ -109,7 +111,7 @@ fn tcp_phase() { for _ in 0..num_tcp_increases { let cwnd_rtt_start = cubic.cwnd(); - //Expected acks during a period of RTT / CUBIC_ALPHA. + // Expected acks during a period of RTT / CUBIC_ALPHA. let acks = expected_tcp_acks(cwnd_rtt_start); // The time between acks if they are ideally paced over a RTT. let time_increase = RTT / u32::try_from(cwnd_rtt_start / MAX_DATAGRAM_SIZE).unwrap(); @@ -145,9 +147,10 @@ fn tcp_phase() { let expected_ack_tcp_increase = expected_tcp_acks(cwnd_rtt_start); assert!(num_acks < expected_ack_tcp_increase); - // This first increase after a TCP phase may be shorter than what it would take by a regular cubic phase, - // because of the proper byte counting and the credit it already had before entering this phase. Therefore - // We will perform another round and compare it to expected increase using the cubic equation. + // This first increase after a TCP phase may be shorter than what it would take by a regular + // cubic phase, because of the proper byte counting and the credit it already had before + // entering this phase. Therefore We will perform another round and compare it to expected + // increase using the cubic equation. 
let cwnd_rtt_start_after_tcp = cubic.cwnd(); let elapsed_time = now - start_time; @@ -167,12 +170,12 @@ fn tcp_phase() { let expected_ack_tcp_increase2 = expected_tcp_acks(cwnd_rtt_start_after_tcp); assert!(num_acks2 < expected_ack_tcp_increase2); - // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be calculates from: - // W_cubic(elapsed_time + t_to_increase) - W_cubis(elapsed_time) = MAX_DATAGRAM_SIZE => - // CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL - - // CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL = MAX_DATAGRAM_SIZE => - // t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) - elapsed_time - // (t_to_increase is in seconds) + // The time needed to increase cwnd by MAX_DATAGRAM_SIZE using the cubic equation will be + // calculates from: W_cubic(elapsed_time + t_to_increase) - W_cubis(elapsed_time) = + // MAX_DATAGRAM_SIZE => CUBIC_C * (elapsed_time + t_to_increase)^3 * MAX_DATAGRAM_SIZE + + // CWND_INITIAL - CUBIC_C * elapsed_time^3 * MAX_DATAGRAM_SIZE + CWND_INITIAL = + // MAX_DATAGRAM_SIZE => t_to_increase = cbrt((1 + CUBIC_C * elapsed_time^3) / CUBIC_C) - + // elapsed_time (t_to_increase is in seconds) // number of ack needed is t_to_increase / time_increase. let expected_ack_cubic_increase = ((((1.0 + CUBIC_C * (elapsed_time).as_secs_f64().powi(3)) / CUBIC_C).cbrt() @@ -180,15 +183,16 @@ fn tcp_phase() { / time_increase.as_secs_f64()) .ceil() as u64; // num_acks is very close to the calculated value. The exact value is hard to calculate - // because the proportional increase(i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) * MAX_DATAGRAM_SIZE_F64) - // and the byte counting. + // because the proportional increase(i.e. curr_cwnd_f64 / (target - curr_cwnd_f64) * + // MAX_DATAGRAM_SIZE_F64) and the byte counting. 
assert_eq!(num_acks2, expected_ack_cubic_increase + 2); } #[test] fn cubic_phase() { let mut cubic = ClassicCongestionControl::new(Cubic::default()); - // Set last_max_cwnd to a higher number make sure that cc is the cubic phase (cwnd is calculated by the cubic equation). + // Set last_max_cwnd to a higher number make sure that cc is the cubic phase (cwnd is calculated + // by the cubic equation). cubic.set_last_max_cwnd(CWND_INITIAL_10_F64); // Set ssthresh to something small to make sure that cc is in the congection avoidance phase. cubic.set_ssthresh(1); @@ -205,7 +209,7 @@ fn cubic_phase() { let num_rtts_w_max = (k / RTT.as_secs_f64()).round() as u64; for _ in 0..num_rtts_w_max { let cwnd_rtt_start = cubic.cwnd(); - //Expected acks + // Expected acks let acks = cwnd_rtt_start / MAX_DATAGRAM_SIZE; let time_increase = RTT / u32::try_from(acks).unwrap(); for _ in 0..acks { @@ -264,7 +268,8 @@ fn congestion_event_congestion_avoidance() { // Set ssthresh to something small to make sure that cc is in the congection avoidance phase. cubic.set_ssthresh(1); - // Set last_max_cwnd to something smaller than cwnd so that the fast convergence is not triggered. + // Set last_max_cwnd to something smaller than cwnd so that the fast convergence is not + // triggered. 
cubic.set_last_max_cwnd(3.0 * MAX_DATAGRAM_SIZE_F64); _ = fill_cwnd(&mut cubic, 0, now()); diff --git a/neqo-transport/src/cc/tests/new_reno.rs b/neqo-transport/src/cc/tests/new_reno.rs index f86e87b953..a73844a755 100644 --- a/neqo-transport/src/cc/tests/new_reno.rs +++ b/neqo-transport/src/cc/tests/new_reno.rs @@ -7,6 +7,10 @@ // Congestion control #![deny(clippy::pedantic)] +use std::time::Duration; + +use test_fixture::now; + use crate::{ cc::{ new_reno::NewReno, ClassicCongestionControl, CongestionControl, CWND_INITIAL, @@ -17,9 +21,6 @@ use crate::{ tracking::SentPacket, }; -use std::time::Duration; -use test_fixture::now; - const PTO: Duration = Duration::from_millis(100); const RTT: Duration = Duration::from_millis(98); const RTT_ESTIMATE: RttEstimate = RttEstimate::from_duration(Duration::from_millis(98)); @@ -169,8 +170,8 @@ fn issue_1465() { cwnd_is_default(&cc); assert_eq!(cc.bytes_in_flight(), 3 * MAX_DATAGRAM_SIZE); - // advance one rtt to detect lost packet there this simplifies the timers, because on_packet_loss - // would only be called after RTO, but that is not relevant to the problem + // advance one rtt to detect lost packet there this simplifies the timers, because + // on_packet_loss would only be called after RTO, but that is not relevant to the problem now += RTT; cc.on_packets_lost(Some(now), None, PTO, &[p1]); diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index eefc3104a9..7096ae1874 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -6,15 +6,6 @@ // Representation and management of connection IDs. 
-use crate::{ - frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, - stats::FrameStats, Error, Res, -}; - -use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; -use neqo_crypto::random; - -use smallvec::SmallVec; use std::{ borrow::Borrow, cell::{Ref, RefCell}, @@ -24,6 +15,15 @@ use std::{ rc::Rc, }; +use neqo_common::{hex, hex_with_len, qinfo, Decoder, Encoder}; +use neqo_crypto::random; +use smallvec::SmallVec; + +use crate::{ + frame::FRAME_TYPE_NEW_CONNECTION_ID, packet::PacketBuilder, recovery::RecoveryToken, + stats::FrameStats, Error, Res, +}; + pub const MAX_CONNECTION_ID_LEN: usize = 20; pub const LOCAL_ACTIVE_CID_LIMIT: usize = 8; pub const CONNECTION_ID_SEQNO_INITIAL: u64 = 0; @@ -421,8 +421,9 @@ pub struct ConnectionIdManager { /// The `ConnectionIdGenerator` instance that is used to create connection IDs. generator: Rc>, /// The connection IDs that we will accept. - /// This includes any we advertise in `NEW_CONNECTION_ID` that haven't been bound to a path yet. - /// During the handshake at the server, it also includes the randomized DCID pick by the client. + /// This includes any we advertise in `NEW_CONNECTION_ID` that haven't been bound to a path + /// yet. During the handshake at the server, it also includes the randomized DCID pick by + /// the client. connection_ids: ConnectionIdStore<()>, /// The maximum number of connection IDs this will accept. This is at least 2 and won't /// be more than `LOCAL_ACTIVE_CID_LIMIT`. @@ -595,9 +596,10 @@ impl ConnectionIdManager { #[cfg(test)] mod tests { - use super::*; use test_fixture::fixture_init; + use super::*; + #[test] fn generate_initial_cid() { fixture_init(); diff --git a/neqo-transport/src/connection/idle.rs b/neqo-transport/src/connection/idle.rs index da1c520777..e33f3defb3 100644 --- a/neqo-transport/src/connection/idle.rs +++ b/neqo-transport/src/connection/idle.rs @@ -4,13 +4,15 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. -use crate::recovery::RecoveryToken; -use neqo_common::qtrace; use std::{ cmp::{max, min}, time::{Duration, Instant}, }; +use neqo_common::qtrace; + +use crate::recovery::RecoveryToken; + #[derive(Debug, Clone)] /// There's a little bit of different behavior for resetting idle timeout. See /// -transport 10.2 ("Idle Timeout"). diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 7e8c1d4737..8aaf987db9 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -6,6 +6,29 @@ // The class implementing a QUIC connection. +use std::{ + cell::RefCell, + cmp::{max, min}, + convert::TryFrom, + fmt::{self, Debug}, + mem, + net::{IpAddr, SocketAddr}, + ops::RangeInclusive, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; + +use neqo_common::{ + event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, + qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, +}; +use neqo_crypto::{ + agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, + HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, + Server, ZeroRttChecker, +}; +use smallvec::SmallVec; + use crate::{ addr_valid::{AddressValidation, NewTokenState}, cid::{ @@ -37,27 +60,6 @@ use crate::{ version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; -use neqo_common::{ - event::Provider as EventProvider, hex, hex_snip_middle, hrtime, qdebug, qerror, qinfo, - qlog::NeqoQlog, qtrace, qwarn, Datagram, Decoder, Encoder, Role, -}; -use neqo_crypto::{ - agent::CertificateInfo, random, Agent, AntiReplay, AuthenticationStatus, Cipher, Client, Group, - HandshakeState, PrivateKey, PublicKey, ResumptionToken, SecretAgentInfo, SecretAgentPreInfo, - Server, ZeroRttChecker, -}; -use smallvec::SmallVec; -use std::{ - 
cell::RefCell, - cmp::{max, min}, - convert::TryFrom, - fmt::{self, Debug}, - mem, - net::{IpAddr, SocketAddr}, - ops::RangeInclusive, - rc::{Rc, Weak}, - time::{Duration, Instant}, -}; mod idle; pub mod params; @@ -66,16 +68,16 @@ mod state; #[cfg(test)] pub mod test_internal; -pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; +use idle::IdleTimeout; pub use params::ConnectionParameters; +use params::PreferredAddressConfig; #[cfg(test)] pub use params::ACK_RATIO_SCALE; -pub use state::{ClosingFrame, State}; - -use idle::IdleTimeout; -use params::PreferredAddressConfig; use saved::SavedDatagrams; use state::StateSignaling; +pub use state::{ClosingFrame, State}; + +pub use crate::send_stream::{RetransmissionPriority, SendStreamStats, TransmissionPriority}; #[derive(Debug, Default)] struct Packet(Vec); @@ -476,7 +478,9 @@ impl Connection { /// Set a local transport parameter, possibly overriding a default value. /// This only sets transport parameters without dealing with other aspects of /// setting the value. + /// /// # Panics + /// /// This panics if the transport parameter is known to this crate. pub fn set_local_tparam(&self, tp: TransportParameterId, value: TransportParameter) -> Res<()> { #[cfg(not(test))] @@ -494,9 +498,9 @@ impl Connection { } /// `odcid` is their original choice for our CID, which we get from the Retry token. - /// `remote_cid` is the value from the Source Connection ID field of - /// an incoming packet: what the peer wants us to use now. - /// `retry_cid` is what we asked them to use when we sent the Retry. + /// `remote_cid` is the value from the Source Connection ID field of an incoming packet: what + /// the peer wants us to use now. `retry_cid` is what we asked them to use when we sent the + /// Retry. 
pub(crate) fn set_retry_cids( &mut self, odcid: ConnectionId, @@ -642,7 +646,9 @@ impl Connection { /// problem for short-lived connections, where the connection is closed before any events are /// released. This function retrieves the token, without waiting for a `NEW_TOKEN` frame to /// arrive. + /// /// # Panics + /// /// If this is called on a server. pub fn take_resumption_token(&mut self, now: Instant) -> Option { assert_eq!(self.role, Role::Client); @@ -849,8 +855,8 @@ impl Connection { qwarn!([self], "Closing again after error {:?}", err); } State::Init => { - // We have not even sent anything just close the connection without sending any error. - // This may happen when client_start fails. + // We have not even sent anything just close the connection without sending any + // error. This may happen when client_start fails. self.set_state(State::Closed(error)); } State::WaitInitial => { @@ -1672,6 +1678,7 @@ impl Connection { /// Either way, the path is probed and will be abandoned if the probe fails. /// /// # Errors + /// /// Fails if this is not a client, not confirmed, or there are not enough connection /// IDs available to use. pub fn migrate( @@ -2962,7 +2969,9 @@ impl Connection { /// Create a stream. /// Returns new stream id + /// /// # Errors + /// /// `ConnectionState` if the connecton stat does not allow to create streams. /// `StreamLimitError` if we are limiied by server's stream concurence. pub fn stream_create(&mut self, st: StreamType) -> Res { @@ -2984,7 +2993,9 @@ impl Connection { } /// Set the priority of a stream. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist. pub fn stream_priority( &mut self, @@ -2999,7 +3010,9 @@ impl Connection { } /// Set the SendOrder of a stream. 
Re-enqueues to keep the ordering correct + /// /// # Errors + /// /// Returns InvalidStreamId if the stream id doesn't exist pub fn stream_sendorder( &mut self, @@ -3010,7 +3023,9 @@ impl Connection { } /// Set the Fairness of a stream + /// /// # Errors + /// /// Returns InvalidStreamId if the stream id doesn't exist pub fn stream_fairness(&mut self, stream_id: StreamId, fairness: bool) -> Res<()> { self.streams.set_fairness(stream_id, fairness) @@ -3029,7 +3044,9 @@ impl Connection { /// Send data on a stream. /// Returns how many bytes were successfully sent. Could be less /// than total, based on receiver credit space available, etc. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist, /// `InvalidInput` if length of `data` is zero, /// `FinalSizeError` if the stream has already been closed. @@ -3040,7 +3057,9 @@ impl Connection { /// Send all data or nothing on a stream. May cause DATA_BLOCKED or /// STREAM_DATA_BLOCKED frames to be sent. /// Returns true if data was successfully sent, otherwise false. + /// /// # Errors + /// /// `InvalidStreamId` the stream does not exist, /// `InvalidInput` if length of `data` is zero, /// `FinalSizeError` if the stream has already been closed. @@ -3081,7 +3100,9 @@ impl Connection { /// Read buffered data from stream. bool says whether read bytes includes /// the final data on stream. + /// /// # Errors + /// /// `InvalidStreamId` if the stream does not exist. /// `NoMoreData` if data and fin bit were previously read by the application. pub fn stream_recv(&mut self, stream_id: StreamId, data: &mut [u8]) -> Res<(usize, bool)> { @@ -3100,7 +3121,9 @@ impl Connection { } /// Increases `max_stream_data` for a `stream_id`. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. 
pub fn set_stream_max_data(&mut self, stream_id: StreamId, max_data: u64) -> Res<()> { @@ -3114,7 +3137,9 @@ impl Connection { /// (if `keep` is `true`) or no longer important (if `keep` is `false`). If any /// stream is marked this way, PING frames will be used to keep the connection /// alive, even when there is no activity. + /// /// # Errors + /// /// Returns `InvalidStreamId` if a stream does not exist or the receiving /// side is closed. pub fn stream_keep_alive(&mut self, stream_id: StreamId, keep: bool) -> Res<()> { @@ -3128,7 +3153,9 @@ impl Connection { /// Returns the current max size of a datagram that can fit into a packet. /// The value will change over time depending on the encoded size of the /// packet number, ack frames, etc. + /// /// # Error + /// /// The function returns `NotAvailable` if datagrams are not enabled. pub fn max_datagram_size(&self) -> Res { let max_dgram_size = self.quic_datagrams.remote_datagram_size(); @@ -3169,7 +3196,9 @@ impl Connection { } /// Queue a datagram for sending. + /// /// # Error + /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the /// datagram can fit into a packet (i.e. MTU limit). This is checked during diff --git a/neqo-transport/src/connection/params.rs b/neqo-transport/src/connection/params.rs index 3d8dff67a6..48aba4303b 100644 --- a/neqo-transport/src/connection/params.rs +++ b/neqo-transport/src/connection/params.rs @@ -4,18 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}; +use std::{cmp::max, convert::TryFrom, time::Duration}; + pub use crate::recovery::FAST_PTO_SCALE; -use crate::recv_stream::RECV_BUFFER_SIZE; -use crate::rtt::GRANULARITY; -use crate::stream_id::StreamType; -use crate::tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}; -use crate::tracking::DEFAULT_ACK_DELAY; -use crate::version::{Version, VersionConfig}; -use crate::{CongestionControlAlgorithm, Res}; -use std::cmp::max; -use std::convert::TryFrom; -use std::time::Duration; +use crate::{ + connection::{ConnectionIdManager, Role, LOCAL_ACTIVE_CID_LIMIT}, + recv_stream::RECV_BUFFER_SIZE, + rtt::GRANULARITY, + stream_id::StreamType, + tparams::{self, PreferredAddress, TransportParameter, TransportParametersHandler}, + tracking::DEFAULT_ACK_DELAY, + version::{Version, VersionConfig}, + CongestionControlAlgorithm, Res, +}; const LOCAL_MAX_DATA: u64 = 0x3FFF_FFFF_FFFF_FFFF; // 2^62-1 const LOCAL_STREAM_LIMIT_BIDI: u64 = 16; @@ -49,11 +50,14 @@ pub struct ConnectionParameters { cc_algorithm: CongestionControlAlgorithm, /// Initial connection-level flow control limit. max_data: u64, - /// Initial flow control limit for receiving data on bidirectional streams that the peer creates. + /// Initial flow control limit for receiving data on bidirectional streams that the peer + /// creates. max_stream_data_bidi_remote: u64, - /// Initial flow control limit for receiving data on bidirectional streams that this endpoint creates. + /// Initial flow control limit for receiving data on bidirectional streams that this endpoint + /// creates. max_stream_data_bidi_local: u64, - /// Initial flow control limit for receiving data on unidirectional streams that the peer creates. + /// Initial flow control limit for receiving data on unidirectional streams that the peer + /// creates. max_stream_data_uni: u64, /// Initial limit on bidirectional streams that the peer creates. 
max_streams_bidi: u64, @@ -147,6 +151,7 @@ impl ConnectionParameters { } /// # Panics + /// /// If v > 2^60 (the maximum allowed by the protocol). pub fn max_streams(mut self, stream_type: StreamType, v: u64) -> Self { assert!(v <= (1 << 60), "max_streams is too large"); @@ -162,7 +167,9 @@ impl ConnectionParameters { } /// Get the maximum stream data that we will accept on different types of streams. + /// /// # Panics + /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination. pub fn get_max_stream_data(&self, stream_type: StreamType, remote: bool) -> u64 { match (stream_type, remote) { @@ -176,7 +183,9 @@ impl ConnectionParameters { } /// Set the maximum stream data that we will accept on different types of streams. + /// /// # Panics + /// /// If `StreamType::UniDi` and `false` are passed as that is not a valid combination /// or if v >= 62 (the maximum allowed by the protocol). pub fn max_stream_data(mut self, stream_type: StreamType, remote: bool, v: u64) -> Self { @@ -224,6 +233,7 @@ impl ConnectionParameters { } /// # Panics + /// /// If `timeout` is 2^62 milliseconds or more. pub fn idle_timeout(mut self, timeout: Duration) -> Self { assert!(timeout.as_millis() < (1 << 62), "idle timeout is too long"); @@ -281,6 +291,7 @@ impl ConnectionParameters { /// congestion. /// /// # Panics + /// /// A value of 0 is invalid and will cause a panic. pub fn fast_pto(mut self, scale: u8) -> Self { assert_ne!(scale, 0); diff --git a/neqo-transport/src/connection/saved.rs b/neqo-transport/src/connection/saved.rs index 368a859f5d..f5616c732a 100644 --- a/neqo-transport/src/connection/saved.rs +++ b/neqo-transport/src/connection/saved.rs @@ -4,12 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::mem; -use std::time::Instant; +use std::{mem, time::Instant}; -use crate::crypto::CryptoSpace; use neqo_common::{qdebug, qinfo, Datagram}; +use crate::crypto::CryptoSpace; + /// The number of datagrams that are saved during the handshake when /// keys to decrypt them are not yet available. const MAX_SAVED_DATAGRAMS: usize = 4; diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index ffd9f16b51..f739c147ab 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -4,20 +4,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + cmp::{min, Ordering}, + mem, + rc::Rc, + time::Instant, +}; + use neqo_common::Encoder; -use std::cmp::{min, Ordering}; -use std::mem; -use std::rc::Rc; -use std::time::Instant; -use crate::frame::{ - FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT, - FRAME_TYPE_HANDSHAKE_DONE, +use crate::{ + frame::{ + FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, FRAME_TYPE_CONNECTION_CLOSE_TRANSPORT, + FRAME_TYPE_HANDSHAKE_DONE, + }, + packet::PacketBuilder, + path::PathRef, + recovery::RecoveryToken, + ConnectionError, Error, Res, }; -use crate::packet::PacketBuilder; -use crate::path::PathRef; -use crate::recovery::RecoveryToken; -use crate::{ConnectionError, Error, Res}; #[derive(Clone, Debug, PartialEq, Eq)] /// The state of the Connection. diff --git a/neqo-transport/src/connection/tests/ackrate.rs b/neqo-transport/src/connection/tests/ackrate.rs index 3c909bcc70..1b83d42acd 100644 --- a/neqo-transport/src/connection/tests/ackrate.rs +++ b/neqo-transport/src/connection/tests/ackrate.rs @@ -4,6 +4,10 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{mem, time::Duration}; + +use test_fixture::{addr_v4, assertions}; + use super::{ super::{ConnectionParameters, ACK_RATIO_SCALE}, ack_bytes, connect_rtt_idle, default_client, default_server, fill_cwnd, increase_cwnd, @@ -11,9 +15,6 @@ use super::{ }; use crate::stream_id::StreamType; -use std::{mem, time::Duration}; -use test_fixture::{addr_v4, assertions}; - /// With the default RTT here (100ms) and default ratio (4), endpoints won't send /// `ACK_FREQUENCY` as the ACK delay isn't different enough from the default. #[test] diff --git a/neqo-transport/src/connection/tests/cc.rs b/neqo-transport/src/connection/tests/cc.rs index 6c70e424ea..b3467ea67c 100644 --- a/neqo-transport/src/connection/tests/cc.rs +++ b/neqo-transport/src/connection/tests/cc.rs @@ -4,23 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::super::Output; +use std::{convert::TryFrom, mem, time::Duration}; + +use neqo_common::{qdebug, qinfo, Datagram}; + use super::{ - ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, default_client, - default_server, fill_cwnd, induce_persistent_congestion, send_something, + super::Output, ack_bytes, assert_full_cwnd, connect_rtt_idle, cwnd, cwnd_avail, cwnd_packets, + default_client, default_server, fill_cwnd, induce_persistent_congestion, send_something, CLIENT_HANDSHAKE_1RTT_PACKETS, DEFAULT_RTT, POST_HANDSHAKE_CWND, }; -use crate::cc::MAX_DATAGRAM_SIZE; -use crate::packet::PacketNumber; -use crate::recovery::{ACK_ONLY_SIZE_LIMIT, PACKET_THRESHOLD}; -use crate::sender::PACING_BURST_SIZE; -use crate::stream_id::StreamType; -use crate::tracking::DEFAULT_ACK_PACKET_TOLERANCE; - -use neqo_common::{qdebug, qinfo, Datagram}; -use std::convert::TryFrom; -use std::mem; -use std::time::Duration; +use crate::{ + cc::MAX_DATAGRAM_SIZE, + packet::PacketNumber, + recovery::{ACK_ONLY_SIZE_LIMIT, PACKET_THRESHOLD}, + sender::PACING_BURST_SIZE, + 
stream_id::StreamType, + tracking::DEFAULT_ACK_PACKET_TOLERANCE, +}; #[test] /// Verify initial CWND is honored. diff --git a/neqo-transport/src/connection/tests/close.rs b/neqo-transport/src/connection/tests/close.rs index 39b1106ce0..f45e77e549 100644 --- a/neqo-transport/src/connection/tests/close.rs +++ b/neqo-transport/src/connection/tests/close.rs @@ -4,14 +4,19 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::super::{Connection, Output, State}; -use super::{connect, connect_force_idle, default_client, default_server, send_something}; -use crate::tparams::{self, TransportParameter}; -use crate::{AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE}; - use std::time::Duration; + use test_fixture::{self, datagram, now}; +use super::{ + super::{Connection, Output, State}, + connect, connect_force_idle, default_client, default_server, send_something, +}; +use crate::{ + tparams::{self, TransportParameter}, + AppError, ConnectionError, Error, ERROR_APPLICATION_CLOSE, +}; + fn assert_draining(c: &Connection, expected: &Error) { assert!(c.state().closed()); if let State::Draining { diff --git a/neqo-transport/src/connection/tests/datagram.rs b/neqo-transport/src/connection/tests/datagram.rs index 891773ddaa..5b7b8dc0b4 100644 --- a/neqo-transport/src/connection/tests/datagram.rs +++ b/neqo-transport/src/connection/tests/datagram.rs @@ -4,21 +4,23 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, convert::TryFrom, rc::Rc}; + +use neqo_common::event::Provider; +use test_fixture::now; + use super::{ assert_error, connect_force_idle, default_client, default_server, new_client, new_server, AT_LEAST_PTO, }; -use crate::events::{ConnectionEvent, OutgoingDatagramOutcome}; -use crate::frame::FRAME_TYPE_DATAGRAM; -use crate::packet::PacketBuilder; -use crate::quic_datagrams::MAX_QUIC_DATAGRAM; use crate::{ + events::{ConnectionEvent, OutgoingDatagramOutcome}, + frame::FRAME_TYPE_DATAGRAM, + packet::PacketBuilder, + quic_datagrams::MAX_QUIC_DATAGRAM, send_stream::{RetransmissionPriority, TransmissionPriority}, Connection, ConnectionError, ConnectionParameters, Error, StreamType, }; -use neqo_common::event::Provider; -use std::{cell::RefCell, convert::TryFrom, rc::Rc}; -use test_fixture::now; const DATAGRAM_LEN_MTU: u64 = 1310; const DATA_MTU: &[u8] = &[1; 1310]; @@ -323,7 +325,7 @@ fn datagram_lost() { let pings_sent = client.stats().frame_tx.ping; let dgram_lost = client.stats().datagram_tx.lost; let out = client.process_output(now).dgram(); - assert!(out.is_some()); //PING probing + assert!(out.is_some()); // PING probing // Datagram is not sent again. 
assert_eq!(client.stats().frame_tx.ping, pings_sent + 1); assert_eq!(client.stats().frame_tx.datagram, dgram_sent2); diff --git a/neqo-transport/src/connection/tests/fuzzing.rs b/neqo-transport/src/connection/tests/fuzzing.rs index 75caa7e857..5425e1a16e 100644 --- a/neqo-transport/src/connection/tests/fuzzing.rs +++ b/neqo-transport/src/connection/tests/fuzzing.rs @@ -8,11 +8,12 @@ #![warn(clippy::pedantic)] #![cfg(feature = "fuzzing")] -use super::{connect_force_idle, default_client, default_server}; -use crate::StreamType; use neqo_crypto::FIXED_TAG_FUZZING; use test_fixture::now; +use super::{connect_force_idle, default_client, default_server}; +use crate::StreamType; + #[test] fn no_encryption() { const DATA_CLIENT: &[u8] = &[2; 40]; diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 33aff5d528..93385ac1bc 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -4,35 +4,40 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{Connection, Output, State}; +use std::{ + cell::RefCell, + convert::TryFrom, + mem, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::Duration, +}; + +use neqo_common::{event::Provider, qdebug, Datagram}; +use neqo_crypto::{ + constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, +}; +use test_fixture::{ + self, addr, assertions, assertions::assert_coalesced_0rtt, datagram, fixture_init, now, + split_datagram, +}; + use super::{ + super::{Connection, Output, State}, assert_error, connect, connect_force_idle, connect_with_rtt, default_client, default_server, get_tokens, handshake, maybe_authenticate, resumed_server, send_something, CountingConnectionIdGenerator, AT_LEAST_PTO, DEFAULT_RTT, DEFAULT_STREAM_DATA, }; -use crate::connection::AddressValidation; -use crate::events::ConnectionEvent; -use crate::path::PATH_MTU_V6; -use crate::server::ValidateAddress; -use crate::tparams::{TransportParameter, MIN_ACK_DELAY}; -use crate::tracking::DEFAULT_ACK_DELAY; use crate::{ + connection::AddressValidation, + events::ConnectionEvent, + path::PATH_MTU_V6, + server::ValidateAddress, + tparams::{TransportParameter, MIN_ACK_DELAY}, + tracking::DEFAULT_ACK_DELAY, ConnectionError, ConnectionParameters, EmptyConnectionIdGenerator, Error, StreamType, Version, }; -use neqo_common::{event::Provider, qdebug, Datagram}; -use neqo_crypto::{ - constants::TLS_CHACHA20_POLY1305_SHA256, generate_ech_keys, AuthenticationStatus, -}; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::mem; -use std::net::{IpAddr, Ipv6Addr, SocketAddr}; -use std::rc::Rc; -use std::time::Duration; -use test_fixture::assertions::assert_coalesced_0rtt; -use test_fixture::{self, addr, assertions, datagram, fixture_init, now, split_datagram}; - const ECH_CONFIG_ID: u8 = 7; const ECH_PUBLIC_NAME: &str = "public.example"; @@ -128,7 +133,7 @@ fn no_alpn() { handshake(&mut client, &mut server, now(), Duration::new(0, 0)); // TODO (mt): errors are 
immediate, which means that we never send CONNECTION_CLOSE // and the client never sees the server's rejection of its handshake. - //assert_error(&client, ConnectionError::Transport(Error::CryptoAlert(120))); + // assert_error(&client, ConnectionError::Transport(Error::CryptoAlert(120))); assert_error( &server, &ConnectionError::Transport(Error::CryptoAlert(120)), diff --git a/neqo-transport/src/connection/tests/idle.rs b/neqo-transport/src/connection/tests/idle.rs index 1b7dac9de9..c33726917a 100644 --- a/neqo-transport/src/connection/tests/idle.rs +++ b/neqo-transport/src/connection/tests/idle.rs @@ -4,6 +4,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{ + mem, + time::{Duration, Instant}, +}; + +use neqo_common::{qtrace, Encoder}; +use test_fixture::{self, now, split_datagram}; + use super::{ super::{Connection, ConnectionParameters, IdleTimeout, Output, State}, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, default_client, @@ -18,13 +26,6 @@ use crate::{ tracking::PacketNumberSpace, }; -use neqo_common::{qtrace, Encoder}; -use std::{ - mem, - time::{Duration, Instant}, -}; -use test_fixture::{self, now, split_datagram}; - fn default_timeout() -> Duration { ConnectionParameters::default().get_idle_timeout() } @@ -678,11 +679,14 @@ fn keep_alive_uni() { fn keep_alive_with_ack_eliciting_packet_lost() { const RTT: Duration = Duration::from_millis(500); // PTO will be ~1.1125s - // The idle time out will be set to ~ 5 * PTO. (IDLE_TIMEOUT/2 > pto and IDLE_TIMEOUT/2 < pto + 2pto) - // After handshake all packets will be lost. The following steps will happen after the handshake: + // The idle time out will be set to ~ 5 * PTO. (IDLE_TIMEOUT/2 > pto and IDLE_TIMEOUT/2 < pto + // + 2pto) After handshake all packets will be lost. 
The following steps will happen after + // the handshake: // - data will be sent on a stream that is marked for keep-alive, (at start time) - // - PTO timer will trigger first, and the data will be retransmited toghether with a PING, (at the start time + pto) - // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + IDLE_TIMEOUT / 2) + // - PTO timer will trigger first, and the data will be retransmited toghether with a PING, (at + // the start time + pto) + // - keep-alive timer will trigger and a keep-alive PING will be sent, (at the start time + + // IDLE_TIMEOUT / 2) // - PTO timer will trigger again. (at the start time + pto + 2*pto) // - Idle time out will trigger (at the timeout + IDLE_TIMEOUT) const IDLE_TIMEOUT: Duration = Duration::from_millis(6000); diff --git a/neqo-transport/src/connection/tests/keys.rs b/neqo-transport/src/connection/tests/keys.rs index a0e3b6596e..c247bba670 100644 --- a/neqo-transport/src/connection/tests/keys.rs +++ b/neqo-transport/src/connection/tests/keys.rs @@ -4,19 +4,24 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::super::{ConnectionError, ERROR_AEAD_LIMIT_REACHED}; -use super::super::{Connection, ConnectionParameters, Error, Output, State, StreamType}; +use std::mem; + +use neqo_common::{qdebug, Datagram}; +use test_fixture::{self, now}; + use super::{ + super::{ + super::{ConnectionError, ERROR_AEAD_LIMIT_REACHED}, + Connection, ConnectionParameters, Error, Output, State, StreamType, + }, connect, connect_force_idle, default_client, default_server, maybe_authenticate, send_and_receive, send_something, AT_LEAST_PTO, }; -use crate::crypto::{OVERWRITE_INVOCATIONS, UPDATE_WRITE_KEYS_AT}; -use crate::packet::PacketNumber; -use crate::path::PATH_MTU_V6; - -use neqo_common::{qdebug, Datagram}; -use std::mem; -use test_fixture::{self, now}; +use crate::{ + crypto::{OVERWRITE_INVOCATIONS, UPDATE_WRITE_KEYS_AT}, + packet::PacketNumber, + path::PATH_MTU_V6, +}; fn check_discarded( peer: &mut Connection, diff --git a/neqo-transport/src/connection/tests/migration.rs b/neqo-transport/src/connection/tests/migration.rs index 79c13faa77..8307a7dd84 100644 --- a/neqo-transport/src/connection/tests/migration.rs +++ b/neqo-transport/src/connection/tests/migration.rs @@ -4,6 +4,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + cell::RefCell, + net::{IpAddr, Ipv6Addr, SocketAddr}, + rc::Rc, + time::{Duration, Instant}, +}; + +use neqo_common::{Datagram, Decoder}; +use test_fixture::{ + self, addr, addr_v4, + assertions::{assert_v4_path, assert_v6_path}, + fixture_init, new_neqo_qlog, now, +}; + use super::{ super::{Connection, Output, State, StreamType}, connect_fail, connect_force_idle, connect_rtt_idle, default_client, default_server, @@ -20,19 +34,6 @@ use crate::{ ConnectionParameters, EmptyConnectionIdGenerator, Error, }; -use neqo_common::{Datagram, Decoder}; -use std::{ - cell::RefCell, - net::{IpAddr, Ipv6Addr, SocketAddr}, - rc::Rc, - time::{Duration, Instant}, -}; -use test_fixture::{ - self, addr, addr_v4, - assertions::{assert_v4_path, assert_v6_path}, - fixture_init, new_neqo_qlog, now, -}; - /// This should be a valid-seeming transport parameter. /// And it should have different values to `addr` and `addr_v4`. const SAMPLE_PREFERRED_ADDRESS: &[u8] = &[ diff --git a/neqo-transport/src/connection/tests/mod.rs b/neqo-transport/src/connection/tests/mod.rs index ab520c3198..8a999f4048 100644 --- a/neqo-transport/src/connection/tests/mod.rs +++ b/neqo-transport/src/connection/tests/mod.rs @@ -6,6 +6,20 @@ #![deny(clippy::pedantic)] +use std::{ + cell::RefCell, + cmp::min, + convert::TryFrom, + mem, + rc::Rc, + time::{Duration, Instant}, +}; + +use enum_map::enum_map; +use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; +use neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; +use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; + use super::{Connection, ConnectionError, ConnectionId, Output, State}; use crate::{ addr_valid::{AddressValidation, ValidateAddress}, @@ -21,21 +35,6 @@ use crate::{ Version, }; -use std::{ - cell::RefCell, - cmp::min, - convert::TryFrom, - mem, - rc::Rc, - time::{Duration, Instant}, -}; - -use neqo_common::{event::Provider, qdebug, qtrace, Datagram, Decoder, Role}; -use 
neqo_crypto::{random, AllowZeroRtt, AuthenticationStatus, ResumptionToken}; -use test_fixture::{self, addr, fixture_init, new_neqo_qlog, now}; - -use enum_map::enum_map; - // All the tests. mod ackrate; mod cc; @@ -405,7 +404,9 @@ fn increase_cwnd( } /// Receive multiple packets and generate an ack-only packet. +/// /// # Panics +/// /// The caller is responsible for ensuring that `dest` has received /// enough data that it wants to generate an ACK. This panics if /// no ACK frame is generated. diff --git a/neqo-transport/src/connection/tests/priority.rs b/neqo-transport/src/connection/tests/priority.rs index 5fb27b3a4d..1f86aa22e5 100644 --- a/neqo-transport/src/connection/tests/priority.rs +++ b/neqo-transport/src/connection/tests/priority.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{cell::RefCell, mem, rc::Rc}; + +use neqo_common::event::Provider; +use test_fixture::{self, now}; + use super::{ super::{Connection, Error, Output}, connect, default_client, default_server, fill_cwnd, maybe_authenticate, @@ -14,10 +19,6 @@ use crate::{ ConnectionEvent, StreamId, StreamType, }; -use neqo_common::event::Provider; -use std::{cell::RefCell, mem, rc::Rc}; -use test_fixture::{self, now}; - const BLOCK_SIZE: usize = 4_096; fn fill_stream(c: &mut Connection, id: StreamId) { diff --git a/neqo-transport/src/connection/tests/recovery.rs b/neqo-transport/src/connection/tests/recovery.rs index 87b2b37839..0f12d03107 100644 --- a/neqo-transport/src/connection/tests/recovery.rs +++ b/neqo-transport/src/connection/tests/recovery.rs @@ -4,6 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{ + mem, + time::{Duration, Instant}, +}; + +use neqo_common::qdebug; +use neqo_crypto::AuthenticationStatus; +use test_fixture::{ + assertions::{assert_handshake, assert_initial}, + now, split_datagram, +}; + use super::{ super::{Connection, ConnectionParameters, Output, State}, assert_full_cwnd, connect, connect_force_idle, connect_rtt_idle, connect_with_rtt, cwnd, @@ -23,17 +35,6 @@ use crate::{ StreamType, }; -use neqo_common::qdebug; -use neqo_crypto::AuthenticationStatus; -use std::{ - mem, - time::{Duration, Instant}, -}; -use test_fixture::{ - assertions::{assert_handshake, assert_initial}, - now, split_datagram, -}; - #[test] fn pto_works_basic() { let mut client = default_client(); diff --git a/neqo-transport/src/connection/tests/resumption.rs b/neqo-transport/src/connection/tests/resumption.rs index fa56f6eae2..a8c45a9f06 100644 --- a/neqo-transport/src/connection/tests/resumption.rs +++ b/neqo-transport/src/connection/tests/resumption.rs @@ -4,18 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+use std::{cell::RefCell, mem, rc::Rc, time::Duration}; + +use test_fixture::{self, assertions, now}; + use super::{ connect, connect_with_rtt, default_client, default_server, exchange_ticket, get_tokens, new_client, resumed_server, send_something, AT_LEAST_PTO, }; -use crate::addr_valid::{AddressValidation, ValidateAddress}; -use crate::{ConnectionParameters, Error, Version}; - -use std::cell::RefCell; -use std::mem; -use std::rc::Rc; -use std::time::Duration; -use test_fixture::{self, assertions, now}; +use crate::{ + addr_valid::{AddressValidation, ValidateAddress}, + ConnectionParameters, Error, Version, +}; #[test] fn resume() { diff --git a/neqo-transport/src/connection/tests/stream.rs b/neqo-transport/src/connection/tests/stream.rs index d83ca07b61..586a537b9d 100644 --- a/neqo-transport/src/connection/tests/stream.rs +++ b/neqo-transport/src/connection/tests/stream.rs @@ -4,6 +4,11 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{cmp::max, collections::HashMap, convert::TryFrom, mem}; + +use neqo_common::{event::Provider, qdebug}; +use test_fixture::now; + use super::{ super::State, assert_error, connect, connect_force_idle, default_client, default_server, maybe_authenticate, new_client, new_server, send_something, DEFAULT_STREAM_DATA, @@ -22,11 +27,6 @@ use crate::{ StreamId, StreamType, }; -use std::collections::HashMap; - -use neqo_common::{event::Provider, qdebug}; -use std::{cmp::max, convert::TryFrom, mem}; -use test_fixture::now; #[test] fn stream_create() { diff --git a/neqo-transport/src/connection/tests/vn.rs b/neqo-transport/src/connection/tests/vn.rs index e289bc654c..22f15c991c 100644 --- a/neqo-transport/src/connection/tests/vn.rs +++ b/neqo-transport/src/connection/tests/vn.rs @@ -4,19 +4,21 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}; +use std::{mem, time::Duration}; + +use neqo_common::{event::Provider, Decoder, Encoder}; +use test_fixture::{self, assertions, datagram, now}; + use super::{ + super::{ConnectionError, ConnectionEvent, Output, State, ZeroRttState}, connect, connect_fail, default_client, default_server, exchange_ticket, new_client, new_server, send_something, }; -use crate::packet::PACKET_BIT_LONG; -use crate::tparams::{self, TransportParameter}; -use crate::{ConnectionParameters, Error, Version}; - -use neqo_common::{event::Provider, Decoder, Encoder}; -use std::mem; -use std::time::Duration; -use test_fixture::{self, assertions, datagram, now}; +use crate::{ + packet::PACKET_BIT_LONG, + tparams::{self, TransportParameter}, + ConnectionParameters, Error, Version, +}; // The expected PTO duration after the first Initial is sent. const INITIAL_PTO: Duration = Duration::from_millis(300); @@ -217,8 +219,8 @@ fn compatible_upgrade() { assert_eq!(server.version(), Version::Version2); } -/// When the first packet from the client is gigantic, the server might generate acknowledgment packets in -/// version 1. Both client and server need to handle that gracefully. +/// When the first packet from the client is gigantic, the server might generate acknowledgment +/// packets in version 1. Both client and server need to handle that gracefully. #[test] fn compatible_upgrade_large_initial() { let params = ConnectionParameters::default().versions( diff --git a/neqo-transport/src/connection/tests/zerortt.rs b/neqo-transport/src/connection/tests/zerortt.rs index f896b30730..0aa5573c98 100644 --- a/neqo-transport/src/connection/tests/zerortt.rs +++ b/neqo-transport/src/connection/tests/zerortt.rs @@ -4,20 +4,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use super::super::Connection; -use super::{ - connect, default_client, default_server, exchange_ticket, new_server, resumed_server, - CountingConnectionIdGenerator, -}; -use crate::events::ConnectionEvent; -use crate::{ConnectionParameters, Error, StreamType, Version}; +use std::{cell::RefCell, rc::Rc}; use neqo_common::event::Provider; use neqo_crypto::{AllowZeroRtt, AntiReplay}; -use std::cell::RefCell; -use std::rc::Rc; use test_fixture::{self, assertions, now}; +use super::{ + super::Connection, connect, default_client, default_server, exchange_ticket, new_server, + resumed_server, CountingConnectionIdGenerator, +}; +use crate::{events::ConnectionEvent, ConnectionParameters, Error, StreamType, Version}; + #[test] fn zero_rtt_negotiate() { // Note that the two servers in this test will get different anti-replay filters. diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 898eb44372..4e152db0f2 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -16,7 +16,6 @@ use std::{ }; use neqo_common::{hex, hex_snip_middle, qdebug, qinfo, qtrace, Encoder, Role}; - use neqo_crypto::{ hkdf, hp::HpKey, Aead, Agent, AntiReplay, Cipher, Epoch, Error as CryptoError, HandshakeState, PrivateKey, PublicKey, Record, RecordList, ResumptionToken, SymKey, ZeroRttChecker, @@ -1542,8 +1541,8 @@ impl CryptoStreams { } // Calculate length of data based on the minimum of: // - available data - // - remaining space, less the header, which counts only one byte - // for the length at first to avoid underestimating length + // - remaining space, less the header, which counts only one byte for the length at + // first to avoid underestimating length let length = min(data.len(), builder.remaining() - header_len); header_len += Encoder::varint_len(u64::try_from(length).unwrap()) - 1; let length = min(data.len(), builder.remaining() - header_len); diff --git a/neqo-transport/src/dump.rs b/neqo-transport/src/dump.rs index 7dac137340..5d8a72f300 
100644 --- a/neqo-transport/src/dump.rs +++ b/neqo-transport/src/dump.rs @@ -7,13 +7,16 @@ // Enable just this file for logging to just see packets. // e.g. "RUST_LOG=neqo_transport::dump neqo-client ..." -use crate::connection::Connection; -use crate::frame::Frame; -use crate::packet::{PacketNumber, PacketType}; -use crate::path::PathRef; +use std::fmt::Write; + use neqo_common::{qdebug, Decoder}; -use std::fmt::Write; +use crate::{ + connection::Connection, + frame::Frame, + packet::{PacketNumber, PacketType}, + path::PathRef, +}; #[allow(clippy::module_name_repetitions)] pub fn dump_packet( diff --git a/neqo-transport/src/events.rs b/neqo-transport/src/events.rs index 93cb63a86f..88a85250ee 100644 --- a/neqo-transport/src/events.rs +++ b/neqo-transport/src/events.rs @@ -6,17 +6,18 @@ // Collecting a list of events relevant to whoever is using the Connection. -use std::cell::RefCell; -use std::collections::VecDeque; -use std::rc::Rc; - -use crate::connection::State; -use crate::quic_datagrams::DatagramTracking; -use crate::stream_id::{StreamId, StreamType}; -use crate::{AppError, Stats}; +use std::{cell::RefCell, collections::VecDeque, rc::Rc}; + use neqo_common::event::Provider as EventProvider; use neqo_crypto::ResumptionToken; +use crate::{ + connection::State, + quic_datagrams::DatagramTracking, + stream_id::{StreamId, StreamType}, + AppError, Stats, +}; + #[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] pub enum OutgoingDatagramOutcome { DroppedTooBig, diff --git a/neqo-transport/src/fc.rs b/neqo-transport/src/fc.rs index 090afdc538..a219ca7e8d 100644 --- a/neqo-transport/src/fc.rs +++ b/neqo-transport/src/fc.rs @@ -7,6 +7,14 @@ // Tracks possibly-redundant flow control signals from other code and converts // into flow control frames needing to be sent to the remote. 
+use std::{ + convert::TryFrom, + fmt::Debug, + ops::{Deref, DerefMut, Index, IndexMut}, +}; + +use neqo_common::{qtrace, Role}; + use crate::{ frame::{ FRAME_TYPE_DATA_BLOCKED, FRAME_TYPE_MAX_DATA, FRAME_TYPE_MAX_STREAMS_BIDI, @@ -19,13 +27,6 @@ use crate::{ stream_id::{StreamId, StreamType}, Error, Res, }; -use neqo_common::{qtrace, Role}; - -use std::{ - convert::TryFrom, - fmt::Debug, - ops::{Deref, DerefMut, Index, IndexMut}, -}; #[derive(Debug)] pub struct SenderFlowControl @@ -575,6 +576,8 @@ impl IndexMut for LocalStreamLimits { #[cfg(test)] mod test { + use neqo_common::{Encoder, Role}; + use super::{LocalStreamLimits, ReceiverFlowControl, RemoteStreamLimits, SenderFlowControl}; use crate::{ packet::PacketBuilder, @@ -582,7 +585,6 @@ mod test { stream_id::{StreamId, StreamType}, Error, }; - use neqo_common::{Encoder, Role}; #[test] fn blocked_at_zero() { diff --git a/neqo-transport/src/frame.rs b/neqo-transport/src/frame.rs index 8081baef6c..f3d567ac7c 100644 --- a/neqo-transport/src/frame.rs +++ b/neqo-transport/src/frame.rs @@ -6,15 +6,16 @@ // Directly relating to QUIC frames. 
-use neqo_common::{qtrace, Decoder}; +use std::{convert::TryFrom, ops::RangeInclusive}; -use crate::cid::MAX_CONNECTION_ID_LEN; -use crate::packet::PacketType; -use crate::stream_id::{StreamId, StreamType}; -use crate::{AppError, ConnectionError, Error, Res, TransportError}; +use neqo_common::{qtrace, Decoder}; -use std::convert::TryFrom; -use std::ops::RangeInclusive; +use crate::{ + cid::MAX_CONNECTION_ID_LEN, + packet::PacketType, + stream_id::{StreamId, StreamType}, + AppError, ConnectionError, Error, Res, TransportError, +}; #[allow(clippy::module_name_repetitions)] pub type FrameType = u64; @@ -612,9 +613,10 @@ impl<'a> Frame<'a> { #[cfg(test)] mod tests { - use super::*; use neqo_common::{Decoder, Encoder}; + use super::*; + fn just_dec(f: &Frame, s: &str) { let encoded = Encoder::from_hex(s); let decoded = Frame::decode(&mut encoded.as_decoder()).unwrap(); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index 35bdd7d34a..d10ea7e9e6 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -51,16 +51,13 @@ pub use self::{ events::{ConnectionEvent, ConnectionEvents}, frame::CloseError, quic_datagrams::DatagramTracking, + recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}, + send_stream::{SendStreamStats, SEND_BUFFER_SIZE}, stats::Stats, stream_id::{StreamId, StreamType}, version::Version, }; -pub use self::{ - recv_stream::{RecvStreamStats, RECV_BUFFER_SIZE}, - send_stream::{SendStreamStats, SEND_BUFFER_SIZE}, -}; - pub type TransportError = u64; const ERROR_APPLICATION_CLOSE: TransportError = 12; const ERROR_CRYPTO_BUFFER_EXCEEDED: TransportError = 13; diff --git a/neqo-transport/src/pace.rs b/neqo-transport/src/pace.rs index f1cec80ac6..e5214c1bc8 100644 --- a/neqo-transport/src/pace.rs +++ b/neqo-transport/src/pace.rs @@ -7,12 +7,14 @@ // Pacer #![deny(clippy::pedantic)] -use neqo_common::qtrace; +use std::{ + cmp::min, + convert::TryFrom, + fmt::{Debug, Display}, + time::{Duration, Instant}, +}; -use std::cmp::min; -use 
std::convert::TryFrom; -use std::fmt::{Debug, Display}; -use std::time::{Duration, Instant}; +use neqo_common::qtrace; /// This value determines how much faster the pacer operates than the /// congestion window. @@ -123,10 +125,12 @@ impl Debug for Pacer { #[cfg(test)] mod tests { - use super::Pacer; use std::time::Duration; + use test_fixture::now; + use super::Pacer; + const RTT: Duration = Duration::from_millis(1000); const PACKET: usize = 1000; const CWND: usize = PACKET * 10; diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 080cf6649a..0968bb9ae2 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -5,16 +5,6 @@ // except according to those terms. // Encoding and decoding packets off the wire. -use crate::{ - cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, - crypto::{CryptoDxState, CryptoSpace, CryptoStates}, - version::{Version, WireVersion}, - Error, Res, -}; - -use neqo_common::{hex, hex_with_len, qtrace, qwarn, Decoder, Encoder}; -use neqo_crypto::random; - use std::{ cmp::min, convert::TryFrom, @@ -24,6 +14,16 @@ use std::{ time::Instant, }; +use neqo_common::{hex, hex_with_len, qtrace, qwarn, Decoder, Encoder}; +use neqo_crypto::random; + +use crate::{ + cid::{ConnectionId, ConnectionIdDecoder, ConnectionIdRef, MAX_CONNECTION_ID_LEN}, + crypto::{CryptoDxState, CryptoSpace, CryptoStates}, + version::{Version, WireVersion}, + Error, Res, +}; + pub const PACKET_BIT_LONG: u8 = 0x80; const PACKET_BIT_SHORT: u8 = 0x00; const PACKET_BIT_FIXED_QUIC: u8 = 0x40; @@ -501,8 +501,8 @@ pub struct PublicPacket<'a> { dcid: ConnectionIdRef<'a>, /// The source connection ID, if this is a long header packet. scid: Option>, - /// Any token that is included in the packet (Retry always has a token; Initial sometimes does). - /// This is empty when there is no token. + /// Any token that is included in the packet (Retry always has a token; Initial sometimes + /// does). 
This is empty when there is no token. token: &'a [u8], /// The size of the header, not including the packet number. header_len: usize, @@ -865,13 +865,14 @@ impl Deref for DecryptedPacket { #[cfg(all(test, not(feature = "fuzzing")))] mod tests { + use neqo_common::Encoder; + use test_fixture::{fixture_init, now}; + use super::*; use crate::{ crypto::{CryptoDxState, CryptoStates}, EmptyConnectionIdGenerator, RandomConnectionIdGenerator, Version, }; - use neqo_common::Encoder; - use test_fixture::{fixture_init, now}; const CLIENT_CID: &[u8] = &[0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08]; const SERVER_CID: &[u8] = &[0xf0, 0x67, 0xa5, 0x50, 0x2a, 0x42, 0x62, 0xb5]; @@ -1023,7 +1024,8 @@ mod tests { assert_eq!(&decrypted[..], SAMPLE_SHORT_PAYLOAD); } - /// By telling the decoder that the connection ID is shorter than it really is, we get a decryption error. + /// By telling the decoder that the connection ID is shorter than it really is, we get a + /// decryption error. #[test] fn decode_short_bad_cid() { fixture_init(); diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index e9a7e90ab9..a1333a0150 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -6,13 +6,12 @@ #![deny(clippy::pedantic)] -use crate::version::Version; -use crate::{Error, Res}; +use std::cell::RefCell; use neqo_common::qerror; use neqo_crypto::{hkdf, Aead, TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}; -use std::cell::RefCell; +use crate::{version::Version, Error, Res}; /// The AEAD used for Retry is fixed, so use thread local storage. 
fn make_aead(version: Version) -> Aead { diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 2ab90c169c..06cc8a6a61 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -17,6 +17,9 @@ use std::{ time::{Duration, Instant}, }; +use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; +use neqo_crypto::random; + use crate::{ ackrate::{AckRate, PeerAckDelay}, cc::CongestionControlAlgorithm, @@ -31,9 +34,6 @@ use crate::{ Error, Res, Stats, }; -use neqo_common::{hex, qdebug, qinfo, qlog::NeqoQlog, qtrace, Datagram, Encoder, IpTos}; -use neqo_crypto::random; - /// This is the MTU that we assume when using IPv6. /// We use this size for Initial packets, so we don't need to worry about probing for support. /// If the path doesn't support this MTU, we will assume that it doesn't support QUIC. @@ -498,7 +498,7 @@ enum ProbeState { } impl ProbeState { - /// Determine whether the current state requires probing. + /// Determine whether the current state requires probing. fn probe_needed(&self) -> bool { matches!(self, Self::ProbeNeeded { .. }) } @@ -1008,7 +1008,8 @@ impl Path { .map_or(usize::MAX, |limit| { let budget = if limit == 0 { // If we have received absolutely nothing thus far, then this endpoint - // is the one initiating communication on this path. Allow enough space for probing. + // is the one initiating communication on this path. Allow enough space for + // probing. 
self.mtu() * 5 } else { limit diff --git a/neqo-transport/src/qlog.rs b/neqo-transport/src/qlog.rs index 1639da6e74..434395fd23 100644 --- a/neqo-transport/src/qlog.rs +++ b/neqo-transport/src/qlog.rs @@ -13,6 +13,7 @@ use std::{ time::Duration, }; +use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; use qlog::events::{ connectivity::{ConnectionStarted, ConnectionState, ConnectionStateUpdated}, quic::{ @@ -21,8 +22,6 @@ use qlog::events::{ }, EventData, RawInfo, }; - -use neqo_common::{hex, qinfo, qlog::NeqoQlog, Decoder}; use smallvec::SmallVec; use crate::{ diff --git a/neqo-transport/src/quic_datagrams.rs b/neqo-transport/src/quic_datagrams.rs index e9c4497cde..07f3594768 100644 --- a/neqo-transport/src/quic_datagrams.rs +++ b/neqo-transport/src/quic_datagrams.rs @@ -6,14 +6,17 @@ // https://datatracker.ietf.org/doc/html/draft-ietf-quic-datagram -use crate::frame::{FRAME_TYPE_DATAGRAM, FRAME_TYPE_DATAGRAM_WITH_LEN}; -use crate::packet::PacketBuilder; -use crate::recovery::RecoveryToken; -use crate::{events::OutgoingDatagramOutcome, ConnectionEvents, Error, Res, Stats}; +use std::{cmp::min, collections::VecDeque, convert::TryFrom}; + use neqo_common::Encoder; -use std::cmp::min; -use std::collections::VecDeque; -use std::convert::TryFrom; + +use crate::{ + events::OutgoingDatagramOutcome, + frame::{FRAME_TYPE_DATAGRAM, FRAME_TYPE_DATAGRAM_WITH_LEN}, + packet::PacketBuilder, + recovery::RecoveryToken, + ConnectionEvents, Error, Res, Stats, +}; pub const MAX_QUIC_DATAGRAM: u64 = 65535; @@ -140,7 +143,9 @@ impl QuicDatagrams { } /// Returns true if there was an unsent datagram that has been dismissed. + /// /// # Error + /// /// The function returns `TooMuchData` if the supply buffer is bigger than /// the allowed remote datagram size. The funcion does not check if the /// datagram can fit into a packet (i.e. MTU limit). 
This is checked during diff --git a/neqo-transport/src/recovery.rs b/neqo-transport/src/recovery.rs index a640b75371..d90989b486 100644 --- a/neqo-transport/src/recovery.rs +++ b/neqo-transport/src/recovery.rs @@ -17,9 +17,8 @@ use std::{ time::{Duration, Instant}, }; -use smallvec::{smallvec, SmallVec}; - use neqo_common::{qdebug, qinfo, qlog::NeqoQlog, qtrace, qwarn}; +use smallvec::{smallvec, SmallVec}; use crate::{ ackrate::AckRate, @@ -464,7 +463,9 @@ impl LossRecoverySpaces { /// Drop a packet number space and return all the packets that were /// outstanding, so that those can be marked as lost. + /// /// # Panics + /// /// If the space has already been removed. pub fn drop_space(&mut self, space: PacketNumberSpace) -> impl IntoIterator { let sp = match space { @@ -526,9 +527,9 @@ impl PtoState { /// And the number to declare lost when the PTO timer is hit. fn pto_packet_count(space: PacketNumberSpace, rx_count: usize) -> usize { if space == PacketNumberSpace::Initial && rx_count == 0 { - // For the Initial space, we only send one packet on PTO if we have not received any packets - // from the peer yet. This avoids sending useless PING-only packets when the Client Initial - // is deemed lost. + // For the Initial space, we only send one packet on PTO if we have not received any + // packets from the peer yet. This avoids sending useless PING-only packets + // when the Client Initial is deemed lost. 
1 } else { MAX_PTO_PACKET_COUNT @@ -1017,6 +1018,17 @@ impl ::std::fmt::Display for LossRecovery { #[cfg(test)] mod tests { + use std::{ + cell::RefCell, + convert::TryInto, + ops::{Deref, DerefMut, RangeInclusive}, + rc::Rc, + time::{Duration, Instant}, + }; + + use neqo_common::qlog::NeqoQlog; + use test_fixture::{addr, now}; + use super::{ LossRecovery, LossRecoverySpace, PacketNumberSpace, SendProfile, SentPacket, FAST_PTO_SCALE, }; @@ -1028,15 +1040,6 @@ mod tests { rtt::RttEstimate, stats::{Stats, StatsCell}, }; - use neqo_common::qlog::NeqoQlog; - use std::{ - cell::RefCell, - convert::TryInto, - ops::{Deref, DerefMut, RangeInclusive}, - rc::Rc, - time::{Duration, Instant}, - }; - use test_fixture::{addr, now}; // Shorthand for a time in milliseconds. const fn ms(t: u64) -> Duration { diff --git a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 04db42d36b..0b2863c425 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -8,6 +8,7 @@ // incoming STREAM frames. use std::{ + cell::RefCell, cmp::max, collections::BTreeMap, convert::TryFrom, @@ -15,6 +16,7 @@ use std::{ rc::{Rc, Weak}, }; +use neqo_common::{qtrace, Role}; use smallvec::SmallVec; use crate::{ @@ -28,8 +30,6 @@ use crate::{ stream_id::StreamId, AppError, Error, Res, }; -use neqo_common::{qtrace, Role}; -use std::cell::RefCell; const RX_STREAM_DATA_WINDOW: u64 = 0x10_0000; // 1MiB @@ -768,6 +768,7 @@ impl RecvStream { } /// # Errors + /// /// `NoMoreData` if data and fin bit were previously read by the application. pub fn read(&mut self, buf: &mut [u8]) -> Res<(usize, bool)> { let data_recvd_state = matches!(self.state, RecvStreamState::DataRecvd { .. 
}); @@ -965,10 +966,12 @@ impl RecvStream { #[cfg(test)] mod tests { - use super::*; - use neqo_common::Encoder; use std::ops::Range; + use neqo_common::Encoder; + + use super::*; + const SESSION_WINDOW: usize = 1024; fn recv_ranges(ranges: &[Range], available: usize) { diff --git a/neqo-transport/src/rtt.rs b/neqo-transport/src/rtt.rs index a5ceb37da2..4b05198bc9 100644 --- a/neqo-transport/src/rtt.rs +++ b/neqo-transport/src/rtt.rs @@ -8,17 +8,21 @@ #![deny(clippy::pedantic)] -use std::cmp::{max, min}; -use std::time::{Duration, Instant}; +use std::{ + cmp::{max, min}, + time::{Duration, Instant}, +}; use neqo_common::{qlog::NeqoQlog, qtrace}; -use crate::ackrate::{AckRate, PeerAckDelay}; -use crate::packet::PacketBuilder; -use crate::qlog::{self, QlogMetric}; -use crate::recovery::RecoveryToken; -use crate::stats::FrameStats; -use crate::tracking::PacketNumberSpace; +use crate::{ + ackrate::{AckRate, PeerAckDelay}, + packet::PacketBuilder, + qlog::{self, QlogMetric}, + recovery::RecoveryToken, + stats::FrameStats, + tracking::PacketNumberSpace, +}; /// The smallest time that the system timer (via `sleep()`, `nanosleep()`, /// `select()`, or similar) can reliably deliver; see `neqo_common::hrtime`. diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index e171dfab83..5feb785ac6 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -11,16 +11,15 @@ use std::{ cmp::{max, min, Ordering}, collections::{BTreeMap, VecDeque}, convert::TryFrom, + hash::{Hash, Hasher}, mem, ops::Add, rc::Rc, }; use indexmap::IndexMap; -use smallvec::SmallVec; -use std::hash::{Hash, Hasher}; - use neqo_common::{qdebug, qerror, qinfo, qtrace, Encoder, Role}; +use smallvec::SmallVec; use crate::{ events::ConnectionEvents, @@ -1280,7 +1279,8 @@ pub struct OrderGroupIter<'a> { // We store the next position in the OrderGroup. 
// Otherwise we'd need an explicit "done iterating" call to be made, or implement Drop to // copy the value back. - // This is where next was when we iterated for the first time; when we get back to that we stop. + // This is where next was when we iterated for the first time; when we get back to that we + // stop. started_at: Option, } @@ -1321,7 +1321,10 @@ impl OrderGroup { pub fn insert(&mut self, stream_id: StreamId) { match self.vec.binary_search(&stream_id) { - Ok(_) => panic!("Duplicate stream_id {}", stream_id), // element already in vector @ `pos` + Ok(_) => { + // element already in vector @ `pos` + panic!("Duplicate stream_id {}", stream_id) + } Err(pos) => self.vec.insert(pos, stream_id), } } @@ -1331,7 +1334,10 @@ impl OrderGroup { Ok(pos) => { self.vec.remove(pos); } - Err(_) => panic!("Missing stream_id {}", stream_id), // element already in vector @ `pos` + Err(_) => { + // element already in vector @ `pos` + panic!("Missing stream_id {}", stream_id) + } } } } @@ -1634,10 +1640,10 @@ pub struct SendStreamRecoveryToken { #[cfg(test)] mod tests { - use super::*; + use neqo_common::{event::Provider, hex_with_len, qtrace}; + use super::*; use crate::events::ConnectionEvent; - use neqo_common::{event::Provider, hex_with_len, qtrace}; fn connection_fc(limit: u64) -> Rc>> { Rc::new(RefCell::new(SenderFlowControl::new((), limit))) diff --git a/neqo-transport/src/sender.rs b/neqo-transport/src/sender.rs index 0c1e66ff9a..9a00dfc7a7 100644 --- a/neqo-transport/src/sender.rs +++ b/neqo-transport/src/sender.rs @@ -8,16 +8,19 @@ #![deny(clippy::pedantic)] #![allow(clippy::module_name_repetitions)] -use crate::cc::{ - ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno, +use std::{ + fmt::{self, Debug, Display}, + time::{Duration, Instant}, }; -use crate::pace::Pacer; -use crate::rtt::RttEstimate; -use crate::tracking::SentPacket; + use neqo_common::qlog::NeqoQlog; -use std::fmt::{self, Debug, Display}; -use 
std::time::{Duration, Instant}; +use crate::{ + cc::{ClassicCongestionControl, CongestionControl, CongestionControlAlgorithm, Cubic, NewReno}, + pace::Pacer, + rtt::RttEstimate, + tracking::SentPacket, +}; /// The number of packets we allow to burst from the pacer. pub const PACING_BURST_SIZE: usize = 2; diff --git a/neqo-transport/src/server.rs b/neqo-transport/src/server.rs index 288ec1a605..12a7d2f9e0 100644 --- a/neqo-transport/src/server.rs +++ b/neqo-transport/src/server.rs @@ -6,6 +6,18 @@ // This file implements a server that can handle multiple connections. +use std::{ + cell::RefCell, + collections::{HashMap, HashSet, VecDeque}, + fs::OpenOptions, + mem, + net::SocketAddr, + ops::{Deref, DerefMut}, + path::PathBuf, + rc::{Rc, Weak}, + time::{Duration, Instant}, +}; + use neqo_common::{ self as common, event::Provider, hex, qdebug, qerror, qinfo, qlog::NeqoQlog, qtrace, qwarn, timer::Timer, Datagram, Decoder, Role, @@ -25,18 +37,6 @@ use crate::{ ConnectionParameters, Res, Version, }; -use std::{ - cell::RefCell, - collections::{HashMap, HashSet, VecDeque}, - fs::OpenOptions, - mem, - net::SocketAddr, - ops::{Deref, DerefMut}, - path::PathBuf, - rc::{Rc, Weak}, - time::{Duration, Instant}, -}; - pub enum InitialResult { Accept, Drop, @@ -190,11 +190,11 @@ impl Server { /// * `certs` is a list of the certificates that should be configured. /// * `protocols` is the preference list of ALPN values. /// * `anti_replay` is an anti-replay context. - /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. This - /// will be passed the value of the `extra` argument that was passed to - /// `Connection::send_ticket` to see if it is OK. - /// * `cid_generator` is responsible for generating connection IDs and parsing them; - /// connection IDs produced by the manager cannot be zero-length. + /// * `zero_rtt_checker` determines whether 0-RTT should be accepted. 
This will be passed the + /// value of the `extra` argument that was passed to `Connection::send_ticket` to see if it is + /// OK. + /// * `cid_generator` is responsible for generating connection IDs and parsing them; connection + /// IDs produced by the manager cannot be zero-length. pub fn new( now: Instant, certs: &[impl AsRef], @@ -615,7 +615,8 @@ impl Server { qdebug!([self], "Drop initial: too short"); return None; } - // Copy values from `packet` because they are currently still borrowing from `dgram`. + // Copy values from `packet` because they are currently still borrowing from + // `dgram`. let initial = InitialDetails::new(&packet); self.handle_initial(initial, dgram, now) } diff --git a/neqo-transport/src/stats.rs b/neqo-transport/src/stats.rs index 9e956eb02e..d6c7a911f9 100644 --- a/neqo-transport/src/stats.rs +++ b/neqo-transport/src/stats.rs @@ -7,8 +7,6 @@ // Tracking of some useful statistics. #![deny(clippy::pedantic)] -use crate::packet::PacketNumber; -use neqo_common::qinfo; use std::{ cell::RefCell, fmt::{self, Debug}, @@ -17,6 +15,10 @@ use std::{ time::Duration, }; +use neqo_common::qinfo; + +use crate::packet::PacketNumber; + pub(crate) const MAX_PTO_COUNTS: usize = 16; #[derive(Default, Clone)] @@ -176,6 +178,7 @@ impl Stats { } /// # Panics + /// /// When preconditions are violated. 
pub fn add_pto_count(&mut self, count: usize) { debug_assert!(count > 0); diff --git a/neqo-transport/src/stream_id.rs b/neqo-transport/src/stream_id.rs index 51df2ca9fb..f3b07b86a8 100644 --- a/neqo-transport/src/stream_id.rs +++ b/neqo-transport/src/stream_id.rs @@ -133,9 +133,10 @@ impl ::std::fmt::Display for StreamId { #[cfg(test)] mod test { - use super::StreamId; use neqo_common::Role; + use super::StreamId; + #[test] fn bidi_stream_properties() { let id1 = StreamId::from(16); diff --git a/neqo-transport/src/streams.rs b/neqo-transport/src/streams.rs index 507cfbc214..7cbb29ce02 100644 --- a/neqo-transport/src/streams.rs +++ b/neqo-transport/src/streams.rs @@ -5,6 +5,10 @@ // except according to those terms. // Stream management for a connection. +use std::{cell::RefCell, cmp::Ordering, rc::Rc}; + +use neqo_common::{qtrace, qwarn, Role}; + use crate::{ fc::{LocalStreamLimits, ReceiverFlowControl, RemoteStreamLimits, SenderFlowControl}, frame::Frame, @@ -17,9 +21,6 @@ use crate::{ tparams::{self, TransportParametersHandler}, ConnectionEvents, Error, Res, }; -use neqo_common::{qtrace, qwarn, Role}; -use std::cmp::Ordering; -use std::{cell::RefCell, rc::Rc}; pub type SendOrder = i64; @@ -438,9 +439,10 @@ impl Streams { if st == StreamType::BiDi { // From the local perspective, this is a local- originated BiDi stream. From the - // remote perspective, this is a remote-originated BiDi stream. Therefore, look at - // the local transport parameters for the INITIAL_MAX_STREAM_DATA_BIDI_LOCAL value - // to decide how much this endpoint will allow its peer to send. + // remote perspective, this is a remote-originated BiDi stream. Therefore, look + // at the local transport parameters for the + // INITIAL_MAX_STREAM_DATA_BIDI_LOCAL value to decide how + // much this endpoint will allow its peer to send. 
let recv_initial_max_stream_data = self .tps .borrow() diff --git a/neqo-transport/src/tparams.rs b/neqo-transport/src/tparams.rs index ea5f78fc36..1297829094 100644 --- a/neqo-transport/src/tparams.rs +++ b/neqo-transport/src/tparams.rs @@ -6,10 +6,12 @@ // Transport parameters. See -transport section 7.3. -use crate::{ - cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN}, - version::{Version, VersionConfig, WireVersion}, - Error, Res, +use std::{ + cell::RefCell, + collections::HashMap, + convert::TryFrom, + net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, + rc::Rc, }; use neqo_common::{hex, qdebug, qinfo, qtrace, Decoder, Encoder, Role}; @@ -19,12 +21,10 @@ use neqo_crypto::{ random, HandshakeMessage, ZeroRttCheckResult, ZeroRttChecker, }; -use std::{ - cell::RefCell, - collections::HashMap, - convert::TryFrom, - net::{Ipv4Addr, Ipv6Addr, SocketAddrV4, SocketAddrV6}, - rc::Rc, +use crate::{ + cid::{ConnectionId, ConnectionIdEntry, CONNECTION_ID_SEQNO_PREFERRED, MAX_CONNECTION_ID_LEN}, + version::{Version, VersionConfig, WireVersion}, + Error, Res, }; pub type TransportParameterId = u64; @@ -71,6 +71,7 @@ impl PreferredAddress { /// Make a new preferred address configuration. /// /// # Panics + /// /// If neither address is provided, or if either address is of the wrong type. #[must_use] pub fn new(v4: Option, v6: Option) -> Self { @@ -1023,7 +1024,8 @@ mod tests { fn active_connection_id_limit_min_2() { let mut tps = TransportParameters::default(); - // Intentionally set an invalid value for the ACTIVE_CONNECTION_ID_LIMIT transport parameter. + // Intentionally set an invalid value for the ACTIVE_CONNECTION_ID_LIMIT transport + // parameter. 
tps.params .insert(ACTIVE_CONNECTION_ID_LIMIT, TransportParameter::Integer(1)); diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 32f1c8d1b7..62e7398ede 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -18,6 +18,7 @@ use std::{ use neqo_common::{qdebug, qinfo, qtrace, qwarn}; use neqo_crypto::{Epoch, TLS_EPOCH_HANDSHAKE, TLS_EPOCH_INITIAL}; +use smallvec::{smallvec, SmallVec}; use crate::{ packet::{PacketBuilder, PacketNumber, PacketType}, @@ -26,8 +27,6 @@ use crate::{ Error, Res, }; -use smallvec::{smallvec, SmallVec}; - // TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574 #[derive(Clone, Copy, Debug, PartialEq, PartialOrd, Ord, Eq)] pub enum PacketNumberSpace { @@ -750,6 +749,11 @@ impl Default for AckTracker { #[cfg(test)] mod tests { + use std::collections::HashSet; + + use lazy_static::lazy_static; + use neqo_common::Encoder; + use super::{ AckTracker, Duration, Instant, PacketNumberSpace, PacketNumberSpaceSet, RecoveryToken, RecvdPackets, MAX_TRACKED_RANGES, @@ -759,9 +763,6 @@ mod tests { packet::{PacketBuilder, PacketNumber}, stats::FrameStats, }; - use lazy_static::lazy_static; - use neqo_common::Encoder; - use std::collections::HashSet; const RTT: Duration = Duration::from_millis(100); lazy_static! { diff --git a/neqo-transport/src/version.rs b/neqo-transport/src/version.rs index 4cb9b964ce..13db0bf024 100644 --- a/neqo-transport/src/version.rs +++ b/neqo-transport/src/version.rs @@ -4,10 +4,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use crate::{Error, Res}; -use neqo_common::qdebug; use std::convert::TryFrom; +use neqo_common::qdebug; + +use crate::{Error, Res}; + pub type WireVersion = u32; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] diff --git a/neqo-transport/tests/common/mod.rs b/neqo-transport/tests/common/mod.rs index 1a414df5b0..a43f91e3fe 100644 --- a/neqo-transport/tests/common/mod.rs +++ b/neqo-transport/tests/common/mod.rs @@ -8,6 +8,8 @@ #![warn(clippy::pedantic)] #![allow(unused)] +use std::{cell::RefCell, convert::TryFrom, mem, ops::Range, rc::Rc}; + use neqo_common::{event::Provider, hex_with_len, qtrace, Datagram, Decoder, Role}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_VERSION_1_3}, @@ -21,12 +23,6 @@ use neqo_transport::{ }; use test_fixture::{self, default_client, now, CountingConnectionIdGenerator}; -use std::cell::RefCell; -use std::convert::TryFrom; -use std::mem; -use std::ops::Range; -use std::rc::Rc; - /// Create a server. This is different than the one in the fixture, which is a single connection. 
pub fn new_server(params: ConnectionParameters) -> Server { Server::new( diff --git a/neqo-transport/tests/conn_vectors.rs b/neqo-transport/tests/conn_vectors.rs index 7597c81621..91dbbf31cc 100644 --- a/neqo-transport/tests/conn_vectors.rs +++ b/neqo-transport/tests/conn_vectors.rs @@ -8,14 +8,13 @@ #![deny(clippy::pedantic)] #![cfg(not(feature = "fuzzing"))] +use std::{cell::RefCell, rc::Rc}; + use neqo_transport::{ Connection, ConnectionParameters, RandomConnectionIdGenerator, State, Version, }; use test_fixture::{self, datagram, now}; -use std::cell::RefCell; -use std::rc::Rc; - const INITIAL_PACKET_V2: &[u8] = &[ 0xd7, 0x6b, 0x33, 0x43, 0xcf, 0x08, 0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08, 0x00, 0x00, 0x44, 0x9e, 0xa0, 0xc9, 0x5e, 0x82, 0xff, 0xe6, 0x7b, 0x6a, 0xbc, 0xdb, 0x42, 0x98, 0xb4, 0x85, diff --git a/neqo-transport/tests/connection.rs b/neqo-transport/tests/connection.rs index 661909fd22..4cbf57f405 100644 --- a/neqo-transport/tests/connection.rs +++ b/neqo-transport/tests/connection.rs @@ -9,12 +9,13 @@ mod common; +use std::convert::TryFrom; + use common::{ apply_header_protection, decode_initial_header, initial_aead_and_hp, remove_header_protection, }; use neqo_common::{Datagram, Decoder, Encoder, Role}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State, Version}; -use std::convert::TryFrom; use test_fixture::{self, default_client, default_server, new_client, now, split_datagram}; #[test] diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index e2389090a7..5d67ca7938 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -9,14 +9,14 @@ mod sim; +use std::{ops::Range, time::Duration}; + use neqo_transport::{ConnectionError, ConnectionParameters, Error, State}; use sim::{ connection::{ConnectionNode, ReachState, ReceiveData, SendData}, network::{Delay, Drop, TailDrop}, Simulator, }; -use std::ops::Range; -use std::time::Duration; /// The amount of transfer. 
Much more than this takes a surprising amount of time. const TRANSFER_AMOUNT: usize = 1 << 20; // 1M diff --git a/neqo-transport/tests/retry.rs b/neqo-transport/tests/retry.rs index eb20b8144a..93759c7df9 100644 --- a/neqo-transport/tests/retry.rs +++ b/neqo-transport/tests/retry.rs @@ -10,6 +10,13 @@ mod common; +use std::{ + convert::TryFrom, + mem, + net::{IpAddr, Ipv4Addr, SocketAddr}, + time::Duration, +}; + use common::{ apply_header_protection, connected_server, decode_initial_header, default_server, generate_ticket, initial_aead_and_hp, remove_header_protection, @@ -17,10 +24,6 @@ use common::{ use neqo_common::{hex_with_len, qdebug, qtrace, Datagram, Encoder, Role}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{server::ValidateAddress, ConnectionError, Error, State, StreamType}; -use std::convert::TryFrom; -use std::mem; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use std::time::Duration; use test_fixture::{self, assertions, datagram, default_client, now, split_datagram}; #[test] diff --git a/neqo-transport/tests/server.rs b/neqo-transport/tests/server.rs index 2f1ee3b493..d6c9c2df95 100644 --- a/neqo-transport/tests/server.rs +++ b/neqo-transport/tests/server.rs @@ -9,11 +9,12 @@ mod common; +use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration}; + use common::{ apply_header_protection, connect, connected_server, decode_initial_header, default_server, find_ticket, generate_ticket, initial_aead_and_hp, new_server, remove_header_protection, }; - use neqo_common::{qtrace, Datagram, Decoder, Encoder, Role}; use neqo_crypto::{ generate_ech_keys, AllowZeroRtt, AuthenticationStatus, ZeroRttCheckResult, ZeroRttChecker, @@ -27,12 +28,12 @@ use test_fixture::{ CountingConnectionIdGenerator, }; -use std::{cell::RefCell, convert::TryFrom, mem, net::SocketAddr, rc::Rc, time::Duration}; - /// Take a pair of connections in any state and complete the handshake. 
/// The `datagram` argument is a packet that was received from the server. /// See `connect` for what this returns. +/// /// # Panics +/// /// Only when the connection fails. pub fn complete_connection( client: &mut Connection, diff --git a/neqo-transport/tests/sim/connection.rs b/neqo-transport/tests/sim/connection.rs index b624c119bd..45a5234512 100644 --- a/neqo-transport/tests/sim/connection.rs +++ b/neqo-transport/tests/sim/connection.rs @@ -6,18 +6,20 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; -use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; -use neqo_crypto::AuthenticationStatus; -use neqo_transport::{ - Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, -}; use std::{ cmp::min, fmt::{self, Debug}, time::Instant, }; +use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; +use neqo_crypto::AuthenticationStatus; +use neqo_transport::{ + Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, +}; + +use super::{Node, Rng}; + /// The status of the processing of an event. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum GoalStatus { diff --git a/neqo-transport/tests/sim/delay.rs b/neqo-transport/tests/sim/delay.rs index 95188c0562..34cb923084 100644 --- a/neqo-transport/tests/sim/delay.rs +++ b/neqo-transport/tests/sim/delay.rs @@ -6,14 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; +use std::{ + collections::BTreeMap, + convert::TryFrom, + fmt::{self, Debug}, + ops::Range, + time::{Duration, Instant}, +}; + use neqo_common::Datagram; use neqo_transport::Output; -use std::collections::BTreeMap; -use std::convert::TryFrom; -use std::fmt::{self, Debug}; -use std::ops::Range; -use std::time::{Duration, Instant}; + +use super::{Node, Rng}; /// An iterator that shares a `Random` instance and produces uniformly /// random `Duration`s within a specified range. 
diff --git a/neqo-transport/tests/sim/drop.rs b/neqo-transport/tests/sim/drop.rs index d42913d99d..629fbf48d3 100644 --- a/neqo-transport/tests/sim/drop.rs +++ b/neqo-transport/tests/sim/drop.rs @@ -6,11 +6,15 @@ #![allow(clippy::module_name_repetitions)] -use super::{Node, Rng}; +use std::{ + fmt::{self, Debug}, + time::Instant, +}; + use neqo_common::{qtrace, Datagram}; use neqo_transport::Output; -use std::fmt::{self, Debug}; -use std::time::Instant; + +use super::{Node, Rng}; /// A random dropper. pub struct Drop { diff --git a/neqo-transport/tests/sim/mod.rs b/neqo-transport/tests/sim/mod.rs index f7646aac56..9ab9d57a4a 100644 --- a/neqo-transport/tests/sim/mod.rs +++ b/neqo-transport/tests/sim/mod.rs @@ -14,23 +14,23 @@ mod drop; pub mod rng; mod taildrop; +use std::{ + cell::RefCell, + cmp::min, + convert::TryFrom, + fmt::Debug, + rc::Rc, + time::{Duration, Instant}, +}; + use neqo_common::{qdebug, qinfo, qtrace, Datagram, Encoder}; use neqo_transport::Output; use rng::Random; -use std::cell::RefCell; -use std::cmp::min; -use std::convert::TryFrom; -use std::fmt::Debug; -use std::rc::Rc; -use std::time::{Duration, Instant}; use test_fixture::{self, now}; - use NodeState::{Active, Idle, Waiting}; pub mod network { - pub use super::delay::Delay; - pub use super::drop::Drop; - pub use super::taildrop::TailDrop; + pub use super::{delay::Delay, drop::Drop, taildrop::TailDrop}; } type Rng = Rc>; diff --git a/neqo-transport/tests/sim/rng.rs b/neqo-transport/tests/sim/rng.rs index d314e8b36f..af4f70eb5f 100644 --- a/neqo-transport/tests/sim/rng.rs +++ b/neqo-transport/tests/sim/rng.rs @@ -4,9 +4,9 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +use std::{convert::TryFrom, ops::Range}; + use neqo_common::Decoder; -use std::convert::TryFrom; -use std::ops::Range; /// An implementation of a xoshiro256** pseudorandom generator. 
pub struct Random { diff --git a/neqo-transport/tests/sim/taildrop.rs b/neqo-transport/tests/sim/taildrop.rs index 7346b27178..26813800c9 100644 --- a/neqo-transport/tests/sim/taildrop.rs +++ b/neqo-transport/tests/sim/taildrop.rs @@ -6,14 +6,18 @@ #![allow(clippy::module_name_repetitions)] -use super::Node; +use std::{ + cmp::max, + collections::VecDeque, + convert::TryFrom, + fmt::{self, Debug}, + time::{Duration, Instant}, +}; + use neqo_common::{qtrace, Datagram}; use neqo_transport::Output; -use std::cmp::max; -use std::collections::VecDeque; -use std::convert::TryFrom; -use std::fmt::{self, Debug}; -use std::time::{Duration, Instant}; + +use super::Node; /// One second in nanoseconds. const ONE_SECOND_NS: u128 = 1_000_000_000; diff --git a/test-fixture/src/assertions.rs b/test-fixture/src/assertions.rs index 339f11df64..7e772daabf 100644 --- a/test-fixture/src/assertions.rs +++ b/test-fixture/src/assertions.rs @@ -4,12 +4,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use crate::{addr, addr_v4}; +use std::{ + convert::{TryFrom, TryInto}, + net::SocketAddr, +}; + use neqo_common::{Datagram, Decoder}; -use neqo_transport::version::WireVersion; -use neqo_transport::Version; -use std::convert::{TryFrom, TryInto}; -use std::net::SocketAddr; +use neqo_transport::{version::WireVersion, Version}; + +use crate::{addr, addr_v4}; const PACKET_TYPE_MASK: u8 = 0b1011_0000; @@ -32,7 +35,9 @@ fn assert_long_packet_type(b: u8, v1_expected: u8, version: Version) { } /// Simple checks for the version being correct. +/// /// # Panics +/// /// If this is not a long header packet with the given version. pub fn assert_version(payload: &[u8], v: u32) { let mut dec = Decoder::from(payload); @@ -41,7 +46,9 @@ pub fn assert_version(payload: &[u8], v: u32) { } /// Simple checks for a Version Negotiation packet. +/// /// # Panics +/// /// If this is clearly not a Version Negotiation packet. 
pub fn assert_vn(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -53,7 +60,9 @@ pub fn assert_vn(payload: &[u8]) { } /// Do a simple decode of the datagram to verify that it is coalesced. +/// /// # Panics +/// /// If the tests fail. pub fn assert_coalesced_0rtt(payload: &[u8]) { assert!(payload.len() >= 1200); @@ -71,6 +80,7 @@ pub fn assert_coalesced_0rtt(payload: &[u8]) { } /// # Panics +/// /// If the tests fail. pub fn assert_retry(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -80,7 +90,9 @@ pub fn assert_retry(payload: &[u8]) { } /// Assert that this is an Initial packet with (or without) a token. +/// /// # Panics +/// /// If the tests fail. pub fn assert_initial(payload: &[u8], expect_token: bool) { let mut dec = Decoder::from(payload); @@ -94,7 +106,9 @@ pub fn assert_initial(payload: &[u8], expect_token: bool) { } /// Assert that this is a Handshake packet. +/// /// # Panics +/// /// If the tests fail. pub fn assert_handshake(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -104,6 +118,7 @@ pub fn assert_handshake(payload: &[u8]) { } /// # Panics +/// /// If the tests fail. pub fn assert_no_1rtt(payload: &[u8]) { let mut dec = Decoder::from(payload); @@ -135,6 +150,7 @@ pub fn assert_no_1rtt(payload: &[u8]) { } /// # Panics +/// /// When the path doesn't use the given socket address at both ends. pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { assert_eq!(dgram.source(), path_addr); @@ -142,6 +158,7 @@ pub fn assert_path(dgram: &Datagram, path_addr: SocketAddr) { } /// # Panics +/// /// When the path doesn't use the default v4 socket address at both ends. pub fn assert_v4_path(dgram: &Datagram, padded: bool) { assert_path(dgram, addr_v4()); @@ -151,6 +168,7 @@ pub fn assert_v4_path(dgram: &Datagram, padded: bool) { } /// # Panics +/// /// When the path doesn't use the default v6 socket address at both ends. 
pub fn assert_v6_path(dgram: &Datagram, padded: bool) { assert_path(dgram, addr()); diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index e431ace9a1..8635e8a840 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -7,22 +7,6 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -use neqo_common::{ - event::Provider, - hex, - qlog::{new_trace, NeqoQlog}, - qtrace, Datagram, Decoder, IpTos, Role, -}; - -use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; -use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; -use neqo_transport::{ - version::WireVersion, Connection, ConnectionEvent, ConnectionId, ConnectionIdDecoder, - ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, -}; - -use qlog::{events::EventImportance, streamer::QlogStreamer}; - use std::{ cell::RefCell, cmp::max, @@ -36,6 +20,19 @@ use std::{ }; use lazy_static::lazy_static; +use neqo_common::{ + event::Provider, + hex, + qlog::{new_trace, NeqoQlog}, + qtrace, Datagram, Decoder, IpTos, Role, +}; +use neqo_crypto::{init_db, random, AllowZeroRtt, AntiReplay, AuthenticationStatus}; +use neqo_http3::{Http3Client, Http3Parameters, Http3Server}; +use neqo_transport::{ + version::WireVersion, Connection, ConnectionEvent, ConnectionId, ConnectionIdDecoder, + ConnectionIdGenerator, ConnectionIdRef, ConnectionParameters, State, Version, +}; +use qlog::{events::EventImportance, streamer::QlogStreamer}; pub mod assertions; @@ -64,15 +61,19 @@ fn earlier() -> Instant { /// The current time for the test. Which is in the future, /// because 0-RTT tests need to run at least `ANTI_REPLAY_WINDOW` in the past. +/// /// # Panics +/// /// When the setup fails. #[must_use] pub fn now() -> Instant { earlier().checked_add(ANTI_REPLAY_WINDOW).unwrap() } -// Create a default anti-replay context. +/// Create a default anti-replay context. +/// /// # Panics +/// /// When the setup fails. 
#[must_use] pub fn anti_replay() -> AntiReplay { @@ -140,7 +141,9 @@ impl ConnectionIdGenerator for CountingConnectionIdGenerator { } /// Create a new client. +/// /// # Panics +/// /// If this doesn't work. #[must_use] pub fn new_client(params: ConnectionParameters) -> Connection { @@ -179,7 +182,9 @@ pub fn default_server_h3() -> Connection { } /// Create a transport server with a configuration. +/// /// # Panics +/// /// If this doesn't work. #[must_use] pub fn new_server(alpn: &[impl AsRef], params: ConnectionParameters) -> Connection { @@ -229,6 +234,7 @@ pub fn handshake(client: &mut Connection, server: &mut Connection) { } /// # Panics +/// /// When the connection fails. #[must_use] pub fn connect() -> (Connection, Connection) { @@ -241,7 +247,9 @@ pub fn connect() -> (Connection, Connection) { } /// Create a http3 client with default configuration. +/// /// # Panics +/// /// When the client can't be created. #[must_use] pub fn default_http3_client() -> Http3Client { @@ -262,7 +270,9 @@ pub fn default_http3_client() -> Http3Client { } /// Create a http3 client. +/// /// # Panics +/// /// When the client can't be created. #[must_use] pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { @@ -279,7 +289,9 @@ pub fn http3_client_with_params(params: Http3Parameters) -> Http3Client { } /// Create a http3 server with default configuration. +/// /// # Panics +/// /// When the server can't be created. #[must_use] pub fn default_http3_server() -> Http3Server { @@ -366,7 +378,9 @@ impl ToString for SharedVec { /// Returns a pair of new enabled `NeqoQlog` that is backed by a [`Vec`] /// together with a [`Cursor>`] that can be used to read the contents of /// the log. +/// /// # Panics +/// /// Panics if the log cannot be created. 
#[must_use] pub fn new_neqo_qlog() -> (NeqoQlog, SharedVec) { From 5e3269670aaee2997b49275671a4b6281fdebcc4 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Thu, 1 Feb 2024 17:31:24 +0200 Subject: [PATCH 21/41] ci: Further lower the RTT for the `idle_timeout_crazy_rtt` test (#1611) Since what we had before was still larger than our max. PTO, making deadlock more likely with 10% loss. --- neqo-transport/tests/network.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 5d67ca7938..8c388457c5 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -67,7 +67,7 @@ simulate!( ))) ] ), - Delay::new(weeks(15)..weeks(15)), + Delay::new(weeks(6)..weeks(6)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), @@ -78,7 +78,7 @@ simulate!( ))) ] ), - Delay::new(weeks(10)..weeks(10)), + Delay::new(weeks(8)..weeks(8)), Drop::percentage(10), ], ); From 20c8e8c77e650de0f45492442bd55b499ff71755 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Thu, 1 Feb 2024 17:30:53 +0100 Subject: [PATCH 22/41] refactor(server): replace mio with tokio (#1581) * refactor(server): replace mio with tokio * Move ready logic into fn * Extend expect docs * Restrict tokio features * Only process datagram once * Remove superfluous pub * fmt * Fix busy loop * Fold `ServersRunner::init into ServersRunner::new * Fix imports --------- Signed-off-by: Max Inden --- neqo-server/Cargo.toml | 4 +- neqo-server/src/main.rs | 251 +++++++++++++++------------------------- 2 files changed, 95 insertions(+), 160 deletions(-) diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index b3f8aae462..1d6b5df86b 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -7,9 +7,8 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] +futures = "0.3" log = {version = "0.4.17", default-features = false} -mio = "0.6.23" 
-mio-extras = "2.0.6" neqo-common = { path="./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } @@ -18,6 +17,7 @@ neqo-transport = { path = "./../neqo-transport" } qlog = "0.11.0" regex = "1.9" structopt = "0.3" +tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } [features] deny-warnings = [] diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 590e0d55db..0000ea4f80 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -10,23 +10,27 @@ use std::{ cell::RefCell, cmp::min, - collections::{HashMap, HashSet}, + collections::HashMap, convert::TryFrom, fmt::{self, Display}, fs::OpenOptions, io, io::Read, - mem, net::{SocketAddr, ToSocketAddrs}, path::PathBuf, + pin::Pin, process::exit, rc::Rc, str::FromStr, time::{Duration, Instant}, }; -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; -use mio_extras::timer::{Builder, Timeout, Timer}; +use futures::{ + future::{select, select_all, Either}, + FutureExt, +}; +use tokio::{net::UdpSocket, time::Sleep}; + use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -44,7 +48,6 @@ use structopt::StructOpt; use crate::old_https::Http09Server; -const TIMER_TOKEN: Token = Token(0xffff_ffff); const ANTI_REPLAY_WINDOW: Duration = Duration::from_secs(10); mod old_https; @@ -316,8 +319,8 @@ impl QuicParameters { } } -fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { - let sent = match socket.send_to(&out_dgram, &out_dgram.destination()) { +async fn emit_packet(socket: &mut UdpSocket, out_dgram: Datagram) { + let sent = match socket.send_to(&out_dgram, &out_dgram.destination()).await { Err(ref err) => { if err.kind() != io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted { eprintln!("UDP send error: {err:?}"); @@ -594,7 +597,7 @@ fn read_dgram( 
local_address: &SocketAddr, ) -> Result, io::Error> { let buf = &mut [0u8; 2048]; - let (sz, remote_addr) = match socket.recv_from(&mut buf[..]) { + let (sz, remote_addr) = match socket.try_recv_from(&mut buf[..]) { Err(ref err) if err.kind() == io::ErrorKind::WouldBlock || err.kind() == io::ErrorKind::Interrupted => @@ -628,82 +631,36 @@ fn read_dgram( struct ServersRunner { args: Args, - poll: Poll, - hosts: Vec, server: Box, - timeout: Option, - sockets: Vec, - active_sockets: HashSet, - timer: Timer, + timeout: Option>>, + sockets: Vec<(SocketAddr, UdpSocket)>, } impl ServersRunner { pub fn new(args: Args) -> Result { - let server = Self::create_server(&args); - let mut runner = Self { - args, - poll: Poll::new()?, - hosts: Vec::new(), - server, - timeout: None, - sockets: Vec::new(), - active_sockets: HashSet::new(), - timer: Builder::default() - .tick_duration(Duration::from_millis(1)) - .build::(), - }; - runner.init()?; - Ok(runner) - } - - /// Init Poll for all hosts. Create sockets, and a map of the - /// socketaddrs to instances of the HttpServer handling that addr. 
- fn init(&mut self) -> Result<(), io::Error> { - self.hosts = self.args.listen_addresses(); - if self.hosts.is_empty() { + let hosts = args.listen_addresses(); + if hosts.is_empty() { eprintln!("No valid hosts defined"); return Err(io::Error::new(io::ErrorKind::InvalidInput, "No hosts")); } + let sockets = hosts + .into_iter() + .map(|host| { + let socket = std::net::UdpSocket::bind(host)?; + let local_addr = socket.local_addr()?; + println!("Server waiting for connection on: {local_addr:?}"); + socket.set_nonblocking(true)?; + Ok((host, UdpSocket::from_std(socket)?)) + }) + .collect::>()?; + let server = Self::create_server(&args); - for (i, host) in self.hosts.iter().enumerate() { - let socket = match UdpSocket::bind(host) { - Err(err) => { - eprintln!("Unable to bind UDP socket: {err}"); - return Err(err); - } - Ok(s) => s, - }; - - let local_addr = match socket.local_addr() { - Err(err) => { - eprintln!("Socket local address not bound: {err}"); - return Err(err); - } - Ok(s) => s, - }; - - print!("Server waiting for connection on: {local_addr:?}"); - // On Windows, this is not supported. - #[cfg(not(target_os = "windows"))] - if !socket.only_v6().unwrap_or(true) { - print!(" as well as V4"); - }; - println!(); - - self.poll.register( - &socket, - Token(i), - Ready::readable() | Ready::writable(), - PollOpt::edge(), - )?; - - self.sockets.push(socket); - } - - self.poll - .register(&self.timer, TIMER_TOKEN, Ready::readable(), PollOpt::edge())?; - - Ok(()) + Ok(Self { + args, + server, + timeout: None, + sockets, + }) } fn create_server(args: &Args) -> Box { @@ -741,110 +698,88 @@ impl ServersRunner { /// Tries to find a socket, but then just falls back to sending from the first. 
fn find_socket(&mut self, addr: SocketAddr) -> &mut UdpSocket { - let (first, rest) = self.sockets.split_first_mut().unwrap(); + let ((_host, first_socket), rest) = self.sockets.split_first_mut().unwrap(); rest.iter_mut() - .find(|s| { - s.local_addr() + .map(|(_host, socket)| socket) + .find(|socket| { + socket + .local_addr() .ok() .map_or(false, |socket_addr| socket_addr == addr) }) - .unwrap_or(first) + .unwrap_or(first_socket) } - fn process(&mut self, inx: usize, dgram: Option<&Datagram>) -> bool { - match self.server.process(dgram, self.args.now()) { - Output::Datagram(dgram) => { - let socket = self.find_socket(dgram.source()); - emit_packet(socket, dgram); - true - } - Output::Callback(new_timeout) => { - if let Some(to) = &self.timeout { - self.timer.cancel_timeout(to); + async fn process(&mut self, mut dgram: Option<&Datagram>) { + loop { + match self.server.process(dgram.take(), self.args.now()) { + Output::Datagram(dgram) => { + let socket = self.find_socket(dgram.source()); + emit_packet(socket, dgram).await; + } + Output::Callback(new_timeout) => { + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + qdebug!("Output::None"); + break; } - - qinfo!("Setting timeout of {:?} for socket {}", new_timeout, inx); - self.timeout = Some(self.timer.set_timeout(new_timeout, inx)); - false - } - Output::None => { - qdebug!("Output::None"); - false } } } - fn process_datagrams_and_events( - &mut self, - inx: usize, - read_socket: bool, - ) -> Result<(), io::Error> { - if self.sockets.get_mut(inx).is_some() { - if read_socket { - loop { - let socket = self.sockets.get_mut(inx).unwrap(); - let dgram = read_dgram(socket, &self.hosts[inx])?; + // Wait for any of the sockets to be readable or the timeout to fire. 
+ async fn ready(&mut self) -> Result { + let sockets_ready = select_all( + self.sockets + .iter() + .map(|(_host, socket)| Box::pin(socket.readable())), + ) + .map(|(res, inx, _)| match res { + Ok(()) => Ok(Ready::Socket(inx)), + Err(e) => Err(e), + }); + let timeout_ready = self + .timeout + .as_mut() + .map(Either::Left) + .unwrap_or(Either::Right(futures::future::pending())) + .map(|()| Ok(Ready::Timeout)); + select(sockets_ready, timeout_ready).await.factor_first().0 + } + + async fn run(&mut self) -> Result<(), io::Error> { + loop { + match self.ready().await? { + Ready::Socket(inx) => loop { + let (host, socket) = self.sockets.get_mut(inx).unwrap(); + let dgram = read_dgram(socket, host)?; if dgram.is_none() { break; } - _ = self.process(inx, dgram.as_ref()); + self.process(dgram.as_ref()).await; + }, + Ready::Timeout => { + self.timeout = None; + self.process(None).await; } - } else { - _ = self.process(inx, None); } - self.server.process_events(&self.args, self.args.now()); - if self.process(inx, None) { - self.active_sockets.insert(inx); - } - } - Ok(()) - } - - fn process_active_conns(&mut self) -> Result<(), io::Error> { - let curr_active = mem::take(&mut self.active_sockets); - for inx in curr_active { - self.process_datagrams_and_events(inx, false)?; - } - Ok(()) - } - fn process_timeout(&mut self) -> Result<(), io::Error> { - while let Some(inx) = self.timer.poll() { - qinfo!("Timer expired for {:?}", inx); - self.process_datagrams_and_events(inx, false)?; + self.server.process_events(&self.args, self.args.now()); + self.process(None).await; } - Ok(()) } +} - pub fn run(&mut self) -> Result<(), io::Error> { - let mut events = Events::with_capacity(1024); - loop { - // If there are active servers do not block in poll. 
- self.poll.poll( - &mut events, - if self.active_sockets.is_empty() { - None - } else { - Some(Duration::from_millis(0)) - }, - )?; - - for event in &events { - if event.token() == TIMER_TOKEN { - self.process_timeout()?; - } else { - if !event.readiness().is_readable() { - continue; - } - self.process_datagrams_and_events(event.token().0, true)?; - } - } - self.process_active_conns()?; - } - } +enum Ready { + Socket(usize), + Timeout, } -fn main() -> Result<(), io::Error> { +#[tokio::main] +async fn main() -> Result<(), io::Error> { const HQ_INTEROP: &str = "hq-interop"; let mut args = Args::from_args(); @@ -896,5 +831,5 @@ fn main() -> Result<(), io::Error> { } let mut servers_runner = ServersRunner::new(args)?; - servers_runner.run() + servers_runner.run().await } From 9493b22861bc87e169b6fca724b1216145c24146 Mon Sep 17 00:00:00 2001 From: Gabriel Grubba <70247653+Grubba27@users.noreply.github.com> Date: Fri, 2 Feb 2024 07:39:40 +0100 Subject: [PATCH 23/41] chore: Removed InternalErrors added in #1085 (#1323) * removed internal errors * adjusted formatting * Fix issues * Remove numeric error codes from `InternalError`.
* cargo fmt & clippy --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert --- neqo-qpack/src/encoder.rs | 4 +- neqo-qpack/src/lib.rs | 2 +- neqo-server/src/main.rs | 3 +- neqo-transport/src/addr_valid.rs | 6 +- neqo-transport/src/cid.rs | 4 -- neqo-transport/src/connection/mod.rs | 9 +-- neqo-transport/src/connection/state.rs | 3 - neqo-transport/src/crypto.rs | 13 ++--- neqo-transport/src/lib.rs | 2 +- neqo-transport/src/packet/mod.rs | 2 +- neqo-transport/src/packet/retry.rs | 2 +- neqo-transport/src/path.rs | 25 +++----- neqo-transport/src/tracking.rs | 79 +++++++++++--------------- 13 files changed, 56 insertions(+), 98 deletions(-) diff --git a/neqo-qpack/src/encoder.rs b/neqo-qpack/src/encoder.rs index f53cf51d85..c7921ee2c0 100644 --- a/neqo-qpack/src/encoder.rs +++ b/neqo-qpack/src/encoder.rs @@ -312,7 +312,7 @@ impl QPackEncoder { false, "can_evict_to should have checked and make sure this operation is possible" ); - return Err(Error::InternalError(1)); + return Err(Error::InternalError); } self.max_entries = cap / 32; self.next_capacity = None; @@ -530,7 +530,7 @@ fn map_stream_send_atomic_error(err: &TransportError) -> Error { } _ => { debug_assert!(false, "Unexpected error"); - Error::InternalError(2) + Error::InternalError } } } diff --git a/neqo-qpack/src/lib.rs b/neqo-qpack/src/lib.rs index 3f9c7b81f7..1581712017 100644 --- a/neqo-qpack/src/lib.rs +++ b/neqo-qpack/src/lib.rs @@ -45,7 +45,7 @@ pub enum Error { EncoderStream, DecoderStream, ClosedCriticalStream, - InternalError(u16), + InternalError, // These are internal errors, they will be transformed into one of the above. 
NeedMoreData, /* Return when an input stream does not have more data that a decoder diff --git a/neqo-server/src/main.rs b/neqo-server/src/main.rs index 0000ea4f80..9b924504cc 100644 --- a/neqo-server/src/main.rs +++ b/neqo-server/src/main.rs @@ -29,8 +29,6 @@ use futures::{ future::{select, select_all, Either}, FutureExt, }; -use tokio::{net::UdpSocket, time::Sleep}; - use neqo_common::{hex, qdebug, qinfo, qwarn, Datagram, Header, IpTos}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, @@ -45,6 +43,7 @@ use neqo_transport::{ Version, }; use structopt::StructOpt; +use tokio::{net::UdpSocket, time::Sleep}; use crate::old_https::Http09Server; diff --git a/neqo-transport/src/addr_valid.rs b/neqo-transport/src/addr_valid.rs index 9105c89a54..b5ed2d07d1 100644 --- a/neqo-transport/src/addr_valid.rs +++ b/neqo-transport/src/addr_valid.rs @@ -20,8 +20,7 @@ use neqo_crypto::{ use smallvec::SmallVec; use crate::{ - cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Error, - Res, + cid::ConnectionId, packet::PacketBuilder, recovery::RecoveryToken, stats::FrameStats, Res, }; /// A prefix we add to Retry tokens to distinguish them from NEW_TOKEN tokens. 
@@ -434,9 +433,6 @@ impl NewTokenSender { builder.encode_varint(crate::frame::FRAME_TYPE_NEW_TOKEN); builder.encode_vvec(&t.token); - if builder.len() > builder.limit() { - return Err(Error::InternalError(7)); - } tokens.push(RecoveryToken::NewToken(t.seqno)); stats.new_token += 1; diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index 7096ae1874..be202daf25 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -532,10 +532,6 @@ impl ConnectionIdManager { builder.encode_varint(0u64); builder.encode_vec(1, &entry.cid); builder.encode(&entry.srt); - if builder.len() > builder.limit() { - return Err(Error::InternalError(8)); - } - stats.new_connection_id += 1; Ok(true) } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 8aaf987db9..e42eeabde6 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -1935,9 +1935,6 @@ impl Connection { .as_ref() .unwrap_or(&close) .write_frame(&mut builder); - if builder.len() > builder.limit() { - return Err(Error::InternalError(10)); - } encoder = builder.build(tx)?; } @@ -1982,7 +1979,7 @@ impl Connection { if builder.is_full() { return Ok(()); } - self.paths.write_frames(builder, tokens, frame_stats)?; + self.paths.write_frames(builder, tokens, frame_stats); if builder.is_full() { return Ok(()); } @@ -2107,7 +2104,7 @@ impl Connection { builder, &mut tokens, stats, - )?; + ); } let ack_end = builder.len(); @@ -2122,7 +2119,7 @@ impl Connection { &mut self.stats.borrow_mut().frame_tx, full_mtu, now, - )? 
{ + ) { builder.enable_padding(true); } } diff --git a/neqo-transport/src/connection/state.rs b/neqo-transport/src/connection/state.rs index f739c147ab..9afb42174f 100644 --- a/neqo-transport/src/connection/state.rs +++ b/neqo-transport/src/connection/state.rs @@ -218,9 +218,6 @@ impl StateSignaling { if matches!(self, Self::HandshakeDone) && builder.remaining() >= 1 { *self = Self::Idle; builder.encode_varint(FRAME_TYPE_HANDSHAKE_DONE); - if builder.len() > builder.limit() { - return Err(Error::InternalError(14)); - } Ok(Some(RecoveryToken::HandshakeDone)) } else { Ok(None) diff --git a/neqo-transport/src/crypto.rs b/neqo-transport/src/crypto.rs index 4e152db0f2..f6cc7c0e2f 100644 --- a/neqo-transport/src/crypto.rs +++ b/neqo-transport/src/crypto.rs @@ -225,7 +225,7 @@ impl Crypto { self.tls.read_secret(TLS_EPOCH_ZERO_RTT), ), }; - let secret = secret.ok_or(Error::InternalError(1))?; + let secret = secret.ok_or(Error::InternalError)?; self.states .set_0rtt_keys(version, dir, &secret, cipher.unwrap()); Ok(true) @@ -259,12 +259,12 @@ impl Crypto { let read_secret = self .tls .read_secret(TLS_EPOCH_HANDSHAKE) - .ok_or(Error::InternalError(2))?; + .ok_or(Error::InternalError)?; let cipher = match self.tls.info() { None => self.tls.preinfo()?.cipher_suite(), Some(info) => Some(info.cipher_suite()), } - .ok_or(Error::InternalError(3))?; + .ok_or(Error::InternalError)?; self.states .set_handshake_keys(self.version, &write_secret, &read_secret, cipher); qdebug!([self], "Handshake keys installed"); @@ -288,7 +288,7 @@ impl Crypto { let read_secret = self .tls .read_secret(TLS_EPOCH_APPLICATION_DATA) - .ok_or(Error::InternalError(4))?; + .ok_or(Error::InternalError)?; self.states .set_application_read_key(version, read_secret, expire_0rtt)?; qdebug!([self], "application read keys installed"); @@ -662,7 +662,7 @@ impl CryptoDxState { // The numbers in `Self::limit` assume a maximum packet size of 2^11. 
if body.len() > 2048 { debug_assert!(false); - return Err(Error::InternalError(12)); + return Err(Error::InternalError); } self.invoked()?; @@ -1550,9 +1550,6 @@ impl CryptoStreams { builder.encode_varint(crate::frame::FRAME_TYPE_CRYPTO); builder.encode_varint(offset); builder.encode_vvec(&data[..length]); - if builder.len() > builder.limit() { - return Err(Error::InternalError(15)); - } cs.tx.mark_as_sent(offset, length); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index d10ea7e9e6..ecea2453f1 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -68,7 +68,7 @@ pub enum Error { NoError, // Each time tihe error is return a different parameter is supply. // This will be use to distinguish each occurance of this error. - InternalError(u16), + InternalError, ConnectionRefused, FlowControlError, StreamLimitError, diff --git a/neqo-transport/src/packet/mod.rs b/neqo-transport/src/packet/mod.rs index 0968bb9ae2..ccfd212d5f 100644 --- a/neqo-transport/src/packet/mod.rs +++ b/neqo-transport/src/packet/mod.rs @@ -356,7 +356,7 @@ impl PacketBuilder { if self.len() > self.limit { qwarn!("Packet contents are more than the limit"); debug_assert!(false); - return Err(Error::InternalError(5)); + return Err(Error::InternalError); } self.pad_for_crypto(crypto); diff --git a/neqo-transport/src/packet/retry.rs b/neqo-transport/src/packet/retry.rs index a1333a0150..004e9de6e7 100644 --- a/neqo-transport/src/packet/retry.rs +++ b/neqo-transport/src/packet/retry.rs @@ -45,7 +45,7 @@ where .try_with(|aead| f(&aead.borrow())) .map_err(|e| { qerror!("Unable to access Retry AEAD: {:?}", e); - Error::InternalError(6) + Error::InternalError })? 
} diff --git a/neqo-transport/src/path.rs b/neqo-transport/src/path.rs index 06cc8a6a61..d6920c8d94 100644 --- a/neqo-transport/src/path.rs +++ b/neqo-transport/src/path.rs @@ -31,7 +31,7 @@ use crate::{ sender::PacketSender, stats::FrameStats, tracking::{PacketNumberSpace, SentPacket}, - Error, Res, Stats, + Stats, }; /// This is the MTU that we assume when using IPv6. @@ -415,7 +415,7 @@ impl Paths { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { while let Some(seqno) = self.to_retire.pop() { if builder.remaining() < 1 + Encoder::varint_len(seqno) { self.to_retire.push(seqno); @@ -423,9 +423,6 @@ impl Paths { } builder.encode_varint(FRAME_TYPE_RETIRE_CONNECTION_ID); builder.encode_varint(seqno); - if builder.len() > builder.limit() { - return Err(Error::InternalError(20)); - } tokens.push(RecoveryToken::RetireConnectionId(seqno)); stats.retire_connection_id += 1; } @@ -434,8 +431,6 @@ impl Paths { self.primary() .borrow_mut() .write_cc_frames(builder, tokens, stats); - - Ok(()) } pub fn lost_retire_cid(&mut self, lost: u64) { @@ -774,9 +769,9 @@ impl Path { stats: &mut FrameStats, mtu: bool, // Whether the packet we're writing into will be a full MTU. now: Instant, - ) -> Res { + ) -> bool { if builder.remaining() < 9 { - return Ok(false); + return false; } // Send PATH_RESPONSE. @@ -784,9 +779,6 @@ impl Path { qtrace!([self], "Responding to path challenge {}", hex(challenge)); builder.encode_varint(FRAME_TYPE_PATH_RESPONSE); builder.encode(&challenge[..]); - if builder.len() > builder.limit() { - return Err(Error::InternalError(21)); - } // These frames are not retransmitted in the usual fashion. // There is no token, therefore we need to count `all` specially. 
@@ -794,7 +786,7 @@ impl Path { stats.all += 1; if builder.remaining() < 9 { - return Ok(true); + return true; } true } else { @@ -807,9 +799,6 @@ impl Path { let data = <[u8; 8]>::try_from(&random(8)[..]).unwrap(); builder.encode_varint(FRAME_TYPE_PATH_CHALLENGE); builder.encode(&data); - if builder.len() > builder.limit() { - return Err(Error::InternalError(22)); - } // As above, no recovery token. stats.path_challenge += 1; @@ -821,9 +810,9 @@ impl Path { mtu, sent: now, }; - Ok(true) + true } else { - Ok(resp_sent) + resp_sent } } diff --git a/neqo-transport/src/tracking.rs b/neqo-transport/src/tracking.rs index 62e7398ede..64d00257d3 100644 --- a/neqo-transport/src/tracking.rs +++ b/neqo-transport/src/tracking.rs @@ -24,7 +24,6 @@ use crate::{ packet::{PacketBuilder, PacketNumber, PacketType}, recovery::RecoveryToken, stats::FrameStats, - Error, Res, }; // TODO(mt) look at enabling EnumMap for this: https://stackoverflow.com/a/44905797/1375574 @@ -724,14 +723,10 @@ impl AckTracker { builder: &mut PacketBuilder, tokens: &mut Vec, stats: &mut FrameStats, - ) -> Res<()> { + ) { if let Some(space) = self.get_mut(pn_space) { space.write_frame(now, rtt, builder, tokens, stats); - if builder.len() > builder.limit() { - return Err(Error::InternalError(24)); - } } - Ok(()) } } @@ -1060,16 +1055,14 @@ mod tests { let mut tokens = Vec::new(); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut tokens, - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut tokens, + &mut stats, + ); assert_eq!(stats.ack, 1); // Mark another packet as received so we have cause to send another ACK in that space. 
@@ -1088,16 +1081,14 @@ mod tests { assert!(tracker .ack_time(NOW.checked_sub(Duration::from_millis(1)).unwrap()) .is_none()); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut tokens, - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut tokens, + &mut stats, + ); assert_eq!(stats.ack, 1); if let RecoveryToken::Ack(tok) = &tokens[0] { tracker.acked(tok); // Should be a noop. @@ -1121,16 +1112,14 @@ mod tests { builder.set_limit(10); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut Vec::new(), - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut Vec::new(), + &mut stats, + ); assert_eq!(stats.ack, 0); assert_eq!(builder.len(), 1); // Only the short packet header has been added. } @@ -1154,16 +1143,14 @@ mod tests { builder.set_limit(32); let mut stats = FrameStats::default(); - tracker - .write_frame( - PacketNumberSpace::Initial, - *NOW, - RTT, - &mut builder, - &mut Vec::new(), - &mut stats, - ) - .unwrap(); + tracker.write_frame( + PacketNumberSpace::Initial, + *NOW, + RTT, + &mut builder, + &mut Vec::new(), + &mut stats, + ); assert_eq!(stats.ack, 1); let mut dec = builder.as_decoder(); From 43e3a3f1073e4b1c8699be83011bd63f083af1cf Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Fri, 2 Feb 2024 19:09:56 +1100 Subject: [PATCH 24/41] ci: Switch to rust nightly for CI canary (#1613) * Switch to rust nightly for CI And fix the pre-commit hook to check using nightly. This is because we now rely on rust nightly for some rustfmt configuration options. This will make our canary builds a little more forward-looking which comes with a little less stability, but I think that's better than adding a specific build just for formatting. 
* fix: Add a configuration for "Semantic PR" Since this PR hits the "missing config" issue that happens when one isn't super-diligent with semantic commits (which we shouldn't be). * fix: Add quotes * chore: cargo fmt It seems like the CI check works now :-) * fix: Try and avoid `Error: file `false` does not exist` * Skip the quotes * See if Windows likes a non-existent path better * Try an empty temp file * Another go * While I'm here, make the various builds less verbose --------- Co-authored-by: Lars Eggert --- .github/semantic.yml | 3 +++ .github/workflows/check.yml | 16 ++++++++++------ hooks/pre-commit | 12 ++++++++++-- 3 files changed, 23 insertions(+), 8 deletions(-) create mode 100644 .github/semantic.yml diff --git a/.github/semantic.yml b/.github/semantic.yml new file mode 100644 index 0000000000..be3439f6b9 --- /dev/null +++ b/.github/semantic.yml @@ -0,0 +1,3 @@ +enabled: true +titleOnly: true +targetUrl: "https://www.conventionalcommits.org/en/v1.0.0/#summary" diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index ec541066bb..0ac81a64d5 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -17,7 +17,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, macos-13, windows-latest] - rust-toolchain: [1.70.0, stable, beta] + rust-toolchain: [1.70.0, stable, nightly] runs-on: ${{ matrix.os }} defaults: run: @@ -109,7 +109,7 @@ jobs: - name: Build run: | - cargo +${{ matrix.rust-toolchain }} build -v --all-targets + cargo +${{ matrix.rust-toolchain }} build --all-targets echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_PATH" @@ -134,16 +134,20 @@ jobs: RUST_LOG: warn - name: Check formatting - run: cargo +${{ matrix.rust-toolchain }} fmt --all -- --check + run: | + if [ "${{ matrix.rust-toolchain }}" != "nightly" ]; then + export 
CONFIG_PATH="--config-path=$(mktemp)" + fi + cargo +${{ matrix.rust-toolchain }} fmt --all -- --check $CONFIG_PATH if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy -v --tests -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --tests -- -D warnings if: success() || failure() - continue-on-error: ${{ matrix.rust-toolchain == 'beta' }} + continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} - name: Check rustdoc links - run: cargo +${{ matrix.rust-toolchain }} doc --verbose --workspace --no-deps --document-private-items + run: cargo +${{ matrix.rust-toolchain }} doc --workspace --no-deps --document-private-items env: RUSTDOCFLAGS: "--deny rustdoc::broken_intra_doc_links --deny warnings" if: success() || failure() diff --git a/hooks/pre-commit b/hooks/pre-commit index 2a6022b3d4..377a70c89d 100755 --- a/hooks/pre-commit +++ b/hooks/pre-commit @@ -32,12 +32,20 @@ if [[ ./neqo-crypto/bindings/bindings.toml -nt ./neqo-crypto/src/lib.rs ]]; then exit 1 fi +toolchain=nightly +fmtconfig="$root/.rustfmt.toml" +if cargo "+$toolchain" version >/dev/null; then + echo "warning: A rust $toolchain toolchain is recommended to check formatting." + toolchain=stable + fmtconfig=/dev/null +fi + # Check formatting. trap 'git stash pop -q' EXIT git stash push -k -u -q -m "pre-commit stash" -if ! errors=($(cargo fmt -- --check -l)); then +if ! errors=($(cargo "+$toolchain" fmt -- --check -l --config-path="$fmtconfig")); then echo "Formatting errors found." 
- echo "Run \`cargo fmt\` to fix the following files:" + echo "Run \`cargo fmt +$toolchain\` to fix the following files:" for err in "${errors[@]}"; do echo " $err" done From 72e670c9adbe85fb6c9582eb7be44bf090a48573 Mon Sep 17 00:00:00 2001 From: jesup Date: Fri, 2 Feb 2024 03:38:09 -0500 Subject: [PATCH 25/41] test: Benchmarks for RxStreamOrderer (#1609) * Benchmarks for RxStreamOrderer * ci: Run clippy on benches * Address various code review comments --------- Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 2 +- neqo-transport/Cargo.toml | 8 +++++++- neqo-transport/benches/rx_stream_orderer.rs | 20 ++++++++++++++++++++ neqo-transport/src/lib.rs | 5 ++++- 4 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 neqo-transport/benches/rx_stream_orderer.rs diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 0ac81a64d5..cf87472b87 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -142,7 +142,7 @@ jobs: if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy --tests -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --tests --benches -- -D warnings if: success() || failure() continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index ae33822018..e119f074c2 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -9,13 +9,14 @@ license = "MIT OR Apache-2.0" [dependencies] indexmap = "1.9.3" lazy_static = "1.4" -log = {version = "0.4.17", default-features = false} +log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } qlog = "0.11.0" smallvec = "1.11.1" [dev-dependencies] +criterion = "0.5.1" enum-map = "2.7" test-fixture = { path = "../test-fixture" } @@ -23,3 +24,8 @@ test-fixture = { path = "../test-fixture" } bench = [] deny-warnings = [] fuzzing = 
["neqo-crypto/fuzzing"] + +[[bench]] +name = "rx_stream_orderer" +harness = false +required-features = ["bench"] diff --git a/neqo-transport/benches/rx_stream_orderer.rs b/neqo-transport/benches/rx_stream_orderer.rs new file mode 100644 index 0000000000..03b401ba06 --- /dev/null +++ b/neqo-transport/benches/rx_stream_orderer.rs @@ -0,0 +1,20 @@ +use criterion::{criterion_group, criterion_main, Criterion}; +use neqo_transport::recv_stream::RxStreamOrderer; + +fn rx_stream_orderer() { + let mut rx = RxStreamOrderer::new(); + let data: &[u8] = &[0; 1337]; + + for i in 0..100000 { + rx.inbound_frame(i * 1337, data); + } +} + +fn criterion_benchmark(c: &mut Criterion) { + c.bench_function("RxStreamOrderer::inbound_frame()", |b| { + b.iter(rx_stream_orderer) + }); +} + +criterion_group!(benches, criterion_benchmark); +criterion_main!(benches); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index ecea2453f1..de6898f3f8 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -26,6 +26,9 @@ mod path; mod qlog; mod quic_datagrams; mod recovery; +#[cfg(feature = "bench")] +pub mod recv_stream; +#[cfg(not(feature = "bench"))] mod recv_stream; mod rtt; mod send_stream; @@ -181,7 +184,7 @@ impl From for Error { } impl ::std::error::Error for Error { - fn source(&self) -> Option<&(dyn ::std::error::Error + 'static)> { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { Self::CryptoError(e) => Some(e), _ => None, From f1792ed75ef564ed419acea67b7139ad952f0a5a Mon Sep 17 00:00:00 2001 From: jesup Date: Fri, 2 Feb 2024 03:57:01 -0500 Subject: [PATCH 26/41] perf: Optimize packet reception for the common case of adding to the end (#1587) * Optimize packet reception for the common case of adding to the end * Review changes * fix for exact overlap --------- Co-authored-by: Lars Eggert --- neqo-transport/src/recv_stream.rs | 68 +++++++++++++++++++++---------- 1 file changed, 46 insertions(+), 22 deletions(-) diff --git 
a/neqo-transport/src/recv_stream.rs b/neqo-transport/src/recv_stream.rs index 0b2863c425..06ca59685d 100644 --- a/neqo-transport/src/recv_stream.rs +++ b/neqo-transport/src/recv_stream.rs @@ -200,26 +200,49 @@ impl RxStreamOrderer { false }; - // Now handle possible overlap with next entries - let mut to_remove = SmallVec::<[_; 8]>::new(); let mut to_add = new_data; - - for (&next_start, next_data) in self.data_ranges.range_mut(new_start..) { - let next_end = next_start + u64::try_from(next_data.len()).unwrap(); - let overlap = new_end.saturating_sub(next_start); - if overlap == 0 { - break; - } else if next_end >= new_end { - qtrace!( - "New frame {}-{} overlaps with next frame by {}, truncating", - new_start, - new_end, - overlap - ); - let truncate_to = new_data.len() - usize::try_from(overlap).unwrap(); - to_add = &new_data[..truncate_to]; - break; - } else { + if self + .data_ranges + .last_entry() + .map_or(false, |e| *e.key() >= new_start) + { + // Is this at the end (common case)? If so, nothing to do in this block + // Common case: + // PPPPPP -> PPPPPP + // NNNNNNN NNNNNNN + // or + // PPPPPP -> PPPPPP + // NNNNNNN NNNNNNN + // + // Not the common case, handle possible overlap with next entries + // PPPPPP AAA -> PPPPPP + // NNNNNNN NNNNNNN + // or + // PPPPPP AAAA -> PPPPPP AAAA + // NNNNNNN NNNNN + // or (this is where to_remove is used) + // PPPPPP AA -> PPPPPP + // NNNNNNN NNNNNNN + + let mut to_remove = SmallVec::<[_; 8]>::new(); + + for (&next_start, next_data) in self.data_ranges.range_mut(new_start..) 
{ + let next_end = next_start + u64::try_from(next_data.len()).unwrap(); + let overlap = new_end.saturating_sub(next_start); + if overlap == 0 { + // Fills in the hole, exactly (probably common) + break; + } else if next_end >= new_end { + qtrace!( + "New frame {}-{} overlaps with next frame by {}, truncating", + new_start, + new_end, + overlap + ); + let truncate_to = new_data.len() - usize::try_from(overlap).unwrap(); + to_add = &new_data[..truncate_to]; + break; + } qtrace!( "New frame {}-{} spans entire next frame {}-{}, replacing", new_start, @@ -228,11 +251,12 @@ impl RxStreamOrderer { next_end ); to_remove.push(next_start); + // Continue, since we may have more overlaps } - } - for start in to_remove { - self.data_ranges.remove(&start); + for start in to_remove { + self.data_ranges.remove(&start); + } } if !to_add.is_empty() { From d62e5f45cff954bc0ced546604b3ccb2d946bf28 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Fri, 2 Feb 2024 11:07:33 +0200 Subject: [PATCH 27/41] ci: Use codecov-action@v4 to avoid GitHub warning about node@16 (#1614) --- .github/workflows/check.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index cf87472b87..7951477355 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -153,7 +153,7 @@ jobs: if: success() || failure() - name: Upload coverage reports to Codecov - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 with: file: lcov.info fail_ci_if_error: false From 141b5ea0ba1f7c9067afde2dc747b8f8688039ef Mon Sep 17 00:00:00 2001 From: Max Inden Date: Fri, 2 Feb 2024 12:12:25 +0100 Subject: [PATCH 28/41] fix(transport/dump): check module not max log level and move to conn (#1601) This commit contains two changes: The first change. 
To avoid expensive packet decoding and string encoding, the `neqo-transport` `dump_packet` function first checks the current log level: ``` rust if ::log::Level::Debug > ::log::max_level() { return; } ``` This is problematic when e.g. setting `RUST_LOG=info,neqo_crypto=debug`. While `::log::max_level` will return `::log::Level::Debug`, given the `neqo_crypto=debug`, the log level of the `neqo_transport` crate and `dump` module will be `info`. Thus the packet will be de-/encoded, but never logged, given that the `qdebug!` call calls `log::debug!`, which will check the module log level and not use the maximum log level. Instead, with this commit, `dump_packet` checks the module log level. ``` rust if !log::log_enabled!(log::Level::Debug) { return; } ``` The second change. `dump_packet` is in `neqo_transport::dump`, but only called in `neqo_transport::connection`. `RUST_LOG=info,neqo_transport::connection=debug` will thus not dump the packet as `dump_packet` checks its own log level, not the log level of its call side. To remove this small footgun, move `dump_packet` to `neqo_transport::connection::dump` and thus enable logging on e.g. `RUST_LOG=info,neqo_transport::connection=debug`. Note that the `dump` module was never exposed beyond `neqo_transport` and thus this is not a breaking change to other crates. An alternative, arguably more complex, approach would be to write `dump_packet` as a proc macro. 
Co-authored-by: Lars Eggert --- neqo-transport/src/{ => connection}/dump.rs | 2 +- neqo-transport/src/connection/mod.rs | 5 ++--- neqo-transport/src/lib.rs | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) rename neqo-transport/src/{ => connection}/dump.rs (96%) diff --git a/neqo-transport/src/dump.rs b/neqo-transport/src/connection/dump.rs similarity index 96% rename from neqo-transport/src/dump.rs rename to neqo-transport/src/connection/dump.rs index 5d8a72f300..77d51c605c 100644 --- a/neqo-transport/src/dump.rs +++ b/neqo-transport/src/connection/dump.rs @@ -27,7 +27,7 @@ pub fn dump_packet( pn: PacketNumber, payload: &[u8], ) { - if ::log::Level::Debug > ::log::max_level() { + if !log::log_enabled!(log::Level::Debug) { return; } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index e42eeabde6..2de388418a 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -36,7 +36,6 @@ use crate::{ ConnectionIdRef, ConnectionIdStore, LOCAL_ACTIVE_CID_LIMIT, }, crypto::{Crypto, CryptoDxState, CryptoSpace}, - dump::*, events::{ConnectionEvent, ConnectionEvents, OutgoingDatagramOutcome}, frame::{ CloseError, Frame, FrameType, FRAME_TYPE_CONNECTION_CLOSE_APPLICATION, @@ -60,14 +59,14 @@ use crate::{ version::{Version, WireVersion}, AppError, ConnectionError, Error, Res, StreamId, }; - +mod dump; mod idle; pub mod params; mod saved; mod state; #[cfg(test)] pub mod test_internal; - +use dump::dump_packet; use idle::IdleTimeout; pub use params::ConnectionParameters; use params::PreferredAddressConfig; diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index de6898f3f8..ecf7ee2f73 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -16,7 +16,6 @@ mod cc; mod cid; mod connection; mod crypto; -mod dump; mod events; mod fc; mod frame; From 0a68ea25a21284e58ac235bb95df7fca250e5e66 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 2 Feb 2024 15:34:21 +0100 
Subject: [PATCH 29/41] Use temp rev for qlog (#1617) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index fba2110d6d..3e5be383f2 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -15,7 +15,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } structopt = "0.3" url = "~2.5.0" diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index b7136aaa60..25b74609d3 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -12,7 +12,7 @@ enum-map = "2.7" env_logger = { version = "0.10", default-features = false } lazy_static = "1.4" log = { version = "0.4", default-features = false } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } time = {version = "0.3.23", features = ["formatting"]} [dev-dependencies] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 9956cef05c..064d884279 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -14,7 +14,7 @@ neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } sfv = "0.9.3" smallvec = "1.11.1" url = "2.5" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 31a1bf28e6..8ac57b1eea 100644 --- a/neqo-qpack/Cargo.toml +++ 
b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ log = {version = "~0.4.17", default-features = false} neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } static_assertions = "~1.1.0" [dev-dependencies] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 1d6b5df86b..0129fd04e4 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } regex = "1.9" structopt = "0.3" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index e119f074c2..98df1017cf 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ lazy_static = "1.4" log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } smallvec = "1.11.1" [dev-dependencies] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 2c163fbb07..751046471e 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -qlog = "0.11.0" +qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } [features] 
deny-warnings = [] From 9489511f7c82786f55bc9c713cddbff825507ed7 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Fri, 2 Feb 2024 15:52:06 +0100 Subject: [PATCH 30/41] v0.7.0 (#1618) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-crypto/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-interop/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 3e5be383f2..5419e8a5f8 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-client" -version = "0.6.9" +version = "0.7.0" authors = ["Martin Thomson ", "Dragana Damjanovic ", "Andy Grover "] diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index 25b74609d3..de754531be 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-common" -version = "0.6.9" +version = "0.7.0" authors = ["Bobby Holley "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-crypto/Cargo.toml b/neqo-crypto/Cargo.toml index c5909ac5e5..492e501e58 100644 --- a/neqo-crypto/Cargo.toml +++ b/neqo-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-crypto" -version = "0.6.9" +version = "0.7.0" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index 064d884279..f05cae5f03 100644 --- a/neqo-http3/Cargo.toml +++ b/neqo-http3/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-http3" -version = "0.6.9" +version = "0.7.0" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-interop/Cargo.toml b/neqo-interop/Cargo.toml index 8b298167f2..ce5bd9af8b 100644 --- a/neqo-interop/Cargo.toml +++ b/neqo-interop/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-interop" -version = "0.6.9" +version = "0.7.0" authors = 
["EKR "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 8ac57b1eea..96531550bd 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-qpack" -version = "0.6.9" +version = "0.7.0" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 0129fd04e4..09a7d4aa3c 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-server" -version = "0.6.9" +version = "0.7.0" authors = ["Dragana Damjanovic "] edition = "2018" rust-version = "1.70.0" diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 98df1017cf..b1d86fc789 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "neqo-transport" -version = "0.6.9" +version = "0.7.0" authors = ["EKR ", "Andy Grover "] edition = "2018" rust-version = "1.70.0" diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 751046471e..f0feace31d 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "test-fixture" -version = "0.6.9" +version = "0.7.0" authors = ["Martin Thomson "] edition = "2018" rust-version = "1.70.0" From 0b4b9382a9da349e761bf70017f39508c2ae9a04 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Mon, 5 Feb 2024 08:28:12 +0100 Subject: [PATCH 31/41] Add tc commands for upload testing on Linux. 
(#1596) * Add tc commands * fix nits * address comments * use eval --------- Co-authored-by: Lars Eggert --- test/upload_test.sh | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/test/upload_test.sh b/test/upload_test.sh index d75bcda1bd..40c3aaaeb9 100755 --- a/test/upload_test.sh +++ b/test/upload_test.sh @@ -5,9 +5,14 @@ set -e server_address=127.0.0.1 server_port=4433 upload_size=8388608 -client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size" +cc=cubic +client="cargo run --release --bin neqo-client -- http://$server_address:$server_port/ --test upload --upload-size $upload_size --cc $cc" server="cargo run --release --bin neqo-server -- --db ../test-fixture/db $server_address:$server_port" server_pid=0 +pacing=true +if [ "$pacing" = true ]; then + client="$client --pacing" +fi # Define two indexed arrays to store network conditions network_conditions=("cable" "3g_slow" "DSL" "LTE" "fast wifi") @@ -17,10 +22,6 @@ plrs=("0.0001" "0.0005" "0.001" "0.002" "0.005") runs=1 -echo -n "Enter root password: " -read -s root_password -echo - setup_network_conditions() { bw="$1" delay_ms="$2" @@ -38,7 +39,8 @@ setup_network_conditions() { # Convert BDP to kilobytes bdp_kb=$(echo "scale=2; $bdp_bits / 8 / 1024" | bc) - bdp_kb_rounded_up=$(printf "%.0f" "$bdp_kb") + bdp_kb_rounded_up=$(LC_NUMERIC=C printf "%.0f" "$bdp_kb") + # if we are on MacOS X, configure the firewall to add delay and queue traffic if [ -x /usr/sbin/dnctl ]; then @@ -50,13 +52,20 @@ setup_network_conditions() { "sudo pfctl -e || true" ) else - # TODO implement commands for linux - return 0 + bw_in_bits_per_sec="${bw%/s}" + bdp_bytes=$(echo "scale=2; $bdp_bits / 8" | bc) + bdp_bytes_rounded_up=$(LC_NUMERIC=C printf "%.0f" "$bdp_bytes") + plr_p=$(echo "scale=4; $plr * 100" | bc) + plr_p=$(LC_NUMERIC=C printf "%.2f" "$plr_p") + set_condition_commands=( + "sudo tc qdisc add dev lo 
root handle 1: tbf rate $bw_in_bits_per_sec burst $bdp_bytes_rounded_up limit 30000" + "sudo tc qdisc add dev lo parent 1:1 handle 10: netem delay ${delay_ms}ms loss ${plr_p}%" + ) fi for command in "${set_condition_commands[@]}"; do - echo $command - echo $root_password | sudo -S bash -c "$command" + echo "$command" + eval "$command" done } @@ -67,12 +76,13 @@ stop_network_conditions() { "sudo dnctl -q flush" ) else - # TODO implement commands for linux - return 0 + stop_condition_commands=( + "sudo tc qdisc del dev lo root" + ) fi - for command in "${set_condition_commands[@]}"; do - echo $root_password | sudo -S bash -c "$command" + for command in "${stop_condition_commands[@]}"; do + eval "$command" done } @@ -80,7 +90,7 @@ stop_server() { echo "stop server" server_pid=$(pgrep -f "neqo-server") # Kill the server - kill $server_pid + kill "$server_pid" } start_test() { From cb2d6230c250a9716198458c254331965b640efe Mon Sep 17 00:00:00 2001 From: jesup Date: Mon, 5 Feb 2024 02:44:13 -0500 Subject: [PATCH 32/41] perf: Add a cache for first_unmarked_range() (#1582) * Cache results of RangeTracker::first_unmarked_range() Coalesce additions to the tail of a RangeTracker tree to avoid unnecessary tree growth * clean up * more cleanup * review fixes * allow overlap * final_cleanup * Review responses --------- Co-authored-by: Lars Eggert --- neqo-transport/src/send_stream.rs | 61 ++++++++++++++++++++++++++----- 1 file changed, 51 insertions(+), 10 deletions(-) diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 5feb785ac6..0464b3e490 100644 --- a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -145,8 +145,10 @@ enum RangeState { /// range implies needing-to-be-sent, either initially or as a retransmission. #[derive(Debug, Default, PartialEq)] struct RangeTracker { - // offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits. + /// offset, (len, RangeState). 
Use u64 for len because ranges can exceed 32bits. used: BTreeMap, + /// this is a cache for first_unmarked_range(), which we check a log + first_unmarked: Option<(u64, Option)>, } impl RangeTracker { @@ -166,19 +168,46 @@ impl RangeTracker { /// Find the first unmarked range. If all are contiguous, this will return /// (highest_offset(), None). - fn first_unmarked_range(&self) -> (u64, Option) { + fn first_unmarked_range(&mut self) -> (u64, Option) { let mut prev_end = 0; + if let Some(first_unmarked) = self.first_unmarked { + return first_unmarked; + } + for (cur_off, (cur_len, _)) in &self.used { if prev_end == *cur_off { prev_end = cur_off + cur_len; } else { - return (prev_end, Some(cur_off - prev_end)); + let res = (prev_end, Some(cur_off - prev_end)); + self.first_unmarked = Some(res); + return res; } } + self.first_unmarked = Some((prev_end, None)); (prev_end, None) } + /// Check for the common case of adding to the end. If we can, do it and + /// return true. + fn extend_final_range(&mut self, new_off: u64, new_len: u64, new_state: RangeState) -> bool { + if let Some(mut last) = self.used.last_entry() { + let prev_off = *last.key(); + let (prev_len, prev_state) = last.get_mut(); + // allow for overlap between new chunk and the last entry + if new_off >= prev_off + && new_off <= prev_off + *prev_len + && new_off + new_len > prev_off + *prev_len + && new_state == *prev_state + { + // simple case, extend the last entry + *prev_len = new_off + new_len - prev_off; + return true; + } + } + false + } + /// Turn one range into a list of subranges that align with existing /// ranges. /// Check impermissible overlaps in subregions: Sent cannot overwrite Acked. 
@@ -207,6 +236,8 @@ impl RangeTracker { let mut tmp_len = new_len; let mut v = Vec::new(); + // we already handled the case of a simple extension of the last item + // cut previous overlapping range if needed let prev = self.used.range_mut(..tmp_off).next_back(); if let Some((prev_off, (prev_len, prev_state))) = prev { @@ -300,6 +331,10 @@ impl RangeTracker { return; } + self.first_unmarked = None; + if self.extend_final_range(off, len as u64, state) { + return; + } let subranges = self.chunk_range_on_edges(off, len as u64, state); for (sub_off, sub_len, sub_state) in subranges { @@ -315,6 +350,7 @@ impl RangeTracker { return; } + self.first_unmarked = None; let len = u64::try_from(len).unwrap(); let end_off = off + len; @@ -404,7 +440,7 @@ impl TxBuffer { can_buffer } - pub fn next_bytes(&self) -> Option<(u64, &[u8])> { + pub fn next_bytes(&mut self) -> Option<(u64, &[u8])> { let (start, maybe_len) = self.ranges.first_unmarked_range(); if start == self.retired + u64::try_from(self.buffered()).unwrap() { @@ -766,11 +802,13 @@ impl SendStream { /// offset. fn next_bytes(&mut self, retransmission_only: bool) -> Option<(u64, &[u8])> { match self.state { - SendStreamState::Send { ref send_buf, .. } => { - send_buf.next_bytes().and_then(|(offset, slice)| { + SendStreamState::Send { + ref mut send_buf, .. + } => { + let result = send_buf.next_bytes(); + if let Some((offset, slice)) = result { if retransmission_only { qtrace!( - [self], "next_bytes apply retransmission limit at {}", self.retransmission_offset ); @@ -786,13 +824,16 @@ impl SendStream { } else { Some((offset, slice)) } - }) + } else { + None + } } SendStreamState::DataSent { - ref send_buf, + ref mut send_buf, fin_sent, .. 
} => { + let used = send_buf.used(); // immutable first let bytes = send_buf.next_bytes(); if bytes.is_some() { bytes @@ -800,7 +841,7 @@ impl SendStream { None } else { // Send empty stream frame with fin set - Some((send_buf.used(), &[])) + Some((used, &[])) } } SendStreamState::Ready { .. } From 9260ba3a82ebd7721a6a537120d2c9b5ecf94e62 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Mon, 5 Feb 2024 21:14:36 +1100 Subject: [PATCH 33/41] feat: Use the simulator as a benchmark (#1616) * Moving simulator to the test fixture * Split the simulator into two pieces in preparation for bench * Remove println statements * Fix server fmt * chore: add a file that I missed * Add a fixed seed for the test And increase the time available for benching. --------- Signed-off-by: Lars Eggert Co-authored-by: Lars Eggert --- neqo-transport/Cargo.toml | 7 +- neqo-transport/benches/transfer.rs | 64 ++++++++++ neqo-transport/tests/network.rs | 114 ++++++++++------- test-fixture/Cargo.toml | 2 +- test-fixture/src/lib.rs | 1 + .../src}/sim/connection.rs | 71 ++++++++--- .../tests => test-fixture/src}/sim/delay.rs | 1 + .../tests => test-fixture/src}/sim/drop.rs | 6 + .../tests => test-fixture/src}/sim/mod.rs | 120 +++++++++++++----- .../tests => test-fixture/src}/sim/net.rs | 0 .../tests => test-fixture/src}/sim/rng.rs | 7 +- .../src}/sim/taildrop.rs | 20 ++- 12 files changed, 309 insertions(+), 104 deletions(-) create mode 100644 neqo-transport/benches/transfer.rs rename {neqo-transport/tests => test-fixture/src}/sim/connection.rs (81%) rename {neqo-transport/tests => test-fixture/src}/sim/delay.rs (99%) rename {neqo-transport/tests => test-fixture/src}/sim/drop.rs (90%) rename {neqo-transport/tests => test-fixture/src}/sim/mod.rs (72%) rename {neqo-transport/tests => test-fixture/src}/sim/net.rs (100%) rename {neqo-transport/tests => test-fixture/src}/sim/rng.rs (92%) rename {neqo-transport/tests => test-fixture/src}/sim/taildrop.rs (95%) diff --git a/neqo-transport/Cargo.toml 
b/neqo-transport/Cargo.toml index b1d86fc789..00c46eb37b 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -16,7 +16,7 @@ qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a01307 smallvec = "1.11.1" [dev-dependencies] -criterion = "0.5.1" +criterion = { version = "0.5.1", features = ["html_reports"] } enum-map = "2.7" test-fixture = { path = "../test-fixture" } @@ -25,6 +25,11 @@ bench = [] deny-warnings = [] fuzzing = ["neqo-crypto/fuzzing"] +[[bench]] +name = "transfer" +harness = false +required-features = ["bench"] + [[bench]] name = "rx_stream_orderer" harness = false diff --git a/neqo-transport/benches/transfer.rs b/neqo-transport/benches/transfer.rs new file mode 100644 index 0000000000..59f0264a98 --- /dev/null +++ b/neqo-transport/benches/transfer.rs @@ -0,0 +1,64 @@ +use std::time::Duration; + +use criterion::{criterion_group, criterion_main, BatchSize::SmallInput, Criterion}; +use test_fixture::{ + boxed, + sim::{ + connection::{ConnectionNode, ReceiveData, SendData}, + network::{Delay, TailDrop}, + Simulator, + }, +}; + +const ZERO: Duration = Duration::from_millis(0); +const JITTER: Duration = Duration::from_millis(10); +const TRANSFER_AMOUNT: usize = 1 << 22; // 4Mbyte + +fn benchmark_transfer(c: &mut Criterion, label: &str, seed: Option>) { + c.bench_function(label, |b| { + b.iter_batched( + || { + let nodes = boxed![ + ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_uplink(), + Delay::new(ZERO..JITTER), + ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), + TailDrop::dsl_downlink(), + Delay::new(ZERO..JITTER), + ]; + let mut sim = Simulator::new(label, nodes); + if let Some(seed) = &seed { + sim.seed_str(seed); + } + sim.setup() + }, + |sim| { + sim.run(); + }, + SmallInput, + ) + }); +} + +fn benchmark_transfer_variable(c: &mut Criterion) { + benchmark_transfer( + c, + "Run multiple transfers with varying seeds", + 
std::env::var("SIMULATION_SEED").ok(), + ); +} + +fn benchmark_transfer_fixed(c: &mut Criterion) { + benchmark_transfer( + c, + "Run multiple transfers with the same seed", + Some("62df6933ba1f543cece01db8f27fb2025529b27f93df39e19f006e1db3b8c843"), + ); +} + +criterion_group! { + name = transfer; + config = Criterion::default().warm_up_time(Duration::from_secs(5)).measurement_time(Duration::from_secs(15)); + targets = benchmark_transfer_variable, benchmark_transfer_fixed +} +criterion_main!(transfer); diff --git a/neqo-transport/tests/network.rs b/neqo-transport/tests/network.rs index 8c388457c5..d7a537159b 100644 --- a/neqo-transport/tests/network.rs +++ b/neqo-transport/tests/network.rs @@ -7,15 +7,17 @@ #![cfg_attr(feature = "deny-warnings", deny(warnings))] #![warn(clippy::pedantic)] -mod sim; - use std::{ops::Range, time::Duration}; use neqo_transport::{ConnectionError, ConnectionParameters, Error, State}; -use sim::{ - connection::{ConnectionNode, ReachState, ReceiveData, SendData}, - network::{Delay, Drop, TailDrop}, - Simulator, +use test_fixture::{ + boxed, + sim::{ + connection::{ConnectionNode, ReachState, ReceiveData, SendData}, + network::{Delay, Drop, TailDrop}, + Simulator, + }, + simulate, }; /// The amount of transfer. Much more than this takes a surprising amount of time. 
@@ -32,26 +34,28 @@ const fn weeks(m: u32) -> Duration { simulate!( connect_direct, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), ] ); simulate!( idle_timeout, [ - ConnectionNode::default_client(boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ]), - ConnectionNode::default_server(boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ]), + ConnectionNode::default_client(boxed![ReachState::new(State::Closed( + ConnectionError::Transport(Error::IdleTimeout) + ))]), + ConnectionNode::default_server(boxed![ReachState::new(State::Closed( + ConnectionError::Transport(Error::IdleTimeout) + ))]), ] ); @@ -60,23 +64,19 @@ simulate!( [ ConnectionNode::new_client( ConnectionParameters::default().idle_timeout(weeks(1000)), - boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ] + boxed![ReachState::new(State::Confirmed),], + boxed![ReachState::new(State::Closed(ConnectionError::Transport( + Error::IdleTimeout + )))] ), Delay::new(weeks(6)..weeks(6)), Drop::percentage(10), ConnectionNode::new_server( ConnectionParameters::default().idle_timeout(weeks(1000)), - boxed![ - ReachState::new(State::Confirmed), - ReachState::new(State::Closed(ConnectionError::Transport( - Error::IdleTimeout - ))) - ] + boxed![ReachState::new(State::Confirmed),], + boxed![ReachState::new(State::Closed(ConnectionError::Transport( + Error::IdleTimeout + )))] ), Delay::new(weeks(8)..weeks(8)), Drop::percentage(10), @@ 
-94,9 +94,17 @@ simulate!( simulate!( connect_fixed_rtt, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), Delay::new(DELAY..DELAY), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), Delay::new(DELAY..DELAY), ], ); @@ -104,22 +112,38 @@ simulate!( simulate!( connect_taildrop_jitter, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - TailDrop::dsl_uplink(), - Delay::new(ZERO..JITTER), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), TailDrop::dsl_downlink(), Delay::new(ZERO..JITTER), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + TailDrop::dsl_uplink(), + Delay::new(ZERO..JITTER), ], ); simulate!( connect_taildrop, [ - ConnectionNode::default_client(boxed![ReachState::new(State::Confirmed)]), - TailDrop::dsl_uplink(), - ConnectionNode::default_server(boxed![ReachState::new(State::Confirmed)]), + ConnectionNode::new_client( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), TailDrop::dsl_downlink(), + ConnectionNode::new_server( + ConnectionParameters::default(), + [], + boxed![ReachState::new(State::Confirmed)] + ), + TailDrop::dsl_uplink(), ], ); @@ -139,9 +163,9 @@ simulate!( transfer_taildrop, [ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_uplink(), - ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), TailDrop::dsl_downlink(), + ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), + 
TailDrop::dsl_uplink(), ], ); @@ -149,10 +173,10 @@ simulate!( transfer_taildrop_jitter, [ ConnectionNode::default_client(boxed![SendData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_uplink(), + TailDrop::dsl_downlink(), Delay::new(ZERO..JITTER), ConnectionNode::default_server(boxed![ReceiveData::new(TRANSFER_AMOUNT)]), - TailDrop::dsl_downlink(), + TailDrop::dsl_uplink(), Delay::new(ZERO..JITTER), ], ); diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index f0feace31d..6dfe8d7f4c 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -17,4 +17,4 @@ neqo-transport = { path = "../neqo-transport" } qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } [features] -deny-warnings = [] +deny-warnings = [] \ No newline at end of file diff --git a/test-fixture/src/lib.rs b/test-fixture/src/lib.rs index 8635e8a840..2c94767a97 100644 --- a/test-fixture/src/lib.rs +++ b/test-fixture/src/lib.rs @@ -35,6 +35,7 @@ use neqo_transport::{ use qlog::{events::EventImportance, streamer::QlogStreamer}; pub mod assertions; +pub mod sim; /// The path for the database used in tests. 
pub const NSS_DB_PATH: &str = concat!(env!("CARGO_MANIFEST_DIR"), "/db"); diff --git a/neqo-transport/tests/sim/connection.rs b/test-fixture/src/sim/connection.rs similarity index 81% rename from neqo-transport/tests/sim/connection.rs rename to test-fixture/src/sim/connection.rs index 45a5234512..d05979cfca 100644 --- a/neqo-transport/tests/sim/connection.rs +++ b/test-fixture/src/sim/connection.rs @@ -12,13 +12,16 @@ use std::{ time::Instant, }; -use neqo_common::{event::Provider, qdebug, qtrace, Datagram}; +use neqo_common::{event::Provider, qdebug, qinfo, qtrace, Datagram}; use neqo_crypto::AuthenticationStatus; use neqo_transport::{ Connection, ConnectionEvent, ConnectionParameters, Output, State, StreamId, StreamType, }; -use super::{Node, Rng}; +use crate::{ + boxed, + sim::{Node, Rng}, +}; /// The status of the processing of an event. #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -33,7 +36,7 @@ pub enum GoalStatus { /// A goal for the connection. /// Goals can be accomplished in any order. -pub trait ConnectionGoal { +pub trait ConnectionGoal: Debug { fn init(&mut self, _c: &mut Connection, _now: Instant) {} /// Perform some processing. 
fn process(&mut self, _c: &mut Connection, _now: Instant) -> GoalStatus { @@ -47,36 +50,49 @@ pub trait ConnectionGoal { pub struct ConnectionNode { c: Connection, + setup_goals: Vec>, goals: Vec>, } impl ConnectionNode { pub fn new_client( params: ConnectionParameters, + setup: impl IntoIterator>, goals: impl IntoIterator>, ) -> Self { Self { - c: test_fixture::new_client(params), + c: crate::new_client(params), + setup_goals: setup.into_iter().collect(), goals: goals.into_iter().collect(), } } pub fn new_server( params: ConnectionParameters, + setup: impl IntoIterator>, goals: impl IntoIterator>, ) -> Self { Self { - c: test_fixture::new_server(test_fixture::DEFAULT_ALPN, params), + c: crate::new_server(crate::DEFAULT_ALPN, params), + setup_goals: setup.into_iter().collect(), goals: goals.into_iter().collect(), } } pub fn default_client(goals: impl IntoIterator>) -> Self { - Self::new_client(ConnectionParameters::default(), goals) + Self::new_client( + ConnectionParameters::default(), + boxed![ReachState::new(State::Confirmed)], + goals, + ) } pub fn default_server(goals: impl IntoIterator>) -> Self { - Self::new_server(ConnectionParameters::default(), goals) + Self::new_server( + ConnectionParameters::default(), + boxed![ReachState::new(State::Confirmed)], + goals, + ) } #[allow(dead_code)] @@ -89,13 +105,20 @@ impl ConnectionNode { self.goals.push(goal); } + /// On the first call to this method, the setup goals will turn into the active goals. + /// On the second call, they will be swapped back and the main goals will run. + fn setup_goals(&mut self, now: Instant) { + std::mem::swap(&mut self.goals, &mut self.setup_goals); + for g in &mut self.goals { + g.init(&mut self.c, now); + } + } + /// Process all goals using the given closure and return whether any were active. fn process_goals(&mut self, mut f: F) -> bool where F: FnMut(&mut Box, &mut Connection) -> GoalStatus, { - // Waiting on drain_filter... 
- // self.goals.drain_filter(|g| f(g, &mut self.c, &e)).count(); let mut active = false; let mut i = 0; while i < self.goals.len() { @@ -114,15 +137,13 @@ impl ConnectionNode { impl Node for ConnectionNode { fn init(&mut self, _rng: Rng, now: Instant) { - for g in &mut self.goals { - g.init(&mut self.c, now); - } + self.setup_goals(now); } - fn process(&mut self, mut d: Option, now: Instant) -> Output { + fn process(&mut self, mut dgram: Option, now: Instant) -> Output { _ = self.process_goals(|goal, c| goal.process(c, now)); loop { - let res = self.c.process(d.take().as_ref(), now); + let res = self.c.process(dgram.take().as_ref(), now); let mut active = false; while let Some(e) = self.c.next_event() { @@ -145,12 +166,18 @@ impl Node for ConnectionNode { } } + fn prepare(&mut self, now: Instant) { + assert!(self.done(), "ConnectionNode::prepare: setup not complete"); + self.setup_goals(now); + assert!(!self.done(), "ConnectionNode::prepare: setup not complete"); + } + fn done(&self) -> bool { self.goals.is_empty() } fn print_summary(&self, test_name: &str) { - println!("{}: {:?}", test_name, self.c.stats()); + qinfo!("{}: {:?}", test_name, self.c.stats()); } } @@ -160,12 +187,15 @@ impl Debug for ConnectionNode { } } +/// A target for a connection that involves reaching a given connection state. #[derive(Debug, Clone)] pub struct ReachState { target: State, } impl ReachState { + /// Create a new instance that intends to reach the indicated state. + #[must_use] pub fn new(target: State) -> Self { Self { target } } @@ -186,13 +216,15 @@ impl ConnectionGoal for ReachState { } } -#[derive(Debug)] +/// A target for a connection that involves sending a given amount of data on the indicated stream. 
+#[derive(Debug, Clone)] pub struct SendData { remaining: usize, stream_id: Option, } impl SendData { + #[must_use] pub fn new(amount: usize) -> Self { Self { remaining: amount, @@ -248,9 +280,7 @@ impl ConnectionGoal for SendData { match e { ConnectionEvent::SendStreamCreatable { stream_type: StreamType::UniDi, - } - // TODO(mt): remove the second condition when #842 is fixed. - | ConnectionEvent::StateChange(_) => { + } => { self.make_stream(c); GoalStatus::Active } @@ -270,12 +300,13 @@ impl ConnectionGoal for SendData { } /// Receive a prescribed amount of data from any stream. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ReceiveData { remaining: usize, } impl ReceiveData { + #[must_use] pub fn new(amount: usize) -> Self { Self { remaining: amount } } diff --git a/neqo-transport/tests/sim/delay.rs b/test-fixture/src/sim/delay.rs similarity index 99% rename from neqo-transport/tests/sim/delay.rs rename to test-fixture/src/sim/delay.rs index 34cb923084..e66e65f9d8 100644 --- a/neqo-transport/tests/sim/delay.rs +++ b/test-fixture/src/sim/delay.rs @@ -58,6 +58,7 @@ pub struct Delay { } impl Delay { + #[must_use] pub fn new(bounds: Range) -> Self { Self { random: RandomDelay::new(bounds), diff --git a/neqo-transport/tests/sim/drop.rs b/test-fixture/src/sim/drop.rs similarity index 90% rename from neqo-transport/tests/sim/drop.rs rename to test-fixture/src/sim/drop.rs index 629fbf48d3..6529a95d04 100644 --- a/neqo-transport/tests/sim/drop.rs +++ b/test-fixture/src/sim/drop.rs @@ -27,6 +27,7 @@ impl Drop { /// Make a new random drop generator. Each `drop` is called, this generates a /// random value between 0 and `max` (exclusive). If this value is less than /// `threshold` a value of `true` is returned. + #[must_use] pub fn new(threshold: u64, max: u64) -> Self { Self { threshold, @@ -36,11 +37,16 @@ impl Drop { } /// Generate random drops with the given percentage. 
+ #[must_use] pub fn percentage(pct: u8) -> Self { // Multiply by 10 so that the random number generator works more efficiently. Self::new(u64::from(pct) * 10, 1000) } + /// Determine whether or not to drop a packet. + /// # Panics + /// When this is invoked after test configuration has been torn down, + /// such that the RNG is no longer available. pub fn drop(&mut self) -> bool { let mut rng = self.rng.as_ref().unwrap().borrow_mut(); let r = rng.random_from(0..self.max); diff --git a/neqo-transport/tests/sim/mod.rs b/test-fixture/src/sim/mod.rs similarity index 72% rename from neqo-transport/tests/sim/mod.rs rename to test-fixture/src/sim/mod.rs index 9ab9d57a4a..f4b7a52739 100644 --- a/neqo-transport/tests/sim/mod.rs +++ b/test-fixture/src/sim/mod.rs @@ -4,10 +4,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -// Tests with simulated network -#![cfg_attr(feature = "deny-warnings", deny(warnings))] -#![warn(clippy::pedantic)] - +/// Tests with simulated network components. pub mod connection; mod delay; mod drop; @@ -19,6 +16,7 @@ use std::{ cmp::min, convert::TryFrom, fmt::Debug, + ops::{Deref, DerefMut}, rc::Rc, time::{Duration, Instant}, }; @@ -26,9 +24,10 @@ use std::{ use neqo_common::{qdebug, qinfo, qtrace, Datagram, Encoder}; use neqo_transport::Output; use rng::Random; -use test_fixture::{self, now}; use NodeState::{Active, Idle, Waiting}; +use crate::now; + pub mod network { pub use super::{delay::Delay, drop::Drop, taildrop::TailDrop}; } @@ -78,17 +77,21 @@ pub trait Node: Debug { /// Perform processing. This optionally takes a datagram and produces either /// another data, a time that the simulator needs to wait, or nothing. fn process(&mut self, d: Option, now: Instant) -> Output; + /// This is called after setup is complete and before the main processing starts. + fn prepare(&mut self, _now: Instant) {} /// An node can report when it considers itself "done". 
+ /// Prior to calling `prepare`, this should return `true` if it is ready. fn done(&self) -> bool { true } + /// Print out a summary of the state of the node. fn print_summary(&self, _test_name: &str) {} } /// The state of a single node. Nodes will be activated if they are `Active` /// or if the previous node in the loop generated a datagram. Nodes that return /// `true` from `Node::done` will be activated as normal. -#[derive(Debug, PartialEq)] +#[derive(Clone, Copy, Debug, PartialEq)] enum NodeState { /// The node just produced a datagram. It should be activated again as soon as possible. Active, @@ -114,6 +117,19 @@ impl NodeHolder { } } +impl Deref for NodeHolder { + type Target = dyn Node; + fn deref(&self) -> &Self::Target { + self.node.as_ref() + } +} + +impl DerefMut for NodeHolder { + fn deref_mut(&mut self) -> &mut Self::Target { + self.node.as_mut() + } +} + pub struct Simulator { name: String, nodes: Vec, @@ -146,7 +162,8 @@ impl Simulator { } /// Seed from a hex string. - /// Though this is convenient, it panics if this isn't a 64 character hex string. + /// # Panics + /// When the provided string is not 32 bytes of hex (64 characters). pub fn seed_str(&mut self, seed: impl AsRef) { let seed = Encoder::from_hex(seed); self.seed(<[u8; 32]>::try_from(seed.as_ref()).unwrap()); @@ -164,18 +181,8 @@ impl Simulator { next.expect("a node cannot be idle and not done") } - /// Runs the simulation. 
- pub fn run(mut self) -> Duration { - let start = now(); - let mut now = start; + fn process_loop(&mut self, start: Instant, mut now: Instant) -> Instant { let mut dgram = None; - - for n in &mut self.nodes { - n.node.init(self.rng.clone(), now); - } - println!("{}: seed {}", self.name, self.rng.borrow().seed_str()); - - let real_start = Instant::now(); loop { for n in &mut self.nodes { if dgram.is_none() && !n.ready(now) { @@ -184,7 +191,7 @@ impl Simulator { } qdebug!([self.name], "processing {:?}", n.node); - let res = n.node.process(dgram.take(), now); + let res = n.process(dgram.take(), now); n.state = match res { Output::Datagram(d) => { qtrace!([self.name], " => datagram {}", d.len()); @@ -198,21 +205,14 @@ impl Simulator { } Output::None => { qtrace!([self.name], " => nothing"); - assert!(n.node.done(), "nodes have to be done when they go idle"); + assert!(n.done(), "nodes should be done when they go idle"); Idle } }; } - if self.nodes.iter().all(|n| n.node.done()) { - let real_elapsed = real_start.elapsed(); - println!("{}: real elapsed time: {:?}", self.name, real_elapsed); - let elapsed = now - start; - println!("{}: simulated elapsed time: {:?}", self.name, elapsed); - for n in &self.nodes { - n.node.print_summary(&self.name); - } - return elapsed; + if self.nodes.iter().all(|n| n.done()) { + return now; } if dgram.is_none() { @@ -229,4 +229,66 @@ impl Simulator { } } } + + #[must_use] + pub fn setup(mut self) -> ReadySimulator { + let start = now(); + + qinfo!("{}: seed {}", self.name, self.rng.borrow().seed_str()); + for n in &mut self.nodes { + n.init(self.rng.clone(), start); + } + + let setup_start = Instant::now(); + let now = self.process_loop(start, start); + let setup_time = now - start; + qinfo!( + "{t}: Setup took {wall:?} (wall) {setup_time:?} (simulated)", + t = self.name, + wall = setup_start.elapsed(), + ); + + for n in &mut self.nodes { + n.prepare(now); + } + + ReadySimulator { + sim: self, + start, + now, + } + } + + /// Runs the 
simulation. + /// # Panics + /// When sanity checks fail in unexpected ways; this is a testing function after all. + pub fn run(self) { + self.setup().run(); + } + + fn print_summary(&self) { + for n in &self.nodes { + n.print_summary(&self.name); + } + } +} + +pub struct ReadySimulator { + sim: Simulator, + start: Instant, + now: Instant, +} + +impl ReadySimulator { + pub fn run(mut self) { + let real_start = Instant::now(); + let end = self.sim.process_loop(self.start, self.now); + let sim_time = end - self.now; + qinfo!( + "{t}: Simulation took {wall:?} (wall) {sim_time:?} (simulated)", + t = self.sim.name, + wall = real_start.elapsed(), + ); + self.sim.print_summary(); + } } diff --git a/neqo-transport/tests/sim/net.rs b/test-fixture/src/sim/net.rs similarity index 100% rename from neqo-transport/tests/sim/net.rs rename to test-fixture/src/sim/net.rs diff --git a/neqo-transport/tests/sim/rng.rs b/test-fixture/src/sim/rng.rs similarity index 92% rename from neqo-transport/tests/sim/rng.rs rename to test-fixture/src/sim/rng.rs index af4f70eb5f..094c5fd791 100644 --- a/neqo-transport/tests/sim/rng.rs +++ b/test-fixture/src/sim/rng.rs @@ -14,6 +14,8 @@ pub struct Random { } impl Random { + #[must_use] + #[allow(clippy::missing_panics_doc)] // These are impossible. pub fn new(seed: [u8; 32]) -> Self { assert!(seed.iter().any(|&x| x != 0)); let mut dec = Decoder::from(&seed); @@ -48,6 +50,7 @@ impl Random { /// Generate a random value from the range. /// If the range is empty or inverted (`range.start > range.end`), then /// this returns the value of `range.start` without generating any random values. + #[must_use] pub fn random_from(&mut self, range: Range) -> u64 { let max = range.end.saturating_sub(range.start); if max == 0 { @@ -55,7 +58,6 @@ impl Random { } let shift = (max - 1).leading_zeros(); - assert_ne!(max, 0); loop { let r = self.random() >> shift; if r < max { @@ -64,7 +66,8 @@ impl Random { } } - /// Get the seed necessary to continue from this point. 
+ /// Get the seed necessary to continue from the current state of the RNG. + #[must_use] pub fn seed_str(&self) -> String { format!( "{:8x}{:8x}{:8x}{:8x}", diff --git a/neqo-transport/tests/sim/taildrop.rs b/test-fixture/src/sim/taildrop.rs similarity index 95% rename from neqo-transport/tests/sim/taildrop.rs rename to test-fixture/src/sim/taildrop.rs index 26813800c9..c23dae10c6 100644 --- a/neqo-transport/tests/sim/taildrop.rs +++ b/test-fixture/src/sim/taildrop.rs @@ -14,7 +14,7 @@ use std::{ time::{Duration, Instant}, }; -use neqo_common::{qtrace, Datagram}; +use neqo_common::{qinfo, qtrace, Datagram}; use neqo_transport::Output; use super::Node; @@ -23,6 +23,7 @@ use super::Node; const ONE_SECOND_NS: u128 = 1_000_000_000; /// This models a link with a tail drop router at the front of it. +#[derive(Clone)] pub struct TailDrop { /// An overhead associated with each entry. This accounts for /// layer 2, IP, and UDP overheads. @@ -60,6 +61,7 @@ pub struct TailDrop { impl TailDrop { /// Make a new taildrop node with the given rate, queue capacity, and link delay. + #[must_use] pub fn new(rate: usize, capacity: usize, delay: Duration) -> Self { Self { overhead: 64, @@ -80,12 +82,14 @@ impl TailDrop { /// A tail drop queue on a 10Mbps link (approximated to 1 million bytes per second) /// with a fat 32k buffer (about 30ms), and the default forward delay of 50ms. - pub fn dsl_uplink() -> Self { + #[must_use] + pub fn dsl_downlink() -> Self { TailDrop::new(1_000_000, 32_768, Duration::from_millis(50)) } - /// Cut downlink to one fifth of the uplink (2Mbps), and reduce the buffer to 1/4. - pub fn dsl_downlink() -> Self { + /// Cut uplink to one fifth of the downlink (2Mbps), and reduce the buffer to 1/4. 
+ #[must_use] + pub fn dsl_uplink() -> Self { TailDrop::new(200_000, 8_192, Duration::from_millis(50)) } @@ -174,9 +178,13 @@ impl Node for TailDrop { } fn print_summary(&self, test_name: &str) { - println!( + qinfo!( "{}: taildrop: rx {} drop {} tx {} maxq {}", - test_name, self.received, self.dropped, self.delivered, self.maxq, + test_name, + self.received, + self.dropped, + self.delivered, + self.maxq, ); } } From 40a9346f4368b34f94f60aa3c192181128c12d02 Mon Sep 17 00:00:00 2001 From: Kershaw Date: Mon, 5 Feb 2024 14:52:07 +0100 Subject: [PATCH 34/41] qlog 0.12.0 (#1621) --- neqo-client/Cargo.toml | 2 +- neqo-common/Cargo.toml | 2 +- neqo-http3/Cargo.toml | 2 +- neqo-qpack/Cargo.toml | 2 +- neqo-server/Cargo.toml | 2 +- neqo-transport/Cargo.toml | 2 +- test-fixture/Cargo.toml | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index 5419e8a5f8..ddf33cca08 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -15,7 +15,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" structopt = "0.3" url = "~2.5.0" diff --git a/neqo-common/Cargo.toml b/neqo-common/Cargo.toml index de754531be..183a4d9450 100644 --- a/neqo-common/Cargo.toml +++ b/neqo-common/Cargo.toml @@ -12,7 +12,7 @@ enum-map = "2.7" env_logger = { version = "0.10", default-features = false } lazy_static = "1.4" log = { version = "0.4", default-features = false } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" time = {version = "0.3.23", features = ["formatting"]} [dev-dependencies] diff --git a/neqo-http3/Cargo.toml b/neqo-http3/Cargo.toml index f05cae5f03..1605a2b609 100644 --- a/neqo-http3/Cargo.toml 
+++ b/neqo-http3/Cargo.toml @@ -14,7 +14,7 @@ neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" sfv = "0.9.3" smallvec = "1.11.1" url = "2.5" diff --git a/neqo-qpack/Cargo.toml b/neqo-qpack/Cargo.toml index 96531550bd..229345e977 100644 --- a/neqo-qpack/Cargo.toml +++ b/neqo-qpack/Cargo.toml @@ -12,7 +12,7 @@ log = {version = "~0.4.17", default-features = false} neqo-common = { path = "./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" static_assertions = "~1.1.0" [dev-dependencies] diff --git a/neqo-server/Cargo.toml b/neqo-server/Cargo.toml index 09a7d4aa3c..d4ee99c9b0 100644 --- a/neqo-server/Cargo.toml +++ b/neqo-server/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" regex = "1.9" structopt = "0.3" tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index 00c46eb37b..c75142e102 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -12,7 +12,7 @@ lazy_static = "1.4" log = { version = "0.4.17", default-features = false } neqo-common = { path = "../neqo-common" } neqo-crypto = { path = "../neqo-crypto" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" 
smallvec = "1.11.1" [dev-dependencies] diff --git a/test-fixture/Cargo.toml b/test-fixture/Cargo.toml index 6dfe8d7f4c..ed480c9c26 100644 --- a/test-fixture/Cargo.toml +++ b/test-fixture/Cargo.toml @@ -14,7 +14,7 @@ neqo-crypto = { path = "../neqo-crypto" } neqo-http3 = { path = "../neqo-http3" } neqo-qpack = { path = "../neqo-qpack" } neqo-transport = { path = "../neqo-transport" } -qlog = { git = "https://github.com/cloudflare/quiche", rev = "09ea4b244096a013071cfe2175bbf2945fb7f8d1" } +qlog = "0.12.0" [features] deny-warnings = [] \ No newline at end of file From 9a394e95fe5562070bb8139562ea94b709b9a7f2 Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Mon, 5 Feb 2024 16:29:05 +0200 Subject: [PATCH 35/41] ci: Build docker image for multiple architectures (#1619) * ci: Build docker image for multiple architectures * Print executable name and version * Better way to print versions during qns run --- .github/workflows/qns.yml | 6 +++++- qns/interop.sh | 2 ++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/qns.yml b/.github/workflows/qns.yml index 353d0ae696..ea9c7fb041 100644 --- a/.github/workflows/qns.yml +++ b/.github/workflows/qns.yml @@ -5,7 +5,7 @@ on: - cron: '42 3 * * 2,5' # Runs at 03:42 UTC (m and h chosen arbitrarily) twice a week. 
workflow_dispatch: pull_request: - branch: ["main"] + branches: ["main"] paths: - 'qns/**' - '.github/workflows/qns.yml' @@ -13,6 +13,9 @@ jobs: docker-image: runs-on: ubuntu-latest steps: + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -47,3 +50,4 @@ jobs: RUST_VERSION=stable cache-from: type=gha cache-to: type=gha,mode=max + platforms: linux/amd64, linux/arm64 diff --git a/qns/interop.sh b/qns/interop.sh index 3c828ead9e..4baa6b7e8f 100755 --- a/qns/interop.sh +++ b/qns/interop.sh @@ -13,6 +13,7 @@ case "$ROLE" in client) /wait-for-it.sh sim:57832 -s -t 30 sleep 5 + neqo-client --help | head -n 1 RUST_LOG=debug RUST_BACKTRACE=1 neqo-client --cc cubic --qns-test "$TESTCASE" \ --qlog-dir "$QLOGDIR" --output-dir /downloads $REQUESTS ;; @@ -27,6 +28,7 @@ case "$ROLE" in -name "$CERT" -passout pass: -out "$P12CERT" pk12util -d "sql:$DB" -i "$P12CERT" -W '' certutil -L -d "sql:$DB" -n "$CERT" + neqo-server --help | head -n 1 RUST_LOG=info RUST_BACKTRACE=1 neqo-server --cc cubic --qns-test "$TESTCASE" \ --qlog-dir "$QLOGDIR" -d "$DB" -k "$CERT" [::]:443 ;; From 3587c23aca441a27266267c5692fb70d29e825b0 Mon Sep 17 00:00:00 2001 From: Max Inden Date: Tue, 6 Feb 2024 06:48:49 +0100 Subject: [PATCH 36/41] refactor(client): replace mio with tokio (#1612) * refactor(client): replace mio with tokio - Use `tokio` instead of `mio`. - Refactor `neqo-client` to be more consistent with `neqo-server`. - Introduce `read_datagram` and `ready`. - Introduce `ClientRunner` and `old::ClientRunner` (consistent with `ServersRunner`). - Fold `handle_test` into `client` (now `ClientRunner::new`). 
* http3/client_events: remove previous hot-fix * transport/tests/handshake: add (for now failing) test * fix(transport/connection): emit AuthenticationNeeded once * Assert that we have the certificate too; add comments --------- Co-authored-by: Martin Thomson --- neqo-client/Cargo.toml | 4 +- neqo-client/src/main.rs | 609 +++++++++--------- neqo-transport/src/connection/mod.rs | 8 +- .../src/connection/tests/handshake.rs | 51 ++ 4 files changed, 369 insertions(+), 303 deletions(-) diff --git a/neqo-client/Cargo.toml b/neqo-client/Cargo.toml index ddf33cca08..4ca69647e1 100644 --- a/neqo-client/Cargo.toml +++ b/neqo-client/Cargo.toml @@ -9,7 +9,8 @@ rust-version = "1.70.0" license = "MIT OR Apache-2.0" [dependencies] -mio = "~0.6.23" +futures = "0.3" +log = {version = "0.4.17", default-features = false} neqo-common = { path="./../neqo-common" } neqo-crypto = { path = "./../neqo-crypto" } neqo-http3 = { path = "./../neqo-http3" } @@ -17,6 +18,7 @@ neqo-qpack = { path = "./../neqo-qpack" } neqo-transport = { path = "./../neqo-transport" } qlog = "0.12.0" structopt = "0.3" +tokio = { version = "1", features = ["net", "time", "macros", "rt", "rt-multi-thread"] } url = "~2.5.0" [features] diff --git a/neqo-client/src/main.rs b/neqo-client/src/main.rs index 3db90aac10..d8444542ea 100644 --- a/neqo-client/src/main.rs +++ b/neqo-client/src/main.rs @@ -13,9 +13,10 @@ use std::{ convert::TryFrom, fmt::{self, Display}, fs::{create_dir_all, File, OpenOptions}, - io::{self, ErrorKind, Write}, + io::{self, Write}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, ToSocketAddrs}, path::PathBuf, + pin::Pin, process::exit, rc::Rc, str::FromStr, @@ -23,8 +24,13 @@ use std::{ }; use common::IpTos; -use mio::{net::UdpSocket, Events, Poll, PollOpt, Ready, Token}; -use neqo_common::{self as common, event::Provider, hex, qlog::NeqoQlog, Datagram, Role}; +use futures::{ + future::{select, Either}, + FutureExt, TryFutureExt, +}; +use neqo_common::{ + self as common, event::Provider, hex, 
qdebug, qinfo, qlog::NeqoQlog, Datagram, Role, +}; use neqo_crypto::{ constants::{TLS_AES_128_GCM_SHA256, TLS_AES_256_GCM_SHA384, TLS_CHACHA20_POLY1305_SHA256}, init, AuthenticationStatus, Cipher, ResumptionToken, @@ -39,6 +45,7 @@ use neqo_transport::{ }; use qlog::{events::EventImportance, streamer::QlogStreamer}; use structopt::StructOpt; +use tokio::{net::UdpSocket, time::Sleep}; use url::{Origin, Url}; #[derive(Debug)] @@ -343,10 +350,17 @@ impl QuicParameters { } } -fn emit_datagram(socket: &mio::net::UdpSocket, d: Datagram) -> io::Result<()> { - let sent = socket.send_to(&d[..], &d.destination())?; - if sent != d.len() { - eprintln!("Unable to send all {} bytes of datagram", d.len()); +async fn emit_datagram(socket: &UdpSocket, out_dgram: Datagram) -> Result<(), io::Error> { + let sent = match socket.send_to(&out_dgram, &out_dgram.destination()).await { + Ok(res) => res, + Err(ref err) if err.kind() != io::ErrorKind::WouldBlock => { + eprintln!("UDP send error: {err:?}"); + 0 + } + Err(e) => return Err(e), + }; + if sent != out_dgram.len() { + eprintln!("Unable to send all {} bytes of datagram", out_dgram.len()); } Ok(()) } @@ -393,86 +407,59 @@ fn get_output_file( } } -fn process_loop( - local_addr: &SocketAddr, +enum Ready { + Socket, + Timeout, +} + +// Wait for the socket to be readable or the timeout to fire. 
+async fn ready( socket: &UdpSocket, - poll: &Poll, - client: &mut Http3Client, - handler: &mut Handler, -) -> Res { + mut timeout: Option<&mut Pin>>, +) -> Result { + let socket_ready = Box::pin(socket.readable()).map_ok(|()| Ready::Socket); + let timeout_ready = timeout + .as_mut() + .map(Either::Left) + .unwrap_or(Either::Right(futures::future::pending())) + .map(|()| Ok(Ready::Timeout)); + select(socket_ready, timeout_ready).await.factor_first().0 +} + +fn read_dgram( + socket: &UdpSocket, + local_address: &SocketAddr, +) -> Result, io::Error> { let buf = &mut [0u8; 2048]; - let mut events = Events::with_capacity(1024); - let mut timeout = Duration::new(0, 0); - loop { - poll.poll(&mut events, Some(timeout))?; - - let mut datagrams: Vec = Vec::new(); - 'read: loop { - match socket.recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == ErrorKind::WouldBlock - || err.kind() == ErrorKind::Interrupted => - { - break 'read - } - Err(ref err) => { - eprintln!("UDP error: {err}"); - exit(1); - } - Ok((sz, remote)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - break 'read; - } - if sz > 0 { - let d = - Datagram::new(remote, *local_addr, IpTos::default(), None, &buf[..sz]); - datagrams.push(d); - } - } - }; + let (sz, remote_addr) = match socket.try_recv_from(&mut buf[..]) { + Err(ref err) + if err.kind() == io::ErrorKind::WouldBlock + || err.kind() == io::ErrorKind::Interrupted => + { + return Ok(None) } - if !datagrams.is_empty() { - client.process_multiple_input(&datagrams, Instant::now()); - handler.maybe_key_update(client)?; + Err(err) => { + eprintln!("UDP recv error: {err:?}"); + return Err(err); } + Ok(res) => res, + }; - if let Http3State::Closed(..) 
= client.state() { - return Ok(client.state()); - } - - let mut exiting = !handler.handle(client)?; - - 'write: loop { - match client.process_output(Instant::now()) { - Output::Datagram(dgram) => { - if let Err(err) = emit_datagram(socket, dgram) { - if err.kind() == ErrorKind::WouldBlock - || err.kind() == ErrorKind::Interrupted - { - break 'write; - } - eprintln!("UDP write error: {err}"); - client.close(Instant::now(), 0, err.to_string()); - exiting = true; - break 'write; - } - } - Output::Callback(new_timeout) => { - timeout = new_timeout; - break 'write; - } - Output::None => { - // Not strictly necessary, since we're about to exit - exiting = true; - break 'write; - } - } - } + if sz == buf.len() { + eprintln!("Might have received more than {} bytes", buf.len()); + } - if exiting { - return Ok(client.state()); - } + if sz == 0 { + eprintln!("zero length datagram received?"); + Ok(None) + } else { + Ok(Some(Datagram::new( + remote_addr, + *local_address, + IpTos::default(), + None, + &buf[..sz], + ))) } } @@ -821,39 +808,122 @@ fn to_headers(values: &[impl AsRef]) -> Vec
{ .collect() } -#[allow(clippy::too_many_arguments)] -fn handle_test( - testcase: &String, - args: &mut Args, - socket: &UdpSocket, - poll: &Poll, +struct ClientRunner<'a> { local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - url_queue: VecDeque, - resumption_token: Option, -) -> Res> { - let key_update = KeyUpdateState(args.key_update); - if testcase.as_str() == "upload" { - let mut client = - create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) - .expect("failed to create client"); - args.method = String::from("POST"); + socket: &'a UdpSocket, + client: Http3Client, + handler: Handler<'a>, + timeout: Option>>, + args: &'a Args, +} + +impl<'a> ClientRunner<'a> { + async fn new( + args: &'a mut Args, + socket: &'a UdpSocket, + local_addr: SocketAddr, + remote_addr: SocketAddr, + hostname: &str, + url_queue: VecDeque, + resumption_token: Option, + ) -> Res> { + if let Some(testcase) = &args.test { + if testcase.as_str() != "upload" { + eprintln!("Unsupported test case: {testcase}"); + exit(127) + } + } + + let client = create_http3_client(args, local_addr, remote_addr, hostname, resumption_token) + .expect("failed to create client"); + if args.test.is_some() { + args.method = String::from("POST"); + } + let key_update = KeyUpdateState(args.key_update); let url_handler = URLHandler { url_queue, stream_handlers: HashMap::new(), all_paths: Vec::new(), - handler_type: StreamHandlerType::Upload, + handler_type: if args.test.is_some() { + StreamHandlerType::Upload + } else { + StreamHandlerType::Download + }, args, }; - let mut h = Handler::new(url_handler, key_update, args.output_read_data); - process_loop(&local_addr, socket, poll, &mut client, &mut h)?; - } else { - eprintln!("Unsupported test case: {testcase}"); - exit(127) + let handler = Handler::new(url_handler, key_update, args.output_read_data); + + Ok(Self { + local_addr, + socket, + client, + handler, + timeout: None, + args, + }) + } + + async fn run(mut self) -> 
Res> { + loop { + if !self.handler.handle(&mut self.client)? { + break; + } + + self.process(None).await?; + + match ready(self.socket, self.timeout.as_mut()).await? { + Ready::Socket => loop { + let dgram = read_dgram(self.socket, &self.local_addr)?; + if dgram.is_none() { + break; + } + self.process(dgram.as_ref()).await?; + self.handler.maybe_key_update(&mut self.client)?; + }, + Ready::Timeout => { + self.timeout = None; + } + } + + if let Http3State::Closed(..) = self.client.state() { + break; + } + } + + let token = if self.args.test.is_none() && self.args.resume { + // If we haven't received an event, take a token if there is one. + // Lots of servers don't provide NEW_TOKEN, but a session ticket + // without NEW_TOKEN is better than nothing. + self.handler + .token + .take() + .or_else(|| self.client.take_resumption_token(Instant::now())) + } else { + None + }; + Ok(token) } - Ok(None) + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + loop { + match self.client.process(dgram.take(), Instant::now()) { + Output::Datagram(dgram) => { + emit_datagram(self.socket, dgram).await?; + } + Output::Callback(new_timeout) => { + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; + } + Output::None => { + qdebug!("Output::None"); + break; + } + } + } + + Ok(()) + } } fn create_http3_client( @@ -899,58 +969,6 @@ fn create_http3_client( Ok(client) } -#[allow(clippy::too_many_arguments)] -fn client( - args: &mut Args, - socket: &UdpSocket, - poll: &Poll, - local_addr: SocketAddr, - remote_addr: SocketAddr, - hostname: &str, - url_queue: VecDeque, - resumption_token: Option, -) -> Res> { - let testcase = args.test.clone(); - if let Some(testcase) = testcase { - return handle_test( - &testcase, - args, - socket, - poll, - local_addr, - remote_addr, - hostname, - url_queue, - resumption_token, - ); - } - - let mut client = create_http3_client(args, local_addr, 
remote_addr, hostname, resumption_token) - .expect("failed to create client"); - let key_update = KeyUpdateState(args.key_update); - let url_handler = URLHandler { - url_queue, - stream_handlers: HashMap::new(), - all_paths: Vec::new(), - handler_type: StreamHandlerType::Download, - args, - }; - let mut h = Handler::new(url_handler, key_update, args.output_read_data); - - process_loop(&local_addr, socket, poll, &mut client, &mut h)?; - - let token = if args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. - h.token - .or_else(|| client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) -} - fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { if let Some(qlog_dir) = &args.qlog_dir { let mut qlog_path = qlog_dir.to_path_buf(); @@ -980,7 +998,8 @@ fn qlog_new(args: &Args, hostname: &str, cid: &ConnectionId) -> Res { } } -fn main() -> Res<()> { +#[tokio::main] +async fn main() -> Res<()> { init(); let mut args = Args::from_args(); @@ -1059,21 +1078,15 @@ fn main() -> Res<()> { SocketAddr::V6(..) => SocketAddr::new(IpAddr::V6(Ipv6Addr::from([0; 16])), 0), }; - let socket = match UdpSocket::bind(&local_addr) { + let socket = match std::net::UdpSocket::bind(local_addr) { Err(e) => { eprintln!("Unable to bind UDP socket: {e}"); exit(1) } Ok(s) => s, }; - - let poll = Poll::new()?; - poll.register( - &socket, - Token(0), - Ready::readable() | Ready::writable(), - PollOpt::edge(), - )?; + socket.set_nonblocking(true)?; + let socket = UdpSocket::from_std(socket)?; let real_local = socket.local_addr().unwrap(); println!( @@ -1096,27 +1109,31 @@ fn main() -> Res<()> { first = false; token = if args.use_old_http { - old::old_client( + old::ClientRunner::new( &args, &socket, - &poll, real_local, remote_addr, &hostname, to_request, token, - )? + ) + .await? + .run() + .await? 
} else { - client( + ClientRunner::new( &mut args, &socket, - &poll, real_local, remote_addr, &hostname, to_request, token, - )? + ) + .await? + .run() + .await? }; } } @@ -1129,24 +1146,25 @@ mod old { cell::RefCell, collections::{HashMap, VecDeque}, fs::File, - io::{ErrorKind, Write}, + io::{self, Write}, net::SocketAddr, path::PathBuf, - process::exit, + pin::Pin, rc::Rc, - time::{Duration, Instant}, + time::Instant, }; - use mio::{Events, Poll}; - use neqo_common::{event::Provider, Datagram, IpTos}; + use neqo_common::{event::Provider, qdebug, qinfo, Datagram}; use neqo_crypto::{AuthenticationStatus, ResumptionToken}; use neqo_transport::{ Connection, ConnectionEvent, EmptyConnectionIdGenerator, Error, Output, State, StreamId, StreamType, }; + use tokio::{net::UdpSocket, time::Sleep}; use url::Url; - use super::{emit_datagram, get_output_file, qlog_new, Args, KeyUpdateState, Res}; + use super::{get_output_file, qlog_new, read_dgram, ready, Args, KeyUpdateState, Ready, Res}; + use crate::emit_datagram; struct HandlerOld<'b> { streams: HashMap>, @@ -1330,143 +1348,132 @@ mod old { } } - fn process_loop_old( - local_addr: &SocketAddr, - socket: &mio::net::UdpSocket, - poll: &Poll, - client: &mut Connection, - handler: &mut HandlerOld, - ) -> Res { - let buf = &mut [0u8; 2048]; - let mut events = Events::with_capacity(1024); - let mut timeout = Duration::new(0, 0); - loop { - poll.poll(&mut events, Some(timeout))?; - - 'read: loop { - match socket.recv_from(&mut buf[..]) { - Err(ref err) - if err.kind() == ErrorKind::WouldBlock - || err.kind() == ErrorKind::Interrupted => - { - break 'read - } - Err(ref err) => { - eprintln!("UDP error: {err}"); - exit(1); - } - Ok((sz, remote)) => { - if sz == buf.len() { - eprintln!("Received more than {} bytes", buf.len()); - break 'read; - } - if sz > 0 { - let d = Datagram::new( - remote, - *local_addr, - IpTos::default(), - None, - &buf[..sz], - ); - client.process_input(&d, Instant::now()); - 
handler.maybe_key_update(client)?; + pub struct ClientRunner<'a> { + local_addr: SocketAddr, + socket: &'a UdpSocket, + client: Connection, + handler: HandlerOld<'a>, + timeout: Option>>, + args: &'a Args, + } + + impl<'a> ClientRunner<'a> { + pub async fn new( + args: &'a Args, + socket: &'a UdpSocket, + local_addr: SocketAddr, + remote_addr: SocketAddr, + origin: &str, + url_queue: VecDeque, + token: Option, + ) -> Res> { + let alpn = match args.alpn.as_str() { + "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.alpn.as_str(), + _ => "hq-interop", + }; + + let mut client = Connection::new_client( + origin, + &[alpn], + Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), + local_addr, + remote_addr, + args.quic_parameters.get(alpn), + Instant::now(), + )?; + + if let Some(tok) = token { + client.enable_resumption(Instant::now(), tok)?; + } + + let ciphers = args.get_ciphers(); + if !ciphers.is_empty() { + client.set_ciphers(&ciphers)?; + } + + client.set_qlog(qlog_new(args, origin, client.odcid().unwrap())?); + + let key_update = KeyUpdateState(args.key_update); + let handler = HandlerOld { + streams: HashMap::new(), + url_queue, + all_paths: Vec::new(), + args, + token: None, + key_update, + }; + + Ok(Self { + local_addr, + socket, + client, + handler, + timeout: None, + args, + }) + } + + pub async fn run(mut self) -> Res> { + loop { + if !self.handler.handle(&mut self.client)? { + break; + } + + self.process(None).await?; + + match ready(self.socket, self.timeout.as_mut()).await? { + Ready::Socket => loop { + let dgram = read_dgram(self.socket, &self.local_addr)?; + if dgram.is_none() { + break; } + self.process(dgram.as_ref()).await?; + self.handler.maybe_key_update(&mut self.client)?; + }, + Ready::Timeout => { + self.timeout = None; } - }; - } + } - if let State::Closed(..) = client.state() { - return Ok(client.state().clone()); + if let State::Closed(..) 
= self.client.state() { + break; + } } - let mut exiting = !handler.handle(client)?; + let token = if self.args.resume { + // If we haven't received an event, take a token if there is one. + // Lots of servers don't provide NEW_TOKEN, but a session ticket + // without NEW_TOKEN is better than nothing. + self.handler + .token + .take() + .or_else(|| self.client.take_resumption_token(Instant::now())) + } else { + None + }; + + Ok(token) + } - 'write: loop { - match client.process_output(Instant::now()) { + async fn process(&mut self, mut dgram: Option<&Datagram>) -> Result<(), io::Error> { + loop { + match self.client.process(dgram.take(), Instant::now()) { Output::Datagram(dgram) => { - if let Err(e) = emit_datagram(socket, dgram) { - eprintln!("UDP write error: {e}"); - client.close(Instant::now(), 0, e.to_string()); - exiting = true; - break 'write; - } + emit_datagram(self.socket, dgram).await?; } Output::Callback(new_timeout) => { - timeout = new_timeout; - break 'write; + qinfo!("Setting timeout of {:?}", new_timeout); + self.timeout = Some(Box::pin(tokio::time::sleep(new_timeout))); + break; } Output::None => { - // Not strictly necessary, since we're about to exit - exiting = true; - break 'write; + qdebug!("Output::None"); + break; } } } - if exiting { - return Ok(client.state().clone()); - } - } - } - - #[allow(clippy::too_many_arguments)] - pub fn old_client( - args: &Args, - socket: &mio::net::UdpSocket, - poll: &Poll, - local_addr: SocketAddr, - remote_addr: SocketAddr, - origin: &str, - url_queue: VecDeque, - token: Option, - ) -> Res> { - let alpn = match args.alpn.as_str() { - "hq-29" | "hq-30" | "hq-31" | "hq-32" => args.alpn.as_str(), - _ => "hq-interop", - }; - - let mut client = Connection::new_client( - origin, - &[alpn], - Rc::new(RefCell::new(EmptyConnectionIdGenerator::default())), - local_addr, - remote_addr, - args.quic_parameters.get(alpn), - Instant::now(), - )?; - - if let Some(tok) = token { - client.enable_resumption(Instant::now(), 
tok)?; - } - - let ciphers = args.get_ciphers(); - if !ciphers.is_empty() { - client.set_ciphers(&ciphers)?; + Ok(()) } - - client.set_qlog(qlog_new(args, origin, client.odcid().unwrap())?); - - let key_update = KeyUpdateState(args.key_update); - let mut h = HandlerOld { - streams: HashMap::new(), - url_queue, - all_paths: Vec::new(), - args, - token: None, - key_update, - }; - - process_loop_old(&local_addr, socket, poll, &mut client, &mut h)?; - - let token = if args.resume { - // If we haven't received an event, take a token if there is one. - // Lots of servers don't provide NEW_TOKEN, but a session ticket - // without NEW_TOKEN is better than nothing. - h.token - .or_else(|| client.take_resumption_token(Instant::now())) - } else { - None - }; - Ok(token) } } diff --git a/neqo-transport/src/connection/mod.rs b/neqo-transport/src/connection/mod.rs index 2de388418a..1678e0b8bd 100644 --- a/neqo-transport/src/connection/mod.rs +++ b/neqo-transport/src/connection/mod.rs @@ -2585,10 +2585,16 @@ impl Connection { ) -> Res<()> { qtrace!([self], "Handshake space={} data={:0x?}", space, data); + let was_authentication_pending = + *self.crypto.tls.state() == HandshakeState::AuthenticationPending; let try_update = data.is_some(); match self.crypto.handshake(now, space, data)? 
{ HandshakeState::Authenticated(_) | HandshakeState::InProgress => (), - HandshakeState::AuthenticationPending => self.events.authentication_needed(), + HandshakeState::AuthenticationPending => { + if !was_authentication_pending { + self.events.authentication_needed() + } + } HandshakeState::EchFallbackAuthenticationPending(public_name) => self .events .ech_fallback_authentication_needed(public_name.clone()), diff --git a/neqo-transport/src/connection/tests/handshake.rs b/neqo-transport/src/connection/tests/handshake.rs index 93385ac1bc..a91ecf1b4a 100644 --- a/neqo-transport/src/connection/tests/handshake.rs +++ b/neqo-transport/src/connection/tests/handshake.rs @@ -1135,3 +1135,54 @@ fn implicit_rtt_server() { // an RTT estimate from having discarded the Initial packet number space. assert_eq!(server.stats().rtt, RTT); } + +#[test] +fn emit_authentication_needed_once() { + let mut client = default_client(); + + let mut server = Connection::new_server( + test_fixture::LONG_CERT_KEYS, + test_fixture::DEFAULT_ALPN, + Rc::new(RefCell::new(CountingConnectionIdGenerator::default())), + ConnectionParameters::default(), + ) + .expect("create a server"); + + let client1 = client.process(None, now()); + assert!(client1.as_dgram_ref().is_some()); + + // The entire server flight doesn't fit in a single packet because the + // certificate is large, therefore the server will produce 2 packets. + let server1 = server.process(client1.as_dgram_ref(), now()); + assert!(server1.as_dgram_ref().is_some()); + let server2 = server.process(None, now()); + assert!(server2.as_dgram_ref().is_some()); + + let authentication_needed_count = |client: &mut Connection| { + client + .events() + .filter(|e| matches!(e, ConnectionEvent::AuthenticationNeeded)) + .count() + }; + + // Upon receiving the first packet, the client has the server certificate, + // but not yet all required handshake data. 
It moves to + // `HandshakeState::AuthenticationPending` and emits a + // `ConnectionEvent::AuthenticationNeeded` event. + // + // Note that this is a tiny bit fragile in that it depends on having a certificate + // that is within a fairly narrow range of sizes. It has to fit in a single + // packet, but be large enough that the CertificateVerify message does not + // also fit in the same packet. Our default test setup achieves this, but + // changes to the setup might invalidate this test. + let _ = client.process(server1.as_dgram_ref(), now()); + assert_eq!(1, authentication_needed_count(&mut client)); + assert!(client.peer_certificate().is_some()); + + // The `AuthenticationNeeded` event is still pending a call to + // `Connection::authenticated`. On receiving the second packet from the + // server, the client must not emit a another + // `ConnectionEvent::AuthenticationNeeded`. + let _ = client.process(server2.as_dgram_ref(), now()); + assert_eq!(0, authentication_needed_count(&mut client)); +} From 9ebf23b3d4fa91fdf2ff6934a1fd115eb9658921 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 6 Feb 2024 18:00:18 +1100 Subject: [PATCH 37/41] Range tracker benchmark (#1622) Co-authored-by: Lars Eggert --- neqo-transport/Cargo.toml | 5 +++ neqo-transport/benches/range_tracker.rs | 44 +++++++++++++++++++++++++ neqo-transport/src/lib.rs | 3 ++ neqo-transport/src/send_stream.rs | 8 ++--- 4 files changed, 56 insertions(+), 4 deletions(-) create mode 100644 neqo-transport/benches/range_tracker.rs diff --git a/neqo-transport/Cargo.toml b/neqo-transport/Cargo.toml index c75142e102..49ece8661c 100644 --- a/neqo-transport/Cargo.toml +++ b/neqo-transport/Cargo.toml @@ -34,3 +34,8 @@ required-features = ["bench"] name = "rx_stream_orderer" harness = false required-features = ["bench"] + +[[bench]] +name = "range_tracker" +harness = false +required-features = ["bench"] \ No newline at end of file diff --git a/neqo-transport/benches/range_tracker.rs 
b/neqo-transport/benches/range_tracker.rs new file mode 100644 index 0000000000..a328866f7e --- /dev/null +++ b/neqo-transport/benches/range_tracker.rs @@ -0,0 +1,44 @@ +use criterion::{criterion_group, criterion_main, Criterion}; // black_box +use neqo_transport::send_stream::{RangeState, RangeTracker}; + +const CHUNK: u64 = 1000; +const END: u64 = 100_000; +fn build_coalesce(len: u64) -> RangeTracker { + let mut used = RangeTracker::default(); + used.mark_range(0, CHUNK as usize, RangeState::Acked); + used.mark_range(CHUNK, END as usize, RangeState::Sent); + // leave a gap or it will coalesce here + for i in 2..=len { + // These do not get immediately coalesced when marking since they're not at the end or start + used.mark_range(i * CHUNK, CHUNK as usize, RangeState::Acked); + } + used +} + +fn coalesce(c: &mut Criterion, count: u64) { + c.bench_function( + &format!("coalesce_acked_from_zero {count}+1 entries"), + |b| { + b.iter_batched_ref( + || build_coalesce(count), + |used| { + used.mark_range(CHUNK, CHUNK as usize, RangeState::Acked); + let tail = (count + 1) * CHUNK; + used.mark_range(tail, CHUNK as usize, RangeState::Sent); + used.mark_range(tail, CHUNK as usize, RangeState::Acked); + }, + criterion::BatchSize::SmallInput, + ) + }, + ); +} + +fn benchmark_coalesce(c: &mut Criterion) { + coalesce(c, 1); + coalesce(c, 3); + coalesce(c, 10); + coalesce(c, 1000); +} + +criterion_group!(benches, benchmark_coalesce); +criterion_main!(benches); diff --git a/neqo-transport/src/lib.rs b/neqo-transport/src/lib.rs index ecf7ee2f73..2b5ad57579 100644 --- a/neqo-transport/src/lib.rs +++ b/neqo-transport/src/lib.rs @@ -30,6 +30,9 @@ pub mod recv_stream; #[cfg(not(feature = "bench"))] mod recv_stream; mod rtt; +#[cfg(feature = "bench")] +pub mod send_stream; +#[cfg(not(feature = "bench"))] mod send_stream; mod sender; pub mod server; diff --git a/neqo-transport/src/send_stream.rs b/neqo-transport/src/send_stream.rs index 0464b3e490..62373e22f3 100644 --- 
a/neqo-transport/src/send_stream.rs +++ b/neqo-transport/src/send_stream.rs @@ -135,8 +135,8 @@ impl Default for RetransmissionPriority { } } -#[derive(Debug, PartialEq, Clone, Copy)] -enum RangeState { +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum RangeState { Sent, Acked, } @@ -144,7 +144,7 @@ enum RangeState { /// Track ranges in the stream as sent or acked. Acked implies sent. Not in a /// range implies needing-to-be-sent, either initially or as a retransmission. #[derive(Debug, Default, PartialEq)] -struct RangeTracker { +pub struct RangeTracker { /// offset, (len, RangeState). Use u64 for len because ranges can exceed 32bits. used: BTreeMap, /// this is a cache for first_unmarked_range(), which we check a log @@ -325,7 +325,7 @@ impl RangeTracker { } } - fn mark_range(&mut self, off: u64, len: usize, state: RangeState) { + pub fn mark_range(&mut self, off: u64, len: usize, state: RangeState) { if len == 0 { qinfo!("mark 0-length range at {}", off); return; From a5a2e13f7f0c57bc5d8ac0f11268ac6f9c7ac0a6 Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 6 Feb 2024 19:07:34 +1100 Subject: [PATCH 38/41] Test benchmarks (#1623) * Don't build benches separately * Use --all-targets -F bench for build, test, and clippy * Restore build before transfer run --------- Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 7951477355..71b8e5655c 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -109,16 +109,14 @@ jobs: - name: Build run: | - cargo +${{ matrix.rust-toolchain }} build --all-targets + cargo +${{ matrix.rust-toolchain }} build --all-targets --features ci,bench echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" echo "${{ github.workspace }}/dist/Debug/lib" 
>> "$GITHUB_PATH" - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest --features ci --all-targets --no-fail-fast --lcov --output-path lcov.info - - - name: Benches should at least build - run: cargo +${{ matrix.rust-toolchain }} build --features bench --benches + run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest \ + --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info - name: Run client/server transfer run: | @@ -142,7 +140,7 @@ jobs: if: success() || failure() - name: Clippy - run: cargo +${{ matrix.rust-toolchain }} clippy --tests --benches -- -D warnings + run: cargo +${{ matrix.rust-toolchain }} clippy --all-targets -- -D warnings if: success() || failure() continue-on-error: ${{ matrix.rust-toolchain == 'nightly' }} From 6ef2c5e84162735f55d0afea83556acceabfd2fa Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 6 Feb 2024 11:46:48 +0200 Subject: [PATCH 39/41] fix: Don't call `write_entry` from inside `debug_assert!` (#1624) * fix: Don't call `write_entry` from inside `debug_assert!` Fixes #1481 * Try and add some release builds/tests to the matrix * Fix * Type * Don't exit * Fix env * Remove echo step * Don't linebreak a `run:` statement, it makes it silently fail * Indicate correct lib dir for build type --------- Signed-off-by: Lars Eggert --- .github/workflows/check.yml | 26 +++++++++++++++++--------- neqo-transport/src/cid.rs | 2 +- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index 71b8e5655c..bf04a94da4 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -18,6 +18,13 @@ jobs: matrix: os: [ubuntu-latest, macos-13, windows-latest] rust-toolchain: [1.70.0, stable, nightly] + type: [debug] + include: + - os: ubuntu-latest + rust-toolchain: stable + type: release + env: + BUILD_TYPE: ${{ matrix.type == 'release' && '--release' || '' }} runs-on: ${{ matrix.os }} defaults: 
run: @@ -109,21 +116,22 @@ jobs: - name: Build run: | - cargo +${{ matrix.rust-toolchain }} build --all-targets --features ci,bench - echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_ENV" - echo "${{ github.workspace }}/dist/Debug/lib" >> "$GITHUB_PATH" + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --all-targets --features ci,bench + echo "LD_LIBRARY_PATH=${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_ENV" + echo "DYLD_FALLBACK_LIBRARY_PATH=${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_ENV" + echo "${{ github.workspace }}/dist/$LIB_DIR/lib" >> "$GITHUB_PATH" + env: + LIB_DIR: ${{ matrix.type == 'release' && 'Release' || 'Debug' }} - name: Run tests and determine coverage - run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest \ - --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info + run: cargo +${{ matrix.rust-toolchain }} llvm-cov nextest $BUILD_TYPE --all-targets --features ci,bench --no-fail-fast --lcov --output-path lcov.info - name: Run client/server transfer run: | - cargo +${{ matrix.rust-toolchain }} build --bin neqo-client --bin neqo-server - cargo +${{ matrix.rust-toolchain }} run --bin neqo-server -- $HOST:4433 & + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & PID=$! - cargo +${{ matrix.rust-toolchain }} run --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . 
https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: diff --git a/neqo-transport/src/cid.rs b/neqo-transport/src/cid.rs index be202daf25..429751bef2 100644 --- a/neqo-transport/src/cid.rs +++ b/neqo-transport/src/cid.rs @@ -573,7 +573,7 @@ impl ConnectionIdManager { .add_local(ConnectionIdEntry::new(seqno, cid.clone(), ())); let entry = ConnectionIdEntry::new(seqno, cid, srt); - debug_assert!(self.write_entry(&entry, builder, stats)?); + self.write_entry(&entry, builder, stats)?; tokens.push(RecoveryToken::NewConnectionId(entry)); } } From a8a86863db8d1efa920a12edc99a764e2efeeb9f Mon Sep 17 00:00:00 2001 From: Lars Eggert Date: Tue, 6 Feb 2024 11:57:12 +0200 Subject: [PATCH 40/41] chore: Fix clippy nightly nit (#1625) --- neqo-common/src/codec.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/neqo-common/src/codec.rs b/neqo-common/src/codec.rs index 57ff13f39f..620c669ae6 100644 --- a/neqo-common/src/codec.rs +++ b/neqo-common/src/codec.rs @@ -112,9 +112,7 @@ impl<'a> Decoder<'a> { /// Decodes a QUIC varint. 
pub fn decode_varint(&mut self) -> Option { - let Some(b1) = self.decode_byte() else { - return None; - }; + let b1 = self.decode_byte()?; match b1 >> 6 { 0 => Some(u64::from(b1 & 0x3f)), 1 => Some((u64::from(b1 & 0x3f) << 8) | self.decode_uint(1)?), From 816182fe204daf23d22328b36db49a35841cb41a Mon Sep 17 00:00:00 2001 From: Martin Thomson Date: Tue, 6 Feb 2024 21:53:40 +1100 Subject: [PATCH 41/41] Run client/server test with the ci,bench features (#1626) Co-authored-by: Lars Eggert --- .github/workflows/check.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/check.yml b/.github/workflows/check.yml index bf04a94da4..fb2e3afcff 100644 --- a/.github/workflows/check.yml +++ b/.github/workflows/check.yml @@ -128,10 +128,10 @@ jobs: - name: Run client/server transfer run: | - cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --bin neqo-client --bin neqo-server - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-server -- $HOST:4433 & + cargo +${{ matrix.rust-toolchain }} build $BUILD_TYPE --features ci,bench --bin neqo-client --bin neqo-server + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --features ci,bench --bin neqo-server -- $HOST:4433 & PID=$! - cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE + cargo +${{ matrix.rust-toolchain }} run $BUILD_TYPE --features ci,bench --bin neqo-client -- --output-dir . https://$HOST:4433/$SIZE kill $PID [ "$(wc -c <"$SIZE")" -eq "$SIZE" ] || exit 1 env: