Merge "Merge tag '0.12.0' into new_main" into main
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
index 68b19c6..fd7e828 100644
--- a/.github/workflows/nightly.yml
+++ b/.github/workflows/nightly.yml
@@ -32,20 +32,25 @@
command: fmt
args: -- --check
- - name: Install dependencies
- run: sudo apt-get install libev-dev uthash-dev
-
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
- args: --tests --examples --verbose --features qlog
+ args: --verbose --all-targets --features=ffi,qlog
+
+ # Need to run doc tests separately.
+ # (https://github.com/rust-lang/cargo/issues/6669)
+ - name: Run cargo doc test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --verbose --doc --features=ffi,qlog
- name: Run cargo package
uses: actions-rs/cargo@v1
with:
command: package
- args: --verbose --allow-dirty
+ args: --verbose --workspace --exclude=quiche_apps --allow-dirty
- name: Run cargo doc
uses: actions-rs/cargo@v1
@@ -54,38 +59,9 @@
args: --no-deps
- name: Build C examples
- run: make -C examples
-
- apps:
- runs-on: ubuntu-latest
- # Only run on "pull_request" event for external PRs. This is to avoid
- # duplicate builds for PRs created from internal branches.
- if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
- steps:
- - name: Checkout sources
- uses: actions/checkout@v2
- with:
- submodules: 'recursive'
-
- - name: Install nightly toolchain
- uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- toolchain: ${{ env.TOOLCHAIN }}
- components: rustfmt
- override: true
-
- - name: Run cargo build
- uses: actions-rs/cargo@v1
- with:
- command: build
- args: --verbose --manifest-path=tools/apps/Cargo.toml
-
- - name: Run cargo fmt
- uses: actions-rs/cargo@v1
- with:
- command: fmt
- args: --manifest-path=tools/apps/Cargo.toml -- --check
+ run: |
+ sudo apt-get install libev-dev uthash-dev
+ make -C quiche/examples
fuzz:
runs-on: ubuntu-latest
@@ -133,37 +109,6 @@
command: fmt
args: --manifest-path=fuzz/Cargo.toml -- --check
- qlog:
- runs-on: ubuntu-latest
- # Only run on "pull_request" event for external PRs. This is to avoid
- # duplicate builds for PRs created from internal branches.
- if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
- steps:
- - name: Checkout sources
- uses: actions/checkout@v2
- with:
- submodules: 'recursive'
-
- - name: Install nightly toolchain
- uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- toolchain: ${{ env.TOOLCHAIN }}
- components: rustfmt
- override: true
-
- - name: Run cargo test
- uses: actions-rs/cargo@v1
- with:
- command: test
- args: --verbose --manifest-path=tools/qlog/Cargo.toml
-
- - name: Run cargo fmt
- uses: actions-rs/cargo@v1
- with:
- command: fmt
- args: --manifest-path=tools/qlog/Cargo.toml -- --check
-
http3_test:
runs-on: ubuntu-latest
# Only run on "pull_request" event for external PRs. This is to avoid
diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml
index 0429a74..2c10d8d 100644
--- a/.github/workflows/stable.yml
+++ b/.github/workflows/stable.yml
@@ -26,26 +26,31 @@
components: clippy
override: true
- - name: Install dependencies
- run: sudo apt-get install libev-dev uthash-dev
-
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
- args: --tests --examples --verbose --features ffi,qlog
+ args: --verbose --all-targets --features=ffi,qlog
+
+ # Need to run doc tests separately.
+ # (https://github.com/rust-lang/cargo/issues/6669)
+ - name: Run cargo doc test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --verbose --doc --features=ffi,qlog
- name: Run cargo package
uses: actions-rs/cargo@v1
with:
command: package
- args: --verbose --allow-dirty
+ args: --verbose --workspace --exclude=quiche_apps --allow-dirty
- name: Run cargo clippy
uses: actions-rs/cargo@v1
with:
command: clippy
- args: --examples -- -D warnings
+ args: --examples --features=ffi,qlog -- -D warnings
- name: Run cargo doc
uses: actions-rs/cargo@v1
@@ -54,7 +59,9 @@
args: --no-deps
- name: Build C examples
- run: make -C examples
+ run: |
+ sudo apt-get install libev-dev uthash-dev
+ make -C quiche/examples
quiche_macos:
runs-on: macos-latest
@@ -78,7 +85,13 @@
uses: actions-rs/cargo@v1
with:
command: test
- args: --tests --examples --verbose --features ffi,qlog
+ args: --verbose --all-targets --features ffi,qlog
+
+ - name: Build C examples
+ run: |
+ brew install libev
+ curl -o quiche/examples/uthash.h https://raw.githubusercontent.com/troydhanson/uthash/master/src/uthash.h
+ make -C quiche/examples
quiche_ios:
runs-on: macos-latest
@@ -102,6 +115,10 @@
target: ${{ matrix.target }}
override: true
+ - name: Remove cdylib from iOS build
+ run: |
+ sed -i -e 's/, "cdylib"//g' quiche/Cargo.toml
+
- name: Run cargo build
uses: actions-rs/cargo@v1
with:
@@ -139,7 +156,7 @@
uses: actions-rs/cargo@v1
with:
command: test
- args: --target=${{ matrix.target }} --tests --examples --verbose --features ffi,qlog
+ args: --target=${{ matrix.target }} --verbose --all-targets --features=ffi,qlog
quiche_multiarch:
runs-on: ubuntu-latest
@@ -155,81 +172,13 @@
with:
submodules: 'recursive'
- - name: Install stable toolchain
- uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- toolchain: ${{ env.TOOLCHAIN }}
- target: ${{ matrix.target }}
- override: true
-
- name: Run cargo test
uses: actions-rs/cargo@v1
with:
command: test
- args: --target=${{ matrix.target }} --tests --examples --verbose --features ffi,qlog
+ args: --target=${{ matrix.target }} --verbose --all-targets --features=ffi,qlog
use-cross: true
- apps:
- runs-on: ubuntu-latest
- # Only run on "pull_request" event for external PRs. This is to avoid
- # duplicate builds for PRs created from internal branches.
- if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
- steps:
- - name: Checkout sources
- uses: actions/checkout@v2
- with:
- submodules: 'recursive'
-
- - name: Install stable toolchain
- uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- toolchain: ${{ env.TOOLCHAIN }}
- override: true
-
- - name: Run cargo build
- uses: actions-rs/cargo@v1
- with:
- command: build
- args: --verbose --manifest-path=tools/apps/Cargo.toml
-
- - name: Run cargo clippy
- uses: actions-rs/cargo@v1
- with:
- command: clippy
- args: --manifest-path=tools/apps/Cargo.toml -- -D warnings
-
- qlog:
- runs-on: ubuntu-latest
- # Only run on "pull_request" event for external PRs. This is to avoid
- # duplicate builds for PRs created from internal branches.
- if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
- steps:
- - name: Checkout sources
- uses: actions/checkout@v2
- with:
- submodules: 'recursive'
-
- - name: Install stable toolchain
- uses: actions-rs/toolchain@v1
- with:
- profile: minimal
- toolchain: ${{ env.TOOLCHAIN }}
- override: true
-
- - name: Run cargo test
- uses: actions-rs/cargo@v1
- with:
- command: test
- args: --verbose --manifest-path=tools/qlog/Cargo.toml
-
- - name: Run cargo clippy
- uses: actions-rs/cargo@v1
- with:
- command: clippy
- args: --manifest-path=tools/qlog/Cargo.toml -- -D warnings
-
http3_test:
runs-on: ubuntu-latest
# Only run on "pull_request" event for external PRs. This is to avoid
@@ -293,8 +242,8 @@
- name: Build NGINX
run: |
cd nginx-${{ matrix.version }} &&
- patch -p01 < ../extras/nginx/nginx-1.16.patch &&
- ./configure --with-http_ssl_module --with-http_v2_module --with-http_v3_module --with-openssl="../deps/boringssl" --with-quiche=".." --with-debug &&
+ patch -p01 < ../nginx/nginx-1.16.patch &&
+ ./configure --with-http_ssl_module --with-http_v2_module --with-http_v3_module --with-openssl="${{ github.workspace }}/quiche/deps/boringssl" --with-quiche="${{ github.workspace }}" --with-debug &&
make -j`nproc` &&
objs/nginx -V
diff --git a/.gitignore b/.gitignore
index fc3e671..e0d89a2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
**/target
**/*.rs.bk
**/Cargo.lock
+**/*.*qlog
diff --git a/.gitmodules b/.gitmodules
index c3033aa..8ce47d3 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,4 +1,4 @@
[submodule "boringssl"]
- path = deps/boringssl
+ path = quiche/deps/boringssl
url = https://github.com/google/boringssl.git
ignore = dirty
diff --git a/BUILD.gn b/BUILD.gn
index 32592b6..3a48977 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -1,57 +1,7 @@
-# Copyright 2019 The Fuchsia Authors. All rights reserved.
+# Copyright 2022 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-import("//build/rust/rustc_library.gni")
-
-# Ignore warnings because this is third-party code.
-config("ignore_rust_warnings") {
- rustflags = [ "-Awarnings" ]
-}
-
-rustc_library("quiche") {
- name = "quiche"
- edition = "2018"
- configs -= [
- "//build/config/rust:2018_idioms",
- "//build/config:werror",
- ]
- configs += [ ":ignore_rust_warnings" ]
- deps = [
- "//third_party/rust_crates:lazy_static",
- "//third_party/rust_crates:libc",
- "//third_party/rust_crates:libm",
- "//third_party/rust_crates:log",
- "//third_party/rust_crates:ring",
- ]
- non_rust_deps = [ "//third_party/boringssl" ]
-
- sources = [
- "src/lib.rs",
- "src/octets.rs",
- "src/h3/qpack/encoder.rs",
- "src/ranges.rs",
- "src/rand.rs",
- "src/h3/qpack/mod.rs",
- "src/h3/frame.rs",
- "src/h3/qpack/decoder.rs",
- "src/stream.rs",
- "src/crypto.rs",
- "src/h3/mod.rs",
- "src/dgram.rs",
- "src/recovery/mod.rs",
- "src/tls.rs",
- "src/recovery/prr.rs",
- "src/minmax.rs",
- "src/packet.rs",
- "src/recovery/delivery_rate.rs",
- "src/h3/qpack/huffman/mod.rs",
- "src/recovery/reno.rs",
- "src/h3/qpack/huffman/table.rs",
- "src/h3/stream.rs",
- "src/h3/qpack/static_table.rs",
- "src/recovery/cubic.rs",
- "src/frame.rs",
- "src/recovery/hystart.rs",
- ]
+group("quiche") {
+ public_deps = [ "quiche:quiche"]
}
diff --git a/Cargo.toml b/Cargo.toml
index 470dbce..86845c0 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,75 +1,9 @@
-[package]
-name = "quiche"
-version = "0.10.0"
-authors = ["Alessandro Ghedini <alessandro@ghedini.me>"]
-edition = "2018"
-build = "src/build.rs"
-description = "🥧 Savoury implementation of the QUIC transport protocol and HTTP/3"
-repository = "https://github.com/cloudflare/quiche"
-readme = "README.md"
-keywords = ["quic", "http3"]
-categories = ["network-programming"]
-license = "BSD-2-Clause"
-include = [
- "/*.md",
- "/*.toml",
- "/CODEOWNERS",
- "/COPYING",
- "/benches",
- "/deps/boringssl/**/*.[chS]",
- "/deps/boringssl/**/*.asm",
- "/deps/boringssl/src/**/*.cc",
- "/deps/boringssl/**/CMakeLists.txt",
- "/deps/boringssl/**/sources.cmake",
- "/deps/boringssl/LICENSE",
- "/examples",
- "/include",
- "/quiche.svg",
- "/src",
-]
-
-[features]
-default = ["boringssl-vendored"]
-
-# Build vendored BoringSSL library.
-boringssl-vendored = []
-
-# Generate pkg-config metadata file for libquiche.
-pkg-config-meta = []
-
-# Equivalent to "--cfg fuzzing", but can also be checked in build.rs.
-fuzzing = []
-
-# Expose the FFI API.
-ffi = []
-
-[package.metadata.docs.rs]
-no-default-features = true
-
-[build-dependencies]
-cmake = "0.1"
-
-[dependencies]
-log = { version = "0.4", features = ["std"] }
-libc = "0.2"
-libm = "0.2"
-ring = "0.16"
-lazy_static = "1"
-boring-sys = { version = "1.0.2", optional = true }
-qlog = { version = "0.5", path = "tools/qlog", optional = true }
-
-[target."cfg(windows)".dependencies]
-winapi = { version = "0.3", features = ["wincrypt"] }
-
-[dev-dependencies]
-mio = "0.6"
-url = "1"
+[workspace]
+members = [ "quiche", "qlog", "apps" ]
+exclude = [ "fuzz", "tools/http3_test" ]
[profile.bench]
debug = true
[profile.release]
debug = true
-
-[lib]
-crate-type = ["lib", "staticlib", "cdylib"]
diff --git a/Dockerfile b/Dockerfile
index 9650ba3..44998bf 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,15 +2,14 @@
WORKDIR /build
-COPY deps/ ./deps/
-COPY src/ ./src/
-COPY tools/ ./tools/
-COPY Cargo.toml .
+COPY quiche/ ./quiche/
+COPY qlog/ ./qlog/
+COPY apps/ ./apps/
RUN apt-get update && apt-get install -y cmake && \
rm -rf /var/lib/apt/lists/*
-RUN cargo build --manifest-path tools/apps/Cargo.toml
+RUN cargo build --manifest-path apps/Cargo.toml
##
## quiche-base: quiche image for apps
@@ -21,8 +20,8 @@
rm -rf /var/lib/apt/lists/*
COPY --from=build \
- /build/tools/apps/target/debug/quiche-client \
- /build/tools/apps/target/debug/quiche-server \
+ /build/apps/target/debug/quiche-client \
+ /build/apps/target/debug/quiche-server \
/usr/local/bin/
ENV PATH="/usr/local/bin/:${PATH}"
@@ -38,9 +37,9 @@
WORKDIR /quiche
COPY --from=build \
- /build/tools/apps/target/debug/quiche-client \
- /build/tools/apps/target/debug/quiche-server \
- /build/tools/qns/run_endpoint.sh \
+ /build/apps/target/debug/quiche-client \
+ /build/apps/target/debug/quiche-server \
+ /build/apps/run_endpoint.sh \
./
ENV RUST_LOG=trace
diff --git a/Makefile b/Makefile
index cc67ac1..781fbc0 100644
--- a/Makefile
+++ b/Makefile
@@ -14,7 +14,7 @@
# build quiche-apps only
.PHONY: build-apps
build-apps:
- cargo build --manifest-path tools/apps/Cargo.toml
+ cargo build --package=quiche_apps
# build base image
.PHONY: docker-base
@@ -23,7 +23,7 @@
# build qns image
.PHONY: docker-qns
-docker-qns: Dockerfile tools/qns/run_endpoint.sh
+docker-qns: Dockerfile apps/run_endpoint.sh
$(DOCKER) build --target quiche-qns -t $(QNS_REPO):$(QNS_TAG) .
.PHONY: docker-publish
diff --git a/README.md b/README.md
index efc0985..85dd157 100644
--- a/README.md
+++ b/README.md
@@ -32,12 +32,11 @@
### NGINX (unofficial)
-quiche can be [integrated into NGINX][nginx-http3] using an unofficial patch to
+quiche can be [integrated into NGINX](nginx/) using an unofficial patch to
provide support for HTTP/3.
[cloudflare-http3]: https://blog.cloudflare.com/http3-the-past-present-and-future/
[curl-http3]: https://github.com/curl/curl/blob/master/docs/HTTP3.md#quiche-version
-[nginx-http3]: https://github.com/cloudflare/quiche/tree/master/extras/nginx
Getting Started
---------------
@@ -45,20 +44,18 @@
### Command-line apps
Before diving into the quiche API, here are a few examples on how to use the
-quiche tools provided as part of the [quiche-apps](tools/apps/) crate.
+quiche tools provided as part of the [quiche-apps](apps/) crate.
After cloning the project according to the command mentioned in the [building](#building) section, the client can be run as follows:
```bash
- $ cargo run --manifest-path=tools/apps/Cargo.toml --bin quiche-client -- https://cloudflare-quic.com/
+ $ cargo run --bin quiche-client -- https://cloudflare-quic.com/
```
while the server can be run as follows:
```bash
- $ cargo run --manifest-path=tools/apps/Cargo.toml --bin quiche-server -- \
- --cert tools/apps/src/bin/cert.crt \
- --key tools/apps/src/bin/cert.key
+ $ cargo run --bin quiche-server -- --cert apps/src/bin/cert.crt --key apps/src/bin/cert.key
```
(note that the certificate provided is self-signed and should not be used in
@@ -223,11 +220,11 @@
[`stream_recv()`]: https://docs.quic.tech/quiche/struct.Connection.html#method.stream_recv
[HTTP/3 module]: https://docs.quic.tech/quiche/h3/index.html
-Have a look at the [examples/] directory for more complete examples on how to use
-the quiche API, including examples on how to use quiche in C/C++ applications
-(see below for more information).
+Have a look at the [quiche/examples/] directory for more complete examples on
+how to use the quiche API, including examples on how to use quiche in C/C++
+applications (see below for more information).
-[examples/]: examples/
+[examples/]: quiche/examples/
Calling quiche from C/C++
-------------------------
@@ -244,7 +241,7 @@
Note that in order to enable the FFI API, the ``ffi`` feature must be enabled (it
is disabled by default), by passing ``--features ffi`` to ``cargo``.
-[thin C API]: https://github.com/cloudflare/quiche/blob/master/include/quiche.h
+[thin C API]: https://github.com/cloudflare/quiche/blob/master/quiche/include/quiche.h
Building
--------
diff --git a/tools/apps/Cargo.toml b/apps/Cargo.toml
similarity index 89%
rename from tools/apps/Cargo.toml
rename to apps/Cargo.toml
index 9205197..4b80418 100644
--- a/tools/apps/Cargo.toml
+++ b/apps/Cargo.toml
@@ -24,10 +24,7 @@
url = "1"
log = "0.4"
ring = "0.16"
-quiche = { path = "../../" }
-
-[profile.release]
-debug = true
+quiche = { path = "../quiche" }
[lib]
crate-type = ["lib"]
diff --git a/tools/qns/run_endpoint.sh b/apps/run_endpoint.sh
similarity index 100%
rename from tools/qns/run_endpoint.sh
rename to apps/run_endpoint.sh
diff --git a/tools/apps/src/args.rs b/apps/src/args.rs
similarity index 92%
rename from tools/apps/src/args.rs
rename to apps/src/args.rs
index e1f52de..1b780dc 100644
--- a/tools/apps/src/args.rs
+++ b/apps/src/args.rs
@@ -34,7 +34,9 @@
pub struct CommonArgs {
pub alpns: Vec<u8>,
pub max_data: u64,
+ pub max_window: u64,
pub max_stream_data: u64,
+ pub max_stream_window: u64,
pub max_streams_bidi: u64,
pub max_streams_uni: u64,
pub idle_timeout: u64,
@@ -54,7 +56,9 @@
///
/// --http-version VERSION HTTP version to use.
/// --max-data BYTES Connection-wide flow control limit.
+/// --max-window BYTES Connection-wide max receiver window.
/// --max-stream-data BYTES Per-stream flow control limit.
+/// --max-stream-window BYTES Per-stream max receiver window.
/// --max-streams-bidi STREAMS Number of allowed concurrent streams.
/// --max-streams-uni STREAMS Number of allowed concurrent streams.
/// --dump-packets PATH Dump the incoming packets in PATH.
@@ -63,7 +67,7 @@
/// --disable-hystart Disable HyStart++.
/// --dgram-proto PROTO DATAGRAM application protocol.
/// --dgram-count COUNT Number of DATAGRAMs to send.
-/// --dgram-data DATA DATAGRAM data to send.
+/// --dgram-data DATA DATAGRAM data to send.
///
/// [`Docopt`]: https://docs.rs/docopt/1.1.0/docopt/
impl Args for CommonArgs {
@@ -107,9 +111,15 @@
let max_data = args.get_str("--max-data");
let max_data = max_data.parse::<u64>().unwrap();
+ let max_window = args.get_str("--max-window");
+ let max_window = max_window.parse::<u64>().unwrap();
+
let max_stream_data = args.get_str("--max-stream-data");
let max_stream_data = max_stream_data.parse::<u64>().unwrap();
+ let max_stream_window = args.get_str("--max-stream-window");
+ let max_stream_window = max_stream_window.parse::<u64>().unwrap();
+
let max_streams_bidi = args.get_str("--max-streams-bidi");
let max_streams_bidi = max_streams_bidi.parse::<u64>().unwrap();
@@ -136,7 +146,9 @@
CommonArgs {
alpns,
max_data,
+ max_window,
max_stream_data,
+ max_stream_window,
max_streams_bidi,
max_streams_uni,
idle_timeout,
@@ -157,7 +169,9 @@
CommonArgs {
alpns: alpns::length_prefixed(&alpns::HTTP_3),
max_data: 10000000,
+ max_window: 25165824,
max_stream_data: 1000000,
+ max_stream_window: 16777216,
max_streams_bidi: 100,
max_streams_uni: 100,
idle_timeout: 30000,
@@ -181,7 +195,9 @@
--method METHOD Use the given HTTP request method [default: GET].
--body FILE Send the given file as request body.
--max-data BYTES Connection-wide flow control limit [default: 10000000].
+ --max-window BYTES Connection-wide max receiver window [default: 25165824].
--max-stream-data BYTES Per-stream flow control limit [default: 1000000].
+ --max-stream-window BYTES Per-stream max receiver window [default: 16777216].
--max-streams-bidi STREAMS Number of allowed concurrent streams [default: 100].
--max-streams-uni STREAMS Number of allowed concurrent streams [default: 100].
--idle-timeout TIMEOUT Idle timeout in milliseconds [default: 30000].
@@ -328,10 +344,12 @@
--index <name> The file that will be used as index [default: index.html].
--name <str> Name of the server [default: quic.tech]
--max-data BYTES Connection-wide flow control limit [default: 10000000].
+ --max-window BYTES Connection-wide max receiver window [default: 25165824].
--max-stream-data BYTES Per-stream flow control limit [default: 1000000].
+ --max-stream-window BYTES Per-stream max receiver window [default: 16777216].
--max-streams-bidi STREAMS Number of allowed concurrent streams [default: 100].
--max-streams-uni STREAMS Number of allowed concurrent streams [default: 100].
- --idle-timeout TIMEOUT Idle timeout in milliseconds [default: 30000].
+ --idle-timeout TIMEOUT Idle timeout in milliseconds [default: 30000].
--dump-packets PATH Dump the incoming packets as files in the given directory.
--early-data Enable receiving early data.
--no-retry Disable stateless retry.
diff --git a/tools/apps/src/bin/cert.crt b/apps/src/bin/cert.crt
similarity index 100%
rename from tools/apps/src/bin/cert.crt
rename to apps/src/bin/cert.crt
diff --git a/tools/apps/src/bin/cert.key b/apps/src/bin/cert.key
similarity index 100%
rename from tools/apps/src/bin/cert.key
rename to apps/src/bin/cert.key
diff --git a/tools/apps/src/bin/quiche-client.rs b/apps/src/bin/quiche-client.rs
similarity index 100%
rename from tools/apps/src/bin/quiche-client.rs
rename to apps/src/bin/quiche-client.rs
diff --git a/tools/apps/src/bin/quiche-server.rs b/apps/src/bin/quiche-server.rs
similarity index 99%
rename from tools/apps/src/bin/quiche-server.rs
rename to apps/src/bin/quiche-server.rs
index 0184725..20392e9 100644
--- a/tools/apps/src/bin/quiche-server.rs
+++ b/apps/src/bin/quiche-server.rs
@@ -97,6 +97,9 @@
config.set_initial_max_streams_uni(conn_args.max_streams_uni);
config.set_disable_active_migration(true);
+ config.set_max_connection_window(conn_args.max_window);
+ config.set_max_stream_window(conn_args.max_stream_window);
+
let mut keylog = None;
if let Some(keylog_path) = std::env::var_os("SSLKEYLOGFILE") {
diff --git a/tools/apps/src/client.rs b/apps/src/client.rs
similarity index 98%
rename from tools/apps/src/client.rs
rename to apps/src/client.rs
index b0bd5ad..affbc42 100644
--- a/tools/apps/src/client.rs
+++ b/apps/src/client.rs
@@ -109,6 +109,9 @@
config.set_initial_max_streams_uni(conn_args.max_streams_uni);
config.set_disable_active_migration(true);
+ config.set_max_connection_window(conn_args.max_window);
+ config.set_max_stream_window(conn_args.max_stream_window);
+
let mut keylog = None;
if let Some(keylog_path) = std::env::var_os("SSLKEYLOGFILE") {
diff --git a/tools/apps/src/common.rs b/apps/src/common.rs
similarity index 99%
rename from tools/apps/src/common.rs
rename to apps/src/common.rs
index 1e0156f..3cbf51b 100644
--- a/tools/apps/src/common.rs
+++ b/apps/src/common.rs
@@ -148,7 +148,7 @@
dir: &std::ffi::OsStr, role: &str, id: &str,
) -> std::io::BufWriter<std::fs::File> {
let mut path = std::path::PathBuf::from(dir);
- let filename = format!("{}-{}.qlog", role, id);
+ let filename = format!("{}-{}.sqlog", role, id);
path.push(filename);
match std::fs::File::create(&path) {
diff --git a/tools/apps/src/lib.rs b/apps/src/lib.rs
similarity index 100%
rename from tools/apps/src/lib.rs
rename to apps/src/lib.rs
diff --git a/deps/boringssl b/deps/boringssl
deleted file mode 160000
index f1c7534..0000000
--- a/deps/boringssl
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit f1c75347daa2ea81a941e953f2263e0a4d970c8d
diff --git a/examples/cert.crt b/examples/cert.crt
deleted file mode 100644
index 34d9640..0000000
--- a/examples/cert.crt
+++ /dev/null
@@ -1,18 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIC7TCCAdUCFDuGBhl3l5Z++VCLkvaav4yteBonMA0GCSqGSIb3DQEBCwUAMEUx
-CzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRl
-cm5ldCBXaWRnaXRzIFB0eSBMdGQwHhcNMjAwMzIzMTYwNzU0WhcNNDcwODA5MTYw
-NzU0WjAhMQswCQYDVQQGEwJHQjESMBAGA1UEAwwJcXVpYy50ZWNoMIIBIjANBgkq
-hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz5bOL7LD9kiIagcVrZqZ13ZcR0KhMuzs
-brqULbZKyqC+uBRgINxYJ7LPnJ4LPYuCt/nAaQ7CLXfKgzAMFu8eIK6UEvZA6+7b
-20E4rvOpPbTB/T4JbYZNQKyM9AEwr6j0P6vFgrWT7aBzhkmiqEe5vv/7ZOEGb+Ab
-+cvMeszfBbk93nyzKdNaUuh95x7/p0Ow315np2PRuoT0QQnA9zE/9eZ3Jah3cNZn
-NuQ6BDHlkegzTV5JhYYblRo/pmt2E9E0ha+NWsRLf3ZJUYhkYR3UqMltEKuLglCO
-VWBbPmKd4IZUNIotpKMVQSVb9agNBF49hH9iBhN3fBm7Hp8KBpjJLwIDAQABMA0G
-CSqGSIb3DQEBCwUAA4IBAQCo/Rn4spa5XFk0cCoKypP27DxePkGD9rQZk/CY4inV
-JV16anZ1pr9yfO61+m3fRKTZq7yxtHRDWxDdROHx9LqV1dXLAmh1ecV9Kn6/796O
-EHsOcVB0Lfi9Ili7//oUqlhGNploRuQbgWAXU+Eo1xJRWIXeedhzBSgEOMaQk3Zn
-TdYFhP0/Ao/fEdI4VULv1A43ztnZIB2KXWgUQoFT32woL47eWge8LxxVmmH3STtz
-nNcGnYxIorCQemDHDzMrvxRWgHxkpFGGqAhkFFyCmhKFPglKwt+yVTx26T8tShID
-ISMj0rgVMptmtWKJfzNCvFG52gsuO4w3yGdjgjRRrBDm
------END CERTIFICATE-----
diff --git a/examples/cert.key b/examples/cert.key
deleted file mode 100644
index 9e3c381..0000000
--- a/examples/cert.key
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPls4vssP2SIhq
-BxWtmpnXdlxHQqEy7OxuupQttkrKoL64FGAg3Fgnss+cngs9i4K3+cBpDsItd8qD
-MAwW7x4grpQS9kDr7tvbQTiu86k9tMH9Pglthk1ArIz0ATCvqPQ/q8WCtZPtoHOG
-SaKoR7m+//tk4QZv4Bv5y8x6zN8FuT3efLMp01pS6H3nHv+nQ7DfXmenY9G6hPRB
-CcD3MT/15nclqHdw1mc25DoEMeWR6DNNXkmFhhuVGj+ma3YT0TSFr41axEt/dklR
-iGRhHdSoyW0Qq4uCUI5VYFs+Yp3ghlQ0ii2koxVBJVv1qA0EXj2Ef2IGE3d8Gbse
-nwoGmMkvAgMBAAECggEBAMtFkpUmablKgTnBwjqCvs47OlUVK6AgW8x5qwuwC0Cr
-ctXyLcc/vJry/1UPdVZIvDHGv+Cf8Qhw2r7nV49FiqzaBmki9aOR+3uRPB4kvr6L
-t8Fw8+5pqlAAJu3wFGqN+M44N2mswDPaAAWpKTu7MGmVY+f+aT03qG1MYOiGoISK
-gP6DHiinddD38spM2muyCUyFZk9a+aBEfaQzZoU3gc0yB6R/qBOWZ7NIoIUMicku
-Zf3L6/06uunyZp+ueR83j1YWbg3JoYKlGAuQtDRF709+MQrim8lKTnfuHiBeZKYZ
-GNLSo7lGjrp6ccSyfXmlA36hSfdlrWtZJ4+utZShftECgYEA+NNOFNa1BLfDw3ot
-a6L4W6FE45B32bLbnBdg8foyEYrwzHLPFCbws1Z60pNr7NaCHDIMiKVOXvKQa78d
-qdWuPUVJ83uVs9GI8tAo00RAvBn6ut9yaaLa8mIv6ZpfU20IgE5sDjB7IBY9tTVd
-EDyJcDuKQXzQ48qmEw86wINQMd0CgYEA1ZMdt7yLnpDiYa6M/BuKjp7PWKcRlzVM
-BcCEYHA4LJ6xEOH4y9DEx2y5ljwOcXgJhXAfAyGQr7s1xiP/nXurqfmdP8u7bawp
-VwuWJ8Vv0ZXITaU0isezG2Dpnseuion3qSraWlmWUlWLVVgKETZmk7cF7VIXa0NT
-LFREdObI5HsCgYBUbm8KRyi5Zxm4VNbgtTYM8ZYMmdLxPe2i85PjyAABT+IRncuC
-jQwT7n5Swc9XWBpiMuFp5J3JPgmfZgRMwsMS61YClqbfk3Qi4FtaBMjqiu43Rubt
-zWL56DNV0xoRlufRkcq8rdq5spJR0L+5aLFCMhHh0taW1QaxZPOMq4IkyQKBgQC3
-GetubGzewqPyzuz77ri5URm+jW0dT4ofnE9hRpRCXMK9EJ52TkOGHYZ2cIKJcTno
-dpl/27Tpk/ykJJSu9SnVDbVszkOf4OuIPty6uCAHdPxG5Q3ItTCulkVz5QmUqHf1
-RlHxB8FCUSilQFdRLmx+03h3X9vID+4soQoXlwxAJQKBgE5SQpN+TG5V+E4zHgNd
-6cy6gA5dGDJ0KbsgxJwlKTFA9nIcs2ssBxLY9U4x75EGuqpeVNmq6xwwmPtBs0rp
-M3W4zdFrZQ3BneFRW7WbSBbsUSprkJW/p4GXa17GzGUq/MDXlGhNlApP1nknzFvE
-xGaH0/H/TZxpLCogVP9npUkj
------END PRIVATE KEY-----
diff --git a/examples/rootca.crt b/examples/rootca.crt
deleted file mode 100644
index 719769c..0000000
--- a/examples/rootca.crt
+++ /dev/null
@@ -1,21 +0,0 @@
------BEGIN CERTIFICATE-----
-MIIDazCCAlOgAwIBAgIUAxoIpwJReHnJMSdGsRjjKRMdg/AwDQYJKoZIhvcNAQEL
-BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
-GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yMDAzMjMxNjA3NTRaFw00NzA4
-MDkxNjA3NTRaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
-HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
-AQUAA4IBDwAwggEKAoIBAQCzwqx42InprkvjNlkfJNHY/FKJam6VG2D25SBeW0cw
-1Il38xLA9YQYTiSFePfLBt4cLnK3Na+opqg/2A9PG0iY9tpj5w2TmPnvWD+4AN5Y
-+KFwT9mGgbWSJ3vl2r/H5KU7qqBmfXPGYMHhIFU0objRKc40qww/tUCa8j3G4a5l
-zcKc2LpGeeeKtcpExJSkscXKNlMCLTIXcDVuX+i43KCacvKBg+hwNML8Jwg6pE9Y
-kvxJbnl8IfApexHKSrP6Kie9BiB4tVvbjGmQaiGM3zQAbTuaPD+le1ZYGnoxjzn6
-+cWpcPWPNEmM+zVWavIQD5rLxNW4dA4FhczLfGf1Cra7AgMBAAGjUzBRMB0GA1Ud
-DgQWBBRQ+lQtDANNRd9cfskISijXoCSiiDAfBgNVHSMEGDAWgBRQ+lQtDANNRd9c
-fskISijXoCSiiDAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCK
-dfuwKWM8iou5wSNZIND433CiXpa24ZEIesurAJ6Y9QzQlbS/K6Kp/tM4gr/kqzfe
-i8dkUtCPKBCTDQ1nuQ4Wgf9hVnoN/uct7eYKoO7gJtySdP0TqHNDtDoHPMglHN04
-vjf4A1HOECbCjAt9PD46as65Tbjbs2wT6pdcYkHWLHLQ25I13yKK2bSNgXBYTbD0
-xQIF2nw8f+CnHk4Ho2+NFJ2gl8DKfELXevI11F2eoQIcJauLM4gFhAjHWwpRmU5W
-pE3qMq2LzzmDnaBli7vDGJcUnyk1upUS5vM9+RKZYjH8aVydBzXvmnkP+rFYwwIt
-GwgB/MplEB8BXUTaVYB+
------END CERTIFICATE-----
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index 16c70fd..2ebec68 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -12,7 +12,7 @@
opt-level = 3
[dependencies]
-quiche = { path = "..", features = ["fuzzing"] }
+quiche = { path = "../quiche", features = ["fuzzing"] }
lazy_static = "1"
libfuzzer-sys = { git = "https://github.com/rust-fuzz/libfuzzer-sys.git" }
diff --git a/fuzz/src/packet_recv_server.rs b/fuzz/src/packet_recv_server.rs
index 7749ee7..692c071 100644
--- a/fuzz/src/packet_recv_server.rs
+++ b/fuzz/src/packet_recv_server.rs
@@ -13,9 +13,9 @@
lazy_static! {
static ref CONFIG: Mutex<quiche::Config> = {
let crt_path = std::env::var("QUICHE_FUZZ_CRT")
- .unwrap_or_else(|_| "examples/cert.crt".to_string());
+ .unwrap_or_else(|_| "fuzz/cert.crt".to_string());
let key_path = std::env::var("QUICHE_FUZZ_KEY")
- .unwrap_or_else(|_| "examples/cert.key".to_string());
+ .unwrap_or_else(|_| "fuzz/cert.key".to_string());
let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
config.load_cert_chain_from_pem_file(&crt_path).unwrap();
diff --git a/extras/nginx/README.md b/nginx/README.md
similarity index 97%
rename from extras/nginx/README.md
rename to nginx/README.md
index 56da61a..aa6bc3a 100644
--- a/extras/nginx/README.md
+++ b/nginx/README.md
@@ -32,7 +32,7 @@
Next you’ll need to apply the patch to NGINX:
```
% cd nginx-1.16.1
- % patch -p01 < ../quiche/extras/nginx/nginx-1.16.patch
+ % patch -p01 < ../quiche/nginx/nginx-1.16.patch
```
And finally build NGINX with HTTP/3 support enabled:
@@ -43,7 +43,7 @@
--with-http_ssl_module \
--with-http_v2_module \
--with-http_v3_module \
- --with-openssl=../quiche/deps/boringssl \
+ --with-openssl=../quiche/quiche/deps/boringssl \
--with-quiche=../quiche
% make
```
diff --git a/extras/nginx/nginx-1.16.patch b/nginx/nginx-1.16.patch
similarity index 99%
rename from extras/nginx/nginx-1.16.patch
rename to nginx/nginx-1.16.patch
index 0b6d27f..96bb463 100644
--- a/extras/nginx/nginx-1.16.patch
+++ b/nginx/nginx-1.16.patch
@@ -116,7 +116,7 @@
+ QUICHE_BUILD_TARGET="debug"
+ fi
+
-+ CORE_INCS="$CORE_INCS $QUICHE/include"
++ CORE_INCS="$CORE_INCS $QUICHE/quiche/include"
+ CORE_DEPS="$CORE_DEPS $QUICHE/target/$QUICHE_BUILD_TARGET/libquiche.a"
+ CORE_LIBS="$CORE_LIBS $QUICHE/target/$QUICHE_BUILD_TARGET/libquiche.a $NGX_LIBPTHREAD -lm"
+
@@ -134,7 +134,7 @@
+
+# Copyright (C) Cloudflare, Inc.
+
-+QUICHE_COMMON_FLAGS="--verbose --no-default-features --features ffi"
++QUICHE_COMMON_FLAGS="--package quiche --verbose --no-default-features --features ffi"
+
+# Default is release build
+QUICHE_BUILD_FLAGS="$QUICHE_COMMON_FLAGS --release"
diff --git a/tools/qlog/Cargo.toml b/qlog/Cargo.toml
similarity index 96%
rename from tools/qlog/Cargo.toml
rename to qlog/Cargo.toml
index 2e47694..d143c6f 100644
--- a/tools/qlog/Cargo.toml
+++ b/qlog/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "qlog"
-version = "0.5.0"
+version = "0.6.0"
authors = ["Lucas Pardue <lucaspardue.24.7@gmail.com>"]
edition = "2018"
description = "qlog data model for QUIC and HTTP/3"
diff --git a/qlog/README.md b/qlog/README.md
new file mode 100644
index 0000000..71820ec
--- /dev/null
+++ b/qlog/README.md
@@ -0,0 +1,316 @@
+The qlog crate is an implementation of the qlog [main logging schema],
+[QUIC event definitions], and [HTTP/3 and QPACK event definitions].
+The crate provides a qlog data model that can be used for traces with
+events. It supports serialization and deserialization but defers logging IO
+choices to applications.
+
+The crate uses Serde for conversion between Rust and JSON.
+
+[main logging schema]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
+[QUIC event definitions]:
+https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-quic-events.html
+[HTTP/3 and QPACK event definitions]:
+https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-h3-events.html
+
+Overview
+--------
+qlog is a hierarchical logging format, with a rough structure of:
+
+* Log
+ * Trace(s)
+ * Event(s)
+
+In practice, a single QUIC connection maps to a single Trace file with one
+or more Events. Applications can decide whether to combine Traces from
+different connections into the same Log.
+
+## Buffered Traces with standard JSON
+
+A [`Trace`] is a single JSON object. It contains metadata such as the
+[`VantagePoint`] of capture and the [`Configuration`], and protocol event
+data in the [`Event`] array.
+
+JSON Traces allow applications to append events to them before eventually
+being serialized as a complete JSON object.
+
+### Creating a Trace
+
+```rust
+let mut trace = qlog::Trace::new(
+ qlog::VantagePoint {
+ name: Some("Example client".to_string()),
+ ty: qlog::VantagePointType::Client,
+ flow: None,
+ },
+ Some("Example qlog trace".to_string()),
+ Some("Example qlog trace description".to_string()),
+ Some(qlog::Configuration {
+ time_offset: Some(0.0),
+ original_uris: None,
+ }),
+ None,
+);
+```
+
+### Adding events to a Trace
+
+Qlog `Event` objects are added to `qlog::Trace.events`.
+
+The following example demonstrates how to log a qlog QUIC `packet_sent` event
+containing a single Crypto frame. It constructs the necessary elements of the
+[`Event`], then appends it to the trace with [`push_event()`].
+
+```rust
+let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
+let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
+
+let pkt_hdr = qlog::events::quic::PacketHeader::new(
+ qlog::events::quic::PacketType::Initial,
+ 0, // packet_number
+ None, // flags
+ None, // token
+ None, // length
+ Some(0x00000001), // version
+ Some(&scid),
+ Some(&dcid),
+);
+
+let frames = vec![qlog::events::quic::QuicFrame::Crypto {
+ offset: 0,
+ length: 0,
+}];
+
+let raw = qlog::events::RawInfo {
+ length: Some(1251),
+ payload_length: Some(1224),
+ data: None,
+};
+
+let event_data =
+ qlog::events::EventData::PacketSent(qlog::events::quic::PacketSent {
+ header: pkt_hdr,
+ frames: Some(frames),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(raw),
+ datagram_id: None,
+ });
+
+trace.push_event(qlog::events::Event::with_time(0.0, event_data));
+```
+
+### Serializing
+
+The qlog crate has only been tested with `serde_json`, however other serializer
+targets might work.
+
+For example, serializing the trace created above:
+
+```rust
+serde_json::to_string_pretty(&trace).unwrap();
+```
+
+would generate the following:
+
+```
+{
+ "vantage_point": {
+ "name": "Example client",
+ "type": "client"
+ },
+ "title": "Example qlog trace",
+ "description": "Example qlog trace description",
+ "configuration": {
+ "time_offset": 0.0
+ },
+ "events": [
+ {
+ "time": 0.0,
+ "name": "transport:packet_sent",
+ "data": {
+ "header": {
+ "packet_type": "initial",
+ "packet_number": 0,
+ "version": "1",
+ "scil": 8,
+ "dcil": 8,
+ "scid": "7e37e4dcc6682da8",
+ "dcid": "36ce104eee50101c"
+ },
+ "raw": {
+ "length": 1251,
+ "payload_length": 1224
+ },
+ "frames": [
+ {
+ "frame_type": "crypto",
+ "offset": 0,
+ "length": 0
+ }
+ ]
+ }
+ }
+ ]
+}
+```
+
+## Streaming Traces JSON Text Sequences (JSON-SEQ)
+
+To help support streaming serialization of qlogs,
+draft-ietf-quic-qlog-main-schema-01 introduced support for RFC 7464 JSON
+Text Sequences (JSON-SEQ). The qlog crate supports this format and provides
+utilities that aid streaming.
+
+A [`TraceSeq`] contains metadata such as the [`VantagePoint`] of capture and
+the [`Configuration`]. However, protocol event data is handled as separate
+lines containing a record separator character, a serialized [`Event`], and a
+newline.
+
+### Creating a TraceSeq
+
+``` rust
+let mut trace = qlog::TraceSeq::new(
+ qlog::VantagePoint {
+ name: Some("Example client".to_string()),
+ ty: qlog::VantagePointType::Client,
+ flow: None,
+ },
+ Some("Example qlog trace".to_string()),
+ Some("Example qlog trace description".to_string()),
+ Some(qlog::Configuration {
+ time_offset: Some(0.0),
+ original_uris: None,
+ }),
+ None,
+);
+```
+
+Create an object with the [`Write`] trait:
+```
+let mut file = std::fs::File::create("foo.sqlog").unwrap();
+```
+
+Create a [`QlogStreamer`] and start serialization to foo.sqlog
+using [`start_log()`]:
+
+```rust
+let mut streamer = qlog::QlogStreamer::new(
+ qlog::QLOG_VERSION.to_string(),
+ Some("Example qlog".to_string()),
+ Some("Example qlog description".to_string()),
+ None,
+ std::time::Instant::now(),
+ trace,
+ qlog::EventImportance::Base,
+ Box::new(file),
+);
+
+streamer.start_log().ok();
+```
+
+### Adding simple events
+
+Once logging has started you can stream events. Simple events can be written in
+one step using [`add_event()`]:
+
+```rust
+let event_data = qlog::events::EventData::MetricsUpdated(
+ qlog::events::quic::MetricsUpdated {
+ min_rtt: Some(1.0),
+ smoothed_rtt: Some(1.0),
+ latest_rtt: Some(1.0),
+ rtt_variance: Some(1.0),
+ pto_count: Some(1),
+ congestion_window: Some(1234),
+ bytes_in_flight: Some(5678),
+ ssthresh: None,
+ packets_in_flight: None,
+ pacing_rate: None,
+ },
+);
+
+let event = qlog::events::Event::with_time(0.0, event_data);
+streamer.add_event(event).ok();
+```
+
+### Adding events with frames
+Some events contain optional arrays of QUIC frames. If the event has
+`Some(Vec<QuicFrame>)`, even if it is empty, the streamer enters a frame
+serializing mode that must be finalized before other events can be logged.
+
+In this example, a `PacketSent` event is created with an empty frame array and
+frames are written out later:
+
+```rust
+let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
+let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
+
+let pkt_hdr = qlog::events::quic::PacketHeader::with_type(
+ qlog::events::quic::PacketType::OneRtt,
+ 0,
+ Some(0x00000001),
+ Some(&scid),
+ Some(&dcid),
+);
+
+let event_data =
+ qlog::events::EventData::PacketSent(qlog::events::quic::PacketSent {
+ header: pkt_hdr,
+ frames: Some(vec![]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: None,
+ datagram_id: None,
+});
+
+let event = qlog::events::Event::with_time(0.0, event_data);
+
+streamer.add_event(event).ok();
+```
+
+In this example, the frames contained in the QUIC packet
+are PING and PADDING. Each frame is written using the
+[`add_frame()`] method. Frame writing is concluded with
+[`finish_frames()`].
+
+```rust
+let ping = qlog::events::quic::QuicFrame::Ping;
+let padding = qlog::events::quic::QuicFrame::Padding;
+
+streamer.add_frame(ping, false).ok();
+streamer.add_frame(padding, false).ok();
+
+streamer.finish_frames().ok();
+```
+
+Once all events have been written, the log
+can be finalized with [`finish_log()`]:
+
+```rust
+streamer.finish_log().ok();
+```
+
+### Serializing
+
+Serialization to JSON occurs as methods on the [`QlogStreamer`]
+are called. No additional steps are required.
+
+[`Trace`]: struct.Trace.html
+[`TraceSeq`]: struct.TraceSeq.html
+[`VantagePoint`]: struct.VantagePoint.html
+[`Configuration`]: struct.Configuration.html
+[`qlog::Trace.events`]: struct.Trace.html#structfield.events
+[`push_event()`]: struct.Trace.html#method.push_event
+[`packet_sent_min()`]: event/struct.Event.html#method.packet_sent_min
+[`QuicFrame::crypto()`]: enum.QuicFrame.html#variant.Crypto
+[`QlogStreamer`]: struct.QlogStreamer.html
+[`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
+[`start_log()`]: struct.QlogStreamer.html#method.start_log
+[`add_event()`]: struct.QlogStreamer.html#method.add_event
+[`add_frame()`]: struct.QlogStreamer.html#method.add_frame
+[`finish_frames()`]: struct.QlogStreamer.html#method.finish_frames
+[`finish_log()`]: struct.QlogStreamer.html#method.finish_log
\ No newline at end of file
diff --git a/qlog/src/events/connectivity.rs b/qlog/src/events/connectivity.rs
new file mode 100644
index 0000000..1885ab0
--- /dev/null
+++ b/qlog/src/events/connectivity.rs
@@ -0,0 +1,122 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+use super::ApplicationErrorCode;
+use super::Bytes;
+use super::ConnectionErrorCode;
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum TransportOwner {
+ Local,
+ Remote,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum ConnectionState {
+ Attempted,
+ Reset,
+ Handshake,
+ Active,
+ Keepalive,
+ Draining,
+ Closed,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum ConnectivityEventType {
+ ServerListening,
+ ConnectionStarted,
+ ConnectionClosed,
+ ConnectionIdUpdated,
+ SpinBitUpdated,
+ ConnectionStateUpdated,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct ServerListening {
+ ip_v4: Option<String>, // human-readable or bytes
+ ip_v6: Option<String>, // human-readable or bytes
+ port_v4: u32,
+ port_v6: u32,
+
+ retry_required: Option<bool>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct ConnectionStarted {
+ ip_version: String, // "v4" or "v6"
+ src_ip: String, // human-readable or bytes
+ dst_ip: String, // human-readable or bytes
+
+ protocol: Option<String>,
+ src_port: u32,
+ dst_port: u32,
+
+ src_cid: Option<Bytes>,
+ dst_cid: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct ConnectionClosed {
+ owner: Option<TransportOwner>,
+
+ connection_code: Option<ConnectionErrorCode>,
+ application_code: Option<ApplicationErrorCode>,
+ internal_code: Option<u32>,
+
+ reason: Option<String>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct ConnectionIdUpdated {
+ owner: Option<TransportOwner>,
+
+ old: Option<Bytes>,
+ new: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct SpinBitUpdated {
+ state: bool,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct ConnectionStateUpdated {
+ old: Option<ConnectionState>,
+ new: ConnectionState,
+}
diff --git a/qlog/src/events/h3.rs b/qlog/src/events/h3.rs
new file mode 100644
index 0000000..c94185c
--- /dev/null
+++ b/qlog/src/events/h3.rs
@@ -0,0 +1,304 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+use super::RawInfo;
+use crate::Bytes;
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum H3Owner {
+ Local,
+ Remote,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum H3StreamType {
+ Data,
+ Control,
+ Push,
+ Reserved,
+ QpackEncode,
+ QpackDecode,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum H3PushDecision {
+ Claimed,
+ Abandoned,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum Http3EventType {
+ ParametersSet,
+ ParametersRestored,
+ StreamTypeSet,
+ FrameCreated,
+ FrameParsed,
+ PushResolved,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum ApplicationError {
+ HttpNoError,
+ HttpGeneralProtocolError,
+ HttpInternalError,
+ HttpRequestCancelled,
+ HttpIncompleteRequest,
+ HttpConnectError,
+ HttpFrameError,
+ HttpExcessiveLoad,
+ HttpVersionFallback,
+ HttpIdError,
+ HttpStreamCreationError,
+ HttpClosedCriticalStream,
+ HttpEarlyResponse,
+ HttpMissingSettings,
+ HttpUnexpectedFrame,
+ HttpRequestRejection,
+ HttpSettingsError,
+ Unknown,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct HttpHeader {
+ pub name: String,
+ pub value: String,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct Setting {
+ pub name: String,
+ pub value: String,
+}
+
+// ================================================================== //
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum Http3FrameTypeName {
+ Data,
+ Headers,
+ CancelPush,
+ Settings,
+ PushPromise,
+ Goaway,
+ MaxPushId,
+ DuplicatePush,
+ Reserved,
+ Unknown,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub enum Http3Frame {
+ Data {
+ frame_type: Http3FrameTypeName,
+
+ raw: Option<Bytes>,
+ },
+
+ Headers {
+ frame_type: Http3FrameTypeName,
+ headers: Vec<HttpHeader>,
+ },
+
+ CancelPush {
+ frame_type: Http3FrameTypeName,
+ push_id: String,
+ },
+
+ Settings {
+ frame_type: Http3FrameTypeName,
+ settings: Vec<Setting>,
+ },
+
+ PushPromise {
+ frame_type: Http3FrameTypeName,
+ push_id: String,
+ headers: Vec<HttpHeader>,
+ },
+
+ Goaway {
+ frame_type: Http3FrameTypeName,
+ stream_id: String,
+ },
+
+ MaxPushId {
+ frame_type: Http3FrameTypeName,
+ push_id: String,
+ },
+
+ DuplicatePush {
+ frame_type: Http3FrameTypeName,
+ push_id: String,
+ },
+
+ Reserved {
+ frame_type: Http3FrameTypeName,
+ },
+
+ Unknown {
+ frame_type: Http3FrameTypeName,
+ },
+}
+
+impl Http3Frame {
+ pub fn data(raw: Option<Bytes>) -> Self {
+ Http3Frame::Data {
+ frame_type: Http3FrameTypeName::Data,
+ raw,
+ }
+ }
+
+ pub fn headers(headers: Vec<HttpHeader>) -> Self {
+ Http3Frame::Headers {
+ frame_type: Http3FrameTypeName::Headers,
+ headers,
+ }
+ }
+
+ pub fn cancel_push(push_id: String) -> Self {
+ Http3Frame::CancelPush {
+ frame_type: Http3FrameTypeName::CancelPush,
+ push_id,
+ }
+ }
+
+ pub fn settings(settings: Vec<Setting>) -> Self {
+ Http3Frame::Settings {
+ frame_type: Http3FrameTypeName::Settings,
+ settings,
+ }
+ }
+
+ pub fn push_promise(push_id: String, headers: Vec<HttpHeader>) -> Self {
+ Http3Frame::PushPromise {
+ frame_type: Http3FrameTypeName::PushPromise,
+ push_id,
+ headers,
+ }
+ }
+
+ pub fn goaway(stream_id: String) -> Self {
+ Http3Frame::Goaway {
+ frame_type: Http3FrameTypeName::Goaway,
+ stream_id,
+ }
+ }
+
+ pub fn max_push_id(push_id: String) -> Self {
+ Http3Frame::MaxPushId {
+ frame_type: Http3FrameTypeName::MaxPushId,
+ push_id,
+ }
+ }
+
+ pub fn duplicate_push(push_id: String) -> Self {
+ Http3Frame::DuplicatePush {
+ frame_type: Http3FrameTypeName::DuplicatePush,
+ push_id,
+ }
+ }
+
+ pub fn reserved() -> Self {
+ Http3Frame::Reserved {
+ frame_type: Http3FrameTypeName::Reserved,
+ }
+ }
+
+ pub fn unknown() -> Self {
+ Http3Frame::Unknown {
+ frame_type: Http3FrameTypeName::Unknown,
+ }
+ }
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct H3ParametersSet {
+ pub owner: Option<H3Owner>,
+
+ pub max_header_list_size: Option<u64>,
+ pub max_table_capacity: Option<u64>,
+ pub blocked_streams_count: Option<u64>,
+
+ // qlog-defined
+ pub waits_for_settings: Option<bool>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct H3ParametersRestored {
+ pub max_header_list_size: Option<u64>,
+ pub max_table_capacity: Option<u64>,
+ pub blocked_streams_count: Option<u64>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct H3StreamTypeSet {
+ pub stream_id: u64,
+ pub owner: Option<H3Owner>,
+
+ pub old: Option<H3StreamType>,
+ pub new: H3StreamType,
+
+ pub associated_push_id: Option<u64>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct H3FrameCreated {
+ pub stream_id: u64,
+ pub length: Option<u64>,
+ pub frame: Http3Frame,
+
+ pub raw: Option<RawInfo>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct H3FrameParsed {
+ pub stream_id: u64,
+ pub length: Option<u64>,
+ pub frame: Http3Frame,
+
+ pub raw: Option<RawInfo>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct H3PushResolved {
+ push_id: Option<u64>,
+ stream_id: Option<u64>,
+
+ decision: Option<H3PushDecision>,
+}
diff --git a/qlog/src/events/mod.rs b/qlog/src/events/mod.rs
new file mode 100644
index 0000000..7d84ad2
--- /dev/null
+++ b/qlog/src/events/mod.rs
@@ -0,0 +1,691 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use crate::Bytes;
+use crate::Token;
+use h3::*;
+use qpack::*;
+use quic::*;
+
+use connectivity::ConnectivityEventType;
+
+use serde::Deserialize;
+use serde::Serialize;
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(untagged)]
+pub enum EventType {
+ ConnectivityEventType(ConnectivityEventType),
+
+ TransportEventType(TransportEventType),
+
+ SecurityEventType(SecurityEventType),
+
+ RecoveryEventType(RecoveryEventType),
+
+ Http3EventType(Http3EventType),
+
+ QpackEventType(QpackEventType),
+
+ GenericEventType(GenericEventType),
+
+ None,
+}
+
+impl Default for EventType {
+ fn default() -> Self {
+ EventType::None
+ }
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub enum TimeFormat {
+ Absolute,
+ Delta,
+ Relative,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct Event {
+ pub time: f32,
+
+ // Strictly, the qlog 02 spec says we should have a name field in the
+ // `Event` structure. However, serde's autogenerated Deserialize code
+ // struggles to read Events properly because the `EventData` types often
+ // alias. In order to work around that, we use can use a trick that will
+ // give serde autogen all the information that it needs while also produced
+ // a legal qlog. Specifically, strongly linking an EventData enum variant
+ // with the wire-format name.
+ //
+ // The trick is to use Adjacent Tagging
+ // (https://serde.rs/enum-representations.html#adjacently-tagged) with
+ // Struct flattening (https://serde.rs/attr-flatten.html). At a high level
+ // this first creates an `EventData` JSON object:
+ //
+ // {name: <enum variant name>, data: enum variant data }
+ //
+ // and then flattens those fields into the `Event` object.
+ #[serde(flatten)]
+ pub data: EventData,
+
+ pub protocol_type: Option<String>,
+ pub group_id: Option<String>,
+
+ pub time_format: Option<TimeFormat>,
+
+ #[serde(skip)]
+ ty: EventType,
+}
+
+impl Event {
+ /// Returns a new `Event` object with the provided time and data.
+ pub fn with_time(time: f32, data: EventData) -> Self {
+ let ty = EventType::from(&data);
+ Event {
+ time,
+ data,
+ protocol_type: Default::default(),
+ group_id: Default::default(),
+ time_format: Default::default(),
+ ty,
+ }
+ }
+
+ pub fn importance(&self) -> EventImportance {
+ self.ty.into()
+ }
+}
+
+impl PartialEq for Event {
+ // custom comparison to skip over the `ty` field
+ fn eq(&self, other: &Event) -> bool {
+ self.time == other.time &&
+ self.data == other.data &&
+ self.protocol_type == other.protocol_type &&
+ self.group_id == other.group_id &&
+ self.time_format == other.time_format
+ }
+}
+
+#[derive(Clone)]
+pub enum EventImportance {
+ Core,
+ Base,
+ Extra,
+}
+
+impl EventImportance {
+ /// Returns true if this importance level is included by `other`.
+ pub fn is_contained_in(&self, other: &EventImportance) -> bool {
+ match (other, self) {
+ (EventImportance::Core, EventImportance::Core) => true,
+
+ (EventImportance::Base, EventImportance::Core) |
+ (EventImportance::Base, EventImportance::Base) => true,
+
+ (EventImportance::Extra, EventImportance::Core) |
+ (EventImportance::Extra, EventImportance::Base) |
+ (EventImportance::Extra, EventImportance::Extra) => true,
+
+ (..) => false,
+ }
+ }
+}
+
+impl From<EventType> for EventImportance {
+ fn from(ty: EventType) -> Self {
+ match ty {
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ServerListening,
+ ) => EventImportance::Extra,
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionStarted,
+ ) => EventImportance::Base,
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionIdUpdated,
+ ) => EventImportance::Base,
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::SpinBitUpdated,
+ ) => EventImportance::Base,
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionStateUpdated,
+ ) => EventImportance::Base,
+
+ EventType::SecurityEventType(SecurityEventType::KeyUpdated) =>
+ EventImportance::Base,
+ EventType::SecurityEventType(SecurityEventType::KeyRetired) =>
+ EventImportance::Base,
+
+ EventType::TransportEventType(TransportEventType::ParametersSet) =>
+ EventImportance::Core,
+ EventType::TransportEventType(
+ TransportEventType::DatagramsReceived,
+ ) => EventImportance::Extra,
+ EventType::TransportEventType(TransportEventType::DatagramsSent) =>
+ EventImportance::Extra,
+ EventType::TransportEventType(
+ TransportEventType::DatagramDropped,
+ ) => EventImportance::Extra,
+ EventType::TransportEventType(TransportEventType::PacketReceived) =>
+ EventImportance::Core,
+ EventType::TransportEventType(TransportEventType::PacketSent) =>
+ EventImportance::Core,
+ EventType::TransportEventType(TransportEventType::PacketDropped) =>
+ EventImportance::Base,
+ EventType::TransportEventType(TransportEventType::PacketBuffered) =>
+ EventImportance::Base,
+ EventType::TransportEventType(
+ TransportEventType::StreamStateUpdated,
+ ) => EventImportance::Base,
+ EventType::TransportEventType(
+ TransportEventType::FramesProcessed,
+ ) => EventImportance::Extra,
+ EventType::TransportEventType(TransportEventType::DataMoved) =>
+ EventImportance::Base,
+
+ EventType::RecoveryEventType(RecoveryEventType::ParametersSet) =>
+ EventImportance::Base,
+ EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated) =>
+ EventImportance::Core,
+ EventType::RecoveryEventType(
+ RecoveryEventType::CongestionStateUpdated,
+ ) => EventImportance::Base,
+ EventType::RecoveryEventType(RecoveryEventType::LossTimerUpdated) =>
+ EventImportance::Extra,
+ EventType::RecoveryEventType(RecoveryEventType::PacketLost) =>
+ EventImportance::Core,
+ EventType::RecoveryEventType(
+ RecoveryEventType::MarkedForRetransmit,
+ ) => EventImportance::Extra,
+
+ EventType::Http3EventType(Http3EventType::ParametersSet) =>
+ EventImportance::Base,
+ EventType::Http3EventType(Http3EventType::StreamTypeSet) =>
+ EventImportance::Base,
+ EventType::Http3EventType(Http3EventType::FrameCreated) =>
+ EventImportance::Core,
+ EventType::Http3EventType(Http3EventType::FrameParsed) =>
+ EventImportance::Core,
+ EventType::Http3EventType(Http3EventType::PushResolved) =>
+ EventImportance::Extra,
+
+ EventType::QpackEventType(QpackEventType::StateUpdated) =>
+ EventImportance::Base,
+ EventType::QpackEventType(QpackEventType::StreamStateUpdated) =>
+ EventImportance::Base,
+ EventType::QpackEventType(QpackEventType::DynamicTableUpdated) =>
+ EventImportance::Extra,
+ EventType::QpackEventType(QpackEventType::HeadersEncoded) =>
+ EventImportance::Base,
+ EventType::QpackEventType(QpackEventType::HeadersDecoded) =>
+ EventImportance::Base,
+ EventType::QpackEventType(QpackEventType::InstructionCreated) =>
+ EventImportance::Base,
+ EventType::QpackEventType(QpackEventType::InstructionParsed) =>
+ EventImportance::Base,
+
+ _ => unimplemented!(),
+ }
+ }
+}
+
+#[derive(Serialize, Deserialize, Clone, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum EventCategory {
+ Connectivity,
+ Security,
+ Transport,
+ Recovery,
+ Http,
+ Qpack,
+
+ Error,
+ Warning,
+ Info,
+ Debug,
+ Verbose,
+ Simulation,
+}
+
+impl std::fmt::Display for EventCategory {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ let v = match self {
+ EventCategory::Connectivity => "connectivity",
+ EventCategory::Security => "security",
+ EventCategory::Transport => "transport",
+ EventCategory::Recovery => "recovery",
+ EventCategory::Http => "http",
+ EventCategory::Qpack => "qpack",
+ EventCategory::Error => "error",
+ EventCategory::Warning => "warning",
+ EventCategory::Info => "info",
+ EventCategory::Debug => "debug",
+ EventCategory::Verbose => "verbose",
+ EventCategory::Simulation => "simulation",
+ };
+
+ write!(f, "{}", v)
+ }
+}
+
+impl From<EventType> for EventCategory {
+ fn from(ty: EventType) -> Self {
+ match ty {
+ EventType::ConnectivityEventType(_) => EventCategory::Connectivity,
+ EventType::SecurityEventType(_) => EventCategory::Security,
+ EventType::TransportEventType(_) => EventCategory::Transport,
+ EventType::RecoveryEventType(_) => EventCategory::Recovery,
+ EventType::Http3EventType(_) => EventCategory::Http,
+ EventType::QpackEventType(_) => EventCategory::Qpack,
+
+ _ => unimplemented!(),
+ }
+ }
+}
+
+impl From<&EventData> for EventType {
+ fn from(event_data: &EventData) -> Self {
+ match event_data {
+ EventData::ServerListening { .. } =>
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ServerListening,
+ ),
+ EventData::ConnectionStarted { .. } =>
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionStarted,
+ ),
+ EventData::ConnectionClosed { .. } =>
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionClosed,
+ ),
+ EventData::ConnectionIdUpdated { .. } =>
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionIdUpdated,
+ ),
+ EventData::SpinBitUpdated { .. } => EventType::ConnectivityEventType(
+ ConnectivityEventType::SpinBitUpdated,
+ ),
+ EventData::ConnectionStateUpdated { .. } =>
+ EventType::ConnectivityEventType(
+ ConnectivityEventType::ConnectionStateUpdated,
+ ),
+
+ EventData::KeyUpdated { .. } =>
+ EventType::SecurityEventType(SecurityEventType::KeyUpdated),
+ EventData::KeyRetired { .. } =>
+ EventType::SecurityEventType(SecurityEventType::KeyRetired),
+
+ EventData::VersionInformation { .. } =>
+ EventType::TransportEventType(
+ TransportEventType::VersionInformation,
+ ),
+ EventData::AlpnInformation { .. } =>
+ EventType::TransportEventType(TransportEventType::AlpnInformation),
+ EventData::TransportParametersSet { .. } =>
+ EventType::TransportEventType(TransportEventType::ParametersSet),
+ EventData::TransportParametersRestored { .. } =>
+ EventType::TransportEventType(
+ TransportEventType::ParametersRestored,
+ ),
+ EventData::DatagramsReceived { .. } => EventType::TransportEventType(
+ TransportEventType::DatagramsReceived,
+ ),
+ EventData::DatagramsSent { .. } =>
+ EventType::TransportEventType(TransportEventType::DatagramsSent),
+ EventData::DatagramDropped { .. } =>
+ EventType::TransportEventType(TransportEventType::DatagramDropped),
+ EventData::PacketReceived { .. } =>
+ EventType::TransportEventType(TransportEventType::PacketReceived),
+ EventData::PacketSent { .. } =>
+ EventType::TransportEventType(TransportEventType::PacketSent),
+ EventData::PacketDropped { .. } =>
+ EventType::TransportEventType(TransportEventType::PacketDropped),
+ EventData::PacketBuffered { .. } =>
+ EventType::TransportEventType(TransportEventType::PacketBuffered),
+ EventData::PacketsAcked { .. } =>
+ EventType::TransportEventType(TransportEventType::PacketsAcked),
+ EventData::StreamStateUpdated { .. } =>
+ EventType::TransportEventType(
+ TransportEventType::StreamStateUpdated,
+ ),
+ EventData::FramesProcessed { .. } =>
+ EventType::TransportEventType(TransportEventType::FramesProcessed),
+ EventData::DataMoved { .. } =>
+ EventType::TransportEventType(TransportEventType::DataMoved),
+
+ EventData::RecoveryParametersSet { .. } =>
+ EventType::RecoveryEventType(RecoveryEventType::ParametersSet),
+ EventData::MetricsUpdated { .. } =>
+ EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated),
+ EventData::CongestionStateUpdated { .. } =>
+ EventType::RecoveryEventType(
+ RecoveryEventType::CongestionStateUpdated,
+ ),
+ EventData::LossTimerUpdated { .. } =>
+ EventType::RecoveryEventType(RecoveryEventType::LossTimerUpdated),
+ EventData::PacketLost { .. } =>
+ EventType::RecoveryEventType(RecoveryEventType::PacketLost),
+ EventData::MarkedForRetransmit { .. } =>
+ EventType::RecoveryEventType(
+ RecoveryEventType::MarkedForRetransmit,
+ ),
+
+ EventData::H3ParametersSet { .. } =>
+ EventType::Http3EventType(Http3EventType::ParametersSet),
+ EventData::H3ParametersRestored { .. } =>
+ EventType::Http3EventType(Http3EventType::ParametersRestored),
+ EventData::H3StreamTypeSet { .. } =>
+ EventType::Http3EventType(Http3EventType::StreamTypeSet),
+ EventData::H3FrameCreated { .. } =>
+ EventType::Http3EventType(Http3EventType::FrameCreated),
+ EventData::H3FrameParsed { .. } =>
+ EventType::Http3EventType(Http3EventType::FrameParsed),
+ EventData::H3PushResolved { .. } =>
+ EventType::Http3EventType(Http3EventType::PushResolved),
+
+ EventData::QpackStateUpdated { .. } =>
+ EventType::QpackEventType(QpackEventType::StateUpdated),
+ EventData::QpackStreamStateUpdated { .. } =>
+ EventType::QpackEventType(QpackEventType::StreamStateUpdated),
+ EventData::QpackDynamicTableUpdated { .. } =>
+ EventType::QpackEventType(QpackEventType::DynamicTableUpdated),
+ EventData::QpackHeadersEncoded { .. } =>
+ EventType::QpackEventType(QpackEventType::HeadersEncoded),
+ EventData::QpackHeadersDecoded { .. } =>
+ EventType::QpackEventType(QpackEventType::HeadersDecoded),
+ EventData::QpackInstructionCreated { .. } =>
+ EventType::QpackEventType(QpackEventType::InstructionCreated),
+ EventData::QpackInstructionParsed { .. } =>
+ EventType::QpackEventType(QpackEventType::InstructionParsed),
+
+ EventData::ConnectionError { .. } =>
+ EventType::GenericEventType(GenericEventType::ConnectionError),
+ EventData::ApplicationError { .. } =>
+ EventType::GenericEventType(GenericEventType::ApplicationError),
+ EventData::InternalError { .. } =>
+ EventType::GenericEventType(GenericEventType::InternalError),
+            EventData::InternalWarning { .. } =>
+                EventType::GenericEventType(GenericEventType::InternalWarning),
+ EventData::Message { .. } =>
+ EventType::GenericEventType(GenericEventType::Message),
+ EventData::Marker { .. } =>
+ EventType::GenericEventType(GenericEventType::Marker),
+ }
+ }
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum DataRecipient {
+ User,
+ Application,
+ Transport,
+ Network,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct RawInfo {
+ pub length: Option<u64>,
+ pub payload_length: Option<u64>,
+
+ pub data: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(tag = "name", content = "data")]
+#[allow(clippy::large_enum_variant)]
+pub enum EventData {
+ // Connectivity
+ #[serde(rename = "connectivity:server_listening")]
+ ServerListening(connectivity::ServerListening),
+
+ #[serde(rename = "connectivity:connection_started")]
+ ConnectionStarted(connectivity::ConnectionStarted),
+
+ #[serde(rename = "connectivity:connection_closed")]
+ ConnectionClosed(connectivity::ConnectionClosed),
+
+ #[serde(rename = "connectivity:connection_id_updated")]
+ ConnectionIdUpdated(connectivity::ConnectionIdUpdated),
+
+ #[serde(rename = "connectivity:spin_bit_updated")]
+ SpinBitUpdated(connectivity::SpinBitUpdated),
+
+ #[serde(rename = "connectivity:connection_state_updated")]
+ ConnectionStateUpdated(connectivity::ConnectionStateUpdated),
+
+ // Security
+ #[serde(rename = "security:key_updated")]
+ KeyUpdated(security::KeyUpdated),
+
+ #[serde(rename = "security:key_retired")]
+ KeyRetired(security::KeyRetired),
+
+ // Transport
+ #[serde(rename = "transport:version_information")]
+ VersionInformation(quic::VersionInformation),
+
+ #[serde(rename = "transport:alpn_information")]
+ AlpnInformation(quic::AlpnInformation),
+
+ #[serde(rename = "transport:parameters_set")]
+ TransportParametersSet(quic::TransportParametersSet),
+
+ #[serde(rename = "transport:parameters_restored")]
+ TransportParametersRestored(quic::TransportParametersRestored),
+
+ #[serde(rename = "transport:datagrams_received")]
+ DatagramsReceived(quic::DatagramsReceived),
+
+ #[serde(rename = "transport:datagrams_sent")]
+ DatagramsSent(quic::DatagramsSent),
+
+ #[serde(rename = "transport:datagram_dropped")]
+ DatagramDropped(quic::DatagramDropped),
+
+ #[serde(rename = "transport:packet_received")]
+ PacketReceived(quic::PacketReceived),
+
+ #[serde(rename = "transport:packet_sent")]
+ PacketSent(quic::PacketSent),
+
+ #[serde(rename = "transport:packet_dropped")]
+ PacketDropped(quic::PacketDropped),
+
+ #[serde(rename = "transport:packet_buffered")]
+ PacketBuffered(quic::PacketBuffered),
+
+    #[serde(rename = "transport:packets_acked")]
+    PacketsAcked(quic::PacketsAcked),
+
+ #[serde(rename = "transport:stream_state_updated")]
+ StreamStateUpdated(quic::StreamStateUpdated),
+
+ #[serde(rename = "transport:frames_processed")]
+ FramesProcessed(quic::FramesProcessed),
+
+ #[serde(rename = "transport:data_moved")]
+ DataMoved(quic::DataMoved),
+
+ // Recovery
+ #[serde(rename = "recovery:parameters_set")]
+ RecoveryParametersSet(quic::RecoveryParametersSet),
+
+ #[serde(rename = "recovery:metrics_updated")]
+ MetricsUpdated(quic::MetricsUpdated),
+
+ #[serde(rename = "recovery:congestion_state_updated")]
+ CongestionStateUpdated(quic::CongestionStateUpdated),
+
+ #[serde(rename = "recovery:loss_timer_updated")]
+ LossTimerUpdated(quic::LossTimerUpdated),
+
+ #[serde(rename = "recovery:packet_lost")]
+ PacketLost(quic::PacketLost),
+
+ #[serde(rename = "recovery:marked_for_retransmit")]
+ MarkedForRetransmit(quic::MarkedForRetransmit),
+
+ // HTTP/3
+ #[serde(rename = "http:parameters_set")]
+ H3ParametersSet(h3::H3ParametersSet),
+
+ #[serde(rename = "http:parameters_restored")]
+ H3ParametersRestored(h3::H3ParametersRestored),
+
+ #[serde(rename = "http:stream_type_set")]
+ H3StreamTypeSet(h3::H3StreamTypeSet),
+
+ #[serde(rename = "http:frame_created")]
+ H3FrameCreated(h3::H3FrameCreated),
+
+ #[serde(rename = "http:frame_parsed")]
+ H3FrameParsed(h3::H3FrameParsed),
+
+ #[serde(rename = "http:push_resolved")]
+ H3PushResolved(h3::H3PushResolved),
+
+ // QPACK
+ #[serde(rename = "qpack:state_updated")]
+ QpackStateUpdated(qpack::QpackStateUpdated),
+
+ #[serde(rename = "qpack:stream_state_updated")]
+ QpackStreamStateUpdated(qpack::QpackStreamStateUpdated),
+
+ #[serde(rename = "qpack:dynamic_table_updated")]
+ QpackDynamicTableUpdated(qpack::QpackDynamicTableUpdated),
+
+ #[serde(rename = "qpack:headers_encoded")]
+ QpackHeadersEncoded(qpack::QpackHeadersEncoded),
+
+ #[serde(rename = "qpack:headers_decoded")]
+ QpackHeadersDecoded(qpack::QpackHeadersDecoded),
+
+ #[serde(rename = "qpack:instruction_created")]
+ QpackInstructionCreated(qpack::QpackInstructionCreated),
+
+ #[serde(rename = "qpack:instruction_parsed")]
+ QpackInstructionParsed(qpack::QpackInstructionParsed),
+
+ // Generic
+ #[serde(rename = "generic:connection_error")]
+ ConnectionError {
+ code: Option<ConnectionErrorCode>,
+ description: Option<String>,
+ },
+
+ #[serde(rename = "generic:application_error")]
+ ApplicationError {
+ code: Option<ApplicationErrorCode>,
+ description: Option<String>,
+ },
+
+ #[serde(rename = "generic:internal_error")]
+ InternalError {
+ code: Option<u64>,
+ description: Option<String>,
+ },
+
+ #[serde(rename = "generic:internal_warning")]
+ InternalWarning {
+ code: Option<u64>,
+ description: Option<String>,
+ },
+
+ #[serde(rename = "generic:message")]
+ Message { message: String },
+
+ #[serde(rename = "generic:marker")]
+ Marker {
+ marker_type: String,
+ message: Option<String>,
+ },
+}
+
+impl EventData {
+ /// Returns size of `EventData` array of `QuicFrame`s if it exists.
+ pub fn contains_quic_frames(&self) -> Option<usize> {
+ // For some EventData variants, the frame array is optional
+ // but for others it is mandatory.
+ match self {
+ EventData::PacketSent(pkt) => pkt.frames.as_ref().map(|f| f.len()),
+
+ EventData::PacketReceived(pkt) =>
+ pkt.frames.as_ref().map(|f| f.len()),
+
+ EventData::PacketLost(pkt) => pkt.frames.as_ref().map(|f| f.len()),
+
+ EventData::MarkedForRetransmit(ev) => Some(ev.frames.len()),
+ EventData::FramesProcessed(ev) => Some(ev.frames.len()),
+
+ _ => None,
+ }
+ }
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum GenericEventType {
+ ConnectionError,
+ ApplicationError,
+ InternalError,
+ InternalWarning,
+
+ Message,
+ Marker,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(untagged)]
+pub enum ConnectionErrorCode {
+ TransportError(TransportError),
+ CryptoError(CryptoError),
+ Value(u64),
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(untagged)]
+pub enum ApplicationErrorCode {
+ ApplicationError(ApplicationError),
+ Value(u64),
+}
+
+// TODO
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum CryptoError {
+ Prefix,
+}
+
+pub mod quic;
+
+pub mod connectivity;
+pub mod h3;
+pub mod qpack;
+pub mod security;
diff --git a/qlog/src/events/qpack.rs b/qlog/src/events/qpack.rs
new file mode 100644
index 0000000..03303bf
--- /dev/null
+++ b/qlog/src/events/qpack.rs
@@ -0,0 +1,280 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+use super::h3::HttpHeader;
+use super::Bytes;
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackEventType {
+ StateUpdated,
+ StreamStateUpdated,
+ DynamicTableUpdated,
+ HeadersEncoded,
+ HeadersDecoded,
+ InstructionCreated,
+ InstructionParsed,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackOwner {
+ Local,
+ Remote,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackStreamState {
+ Blocked,
+ Unblocked,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackUpdateType {
+ Added,
+ Evicted,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackDynamicTableEntry {
+ pub index: u64,
+ pub name: Option<String>,
+ pub value: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackHeaderBlockPrefix {
+ pub required_insert_count: u64,
+ pub sign_bit: bool,
+ pub delta_base: u64,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackInstructionTypeName {
+ SetDynamicTableCapacityInstruction,
+ InsertWithNameReferenceInstruction,
+ InsertWithoutNameReferenceInstruction,
+ DuplicateInstruction,
+ HeaderAcknowledgementInstruction,
+ StreamCancellationInstruction,
+ InsertCountIncrementInstruction,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackTableType {
+ Static,
+ Dynamic,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub enum QPackInstruction {
+ SetDynamicTableCapacityInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ capacity: u64,
+ },
+
+ InsertWithNameReferenceInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ table_type: QpackTableType,
+
+ name_index: u64,
+
+ huffman_encoded_value: bool,
+ value_length: u64,
+ value: String,
+ },
+
+ InsertWithoutNameReferenceInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ huffman_encoded_name: bool,
+ name_length: u64,
+ name: String,
+
+ huffman_encoded_value: bool,
+ value_length: u64,
+ value: String,
+ },
+
+ DuplicateInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ index: u64,
+ },
+
+ HeaderAcknowledgementInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ stream_id: String,
+ },
+
+ StreamCancellationInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ stream_id: String,
+ },
+
+ InsertCountIncrementInstruction {
+ instruction_type: QpackInstructionTypeName,
+
+ increment: u64,
+ },
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QpackHeaderBlockRepresentationTypeName {
+ IndexedHeaderField,
+ LiteralHeaderFieldWithName,
+ LiteralHeaderFieldWithoutName,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub enum QpackHeaderBlockRepresentation {
+ IndexedHeaderField {
+ header_field_type: QpackHeaderBlockRepresentationTypeName,
+
+ table_type: QpackTableType,
+ index: u64,
+
+ is_post_base: Option<bool>,
+ },
+
+ LiteralHeaderFieldWithName {
+ header_field_type: QpackHeaderBlockRepresentationTypeName,
+
+ preserve_literal: bool,
+ table_type: QpackTableType,
+ name_index: u64,
+
+ huffman_encoded_value: bool,
+ value_length: u64,
+ value: String,
+
+ is_post_base: Option<bool>,
+ },
+
+ LiteralHeaderFieldWithoutName {
+ header_field_type: QpackHeaderBlockRepresentationTypeName,
+
+ preserve_literal: bool,
+ table_type: QpackTableType,
+ name_index: u64,
+
+ huffman_encoded_name: bool,
+ name_length: u64,
+ name: String,
+
+ huffman_encoded_value: bool,
+ value_length: u64,
+ value: String,
+
+ is_post_base: Option<bool>,
+ },
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackStateUpdated {
+ pub owner: Option<QpackOwner>,
+
+ pub dynamic_table_capacity: Option<u64>,
+ pub dynamic_table_size: Option<u64>,
+
+ pub known_received_count: Option<u64>,
+ pub current_insert_count: Option<u64>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackStreamStateUpdated {
+ pub stream_id: u64,
+
+ pub state: QpackStreamState,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackDynamicTableUpdated {
+ pub update_type: QpackUpdateType,
+
+ pub entries: Vec<QpackDynamicTableEntry>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackHeadersEncoded {
+ pub stream_id: Option<u64>,
+
+ pub headers: Option<HttpHeader>,
+
+ pub block_prefix: QpackHeaderBlockPrefix,
+ pub header_block: Vec<QpackHeaderBlockRepresentation>,
+
+ pub length: Option<u32>,
+ pub raw: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackHeadersDecoded {
+ pub stream_id: Option<u64>,
+
+ pub headers: Option<HttpHeader>,
+
+ pub block_prefix: QpackHeaderBlockPrefix,
+ pub header_block: Vec<QpackHeaderBlockRepresentation>,
+
+ pub length: Option<u32>,
+ pub raw: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackInstructionCreated {
+ pub instruction: QPackInstruction,
+
+ pub length: Option<u32>,
+ pub raw: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct QpackInstructionParsed {
+ pub instruction: QPackInstruction,
+
+ pub length: Option<u32>,
+ pub raw: Option<Bytes>,
+}
diff --git a/qlog/src/events/quic.rs b/qlog/src/events/quic.rs
new file mode 100644
index 0000000..2495142
--- /dev/null
+++ b/qlog/src/events/quic.rs
@@ -0,0 +1,763 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+use super::connectivity::TransportOwner;
+use super::Bytes;
+use super::DataRecipient;
+use super::RawInfo;
+use super::Token;
+use crate::HexSlice;
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum PacketType {
+ Initial,
+ Handshake,
+
+ #[serde(rename = "0RTT")]
+ ZeroRtt,
+
+ #[serde(rename = "1RTT")]
+ OneRtt,
+
+ Retry,
+ VersionNegotiation,
+ Unknown,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum PacketNumberSpace {
+ Initial,
+ Handshake,
+ ApplicationData,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
+pub struct PacketHeader {
+ pub packet_type: PacketType,
+ pub packet_number: u64,
+
+ pub flags: Option<u8>,
+ pub token: Option<Token>,
+
+ pub length: Option<u16>,
+
+ pub version: Option<Bytes>,
+
+ pub scil: Option<u8>,
+ pub dcil: Option<u8>,
+ pub scid: Option<Bytes>,
+ pub dcid: Option<Bytes>,
+}
+
+impl PacketHeader {
+ #[allow(clippy::too_many_arguments)]
+ /// Creates a new PacketHeader.
+ pub fn new(
+ packet_type: PacketType, packet_number: u64, flags: Option<u8>,
+ token: Option<Token>, length: Option<u16>, version: Option<u32>,
+ scid: Option<&[u8]>, dcid: Option<&[u8]>,
+ ) -> Self {
+ let (scil, scid) = match scid {
+ Some(cid) => (
+ Some(cid.len() as u8),
+ Some(format!("{}", HexSlice::new(&cid))),
+ ),
+
+ None => (None, None),
+ };
+
+ let (dcil, dcid) = match dcid {
+ Some(cid) => (
+ Some(cid.len() as u8),
+ Some(format!("{}", HexSlice::new(&cid))),
+ ),
+
+ None => (None, None),
+ };
+
+ let version = version.map(|v| format!("{:x?}", v));
+
+ PacketHeader {
+ packet_type,
+ packet_number,
+ flags,
+ token,
+ length,
+ version,
+ scil,
+ dcil,
+ scid,
+ dcid,
+ }
+ }
+
+ /// Creates a new PacketHeader.
+ ///
+ /// Once a QUIC connection has formed, version, dcid and scid are stable, so
+ /// there are space benefits to not logging them in every packet, especially
+ /// PacketType::OneRtt.
+ pub fn with_type(
+ ty: PacketType, packet_number: u64, version: Option<u32>,
+ scid: Option<&[u8]>, dcid: Option<&[u8]>,
+ ) -> Self {
+ match ty {
+ PacketType::OneRtt => PacketHeader::new(
+ ty,
+ packet_number,
+ None,
+ None,
+ None,
+ None,
+ None,
+ None,
+ ),
+
+ _ => PacketHeader::new(
+ ty,
+ packet_number,
+ None,
+ None,
+ None,
+ version,
+ scid,
+ dcid,
+ ),
+ }
+ }
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum StreamType {
+ Bidirectional,
+ Unidirectional,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum StreamSide {
+ Sending,
+ Receiving,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum StreamState {
+ // bidirectional stream states, draft-23 3.4.
+ Idle,
+ Open,
+ HalfClosedLocal,
+ HalfClosedRemote,
+ Closed,
+
+ // sending-side stream states, draft-23 3.1.
+ Ready,
+ Send,
+ DataSent,
+ ResetSent,
+ ResetReceived,
+
+ // receive-side stream states, draft-23 3.2.
+ Receive,
+ SizeKnown,
+ DataRead,
+ ResetRead,
+
+ // both-side states
+ DataReceived,
+
+ // qlog-defined
+ Destroyed,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum ErrorSpace {
+ TransportError,
+ ApplicationError,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum TransportError {
+ NoError,
+ InternalError,
+ ServerBusy,
+ FlowControlError,
+ StreamLimitError,
+ StreamStateError,
+ FinalSizeError,
+ FrameEncodingError,
+ TransportParameterError,
+ ProtocolViolation,
+ InvalidMigration,
+ CryptoBufferExceeded,
+ Unknown,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum TransportEventType {
+ VersionInformation,
+ AlpnInformation,
+
+ ParametersSet,
+ ParametersRestored,
+
+ DatagramsSent,
+ DatagramsReceived,
+ DatagramDropped,
+
+ PacketSent,
+ PacketReceived,
+ PacketDropped,
+ PacketBuffered,
+ PacketsAcked,
+
+ FramesProcessed,
+
+ StreamStateUpdated,
+
+ DataMoved,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum TransportEventTrigger {
+ Line,
+ Retransmit,
+ KeysUnavailable,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum SecurityEventType {
+ KeyUpdated,
+ KeyRetired,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum SecurityEventTrigger {
+ Tls,
+ Implicit,
+ RemoteUpdate,
+ LocalUpdate,
+}
+
+#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum RecoveryEventType {
+ ParametersSet,
+ MetricsUpdated,
+ CongestionStateUpdated,
+ LossTimerUpdated,
+ PacketLost,
+ MarkedForRetransmit,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum RecoveryEventTrigger {
+ AckReceived,
+ PacketSent,
+ Alarm,
+ Unknown,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum LossTimerEventType {
+ Set,
+ Expired,
+ Cancelled,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum TimerType {
+ Ack,
+ Pto,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(untagged)]
+pub enum AckedRanges {
+ Single(Vec<Vec<u64>>),
+ Double(Vec<(u64, u64)>),
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum QuicFrameTypeName {
+ Padding,
+ Ping,
+ Ack,
+ ResetStream,
+ StopSending,
+ Crypto,
+ NewToken,
+ Stream,
+ MaxData,
+ MaxStreamData,
+ MaxStreams,
+ DataBlocked,
+ StreamDataBlocked,
+ StreamsBlocked,
+ NewConnectionId,
+ RetireConnectionId,
+ PathChallenge,
+ PathResponse,
+ ConnectionClose,
+ ApplicationClose,
+ HandshakeDone,
+ Datagram,
+ Unknown,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(tag = "frame_type")]
+#[serde(rename_all = "snake_case")]
+// Strictly, the qlog spec says that all these frame types have a frame_type
+// field. But instead of making that a rust object property, just use serde to
+// ensure it goes out on the wire. This means that deserialization of frames
+// also works automatically.
+pub enum QuicFrame {
+ Padding,
+
+ Ping,
+
+ Ack {
+ ack_delay: Option<f32>,
+ acked_ranges: Option<AckedRanges>,
+
+ ect1: Option<u64>,
+
+ ect0: Option<u64>,
+
+ ce: Option<u64>,
+ },
+
+ ResetStream {
+ stream_id: u64,
+ error_code: u64,
+ final_size: u64,
+ },
+
+ StopSending {
+ stream_id: u64,
+ error_code: u64,
+ },
+
+ Crypto {
+ offset: u64,
+ length: u64,
+ },
+
+ NewToken {
+ length: String,
+ token: String,
+ },
+
+ Stream {
+ stream_id: u64,
+ offset: u64,
+ length: u64,
+ fin: Option<bool>,
+
+ raw: Option<Bytes>,
+ },
+
+ MaxData {
+ maximum: u64,
+ },
+
+ MaxStreamData {
+ stream_id: u64,
+ maximum: u64,
+ },
+
+ MaxStreams {
+ stream_type: StreamType,
+ maximum: u64,
+ },
+
+ DataBlocked {
+ limit: u64,
+ },
+
+ StreamDataBlocked {
+ stream_id: u64,
+ limit: u64,
+ },
+
+ StreamsBlocked {
+ stream_type: StreamType,
+ limit: u64,
+ },
+
+ NewConnectionId {
+ sequence_number: u32,
+ retire_prior_to: u32,
+ length: u64,
+ connection_id: String,
+ reset_token: String,
+ },
+
+ RetireConnectionId {
+ sequence_number: u32,
+ },
+
+ PathChallenge {
+ data: Option<Bytes>,
+ },
+
+ PathResponse {
+ data: Option<Bytes>,
+ },
+
+ ConnectionClose {
+ error_space: ErrorSpace,
+ error_code: u64,
+ raw_error_code: Option<u64>,
+ reason: Option<String>,
+
+ trigger_frame_type: Option<u64>,
+ },
+
+ HandshakeDone,
+
+ Datagram {
+ length: u64,
+
+ raw: Option<Bytes>,
+ },
+
+ Unknown {
+ raw_frame_type: u64,
+ },
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PreferredAddress {
+ pub ip_v4: String,
+ pub ip_v6: String,
+
+ pub port_v4: u16,
+ pub port_v6: u16,
+
+ pub connection_id: Bytes,
+ pub stateless_reset_token: Token,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct VersionInformation {
+ pub server_versions: Option<Vec<Bytes>>,
+ pub client_versions: Option<Vec<Bytes>>,
+ pub chosen_version: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct AlpnInformation {
+ pub server_alpns: Option<Vec<Bytes>>,
+ pub client_alpns: Option<Vec<Bytes>>,
+ pub chosen_alpn: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct TransportParametersSet {
+ pub owner: Option<TransportOwner>,
+
+ pub resumption_allowed: Option<bool>,
+ pub early_data_enabled: Option<bool>,
+ pub tls_cipher: Option<String>,
+ pub aead_tag_length: Option<u8>,
+
+ pub original_destination_connection_id: Option<Bytes>,
+ pub initial_source_connection_id: Option<Bytes>,
+ pub retry_source_connection_id: Option<Bytes>,
+ pub stateless_reset_token: Option<Token>,
+ pub disable_active_migration: Option<bool>,
+
+ pub max_idle_timeout: Option<u64>,
+ pub max_udp_payload_size: Option<u32>,
+ pub ack_delay_exponent: Option<u16>,
+ pub max_ack_delay: Option<u16>,
+ pub active_connection_id_limit: Option<u32>,
+
+ pub initial_max_data: Option<u64>,
+ pub initial_max_stream_data_bidi_local: Option<u64>,
+ pub initial_max_stream_data_bidi_remote: Option<u64>,
+ pub initial_max_stream_data_uni: Option<u64>,
+ pub initial_max_streams_bidi: Option<u64>,
+ pub initial_max_streams_uni: Option<u64>,
+
+ pub preferred_address: Option<PreferredAddress>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct TransportParametersRestored {
+ pub disable_active_migration: Option<bool>,
+
+ pub max_idle_timeout: Option<u64>,
+ pub max_udp_payload_size: Option<u32>,
+ pub active_connection_id_limit: Option<u32>,
+
+ pub initial_max_data: Option<u64>,
+ pub initial_max_stream_data_bidi_local: Option<u64>,
+ pub initial_max_stream_data_bidi_remote: Option<u64>,
+ pub initial_max_stream_data_uni: Option<u64>,
+ pub initial_max_streams_bidi: Option<u64>,
+ pub initial_max_streams_uni: Option<u64>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct DatagramsReceived {
+ pub count: Option<u16>,
+
+ pub raw: Option<Vec<RawInfo>>,
+
+ pub datagram_ids: Option<Vec<u32>>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct DatagramsSent {
+ pub count: Option<u16>,
+
+ pub raw: Option<Vec<RawInfo>>,
+
+ pub datagram_ids: Option<Vec<u32>>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct DatagramDropped {
+ raw: Option<RawInfo>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PacketReceived {
+ pub header: PacketHeader,
+ // `frames` is defined here in the QLog schema specification. However,
+ // our streaming serializer requires serde to put the object at the end,
+ // so we define it there and depend on serde's preserve_order feature.
+ pub is_coalesced: Option<bool>,
+
+ pub retry_token: Option<Token>,
+
+ pub stateless_reset_token: Option<Bytes>,
+
+ pub supported_versions: Option<Vec<Bytes>>,
+
+ pub raw: Option<RawInfo>,
+ pub datagram_id: Option<u32>,
+
+ pub frames: Option<Vec<QuicFrame>>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PacketSent {
+ pub header: PacketHeader,
+ // `frames` is defined here in the QLog schema specification. However,
+ // our streaming serializer requires serde to put the object at the end,
+ // so we define it there and depend on serde's preserve_order feature.
+ pub is_coalesced: Option<bool>,
+
+ pub retry_token: Option<Token>,
+
+ pub stateless_reset_token: Option<Bytes>,
+
+ pub supported_versions: Option<Vec<Bytes>>,
+
+ pub raw: Option<RawInfo>,
+ pub datagram_id: Option<u32>,
+
+ pub frames: Option<Vec<QuicFrame>>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PacketDropped {
+ pub header: Option<PacketHeader>,
+
+ pub raw: Option<RawInfo>,
+ pub datagram_id: Option<u32>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PacketBuffered {
+ pub header: Option<PacketHeader>,
+
+ pub raw: Option<RawInfo>,
+ pub datagram_id: Option<u32>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PacketsAcked {
+ pub packet_number_space: Option<PacketNumberSpace>,
+ pub packet_numbers: Option<Vec<u64>>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct StreamStateUpdated {
+ pub stream_id: u64,
+ pub stream_type: Option<StreamType>,
+
+ pub old: Option<StreamState>,
+ pub new: StreamState,
+
+ pub stream_side: Option<StreamSide>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct FramesProcessed {
+ pub frames: Vec<QuicFrame>,
+
+ pub packet_number: Option<u64>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct DataMoved {
+ pub stream_id: Option<u64>,
+ pub offset: Option<u64>,
+ pub length: Option<u64>,
+
+ pub from: Option<DataRecipient>,
+ pub to: Option<DataRecipient>,
+
+ pub data: Option<Bytes>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct RecoveryParametersSet {
+ pub reordering_threshold: Option<u16>,
+ pub time_threshold: Option<f32>,
+ pub timer_granularity: Option<u16>,
+ pub initial_rtt: Option<f32>,
+
+ pub max_datagram_size: Option<u32>,
+ pub initial_congestion_window: Option<u64>,
+ pub minimum_congestion_window: Option<u32>,
+ pub loss_reduction_factor: Option<f32>,
+ pub persistent_congestion_threshold: Option<u16>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct MetricsUpdated {
+ pub min_rtt: Option<f32>,
+ pub smoothed_rtt: Option<f32>,
+ pub latest_rtt: Option<f32>,
+ pub rtt_variance: Option<f32>,
+
+ pub pto_count: Option<u16>,
+
+ pub congestion_window: Option<u64>,
+ pub bytes_in_flight: Option<u64>,
+
+ pub ssthresh: Option<u64>,
+
+ // qlog defined
+ pub packets_in_flight: Option<u64>,
+
+ pub pacing_rate: Option<u64>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct CongestionStateUpdated {
+ old: Option<String>,
+ new: String,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct LossTimerUpdated {
+ timer_type: Option<TimerType>,
+ packet_number_space: Option<PacketNumberSpace>,
+
+ event_type: LossTimerEventType,
+
+ delta: Option<f32>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct PacketLost {
+ pub header: Option<PacketHeader>,
+
+ pub frames: Option<Vec<QuicFrame>>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct MarkedForRetransmit {
+ pub frames: Vec<QuicFrame>,
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::testing::*;
+
+ #[test]
+ fn packet_header() {
+ let pkt_hdr = make_pkt_hdr(PacketType::Initial);
+
+ let log_string = r#"{
+ "packet_type": "initial",
+ "packet_number": 0,
+ "version": "1",
+ "scil": 8,
+ "dcil": 8,
+ "scid": "7e37e4dcc6682da8",
+ "dcid": "36ce104eee50101c"
+}"#;
+
+ assert_eq!(serde_json::to_string_pretty(&pkt_hdr).unwrap(), log_string);
+ }
+}
diff --git a/qlog/src/events/security.rs b/qlog/src/events/security.rs
new file mode 100644
index 0000000..2e31cd6
--- /dev/null
+++ b/qlog/src/events/security.rs
@@ -0,0 +1,66 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use serde::Deserialize;
+use serde::Serialize;
+
+use super::Bytes;
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum KeyType {
+ ServerInitialSecret,
+ ClientInitialSecret,
+
+ ServerHandshakeSecret,
+ ClientHandshakeSecret,
+
+ #[serde(rename = "server_0rtt_secret")]
+ Server0RttSecret,
+ #[serde(rename = "client_0rtt_secret")]
+ Client0RttSecret,
+ #[serde(rename = "server_1rtt_secret")]
+ Server1RttSecret,
+ #[serde(rename = "client_1rtt_secret")]
+ Client1RttSecret,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct KeyUpdated {
+ key_type: KeyType,
+ old: Option<Bytes>,
+ new: Option<Bytes>,
+ generation: Option<u32>,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct KeyRetired {
+ key_type: KeyType,
+ key: Option<Bytes>,
+ generation: Option<u32>,
+}
diff --git a/qlog/src/lib.rs b/qlog/src/lib.rs
new file mode 100644
index 0000000..90971c5
--- /dev/null
+++ b/qlog/src/lib.rs
@@ -0,0 +1,1046 @@
+// Copyright (C) 2019, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+//! The qlog crate is an implementation of the qlog [main logging schema],
+//! [QUIC event definitions], and [HTTP/3 and QPACK event definitions].
+//! The crate provides a qlog data model that can be used for traces with
+//! events. It supports serialization and deserialization but defers logging IO
+//! choices to applications.
+//!
+//! The crate uses Serde for conversion between Rust and JSON.
+//!
+//! [main logging schema]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
+//! [QUIC event definitions]:
+//! https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-quic-events.html
+//! [HTTP/3 and QPACK event definitions]:
+//! https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-h3-events.html
+//!
+//! Overview
+//! ---------------
+//! qlog is a hierarchical logging format, with a rough structure of:
+//!
+//! * Log
+//! * Trace(s)
+//! * Event(s)
+//!
+//! In practice, a single QUIC connection maps to a single Trace file with one
+//! or more Events. Applications can decide whether to combine Traces from
+//! different connections into the same Log.
+//!
+//! ## Buffered Traces with standard JSON
+//!
+//! A [`Trace`] is a single JSON object. It contains metadata such as the
+//! [`VantagePoint`] of capture and the [`Configuration`], and protocol event
+//! data in the [`Event`] array.
+//!
+//! JSON Traces allow applications to append events to them before eventually
+//! being serialized as a complete JSON object.
+//!
+//! ### Creating a Trace
+//!
+//! ```
+//! let mut trace = qlog::Trace::new(
+//! qlog::VantagePoint {
+//! name: Some("Example client".to_string()),
+//! ty: qlog::VantagePointType::Client,
+//! flow: None,
+//! },
+//! Some("Example qlog trace".to_string()),
+//! Some("Example qlog trace description".to_string()),
+//! Some(qlog::Configuration {
+//! time_offset: Some(0.0),
+//! original_uris: None,
+//! }),
+//! None,
+//! );
+//! ```
+//!
+//! ### Adding events to a Trace
+//!
+//! Qlog [`Event`] objects are added to [`qlog::Trace.events`].
+//!
+//! The following example demonstrates how to log a qlog QUIC `packet_sent`
+//! event containing a single Crypto frame. It constructs the necessary elements
+//! of the [`Event`], then appends it to the trace with [`push_event()`].
+//!
+//! ```
+//! # let mut trace = qlog::Trace::new (
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None
+//! # );
+//!
+//! let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
+//! let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
+//!
+//! let pkt_hdr = qlog::events::quic::PacketHeader::new(
+//! qlog::events::quic::PacketType::Initial,
+//! 0, // packet_number
+//! None, // flags
+//! None, // token
+//! None, // length
+//! Some(0x00000001), // version
+//! Some(&scid),
+//! Some(&dcid),
+//! );
+//!
+//! let frames = vec![qlog::events::quic::QuicFrame::Crypto {
+//! offset: 0,
+//! length: 0,
+//! }];
+//!
+//! let raw = qlog::events::RawInfo {
+//! length: Some(1251),
+//! payload_length: Some(1224),
+//! data: None,
+//! };
+//!
+//! let event_data =
+//! qlog::events::EventData::PacketSent(qlog::events::quic::PacketSent {
+//! header: pkt_hdr,
+//! frames: Some(frames),
+//! is_coalesced: None,
+//! retry_token: None,
+//! stateless_reset_token: None,
+//! supported_versions: None,
+//! raw: Some(raw),
+//! datagram_id: None,
+//! });
+//!
+//! trace.push_event(qlog::events::Event::with_time(0.0, event_data));
+//! ```
+//!
+//! ### Serializing
+//!
+//! The qlog crate has only been tested with `serde_json`, however
+//! other serializer targets might work.
+//!
+//! For example, serializing the trace created above:
+//!
+//! ```
+//! # let mut trace = qlog::Trace::new (
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None
+//! # );
+//! serde_json::to_string_pretty(&trace).unwrap();
+//! ```
+//!
+//! which would generate the following:
+//!
+//! ```ignore
+//! {
+//! "vantage_point": {
+//! "name": "Example client",
+//! "type": "client"
+//! },
+//! "title": "Example qlog trace",
+//! "description": "Example qlog trace description",
+//! "configuration": {
+//! "time_offset": 0.0
+//! },
+//! "events": [
+//! {
+//! "time": 0.0,
+//! "name": "transport:packet_sent",
+//! "data": {
+//! "header": {
+//! "packet_type": "initial",
+//! "packet_number": 0,
+//! "version": "1",
+//! "scil": 8,
+//! "dcil": 8,
+//! "scid": "7e37e4dcc6682da8",
+//! "dcid": "36ce104eee50101c"
+//! },
+//! "raw": {
+//! "length": 1251,
+//! "payload_length": 1224
+//! },
+//! "frames": [
+//! {
+//! "frame_type": "crypto",
+//! "offset": 0,
+//! "length": 0
+//! }
+//! ]
+//! }
+//! }
+//! ]
+//! }
+//! ```
+//!
+//! ## Streaming Traces with JSON Text Sequences (JSON-SEQ)
+//!
+//! To help support streaming serialization of qlogs,
+//! draft-ietf-quic-qlog-main-schema-01 introduced support for RFC 7464 JSON
+//! Text Sequences (JSON-SEQ). The qlog crate supports this format and provides
+//! utilities that aid streaming.
+//!
+//! A [`TraceSeq`] contains metadata such as the [`VantagePoint`] of capture and
+//! the [`Configuration`]. However, protocol event data is handled as separate
+//! lines containing a record separator character, a serialized [`Event`], and a
+//! newline.
+//!
+//! ### Creating a TraceSeq
+//!
+//! ```
+//! let mut trace = qlog::TraceSeq::new(
+//! qlog::VantagePoint {
+//! name: Some("Example client".to_string()),
+//! ty: qlog::VantagePointType::Client,
+//! flow: None,
+//! },
+//! Some("Example qlog trace".to_string()),
+//! Some("Example qlog trace description".to_string()),
+//! Some(qlog::Configuration {
+//! time_offset: Some(0.0),
+//! original_uris: None,
+//! }),
+//! None,
+//! );
+//! ```
+//!
+//! Create an object with the [`Write`] trait:
+//!
+//! ```
+//! let mut file = std::fs::File::create("foo.sqlog").unwrap();
+//! ```
+//!
+//! Create a [`QlogStreamer`] and start serialization to foo.sqlog
+//! using [`start_log()`]:
+//!
+//! ```
+//! # let mut trace = qlog::TraceSeq::new(
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None,
+//! # );
+//! # let mut file = std::fs::File::create("foo.sqlog").unwrap();
+//! let mut streamer = qlog::streamer::QlogStreamer::new(
+//! qlog::QLOG_VERSION.to_string(),
+//! Some("Example qlog".to_string()),
+//! Some("Example qlog description".to_string()),
+//! None,
+//! std::time::Instant::now(),
+//! trace,
+//! qlog::events::EventImportance::Base,
+//! Box::new(file),
+//! );
+//!
+//! streamer.start_log().ok();
+//! ```
+//!
+//! ### Adding simple events
+//!
+//! Once logging has started you can stream events. Simple events
+//! can be written in one step using [`add_event()`]:
+//!
+//! ```
+//! # let mut trace = qlog::TraceSeq::new(
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None,
+//! # );
+//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
+//! # let mut streamer = qlog::streamer::QlogStreamer::new(
+//! # qlog::QLOG_VERSION.to_string(),
+//! # Some("Example qlog".to_string()),
+//! # Some("Example qlog description".to_string()),
+//! # None,
+//! # std::time::Instant::now(),
+//! # trace,
+//! # qlog::events::EventImportance::Base,
+//! # Box::new(file),
+//! # );
+//! let event_data = qlog::events::EventData::MetricsUpdated(
+//! qlog::events::quic::MetricsUpdated {
+//! min_rtt: Some(1.0),
+//! smoothed_rtt: Some(1.0),
+//! latest_rtt: Some(1.0),
+//! rtt_variance: Some(1.0),
+//! pto_count: Some(1),
+//! congestion_window: Some(1234),
+//! bytes_in_flight: Some(5678),
+//! ssthresh: None,
+//! packets_in_flight: None,
+//! pacing_rate: None,
+//! },
+//! );
+//!
+//! let event = qlog::events::Event::with_time(0.0, event_data);
+//! streamer.add_event(event).ok();
+//! ```
+//!
+//! ### Adding events with frames
+//! Some events contain optional arrays of QUIC frames. If the
+//! event has `Some(Vec<QuicFrame>)`, even if it is empty, the
+//! streamer enters a frame serializing mode that must be
+//! finalized before other events can be logged.
+//!
+//! In this example, a `PacketSent` event is created with an
+//! empty frame array and frames are written out later:
+//!
+//! ```
+//! # let mut trace = qlog::TraceSeq::new(
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None,
+//! # );
+//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
+//! # let mut streamer = qlog::streamer::QlogStreamer::new(
+//! # qlog::QLOG_VERSION.to_string(),
+//! # Some("Example qlog".to_string()),
+//! # Some("Example qlog description".to_string()),
+//! # None,
+//! # std::time::Instant::now(),
+//! # trace,
+//! # qlog::events::EventImportance::Base,
+//! # Box::new(file),
+//! # );
+//!
+//! let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
+//! let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
+//!
+//! let pkt_hdr = qlog::events::quic::PacketHeader::with_type(
+//! qlog::events::quic::PacketType::OneRtt,
+//! 0,
+//! Some(0x00000001),
+//! Some(&scid),
+//! Some(&dcid),
+//! );
+//!
+//! let event_data =
+//! qlog::events::EventData::PacketSent(qlog::events::quic::PacketSent {
+//! header: pkt_hdr,
+//! frames: Some(vec![]),
+//! is_coalesced: None,
+//! retry_token: None,
+//! stateless_reset_token: None,
+//! supported_versions: None,
+//! raw: None,
+//! datagram_id: None,
+//! });
+//!
+//! let event = qlog::events::Event::with_time(0.0, event_data);
+//!
+//! streamer.add_event(event).ok();
+//! ```
+//!
+//! In this example, the frames contained in the QUIC packet
+//! are PING and PADDING. Each frame is written using the
+//! [`add_frame()`] method. Frame writing is concluded with
+//! [`finish_frames()`].
+//!
+//! ```
+//! # let mut trace = qlog::TraceSeq::new(
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None,
+//! # );
+//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
+//! # let mut streamer = qlog::streamer::QlogStreamer::new(
+//! # qlog::QLOG_VERSION.to_string(),
+//! # Some("Example qlog".to_string()),
+//! # Some("Example qlog description".to_string()),
+//! # None,
+//! # std::time::Instant::now(),
+//! # trace,
+//! # qlog::events::EventImportance::Base,
+//! # Box::new(file),
+//! # );
+//!
+//! let ping = qlog::events::quic::QuicFrame::Ping;
+//! let padding = qlog::events::quic::QuicFrame::Padding;
+//!
+//! streamer.add_frame(ping, false).ok();
+//! streamer.add_frame(padding, false).ok();
+//!
+//! streamer.finish_frames().ok();
+//! ```
+//!
+//! Once all events have been written, the log
+//! can be finalized with [`finish_log()`]:
+//!
+//! ```
+//! # let mut trace = qlog::TraceSeq::new(
+//! # qlog::VantagePoint {
+//! # name: Some("Example client".to_string()),
+//! # ty: qlog::VantagePointType::Client,
+//! # flow: None,
+//! # },
+//! # Some("Example qlog trace".to_string()),
+//! # Some("Example qlog trace description".to_string()),
+//! # Some(qlog::Configuration {
+//! # time_offset: Some(0.0),
+//! # original_uris: None,
+//! # }),
+//! # None,
+//! # );
+//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
+//! # let mut streamer = qlog::streamer::QlogStreamer::new(
+//! # qlog::QLOG_VERSION.to_string(),
+//! # Some("Example qlog".to_string()),
+//! # Some("Example qlog description".to_string()),
+//! # None,
+//! # std::time::Instant::now(),
+//! # trace,
+//! # qlog::events::EventImportance::Base,
+//! # Box::new(file),
+//! # );
+//! streamer.finish_log().ok();
+//! ```
+//!
+//! ### Serializing
+//!
+//! Serialization to JSON occurs as methods on the [`QlogStreamer`]
+//! are called. No additional steps are required.
+//!
+//! [`Trace`]: struct.Trace.html
+//! [`TraceSeq`]: struct.TraceSeq.html
+//! [`VantagePoint`]: struct.VantagePoint.html
+//! [`Configuration`]: struct.Configuration.html
+//! [`qlog::Trace.events`]: struct.Trace.html#structfield.events
+//! [`push_event()`]: struct.Trace.html#method.push_event
+//! [`QlogStreamer`]: struct.QlogStreamer.html
+//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
+//! [`start_log()`]: struct.QlogStreamer.html#method.start_log
+//! [`add_event()`]: struct.QlogStreamer.html#method.add_event
+//! [`add_frame()`]: struct.QlogStreamer.html#method.add_frame
+//! [`finish_frames()`]: struct.QlogStreamer.html#method.finish_frames
+//! [`finish_log()`]: struct.QlogStreamer.html#method.finish_log
+
+use crate::events::quic::PacketHeader;
+use crate::events::Event;
+
+use serde::Deserialize;
+use serde::Serialize;
+
+/// A quiche qlog error.
+#[derive(Debug)]
+pub enum Error {
+ /// There is no more work to do.
+ Done,
+
+ /// The operation cannot be completed because it was attempted
+ /// in an invalid state.
+ InvalidState,
+
+    /// Invalid Qlog format.
+ InvalidFormat,
+
+ /// I/O error.
+ IoError(std::io::Error),
+}
+
+impl std::fmt::Display for Error {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(f, "{:?}", self)
+ }
+}
+
+impl std::error::Error for Error {
+ fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+ None
+ }
+}
+
+impl std::convert::From<std::io::Error> for Error {
+ fn from(err: std::io::Error) -> Self {
+ Error::IoError(err)
+ }
+}
+
+pub const QLOG_VERSION: &str = "0.3";
+
+pub type Bytes = String;
+
+/// A specialized [`Result`] type for quiche qlog operations.
+///
+/// This type is used throughout the public API for any operation that
+/// can produce an error.
+///
+/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
+pub type Result<T> = std::result::Result<T, Error>;
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone)]
+pub struct Qlog {
+ pub qlog_version: String,
+ pub qlog_format: String,
+ pub title: Option<String>,
+ pub description: Option<String>,
+ pub summary: Option<String>,
+
+ pub traces: Vec<Trace>,
+}
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, Debug)]
+pub struct QlogSeq {
+ pub qlog_version: String,
+ pub qlog_format: String,
+ pub title: Option<String>,
+ pub description: Option<String>,
+ pub summary: Option<String>,
+
+ pub trace: TraceSeq,
+}
+
+#[derive(Clone, Copy)]
+pub enum ImportanceLogLevel {
+ Core = 0,
+ Base = 1,
+ Extra = 2,
+}
+
+// We now commence data definitions heavily styled on the QLOG
+// schema definition. Data is serialized using serde.
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct Trace {
+ pub vantage_point: VantagePoint,
+ pub title: Option<String>,
+ pub description: Option<String>,
+
+ pub configuration: Option<Configuration>,
+
+ pub common_fields: Option<CommonFields>,
+
+ pub events: Vec<Event>,
+}
+
+/// Helper functions for using a qlog `Trace`.
+impl Trace {
+ /// Creates a new qlog trace
+ pub fn new(
+ vantage_point: VantagePoint, title: Option<String>,
+ description: Option<String>, configuration: Option<Configuration>,
+ common_fields: Option<CommonFields>,
+ ) -> Self {
+ Trace {
+ vantage_point,
+ title,
+ description,
+ configuration,
+ common_fields,
+ events: Vec::new(),
+ }
+ }
+
+ pub fn push_event(&mut self, event: Event) {
+ self.events.push(event);
+ }
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct TraceSeq {
+ pub vantage_point: VantagePoint,
+ pub title: Option<String>,
+ pub description: Option<String>,
+
+ pub configuration: Option<Configuration>,
+
+ pub common_fields: Option<CommonFields>,
+}
+
+/// Helper functions for using a qlog `TraceSeq`.
+impl TraceSeq {
+ /// Creates a new qlog trace
+ pub fn new(
+ vantage_point: VantagePoint, title: Option<String>,
+ description: Option<String>, configuration: Option<Configuration>,
+ common_fields: Option<CommonFields>,
+ ) -> Self {
+ TraceSeq {
+ vantage_point,
+ title,
+ description,
+ configuration,
+ common_fields,
+ }
+ }
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct VantagePoint {
+ pub name: Option<String>,
+
+ #[serde(rename = "type")]
+ pub ty: VantagePointType,
+
+ pub flow: Option<VantagePointType>,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum VantagePointType {
+ Client,
+ Server,
+ Network,
+ Unknown,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+pub struct Configuration {
+ pub time_offset: Option<f64>,
+
+ pub original_uris: Option<Vec<String>>,
+ // TODO: additionalUserSpecifiedProperty
+}
+
+impl Default for Configuration {
+ fn default() -> Self {
+ Configuration {
+ time_offset: Some(0.0),
+ original_uris: None,
+ }
+ }
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Serialize, Deserialize, Clone, Default, PartialEq, Debug)]
+pub struct CommonFields {
+ pub group_id: Option<String>,
+ pub protocol_type: Option<Vec<String>>,
+
+ pub reference_time: Option<f32>,
+ pub time_format: Option<String>,
+ // TODO: additionalUserSpecifiedProperty
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
+#[serde(rename_all = "snake_case")]
+pub enum TokenType {
+ Retry,
+ Resumption,
+ StatelessReset,
+}
+
+#[serde_with::skip_serializing_none]
+#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
+pub struct Token {
+ #[serde(rename(serialize = "type"))]
+ pub ty: Option<TokenType>,
+
+ pub length: Option<u32>,
+ pub data: Option<Bytes>,
+
+ pub details: Option<String>,
+}
+
+pub struct HexSlice<'a>(&'a [u8]);
+
+impl<'a> HexSlice<'a> {
+ pub fn new<T>(data: &'a T) -> HexSlice<'a>
+ where
+ T: ?Sized + AsRef<[u8]> + 'a,
+ {
+ HexSlice(data.as_ref())
+ }
+
+ pub fn maybe_string<T>(data: Option<&'a T>) -> Option<String>
+ where
+ T: ?Sized + AsRef<[u8]> + 'a,
+ {
+ data.map(|d| format!("{}", HexSlice::new(d)))
+ }
+}
+
+impl<'a> std::fmt::Display for HexSlice<'a> {
+ fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ for byte in self.0 {
+ write!(f, "{:02x}", byte)?;
+ }
+ Ok(())
+ }
+}
+
+#[doc(hidden)]
+pub mod testing {
+ use super::*;
+ use crate::events::quic::PacketType;
+
+ pub fn make_pkt_hdr(packet_type: PacketType) -> PacketHeader {
+ let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
+ let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
+
+ // Some(1251),
+ // Some(1224),
+
+ PacketHeader::new(
+ packet_type,
+ 0,
+ None,
+ None,
+ None,
+ Some(0x0000_0001),
+ Some(&scid),
+ Some(&dcid),
+ )
+ }
+
+ pub fn make_trace() -> Trace {
+ Trace::new(
+ VantagePoint {
+ name: None,
+ ty: VantagePointType::Server,
+ flow: None,
+ },
+ Some("Quiche qlog trace".to_string()),
+ Some("Quiche qlog trace description".to_string()),
+ Some(Configuration {
+ time_offset: Some(0.0),
+ original_uris: None,
+ }),
+ None,
+ )
+ }
+
+ pub fn make_trace_seq() -> TraceSeq {
+ TraceSeq::new(
+ VantagePoint {
+ name: None,
+ ty: VantagePointType::Server,
+ flow: None,
+ },
+ Some("Quiche qlog trace".to_string()),
+ Some("Quiche qlog trace description".to_string()),
+ Some(Configuration {
+ time_offset: Some(0.0),
+ original_uris: None,
+ }),
+ None,
+ )
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::events::quic::PacketSent;
+ use crate::events::quic::PacketType;
+ use crate::events::quic::QuicFrame;
+ use crate::events::EventData;
+ use crate::events::RawInfo;
+ use testing::*;
+
+ #[test]
+ fn packet_sent_event_no_frames() {
+ let log_string = r#"{
+ "time": 0.0,
+ "name": "transport:packet_sent",
+ "data": {
+ "header": {
+ "packet_type": "initial",
+ "packet_number": 0,
+ "version": "1",
+ "scil": 8,
+ "dcil": 8,
+ "scid": "7e37e4dcc6682da8",
+ "dcid": "36ce104eee50101c"
+ },
+ "raw": {
+ "length": 1251,
+ "payload_length": 1224
+ }
+ }
+}"#;
+
+ let pkt_hdr = make_pkt_hdr(PacketType::Initial);
+ let ev_data = EventData::PacketSent(PacketSent {
+ header: pkt_hdr.clone(),
+ frames: None,
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(RawInfo {
+ length: Some(1251),
+ payload_length: Some(1224),
+ data: None,
+ }),
+ datagram_id: None,
+ });
+
+ let ev = Event::with_time(0.0, ev_data);
+
+ assert_eq!(serde_json::to_string_pretty(&ev).unwrap(), log_string);
+ }
+
+ #[test]
+ fn packet_sent_event_some_frames() {
+ let log_string = r#"{
+ "time": 0.0,
+ "name": "transport:packet_sent",
+ "data": {
+ "header": {
+ "packet_type": "initial",
+ "packet_number": 0,
+ "version": "1",
+ "scil": 8,
+ "dcil": 8,
+ "scid": "7e37e4dcc6682da8",
+ "dcid": "36ce104eee50101c"
+ },
+ "raw": {
+ "length": 1251,
+ "payload_length": 1224
+ },
+ "frames": [
+ {
+ "frame_type": "padding"
+ },
+ {
+ "frame_type": "ping"
+ },
+ {
+ "frame_type": "stream",
+ "stream_id": 0,
+ "offset": 0,
+ "length": 100,
+ "fin": true
+ }
+ ]
+ }
+}"#;
+
+ let pkt_hdr = make_pkt_hdr(PacketType::Initial);
+
+ let mut frames = Vec::new();
+ frames.push(QuicFrame::Padding);
+ frames.push(QuicFrame::Ping);
+ frames.push(QuicFrame::Stream {
+ stream_id: 0,
+ offset: 0,
+ length: 100,
+ fin: Some(true),
+ raw: None,
+ });
+
+ let ev_data = EventData::PacketSent(PacketSent {
+ header: pkt_hdr.clone(),
+ frames: Some(frames),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(RawInfo {
+ length: Some(1251),
+ payload_length: Some(1224),
+ data: None,
+ }),
+ datagram_id: None,
+ });
+
+ let ev = Event::with_time(0.0, ev_data);
+ assert_eq!(serde_json::to_string_pretty(&ev).unwrap(), log_string);
+ }
+
+ #[test]
+ fn trace_no_events() {
+ let log_string = r#"{
+ "vantage_point": {
+ "type": "server"
+ },
+ "title": "Quiche qlog trace",
+ "description": "Quiche qlog trace description",
+ "configuration": {
+ "time_offset": 0.0
+ },
+ "events": []
+}"#;
+
+ let trace = make_trace();
+
+ let serialized = serde_json::to_string_pretty(&trace).unwrap();
+ assert_eq!(serialized, log_string);
+
+ let deserialized: Trace = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(deserialized, trace);
+ }
+
+ #[test]
+ fn trace_seq_no_events() {
+ let log_string = r#"{
+ "vantage_point": {
+ "type": "server"
+ },
+ "title": "Quiche qlog trace",
+ "description": "Quiche qlog trace description",
+ "configuration": {
+ "time_offset": 0.0
+ }
+}"#;
+
+ let trace = make_trace_seq();
+
+ let serialized = serde_json::to_string_pretty(&trace).unwrap();
+ assert_eq!(serialized, log_string);
+
+ let deserialized: TraceSeq = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(deserialized, trace);
+ }
+
+ #[test]
+ fn trace_single_transport_event() {
+ let log_string = r#"{
+ "vantage_point": {
+ "type": "server"
+ },
+ "title": "Quiche qlog trace",
+ "description": "Quiche qlog trace description",
+ "configuration": {
+ "time_offset": 0.0
+ },
+ "events": [
+ {
+ "time": 0.0,
+ "name": "transport:packet_sent",
+ "data": {
+ "header": {
+ "packet_type": "initial",
+ "packet_number": 0,
+ "version": "1",
+ "scil": 8,
+ "dcil": 8,
+ "scid": "7e37e4dcc6682da8",
+ "dcid": "36ce104eee50101c"
+ },
+ "raw": {
+ "length": 1251,
+ "payload_length": 1224
+ },
+ "frames": [
+ {
+ "frame_type": "stream",
+ "stream_id": 0,
+ "offset": 0,
+ "length": 100,
+ "fin": true
+ }
+ ]
+ }
+ }
+ ]
+}"#;
+
+ let mut trace = make_trace();
+
+ let pkt_hdr = make_pkt_hdr(PacketType::Initial);
+
+ let frames = vec![QuicFrame::Stream {
+ stream_id: 0,
+ offset: 0,
+ length: 100,
+ fin: Some(true),
+ raw: None,
+ }];
+ let event_data = EventData::PacketSent(PacketSent {
+ header: pkt_hdr,
+ frames: Some(frames),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(RawInfo {
+ length: Some(1251),
+ payload_length: Some(1224),
+ data: None,
+ }),
+ datagram_id: None,
+ });
+
+ let ev = Event::with_time(0.0, event_data);
+
+ trace.push_event(ev);
+
+ let serialized = serde_json::to_string_pretty(&trace).unwrap();
+ assert_eq!(serialized, log_string);
+
+ let deserialized: Trace = serde_json::from_str(&serialized).unwrap();
+ assert_eq!(deserialized, trace);
+ }
+}
+
+pub mod events;
+pub mod streamer;
diff --git a/qlog/src/streamer.rs b/qlog/src/streamer.rs
new file mode 100644
index 0000000..859e00b
--- /dev/null
+++ b/qlog/src/streamer.rs
@@ -0,0 +1,477 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use crate::events::quic::QuicFrame;
+use crate::events::EventData;
+use crate::events::EventImportance;
+use crate::events::EventType;
+
+/// A helper object specialized for streaming JSON-serialized qlog to a
+/// [`Write`] trait.
+///
+/// The object is responsible for the `Qlog` object that contains the
+/// provided `Trace`.
+///
+/// Serialization is progressively driven by method calls; once log streaming
+/// is started, `event::Events` can be written using `add_event()`. Some
+/// events can contain an array of `QuicFrame`s, when writing such an event,
+/// the streamer enters a frame-serialization mode where frames can be
+/// progressively written using `add_frame()`. This mode is concluded using
+/// `finish_frames()`. While serializing frames, any attempts to log
+/// additional events are ignored.
+///
+/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
+use super::*;
+
+#[derive(PartialEq, Debug)]
+pub enum StreamerState {
+ Initial,
+ Ready,
+ WritingFrames,
+ Finished,
+}
+
+pub struct QlogStreamer {
+ start_time: std::time::Instant,
+ writer: Box<dyn std::io::Write + Send + Sync>,
+ qlog: QlogSeq,
+ state: StreamerState,
+ log_level: EventImportance,
+ first_frame: bool,
+}
+
+impl QlogStreamer {
+ /// Creates a QlogStreamer object.
+ ///
+ /// It owns a `Qlog` object that contains the provided `Trace` containing
+ /// `Events`.
+ ///
+ /// All serialization will be written to the provided `Write` using the
+ /// JSON-SEQ format.
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ qlog_version: String, title: Option<String>, description: Option<String>,
+ summary: Option<String>, start_time: std::time::Instant, trace: TraceSeq,
+ log_level: EventImportance,
+ writer: Box<dyn std::io::Write + Send + Sync>,
+ ) -> Self {
+ let qlog = QlogSeq {
+ qlog_version,
+ qlog_format: "JSON-SEQ".to_string(),
+ title,
+ description,
+ summary,
+ trace,
+ };
+
+ QlogStreamer {
+ start_time,
+ writer,
+ qlog,
+ state: StreamerState::Initial,
+ log_level,
+ first_frame: false,
+ }
+ }
+
+ /// Starts qlog streaming serialization.
+ ///
+ /// This writes out the JSON-serialized form of all initial qlog information
+ /// `Event`s are separately appended using `add_event()` and
+ /// `add_event_with_instant()`.
+ pub fn start_log(&mut self) -> Result<()> {
+ if self.state != StreamerState::Initial {
+ return Err(Error::Done);
+ }
+
+ self.writer.as_mut().write_all(b"")?;
+ serde_json::to_writer(self.writer.as_mut(), &self.qlog)
+ .map_err(|_| Error::Done)?;
+ self.writer.as_mut().write_all(b"\n")?;
+
+ self.state = StreamerState::Ready;
+
+ Ok(())
+ }
+
+ /// Finishes qlog streaming serialization.
+ ///
+ /// The JSON-serialized output has remaining close delimiters added.
+ /// After this is called, no more serialization will occur.
+ pub fn finish_log(&mut self) -> Result<()> {
+ if self.state == StreamerState::Initial ||
+ self.state == StreamerState::Finished
+ {
+ return Err(Error::InvalidState);
+ }
+
+ self.state = StreamerState::Finished;
+
+ self.writer.as_mut().flush()?;
+
+ Ok(())
+ }
+
+ /// Writes a JSON-serialized `Event` using `std::time::Instant::now()`.
+ ///
+ /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
+ /// returned and the streamer enters a frame-serialization mode that is only
+ /// concluded by `finish_frames()`. In this mode, attempts to log additional
+ /// events are ignored.
+ ///
+ /// If the event contains no array of `QuicFrames` return `false`.
+ pub fn add_event_now(&mut self, event: Event) -> Result<bool> {
+ let now = std::time::Instant::now();
+
+ self.add_event_with_instant(event, now)
+ }
+
+    /// Writes a JSON-serialized `Event` using the provided `Event` and
+    /// Instant.
+ ///
+ /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
+ /// returned and the streamer enters a frame-serialization mode that is only
+ /// concluded by `finish_frames()`. In this mode, attempts to log additional
+ /// events are ignored.
+ ///
+ /// If the event contains no array of `QuicFrames` return `false`.
+ pub fn add_event_with_instant(
+ &mut self, mut event: Event, now: std::time::Instant,
+ ) -> Result<bool> {
+ if self.state != StreamerState::Ready {
+ return Err(Error::InvalidState);
+ }
+
+ if !event.importance().is_contained_in(&self.log_level) {
+ return Err(Error::Done);
+ }
+
+ let dur = if cfg!(test) {
+ std::time::Duration::from_secs(0)
+ } else {
+ now.duration_since(self.start_time)
+ };
+
+ let rel_time = dur.as_secs_f32() * 1000.0;
+ event.time = rel_time;
+
+ self.add_event(event)
+ }
+
+    /// Writes a JSON-serialized `Event` using the provided EventData and Instant.
+ ///
+ /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
+ /// returned and the streamer enters a frame-serialization mode that is only
+ /// concluded by `finish_frames()`. In this mode, attempts to log additional
+ /// events are ignored.
+ ///
+ /// If the event contains no array of `QuicFrames` return `false`.
+ pub fn add_event_data_with_instant(
+ &mut self, event_data: EventData, now: std::time::Instant,
+ ) -> Result<bool> {
+ if self.state != StreamerState::Ready {
+ return Err(Error::InvalidState);
+ }
+
+ let ty = EventType::from(&event_data);
+ if !EventImportance::from(ty).is_contained_in(&self.log_level) {
+ return Err(Error::Done);
+ }
+
+ let dur = if cfg!(test) {
+ std::time::Duration::from_secs(0)
+ } else {
+ now.duration_since(self.start_time)
+ };
+
+ let rel_time = dur.as_secs_f32() * 1000.0;
+ let event = Event::with_time(rel_time, event_data);
+
+ self.add_event(event)
+ }
+
+ /// Writes a JSON-serialized `Event` using the provided Event.
+ ///
+ /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
+ /// returned and the streamer enters a frame-serialization mode that is only
+ /// concluded by `finish_frames()`. In this mode, attempts to log additional
+ /// events are ignored.
+ ///
+ /// If the event contains no array of `QuicFrames` return `false`.
+ pub fn add_event(&mut self, event: Event) -> Result<bool> {
+ if self.state != StreamerState::Ready {
+ return Err(Error::InvalidState);
+ }
+
+ if !event.importance().is_contained_in(&self.log_level) {
+ return Err(Error::Done);
+ }
+
+ self.writer.as_mut().write_all(b"")?;
+
+ match event.data.contains_quic_frames() {
+ // If the event contains frames, we need to remove the closing JSON
+ // delimiters before writing out. These will be later restored by
+ // `finish_frames()`.
+ Some(frames_count) => {
+ match serde_json::to_string(&event) {
+ Ok(mut ev_out) => {
+ ev_out.truncate(ev_out.len() - 3);
+
+ if frames_count == 0 {
+ self.first_frame = true;
+ }
+
+ self.writer.as_mut().write_all(ev_out.as_bytes())?;
+ },
+
+ _ => return Err(Error::Done),
+ }
+
+ self.state = StreamerState::WritingFrames;
+ },
+
+ // If the event does not contain frames, it can be written
+ // immediately in full.
+ None => {
+ serde_json::to_writer(self.writer.as_mut(), &event)
+ .map_err(|_| Error::Done)?;
+ self.writer.as_mut().write_all(b"\n")?;
+
+ self.state = StreamerState::Ready
+ },
+ }
+
+ Ok(event.data.contains_quic_frames().is_some())
+ }
+
+ /// Writes a JSON-serialized `QuicFrame`.
+ ///
+ /// Only valid while in the frame-serialization mode.
+ pub fn add_frame(&mut self, frame: QuicFrame, last: bool) -> Result<()> {
+ if self.state != StreamerState::WritingFrames {
+ return Err(Error::InvalidState);
+ }
+
+ if !self.first_frame {
+ self.writer.as_mut().write_all(b",")?;
+ } else {
+ self.first_frame = false;
+ }
+
+ serde_json::to_writer(self.writer.as_mut(), &frame)
+ .map_err(|_| Error::Done)?;
+
+ if last {
+ self.finish_frames()?;
+ }
+
+ Ok(())
+ }
+
+ /// Concludes `QuicFrame` streaming serialization.
+ ///
+ /// Only valid while in the frame-serialization mode.
+ pub fn finish_frames(&mut self) -> Result<()> {
+ if self.state != StreamerState::WritingFrames {
+ return Err(Error::InvalidState);
+ }
+
+ self.writer.as_mut().write_all(b"]}}\n")?;
+
+ self.state = StreamerState::Ready;
+
+ Ok(())
+ }
+
+ /// Returns the writer.
+ #[allow(clippy::borrowed_box)]
+ pub fn writer(&self) -> &Box<dyn std::io::Write + Send + Sync> {
+ &self.writer
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::events::quic;
+ use crate::events::RawInfo;
+ use testing::*;
+
+ #[test]
+ fn serialization_states() {
+ let v: Vec<u8> = Vec::new();
+ let buff = std::io::Cursor::new(v);
+ let writer = Box::new(buff);
+
+ let trace = make_trace_seq();
+ let pkt_hdr = make_pkt_hdr(quic::PacketType::Handshake);
+ let raw = Some(RawInfo {
+ length: Some(1251),
+ payload_length: Some(1224),
+ data: None,
+ });
+
+ let frame1 = QuicFrame::Stream {
+ stream_id: 40,
+ offset: 40,
+ length: 400,
+ fin: Some(true),
+ raw: None,
+ };
+
+ let event_data1 = EventData::PacketSent(quic::PacketSent {
+ header: pkt_hdr.clone(),
+ frames: Some(vec![frame1]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: raw.clone(),
+ datagram_id: None,
+ });
+
+ let ev1 = Event::with_time(0.0, event_data1);
+
+ let frame2 = QuicFrame::Stream {
+ stream_id: 0,
+ offset: 0,
+ length: 100,
+ fin: Some(true),
+ raw: None,
+ };
+
+ let frame3 = QuicFrame::Stream {
+ stream_id: 0,
+ offset: 0,
+ length: 100,
+ fin: Some(true),
+ raw: None,
+ };
+
+ let event_data2 = EventData::PacketSent(quic::PacketSent {
+ header: pkt_hdr.clone(),
+ frames: Some(vec![]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: raw.clone(),
+ datagram_id: None,
+ });
+
+ let ev2 = Event::with_time(0.0, event_data2);
+
+ let event_data3 = EventData::PacketSent(quic::PacketSent {
+ header: pkt_hdr,
+ frames: Some(vec![]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: Some("reset_token".to_string()),
+ supported_versions: None,
+ raw: raw.clone(),
+ datagram_id: None,
+ });
+
+ let ev3 = Event::with_time(0.0, event_data3);
+
+ let mut s = streamer::QlogStreamer::new(
+ "version".to_string(),
+ Some("title".to_string()),
+ Some("description".to_string()),
+ None,
+ std::time::Instant::now(),
+ trace,
+ EventImportance::Base,
+ writer,
+ );
+
+ // Before the log is started all other operations should fail.
+ assert!(matches!(s.add_event(ev2.clone()), Err(Error::InvalidState)));
+ assert!(matches!(
+ s.add_frame(frame2.clone(), false),
+ Err(Error::InvalidState)
+ ));
+ assert!(matches!(s.finish_frames(), Err(Error::InvalidState)));
+ assert!(matches!(s.finish_log(), Err(Error::InvalidState)));
+
+ // Once a log is started, can't write frames before an event.
+ assert!(matches!(s.start_log(), Ok(())));
+ assert!(matches!(
+ s.add_frame(frame2.clone(), false),
+ Err(Error::InvalidState)
+ ));
+ assert!(matches!(s.finish_frames(), Err(Error::InvalidState)));
+
+ // Initiate log with simple event.
+ assert!(matches!(s.add_event(ev1), Ok(true)));
+ assert!(matches!(s.finish_frames(), Ok(())));
+
+ // Some events hold frames; can't write any more events until frame
+ // writing is concluded.
+ assert!(matches!(s.add_event(ev2.clone()), Ok(true)));
+ assert!(matches!(s.add_event(ev2.clone()), Err(Error::InvalidState)));
+
+ // While writing frames, can't write events.
+ assert!(matches!(s.add_frame(frame2.clone(), false), Ok(())));
+ assert!(matches!(s.add_event(ev2.clone()), Err(Error::InvalidState)));
+ assert!(matches!(s.finish_frames(), Ok(())));
+
+ // Adding an event that includes both frames and raw data should
+ // be allowed.
+ assert!(matches!(s.add_event(ev3.clone()), Ok(true)));
+ assert!(matches!(s.add_frame(frame3.clone(), false), Ok(())));
+ assert!(matches!(s.finish_frames(), Ok(())));
+
+ // Adding an event with an external time should work too.
+ // For tests, it will resolve to 0 but we care about proving the API
+ // here, not timing specifics.
+ let now = std::time::Instant::now();
+
+ assert!(matches!(
+ s.add_event_with_instant(ev3.clone(), now),
+ Ok(true)
+ ));
+ assert!(matches!(s.add_frame(frame3.clone(), false), Ok(())));
+ assert!(matches!(s.finish_frames(), Ok(())));
+
+ assert!(matches!(s.finish_log(), Ok(())));
+
+ let r = s.writer();
+ let w: &Box<std::io::Cursor<Vec<u8>>> = unsafe { std::mem::transmute(r) };
+
+ let log_string = r#"{"qlog_version":"version","qlog_format":"JSON-SEQ","title":"title","description":"description","trace":{"vantage_point":{"type":"server"},"title":"Quiche qlog trace","description":"Quiche qlog trace description","configuration":{"time_offset":0.0}}}
+{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":40,"offset":40,"length":400,"fin":true}]}}
+{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":0,"offset":0,"length":100,"fin":true}]}}
+{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"stateless_reset_token":"reset_token","raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":0,"offset":0,"length":100,"fin":true}]}}
+{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"stateless_reset_token":"reset_token","raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":0,"offset":0,"length":100,"fin":true}]}}
+"#;
+
+ let written_string = std::str::from_utf8(w.as_ref().get_ref()).unwrap();
+
+ assert_eq!(log_string, written_string);
+ }
+}
diff --git a/quiche/BUILD.gn b/quiche/BUILD.gn
new file mode 100644
index 0000000..8816b56
--- /dev/null
+++ b/quiche/BUILD.gn
@@ -0,0 +1,58 @@
+# Copyright 2019 The Fuchsia Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import("//build/rust/rustc_library.gni")
+
+# Ignore warnings because this is third-party code.
+config("ignore_rust_warnings") {
+ rustflags = [ "-Awarnings" ]
+}
+
+rustc_library("quiche") {
+ name = "quiche"
+ edition = "2018"
+ configs -= [
+ "//build/config/rust:2018_idioms",
+ "//build/config:werror",
+ ]
+ configs += [ ":ignore_rust_warnings" ]
+ deps = [
+ "//third_party/rust_crates:lazy_static",
+ "//third_party/rust_crates:libc",
+ "//third_party/rust_crates:libm",
+ "//third_party/rust_crates:log",
+ "//third_party/rust_crates:ring",
+ ]
+ non_rust_deps = [ "//third_party/boringssl" ]
+
+ sources = [
+ "src/frame.rs",
+ "src/h3/qpack/decoder.rs",
+ "src/rand.rs",
+ "src/stream.rs",
+ "src/octets.rs",
+ "src/h3/qpack/huffman/mod.rs",
+ "src/h3/stream.rs",
+ "src/recovery/delivery_rate.rs",
+ "src/h3/mod.rs",
+ "src/recovery/hystart.rs",
+ "src/tls.rs",
+ "src/dgram.rs",
+ "src/h3/qpack/mod.rs",
+ "src/h3/qpack/huffman/table.rs",
+ "src/recovery/prr.rs",
+ "src/minmax.rs",
+ "src/recovery/cubic.rs",
+ "src/recovery/mod.rs",
+ "src/packet.rs",
+ "src/flowcontrol.rs",
+ "src/lib.rs",
+ "src/recovery/reno.rs",
+ "src/crypto.rs",
+ "src/h3/qpack/encoder.rs",
+ "src/ranges.rs",
+ "src/h3/frame.rs",
+ "src/h3/qpack/static_table.rs",
+ ]
+}
diff --git a/quiche/COPYING b/quiche/COPYING
new file mode 120000
index 0000000..012065c
--- /dev/null
+++ b/quiche/COPYING
@@ -0,0 +1 @@
+../COPYING
\ No newline at end of file
diff --git a/quiche/Cargo.toml b/quiche/Cargo.toml
new file mode 100644
index 0000000..a8dca53
--- /dev/null
+++ b/quiche/Cargo.toml
@@ -0,0 +1,67 @@
+[package]
+name = "quiche"
+version = "0.12.0"
+authors = ["Alessandro Ghedini <alessandro@ghedini.me>"]
+edition = "2018"
+build = "src/build.rs"
+description = "🥧 Savoury implementation of the QUIC transport protocol and HTTP/3"
+repository = "https://github.com/cloudflare/quiche"
+readme = "README.md"
+keywords = ["quic", "http3"]
+categories = ["network-programming"]
+license = "BSD-2-Clause"
+include = [
+ "/*.md",
+ "/*.toml",
+ "/COPYING",
+ "/deps/boringssl/**/*.[chS]",
+ "/deps/boringssl/**/*.asm",
+ "/deps/boringssl/src/**/*.cc",
+ "/deps/boringssl/**/CMakeLists.txt",
+ "/deps/boringssl/**/sources.cmake",
+ "/deps/boringssl/LICENSE",
+ "/examples",
+ "/include",
+ "/quiche.svg",
+ "/src",
+]
+
+[features]
+default = ["boringssl-vendored"]
+
+# Build vendored BoringSSL library.
+boringssl-vendored = []
+
+# Generate pkg-config metadata file for libquiche.
+pkg-config-meta = []
+
+# Equivalent to "--cfg fuzzing", but can also be checked in build.rs.
+fuzzing = []
+
+# Expose the FFI API.
+ffi = []
+
+[package.metadata.docs.rs]
+no-default-features = true
+
+[build-dependencies]
+cmake = "0.1"
+
+[dependencies]
+log = { version = "0.4", features = ["std"] }
+libc = "0.2"
+libm = "0.2"
+ring = "0.16"
+lazy_static = "1"
+boring-sys = { version = "2.0.0", optional = true }
+qlog = { version = "0.6", path = "../qlog", optional = true }
+
+[target."cfg(windows)".dependencies]
+winapi = { version = "0.3", features = ["wincrypt"] }
+
+[dev-dependencies]
+mio = "0.6"
+url = "1"
+
+[lib]
+crate-type = ["lib", "staticlib", "cdylib"]
diff --git a/quiche/README.md b/quiche/README.md
new file mode 120000
index 0000000..32d46ee
--- /dev/null
+++ b/quiche/README.md
@@ -0,0 +1 @@
+../README.md
\ No newline at end of file
diff --git a/examples/Makefile b/quiche/examples/Makefile
similarity index 100%
rename from examples/Makefile
rename to quiche/examples/Makefile
diff --git a/examples/README.md b/quiche/examples/README.md
similarity index 100%
rename from examples/README.md
rename to quiche/examples/README.md
diff --git a/examples/cert-big.crt b/quiche/examples/cert-big.crt
similarity index 100%
rename from examples/cert-big.crt
rename to quiche/examples/cert-big.crt
diff --git a/examples/client.c b/quiche/examples/client.c
similarity index 100%
rename from examples/client.c
rename to quiche/examples/client.c
diff --git a/examples/client.rs b/quiche/examples/client.rs
similarity index 100%
rename from examples/client.rs
rename to quiche/examples/client.rs
diff --git a/examples/gen-certs.sh b/quiche/examples/gen-certs.sh
similarity index 100%
rename from examples/gen-certs.sh
rename to quiche/examples/gen-certs.sh
diff --git a/examples/http3-client.c b/quiche/examples/http3-client.c
similarity index 95%
rename from examples/http3-client.c
rename to quiche/examples/http3-client.c
index f41e51e..9d93d70 100644
--- a/examples/http3-client.c
+++ b/quiche/examples/http3-client.c
@@ -98,6 +98,14 @@
ev_timer_again(loop, &conn_io->timer);
}
+static int for_each_setting(uint64_t identifier, uint64_t value,
+ void *argp) {
+ fprintf(stderr, "got HTTP/3 SETTING: %" PRIu64 "=%" PRIu64 "\n",
+ identifier, value);
+
+ return 0;
+}
+
static int for_each_header(uint8_t *name, size_t name_len,
uint8_t *value, size_t value_len,
void *argp) {
@@ -109,6 +117,7 @@
static void recv_cb(EV_P_ ev_io *w, int revents) {
static bool req_sent = false;
+ static bool settings_received = false;
struct conn_io *conn_io = w->data;
@@ -244,6 +253,16 @@
break;
}
+ if (!settings_received) {
+ int rc = quiche_h3_for_each_setting(conn_io->http3,
+ for_each_setting,
+ NULL);
+
+ if (rc == 0) {
+ settings_received = true;
+ }
+ }
+
switch (quiche_h3_event_type(ev)) {
case QUICHE_H3_EVENT_HEADERS: {
int rc = quiche_h3_event_for_each_header(ev, for_each_header,
diff --git a/examples/http3-client.rs b/quiche/examples/http3-client.rs
similarity index 100%
rename from examples/http3-client.rs
rename to quiche/examples/http3-client.rs
diff --git a/examples/http3-server.c b/quiche/examples/http3-server.c
similarity index 100%
rename from examples/http3-server.c
rename to quiche/examples/http3-server.c
diff --git a/examples/http3-server.rs b/quiche/examples/http3-server.rs
similarity index 100%
rename from examples/http3-server.rs
rename to quiche/examples/http3-server.rs
diff --git a/examples/qpack-decode.rs b/quiche/examples/qpack-decode.rs
similarity index 100%
rename from examples/qpack-decode.rs
rename to quiche/examples/qpack-decode.rs
diff --git a/examples/qpack-encode.rs b/quiche/examples/qpack-encode.rs
similarity index 100%
rename from examples/qpack-encode.rs
rename to quiche/examples/qpack-encode.rs
diff --git a/examples/server.c b/quiche/examples/server.c
similarity index 100%
rename from examples/server.c
rename to quiche/examples/server.c
diff --git a/examples/server.rs b/quiche/examples/server.rs
similarity index 100%
rename from examples/server.rs
rename to quiche/examples/server.rs
diff --git a/include/quiche.h b/quiche/include/quiche.h
similarity index 90%
rename from include/quiche.h
rename to quiche/include/quiche.h
index 54726ad..8a60364 100644
--- a/include/quiche.h
+++ b/quiche/include/quiche.h
@@ -34,6 +34,16 @@
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
+
+#if defined(WIN32) || defined(_WIN32) || defined(__WIN32__) || defined(__NT__)
+#include <winsock2.h>
+#include <ws2tcpip.h>
+#include <time.h>
+#else
+#include <sys/socket.h>
+#include <sys/time.h>
+#endif
+
#ifdef __unix__
#include <sys/types.h>
#endif
@@ -132,6 +142,10 @@
int quiche_config_load_verify_locations_from_file(quiche_config *config,
const char *path);
+// Specifies a directory where trusted CA certificates are stored for the purposes of certificate verification.
+int quiche_config_load_verify_locations_from_directory(quiche_config *config,
+ const char *path);
+
// Configures whether to verify the peer's certificate.
void quiche_config_verify_peer(quiche_config *config, bool v);
@@ -202,6 +216,12 @@
size_t recv_queue_len,
size_t send_queue_len);
+// Sets the maximum connection window.
+void quiche_config_set_max_connection_window(quiche_config *config, uint64_t v);
+
+// Sets the maximum stream window.
+void quiche_config_set_max_stream_window(quiche_config *config, uint64_t v);
+
// Frees the config object.
void quiche_config_free(quiche_config *config);
@@ -353,6 +373,9 @@
void quiche_conn_application_proto(quiche_conn *conn, const uint8_t **out,
size_t *out_len);
+// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
+void quiche_conn_peer_cert(quiche_conn *conn, const uint8_t **out, size_t *out_len);
+
// Returns the serialized cryptographic session for the connection.
void quiche_conn_session(quiche_conn *conn, const uint8_t **out, size_t *out_len);
@@ -508,7 +531,7 @@
// Returns the number of items in the DATAGRAM receive queue.
ssize_t quiche_conn_dgram_recv_queue_len(quiche_conn *conn);
-///Returns the total size of all items in the DATAGRAM receive queue.
+// Returns the total size of all items in the DATAGRAM receive queue.
ssize_t quiche_conn_dgram_recv_queue_byte_size(quiche_conn *conn);
// Returns the number of items in the DATAGRAM send queue.
@@ -540,72 +563,72 @@
#define QUICHE_H3_APPLICATION_PROTOCOL "\x02h3\x05h3-29\x05h3-28\x05h3-27"
enum quiche_h3_error {
- /// There is no error or no work to do
+ // There is no error or no work to do
QUICHE_H3_ERR_DONE = -1,
- /// The provided buffer is too short.
+ // The provided buffer is too short.
QUICHE_H3_ERR_BUFFER_TOO_SHORT = -2,
- /// Internal error in the HTTP/3 stack.
+ // Internal error in the HTTP/3 stack.
QUICHE_H3_ERR_INTERNAL_ERROR = -3,
- /// Endpoint detected that the peer is exhibiting behavior that causes.
- /// excessive load.
+ // Endpoint detected that the peer is exhibiting behavior that causes.
+ // excessive load.
QUICHE_H3_ERR_EXCESSIVE_LOAD = -4,
- /// Stream ID or Push ID greater that current maximum was
- /// used incorrectly, such as exceeding a limit, reducing a limit,
- /// or being reused.
+    // Stream ID or Push ID greater than current maximum was
+ // used incorrectly, such as exceeding a limit, reducing a limit,
+ // or being reused.
QUICHE_H3_ERR_ID_ERROR= -5,
- /// The endpoint detected that its peer created a stream that it will not
- /// accept.
+ // The endpoint detected that its peer created a stream that it will not
+ // accept.
QUICHE_H3_ERR_STREAM_CREATION_ERROR = -6,
- /// A required critical stream was closed.
+ // A required critical stream was closed.
QUICHE_H3_ERR_CLOSED_CRITICAL_STREAM = -7,
- /// No SETTINGS frame at beginning of control stream.
+ // No SETTINGS frame at beginning of control stream.
QUICHE_H3_ERR_MISSING_SETTINGS = -8,
- /// A frame was received which is not permitted in the current state.
+ // A frame was received which is not permitted in the current state.
QUICHE_H3_ERR_FRAME_UNEXPECTED = -9,
- /// Frame violated layout or size rules.
+ // Frame violated layout or size rules.
QUICHE_H3_ERR_FRAME_ERROR = -10,
- /// QPACK Header block decompression failure.
+ // QPACK Header block decompression failure.
QUICHE_H3_ERR_QPACK_DECOMPRESSION_FAILED = -11,
- /// Error originated from the transport layer.
+ // Error originated from the transport layer.
QUICHE_H3_ERR_TRANSPORT_ERROR = -12,
- /// The underlying QUIC stream (or connection) doesn't have enough capacity
- /// for the operation to complete. The application should retry later on.
+ // The underlying QUIC stream (or connection) doesn't have enough capacity
+ // for the operation to complete. The application should retry later on.
QUICHE_H3_ERR_STREAM_BLOCKED = -13,
- /// Error in the payload of a SETTINGS frame.
+ // Error in the payload of a SETTINGS frame.
QUICHE_H3_ERR_SETTINGS_ERROR = -14,
- /// Server rejected request.
+ // Server rejected request.
QUICHE_H3_ERR_REQUEST_REJECTED = -15,
- /// Request or its response cancelled.
+ // Request or its response cancelled.
QUICHE_H3_ERR_REQUEST_CANCELLED = -16,
- /// Client's request stream terminated without containing a full-formed
- /// request.
+ // Client's request stream terminated without containing a full-formed
+ // request.
QUICHE_H3_ERR_REQUEST_INCOMPLETE = -17,
- /// An HTTP message was malformed and cannot be processed.
+ // An HTTP message was malformed and cannot be processed.
QUICHE_H3_ERR_MESSAGE_ERROR = -18,
// The TCP connection established in response to a CONNECT request was
- /// reset or abnormally closed.
+ // reset or abnormally closed.
QUICHE_H3_ERR_CONNECT_ERROR = -19,
- /// The requested operation cannot be served over HTTP/3. Peer should retry
- /// over HTTP/1.1.
+ // The requested operation cannot be served over HTTP/3. Peer should retry
+ // over HTTP/1.1.
QUICHE_H3_ERR_VERSION_FALLBACK = -20,
};
@@ -668,6 +691,16 @@
void *argp),
void *argp);
+// Iterates over the peer's HTTP/3 settings.
+//
+// The `cb` callback will be called for each setting in `conn`.
+// If `cb` returns any value other than `0`, processing will be interrupted and
+// the value is returned to the caller.
+int quiche_h3_for_each_setting(quiche_h3_conn *conn,
+ int (*cb)(uint64_t identifier,
+ uint64_t value, void *argp),
+ void *argp);
+
// Check whether data will follow the headers on the stream.
bool quiche_h3_event_headers_has_body(quiche_h3_event *ev);
diff --git a/quiche/quiche.svg b/quiche/quiche.svg
new file mode 120000
index 0000000..2c24bd2
--- /dev/null
+++ b/quiche/quiche.svg
@@ -0,0 +1 @@
+../quiche.svg
\ No newline at end of file
diff --git a/src/build.rs b/quiche/src/build.rs
similarity index 92%
rename from src/build.rs
rename to quiche/src/build.rs
index 70697c9..d1ef4f4 100644
--- a/src/build.rs
+++ b/quiche/src/build.rs
@@ -173,11 +173,10 @@
fn write_pkg_config() {
use std::io::prelude::*;
- let profile = std::env::var("PROFILE").unwrap();
let manifest_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
- let target_dir = format!("{}/target/{}", manifest_dir, profile);
+ let target_dir = target_dir_path();
- let out_path = std::path::Path::new(&target_dir).join("quiche.pc");
+ let out_path = target_dir.as_path().join("quiche.pc");
let mut out_file = std::fs::File::create(&out_path).unwrap();
let include_dir = format!("{}/include", manifest_dir);
@@ -196,12 +195,27 @@
Libs: -Wl,-rpath,${{libdir}} -L${{libdir}} -lquiche
Cflags: -I${{includedir}}
",
- include_dir, target_dir, version
+ include_dir,
+ target_dir.to_str().unwrap(),
+ version
);
out_file.write_all(output.as_bytes()).unwrap();
}
+fn target_dir_path() -> std::path::PathBuf {
+ let out_dir = std::env::var("OUT_DIR").unwrap();
+ let out_dir = std::path::Path::new(&out_dir);
+
+ for p in out_dir.ancestors() {
+ if p.ends_with("build") {
+ return p.parent().unwrap().to_path_buf();
+ }
+ }
+
+ unreachable!();
+}
+
fn main() {
if cfg!(feature = "boringssl-vendored") && !cfg!(feature = "boring-sys") {
let bssl_dir = std::env::var("QUICHE_BSSL_PATH").unwrap_or_else(|_| {
@@ -229,7 +243,8 @@
}
// MacOS: Allow cdylib to link with undefined symbols
- if cfg!(target_os = "macos") {
+ let target_os = std::env::var("CARGO_CFG_TARGET_OS").unwrap();
+ if target_os == "macos" {
println!("cargo:rustc-cdylib-link-arg=-Wl,-undefined,dynamic_lookup");
}
diff --git a/src/crypto.rs b/quiche/src/crypto.rs
similarity index 99%
rename from src/crypto.rs
rename to quiche/src/crypto.rs
index ca47421..079961f 100644
--- a/src/crypto.rs
+++ b/quiche/src/crypto.rs
@@ -181,7 +181,10 @@
let tag_len = self.alg().tag_len();
- let mut out_len = buf.len() - tag_len;
+ let mut out_len = match buf.len().checked_sub(tag_len) {
+ Some(n) => n,
+ None => return Err(Error::CryptoFail),
+ };
let max_out_len = out_len;
diff --git a/src/dgram.rs b/quiche/src/dgram.rs
similarity index 100%
rename from src/dgram.rs
rename to quiche/src/dgram.rs
diff --git a/src/ffi.rs b/quiche/src/ffi.rs
similarity index 96%
rename from src/ffi.rs
rename to quiche/src/ffi.rs
index 34fd68b..ec74d1b 100644
--- a/src/ffi.rs
+++ b/quiche/src/ffi.rs
@@ -163,6 +163,19 @@
}
#[no_mangle]
+pub extern fn quiche_config_load_verify_locations_from_directory(
+ config: &mut Config, path: *const c_char,
+) -> c_int {
+ let path = unsafe { ffi::CStr::from_ptr(path).to_str().unwrap() };
+
+ match config.load_verify_locations_from_directory(path) {
+ Ok(_) => 0,
+
+ Err(e) => e.to_c() as c_int,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_config_verify_peer(config: &mut Config, v: bool) {
config.verify_peer(v);
}
@@ -304,6 +317,18 @@
}
#[no_mangle]
+pub extern fn quiche_config_set_max_connection_window(
+ config: &mut Config, v: u64,
+) {
+ config.set_max_connection_window(v);
+}
+
+#[no_mangle]
+pub extern fn quiche_config_set_max_stream_window(config: &mut Config, v: u64) {
+ config.set_max_stream_window(v);
+}
+
+#[no_mangle]
pub extern fn quiche_config_free(config: *mut Config) {
unsafe { Box::from_raw(config) };
}
@@ -874,6 +899,20 @@
}
#[no_mangle]
+pub extern fn quiche_conn_peer_cert(
+ conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
+) {
+ match conn.peer_cert() {
+ Some(peer_cert) => {
+ *out = peer_cert.as_ptr();
+ *out_len = peer_cert.len();
+ },
+
+ None => *out_len = 0,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_conn_session(
conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
) {
diff --git a/quiche/src/flowcontrol.rs b/quiche/src/flowcontrol.rs
new file mode 100644
index 0000000..6731c26
--- /dev/null
+++ b/quiche/src/flowcontrol.rs
@@ -0,0 +1,220 @@
+// Copyright (C) 2021, Cloudflare, Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+use std::time::Duration;
+use std::time::Instant;
+
+// When autotuning the receiver window, decide how much
+// we increase the window.
+const WINDOW_INCREASE_FACTOR: u64 = 2;
+
+// When autotuning the receiver window, check if the last
+// update is within RTT * this constant.
+const WINDOW_TRIGGER_FACTOR: u32 = 2;
+
+#[derive(Default, Debug)]
+pub struct FlowControl {
+ /// Total consumed bytes by the receiver.
+ consumed: u64,
+
+ /// Flow control limit.
+ max_data: u64,
+
+ /// The receive window. This value is used for updating
+ /// flow control limit.
+ window: u64,
+
+ /// The maximum receive window.
+ max_window: u64,
+
+ /// Last update time of max_data for autotuning the window.
+ last_update: Option<Instant>,
+}
+
+impl FlowControl {
+ pub fn new(max_data: u64, window: u64, max_window: u64) -> Self {
+ Self {
+ max_data,
+
+ window,
+
+ max_window,
+
+ ..Default::default()
+ }
+ }
+
+ /// Returns the current window size.
+ pub fn window(&self) -> u64 {
+ self.window
+ }
+
+ /// Returns the current flow limit.
+ pub fn max_data(&self) -> u64 {
+ self.max_data
+ }
+
+ /// Update consumed bytes.
+ pub fn add_consumed(&mut self, consumed: u64) {
+ self.consumed += consumed;
+ }
+
+ /// Returns true if the flow control needs to update max_data.
+ ///
+ /// This happens when the available window is smaller than the half
+ /// of the current window.
+ pub fn should_update_max_data(&self) -> bool {
+ let available_window = self.max_data - self.consumed;
+
+ available_window < (self.window / 2)
+ }
+
+ /// Returns the new max_data limit.
+ pub fn max_data_next(&self) -> u64 {
+ self.consumed + self.window
+ }
+
+ /// Commits the new max_data limit.
+ pub fn update_max_data(&mut self, now: Instant) {
+ self.max_data = self.max_data_next();
+ self.last_update = Some(now);
+ }
+
+ /// Autotune the window size. When there is an another update
+ /// within RTT x 2, bump the window x 1.5, capped by
+ /// max_window.
+ pub fn autotune_window(&mut self, now: Instant, rtt: Duration) {
+ if let Some(last_update) = self.last_update {
+ if now - last_update < rtt * WINDOW_TRIGGER_FACTOR {
+ self.window = std::cmp::min(
+ self.window * WINDOW_INCREASE_FACTOR,
+ self.max_window,
+ );
+ }
+ }
+ }
+
+ /// Make sure the lower bound of the window is same to
+ /// the current window.
+ pub fn ensure_window_lower_bound(&mut self, min_window: u64) {
+ if min_window > self.window {
+ self.window = min_window;
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn max_data() {
+ let fc = FlowControl::new(100, 20, 100);
+
+ assert_eq!(fc.max_data(), 100);
+ }
+
+ #[test]
+ fn should_update_max_data() {
+ let mut fc = FlowControl::new(100, 20, 100);
+
+ fc.add_consumed(85);
+ assert_eq!(fc.should_update_max_data(), false);
+
+ fc.add_consumed(10);
+ assert_eq!(fc.should_update_max_data(), true);
+ }
+
+ #[test]
+ fn max_data_next() {
+ let mut fc = FlowControl::new(100, 20, 100);
+
+ let consumed = 95;
+
+ fc.add_consumed(consumed);
+ assert_eq!(fc.should_update_max_data(), true);
+ assert_eq!(fc.max_data_next(), consumed + 20);
+ }
+
+ #[test]
+ fn update_max_data() {
+ let mut fc = FlowControl::new(100, 20, 100);
+
+ let consumed = 95;
+
+ fc.add_consumed(consumed);
+ assert_eq!(fc.should_update_max_data(), true);
+
+ let max_data_next = fc.max_data_next();
+ assert_eq!(fc.max_data_next(), consumed + 20);
+
+ fc.update_max_data(Instant::now());
+ assert_eq!(fc.max_data(), max_data_next);
+ }
+
+ #[test]
+ fn autotune_window() {
+ let w = 20;
+ let mut fc = FlowControl::new(100, w, 100);
+
+ let consumed = 95;
+
+ fc.add_consumed(consumed);
+ assert_eq!(fc.should_update_max_data(), true);
+
+ let max_data_next = fc.max_data_next();
+ assert_eq!(max_data_next, consumed + w);
+
+ fc.update_max_data(Instant::now());
+ assert_eq!(fc.max_data(), max_data_next);
+
+ // Window size should be doubled.
+ fc.autotune_window(Instant::now(), Duration::from_millis(100));
+
+ let w = w * 2;
+ let consumed_inc = 15;
+
+ fc.add_consumed(consumed_inc);
+ assert_eq!(fc.should_update_max_data(), true);
+
+ let max_data_next = fc.max_data_next();
+ assert_eq!(max_data_next, consumed + consumed_inc + w);
+ }
+
+ #[test]
+ fn ensure_window_lower_bound() {
+ let w = 20;
+ let mut fc = FlowControl::new(100, w, 100);
+
+ // Window doesn't change.
+ fc.ensure_window_lower_bound(w);
+ assert_eq!(fc.window(), 20);
+
+ // Window changed to the new value.
+ fc.ensure_window_lower_bound(w * 2);
+ assert_eq!(fc.window(), 40);
+ }
+}
diff --git a/src/frame.rs b/quiche/src/frame.rs
similarity index 94%
rename from src/frame.rs
rename to quiche/src/frame.rs
index e4cae0a..5884203 100644
--- a/src/frame.rs
+++ b/quiche/src/frame.rs
@@ -24,6 +24,8 @@
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+use std::convert::TryInto;
+
use crate::Error;
use crate::Result;
@@ -32,6 +34,15 @@
use crate::ranges;
use crate::stream;
+#[cfg(feature = "qlog")]
+use qlog::events::quic::AckedRanges;
+#[cfg(feature = "qlog")]
+use qlog::events::quic::ErrorSpace;
+#[cfg(feature = "qlog")]
+use qlog::events::quic::QuicFrame;
+#[cfg(feature = "qlog")]
+use qlog::events::quic::StreamType;
+
pub const MAX_CRYPTO_OVERHEAD: usize = 8;
pub const MAX_DGRAM_OVERHEAD: usize = 2;
pub const MAX_STREAM_OVERHEAD: usize = 12;
@@ -132,7 +143,7 @@
seq_num: u64,
retire_prior_to: u64,
conn_id: Vec<u8>,
- reset_token: Vec<u8>,
+ reset_token: [u8; 16],
},
RetireConnectionId {
@@ -140,11 +151,11 @@
},
PathChallenge {
- data: Vec<u8>,
+ data: [u8; 8],
},
PathResponse {
- data: Vec<u8>,
+ data: [u8; 8],
},
ConnectionClose {
@@ -257,7 +268,11 @@
seq_num: b.get_varint()?,
retire_prior_to: b.get_varint()?,
conn_id: b.get_bytes_with_u8_length()?.to_vec(),
- reset_token: b.get_bytes(16)?.to_vec(),
+ reset_token: b
+ .get_bytes(16)?
+ .buf()
+ .try_into()
+ .map_err(|_| Error::BufferTooShort)?,
},
0x19 => Frame::RetireConnectionId {
@@ -265,11 +280,19 @@
},
0x1a => Frame::PathChallenge {
- data: b.get_bytes(8)?.to_vec(),
+ data: b
+ .get_bytes(8)?
+ .buf()
+ .try_into()
+ .map_err(|_| Error::BufferTooShort)?,
},
0x1b => Frame::PathResponse {
- data: b.get_bytes(8)?.to_vec(),
+ data: b
+ .get_bytes(8)?
+ .buf()
+ .try_into()
+ .map_err(|_| Error::BufferTooShort)?,
},
0x1c => Frame::ConnectionClose {
@@ -779,19 +802,20 @@
}
#[cfg(feature = "qlog")]
- pub fn to_qlog(&self) -> qlog::QuicFrame {
+ pub fn to_qlog(&self) -> QuicFrame {
match self {
- Frame::Padding { .. } => qlog::QuicFrame::Padding,
+ Frame::Padding { .. } => QuicFrame::Padding,
- Frame::Ping { .. } => qlog::QuicFrame::Ping,
+ Frame::Ping { .. } => QuicFrame::Ping,
Frame::ACK {
ack_delay,
ranges,
ecn_counts,
} => {
- let ack_ranges =
- ranges.iter().map(|r| (r.start, r.end - 1)).collect();
+ let ack_ranges = AckedRanges::Double(
+ ranges.iter().map(|r| (r.start, r.end - 1)).collect(),
+ );
let (ect0, ect1, ce) = match ecn_counts {
Some(ecn) => (
@@ -803,7 +827,7 @@
None => (None, None, None),
};
- qlog::QuicFrame::Ack {
+ QuicFrame::Ack {
ack_delay: Some(*ack_delay as f32 / 1000.0),
acked_ranges: Some(ack_ranges),
ect1,
@@ -816,7 +840,7 @@
stream_id,
error_code,
final_size,
- } => qlog::QuicFrame::ResetStream {
+ } => QuicFrame::ResetStream {
stream_id: *stream_id,
error_code: *error_code,
final_size: *final_size,
@@ -825,31 +849,31 @@
Frame::StopSending {
stream_id,
error_code,
- } => qlog::QuicFrame::StopSending {
+ } => QuicFrame::StopSending {
stream_id: *stream_id,
error_code: *error_code,
},
- Frame::Crypto { data } => qlog::QuicFrame::Crypto {
+ Frame::Crypto { data } => QuicFrame::Crypto {
offset: data.off(),
length: data.len() as u64,
},
- Frame::CryptoHeader { offset, length } => qlog::QuicFrame::Crypto {
+ Frame::CryptoHeader { offset, length } => QuicFrame::Crypto {
offset: *offset,
length: *length as u64,
},
- Frame::NewToken { token } => qlog::QuicFrame::NewToken {
+ Frame::NewToken { token } => QuicFrame::NewToken {
length: token.len().to_string(),
token: "TODO: update to qlog-02 token format".to_string(),
},
- Frame::Stream { stream_id, data } => qlog::QuicFrame::Stream {
+ Frame::Stream { stream_id, data } => QuicFrame::Stream {
stream_id: *stream_id,
offset: data.off() as u64,
length: data.len() as u64,
- fin: data.fin(),
+ fin: data.fin().then(|| true),
raw: None,
},
@@ -858,59 +882,56 @@
offset,
length,
fin,
- } => qlog::QuicFrame::Stream {
+ } => QuicFrame::Stream {
stream_id: *stream_id,
offset: *offset,
length: *length as u64,
- fin: *fin,
+ fin: fin.then(|| true),
raw: None,
},
- Frame::MaxData { max } => qlog::QuicFrame::MaxData { maximum: *max },
+ Frame::MaxData { max } => QuicFrame::MaxData { maximum: *max },
- Frame::MaxStreamData { stream_id, max } =>
- qlog::QuicFrame::MaxStreamData {
- stream_id: *stream_id,
- maximum: *max,
- },
-
- Frame::MaxStreamsBidi { max } => qlog::QuicFrame::MaxStreams {
- stream_type: qlog::StreamType::Bidirectional,
+ Frame::MaxStreamData { stream_id, max } => QuicFrame::MaxStreamData {
+ stream_id: *stream_id,
maximum: *max,
},
- Frame::MaxStreamsUni { max } => qlog::QuicFrame::MaxStreams {
- stream_type: qlog::StreamType::Unidirectional,
+ Frame::MaxStreamsBidi { max } => QuicFrame::MaxStreams {
+ stream_type: StreamType::Bidirectional,
+ maximum: *max,
+ },
+
+ Frame::MaxStreamsUni { max } => QuicFrame::MaxStreams {
+ stream_type: StreamType::Unidirectional,
maximum: *max,
},
Frame::DataBlocked { limit } =>
- qlog::QuicFrame::DataBlocked { limit: *limit },
+ QuicFrame::DataBlocked { limit: *limit },
Frame::StreamDataBlocked { stream_id, limit } =>
- qlog::QuicFrame::StreamDataBlocked {
+ QuicFrame::StreamDataBlocked {
stream_id: *stream_id,
limit: *limit,
},
- Frame::StreamsBlockedBidi { limit } =>
- qlog::QuicFrame::StreamsBlocked {
- stream_type: qlog::StreamType::Bidirectional,
- limit: *limit,
- },
+ Frame::StreamsBlockedBidi { limit } => QuicFrame::StreamsBlocked {
+ stream_type: StreamType::Bidirectional,
+ limit: *limit,
+ },
- Frame::StreamsBlockedUni { limit } =>
- qlog::QuicFrame::StreamsBlocked {
- stream_type: qlog::StreamType::Unidirectional,
- limit: *limit,
- },
+ Frame::StreamsBlockedUni { limit } => QuicFrame::StreamsBlocked {
+ stream_type: StreamType::Unidirectional,
+ limit: *limit,
+ },
Frame::NewConnectionId {
seq_num,
retire_prior_to,
conn_id,
..
- } => qlog::QuicFrame::NewConnectionId {
+ } => QuicFrame::NewConnectionId {
sequence_number: *seq_num as u32,
retire_prior_to: *retire_prior_to as u32,
length: conn_id.len() as u64,
@@ -919,20 +940,19 @@
},
Frame::RetireConnectionId { seq_num } =>
- qlog::QuicFrame::RetireConnectionId {
+ QuicFrame::RetireConnectionId {
sequence_number: *seq_num as u32,
},
Frame::PathChallenge { .. } =>
- qlog::QuicFrame::PathChallenge { data: None },
+ QuicFrame::PathChallenge { data: None },
- Frame::PathResponse { .. } =>
- qlog::QuicFrame::PathResponse { data: None },
+ Frame::PathResponse { .. } => QuicFrame::PathResponse { data: None },
Frame::ConnectionClose {
error_code, reason, ..
- } => qlog::QuicFrame::ConnectionClose {
- error_space: qlog::ErrorSpace::TransportError,
+ } => QuicFrame::ConnectionClose {
+ error_space: ErrorSpace::TransportError,
error_code: *error_code,
raw_error_code: None, // raw error is no different for us
reason: Some(String::from_utf8(reason.clone()).unwrap()),
@@ -940,22 +960,22 @@
},
Frame::ApplicationClose { error_code, reason } =>
- qlog::QuicFrame::ConnectionClose {
- error_space: qlog::ErrorSpace::ApplicationError,
+ QuicFrame::ConnectionClose {
+ error_space: ErrorSpace::ApplicationError,
error_code: *error_code,
raw_error_code: None, // raw error is no different for us
reason: Some(String::from_utf8(reason.clone()).unwrap()),
trigger_frame_type: None, // don't know trigger type
},
- Frame::HandshakeDone => qlog::QuicFrame::HandshakeDone,
+ Frame::HandshakeDone => QuicFrame::HandshakeDone,
- Frame::Datagram { data } => qlog::QuicFrame::Datagram {
+ Frame::Datagram { data } => QuicFrame::Datagram {
length: data.len() as u64,
raw: None,
},
- Frame::DatagramHeader { length } => qlog::QuicFrame::Datagram {
+ Frame::DatagramHeader { length } => QuicFrame::Datagram {
length: *length as u64,
raw: None,
},
@@ -1150,7 +1170,6 @@
let mut ranges = ranges::RangeSet::default();
- #[allow(clippy::range_plus_one)]
ranges.insert(smallest_ack..largest_ack + 1);
for _i in 0..block_count {
@@ -1169,7 +1188,6 @@
smallest_ack = largest_ack - ack_block;
- #[allow(clippy::range_plus_one)]
ranges.insert(smallest_ack..largest_ack + 1);
}
@@ -1821,7 +1839,7 @@
seq_num: 123_213,
retire_prior_to: 122_211,
conn_id: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15],
- reset_token: vec![0x42; 16],
+ reset_token: [0x42; 16],
};
let wire_len = {
@@ -1875,7 +1893,7 @@
let mut d = [42; 128];
let frame = Frame::PathChallenge {
- data: vec![1, 2, 3, 4, 5, 6, 7, 8],
+ data: [1, 2, 3, 4, 5, 6, 7, 8],
};
let wire_len = {
@@ -1903,7 +1921,7 @@
let mut d = [42; 128];
let frame = Frame::PathResponse {
- data: vec![1, 2, 3, 4, 5, 6, 7, 8],
+ data: [1, 2, 3, 4, 5, 6, 7, 8],
};
let wire_len = {
diff --git a/src/h3/ffi.rs b/quiche/src/h3/ffi.rs
similarity index 94%
rename from src/h3/ffi.rs
rename to quiche/src/h3/ffi.rs
index a11184a..512404d 100644
--- a/src/h3/ffi.rs
+++ b/quiche/src/h3/ffi.rs
@@ -85,6 +85,29 @@
}
#[no_mangle]
+pub extern fn quiche_h3_for_each_setting(
+ conn: &h3::Connection,
+ cb: extern fn(identifier: u64, value: u64, argp: *mut c_void) -> c_int,
+ argp: *mut c_void,
+) -> c_int {
+ match conn.peer_settings_raw() {
+ Some(raw) => {
+ for setting in raw {
+ let rc = cb(setting.0, setting.1, argp);
+
+ if rc != 0 {
+ return rc;
+ }
+ }
+
+ 0
+ },
+
+ None => -1,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_h3_conn_poll(
conn: &mut h3::Connection, quic_conn: &mut Connection,
ev: *mut *const h3::Event,
@@ -219,7 +242,7 @@
quic_conn,
stream_id,
&resp_headers,
- &priority,
+ priority,
fin,
) {
Ok(_) => 0,
diff --git a/src/h3/frame.rs b/quiche/src/h3/frame.rs
similarity index 91%
rename from src/h3/frame.rs
rename to quiche/src/h3/frame.rs
index 8b80024..993d672 100644
--- a/src/h3/frame.rs
+++ b/quiche/src/h3/frame.rs
@@ -36,10 +36,10 @@
pub const GOAWAY_FRAME_TYPE_ID: u64 = 0x6;
pub const MAX_PUSH_FRAME_TYPE_ID: u64 = 0xD;
-const SETTINGS_QPACK_MAX_TABLE_CAPACITY: u64 = 0x1;
-const SETTINGS_MAX_FIELD_SECTION_SIZE: u64 = 0x6;
-const SETTINGS_QPACK_BLOCKED_STREAMS: u64 = 0x7;
-const SETTINGS_H3_DATAGRAM: u64 = 0x276;
+pub const SETTINGS_QPACK_MAX_TABLE_CAPACITY: u64 = 0x1;
+pub const SETTINGS_MAX_FIELD_SECTION_SIZE: u64 = 0x6;
+pub const SETTINGS_QPACK_BLOCKED_STREAMS: u64 = 0x7;
+pub const SETTINGS_H3_DATAGRAM: u64 = 0x276;
// Permit between 16 maximally-encoded and 128 minimally-encoded SETTINGS.
const MAX_SETTINGS_PAYLOAD_SIZE: usize = 256;
@@ -64,6 +64,7 @@
qpack_blocked_streams: Option<u64>,
h3_datagram: Option<u64>,
grease: Option<(u64, u64)>,
+ raw: Option<Vec<(u64, u64)>>,
},
PushPromise {
@@ -153,6 +154,7 @@
qpack_blocked_streams,
h3_datagram,
grease,
+ ..
} => {
let mut len = 0;
@@ -262,9 +264,10 @@
max_field_section_size,
qpack_max_table_capacity,
qpack_blocked_streams,
+ raw,
..
} => {
- write!(f, "SETTINGS max_field_section={:?}, qpack_max_table={:?}, qpack_blocked={:?} ", max_field_section_size, qpack_max_table_capacity, qpack_blocked_streams)?;
+ write!(f, "SETTINGS max_field_section={:?}, qpack_max_table={:?}, qpack_blocked={:?} raw={:?}", max_field_section_size, qpack_max_table_capacity, qpack_blocked_streams, raw)?;
},
Frame::PushPromise {
@@ -303,6 +306,7 @@
let mut qpack_max_table_capacity = None;
let mut qpack_blocked_streams = None;
let mut h3_datagram = None;
+ let mut raw = Vec::new();
// Reject SETTINGS frames that are too long.
if settings_length > MAX_SETTINGS_PAYLOAD_SIZE {
@@ -310,28 +314,32 @@
}
while b.off() < settings_length {
- let setting_ty = b.get_varint()?;
- let settings_val = b.get_varint()?;
+ let identifier = b.get_varint()?;
+ let value = b.get_varint()?;
- match setting_ty {
+ // MAX_SETTINGS_PAYLOAD_SIZE protects us from storing too many raw
+ // settings.
+ raw.push((identifier, value));
+
+ match identifier {
SETTINGS_QPACK_MAX_TABLE_CAPACITY => {
- qpack_max_table_capacity = Some(settings_val);
+ qpack_max_table_capacity = Some(value);
},
SETTINGS_MAX_FIELD_SECTION_SIZE => {
- max_field_section_size = Some(settings_val);
+ max_field_section_size = Some(value);
},
SETTINGS_QPACK_BLOCKED_STREAMS => {
- qpack_blocked_streams = Some(settings_val);
+ qpack_blocked_streams = Some(value);
},
SETTINGS_H3_DATAGRAM => {
- if settings_val > 1 {
+ if value > 1 {
return Err(super::Error::SettingsError);
}
- h3_datagram = Some(settings_val);
+ h3_datagram = Some(value);
},
// Reserved values overlap with HTTP/2 and MUST be rejected
@@ -349,6 +357,7 @@
qpack_blocked_streams,
h3_datagram,
grease: None,
+ raw: Some(raw),
})
}
@@ -456,12 +465,20 @@
fn settings_all_no_grease() {
let mut d = [42; 128];
+ let raw_settings = vec![
+ (SETTINGS_MAX_FIELD_SECTION_SIZE, 0),
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ (SETTINGS_H3_DATAGRAM, 0),
+ ];
+
let frame = Frame::Settings {
max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: Some(0),
grease: None,
+ raw: Some(raw_settings),
};
let frame_payload_len = 9;
@@ -495,15 +512,26 @@
qpack_blocked_streams: Some(0),
h3_datagram: Some(0),
grease: Some((33, 33)),
+ raw: Default::default(),
};
- // Frame parsing will always ignore GREASE values.
+ let raw_settings = vec![
+ (SETTINGS_MAX_FIELD_SECTION_SIZE, 0),
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ (SETTINGS_H3_DATAGRAM, 0),
+ (33, 33),
+ ];
+
+ // Frame parsing will not populate GREASE property but will be in the
+ // raw info.
let frame_parsed = Frame::Settings {
max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: Some(0),
grease: None,
+ raw: Some(raw_settings),
};
let frame_payload_len = 11;
@@ -531,12 +559,15 @@
fn settings_h3_only() {
let mut d = [42; 128];
+ let raw_settings = vec![(SETTINGS_MAX_FIELD_SECTION_SIZE, 1024)];
+
let frame = Frame::Settings {
max_field_section_size: Some(1024),
qpack_max_table_capacity: None,
qpack_blocked_streams: None,
h3_datagram: None,
grease: None,
+ raw: Some(raw_settings),
};
let frame_payload_len = 3;
@@ -564,12 +595,15 @@
fn settings_h3_dgram_only() {
let mut d = [42; 128];
+ let raw_settings = vec![(SETTINGS_H3_DATAGRAM, 1)];
+
let frame = Frame::Settings {
max_field_section_size: None,
qpack_max_table_capacity: None,
qpack_blocked_streams: None,
h3_datagram: Some(1),
grease: None,
+ raw: Some(raw_settings),
};
let frame_payload_len = 3;
@@ -603,6 +637,7 @@
qpack_blocked_streams: None,
h3_datagram: Some(5),
grease: None,
+ raw: Default::default(),
};
let frame_payload_len = 3;
@@ -629,12 +664,18 @@
fn settings_qpack_only() {
let mut d = [42; 128];
+ let raw_settings = vec![
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ ];
+
let frame = Frame::Settings {
max_field_section_size: None,
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: None,
grease: None,
+ raw: Some(raw_settings),
};
let frame_payload_len = 4;
diff --git a/src/h3/mod.rs b/quiche/src/h3/mod.rs
similarity index 97%
rename from src/h3/mod.rs
rename to quiche/src/h3/mod.rs
index 38adcc9..5379646 100644
--- a/src/h3/mod.rs
+++ b/quiche/src/h3/mod.rs
@@ -277,7 +277,6 @@
//! [`send_response()`]: struct.Connection.html#method.send_response
//! [`send_body()`]: struct.Connection.html#method.send_body
-use std::collections::HashMap;
use std::collections::VecDeque;
use crate::octets;
@@ -609,6 +608,7 @@
pub qpack_max_table_capacity: Option<u64>,
pub qpack_blocked_streams: Option<u64>,
pub h3_datagram: Option<u64>,
+ pub raw: Option<Vec<(u64, u64)>>,
}
struct QpackStreams {
@@ -623,7 +623,7 @@
next_request_stream_id: u64,
next_uni_stream_id: u64,
- streams: HashMap<u64, stream::Stream>,
+ streams: crate::stream::StreamIdHashMap<stream::Stream>,
local_settings: ConnectionSettings,
peer_settings: ConnectionSettings,
@@ -634,7 +634,6 @@
qpack_encoder: qpack::Encoder,
qpack_decoder: qpack::Decoder,
- #[allow(dead_code)]
local_qpack_streams: QpackStreams,
peer_qpack_streams: QpackStreams,
@@ -651,7 +650,6 @@
}
impl Connection {
- #[allow(clippy::unnecessary_wraps)]
fn new(
config: &Config, is_server: bool, enable_dgram: bool,
) -> Result<Connection> {
@@ -665,13 +663,14 @@
next_uni_stream_id: initial_uni_stream_id,
- streams: HashMap::new(),
+ streams: Default::default(),
local_settings: ConnectionSettings {
max_field_section_size: config.max_field_section_size,
qpack_max_table_capacity: config.qpack_max_table_capacity,
qpack_blocked_streams: config.qpack_blocked_streams,
h3_datagram,
+ raw: Default::default(),
},
peer_settings: ConnectionSettings {
@@ -679,6 +678,7 @@
qpack_max_table_capacity: None,
qpack_blocked_streams: None,
h3_datagram: None,
+ raw: Default::default(),
},
control_stream_id: None,
@@ -787,6 +787,10 @@
if let Err(e) = conn.stream_send(stream_id, b"", false) {
self.streams.remove(&stream_id);
+ if e == super::Error::Done {
+ return Err(Error::StreamBlocked);
+ }
+
return Err(e.into());
};
@@ -1416,6 +1420,13 @@
Ok(())
}
+ /// Gets the raw settings from peer including unknown and reserved types.
+ ///
+ /// The order of settings is the same as received in the SETTINGS frame.
+ pub fn peer_settings_raw(&self) -> Option<&[(u64, u64)]> {
+ self.peer_settings.raw.as_deref()
+ }
+
fn open_uni_stream(
&mut self, conn: &mut super::Connection, ty: u64,
) -> Result<u64> {
@@ -1570,6 +1581,7 @@
qpack_blocked_streams: self.local_settings.qpack_blocked_streams,
h3_datagram: self.local_settings.h3_datagram,
grease,
+ raw: Default::default(),
};
let mut d = [42; 128];
@@ -1905,6 +1917,7 @@
qpack_max_table_capacity,
qpack_blocked_streams,
h3_datagram,
+ raw,
..
} => {
self.peer_settings = ConnectionSettings {
@@ -1912,6 +1925,7 @@
qpack_max_table_capacity,
qpack_blocked_streams,
h3_datagram,
+ raw,
};
if let Some(1) = h3_datagram {
@@ -3336,7 +3350,7 @@
s.advance().ok();
- assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::InternalError));
+ assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::ExcessiveLoad));
}
#[test]
@@ -3528,7 +3542,7 @@
s.advance().ok();
- assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::InternalError));
+ assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::ExcessiveLoad));
// Try to call poll() again after an error occurred.
assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::Done));
@@ -3581,6 +3595,61 @@
}
#[test]
+ /// Ensure StreamBlocked when connection flow control prevents headers.
+ fn headers_blocked_on_conn() {
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(70);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+
+ let mut h3_config = Config::new().unwrap();
+
+ let mut s = Session::with_configs(&mut config, &mut h3_config).unwrap();
+
+ s.handshake().unwrap();
+
+ // After the HTTP handshake, some bytes of connection flow control have
+ // been consumed. Fill the connection with more grease data on the control
+ // stream.
+ let d = [42; 28];
+ assert_eq!(s.pipe.client.stream_send(2, &d, false), Ok(23));
+
+ let req = vec![
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
+ ];
+
+ // There is 0 connection-level flow control, so sending a request is
+ // blocked.
+ assert_eq!(
+ s.client.send_request(&mut s.pipe.client, &req, true),
+ Err(Error::StreamBlocked)
+ );
+
+ // Emit the control stream data and drain it at the server via poll() to
+ // consumes it via poll() and gives back flow control.
+ s.advance().ok();
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ s.advance().ok();
+
+ // Now we can send the request.
+ assert_eq!(s.client.send_request(&mut s.pipe.client, &req, true), Ok(0));
+ }
+
+ #[test]
/// Test handling of 0-length DATA writes with and without fin.
fn zero_length_data() {
let mut s = Session::default().unwrap();
@@ -3767,6 +3836,7 @@
qpack_blocked_streams: None,
h3_datagram: Some(1),
grease: None,
+ raw: Default::default(),
};
s.send_frame_client(settings, s.client.control_stream_id.unwrap(), false)
diff --git a/src/h3/qpack/decoder.rs b/quiche/src/h3/qpack/decoder.rs
similarity index 98%
rename from src/h3/qpack/decoder.rs
rename to quiche/src/h3/qpack/decoder.rs
index 1bc5755..d06e487 100644
--- a/src/h3/qpack/decoder.rs
+++ b/quiche/src/h3/qpack/decoder.rs
@@ -68,14 +68,9 @@
}
/// A QPACK decoder.
+#[derive(Default)]
pub struct Decoder {}
-impl Default for Decoder {
- fn default() -> Decoder {
- Decoder {}
- }
-}
-
impl Decoder {
/// Creates a new QPACK decoder.
pub fn new() -> Decoder {
diff --git a/src/h3/qpack/encoder.rs b/quiche/src/h3/qpack/encoder.rs
similarity index 98%
rename from src/h3/qpack/encoder.rs
rename to quiche/src/h3/qpack/encoder.rs
index 09c8b08..c7161f0 100644
--- a/src/h3/qpack/encoder.rs
+++ b/quiche/src/h3/qpack/encoder.rs
@@ -35,14 +35,9 @@
use super::LITERAL_WITH_NAME_REF;
/// A QPACK encoder.
+#[derive(Default)]
pub struct Encoder {}
-impl Default for Encoder {
- fn default() -> Encoder {
- Encoder {}
- }
-}
-
impl Encoder {
/// Creates a new QPACK encoder.
pub fn new() -> Encoder {
diff --git a/src/h3/qpack/huffman/mod.rs b/quiche/src/h3/qpack/huffman/mod.rs
similarity index 100%
rename from src/h3/qpack/huffman/mod.rs
rename to quiche/src/h3/qpack/huffman/mod.rs
diff --git a/src/h3/qpack/huffman/table.rs b/quiche/src/h3/qpack/huffman/table.rs
similarity index 99%
rename from src/h3/qpack/huffman/table.rs
rename to quiche/src/h3/qpack/huffman/table.rs
index 7162aff..011272c 100644
--- a/src/h3/qpack/huffman/table.rs
+++ b/quiche/src/h3/qpack/huffman/table.rs
@@ -1,5 +1,3 @@
-#[allow(clippy::unreadable_literal)]
-
// (num-bits, bits)
pub const ENCODE_TABLE: [(usize, u64); 257] = [
(13, 0x1ff8),
diff --git a/src/h3/qpack/mod.rs b/quiche/src/h3/qpack/mod.rs
similarity index 100%
rename from src/h3/qpack/mod.rs
rename to quiche/src/h3/qpack/mod.rs
diff --git a/src/h3/qpack/static_table.rs b/quiche/src/h3/qpack/static_table.rs
similarity index 100%
rename from src/h3/qpack/static_table.rs
rename to quiche/src/h3/qpack/static_table.rs
diff --git a/src/h3/stream.rs b/quiche/src/h3/stream.rs
similarity index 96%
rename from src/h3/stream.rs
rename to quiche/src/h3/stream.rs
index 0f39414..8be4c17 100644
--- a/src/h3/stream.rs
+++ b/quiche/src/h3/stream.rs
@@ -545,29 +545,31 @@
fn state_transition(
&mut self, new_state: State, expected_len: usize, resize: bool,
) -> Result<()> {
- self.state = new_state;
- self.state_off = 0;
- self.state_len = expected_len;
-
// Some states don't need the state buffer, so don't resize it if not
// necessary.
if resize {
// A peer can influence the size of the state buffer (e.g. with the
// payload size of a GREASE frame), so we need to limit the maximum
// size to avoid DoS.
- if self.state_len > MAX_STATE_BUF_SIZE {
- return Err(Error::InternalError);
+ if expected_len > MAX_STATE_BUF_SIZE {
+ return Err(Error::ExcessiveLoad);
}
- self.state_buf.resize(self.state_len, 0);
+ self.state_buf.resize(expected_len, 0);
}
+ self.state = new_state;
+ self.state_off = 0;
+ self.state_len = expected_len;
+
Ok(())
}
}
#[cfg(test)]
mod tests {
+ use crate::h3::frame::*;
+
use super::*;
#[test]
@@ -579,12 +581,19 @@
let mut d = vec![42; 40];
let mut b = octets::OctetsMut::with_slice(&mut d);
- let frame = frame::Frame::Settings {
+ let raw_settings = vec![
+ (SETTINGS_MAX_FIELD_SECTION_SIZE, 0),
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ ];
+
+ let frame = Frame::Settings {
max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: None,
grease: None,
+ raw: Some(raw_settings),
};
b.put_varint(HTTP3_CONTROL_STREAM_TYPE_ID).unwrap();
@@ -635,12 +644,19 @@
let mut d = vec![42; 40];
let mut b = octets::OctetsMut::with_slice(&mut d);
+ let raw_settings = vec![
+ (SETTINGS_MAX_FIELD_SECTION_SIZE, 0),
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ ];
+
let frame = frame::Frame::Settings {
max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: None,
grease: None,
+ raw: Some(raw_settings),
};
b.put_varint(HTTP3_CONTROL_STREAM_TYPE_ID).unwrap();
@@ -700,12 +716,19 @@
let goaway = frame::Frame::GoAway { id: 0 };
+ let raw_settings = vec![
+ (SETTINGS_MAX_FIELD_SECTION_SIZE, 0),
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ ];
+
let settings = frame::Frame::Settings {
max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: None,
grease: None,
+ raw: Some(raw_settings),
};
b.put_varint(HTTP3_CONTROL_STREAM_TYPE_ID).unwrap();
@@ -743,12 +766,20 @@
let header_block = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let hdrs = frame::Frame::Headers { header_block };
+ let raw_settings = vec![
+ (SETTINGS_MAX_FIELD_SECTION_SIZE, 0),
+ (SETTINGS_QPACK_MAX_TABLE_CAPACITY, 0),
+ (SETTINGS_QPACK_BLOCKED_STREAMS, 0),
+ (33, 33),
+ ];
+
let settings = frame::Frame::Settings {
max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
h3_datagram: None,
grease: None,
+ raw: Some(raw_settings),
};
b.put_varint(HTTP3_CONTROL_STREAM_TYPE_ID).unwrap();
diff --git a/src/lib.rs b/quiche/src/lib.rs
similarity index 96%
rename from src/lib.rs
rename to quiche/src/lib.rs
index 9bcf2c9..f456778 100644
--- a/src/lib.rs
+++ b/quiche/src/lib.rs
@@ -287,14 +287,31 @@
//!
//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
-#![allow(improper_ctypes)]
-#![allow(clippy::suspicious_operation_groupings)]
#![allow(clippy::upper_case_acronyms)]
#![warn(missing_docs)]
#[macro_use]
extern crate log;
+#[cfg(feature = "qlog")]
+use qlog::events::connectivity::TransportOwner;
+#[cfg(feature = "qlog")]
+use qlog::events::quic::RecoveryEventType;
+#[cfg(feature = "qlog")]
+use qlog::events::quic::TransportEventType;
+#[cfg(feature = "qlog")]
+use qlog::events::DataRecipient;
+#[cfg(feature = "qlog")]
+use qlog::events::Event;
+#[cfg(feature = "qlog")]
+use qlog::events::EventData;
+#[cfg(feature = "qlog")]
+use qlog::events::EventImportance;
+#[cfg(feature = "qlog")]
+use qlog::events::EventType;
+#[cfg(feature = "qlog")]
+use qlog::events::RawInfo;
+
use std::cmp;
use std::time;
@@ -359,6 +376,16 @@
const RESERVED_VERSION_MASK: u32 = 0xfafafafa;
+// The default size of the receiver connection flow control window.
+const DEFAULT_CONNECTION_WINDOW: u64 = 48 * 1024;
+
+// The maximum size of the receiver connection flow control window.
+const MAX_CONNECTION_WINDOW: u64 = 24 * 1024 * 1024;
+
+// How much larger the connection flow control window needs to be compared
+// to the stream flow control window.
+const CONNECTION_WINDOW_FACTOR: f64 = 1.5;
+
/// A specialized [`Result`] type for quiche operations.
///
/// This type is used throughout quiche's public API for any operation that
@@ -563,6 +590,9 @@
dgram_send_max_queue_len: usize,
max_send_udp_payload_size: usize,
+
+ max_connection_window: u64,
+ max_stream_window: u64,
}
// See https://quicwg.org/base-drafts/rfc9000.html#section-15
@@ -599,6 +629,9 @@
dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
+
+ max_connection_window: MAX_CONNECTION_WINDOW,
+ max_stream_window: stream::MAX_STREAM_WINDOW,
})
}
@@ -924,6 +957,20 @@
self.dgram_recv_max_queue_len = recv_queue_len;
self.dgram_send_max_queue_len = send_queue_len;
}
+
+ /// Sets the maximum size of the connection window.
+ ///
+ /// The default value is MAX_CONNECTION_WINDOW (24MBytes).
+ pub fn set_max_connection_window(&mut self, v: u64) {
+ self.max_connection_window = v;
+ }
+
+ /// Sets the maximum size of the stream window.
+ ///
+ /// The default value is MAX_STREAM_WINDOW (16MBytes).
+ pub fn set_max_stream_window(&mut self, v: u64) {
+ self.max_stream_window = v;
+ }
}
/// A QUIC connection.
@@ -978,12 +1025,8 @@
/// Total number of bytes received from the peer.
rx_data: u64,
- /// Local flow control limit for the connection.
- max_rx_data: u64,
-
- /// Updated local flow control limit for the connection. This is used to
- /// trigger sending MAX_DATA frames after a certain threshold.
- max_rx_data_next: u64,
+ /// Receiver flow controller.
+ flow_control: flowcontrol::FlowControl,
/// Whether we send MAX_DATA frame.
almost_full: bool,
@@ -1008,7 +1051,7 @@
/// Total number of bytes sent over the connection.
sent_bytes: u64,
- /// Total number of bytes recevied over the connection.
+ /// Total number of bytes received over the connection.
recv_bytes: u64,
/// Streams map, indexed by stream ID.
@@ -1034,7 +1077,7 @@
peer_error: Option<ConnectionError>,
/// Received path challenge.
- challenge: Option<Vec<u8>>,
+ challenge: Option<[u8; 8]>,
/// The connection-level limit at which send blocking occurred.
blocked_limit: Option<u64>,
@@ -1319,12 +1362,12 @@
/// Executes the provided body if the qlog feature is enabled, quiche has been
/// configured with a log writer, the event's importance is within the
-/// confgured level.
+/// configured level.
macro_rules! qlog_with_type {
($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
#[cfg(feature = "qlog")]
{
- if qlog::EventImportance::from($ty).is_contained_in(&$qlog.level) {
+ if EventImportance::from($ty).is_contained_in(&$qlog.level) {
if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
$body
}
@@ -1334,30 +1377,30 @@
}
#[cfg(feature = "qlog")]
-const QLOG_PARAMS_SET: qlog::EventType =
- qlog::EventType::TransportEventType(qlog::TransportEventType::ParametersSet);
+const QLOG_PARAMS_SET: EventType =
+ EventType::TransportEventType(TransportEventType::ParametersSet);
#[cfg(feature = "qlog")]
-const QLOG_PACKET_RX: qlog::EventType =
- qlog::EventType::TransportEventType(qlog::TransportEventType::PacketReceived);
+const QLOG_PACKET_RX: EventType =
+ EventType::TransportEventType(TransportEventType::PacketReceived);
#[cfg(feature = "qlog")]
-const QLOG_PACKET_TX: qlog::EventType =
- qlog::EventType::TransportEventType(qlog::TransportEventType::PacketSent);
+const QLOG_PACKET_TX: EventType =
+ EventType::TransportEventType(TransportEventType::PacketSent);
#[cfg(feature = "qlog")]
-const QLOG_DATA_MV: qlog::EventType =
- qlog::EventType::TransportEventType(qlog::TransportEventType::DataMoved);
+const QLOG_DATA_MV: EventType =
+ EventType::TransportEventType(TransportEventType::DataMoved);
#[cfg(feature = "qlog")]
-const QLOG_METRICS: qlog::EventType =
- qlog::EventType::RecoveryEventType(qlog::RecoveryEventType::MetricsUpdated);
+const QLOG_METRICS: EventType =
+ EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated);
#[cfg(feature = "qlog")]
struct QlogInfo {
- streamer: Option<qlog::QlogStreamer>,
+ streamer: Option<qlog::streamer::QlogStreamer>,
logged_peer_params: bool,
- level: qlog::EventImportance,
+ level: EventImportance,
}
#[cfg(feature = "qlog")]
@@ -1366,7 +1409,7 @@
QlogInfo {
streamer: None,
logged_peer_params: false,
- level: qlog::EventImportance::Base,
+ level: EventImportance::Base,
}
}
}
@@ -1424,8 +1467,11 @@
recv_bytes: 0,
rx_data: 0,
- max_rx_data,
- max_rx_data_next: max_rx_data,
+ flow_control: flowcontrol::FlowControl::new(
+ max_rx_data,
+ cmp::min(max_rx_data / 2 * 3, DEFAULT_CONNECTION_WINDOW),
+ config.max_connection_window,
+ ),
almost_full: false,
tx_cap: 0,
@@ -1440,6 +1486,7 @@
streams: stream::StreamMap::new(
config.local_transport_params.initial_max_streams_bidi,
config.local_transport_params.initial_max_streams_uni,
+ config.max_stream_window,
),
odcid: None,
@@ -1556,6 +1603,8 @@
conn.derived_initial_secrets = true;
}
+ conn.recovery.on_init();
+
Ok(conn)
}
@@ -1572,7 +1621,8 @@
/// Sets qlog output to the designated [`Writer`].
///
- /// Only events included in `QlogLevel::Base` are written.
+ /// Only events included in `QlogLevel::Base` are written. The serialization
+ /// format is JSON-SEQ.
///
/// This needs to be called as soon as the connection is created, to avoid
/// missing some early logs.
@@ -1588,7 +1638,8 @@
/// Sets qlog output to the designated [`Writer`].
///
- /// Only qlog events included in the specified `QlogLevel` are written
+ /// Only qlog events included in the specified `QlogLevel` are written. The
+ /// serialization format is JSON-SEQ.
///
/// This needs to be called as soon as the connection is created, to avoid
/// missing some early logs.
@@ -1606,16 +1657,16 @@
};
let level = match qlog_level {
- QlogLevel::Core => qlog::EventImportance::Core,
+ QlogLevel::Core => EventImportance::Core,
- QlogLevel::Base => qlog::EventImportance::Base,
+ QlogLevel::Base => EventImportance::Base,
- QlogLevel::Extra => qlog::EventImportance::Extra,
+ QlogLevel::Extra => EventImportance::Extra,
};
self.qlog.level = level;
- let trace = qlog::Trace::new(
+ let trace = qlog::TraceSeq::new(
qlog::VantagePoint {
name: None,
ty: vp,
@@ -1630,7 +1681,7 @@
None,
);
- let mut streamer = qlog::QlogStreamer::new(
+ let mut streamer = qlog::streamer::QlogStreamer::new(
qlog::QLOG_VERSION.to_string(),
Some(title),
Some(description),
@@ -1645,12 +1696,10 @@
let ev_data = self
.local_transport_params
- .to_qlog(qlog::TransportOwner::Local, self.handshake.cipher());
+ .to_qlog(TransportOwner::Local, self.handshake.cipher());
// This event occurs very early, so just mark the relative time as 0.0.
- streamer
- .add_event(qlog::Event::with_time(0.0, ev_data))
- .ok();
+ streamer.add_event(Event::with_time(0.0, ev_data)).ok();
self.qlog.streamer = Some(streamer);
}
@@ -2097,7 +2146,7 @@
qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
let packet_size = b.len();
- let qlog_pkt_hdr = qlog::PacketHeader::with_type(
+ let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
hdr.ty.to_qlog(),
pn,
Some(hdr.version),
@@ -2105,22 +2154,23 @@
Some(&hdr.dcid),
);
- let qlog_raw_info = qlog::RawInfo {
+ let qlog_raw_info = RawInfo {
length: Some(packet_size as u64),
payload_length: Some(payload_len as u64),
data: None,
};
- let ev_data = qlog::EventData::PacketReceived {
- header: qlog_pkt_hdr,
- frames: Some(vec![]),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: Some(qlog_raw_info),
- datagram_id: None,
- };
+ let ev_data =
+ EventData::PacketReceived(qlog::events::quic::PacketReceived {
+ header: qlog_pkt_hdr,
+ frames: Some(vec![]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(qlog_raw_info),
+ datagram_id: None,
+ });
q.add_event_data_with_instant(ev_data, now).ok();
});
@@ -2219,10 +2269,9 @@
if self.is_established() {
qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
if !self.qlog.logged_peer_params {
- let ev_data = self.peer_transport_params.to_qlog(
- qlog::TransportOwner::Remote,
- self.handshake.cipher(),
- );
+ let ev_data = self
+ .peer_transport_params
+ .to_qlog(TransportOwner::Remote, self.handshake.cipher());
q.add_event_data_with_instant(ev_data, now).ok();
@@ -2361,14 +2410,18 @@
/// * When the connection timer expires (that is, any time [`on_timeout()`]
/// is also called).
///
- /// * When the application sends data to the peer (for examples, any time
+ /// * When the application sends data to the peer (for example, any time
/// [`stream_send()`] or [`stream_shutdown()`] are called).
///
+ /// * When the application receives data from the peer (for example, any
+ /// time [`stream_recv()`] is called).
+ ///
/// [`Done`]: enum.Error.html#variant.Done
/// [`recv()`]: struct.Connection.html#method.recv
/// [`on_timeout()`]: struct.Connection.html#method.on_timeout
/// [`stream_send()`]: struct.Connection.html#method.stream_send
/// [`stream_shutdown()`]: struct.Connection.html#method.stream_shutdown
+ /// [`stream_recv()`]: struct.Connection.html#method.stream_recv
///
/// ## Examples:
///
@@ -2803,19 +2856,30 @@
},
};
+ // Autotune the stream window size.
+ stream.recv.autotune_window(now, self.recovery.rtt());
+
let frame = frame::Frame::MaxStreamData {
stream_id,
max: stream.recv.max_data_next(),
};
if push_frame_to_pkt!(b, frames, frame, left) {
- stream.recv.update_max_data();
+ let recv_win = stream.recv.window();
+
+ stream.recv.update_max_data(now);
self.streams.mark_almost_full(stream_id, false);
ack_eliciting = true;
in_flight = true;
+ // Make sure the connection window always has some
+ // room compared to the stream window.
+ self.flow_control.ensure_window_lower_bound(
+ (recv_win as f64 * CONNECTION_WINDOW_FACTOR) as u64,
+ );
+
// Also send MAX_DATA when MAX_STREAM_DATA is sent, to avoid a
// potential race condition.
self.almost_full = true;
@@ -2823,16 +2887,19 @@
}
// Create MAX_DATA frame as needed.
- if self.almost_full && self.max_rx_data < self.max_rx_data_next {
+ if self.almost_full && self.max_rx_data() < self.max_rx_data_next() {
+ // Autotune the connection window size.
+ self.flow_control.autotune_window(now, self.recovery.rtt());
+
let frame = frame::Frame::MaxData {
- max: self.max_rx_data_next,
+ max: self.max_rx_data_next(),
};
if push_frame_to_pkt!(b, frames, frame, left) {
self.almost_full = false;
// Commits the new max_rx_data limit.
- self.max_rx_data = self.max_rx_data_next;
+ self.flow_control.update_max_data(now);
ack_eliciting = true;
in_flight = true;
@@ -2934,10 +3001,8 @@
}
// Create PATH_RESPONSE frame.
- if let Some(ref challenge) = self.challenge {
- let frame = frame::Frame::PathResponse {
- data: challenge.clone(),
- };
+ if let Some(challenge) = self.challenge {
+ let frame = frame::Frame::PathResponse { data: challenge };
if push_frame_to_pkt!(b, frames, frame, left) {
self.challenge = None;
@@ -3215,7 +3280,7 @@
// Alternate trying to send DATAGRAMs next time.
self.emit_dgram = !dgram_emitted;
- // Create PING for PTO probe if no other ack-elicitng frame is sent.
+ // Create PING for PTO probe if no other ack-eliciting frame is sent.
if self.recovery.loss_probes[epoch] > 0 &&
!ack_eliciting &&
left >= 1 &&
@@ -3288,22 +3353,26 @@
);
qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
- let qlog_pkt_hdr = qlog::PacketHeader::with_type(
+ let qlog_pkt_hdr = qlog::events::quic::PacketHeader::with_type(
hdr.ty.to_qlog(),
pn,
Some(hdr.version),
Some(&hdr.scid),
Some(&hdr.dcid),
);
- let length = Some(payload_len as u64 + payload_offset as u64);
- let payload_length = Some(payload_len as u64);
- let qlog_raw_info = qlog::RawInfo {
- length,
- payload_length,
+
+ // Qlog packet raw info described at
+ // https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema-00#section-5.1
+ //
+ // `length` includes packet headers and trailers (AEAD tag).
+ let length = payload_len + payload_offset + crypto_overhead;
+ let qlog_raw_info = RawInfo {
+ length: Some(length as u64),
+ payload_length: Some(payload_len as u64),
data: None,
};
- let ev_data = qlog::EventData::PacketSent {
+ let ev_data = EventData::PacketSent(qlog::events::quic::PacketSent {
header: qlog_pkt_hdr,
frames: Some(vec![]),
is_coalesced: None,
@@ -3312,7 +3381,7 @@
supported_versions: None,
raw: Some(qlog_raw_info),
datagram_id: None,
- };
+ });
q.add_event_data_with_instant(ev_data, now).ok();
});
@@ -3413,7 +3482,11 @@
/// On success the amount of bytes read and a flag indicating the fin state
/// is returned as a tuple, or [`Done`] if there is no data to read.
///
+ /// Reading data from a stream may trigger queueing of control messages
+ /// (e.g. MAX_STREAM_DATA). [`send()`] should be called after reading.
+ ///
/// [`Done`]: enum.Error.html#variant.Done
+ /// [`send()`]: struct.Connection.html#method.send
///
/// ## Examples:
///
@@ -3471,7 +3544,7 @@
},
};
- self.max_rx_data_next = self.max_rx_data_next.saturating_add(read as u64);
+ self.flow_control.add_consumed(read as u64);
let readable = stream.is_readable();
@@ -3490,14 +3563,14 @@
}
qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
- let ev_data = qlog::EventData::DataMoved {
+ let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
stream_id: Some(stream_id),
offset: Some(offset),
length: Some(read as u64),
- from: Some(qlog::DataRecipient::Transport),
- to: Some(qlog::DataRecipient::Application),
+ from: Some(DataRecipient::Transport),
+ to: Some(DataRecipient::Application),
data: None,
- };
+ });
let now = time::Instant::now();
q.add_event_data_with_instant(ev_data, now).ok();
@@ -3517,7 +3590,8 @@
///
/// Applications can provide a 0-length buffer with the fin flag set to
/// true. This will lead to a 0-length FIN STREAM frame being sent at the
- /// latest offset. This is the only case where `Ok(0)` is returned.
+ /// latest offset. The `Ok(0)` value is only returned when the application
+ /// provided a 0-length buffer.
///
/// In addition, if the peer has signalled that it doesn't want to receive
/// any more data from this stream by sending the `STOP_SENDING` frame, the
@@ -3576,8 +3650,11 @@
// Truncate the input buffer based on the connection's send capacity if
// necessary.
+ //
+ // When the cap is zero, the method returns Ok(0) *only* when the passed
+ // buffer is empty. We return Error::Done otherwise.
let cap = self.tx_cap;
- if cap == 0 && !fin {
+ if cap == 0 && !(fin && buf.is_empty()) {
return Err(Error::Done);
}
@@ -3641,19 +3718,23 @@
self.recovery.rate_check_app_limited();
qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
- let ev_data = qlog::EventData::DataMoved {
+ let ev_data = EventData::DataMoved(qlog::events::quic::DataMoved {
stream_id: Some(stream_id),
offset: Some(offset),
length: Some(sent as u64),
- from: Some(qlog::DataRecipient::Application),
- to: Some(qlog::DataRecipient::Transport),
+ from: Some(DataRecipient::Application),
+ to: Some(DataRecipient::Transport),
data: None,
- };
+ });
let now = time::Instant::now();
q.add_event_data_with_instant(ev_data, now).ok();
});
+ if sent == 0 && !buf.is_empty() {
+ return Err(Error::Done);
+ }
+
Ok(sent)
}
@@ -4291,7 +4372,7 @@
let now = time::Instant::now();
if timeout <= now {
- return Some(time::Duration::new(0, 0));
+ return Some(time::Duration::ZERO);
}
return Some(timeout.duration_since(now));
@@ -4352,8 +4433,6 @@
q.add_event_data_with_instant(ev_data, now).ok();
}
});
-
- return;
}
}
}
@@ -4424,7 +4503,7 @@
/// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
#[inline]
- pub fn peer_cert(&self) -> Option<Vec<u8>> {
+ pub fn peer_cert(&self) -> Option<&[u8]> {
self.handshake.peer_cert()
}
@@ -4435,8 +4514,8 @@
///
/// [`set_session()`]: struct.Connection.html#method.set_session
#[inline]
- pub fn session(&self) -> Option<Vec<u8>> {
- self.session.clone()
+ pub fn session(&self) -> Option<&[u8]> {
+ self.session.as_deref()
}
/// Returns the source connection ID.
@@ -4531,7 +4610,7 @@
/// Note that a `Some` return value does not necessarily imply
/// [`is_closed()`] or any other connection state.
/// `Some` also does not guarantee that the error has been sent to
- /// or recieved by the peer.
+ /// or received by the peer.
///
/// [`close()`]: struct.Connection.html#method.close
/// [`is_closed()`]: struct.Connection.html#method.is_closed
@@ -4905,7 +4984,7 @@
return Err(Error::InvalidStreamState(stream_id));
}
- let max_rx_data_left = self.max_rx_data - self.rx_data;
+ let max_rx_data_left = self.max_rx_data() - self.rx_data;
// Get existing stream or create a new one, but if the stream
// has already been closed and collected, ignore the frame.
@@ -5029,7 +5108,7 @@
return Err(Error::InvalidStreamState(stream_id));
}
- let max_rx_data_left = self.max_rx_data - self.rx_data;
+ let max_rx_data_left = self.max_rx_data() - self.rx_data;
// Get existing stream or create a new one, but if the stream
// has already been closed and collected, ignore the frame.
@@ -5243,8 +5322,17 @@
/// This happens when the new max data limit is at least double the amount
/// of data that can be received before blocking.
fn should_update_max_data(&self) -> bool {
- self.max_rx_data_next != self.max_rx_data &&
- self.max_rx_data_next / 2 > self.max_rx_data - self.rx_data
+ self.flow_control.should_update_max_data()
+ }
+
+ /// Returns the connection level flow control limit.
+ fn max_rx_data(&self) -> u64 {
+ self.flow_control.max_data()
+ }
+
+ /// Returns the updated connection level flow control limit.
+ fn max_rx_data_next(&self) -> u64 {
+ self.flow_control.max_data_next()
}
/// Returns true if the HANDSHAKE_DONE frame needs to be sent.
@@ -5326,7 +5414,7 @@
fn drop_pkt_on_err(
e: Error, recv_count: usize, is_server: bool, trace_id: &str,
) -> Error {
- // On the server, if no other packet has been successflully processed, abort
+ // On the server, if no other packet has been successfully processed, abort
// the connection to avoid keeping the connection open when only junk is
// received.
if is_server && recv_count == 0 {
@@ -5374,7 +5462,7 @@
/// The number of bytes lost.
pub lost_bytes: u64,
- /// The number of stream bytes retranmitted.
+ /// The number of stream bytes retransmitted.
pub stream_retrans_bytes: u64,
/// The current PMTU for the connection.
@@ -5853,8 +5941,8 @@
/// Creates a qlog event for connection transport parameters and TLS fields
#[cfg(feature = "qlog")]
pub fn to_qlog(
- &self, owner: qlog::TransportOwner, cipher: Option<crypto::Algorithm>,
- ) -> qlog::EventData {
+ &self, owner: TransportOwner, cipher: Option<crypto::Algorithm>,
+ ) -> EventData {
let original_destination_connection_id = qlog::HexSlice::maybe_string(
self.original_destination_connection_id.as_ref(),
);
@@ -5868,36 +5956,42 @@
details: None,
});
- qlog::EventData::TransportParametersSet {
- owner: Some(owner),
- resumption_allowed: None,
- early_data_enabled: None,
- tls_cipher: Some(format!("{:?}", cipher)),
- aead_tag_length: None,
- original_destination_connection_id,
- initial_source_connection_id: None,
- retry_source_connection_id: None,
- stateless_reset_token,
- disable_active_migration: Some(self.disable_active_migration),
- max_idle_timeout: Some(self.max_idle_timeout),
- max_udp_payload_size: Some(self.max_udp_payload_size as u32),
- ack_delay_exponent: Some(self.ack_delay_exponent as u16),
- max_ack_delay: Some(self.max_ack_delay as u16),
- active_connection_id_limit: Some(self.active_conn_id_limit as u32),
+ EventData::TransportParametersSet(
+ qlog::events::quic::TransportParametersSet {
+ owner: Some(owner),
+ resumption_allowed: None,
+ early_data_enabled: None,
+ tls_cipher: Some(format!("{:?}", cipher)),
+ aead_tag_length: None,
+ original_destination_connection_id,
+ initial_source_connection_id: None,
+ retry_source_connection_id: None,
+ stateless_reset_token,
+ disable_active_migration: Some(self.disable_active_migration),
+ max_idle_timeout: Some(self.max_idle_timeout),
+ max_udp_payload_size: Some(self.max_udp_payload_size as u32),
+ ack_delay_exponent: Some(self.ack_delay_exponent as u16),
+ max_ack_delay: Some(self.max_ack_delay as u16),
+ active_connection_id_limit: Some(
+ self.active_conn_id_limit as u32,
+ ),
- initial_max_data: Some(self.initial_max_data),
- initial_max_stream_data_bidi_local: Some(
- self.initial_max_stream_data_bidi_local,
- ),
- initial_max_stream_data_bidi_remote: Some(
- self.initial_max_stream_data_bidi_remote,
- ),
- initial_max_stream_data_uni: Some(self.initial_max_stream_data_uni),
- initial_max_streams_bidi: Some(self.initial_max_streams_bidi),
- initial_max_streams_uni: Some(self.initial_max_streams_uni),
+ initial_max_data: Some(self.initial_max_data),
+ initial_max_stream_data_bidi_local: Some(
+ self.initial_max_stream_data_bidi_local,
+ ),
+ initial_max_stream_data_bidi_remote: Some(
+ self.initial_max_stream_data_bidi_remote,
+ ),
+ initial_max_stream_data_uni: Some(
+ self.initial_max_stream_data_uni,
+ ),
+ initial_max_streams_bidi: Some(self.initial_max_streams_bidi),
+ initial_max_streams_uni: Some(self.initial_max_streams_uni),
- preferred_address: None,
- }
+ preferred_address: None,
+ },
+ )
}
}
@@ -7037,7 +7131,7 @@
// Force server to send a single PING frame.
pipe.server.recovery.loss_probes[packet::EPOCH_INITIAL] = 1;
- // Artifically limit the amount of bytes the server can send.
+ // Artificially limit the amount of bytes the server can send.
pipe.server.max_send_bytes = 60;
assert_eq!(pipe.server.send(&mut buf), Err(Error::Done));
@@ -7150,7 +7244,7 @@
max: 30
})
);
- assert_eq!(iter.next(), Some(&frame::Frame::MaxData { max: 46 }));
+ assert_eq!(iter.next(), Some(&frame::Frame::MaxData { max: 61 }));
}
#[test]
@@ -7234,7 +7328,7 @@
let frames = [frame::Frame::Stream {
stream_id: 4,
- data: stream::RangeBuf::from(b"aaaaaaa", 0, false),
+ data: stream::RangeBuf::from(b"aaaaaaaaa", 0, false),
}];
let pkt_type = packet::Type::Short;
@@ -7245,7 +7339,7 @@
let frames = [frame::Frame::Stream {
stream_id: 4,
- data: stream::RangeBuf::from(b"a", 7, false),
+ data: stream::RangeBuf::from(b"a", 9, false),
}];
let len = pipe
@@ -7265,7 +7359,7 @@
iter.next(),
Some(&frame::Frame::MaxStreamData {
stream_id: 4,
- max: 22,
+ max: 24,
})
);
}
@@ -7767,9 +7861,7 @@
let mut pipe = testing::Pipe::default().unwrap();
assert_eq!(pipe.handshake(), Ok(()));
- let frames = [frame::Frame::PathChallenge {
- data: vec![0xba; 8],
- }];
+ let frames = [frame::Frame::PathChallenge { data: [0xba; 8] }];
let pkt_type = packet::Type::Short;
@@ -7788,9 +7880,7 @@
assert_eq!(
iter.next(),
- Some(&frame::Frame::PathResponse {
- data: vec![0xba; 8],
- })
+ Some(&frame::Frame::PathResponse { data: [0xba; 8] })
);
}
@@ -7900,7 +7990,7 @@
assert_eq!(r.next(), None);
loop {
- if pipe.server.stream_send(4, b"world", false) == Ok(0) {
+ if pipe.server.stream_send(4, b"world", false) == Err(Error::Done) {
break;
}
@@ -8596,7 +8686,7 @@
assert_eq!(w.next(), Some(4));
assert_eq!(w.next(), None);
- // Server suts down stream.
+ // Server shuts down stream.
assert_eq!(pipe.server.stream_shutdown(4, Shutdown::Write, 0), Ok(()));
let mut w = pipe.server.writable();
@@ -8912,7 +9002,7 @@
}
#[test]
- /// Tests that the MAX_STREAMS frame is sent for unirectional streams.
+ /// Tests that the MAX_STREAMS frame is sent for unidirectional streams.
fn stream_limit_update_uni() {
let mut config = Config::new(crate::PROTOCOL_VERSION).unwrap();
config
@@ -9080,6 +9170,25 @@
}
#[test]
+ /// Tests that the stream gets created with stream_send() even if there's
+ /// no data in the buffer and the fin flag is not set.
+ fn stream_zero_length_non_fin() {
+ let mut pipe = testing::Pipe::default().unwrap();
+ assert_eq!(pipe.handshake(), Ok(()));
+
+ assert_eq!(pipe.client.stream_send(0, b"", false), Ok(0));
+
+ // The stream now should have been created.
+ assert_eq!(pipe.client.streams.len(), 1);
+ assert_eq!(pipe.advance(), Ok(()));
+
+ // Sending an empty non-fin should not change any stream state on the
+ // other side.
+ let mut r = pipe.server.readable();
+ assert!(r.next().is_none());
+ }
+
+ #[test]
/// Tests that completed streams are garbage collected.
fn collect_streams() {
let mut buf = [0; 65535];
@@ -9471,7 +9580,10 @@
// Send again from blocked stream and make sure it is marked as blocked
// again.
- assert_eq!(pipe.client.stream_send(0, b"aaaaaa", false), Ok(0));
+ assert_eq!(
+ pipe.client.stream_send(0, b"aaaaaa", false),
+ Err(Error::Done)
+ );
assert_eq!(pipe.client.streams.blocked().len(), 1);
let (len, _) = pipe.client.send(&mut buf).unwrap();
@@ -11054,6 +11166,7 @@
mod dgram;
#[cfg(feature = "ffi")]
mod ffi;
+mod flowcontrol;
mod frame;
pub mod h3;
mod minmax;
diff --git a/src/minmax.rs b/quiche/src/minmax.rs
similarity index 97%
rename from src/minmax.rs
rename to quiche/src/minmax.rs
index a8a23fd..8d81c28 100644
--- a/src/minmax.rs
+++ b/quiche/src/minmax.rs
@@ -173,7 +173,7 @@
#[test]
fn reset_filter_rtt() {
- let mut f = Minmax::new(Duration::new(0, 0));
+ let mut f = Minmax::new(Duration::ZERO);
let now = Instant::now();
let rtt = Duration::from_millis(50);
@@ -211,7 +211,7 @@
#[test]
fn get_windowed_min_rtt() {
- let mut f = Minmax::new(Duration::new(0, 0));
+ let mut f = Minmax::new(Duration::ZERO);
let rtt_25 = Duration::from_millis(25);
let rtt_24 = Duration::from_millis(24);
let win = Duration::from_millis(500);
@@ -259,7 +259,7 @@
#[test]
fn get_windowed_max_rtt() {
- let mut f = Minmax::new(Duration::new(0, 0));
+ let mut f = Minmax::new(Duration::ZERO);
let rtt_25 = Duration::from_millis(25);
let rtt_24 = Duration::from_millis(24);
let win = Duration::from_millis(500);
@@ -307,7 +307,7 @@
#[test]
fn get_windowed_min_estimates_rtt() {
- let mut f = Minmax::new(Duration::new(0, 0));
+ let mut f = Minmax::new(Duration::ZERO);
let rtt_25 = Duration::from_millis(25);
let rtt_24 = Duration::from_millis(24);
let rtt_23 = Duration::from_millis(23);
@@ -371,7 +371,7 @@
#[test]
fn get_windowed_max_estimates_rtt() {
- let mut f = Minmax::new(Duration::new(0, 0));
+ let mut f = Minmax::new(Duration::ZERO);
let rtt_25 = Duration::from_millis(25);
let rtt_24 = Duration::from_millis(24);
let rtt_23 = Duration::from_millis(23);
diff --git a/src/octets.rs b/quiche/src/octets.rs
similarity index 99%
rename from src/octets.rs
rename to quiche/src/octets.rs
index 3983667..67d40be 100644
--- a/src/octets.rs
+++ b/quiche/src/octets.rs
@@ -24,8 +24,6 @@
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#![allow(dead_code)]
-
/// Zero-copy abstraction for parsing and constructing network packets.
use std::mem;
use std::ptr;
@@ -95,7 +93,6 @@
let v = $v;
- #[allow(clippy::range_plus_one)]
let dst = &mut $b.buf[$b.off..($b.off + len)];
unsafe {
diff --git a/src/packet.rs b/quiche/src/packet.rs
similarity index 98%
rename from src/packet.rs
rename to quiche/src/packet.rs
index 35a7522..b73e531 100644
--- a/src/packet.rs
+++ b/quiche/src/packet.rs
@@ -112,19 +112,20 @@
}
#[cfg(feature = "qlog")]
- pub(crate) fn to_qlog(self) -> qlog::PacketType {
+ pub(crate) fn to_qlog(self) -> qlog::events::quic::PacketType {
match self {
- Type::Initial => qlog::PacketType::Initial,
+ Type::Initial => qlog::events::quic::PacketType::Initial,
- Type::Retry => qlog::PacketType::Retry,
+ Type::Retry => qlog::events::quic::PacketType::Retry,
- Type::Handshake => qlog::PacketType::Handshake,
+ Type::Handshake => qlog::events::quic::PacketType::Handshake,
- Type::ZeroRTT => qlog::PacketType::ZeroRtt,
+ Type::ZeroRTT => qlog::events::quic::PacketType::ZeroRtt,
- Type::VersionNegotiation => qlog::PacketType::VersionNegotiation,
+ Type::VersionNegotiation =>
+ qlog::events::quic::PacketType::VersionNegotiation,
- Type::Short => qlog::PacketType::OneRtt,
+ Type::Short => qlog::events::quic::PacketType::OneRtt,
}
}
}
@@ -862,13 +863,19 @@
std::u64::MAX,
true,
true,
+ stream::MAX_STREAM_WINDOW,
),
}
}
pub fn clear(&mut self) {
- self.crypto_stream =
- stream::Stream::new(std::u64::MAX, std::u64::MAX, true, true);
+ self.crypto_stream = stream::Stream::new(
+ std::u64::MAX,
+ std::u64::MAX,
+ true,
+ true,
+ stream::MAX_STREAM_WINDOW,
+ );
self.ack_elicited = false;
}
@@ -2805,4 +2812,37 @@
Err(Error::InvalidPacket)
);
}
+
+ #[test]
+ fn decrypt_pkt_too_small() {
+ let mut buf = [0; 65535];
+ let mut b = octets::OctetsMut::with_slice(&mut buf);
+
+ let hdr = Header {
+ ty: Type::Initial,
+ version: crate::PROTOCOL_VERSION,
+ dcid: ConnectionId::default(),
+ scid: ConnectionId::default(),
+ pkt_num: 0,
+ pkt_num_len: 0,
+ token: None,
+ versions: None,
+ key_phase: false,
+ };
+
+ hdr.to_bytes(&mut b).unwrap();
+
+ b.put_bytes(&[0; 1]).unwrap();
+
+ // No space for decryption.
+ let payload_len = 1;
+
+ let (aead, _) =
+ crypto::derive_initial_key_material(b"", hdr.version, true).unwrap();
+
+ assert_eq!(
+ decrypt_pkt(&mut b, 0, 1, payload_len, &aead),
+ Err(Error::CryptoFail)
+ );
+ }
}
diff --git a/src/rand.rs b/quiche/src/rand.rs
similarity index 100%
rename from src/rand.rs
rename to quiche/src/rand.rs
diff --git a/src/ranges.rs b/quiche/src/ranges.rs
similarity index 99%
rename from src/ranges.rs
rename to quiche/src/ranges.rs
index c390873..b10eb35 100644
--- a/src/ranges.rs
+++ b/quiche/src/ranges.rs
@@ -108,7 +108,6 @@
}
pub fn push_item(&mut self, item: u64) {
- #[allow(clippy::range_plus_one)]
self.insert(item..item + 1);
}
diff --git a/src/recovery/cubic.rs b/quiche/src/recovery/cubic.rs
similarity index 93%
rename from src/recovery/cubic.rs
rename to quiche/src/recovery/cubic.rs
index 9903866..bd0eebe 100644
--- a/src/recovery/cubic.rs
+++ b/quiche/src/recovery/cubic.rs
@@ -45,6 +45,7 @@
use crate::recovery::Recovery;
pub static CUBIC: CongestionControlOps = CongestionControlOps {
+ on_init,
on_packet_sent,
on_packet_acked,
congestion_event,
@@ -52,6 +53,7 @@
checkpoint,
rollback,
has_custom_pacing,
+ debug_fmt,
};
/// CUBIC Constants.
@@ -61,9 +63,12 @@
const C: f64 = 0.4;
-/// The packet count threshold to restore to the prior state if the
-/// lost packet count since the last checkpoint is less than the threshold.
-const RESTORE_COUNT_THRESHOLD: usize = 10;
+/// Threshold for rolling back state, as percentage of lost packets relative to
+/// cwnd.
+const ROLLBACK_THRESHOLD_PERCENT: usize = 20;
+
+/// Minimum threshold for rolling back state, as number of packets.
+const MIN_ROLLBACK_THRESHOLD: usize = 2;
/// Default value of alpha_aimd in the beginning of congestion avoidance.
const ALPHA_AIMD: f64 = 3.0 * (1.0 - BETA_CUBIC) / (1.0 + BETA_CUBIC);
@@ -140,6 +145,8 @@
}
}
+fn on_init(_r: &mut Recovery) {}
+
fn collapse_cwnd(r: &mut Recovery) {
let cubic = &mut r.cubic_state;
@@ -210,12 +217,16 @@
// <https://tools.ietf.org/id/draft-ietf-tcpm-rfc8312bis-00.html#section-4.9>
//
// When the recovery episode ends with recovering
- // a few packets (less than RESTORE_COUNT_THRESHOLD), it's considered
- // as spurious and restore to the previous state.
+ // a few packets (less than cwnd / mss * ROLLBACK_THRESHOLD_PERCENT(%)), it's
+ // considered as spurious and restore to the previous state.
if r.congestion_recovery_start_time.is_some() {
let new_lost = r.lost_count - r.cubic_state.prior.lost_count;
+ let rollback_threshold = (r.congestion_window / r.max_datagram_size) *
+ ROLLBACK_THRESHOLD_PERCENT /
+ 100;
+ let rollback_threshold = rollback_threshold.max(MIN_ROLLBACK_THRESHOLD);
- if new_lost < RESTORE_COUNT_THRESHOLD {
+ if new_lost < rollback_threshold {
let did_rollback = rollback(r);
if did_rollback {
@@ -385,6 +396,11 @@
}
fn rollback(r: &mut Recovery) -> bool {
+ // Don't go back to slow start.
+ if r.cubic_state.prior.congestion_window < r.cubic_state.prior.ssthresh {
+ return false;
+ }
+
if r.congestion_window >= r.cubic_state.prior.congestion_window {
return false;
}
@@ -402,6 +418,14 @@
false
}
+fn debug_fmt(r: &Recovery, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ write!(
+ f,
+ "cubic={{ k={} w_max={} }} ",
+ r.cubic_state.k, r.cubic_state.w_max
+ )
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -574,7 +598,7 @@
now += rtt;
// To avoid rollback
- r.lost_count += RESTORE_COUNT_THRESHOLD;
+ r.lost_count += MIN_ROLLBACK_THRESHOLD;
// During Congestion Avoidance, it will take
// 5 ACKs to increase cwnd by 1 MSS.
@@ -928,7 +952,39 @@
now + rtt + Duration::from_millis(5),
);
- // cwnd is restored to the previous one.
+ // This is from slow start, no rollback.
+ assert_eq!(r.cwnd(), cur_cwnd);
+
+ let now = now + rtt;
+
+ // Trigger another congestion event.
+ let prev_cwnd = r.cwnd();
+ r.congestion_event(now, packet::EPOCH_APPLICATION, now);
+
+ // After congestion event, cwnd will be reduced.
+ let cur_cwnd = (cur_cwnd as f64 * BETA_CUBIC) as usize;
+ assert_eq!(r.cwnd(), cur_cwnd);
+
+ let rtt = Duration::from_millis(100);
+
+ let acked = vec![Acked {
+ pkt_num: 0,
+ // To exit from recovery
+ time_sent: now + rtt,
+ size: r.max_datagram_size,
+ }];
+
+ // Ack more than cwnd bytes with rtt=100ms.
+ r.update_rtt(rtt, Duration::from_millis(0), now);
+
+ // Trigger detecting sprurious congestion event.
+ r.on_packets_acked(
+ acked,
+ packet::EPOCH_APPLICATION,
+ now + rtt + Duration::from_millis(5),
+ );
+
+ // cwnd is rolled back to the previous one.
assert_eq!(r.cwnd(), prev_cwnd);
}
@@ -961,7 +1017,7 @@
now += rtt;
// To avoid rollback
- r.lost_count += RESTORE_COUNT_THRESHOLD;
+ r.lost_count += MIN_ROLLBACK_THRESHOLD;
// During Congestion Avoidance, it will take
// 5 ACKs to increase cwnd by 1 MSS.
diff --git a/src/recovery/delivery_rate.rs b/quiche/src/recovery/delivery_rate.rs
similarity index 100%
rename from src/recovery/delivery_rate.rs
rename to quiche/src/recovery/delivery_rate.rs
diff --git a/src/recovery/hystart.rs b/quiche/src/recovery/hystart.rs
similarity index 98%
rename from src/recovery/hystart.rs
rename to quiche/src/recovery/hystart.rs
index 7c3d625..dc73cdc 100644
--- a/src/recovery/hystart.rs
+++ b/quiche/src/recovery/hystart.rs
@@ -77,7 +77,7 @@
write!(f, "css_baseline_min_rtt={:?} ", self.css_baseline_min_rtt)?;
write!(f, "rtt_sample_count={:?} ", self.rtt_sample_count)?;
write!(f, "css_start_time={:?} ", self.css_start_time)?;
- write!(f, "css_round_count={:?} ", self.css_round_count)?;
+ write!(f, "css_round_count={:?}", self.css_round_count)?;
Ok(())
}
diff --git a/src/recovery/mod.rs b/quiche/src/recovery/mod.rs
similarity index 97%
rename from src/recovery/mod.rs
rename to quiche/src/recovery/mod.rs
index 1e5334a..e3c6780 100644
--- a/src/recovery/mod.rs
+++ b/quiche/src/recovery/mod.rs
@@ -42,6 +42,9 @@
use crate::packet;
use crate::ranges;
+#[cfg(feature = "qlog")]
+use qlog::events::EventData;
+
// Loss Recovery
const INITIAL_PACKET_THRESHOLD: u64 = 3;
@@ -168,7 +171,7 @@
largest_sent_pkt: [0; packet::EPOCH_COUNT],
- latest_rtt: Duration::new(0, 0),
+ latest_rtt: Duration::ZERO,
// This field should be initialized to `INITIAL_RTT` for the initial
// PTO calculation, but it also needs to be an `Option` to track
@@ -176,13 +179,13 @@
// handled by the `rtt()` method instead.
smoothed_rtt: None,
- minmax_filter: minmax::Minmax::new(Duration::new(0, 0)),
+ minmax_filter: minmax::Minmax::new(Duration::ZERO),
- min_rtt: Duration::new(0, 0),
+ min_rtt: Duration::ZERO,
rttvar: INITIAL_RTT / 2,
- max_ack_delay: Duration::new(0, 0),
+ max_ack_delay: Duration::ZERO,
loss_time: [None; packet::EPOCH_COUNT],
@@ -243,6 +246,10 @@
}
}
+ pub fn on_init(&mut self) {
+ (self.cc_ops.on_init)(self);
+ }
+
pub fn on_packet_sent(
&mut self, mut pkt: Sent, epoch: packet::Epoch,
handshake_status: HandshakeStatus, now: Instant, trace_id: &str,
@@ -668,7 +675,6 @@
let mut time = self.loss_time[epoch];
// Iterate over all packet number spaces starting from Handshake.
- #[allow(clippy::needless_range_loop)]
for e in packet::EPOCH_HANDSHAKE..packet::EPOCH_COUNT {
let new_time = self.loss_time[e];
@@ -920,7 +926,7 @@
}
#[cfg(feature = "qlog")]
- pub fn maybe_qlog(&mut self) -> Option<qlog::EventData> {
+ pub fn maybe_qlog(&mut self) -> Option<EventData> {
let qlog_metrics = QlogMetrics {
min_rtt: self.min_rtt,
smoothed_rtt: self.rtt(),
@@ -965,6 +971,8 @@
}
pub struct CongestionControlOps {
+ pub on_init: fn(r: &mut Recovery),
+
pub on_packet_sent: fn(r: &mut Recovery, sent_bytes: usize, now: Instant),
pub on_packet_acked:
@@ -984,6 +992,9 @@
pub rollback: fn(r: &mut Recovery) -> bool,
pub has_custom_pacing: fn() -> bool,
+
+ pub debug_fmt:
+ fn(r: &Recovery, formatter: &mut std::fmt::Formatter) -> std::fmt::Result,
}
impl From<CongestionControlAlgorithm> for &'static CongestionControlOps {
@@ -1041,6 +1052,9 @@
write!(f, "hystart={:?} ", self.hystart)?;
}
+ // CC-specific debug info
+ (self.cc_ops.debug_fmt)(self, f)?;
+
Ok(())
}
}
@@ -1155,7 +1169,7 @@
// This function diffs each of the fields. A qlog MetricsUpdated event is
// only generated if at least one field is different. Where fields are
// different, the qlog event contains the latest value.
- fn maybe_update(&mut self, latest: Self) -> Option<qlog::EventData> {
+ fn maybe_update(&mut self, latest: Self) -> Option<EventData> {
let mut emit_event = false;
let new_min_rtt = if self.min_rtt != latest.min_rtt {
@@ -1217,18 +1231,20 @@
if emit_event {
// QVis can't use all these fields and they can be large.
- return Some(qlog::EventData::MetricsUpdated {
- min_rtt: new_min_rtt,
- smoothed_rtt: new_smoothed_rtt,
- latest_rtt: new_latest_rtt,
- rtt_variance: new_rttvar,
- pto_count: None,
- congestion_window: new_cwnd,
- bytes_in_flight: new_bytes_in_flight,
- ssthresh: new_ssthresh,
- packets_in_flight: None,
- pacing_rate: None,
- });
+ return Some(EventData::MetricsUpdated(
+ qlog::events::quic::MetricsUpdated {
+ min_rtt: new_min_rtt,
+ smoothed_rtt: new_smoothed_rtt,
+ latest_rtt: new_latest_rtt,
+ rtt_variance: new_rttvar,
+ pto_count: None,
+ congestion_window: new_cwnd,
+ bytes_in_flight: new_bytes_in_flight,
+ ssthresh: new_ssthresh,
+ packets_in_flight: None,
+ pacing_rate: None,
+ },
+ ));
}
None
diff --git a/src/recovery/prr.rs b/quiche/src/recovery/prr.rs
similarity index 100%
rename from src/recovery/prr.rs
rename to quiche/src/recovery/prr.rs
diff --git a/src/recovery/reno.rs b/quiche/src/recovery/reno.rs
similarity index 98%
rename from src/recovery/reno.rs
rename to quiche/src/recovery/reno.rs
index ba01ad3..2e6b682 100644
--- a/src/recovery/reno.rs
+++ b/quiche/src/recovery/reno.rs
@@ -39,6 +39,7 @@
use crate::recovery::Recovery;
pub static RENO: CongestionControlOps = CongestionControlOps {
+ on_init,
on_packet_sent,
on_packet_acked,
congestion_event,
@@ -46,8 +47,11 @@
checkpoint,
rollback,
has_custom_pacing,
+ debug_fmt,
};
+pub fn on_init(_r: &mut Recovery) {}
+
pub fn on_packet_sent(r: &mut Recovery, sent_bytes: usize, _now: Instant) {
r.bytes_in_flight += sent_bytes;
}
@@ -146,6 +150,10 @@
false
}
+fn debug_fmt(_r: &Recovery, _f: &mut std::fmt::Formatter) -> std::fmt::Result {
+ Ok(())
+}
+
#[cfg(test)]
mod tests {
use super::*;
diff --git a/src/stream.rs b/quiche/src/stream.rs
similarity index 90%
rename from src/stream.rs
rename to quiche/src/stream.rs
index da15ec4..2c91666 100644
--- a/src/stream.rs
+++ b/quiche/src/stream.rs
@@ -30,16 +30,19 @@
use std::collections::hash_map;
-use std::collections::BTreeSet;
use std::collections::BTreeMap;
+use std::collections::BTreeSet;
use std::collections::BinaryHeap;
use std::collections::HashMap;
use std::collections::HashSet;
use std::collections::VecDeque;
+use std::time;
+
use crate::Error;
use crate::Result;
+use crate::flowcontrol;
use crate::ranges;
const DEFAULT_URGENCY: u8 = 127;
@@ -50,18 +53,57 @@
#[cfg(not(test))]
const SEND_BUFFER_SIZE: usize = 4096;
+// The default size of the receiver stream flow control window.
+const DEFAULT_STREAM_WINDOW: u64 = 32 * 1024;
+
+/// The maximum size of the receiver stream flow control window.
+pub const MAX_STREAM_WINDOW: u64 = 16 * 1024 * 1024;
+
+/// A simple no-op hasher for Stream IDs.
+///
+/// The QUIC protocol and quiche library guarantees stream ID uniqueness, so
+/// we can save effort by avoiding using a more complicated algorithm.
+#[derive(Default)]
+pub struct StreamIdHasher {
+ id: u64,
+}
+
+impl std::hash::Hasher for StreamIdHasher {
+ #[inline]
+ fn finish(&self) -> u64 {
+ self.id
+ }
+
+ #[inline]
+ fn write_u64(&mut self, id: u64) {
+ self.id = id;
+ }
+
+ #[inline]
+ fn write(&mut self, _: &[u8]) {
+ // We need a default write() for the trait but stream IDs will always
+ // be a u64 so we just delegate to write_u64.
+ unimplemented!()
+ }
+}
+
+type BuildStreamIdHasher = std::hash::BuildHasherDefault<StreamIdHasher>;
+
+pub type StreamIdHashMap<V> = HashMap<u64, V, BuildStreamIdHasher>;
+pub type StreamIdHashSet = HashSet<u64, BuildStreamIdHasher>;
+
/// Keeps track of QUIC streams and enforces stream limits.
#[derive(Default)]
pub struct StreamMap {
/// Map of streams indexed by stream ID.
- streams: HashMap<u64, Stream>,
+ streams: StreamIdHashMap<Stream>,
/// Set of streams that were completed and garbage collected.
///
/// Instead of keeping the full stream state forever, we collect completed
/// streams to save memory, but we still need to keep track of previously
/// created streams, to prevent peers from re-creating them.
- collected: HashSet<u64>,
+ collected: StreamIdHashSet,
/// Peer's maximum bidirectional stream count limit.
peer_max_streams_bidi: u64,
@@ -122,21 +164,26 @@
/// Set of stream IDs corresponding to streams that are blocked. The value
/// of the map elements represents the offset of the stream at which the
/// blocking occurred.
- blocked: HashMap<u64, u64>,
+ blocked: StreamIdHashMap<u64>,
/// Set of stream IDs corresponding to streams that are reset. The value
/// of the map elements is a tuple of the error code and final size values
/// to include in the RESET_STREAM frame.
- reset: HashMap<u64, (u64, u64)>,
+ reset: StreamIdHashMap<(u64, u64)>,
/// Set of stream IDs corresponding to streams that are shutdown on the
/// receive side, and need to send a STOP_SENDING frame. The value of the
/// map elements is the error code to include in the STOP_SENDING frame.
- stopped: HashMap<u64, u64>,
+ stopped: StreamIdHashMap<u64>,
+
+ /// The maximum size of a stream window.
+ max_stream_window: u64,
}
impl StreamMap {
- pub fn new(max_streams_bidi: u64, max_streams_uni: u64) -> StreamMap {
+ pub fn new(
+ max_streams_bidi: u64, max_streams_uni: u64, max_stream_window: u64,
+ ) -> StreamMap {
StreamMap {
local_max_streams_bidi: max_streams_bidi,
local_max_streams_bidi_next: max_streams_bidi,
@@ -144,6 +191,8 @@
local_max_streams_uni: max_streams_uni,
local_max_streams_uni_next: max_streams_uni,
+ max_stream_window,
+
..StreamMap::default()
}
}
@@ -249,7 +298,13 @@
},
};
- let s = Stream::new(max_rx_data, max_tx_data, is_bidi(id), local);
+ let s = Stream::new(
+ max_rx_data,
+ max_tx_data,
+ is_bidi(id),
+ local,
+ self.max_stream_window,
+ );
v.insert(s)
},
@@ -579,9 +634,10 @@
/// Creates a new stream with the given flow control limits.
pub fn new(
max_rx_data: u64, max_tx_data: u64, bidi: bool, local: bool,
+ max_window: u64,
) -> Stream {
Stream {
- recv: RecvBuf::new(max_rx_data),
+ recv: RecvBuf::new(max_rx_data, max_window),
send: SendBuf::new(max_tx_data),
bidi,
local,
@@ -653,7 +709,6 @@
}
impl StreamIter {
- #[inline]
fn from(streams: &BTreeSet<u64>) -> Self {
StreamIter {
streams: streams.iter().rev().copied().collect(),
@@ -694,11 +749,8 @@
/// The total length of data received on this stream.
len: u64,
- /// The maximum offset the peer is allowed to send us.
- max_data: u64,
-
- /// The updated maximum offset the peer is allowed to send us.
- max_data_next: u64,
+ /// Receiver flow controller.
+ flow_control: flowcontrol::FlowControl,
/// The final stream offset received from the peer, if any.
fin_off: Option<u64>,
@@ -712,10 +764,13 @@
impl RecvBuf {
/// Creates a new receive buffer.
- fn new(max_data: u64) -> RecvBuf {
+ fn new(max_data: u64, max_window: u64) -> RecvBuf {
RecvBuf {
- max_data,
- max_data_next: max_data,
+ flow_control: flowcontrol::FlowControl::new(
+ max_data,
+ cmp::min(max_data, DEFAULT_STREAM_WINDOW),
+ max_window,
+ ),
..RecvBuf::default()
}
}
@@ -726,7 +781,7 @@
/// as handling incoming data that overlaps data that is already in the
/// buffer.
pub fn write(&mut self, buf: RangeBuf) -> Result<()> {
- if buf.max_off() > self.max_data {
+ if buf.max_off() > self.max_data() {
return Err(Error::FlowControl);
}
@@ -781,11 +836,10 @@
}
}
- let mut tmp_buf = Some(buf);
+ let mut tmp_bufs = VecDeque::with_capacity(2);
+ tmp_bufs.push_back(buf);
- while let Some(mut buf) = tmp_buf {
- tmp_buf = None;
-
+ 'tmp: while let Some(mut buf) = tmp_bufs.pop_front() {
// Discard incoming data below current stream offset. Bytes up to
// `self.off` have already been received so we should not buffer
// them again. This is also important to make sure `ready()` doesn't
@@ -805,7 +859,7 @@
for b in &self.data {
// New buffer is fully contained in existing buffer.
if buf.off() >= b.off() && buf.max_off() <= b.max_off() {
- return Ok(());
+ continue 'tmp;
}
// New buffer's start overlaps existing buffer.
@@ -815,8 +869,9 @@
// New buffer's end overlaps existing buffer.
if buf.off() < b.off() && buf.max_off() > b.off() {
- tmp_buf =
- Some(buf.split_off((b.off() - buf.off()) as usize));
+ tmp_bufs.push_back(
+ buf.split_off((b.off() - buf.off()) as usize),
+ );
}
}
}
@@ -879,7 +934,8 @@
std::collections::binary_heap::PeekMut::pop(buf);
}
- self.max_data_next = self.max_data_next.saturating_add(len as u64);
+ // Update consumed bytes for flow control.
+ self.flow_control.add_consumed(len as u64);
Ok((len, self.is_fin()))
}
@@ -922,13 +978,28 @@
}
/// Commits the new max_data limit.
- pub fn update_max_data(&mut self) {
- self.max_data = self.max_data_next;
+ pub fn update_max_data(&mut self, now: time::Instant) {
+ self.flow_control.update_max_data(now);
}
/// Return the new max_data limit.
pub fn max_data_next(&mut self) -> u64 {
- self.max_data_next
+ self.flow_control.max_data_next()
+ }
+
+ /// Return the current flow control limit.
+ fn max_data(&self) -> u64 {
+ self.flow_control.max_data()
+ }
+
+ /// Return the current window.
+ pub fn window(&self) -> u64 {
+ self.flow_control.window()
+ }
+
+ /// Autotune the window size.
+ pub fn autotune_window(&mut self, now: time::Instant, rtt: time::Duration) {
+ self.flow_control.autotune_window(now, rtt);
}
/// Shuts down receiving data.
@@ -953,11 +1024,7 @@
/// Returns true if we need to update the local flow control limit.
pub fn almost_full(&self) -> bool {
- // Send MAX_STREAM_DATA when the new limit is at least double the
- // amount of data that can be received before blocking.
- self.fin_off.is_none() &&
- self.max_data_next != self.max_data &&
- self.max_data_next / 2 > self.max_data - self.len
+ self.fin_off.is_none() && self.flow_control.should_update_max_data()
}
/// Returns the largest offset ever received.
@@ -1340,7 +1407,7 @@
/// Returns true if all data in the stream has been sent.
///
- /// This happens when the stream's send final size is knwon, and the
+ /// This happens when the stream's send final size is known, and the
/// application has already written data up to that point.
pub fn is_fin(&self) -> bool {
if self.fin_off == Some(self.off) {
@@ -1414,7 +1481,7 @@
pub struct RangeBuf {
/// The internal buffer holding the data.
///
- /// To avoid neeless allocations when a RangeBuf is split, this field is
+ /// To avoid needless allocations when a RangeBuf is split, this field is
/// reference-counted and can be shared between multiple RangeBuf objects,
/// and sliced using the `start` and `len` values.
data: Arc<Vec<u8>>,
@@ -1480,12 +1547,12 @@
/// Splits the buffer into two at the given index.
pub fn split_off(&mut self, at: usize) -> RangeBuf {
- if at > self.len {
- panic!(
- "`at` split index (is {}) should be <= len (is {})",
- at, self.len
- );
- }
+ assert!(
+ at <= self.len,
+ "`at` split index (is {}) should be <= len (is {})",
+ at,
+ self.len
+ );
let buf = RangeBuf {
data: self.data.clone(),
@@ -1537,7 +1604,7 @@
#[test]
fn empty_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1547,7 +1614,7 @@
#[test]
fn empty_stream_frame() {
- let mut recv = RecvBuf::new(15);
+ let mut recv = RecvBuf::new(15, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let buf = RangeBuf::from(b"hello", 0, false);
@@ -1603,7 +1670,7 @@
#[test]
fn ordered_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1640,7 +1707,7 @@
#[test]
fn split_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1680,7 +1747,7 @@
#[test]
fn incomplete_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1708,7 +1775,7 @@
#[test]
fn zero_len_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1736,7 +1803,7 @@
#[test]
fn past_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1775,7 +1842,7 @@
#[test]
fn fully_overlapping_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1806,7 +1873,7 @@
#[test]
fn fully_overlapping_read2() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1837,7 +1904,7 @@
#[test]
fn fully_overlapping_read3() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1868,7 +1935,7 @@
#[test]
fn fully_overlapping_read_multi() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1905,7 +1972,7 @@
#[test]
fn overlapping_start_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1935,7 +2002,7 @@
#[test]
fn overlapping_end_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -1964,8 +2031,92 @@
}
#[test]
+ fn overlapping_end_twice_read() {
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
+ assert_eq!(recv.len, 0);
+
+ let mut buf = [0; 32];
+
+ let first = RangeBuf::from(b"he", 0, false);
+ let second = RangeBuf::from(b"ow", 4, false);
+ let third = RangeBuf::from(b"rl", 7, false);
+ let fourth = RangeBuf::from(b"helloworld", 0, true);
+
+ assert!(recv.write(third).is_ok());
+ assert_eq!(recv.len, 9);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 1);
+
+ assert!(recv.write(second).is_ok());
+ assert_eq!(recv.len, 9);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 2);
+
+ assert!(recv.write(first).is_ok());
+ assert_eq!(recv.len, 9);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 3);
+
+ assert!(recv.write(fourth).is_ok());
+ assert_eq!(recv.len, 10);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 6);
+
+ let (len, fin) = recv.emit(&mut buf).unwrap();
+ assert_eq!(len, 10);
+ assert_eq!(fin, true);
+ assert_eq!(&buf[..len], b"helloworld");
+ assert_eq!(recv.len, 10);
+ assert_eq!(recv.off, 10);
+
+ assert_eq!(recv.emit(&mut buf), Err(Error::Done));
+ }
+
+ #[test]
+ fn overlapping_end_twice_and_contained_read() {
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
+ assert_eq!(recv.len, 0);
+
+ let mut buf = [0; 32];
+
+ let first = RangeBuf::from(b"hellow", 0, false);
+ let second = RangeBuf::from(b"barfoo", 10, true);
+ let third = RangeBuf::from(b"rl", 7, false);
+ let fourth = RangeBuf::from(b"elloworldbarfoo", 1, true);
+
+ assert!(recv.write(third).is_ok());
+ assert_eq!(recv.len, 9);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 1);
+
+ assert!(recv.write(second).is_ok());
+ assert_eq!(recv.len, 16);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 2);
+
+ assert!(recv.write(first).is_ok());
+ assert_eq!(recv.len, 16);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 3);
+
+ assert!(recv.write(fourth).is_ok());
+ assert_eq!(recv.len, 16);
+ assert_eq!(recv.off, 0);
+ assert_eq!(recv.data.len(), 5);
+
+ let (len, fin) = recv.emit(&mut buf).unwrap();
+ assert_eq!(len, 16);
+ assert_eq!(fin, true);
+ assert_eq!(&buf[..len], b"helloworldbarfoo");
+ assert_eq!(recv.len, 16);
+ assert_eq!(recv.off, 16);
+
+ assert_eq!(recv.emit(&mut buf), Err(Error::Done));
+ }
+
+ #[test]
fn partially_multi_overlapping_reordered_read() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -2002,7 +2153,7 @@
#[test]
fn partially_multi_overlapping_reordered_read2() {
- let mut recv = RecvBuf::new(std::u64::MAX);
+ let mut recv = RecvBuf::new(std::u64::MAX, DEFAULT_STREAM_WINDOW);
assert_eq!(recv.len, 0);
let mut buf = [0; 32];
@@ -2292,7 +2443,7 @@
#[test]
fn recv_flow_control() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let mut buf = [0; 32];
@@ -2313,7 +2464,7 @@
assert!(stream.recv.almost_full());
- stream.recv.update_max_data();
+ stream.recv.update_max_data(time::Instant::now());
assert_eq!(stream.recv.max_data_next(), 25);
assert!(!stream.recv.almost_full());
@@ -2323,7 +2474,7 @@
#[test]
fn recv_past_fin() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, true);
@@ -2335,7 +2486,7 @@
#[test]
fn recv_fin_dup() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, true);
@@ -2353,7 +2504,7 @@
#[test]
fn recv_fin_change() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, true);
@@ -2365,7 +2516,7 @@
#[test]
fn recv_fin_lower_than_received() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, true);
@@ -2377,7 +2528,7 @@
#[test]
fn recv_fin_flow_control() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let mut buf = [0; 32];
@@ -2397,7 +2548,7 @@
#[test]
fn recv_fin_reset_mismatch() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, true);
@@ -2408,7 +2559,7 @@
#[test]
fn recv_reset_dup() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, false);
@@ -2420,7 +2571,7 @@
#[test]
fn recv_reset_change() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, false);
@@ -2432,7 +2583,7 @@
#[test]
fn recv_reset_lower_than_received() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
assert!(!stream.recv.almost_full());
let first = RangeBuf::from(b"hello", 0, false);
@@ -2445,7 +2596,7 @@
fn send_flow_control() {
let mut buf = [0; 25];
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
let first = b"hello";
let second = b"world";
@@ -2488,7 +2639,7 @@
#[test]
fn send_past_fin() {
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
let first = b"hello";
let second = b"world";
@@ -2504,7 +2655,7 @@
#[test]
fn send_fin_dup() {
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", true), Ok(5));
assert!(stream.send.is_fin());
@@ -2515,7 +2666,7 @@
#[test]
fn send_undo_fin() {
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", true), Ok(5));
assert!(stream.send.is_fin());
@@ -2530,7 +2681,7 @@
fn send_fin_max_data_match() {
let mut buf = [0; 15];
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
let slice = b"hellohellohello";
@@ -2546,7 +2697,7 @@
fn send_fin_zero_length() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"", true), Ok(0));
@@ -2562,7 +2713,7 @@
fn send_ack() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"world", false), Ok(5));
@@ -2592,7 +2743,7 @@
fn send_ack_reordering() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"world", false), Ok(5));
@@ -2629,7 +2780,7 @@
#[test]
fn recv_data_below_off() {
- let mut stream = Stream::new(15, 0, true, true);
+ let mut stream = Stream::new(15, 0, true, true, DEFAULT_STREAM_WINDOW);
let first = RangeBuf::from(b"hello", 0, false);
@@ -2651,7 +2802,7 @@
#[test]
fn stream_complete() {
- let mut stream = Stream::new(30, 30, true, true);
+ let mut stream = Stream::new(30, 30, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"world", false), Ok(5));
@@ -2694,7 +2845,7 @@
fn send_fin_zero_length_output() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 15, true, true);
+ let mut stream = Stream::new(0, 15, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.off_front(), 0);
@@ -2719,7 +2870,7 @@
fn send_emit() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 20, true, true);
+ let mut stream = Stream::new(0, 20, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"world", false), Ok(5));
@@ -2771,7 +2922,7 @@
fn send_emit_ack() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 20, true, true);
+ let mut stream = Stream::new(0, 20, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"world", false), Ok(5));
@@ -2838,7 +2989,7 @@
fn send_emit_retransmit() {
let mut buf = [0; 5];
- let mut stream = Stream::new(0, 20, true, true);
+ let mut stream = Stream::new(0, 20, true, true, DEFAULT_STREAM_WINDOW);
assert_eq!(stream.send.write(b"hello", false), Ok(5));
assert_eq!(stream.send.write(b"world", false), Ok(5));
diff --git a/src/tls.rs b/quiche/src/tls.rs
similarity index 99%
rename from src/tls.rs
rename to quiche/src/tls.rs
index 3111ea1..09159bb 100644
--- a/src/tls.rs
+++ b/quiche/src/tls.rs
@@ -609,7 +609,7 @@
Some(sigalg.to_string())
}
- pub fn peer_cert(&self) -> Option<Vec<u8>> {
+ pub fn peer_cert(&self) -> Option<&[u8]> {
let peer_cert = unsafe {
let chain =
map_result_ptr(SSL_get0_peer_certificates(self.as_ptr())).ok()?;
@@ -620,14 +620,14 @@
let buffer =
map_result_ptr(sk_value(chain, 0) as *const CRYPTO_BUFFER)
.ok()?;
+
let out_len = CRYPTO_BUFFER_len(buffer);
if out_len == 0 {
return None;
}
let out = CRYPTO_BUFFER_data(buffer);
- let der = slice::from_raw_parts(out, out_len as usize);
- der.to_vec()
+ slice::from_raw_parts(out, out_len as usize)
};
Some(peer_cert)
diff --git a/tools/http3_test/Cargo.toml b/tools/http3_test/Cargo.toml
index fa3be83..10bb0c3 100644
--- a/tools/http3_test/Cargo.toml
+++ b/tools/http3_test/Cargo.toml
@@ -15,4 +15,4 @@
serde = "1.0"
serde_json = "1.0"
serde_derive = "1.0"
-quiche = { path = "../../"}
+quiche = { path = "../../quiche"}
diff --git a/tools/qlog/README.md b/tools/qlog/README.md
deleted file mode 100644
index 748c44c..0000000
--- a/tools/qlog/README.md
+++ /dev/null
@@ -1,308 +0,0 @@
-The qlog crate is an implementation of the qlog [main logging schema],
-[QUIC event definitions], and [HTTP/3 and QPACK event definitions].
-The crate provides a qlog data model that can be used for traces with
-events. It supports serialization and deserialization but defers logging IO
-choices to applications.
-
-The crate uses Serde for conversion between Rust and JSON.
-
-[main logging schema]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
-[QUIC event definitions]:
-https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-quic-events.html
-[HTTP/3 and QPACK event definitions]:
-https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-h3-events.html
-
-Overview
---------
-qlog is a hierarchical logging format, with a rough structure of:
-
-* Log
- * Trace(s)
- * Event(s)
-
-In practice, a single QUIC connection maps to a single Trace file with one
-or more Events. Applications can decide whether to combine Traces from
-different connections into the same Log.
-
-## Traces
-
-A [`Trace`] contains metadata such as the [`VantagePoint`] of capture and
-the [`Configuration`], and protocol event data in the [`Event`] array.
-
-## Writing out logs
-As events occur during the connection, the application appends them to the
-trace. The qlog crate supports two modes of writing logs: the buffered mode
-stores everything in memory and requires the application to serialize and write
-the output, the streaming mode progressively writes serialized JSON output to a
-writer designated by the application.
-
-Buffered Mode
----------------
-
-Create the trace:
-
-```rust
-let mut trace = qlog::Trace::new(
- qlog::VantagePoint {
- name: Some("Example client".to_string()),
- ty: qlog::VantagePointType::Client,
- flow: None,
- },
- Some("Example qlog trace".to_string()),
- Some("Example qlog trace description".to_string()),
- Some(qlog::Configuration {
- time_offset: Some(0.0),
- original_uris: None,
- }),
- None,
-);
-```
-
-### Adding events
-
-Qlog `Event` objects are added to `qlog::Trace.events`.
-
-The following example demonstrates how to log a qlog QUIC `packet_sent` event
-containing a single Crypto frame. It constructs the necessary elements of the
-[`Event`], then appends it to the trace with [`push_event()`].
-
-```rust
-let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
-let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
-
-let pkt_hdr = qlog::PacketHeader::new(
- qlog::PacketType::Initial,
- 0, // packet_number
- None, // flags
- None, // token
- None, // length
- Some(0xff00001b),
- Some(b"7e37e4dcc6682da8"),
- Some(&dcid),
-);
-
-let frames = let frames = vec![qlog::QuicFrame::Crypto {
- offset: 0,
- length: 0,
-}];
-
-let raw = qlog::RawInfo {
- length: Some(1251),
- payload_length: Some(1224),
- data: None,
-};
-
-let event_data = qlog::EventData::PacketSent {
- header: pkt_hdr,
- frames: Some(frames),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: Some(raw),
- datagram_id: None,
-};
-
-trace.push_event(qlog::Event::with_time(0.0, event_data));
-```
-
-### Serializing
-
-The qlog crate has only been tested with `serde_json`, however other serializer
-targets might work.
-
-For example, serializing the trace created above:
-
-```rust
-serde_json::to_string_pretty(&trace).unwrap();
-```
-
-would generate the following:
-
-```
- {
- "vantage_point": {
- "name": "Example client",
- "type": "client"
- },
- "title": "Example qlog trace",
- "description": "Example qlog trace description",
- "configuration": {
- "time_offset": 0.0
- },
- "events": [
- [
- 0,
- "transport",
- "packet_sent",
- {
- "header": {
- "packet_type": "initial",
- "packet_number": 0,
- "version": "ff00001d",
- "scil": 8,
- "dcil": 8,
- "scid": "7e37e4dcc6682da8",
- "dcid": "36ce104eee50101c"
- },
- "raw": {
- "length": 1251,
- "payload_length": 1224
- },
- "frames": [
- {
- "frame_type": "crypto",
- "offset": 0,
- "length": 100,
- }
- ]
- }
- ]
- ]
- }
-```
-
-Streaming Mode
----------------
-
-Create the trace:
-
-```rust
-let mut trace = qlog::Trace::new(
- qlog::VantagePoint {
- name: Some("Example client".to_string()),
- ty: qlog::VantagePointType::Client,
- flow: None,
- },
- Some("Example qlog trace".to_string()),
- Some("Example qlog trace description".to_string()),
- Some(qlog::Configuration {
- time_offset: Some(0.0),
- original_uris: None,
- }),
- None,
-);
-```
-
-Create an object with the [`Write`] trait:
-
-```rust
-let mut file = std::fs::File::create("foo.qlog").unwrap();
-```
-
-Create a [`QlogStreamer`] and start serialization to foo.qlog
-using [`start_log()`]:
-
-```rust
-let mut streamer = qlog::QlogStreamer::new(
- qlog::QLOG_VERSION.to_string(),
- Some("Example qlog".to_string()),
- Some("Example qlog description".to_string()),
- None,
- std::time::Instant::now(),
- trace,
- qlog::EventImportance::Base,
- Box::new(file),
-);
-
-streamer.start_log().ok();
-```
-
-### Adding simple events
-
-Once logging has started you can stream events. Simple events can be written in
-one step using [`add_event()`]:
-
-```rust
-let event_data = qlog::EventData::MetricsUpdated {
- min_rtt: Some(1.0),
- smoothed_rtt: Some(1.0),
- latest_rtt: Some(1.0),
- rtt_variance: Some(1.0),
- pto_count: Some(1),
- congestion_window: Some(1234),
- bytes_in_flight: Some(5678),
- ssthresh: None,
- packets_in_flight: None,
- pacing_rate: None,
-};
-
-let event = qlog::Event::with_time(0.0, event_data);
-streamer.add_event(event).ok();
-```
-
-### Adding events with frames
-Some events contain optional arrays of QUIC frames. If the event has
-`Some(Vec<QuicFrame>)`, even if it is empty, the streamer enters a frame
-serializing mode that must be finalized before other events can be logged.
-
-In this example, a `PacketSent` event is created with an empty frame array and
-frames are written out later:
-
-```rust
-let pkt_hdr = qlog::PacketHeader::with_type(
- qlog::PacketType::OneRtt,
- 0,
- Some(0x00000001),
- Some(b"7e37e4dcc6682da8"),
- Some(b"36ce104eee50101c"),
-);
-
-let event_data = qlog::EventData::PacketSent {
- header: pkt_hdr,
- frames: Some(vec![]),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: None,
- datagram_id: None,
-};
-
-let event = qlog::Event::with_time(0.0, event_data);
-
-streamer.add_event(event).ok();
-```
-
-In this example, the frames contained in the QUIC packet
-are PING and PADDING. Each frame is written using the
-[`add_frame()`] method. Frame writing is concluded with
-[`finish_frames()`].
-
-```rust
-let ping = qlog::QuicFrame::Ping;
-let padding = qlog::QuicFrame::Padding;
-
-streamer.add_frame(ping, false).ok();
-streamer.add_frame(padding, false).ok();
-
-streamer.finish_frames().ok();
-```
-
-Once all events have have been written, the log
-can be finalized with [`finish_log()`]:
-
-```rust
-streamer.finish_log().ok();
-```
-
-### Serializing
-
-Serialization to JSON occurs as methods on the [`QlogStreamer`]
-are called. No additional steps are required.
-
-[`Trace`]: struct.Trace.html
-[`VantagePoint`]: struct.VantagePoint.html
-[`Configuration`]: struct.Configuration.html
-[`qlog::Trace.events`]: struct.Trace.html#structfield.events
-[`push_event()`]: struct.Trace.html#method.push_event
-[`packet_sent_min()`]: event/struct.Event.html#method.packet_sent_min
-[`QuicFrame::crypto()`]: enum.QuicFrame.html#variant.Crypto
-[`QlogStreamer`]: struct.QlogStreamer.html
-[`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
-[`start_log()`]: struct.QlogStreamer.html#method.start_log
-[`add_event()`]: struct.QlogStreamer.html#method.add_event
-[`add_event_with_instant()`]: struct.QlogStreamer.html#method.add_event
-[`add_frame()`]: struct.QlogStreamer.html#method.add_frame
-[`finish_frames()`]: struct.QlogStreamer.html#method.finish_frames
-[`finish_log()`]: struct.QlogStreamer.html#method.finish_log
\ No newline at end of file
diff --git a/tools/qlog/src/lib.rs b/tools/qlog/src/lib.rs
deleted file mode 100644
index 17768fa..0000000
--- a/tools/qlog/src/lib.rs
+++ /dev/null
@@ -1,3290 +0,0 @@
-// Copyright (C) 2019, Cloudflare, Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-//! The qlog crate is an implementation of the qlog [main logging schema],
-//! [QUIC event definitions], and [HTTP/3 and QPACK event definitions].
-//! The crate provides a qlog data model that can be used for traces with
-//! events. It supports serialization and deserialization but defers logging IO
-//! choices to applications.
-//!
-//! The crate uses Serde for conversion between Rust and JSON.
-//!
-//! [main logging schema]: https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-main-schema
-//! [QUIC event definitions]:
-//! https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-quic-events.html
-//! [HTTP/3 and QPACK event definitions]:
-//! https://datatracker.ietf.org/doc/html/draft-ietf-quic-qlog-h3-events.html
-//!
-//! Overview
-//! ---------------
-//! qlog is a hierarchical logging format, with a rough structure of:
-//!
-//! * Log
-//! * Trace(s)
-//! * Event(s)
-//!
-//! In practice, a single QUIC connection maps to a single Trace file with one
-//! or more Events. Applications can decide whether to combine Traces from
-//! different connections into the same Log.
-//!
-//! ## Traces
-//!
-//! A [`Trace`] contains metadata such as the [`VantagePoint`] of capture and
-//! the [`Configuration`], and protocol event data in the [`Event`] array.
-//!
-//! ## Writing out logs
-//! As events occur during the connection, the application appends them to the
-//! trace. The qlog crate supports two modes of writing logs: the buffered mode
-//! stores everything in memory and requires the application to serialize and
-//! write the output, the streaming mode progressively writes serialized JSON
-//! output to a writer designated by the application.
-//!
-//! ### Creating a Trace
-//!
-//! A typical application needs a single qlog [`Trace`] that it appends QUIC
-//! and/or HTTP/3 events to:
-//!
-//! ```
-//! let mut trace = qlog::Trace::new(
-//! qlog::VantagePoint {
-//! name: Some("Example client".to_string()),
-//! ty: qlog::VantagePointType::Client,
-//! flow: None,
-//! },
-//! Some("Example qlog trace".to_string()),
-//! Some("Example qlog trace description".to_string()),
-//! Some(qlog::Configuration {
-//! time_offset: Some(0.0),
-//! original_uris: None,
-//! }),
-//! None,
-//! );
-//! ```
-//!
-//! ## Adding events
-//!
-//! Qlog [`Event`] objects are added to [`qlog::Trace.events`].
-//!
-//! The following example demonstrates how to log a qlog QUIC `packet_sent`
-//! event containing a single Crypto frame. It constructs the necessary elements
-//! of the [`Event`], then appends it to the trace with [`push_event()`].
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new (
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None
-//! # );
-//!
-//! let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
-//! let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
-//!
-//! let pkt_hdr = qlog::PacketHeader::new(
-//! qlog::PacketType::Initial,
-//! 0, // packet_number
-//! None, // flags
-//! None, // token
-//! None, // length
-//! Some(0x00000001), // version
-//! Some(b"7e37e4dcc6682da8"), // scid
-//! Some(&dcid),
-//! );
-//!
-//! let frames = vec![qlog::QuicFrame::Crypto {
-//! offset: 0,
-//! length: 0,
-//! }];
-//!
-//! let raw = qlog::RawInfo {
-//! length: Some(1251),
-//! payload_length: Some(1224),
-//! data: None,
-//! };
-//!
-//! let event_data = qlog::EventData::PacketSent {
-//! header: pkt_hdr,
-//! frames: Some(frames),
-//! is_coalesced: None,
-//! retry_token: None,
-//! stateless_reset_token: None,
-//! supported_versions: None,
-//! raw: Some(raw),
-//! datagram_id: None,
-//! };
-//!
-//! trace.push_event(qlog::Event::with_time(0.0, event_data));
-//! ```
-//!
-//! ### Serializing
-//!
-//! The qlog crate has only been tested with `serde_json`, however
-//! other serializer targets might work.
-//!
-//! For example, serializing the trace created above:
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new (
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None
-//! # );
-//! serde_json::to_string_pretty(&trace).unwrap();
-//! ```
-//!
-//! which would generate the following:
-//!
-//! ```ignore
-//! {
-//! "vantage_point": {
-//! "name": "Example client",
-//! "type": "client"
-//! },
-//! "title": "Example qlog trace",
-//! "description": "Example qlog trace description",
-//! "configuration": {
-//! "time_offset": 0.0
-//! },
-//! "events": [
-//! [
-//! 0,
-//! "transport",
-//! "packet_sent",
-//! {
-//! "header": {
-//! "packet_type": "initial",
-//! "packet_number": 0,
-//! "version": "1",
-//! "scil": 8,
-//! "dcil": 8,
-//! "scid": "7e37e4dcc6682da8",
-//! "dcid": "36ce104eee50101c"
-//! },
-//! "raw": {
-//! "length": 1251,
-//! "payload_length": 1224
-//! },
-//! "frames": [
-//! {
-//! "frame_type": "crypto",
-//! "offset": 0,
-//! "length": 100,
-//! }
-//! ]
-//! }
-//! ]
-//! ]
-//! }
-//! ```
-//!
-//! Streaming Mode
-//! --------------
-//!
-//! Create the trace:
-//!
-//! ```
-//! let mut trace = qlog::Trace::new(
-//! qlog::VantagePoint {
-//! name: Some("Example client".to_string()),
-//! ty: qlog::VantagePointType::Client,
-//! flow: None,
-//! },
-//! Some("Example qlog trace".to_string()),
-//! Some("Example qlog trace description".to_string()),
-//! Some(qlog::Configuration {
-//! time_offset: Some(0.0),
-//! original_uris: None,
-//! }),
-//! None,
-//! );
-//! ```
-//! Create an object with the [`Write`] trait:
-//!
-//! ```
-//! let mut file = std::fs::File::create("foo.qlog").unwrap();
-//! ```
-//!
-//! Create a [`QlogStreamer`] and start serialization to foo.qlog
-//! using [`start_log()`]:
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new(
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None,
-//! # );
-//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
-//! let mut streamer = qlog::QlogStreamer::new(
-//! qlog::QLOG_VERSION.to_string(),
-//! Some("Example qlog".to_string()),
-//! Some("Example qlog description".to_string()),
-//! None,
-//! std::time::Instant::now(),
-//! trace,
-//! qlog::EventImportance::Base,
-//! Box::new(file),
-//! );
-//!
-//! streamer.start_log().ok();
-//! ```
-//!
-//! ### Adding simple events
-//!
-//! Once logging has started you can stream events. Simple events
-//! can be written in one step using [`add_event()`]:
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new(
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None,
-//! # );
-//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
-//! # let mut streamer = qlog::QlogStreamer::new(
-//! # qlog::QLOG_VERSION.to_string(),
-//! # Some("Example qlog".to_string()),
-//! # Some("Example qlog description".to_string()),
-//! # None,
-//! # std::time::Instant::now(),
-//! # trace,
-//! # qlog::EventImportance::Base,
-//! # Box::new(file),
-//! # );
-//! let event_data = qlog::EventData::MetricsUpdated {
-//! min_rtt: Some(1.0),
-//! smoothed_rtt: Some(1.0),
-//! latest_rtt: Some(1.0),
-//! rtt_variance: Some(1.0),
-//! pto_count: Some(1),
-//! congestion_window: Some(1234),
-//! bytes_in_flight: Some(5678),
-//! ssthresh: None,
-//! packets_in_flight: None,
-//! pacing_rate: None,
-//! };
-//!
-//! let event = qlog::Event::with_time(0.0, event_data);
-//! streamer.add_event(event).ok();
-//! ```
-//!
-//! ### Adding events with frames
-//! Some events contain optional arrays of QUIC frames. If the
-//! event has `Some(Vec<QuicFrame>)`, even if it is empty, the
-//! streamer enters a frame serializing mode that must be
-//! finalized before other events can be logged.
-//!
-//! In this example, a `PacketSent` event is created with an
-//! empty frame array and frames are written out later:
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new(
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None,
-//! # );
-//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
-//! # let mut streamer = qlog::QlogStreamer::new(
-//! # qlog::QLOG_VERSION.to_string(),
-//! # Some("Example qlog".to_string()),
-//! # Some("Example qlog description".to_string()),
-//! # None,
-//! # std::time::Instant::now(),
-//! # trace,
-//! # qlog::EventImportance::Base,
-//! # Box::new(file),
-//! # );
-//! let pkt_hdr = qlog::PacketHeader::with_type(
-//! qlog::PacketType::OneRtt,
-//! 0,
-//! Some(0x00000001),
-//! Some(b"7e37e4dcc6682da8"),
-//! Some(b"36ce104eee50101c"),
-//! );
-//!
-//! let event_data = qlog::EventData::PacketSent {
-//! header: pkt_hdr,
-//! frames: Some(vec![]),
-//! is_coalesced: None,
-//! retry_token: None,
-//! stateless_reset_token: None,
-//! supported_versions: None,
-//! raw: None,
-//! datagram_id: None,
-//! };
-//!
-//! let event = qlog::Event::with_time(0.0, event_data);
-//!
-//! streamer.add_event(event).ok();
-//! ```
-//!
-//! In this example, the frames contained in the QUIC packet
-//! are PING and PADDING. Each frame is written using the
-//! [`add_frame()`] method. Frame writing is concluded with
-//! [`finish_frames()`].
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new(
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None,
-//! # );
-//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
-//! # let mut streamer = qlog::QlogStreamer::new(
-//! # qlog::QLOG_VERSION.to_string(),
-//! # Some("Example qlog".to_string()),
-//! # Some("Example qlog description".to_string()),
-//! # None,
-//! # std::time::Instant::now(),
-//! # trace,
-//! # qlog::EventImportance::Base,
-//! # Box::new(file),
-//! # );
-//!
-//! let ping = qlog::QuicFrame::Ping;
-//! let padding = qlog::QuicFrame::Padding;
-//!
-//! streamer.add_frame(ping, false).ok();
-//! streamer.add_frame(padding, false).ok();
-//!
-//! streamer.finish_frames().ok();
-//! ```
-//!
-//! Once all events have have been written, the log
-//! can be finalized with [`finish_log()`]:
-//!
-//! ```
-//! # let mut trace = qlog::Trace::new(
-//! # qlog::VantagePoint {
-//! # name: Some("Example client".to_string()),
-//! # ty: qlog::VantagePointType::Client,
-//! # flow: None,
-//! # },
-//! # Some("Example qlog trace".to_string()),
-//! # Some("Example qlog trace description".to_string()),
-//! # Some(qlog::Configuration {
-//! # time_offset: Some(0.0),
-//! # original_uris: None,
-//! # }),
-//! # None,
-//! # );
-//! # let mut file = std::fs::File::create("foo.qlog").unwrap();
-//! # let mut streamer = qlog::QlogStreamer::new(
-//! # qlog::QLOG_VERSION.to_string(),
-//! # Some("Example qlog".to_string()),
-//! # Some("Example qlog description".to_string()),
-//! # None,
-//! # std::time::Instant::now(),
-//! # trace,
-//! # qlog::EventImportance::Base,
-//! # Box::new(file),
-//! # );
-//! streamer.finish_log().ok();
-//! ```
-//!
-//! ### Serializing
-//!
-//! Serialization to JSON occurs as methods on the [`QlogStreamer`]
-//! are called. No additional steps are required.
-//!
-//! [`Trace`]: struct.Trace.html
-//! [`VantagePoint`]: struct.VantagePoint.html
-//! [`Configuration`]: struct.Configuration.html
-//! [`qlog::Trace.events`]: struct.Trace.html#structfield.events
-//! [`push_event()`]: struct.Trace.html#method.push_event
-//! [`packet_sent_min()`]: event/struct.Event.html#method.packet_sent_min
-//! [`QuicFrame::crypto()`]: enum.QuicFrame.html#variant.Crypto
-//! [`QlogStreamer`]: struct.QlogStreamer.html
-//! [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
-//! [`start_log()`]: struct.QlogStreamer.html#method.start_log
-//! [`add_event()`]: struct.QlogStreamer.html#method.add_event
-//! [`add_event_with_instant()`]: struct.QlogStreamer.html#method.add_event
-//! [`add_frame()`]: struct.QlogStreamer.html#method.add_frame
-//! [`finish_frames()`]: struct.QlogStreamer.html#method.finish_frames
-//! [`finish_log()`]: struct.QlogStreamer.html#method.finish_log
-
-use serde::Deserialize;
-use serde::Serialize;
-
-/// A quiche qlog error.
-#[derive(Debug)]
-pub enum Error {
- /// There is no more work to do.
- Done,
-
- /// The operation cannot be completed because it was attempted
- /// in an invalid state.
- InvalidState,
-
- /// I/O error.
- IoError(std::io::Error),
-}
-
-impl std::fmt::Display for Error {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- write!(f, "{:?}", self)
- }
-}
-
-impl std::error::Error for Error {
- fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
- None
- }
-}
-
-impl std::convert::From<std::io::Error> for Error {
- fn from(err: std::io::Error) -> Self {
- Error::IoError(err)
- }
-}
-
-pub const QLOG_VERSION: &str = "draft-02";
-
-/// A specialized [`Result`] type for quiche qlog operations.
-///
-/// This type is used throughout the public API for any operation that
-/// can produce an error.
-///
-/// [`Result`]: https://doc.rust-lang.org/std/result/enum.Result.html
-pub type Result<T> = std::result::Result<T, Error>;
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone)]
-pub struct Qlog {
- pub qlog_version: String,
- pub qlog_format: String,
- pub title: Option<String>,
- pub description: Option<String>,
- pub summary: Option<String>,
-
- pub traces: Vec<Trace>,
-}
-
-impl Default for Qlog {
- fn default() -> Self {
- Qlog {
- qlog_version: QLOG_VERSION.to_string(),
- qlog_format: "JSON".to_string(),
- title: Some("Default qlog title".to_string()),
- description: Some("Default qlog description".to_string()),
- summary: Some("Default qlog title".to_string()),
- traces: Vec::new(),
- }
- }
-}
-
-#[derive(PartialEq, Debug)]
-pub enum StreamerState {
- Initial,
- Ready,
- WritingFrames,
- Finished,
-}
-
-#[derive(Clone, Copy)]
-pub enum ImportanceLogLevel {
- Core = 0,
- Base = 1,
- Extra = 2,
-}
-
-/// A helper object specialized for streaming JSON-serialized qlog to a
-/// [`Write`] trait.
-///
-/// The object is responsible for the `Qlog` object that contains the provided
-/// `Trace`.
-///
-/// Serialization is progressively driven by method calls; once log streaming is
-/// started, `event::Events` can be written using `add_event()`. Some events
-/// can contain an array of `QuicFrame`s, when writing such an event, the
-/// streamer enters a frame-serialization mode where frames are be progressively
-/// written using `add_frame()`. This mode is concluded using
-/// `finished_frames()`. While serializing frames, any attempts to log
-/// additional events are ignored.
-///
-/// [`Write`]: https://doc.rust-lang.org/std/io/trait.Write.html
-pub struct QlogStreamer {
- start_time: std::time::Instant,
- writer: Box<dyn std::io::Write + Send + Sync>,
- qlog: Qlog,
- state: StreamerState,
- log_level: EventImportance,
- first_event: bool,
- first_frame: bool,
-}
-
-impl QlogStreamer {
- /// Creates a QlogStreamer object.
- ///
- /// It owns a `Qlog` object that contains the provided `Trace` containing
- /// `Events`.
- ///
- /// All serialization will be written to the provided `Write`.
- #[allow(clippy::too_many_arguments)]
- pub fn new(
- qlog_version: String, title: Option<String>, description: Option<String>,
- summary: Option<String>, start_time: std::time::Instant, trace: Trace,
- log_level: EventImportance,
- writer: Box<dyn std::io::Write + Send + Sync>,
- ) -> Self {
- let qlog = Qlog {
- qlog_version,
- qlog_format: "JSON".to_string(),
- title,
- description,
- summary,
- traces: vec![trace],
- };
-
- QlogStreamer {
- start_time,
- writer,
- qlog,
- state: StreamerState::Initial,
- log_level,
- first_event: true,
- first_frame: false,
- }
- }
-
- /// Starts qlog streaming serialization.
- ///
- /// This writes out the JSON-serialized form of all information up to qlog
- /// `Trace`'s array of `Event`s. These are are separately appended
- /// using `add_event()` and `add_event_with_instant()`.
- pub fn start_log(&mut self) -> Result<()> {
- if self.state != StreamerState::Initial {
- return Err(Error::Done);
- }
-
- // A qlog contains a trace holding a vector of events that we want to
- // serialize in a streaming manner. So at the start of serialization,
- // take off all closing delimiters, and leave us in a state to accept
- // new events.
- match serde_json::to_string(&self.qlog) {
- Ok(mut out) => {
- out.truncate(out.len() - 4);
-
- self.writer.as_mut().write_all(out.as_bytes())?;
-
- self.state = StreamerState::Ready;
-
- self.first_event = self.qlog.traces[0].events.is_empty();
- },
-
- _ => return Err(Error::Done),
- }
-
- Ok(())
- }
-
- /// Finishes qlog streaming serialization.
- ///
- /// The JSON-serialized output has remaining close delimiters added.
- /// After this is called, no more serialization will occur.
- pub fn finish_log(&mut self) -> Result<()> {
- if self.state == StreamerState::Initial ||
- self.state == StreamerState::Finished
- {
- return Err(Error::InvalidState);
- }
-
- self.writer.as_mut().write_all(b"]}]}")?;
-
- self.state = StreamerState::Finished;
-
- self.writer.as_mut().flush()?;
-
- Ok(())
- }
-
- /// Writes a JSON-serialized `Event` using `std::time::Instant::now()`.
- ///
- /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
- /// returned and the streamer enters a frame-serialization mode that is only
- /// concluded by `finish_frames()`. In this mode, attempts to log additional
- /// events are ignored.
- ///
- /// If the event contains no array of `QuicFrames` return `false`.
- pub fn add_event_now(&mut self, event: Event) -> Result<bool> {
- let now = std::time::Instant::now();
-
- self.add_event_with_instant(event, now)
- }
-
- /// Writes a JSON-serialized `Event` using the provided EventData and
- /// Instant.
- ///
- /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
- /// returned and the streamer enters a frame-serialization mode that is only
- /// concluded by `finish_frames()`. In this mode, attempts to log additional
- /// events are ignored.
- ///
- /// If the event contains no array of `QuicFrames` return `false`.
- pub fn add_event_with_instant(
- &mut self, mut event: Event, now: std::time::Instant,
- ) -> Result<bool> {
- if self.state != StreamerState::Ready {
- return Err(Error::InvalidState);
- }
-
- if !event.importance().is_contained_in(&self.log_level) {
- return Err(Error::Done);
- }
-
- let dur = if cfg!(test) {
- std::time::Duration::from_secs(0)
- } else {
- now.duration_since(self.start_time)
- };
-
- let rel_time = dur.as_secs_f32() * 1000.0;
- event.time = rel_time;
-
- self.add_event(event)
- }
-
- /// Writes a JSON-serialized `Event` using the provided Instant.
- ///
- /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
- /// returned and the streamer enters a frame-serialization mode that is only
- /// concluded by `finish_frames()`. In this mode, attempts to log additional
- /// events are ignored.
- ///
- /// If the event contains no array of `QuicFrames` return `false`.
- pub fn add_event_data_with_instant(
- &mut self, event_data: EventData, now: std::time::Instant,
- ) -> Result<bool> {
- if self.state != StreamerState::Ready {
- return Err(Error::InvalidState);
- }
-
- let ty = EventType::from(&event_data);
- if !EventImportance::from(ty).is_contained_in(&self.log_level) {
- return Err(Error::Done);
- }
-
- let dur = if cfg!(test) {
- std::time::Duration::from_secs(0)
- } else {
- now.duration_since(self.start_time)
- };
-
- let rel_time = dur.as_secs_f32() * 1000.0;
- let event = Event::with_time(rel_time, event_data);
-
- self.add_event(event)
- }
-
- /// Writes a JSON-serialized `Event` using the provided Event.
- ///
- /// Some qlog events can contain `QuicFrames`. If this is detected `true` is
- /// returned and the streamer enters a frame-serialization mode that is only
- /// concluded by `finish_frames()`. In this mode, attempts to log additional
- /// events are ignored.
- ///
- /// If the event contains no array of `QuicFrames` return `false`.
- pub fn add_event(&mut self, event: Event) -> Result<bool> {
- if self.state != StreamerState::Ready {
- return Err(Error::InvalidState);
- }
-
- if !event.importance().is_contained_in(&self.log_level) {
- return Err(Error::Done);
- }
-
- let (ev, contains_frames) = match serde_json::to_string(&event) {
- Ok(mut ev_out) =>
- if let Some(f) = event.data.contains_quic_frames() {
- ev_out.truncate(ev_out.len() - 3);
-
- if f == 0 {
- self.first_frame = true;
- }
-
- (ev_out, true)
- } else {
- (ev_out, false)
- },
-
- _ => return Err(Error::Done),
- };
-
- let maybe_comma = if self.first_event {
- self.first_event = false;
- ""
- } else {
- ","
- };
-
- let out = format!("{}{}", maybe_comma, ev);
-
- self.writer.as_mut().write_all(out.as_bytes())?;
-
- if contains_frames {
- self.state = StreamerState::WritingFrames
- } else {
- self.state = StreamerState::Ready
- };
-
- Ok(contains_frames)
- }
-
- /// Writes a JSON-serialized `QuicFrame`.
- ///
- /// Only valid while in the frame-serialization mode.
- pub fn add_frame(&mut self, frame: QuicFrame, last: bool) -> Result<()> {
- if self.state != StreamerState::WritingFrames {
- return Err(Error::InvalidState);
- }
-
- match serde_json::to_string(&frame) {
- Ok(mut out) => {
- if !self.first_frame {
- out.insert(0, ',');
- } else {
- self.first_frame = false;
- }
-
- self.writer.as_mut().write_all(out.as_bytes())?;
-
- if last {
- self.finish_frames()?;
- }
- },
-
- _ => return Err(Error::Done),
- }
-
- Ok(())
- }
-
- /// Concludes `QuicFrame` streaming serialization.
- ///
- /// Only valid while in the frame-serialization mode.
- pub fn finish_frames(&mut self) -> Result<()> {
- if self.state != StreamerState::WritingFrames {
- return Err(Error::InvalidState);
- }
-
- self.writer.as_mut().write_all(b"]}}")?;
- self.state = StreamerState::Ready;
-
- Ok(())
- }
-
- /// Returns the writer.
- #[allow(clippy::borrowed_box)]
- pub fn writer(&self) -> &Box<dyn std::io::Write + Send + Sync> {
- &self.writer
- }
-}
-
-// We now commence data definitions heavily styled on the QLOG
-// schema definition. Data is serialized using serde.
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct Trace {
- pub vantage_point: VantagePoint,
- pub title: Option<String>,
- pub description: Option<String>,
-
- pub configuration: Option<Configuration>,
-
- pub common_fields: Option<CommonFields>,
-
- pub events: Vec<Event>,
-}
-
-/// Helper functions for using a qlog trace.
-impl Trace {
- /// Creates a new qlog trace
- pub fn new(
- vantage_point: VantagePoint, title: Option<String>,
- description: Option<String>, configuration: Option<Configuration>,
- common_fields: Option<CommonFields>,
- ) -> Self {
- Trace {
- vantage_point,
- title,
- description,
- configuration,
- common_fields,
- events: Vec::new(),
- }
- }
-
- pub fn push_event(&mut self, event: Event) {
- self.events.push(event);
- }
-}
-
-pub type Bytes = String;
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct VantagePoint {
- pub name: Option<String>,
-
- #[serde(rename = "type")]
- pub ty: VantagePointType,
-
- pub flow: Option<VantagePointType>,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum VantagePointType {
- Client,
- Server,
- Network,
- Unknown,
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct Configuration {
- pub time_offset: Option<f64>,
-
- pub original_uris: Option<Vec<String>>,
- /* TODO
- * additionalUserSpecifiedProperty */
-}
-
-impl Default for Configuration {
- fn default() -> Self {
- Configuration {
- time_offset: Some(0.0),
- original_uris: None,
- }
- }
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, Default, PartialEq, Debug)]
-pub struct CommonFields {
- pub group_id: Option<String>,
- pub protocol_type: Option<String>,
-
- pub reference_time: Option<String>,
- /* TODO
- * additionalUserSpecifiedProperty */
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(untagged)]
-pub enum EventType {
- ConnectivityEventType(ConnectivityEventType),
-
- TransportEventType(TransportEventType),
-
- SecurityEventType(SecurityEventType),
-
- RecoveryEventType(RecoveryEventType),
-
- Http3EventType(Http3EventType),
-
- QpackEventType(QpackEventType),
-
- GenericEventType(GenericEventType),
-
- None,
-}
-
-impl Default for EventType {
- fn default() -> Self {
- EventType::None
- }
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub enum TimeFormat {
- Absolute,
- Delta,
- Relative,
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, Debug)]
-pub struct Event {
- pub time: f32,
-
- // Strictly, the qlog 02 spec says we should have a name field in the
- // `Event` structure. However, serde's autogenerated Deserialize code
- // struggles to read Events properly because the `EventData` types often
- // alias. In order to work around that, we use can use a trick that will
- // give serde autogen all the information that it needs while also produced
- // a legal qlog. Specifically, strongly linking an EventData enum variant
- // with the wire-format name.
- //
- // The trick is to use Adjacent Tagging
- // (https://serde.rs/enum-representations.html#adjacently-tagged) with
- // Struct flattening (https://serde.rs/attr-flatten.html). At a high level
- // this first creates an `EventData` JSON object:
- //
- // {name: <enum variant name>, data: enum variant data }
- //
- // and then flattens those fields into the `Event` object.
- #[serde(flatten)]
- pub data: EventData,
-
- pub protocol_type: Option<String>,
- pub group_id: Option<String>,
-
- pub time_format: Option<TimeFormat>,
-
- #[serde(skip)]
- ty: EventType,
-}
-
-impl Event {
- /// Returns a new `Event` object with the provided time and data.
- pub fn with_time(time: f32, data: EventData) -> Self {
- let ty = EventType::from(&data);
- Event {
- time,
- data,
- protocol_type: Default::default(),
- group_id: Default::default(),
- time_format: Default::default(),
- ty,
- }
- }
-
- fn importance(&self) -> EventImportance {
- self.ty.into()
- }
-}
-
-impl PartialEq for Event {
- // custom comparison to skip over the `ty` field
- fn eq(&self, other: &Event) -> bool {
- self.time == other.time &&
- self.data == other.data &&
- self.protocol_type == other.protocol_type &&
- self.group_id == other.group_id &&
- self.time_format == other.time_format
- }
-}
-
-#[derive(Clone)]
-pub enum EventImportance {
- Core,
- Base,
- Extra,
-}
-
-impl EventImportance {
- /// Returns true if this importance level is included by `other`.
- pub fn is_contained_in(&self, other: &EventImportance) -> bool {
- match (other, self) {
- (EventImportance::Core, EventImportance::Core) => true,
-
- (EventImportance::Base, EventImportance::Core) |
- (EventImportance::Base, EventImportance::Base) => true,
-
- (EventImportance::Extra, EventImportance::Core) |
- (EventImportance::Extra, EventImportance::Base) |
- (EventImportance::Extra, EventImportance::Extra) => true,
-
- (..) => false,
- }
- }
-}
-
-impl From<EventType> for EventImportance {
- fn from(ty: EventType) -> Self {
- match ty {
- EventType::ConnectivityEventType(
- ConnectivityEventType::ServerListening,
- ) => EventImportance::Extra,
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionStarted,
- ) => EventImportance::Base,
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionIdUpdated,
- ) => EventImportance::Base,
- EventType::ConnectivityEventType(
- ConnectivityEventType::SpinBitUpdated,
- ) => EventImportance::Base,
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionStateUpdated,
- ) => EventImportance::Base,
-
- EventType::SecurityEventType(SecurityEventType::KeyUpdated) =>
- EventImportance::Base,
- EventType::SecurityEventType(SecurityEventType::KeyRetired) =>
- EventImportance::Base,
-
- EventType::TransportEventType(TransportEventType::ParametersSet) =>
- EventImportance::Core,
- EventType::TransportEventType(
- TransportEventType::DatagramsReceived,
- ) => EventImportance::Extra,
- EventType::TransportEventType(TransportEventType::DatagramsSent) =>
- EventImportance::Extra,
- EventType::TransportEventType(
- TransportEventType::DatagramDropped,
- ) => EventImportance::Extra,
- EventType::TransportEventType(TransportEventType::PacketReceived) =>
- EventImportance::Core,
- EventType::TransportEventType(TransportEventType::PacketSent) =>
- EventImportance::Core,
- EventType::TransportEventType(TransportEventType::PacketDropped) =>
- EventImportance::Base,
- EventType::TransportEventType(TransportEventType::PacketBuffered) =>
- EventImportance::Base,
- EventType::TransportEventType(
- TransportEventType::StreamStateUpdated,
- ) => EventImportance::Base,
- EventType::TransportEventType(
- TransportEventType::FramesProcessed,
- ) => EventImportance::Extra,
- EventType::TransportEventType(TransportEventType::DataMoved) =>
- EventImportance::Base,
-
- EventType::RecoveryEventType(RecoveryEventType::ParametersSet) =>
- EventImportance::Base,
- EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated) =>
- EventImportance::Core,
- EventType::RecoveryEventType(
- RecoveryEventType::CongestionStateUpdated,
- ) => EventImportance::Base,
- EventType::RecoveryEventType(RecoveryEventType::LossTimerUpdated) =>
- EventImportance::Extra,
- EventType::RecoveryEventType(RecoveryEventType::PacketLost) =>
- EventImportance::Core,
- EventType::RecoveryEventType(
- RecoveryEventType::MarkedForRetransmit,
- ) => EventImportance::Extra,
-
- EventType::Http3EventType(Http3EventType::ParametersSet) =>
- EventImportance::Base,
- EventType::Http3EventType(Http3EventType::StreamTypeSet) =>
- EventImportance::Base,
- EventType::Http3EventType(Http3EventType::FrameCreated) =>
- EventImportance::Core,
- EventType::Http3EventType(Http3EventType::FrameParsed) =>
- EventImportance::Core,
- EventType::Http3EventType(Http3EventType::PushResolved) =>
- EventImportance::Extra,
-
- EventType::QpackEventType(QpackEventType::StateUpdated) =>
- EventImportance::Base,
- EventType::QpackEventType(QpackEventType::StreamStateUpdated) =>
- EventImportance::Base,
- EventType::QpackEventType(QpackEventType::DynamicTableUpdated) =>
- EventImportance::Extra,
- EventType::QpackEventType(QpackEventType::HeadersEncoded) =>
- EventImportance::Base,
- EventType::QpackEventType(QpackEventType::HeadersDecoded) =>
- EventImportance::Base,
- EventType::QpackEventType(QpackEventType::InstructionCreated) =>
- EventImportance::Base,
- EventType::QpackEventType(QpackEventType::InstructionParsed) =>
- EventImportance::Base,
-
- _ => unimplemented!(),
- }
- }
-}
-
-#[derive(Serialize, Deserialize, Clone, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum EventCategory {
- Connectivity,
- Security,
- Transport,
- Recovery,
- Http,
- Qpack,
-
- Error,
- Warning,
- Info,
- Debug,
- Verbose,
- Simulation,
-}
-
-impl std::fmt::Display for EventCategory {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- let v = match self {
- EventCategory::Connectivity => "connectivity",
- EventCategory::Security => "security",
- EventCategory::Transport => "transport",
- EventCategory::Recovery => "recovery",
- EventCategory::Http => "http",
- EventCategory::Qpack => "qpack",
- EventCategory::Error => "error",
- EventCategory::Warning => "warning",
- EventCategory::Info => "info",
- EventCategory::Debug => "debug",
- EventCategory::Verbose => "verbose",
- EventCategory::Simulation => "simulation",
- };
-
- write!(f, "{}", v)
- }
-}
-
-impl From<EventType> for EventCategory {
- fn from(ty: EventType) -> Self {
- match ty {
- EventType::ConnectivityEventType(_) => EventCategory::Connectivity,
- EventType::SecurityEventType(_) => EventCategory::Security,
- EventType::TransportEventType(_) => EventCategory::Transport,
- EventType::RecoveryEventType(_) => EventCategory::Recovery,
- EventType::Http3EventType(_) => EventCategory::Http,
- EventType::QpackEventType(_) => EventCategory::Qpack,
-
- _ => unimplemented!(),
- }
- }
-}
-
-impl From<&EventData> for EventType {
- fn from(event_data: &EventData) -> Self {
- match event_data {
- EventData::ServerListening { .. } =>
- EventType::ConnectivityEventType(
- ConnectivityEventType::ServerListening,
- ),
- EventData::ConnectionStarted { .. } =>
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionStarted,
- ),
- EventData::ConnectionClosed { .. } =>
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionClosed,
- ),
- EventData::ConnectionIdUpdated { .. } =>
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionIdUpdated,
- ),
- EventData::SpinBitUpdated { .. } => EventType::ConnectivityEventType(
- ConnectivityEventType::SpinBitUpdated,
- ),
- EventData::ConnectionStateUpdated { .. } =>
- EventType::ConnectivityEventType(
- ConnectivityEventType::ConnectionStateUpdated,
- ),
-
- EventData::KeyUpdated { .. } =>
- EventType::SecurityEventType(SecurityEventType::KeyUpdated),
- EventData::KeyRetired { .. } =>
- EventType::SecurityEventType(SecurityEventType::KeyRetired),
-
- EventData::VersionInformation { .. } =>
- EventType::TransportEventType(
- TransportEventType::VersionInformation,
- ),
- EventData::AlpnInformation { .. } =>
- EventType::TransportEventType(TransportEventType::AlpnInformation),
- EventData::TransportParametersSet { .. } =>
- EventType::TransportEventType(TransportEventType::ParametersSet),
- EventData::TransportParametersRestored { .. } =>
- EventType::TransportEventType(
- TransportEventType::ParametersRestored,
- ),
- EventData::DatagramsReceived { .. } => EventType::TransportEventType(
- TransportEventType::DatagramsReceived,
- ),
- EventData::DatagramsSent { .. } =>
- EventType::TransportEventType(TransportEventType::DatagramsSent),
- EventData::DatagramDropped { .. } =>
- EventType::TransportEventType(TransportEventType::DatagramDropped),
- EventData::PacketReceived { .. } =>
- EventType::TransportEventType(TransportEventType::PacketReceived),
- EventData::PacketSent { .. } =>
- EventType::TransportEventType(TransportEventType::PacketSent),
- EventData::PacketDropped { .. } =>
- EventType::TransportEventType(TransportEventType::PacketDropped),
- EventData::PacketBuffered { .. } =>
- EventType::TransportEventType(TransportEventType::PacketBuffered),
- EventData::PacketsAcked { .. } =>
- EventType::TransportEventType(TransportEventType::PacketsAcked),
- EventData::StreamStateUpdated { .. } =>
- EventType::TransportEventType(
- TransportEventType::StreamStateUpdated,
- ),
- EventData::FramesProcessed { .. } =>
- EventType::TransportEventType(TransportEventType::FramesProcessed),
- EventData::DataMoved { .. } =>
- EventType::TransportEventType(TransportEventType::DataMoved),
-
- EventData::RecoveryParametersSet { .. } =>
- EventType::RecoveryEventType(RecoveryEventType::ParametersSet),
- EventData::MetricsUpdated { .. } =>
- EventType::RecoveryEventType(RecoveryEventType::MetricsUpdated),
- EventData::CongestionStateUpdated { .. } =>
- EventType::RecoveryEventType(
- RecoveryEventType::CongestionStateUpdated,
- ),
- EventData::LossTimerUpdated { .. } =>
- EventType::RecoveryEventType(RecoveryEventType::LossTimerUpdated),
- EventData::PacketLost { .. } =>
- EventType::RecoveryEventType(RecoveryEventType::PacketLost),
- EventData::MarkedForRetransmit { .. } =>
- EventType::RecoveryEventType(
- RecoveryEventType::MarkedForRetransmit,
- ),
-
- EventData::H3ParametersSet { .. } =>
- EventType::Http3EventType(Http3EventType::ParametersSet),
- EventData::H3ParametersRestored { .. } =>
- EventType::Http3EventType(Http3EventType::ParametersRestored),
- EventData::H3StreamTypeSet { .. } =>
- EventType::Http3EventType(Http3EventType::StreamTypeSet),
- EventData::H3FrameCreated { .. } =>
- EventType::Http3EventType(Http3EventType::FrameCreated),
- EventData::H3FrameParsed { .. } =>
- EventType::Http3EventType(Http3EventType::FrameParsed),
- EventData::H3PushResolved { .. } =>
- EventType::Http3EventType(Http3EventType::PushResolved),
-
- EventData::QpackStateUpdated { .. } =>
- EventType::QpackEventType(QpackEventType::StateUpdated),
- EventData::QpackStreamStateUpdated { .. } =>
- EventType::QpackEventType(QpackEventType::StreamStateUpdated),
- EventData::QpackDynamicTableUpdated { .. } =>
- EventType::QpackEventType(QpackEventType::DynamicTableUpdated),
- EventData::QpackHeadersEncoded { .. } =>
- EventType::QpackEventType(QpackEventType::HeadersEncoded),
- EventData::QpackHeadersDecoded { .. } =>
- EventType::QpackEventType(QpackEventType::HeadersDecoded),
- EventData::QpackInstructionCreated { .. } =>
- EventType::QpackEventType(QpackEventType::InstructionCreated),
- EventData::QpackInstructionParsed { .. } =>
- EventType::QpackEventType(QpackEventType::InstructionParsed),
-
- EventData::ConnectionError { .. } =>
- EventType::GenericEventType(GenericEventType::ConnectionError),
- EventData::ApplicationError { .. } =>
- EventType::GenericEventType(GenericEventType::ApplicationError),
- EventData::InternalError { .. } =>
- EventType::GenericEventType(GenericEventType::InternalError),
- EventData::InternalWarning { .. } =>
- EventType::GenericEventType(GenericEventType::InternalError),
- EventData::Message { .. } =>
- EventType::GenericEventType(GenericEventType::Message),
- EventData::Marker { .. } =>
- EventType::GenericEventType(GenericEventType::Marker),
- }
- }
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum ConnectivityEventType {
- ServerListening,
- ConnectionStarted,
- ConnectionClosed,
- ConnectionIdUpdated,
- SpinBitUpdated,
- ConnectionStateUpdated,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum TransportEventType {
- VersionInformation,
- AlpnInformation,
-
- ParametersSet,
- ParametersRestored,
-
- DatagramsSent,
- DatagramsReceived,
- DatagramDropped,
-
- PacketSent,
- PacketReceived,
- PacketDropped,
- PacketBuffered,
- PacketsAcked,
-
- FramesProcessed,
-
- StreamStateUpdated,
-
- DataMoved,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum TransportEventTrigger {
- Line,
- Retransmit,
- KeysUnavailable,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum SecurityEventType {
- KeyUpdated,
- KeyRetired,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum SecurityEventTrigger {
- Tls,
- Implicit,
- RemoteUpdate,
- LocalUpdate,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum RecoveryEventType {
- ParametersSet,
- MetricsUpdated,
- CongestionStateUpdated,
- LossTimerUpdated,
- PacketLost,
- MarkedForRetransmit,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum RecoveryEventTrigger {
- AckReceived,
- PacketSent,
- Alarm,
- Unknown,
-}
-
-// ================================================================== //
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum KeyType {
- ServerInitialSecret,
- ClientInitialSecret,
-
- ServerHandshakeSecret,
- ClientHandshakeSecret,
-
- Server0RttSecret,
- Client0RttSecret,
-
- Server1RttSecret,
- Client1RttSecret,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum ConnectionState {
- Attempted,
- Reset,
- Handshake,
- Active,
- Keepalive,
- Draining,
- Closed,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum TransportOwner {
- Local,
- Remote,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct PreferredAddress {
- pub ip_v4: String,
- pub ip_v6: String,
-
- pub port_v4: u16,
- pub port_v6: u16,
-
- pub connection_id: Bytes,
- pub stateless_reset_token: Token,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum PacketNumberSpace {
- Initial,
- Handshake,
- ApplicationData,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub enum LossTimerEventType {
- Set,
- Expired,
- Cancelled,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum StreamSide {
- Sending,
- Receiving,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum StreamState {
- // bidirectional stream states, draft-23 3.4.
- Idle,
- Open,
- HalfClosedLocal,
- HalfClosedRemote,
- Closed,
-
- // sending-side stream states, draft-23 3.1.
- Ready,
- Send,
- DataSent,
- ResetSent,
- ResetReceived,
-
- // receive-side stream states, draft-23 3.2.
- Receive,
- SizeKnown,
- DataRead,
- ResetRead,
-
- // both-side states
- DataReceived,
-
- // qlog-defined
- Destroyed,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum TimerType {
- Ack,
- Pto,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum DataRecipient {
- User,
- Application,
- Transport,
- Network,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum H3Owner {
- Local,
- Remote,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum H3StreamType {
- Data,
- Control,
- Push,
- Reserved,
- QpackEncode,
- QpackDecode,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum H3PushDecision {
- Claimed,
- Abandoned,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackOwner {
- Local,
- Remote,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackStreamState {
- Blocked,
- Unblocked,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackUpdateType {
- Added,
- Evicted,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct QpackDynamicTableEntry {
- pub index: u64,
- pub name: Option<String>,
- pub value: Option<String>,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct QpackHeaderBlockPrefix {
- pub required_insert_count: u64,
- pub sign_bit: bool,
- pub delta_base: u64,
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct RawInfo {
- pub length: Option<u64>,
- pub payload_length: Option<u64>,
-
- pub data: Option<Bytes>,
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(tag = "name", content = "data")]
-#[allow(clippy::large_enum_variant)]
-pub enum EventData {
- // ================================================================== //
- // CONNECTIVITY
- #[serde(rename = "connectivity:server_listening")]
- ServerListening {
- ip_v4: Option<String>, // human-readable or bytes
- ip_v6: Option<String>, // human-readable or bytes
- port_v4: u32,
- port_v6: u32,
-
- retry_required: Option<bool>,
- },
-
- #[serde(rename = "connectivity:connection_started")]
- ConnectionStarted {
- ip_version: String, // "v4" or "v6"
- src_ip: String, // human-readable or bytes
- dst_ip: String, // human-readable or bytes
-
- protocol: Option<String>,
- src_port: u32,
- dst_port: u32,
-
- src_cid: Option<Bytes>,
- dst_cid: Option<Bytes>,
- },
-
- #[serde(rename = "connectivity:connection_closed")]
- ConnectionClosed {
- owner: Option<TransportOwner>,
-
- connection_code: Option<ConnectionErrorCode>,
- application_code: Option<ApplicationErrorCode>,
- internal_code: Option<u32>,
-
- reason: Option<String>,
- },
-
- #[serde(rename = "connectivity:connection_id_updated")]
- ConnectionIdUpdated {
- owner: Option<TransportOwner>,
-
- old: Option<Bytes>,
- new: Option<Bytes>,
- },
-
- #[serde(rename = "connectivity:spin_bit_updated")]
- SpinBitUpdated { state: bool },
-
- #[serde(rename = "connectivity:connection_state_updated")]
- ConnectionStateUpdated {
- old: Option<ConnectionState>,
- new: ConnectionState,
- },
-
- // ================================================================== //
- // SECURITY
- #[serde(rename = "security:connection_state_updated")]
- KeyUpdated {
- key_type: KeyType,
- old: Option<Bytes>,
- new: Bytes,
- generation: Option<u32>,
- },
-
- #[serde(rename = "security:connection_state_updated")]
- KeyRetired {
- key_type: KeyType,
- key: Option<Bytes>,
- generation: Option<u32>,
- },
-
- // ================================================================== //
- // TRANSPORT
- #[serde(rename = "transport:version_information")]
- VersionInformation {
- server_versions: Option<Vec<Bytes>>,
- client_versions: Option<Vec<Bytes>>,
- chosen_version: Option<Bytes>,
- },
-
- #[serde(rename = "transport:version_information")]
- AlpnInformation {
- server_alpns: Option<Vec<Bytes>>,
- client_alpns: Option<Vec<Bytes>>,
- chosen_alpn: Option<Bytes>,
- },
-
- #[serde(rename = "transport:parameters_set")]
- TransportParametersSet {
- owner: Option<TransportOwner>,
-
- resumption_allowed: Option<bool>,
- early_data_enabled: Option<bool>,
- tls_cipher: Option<String>,
- aead_tag_length: Option<u8>,
-
- original_destination_connection_id: Option<Bytes>,
- initial_source_connection_id: Option<Bytes>,
- retry_source_connection_id: Option<Bytes>,
- stateless_reset_token: Option<Token>,
- disable_active_migration: Option<bool>,
-
- max_idle_timeout: Option<u64>,
- max_udp_payload_size: Option<u32>,
- ack_delay_exponent: Option<u16>,
- max_ack_delay: Option<u16>,
- active_connection_id_limit: Option<u32>,
-
- initial_max_data: Option<u64>,
- initial_max_stream_data_bidi_local: Option<u64>,
- initial_max_stream_data_bidi_remote: Option<u64>,
- initial_max_stream_data_uni: Option<u64>,
- initial_max_streams_bidi: Option<u64>,
- initial_max_streams_uni: Option<u64>,
-
- preferred_address: Option<PreferredAddress>,
- },
-
- #[serde(rename = "transport:parameters_restored")]
- TransportParametersRestored {
- disable_active_migration: Option<bool>,
-
- max_idle_timeout: Option<u64>,
- max_udp_payload_size: Option<u32>,
- active_connection_id_limit: Option<u32>,
-
- initial_max_data: Option<u64>,
- initial_max_stream_data_bidi_local: Option<u64>,
- initial_max_stream_data_bidi_remote: Option<u64>,
- initial_max_stream_data_uni: Option<u64>,
- initial_max_streams_bidi: Option<u64>,
- initial_max_streams_uni: Option<u64>,
- },
-
- #[serde(rename = "transport:datagrams_received")]
- DatagramsReceived {
- count: Option<u16>,
-
- raw: Option<Vec<RawInfo>>,
-
- datagram_ids: Option<Vec<u32>>,
- },
-
- #[serde(rename = "transport:datagrams_sent")]
- DatagramsSent {
- count: Option<u16>,
-
- raw: Option<Vec<RawInfo>>,
-
- datagram_ids: Option<Vec<u32>>,
- },
-
- #[serde(rename = "transport:datagram_dropped")]
- DatagramDropped { raw: Option<RawInfo> },
-
- #[serde(rename = "transport:packet_received")]
- PacketReceived {
- header: PacketHeader,
- // `frames` is defined here in the QLog schema specification. However,
- // our streaming serializer requires serde to put the object at the end,
- // so we define it there and depend on serde's preserve_order feature.
- is_coalesced: Option<bool>,
-
- retry_token: Option<Token>,
-
- stateless_reset_token: Option<Bytes>,
-
- supported_versions: Option<Vec<Bytes>>,
-
- raw: Option<RawInfo>,
- datagram_id: Option<u32>,
-
- frames: Option<Vec<QuicFrame>>,
- },
-
- #[serde(rename = "transport:packet_sent")]
- PacketSent {
- header: PacketHeader,
- // `frames` is defined here in the QLog schema specification. However,
- // our streaming serializer requires serde to put the object at the end,
- // so we define it there and depend on serde's preserve_order feature.
- is_coalesced: Option<bool>,
-
- retry_token: Option<Token>,
-
- stateless_reset_token: Option<Bytes>,
-
- supported_versions: Option<Vec<Bytes>>,
-
- raw: Option<RawInfo>,
- datagram_id: Option<u32>,
-
- frames: Option<Vec<QuicFrame>>,
- },
-
- #[serde(rename = "transport:packet_dropped")]
- PacketDropped {
- header: Option<PacketHeader>,
-
- raw: Option<RawInfo>,
- datagram_id: Option<u32>,
- },
-
- #[serde(rename = "transport:packet_buffered")]
- PacketBuffered {
- header: Option<PacketHeader>,
-
- raw: Option<RawInfo>,
- datagram_id: Option<u32>,
- },
-
- #[serde(rename = "transport:version_information")]
- PacketsAcked {
- packet_number_space: Option<PacketNumberSpace>,
- packet_numbers: Option<Vec<u64>>,
- },
-
- #[serde(rename = "transport:stream_state_updated")]
- StreamStateUpdated {
- stream_id: u64,
- stream_type: Option<StreamType>,
-
- old: Option<StreamState>,
- new: StreamState,
-
- stream_side: Option<StreamSide>,
- },
-
- #[serde(rename = "transport:frames_processed")]
- FramesProcessed {
- frames: Vec<QuicFrame>,
-
- packet_number: Option<u64>,
- },
-
- #[serde(rename = "transport:data_moved")]
- DataMoved {
- stream_id: Option<u64>,
- offset: Option<u64>,
- length: Option<u64>,
-
- from: Option<DataRecipient>,
- to: Option<DataRecipient>,
-
- data: Option<Bytes>,
- },
-
- // ================================================================== //
- // RECOVERY
- #[serde(rename = "recovery:parameters_set")]
- RecoveryParametersSet {
- reordering_threshold: Option<u16>,
- time_threshold: Option<f32>,
- timer_granularity: Option<u16>,
- initial_rtt: Option<f32>,
-
- max_datagram_size: Option<u32>,
- initial_congestion_window: Option<u64>,
- minimum_congestion_window: Option<u32>,
- loss_reduction_factor: Option<f32>,
- persistent_congestion_threshold: Option<u16>,
- },
-
- #[serde(rename = "recovery:metrics_updated")]
- MetricsUpdated {
- min_rtt: Option<f32>,
- smoothed_rtt: Option<f32>,
- latest_rtt: Option<f32>,
- rtt_variance: Option<f32>,
-
- pto_count: Option<u16>,
-
- congestion_window: Option<u64>,
- bytes_in_flight: Option<u64>,
-
- ssthresh: Option<u64>,
-
- // qlog defined
- packets_in_flight: Option<u64>,
-
- pacing_rate: Option<u64>,
- },
-
- #[serde(rename = "recovery:congestion_state_updated")]
- CongestionStateUpdated { old: Option<String>, new: String },
-
- #[serde(rename = "recovery:loss_timer_updated")]
- LossTimerUpdated {
- timer_type: Option<TimerType>,
- packet_number_space: Option<PacketNumberSpace>,
-
- event_type: LossTimerEventType,
-
- delta: Option<f32>,
- },
-
- #[serde(rename = "recovery:packet_lost")]
- PacketLost {
- header: Option<PacketHeader>,
-
- frames: Option<Vec<QuicFrame>>,
- },
-
- #[serde(rename = "recovery:marked_for_retransmit")]
- MarkedForRetransmit { frames: Vec<QuicFrame> },
-
- // ================================================================== //
- // HTTP/3
- #[serde(rename = "http:parameters_set")]
- H3ParametersSet {
- owner: Option<H3Owner>,
-
- max_header_list_size: Option<u64>,
- max_table_capacity: Option<u64>,
- blocked_streams_count: Option<u64>,
-
- // qlog-defined
- waits_for_settings: Option<bool>,
- },
-
- #[serde(rename = "http:parameters_restored")]
- H3ParametersRestored {
- max_header_list_size: Option<u64>,
- max_table_capacity: Option<u64>,
- blocked_streams_count: Option<u64>,
- },
-
- #[serde(rename = "http:stream_type_set")]
- H3StreamTypeSet {
- stream_id: u64,
- owner: Option<H3Owner>,
-
- old: Option<H3StreamType>,
- new: H3StreamType,
-
- associated_push_id: Option<u64>,
- },
-
- #[serde(rename = "http:frame_created")]
- H3FrameCreated {
- stream_id: u64,
- length: Option<u64>,
- frame: Http3Frame,
-
- raw: Option<RawInfo>,
- },
-
- #[serde(rename = "http:frame_parsed")]
- H3FrameParsed {
- stream_id: u64,
- length: Option<u64>,
- frame: Http3Frame,
-
- raw: Option<RawInfo>,
- },
-
- #[serde(rename = "http:push_resolved")]
- H3PushResolved {
- push_id: Option<u64>,
- stream_id: Option<u64>,
-
- decision: Option<H3PushDecision>,
- },
-
- // ================================================================== //
- // QPACK
- #[serde(rename = "qpack:state_updated")]
- QpackStateUpdated {
- owner: Option<QpackOwner>,
-
- dynamic_table_capacity: Option<u64>,
- dynamic_table_size: Option<u64>,
-
- known_received_count: Option<u64>,
- current_insert_count: Option<u64>,
- },
-
- #[serde(rename = "qpack:stream_state_updated")]
- QpackStreamStateUpdated {
- stream_id: u64,
-
- state: QpackStreamState,
- },
-
- #[serde(rename = "qpack:dynamic_table_updated")]
- QpackDynamicTableUpdated {
- update_type: QpackUpdateType,
-
- entries: Vec<QpackDynamicTableEntry>,
- },
-
- #[serde(rename = "qpack:headers_encoded")]
- QpackHeadersEncoded {
- stream_id: Option<u64>,
-
- headers: Option<HttpHeader>,
-
- block_prefix: QpackHeaderBlockPrefix,
- header_block: Vec<QpackHeaderBlockRepresentation>,
-
- length: Option<u32>,
- raw: Option<Bytes>,
- },
-
- #[serde(rename = "qpack:headers_decoded")]
- QpackHeadersDecoded {
- stream_id: Option<u64>,
-
- headers: Option<HttpHeader>,
-
- block_prefix: QpackHeaderBlockPrefix,
- header_block: Vec<QpackHeaderBlockRepresentation>,
-
- length: Option<u32>,
- raw: Option<Bytes>,
- },
-
- #[serde(rename = "qpack:instruction_created")]
- QpackInstructionCreated {
- instruction: QPackInstruction,
-
- length: Option<u32>,
- raw: Option<Bytes>,
- },
-
- #[serde(rename = "qpack:instruction_parsed")]
- QpackInstructionParsed {
- instruction: QPackInstruction,
-
- length: Option<u32>,
- raw: Option<Bytes>,
- },
-
- // ================================================================== //
- // Generic
- #[serde(rename = "generic:connection_error")]
- ConnectionError {
- code: Option<ConnectionErrorCode>,
- description: Option<String>,
- },
-
- #[serde(rename = "generic:application_error")]
- ApplicationError {
- code: Option<ApplicationErrorCode>,
- description: Option<String>,
- },
-
- #[serde(rename = "generic:internal_error")]
- InternalError {
- code: Option<u64>,
- description: Option<String>,
- },
-
- #[serde(rename = "generic:internal_warning")]
- InternalWarning {
- code: Option<u64>,
- description: Option<String>,
- },
-
- #[serde(rename = "generic:message")]
- Message { message: String },
-
- #[serde(rename = "generic:marker")]
- Marker {
- marker_type: String,
- message: Option<String>,
- },
-}
-
-impl EventData {
- /// Returns size of `EventData` array of `QuicFrame`s if it exists.
- pub fn contains_quic_frames(&self) -> Option<usize> {
- // For some EventData variants, the frame array is optional
- // but for others it is mandatory.
- match self {
- EventData::PacketSent { frames, .. } |
- EventData::PacketReceived { frames, .. } |
- EventData::PacketLost { frames, .. } =>
- frames.as_ref().map(|f| f.len()),
-
- EventData::MarkedForRetransmit { frames } |
- EventData::FramesProcessed { frames, .. } => Some(frames.len()),
-
- _ => None,
- }
- }
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum PacketType {
- Initial,
- Handshake,
-
- #[serde(rename = "0RTT")]
- ZeroRtt,
-
- #[serde(rename = "1RTT")]
- OneRtt,
-
- Retry,
- VersionNegotiation,
- Unknown,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum Http3EventType {
- ParametersSet,
- ParametersRestored,
- StreamTypeSet,
- FrameCreated,
- FrameParsed,
- PushResolved,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackEventType {
- StateUpdated,
- StreamStateUpdated,
- DynamicTableUpdated,
- HeadersEncoded,
- HeadersDecoded,
- InstructionCreated,
- InstructionParsed,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QuicFrameTypeName {
- Padding,
- Ping,
- Ack,
- ResetStream,
- StopSending,
- Crypto,
- NewToken,
- Stream,
- MaxData,
- MaxStreamData,
- MaxStreams,
- DataBlocked,
- StreamDataBlocked,
- StreamsBlocked,
- NewConnectionId,
- RetireConnectionId,
- PathChallenge,
- PathResponse,
- ConnectionClose,
- ApplicationClose,
- HandshakeDone,
- Datagram,
- Unknown,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum TokenType {
- Retry,
- Resumption,
- StatelessReset,
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
-pub struct Token {
- #[serde(rename(serialize = "type"))]
- pub ty: Option<TokenType>,
-
- pub length: Option<u32>,
- pub data: Option<Bytes>,
-
- pub details: Option<String>,
-}
-
-// TODO: search for pub enum Error { to see how best to encode errors in qlog.
-#[serde_with::skip_serializing_none]
-#[derive(Clone, Serialize, Deserialize, PartialEq, Debug)]
-pub struct PacketHeader {
- pub packet_type: PacketType,
- pub packet_number: u64,
-
- pub flags: Option<u8>,
- pub token: Option<Token>,
-
- pub length: Option<u16>,
-
- pub version: Option<Bytes>,
-
- pub scil: Option<u8>,
- pub dcil: Option<u8>,
- pub scid: Option<Bytes>,
- pub dcid: Option<Bytes>,
-}
-
-impl PacketHeader {
- #[allow(clippy::too_many_arguments)]
- /// Creates a new PacketHeader.
- pub fn new(
- packet_type: PacketType, packet_number: u64, flags: Option<u8>,
- token: Option<Token>, length: Option<u16>, version: Option<u32>,
- scid: Option<&[u8]>, dcid: Option<&[u8]>,
- ) -> Self {
- let (scil, scid) = match scid {
- Some(cid) => (
- Some(cid.len() as u8),
- Some(format!("{}", HexSlice::new(&cid))),
- ),
-
- None => (None, None),
- };
-
- let (dcil, dcid) = match dcid {
- Some(cid) => (
- Some(cid.len() as u8),
- Some(format!("{}", HexSlice::new(&cid))),
- ),
-
- None => (None, None),
- };
-
- let version = version.map(|v| format!("{:x?}", v));
-
- PacketHeader {
- packet_type,
- packet_number,
- flags,
- token,
- length,
- version,
- scil,
- dcil,
- scid,
- dcid,
- }
- }
-
- /// Creates a new PacketHeader.
- ///
- /// Once a QUIC connection has formed, version, dcid and scid are stable, so
- /// there are space benefits to not logging them in every packet, especially
- /// PacketType::OneRtt.
- pub fn with_type(
- ty: PacketType, packet_number: u64, version: Option<u32>,
- scid: Option<&[u8]>, dcid: Option<&[u8]>,
- ) -> Self {
- match ty {
- PacketType::OneRtt => PacketHeader::new(
- ty,
- packet_number,
- None,
- None,
- None,
- None,
- None,
- None,
- ),
-
- _ => PacketHeader::new(
- ty,
- packet_number,
- None,
- None,
- None,
- version,
- scid,
- dcid,
- ),
- }
- }
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum StreamType {
- Bidirectional,
- Unidirectional,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum ErrorSpace {
- TransportError,
- ApplicationError,
-}
-
-#[derive(Serialize, Deserialize, Clone, Copy, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum GenericEventType {
- ConnectionError,
- ApplicationError,
- InternalError,
- InternalWarning,
-
- Message,
- Marker,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(untagged)]
-pub enum ConnectionErrorCode {
- TransportError(TransportError),
- CryptoError(CryptoError),
- Value(u64),
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(untagged)]
-pub enum ApplicationErrorCode {
- ApplicationError(ApplicationError),
- Value(u64),
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum TransportError {
- NoError,
- InternalError,
- ServerBusy,
- FlowControlError,
- StreamLimitError,
- StreamStateError,
- FinalSizeError,
- FrameEncodingError,
- TransportParameterError,
- ProtocolViolation,
- InvalidMigration,
- CryptoBufferExceeded,
- Unknown,
-}
-
-// TODO
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum CryptoError {
- Prefix,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum ApplicationError {
- HttpNoError,
- HttpGeneralProtocolError,
- HttpInternalError,
- HttpRequestCancelled,
- HttpIncompleteRequest,
- HttpConnectError,
- HttpFrameError,
- HttpExcessiveLoad,
- HttpVersionFallback,
- HttpIdError,
- HttpStreamCreationError,
- HttpClosedCriticalStream,
- HttpEarlyResponse,
- HttpMissingSettings,
- HttpUnexpectedFrame,
- HttpRequestRejection,
- HttpSettingsError,
- Unknown,
-}
-
-#[serde_with::skip_serializing_none]
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(tag = "frame_type")]
-#[serde(rename_all = "snake_case")]
-// Strictly, the qlog spec says that all these frame types have a frame_type
-// field. But instead of making that a rust object property, just use serde to
-// ensure it goes out on the wire. This means that deserialization of frames
-// also works automatically.
-pub enum QuicFrame {
- Padding,
-
- Ping,
-
- Ack {
- ack_delay: Option<f32>,
- acked_ranges: Option<Vec<(u64, u64)>>,
-
- ect1: Option<u64>,
-
- ect0: Option<u64>,
-
- ce: Option<u64>,
- },
-
- ResetStream {
- stream_id: u64,
- error_code: u64,
- final_size: u64,
- },
-
- StopSending {
- stream_id: u64,
- error_code: u64,
- },
-
- Crypto {
- offset: u64,
- length: u64,
- },
-
- NewToken {
- length: String,
- token: String,
- },
-
- Stream {
- stream_id: u64,
- offset: u64,
- length: u64,
- fin: bool,
-
- raw: Option<Bytes>,
- },
-
- MaxData {
- maximum: u64,
- },
-
- MaxStreamData {
- stream_id: u64,
- maximum: u64,
- },
-
- MaxStreams {
- stream_type: StreamType,
- maximum: u64,
- },
-
- DataBlocked {
- limit: u64,
- },
-
- StreamDataBlocked {
- stream_id: u64,
- limit: u64,
- },
-
- StreamsBlocked {
- stream_type: StreamType,
- limit: u64,
- },
-
- NewConnectionId {
- sequence_number: u32,
- retire_prior_to: u32,
- length: u64,
- connection_id: String,
- reset_token: String,
- },
-
- RetireConnectionId {
- sequence_number: u32,
- },
-
- PathChallenge {
- data: Option<Bytes>,
- },
-
- PathResponse {
- data: Option<Bytes>,
- },
-
- ConnectionClose {
- error_space: ErrorSpace,
- error_code: u64,
- raw_error_code: Option<u64>,
- reason: Option<String>,
-
- trigger_frame_type: Option<u64>,
- },
-
- HandshakeDone,
-
- Datagram {
- length: u64,
-
- raw: Option<Bytes>,
- },
-
- Unknown {
- raw_frame_type: u64,
- },
-}
-
-// ================================================================== //
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum Http3FrameTypeName {
- Data,
- Headers,
- CancelPush,
- Settings,
- PushPromise,
- Goaway,
- MaxPushId,
- DuplicatePush,
- Reserved,
- Unknown,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct HttpHeader {
- pub name: String,
- pub value: String,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub struct Setting {
- pub name: String,
- pub value: String,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub enum Http3Frame {
- Data {
- frame_type: Http3FrameTypeName,
-
- raw: Option<Bytes>,
- },
-
- Headers {
- frame_type: Http3FrameTypeName,
- headers: Vec<HttpHeader>,
- },
-
- CancelPush {
- frame_type: Http3FrameTypeName,
- push_id: String,
- },
-
- Settings {
- frame_type: Http3FrameTypeName,
- settings: Vec<Setting>,
- },
-
- PushPromise {
- frame_type: Http3FrameTypeName,
- push_id: String,
- headers: Vec<HttpHeader>,
- },
-
- Goaway {
- frame_type: Http3FrameTypeName,
- stream_id: String,
- },
-
- MaxPushId {
- frame_type: Http3FrameTypeName,
- push_id: String,
- },
-
- DuplicatePush {
- frame_type: Http3FrameTypeName,
- push_id: String,
- },
-
- Reserved {
- frame_type: Http3FrameTypeName,
- },
-
- Unknown {
- frame_type: Http3FrameTypeName,
- },
-}
-
-impl Http3Frame {
- pub fn data(raw: Option<Bytes>) -> Self {
- Http3Frame::Data {
- frame_type: Http3FrameTypeName::Data,
- raw,
- }
- }
-
- pub fn headers(headers: Vec<HttpHeader>) -> Self {
- Http3Frame::Headers {
- frame_type: Http3FrameTypeName::Headers,
- headers,
- }
- }
-
- pub fn cancel_push(push_id: String) -> Self {
- Http3Frame::CancelPush {
- frame_type: Http3FrameTypeName::CancelPush,
- push_id,
- }
- }
-
- pub fn settings(settings: Vec<Setting>) -> Self {
- Http3Frame::Settings {
- frame_type: Http3FrameTypeName::Settings,
- settings,
- }
- }
-
- pub fn push_promise(push_id: String, headers: Vec<HttpHeader>) -> Self {
- Http3Frame::PushPromise {
- frame_type: Http3FrameTypeName::PushPromise,
- push_id,
- headers,
- }
- }
-
- pub fn goaway(stream_id: String) -> Self {
- Http3Frame::Goaway {
- frame_type: Http3FrameTypeName::Goaway,
- stream_id,
- }
- }
-
- pub fn max_push_id(push_id: String) -> Self {
- Http3Frame::MaxPushId {
- frame_type: Http3FrameTypeName::MaxPushId,
- push_id,
- }
- }
-
- pub fn duplicate_push(push_id: String) -> Self {
- Http3Frame::DuplicatePush {
- frame_type: Http3FrameTypeName::DuplicatePush,
- push_id,
- }
- }
-
- pub fn reserved() -> Self {
- Http3Frame::Reserved {
- frame_type: Http3FrameTypeName::Reserved,
- }
- }
-
- pub fn unknown() -> Self {
- Http3Frame::Unknown {
- frame_type: Http3FrameTypeName::Unknown,
- }
- }
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackInstructionTypeName {
- SetDynamicTableCapacityInstruction,
- InsertWithNameReferenceInstruction,
- InsertWithoutNameReferenceInstruction,
- DuplicateInstruction,
- HeaderAcknowledgementInstruction,
- StreamCancellationInstruction,
- InsertCountIncrementInstruction,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackTableType {
- Static,
- Dynamic,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub enum QPackInstruction {
- SetDynamicTableCapacityInstruction {
- instruction_type: QpackInstructionTypeName,
-
- capacity: u64,
- },
-
- InsertWithNameReferenceInstruction {
- instruction_type: QpackInstructionTypeName,
-
- table_type: QpackTableType,
-
- name_index: u64,
-
- huffman_encoded_value: bool,
- value_length: u64,
- value: String,
- },
-
- InsertWithoutNameReferenceInstruction {
- instruction_type: QpackInstructionTypeName,
-
- huffman_encoded_name: bool,
- name_length: u64,
- name: String,
-
- huffman_encoded_value: bool,
- value_length: u64,
- value: String,
- },
-
- DuplicateInstruction {
- instruction_type: QpackInstructionTypeName,
-
- index: u64,
- },
-
- HeaderAcknowledgementInstruction {
- instruction_type: QpackInstructionTypeName,
-
- stream_id: String,
- },
-
- StreamCancellationInstruction {
- instruction_type: QpackInstructionTypeName,
-
- stream_id: String,
- },
-
- InsertCountIncrementInstruction {
- instruction_type: QpackInstructionTypeName,
-
- increment: u64,
- },
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-#[serde(rename_all = "snake_case")]
-pub enum QpackHeaderBlockRepresentationTypeName {
- IndexedHeaderField,
- LiteralHeaderFieldWithName,
- LiteralHeaderFieldWithoutName,
-}
-
-#[derive(Serialize, Deserialize, Clone, PartialEq, Debug)]
-pub enum QpackHeaderBlockRepresentation {
- IndexedHeaderField {
- header_field_type: QpackHeaderBlockRepresentationTypeName,
-
- table_type: QpackTableType,
- index: u64,
-
- is_post_base: Option<bool>,
- },
-
- LiteralHeaderFieldWithName {
- header_field_type: QpackHeaderBlockRepresentationTypeName,
-
- preserve_literal: bool,
- table_type: QpackTableType,
- name_index: u64,
-
- huffman_encoded_value: bool,
- value_length: u64,
- value: String,
-
- is_post_base: Option<bool>,
- },
-
- LiteralHeaderFieldWithoutName {
- header_field_type: QpackHeaderBlockRepresentationTypeName,
-
- preserve_literal: bool,
- table_type: QpackTableType,
- name_index: u64,
-
- huffman_encoded_name: bool,
- name_length: u64,
- name: String,
-
- huffman_encoded_value: bool,
- value_length: u64,
- value: String,
-
- is_post_base: Option<bool>,
- },
-}
-
-pub struct HexSlice<'a>(&'a [u8]);
-
-impl<'a> HexSlice<'a> {
- pub fn new<T>(data: &'a T) -> HexSlice<'a>
- where
- T: ?Sized + AsRef<[u8]> + 'a,
- {
- HexSlice(data.as_ref())
- }
-
- pub fn maybe_string<T>(data: Option<&'a T>) -> Option<String>
- where
- T: ?Sized + AsRef<[u8]> + 'a,
- {
- data.map(|d| format!("{}", HexSlice::new(d)))
- }
-}
-
-impl<'a> std::fmt::Display for HexSlice<'a> {
- fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
- for byte in self.0 {
- write!(f, "{:02x}", byte)?;
- }
- Ok(())
- }
-}
-
-#[doc(hidden)]
-pub mod testing {
- use super::*;
-
- pub fn make_pkt_hdr(packet_type: PacketType) -> PacketHeader {
- let scid = [0x7e, 0x37, 0xe4, 0xdc, 0xc6, 0x68, 0x2d, 0xa8];
- let dcid = [0x36, 0xce, 0x10, 0x4e, 0xee, 0x50, 0x10, 0x1c];
-
- // Some(1251),
- // Some(1224),
-
- PacketHeader::new(
- packet_type,
- 0,
- None,
- None,
- None,
- Some(0x0000_0001),
- Some(&scid),
- Some(&dcid),
- )
- }
-
- pub fn make_trace() -> Trace {
- Trace::new(
- VantagePoint {
- name: None,
- ty: VantagePointType::Server,
- flow: None,
- },
- Some("Quiche qlog trace".to_string()),
- Some("Quiche qlog trace description".to_string()),
- Some(Configuration {
- time_offset: Some(0.0),
- original_uris: None,
- }),
- None,
- )
- }
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use testing::*;
-
- #[test]
- fn packet_header() {
- let pkt_hdr = make_pkt_hdr(PacketType::Initial);
-
- let log_string = r#"{
- "packet_type": "initial",
- "packet_number": 0,
- "version": "1",
- "scil": 8,
- "dcil": 8,
- "scid": "7e37e4dcc6682da8",
- "dcid": "36ce104eee50101c"
-}"#;
-
- assert_eq!(serde_json::to_string_pretty(&pkt_hdr).unwrap(), log_string);
- }
-
- #[test]
- fn packet_sent_event_no_frames() {
- let log_string = r#"{
- "time": 0.0,
- "name": "transport:packet_sent",
- "data": {
- "header": {
- "packet_type": "initial",
- "packet_number": 0,
- "version": "1",
- "scil": 8,
- "dcil": 8,
- "scid": "7e37e4dcc6682da8",
- "dcid": "36ce104eee50101c"
- },
- "raw": {
- "length": 1251,
- "payload_length": 1224
- }
- }
-}"#;
-
- let pkt_hdr = make_pkt_hdr(PacketType::Initial);
- let ev_data = EventData::PacketSent {
- header: pkt_hdr.clone(),
- frames: None,
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: Some(RawInfo {
- length: Some(1251),
- payload_length: Some(1224),
- data: None,
- }),
- datagram_id: None,
- };
-
- let ev = Event::with_time(0.0, ev_data);
-
- assert_eq!(serde_json::to_string_pretty(&ev).unwrap(), log_string);
- }
-
- #[test]
- fn packet_sent_event_some_frames() {
- let log_string = r#"{
- "time": 0.0,
- "name": "transport:packet_sent",
- "data": {
- "header": {
- "packet_type": "initial",
- "packet_number": 0,
- "version": "1",
- "scil": 8,
- "dcil": 8,
- "scid": "7e37e4dcc6682da8",
- "dcid": "36ce104eee50101c"
- },
- "raw": {
- "length": 1251,
- "payload_length": 1224
- },
- "frames": [
- {
- "frame_type": "padding"
- },
- {
- "frame_type": "ping"
- },
- {
- "frame_type": "stream",
- "stream_id": 0,
- "offset": 0,
- "length": 100,
- "fin": true
- }
- ]
- }
-}"#;
-
- let pkt_hdr = make_pkt_hdr(PacketType::Initial);
-
- let mut frames = Vec::new();
- frames.push(QuicFrame::Padding);
- frames.push(QuicFrame::Ping);
- frames.push(QuicFrame::Stream {
- stream_id: 0,
- offset: 0,
- length: 100,
- fin: true,
- raw: None,
- });
-
- let ev_data = EventData::PacketSent {
- header: pkt_hdr.clone(),
- frames: Some(frames),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: Some(RawInfo {
- length: Some(1251),
- payload_length: Some(1224),
- data: None,
- }),
- datagram_id: None,
- };
-
- let ev = Event::with_time(0.0, ev_data);
- assert_eq!(serde_json::to_string_pretty(&ev).unwrap(), log_string);
- }
-
- #[test]
- fn trace_no_events() {
- let log_string = r#"{
- "vantage_point": {
- "type": "server"
- },
- "title": "Quiche qlog trace",
- "description": "Quiche qlog trace description",
- "configuration": {
- "time_offset": 0.0
- },
- "events": []
-}"#;
-
- let trace = make_trace();
-
- let serialized = serde_json::to_string_pretty(&trace).unwrap();
- assert_eq!(serialized, log_string);
-
- let deserialized: Trace = serde_json::from_str(&serialized).unwrap();
- assert_eq!(deserialized, trace);
- }
-
- #[test]
- fn trace_single_transport_event() {
- let log_string = r#"{
- "vantage_point": {
- "type": "server"
- },
- "title": "Quiche qlog trace",
- "description": "Quiche qlog trace description",
- "configuration": {
- "time_offset": 0.0
- },
- "events": [
- {
- "time": 0.0,
- "name": "transport:packet_sent",
- "data": {
- "header": {
- "packet_type": "initial",
- "packet_number": 0,
- "version": "1",
- "scil": 8,
- "dcil": 8,
- "scid": "7e37e4dcc6682da8",
- "dcid": "36ce104eee50101c"
- },
- "raw": {
- "length": 1251,
- "payload_length": 1224
- },
- "frames": [
- {
- "frame_type": "stream",
- "stream_id": 0,
- "offset": 0,
- "length": 100,
- "fin": true
- }
- ]
- }
- }
- ]
-}"#;
-
- let mut trace = make_trace();
-
- let pkt_hdr = make_pkt_hdr(PacketType::Initial);
-
- let frames = vec![QuicFrame::Stream {
- stream_id: 0,
- offset: 0,
- length: 100,
- fin: true,
- raw: None,
- }];
- let event_data = EventData::PacketSent {
- header: pkt_hdr,
- frames: Some(frames),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: Some(RawInfo {
- length: Some(1251),
- payload_length: Some(1224),
- data: None,
- }),
- datagram_id: None,
- };
-
- let ev = Event::with_time(0.0, event_data);
-
- trace.push_event(ev);
-
- let serialized = serde_json::to_string_pretty(&trace).unwrap();
- assert_eq!(serialized, log_string);
-
- let deserialized: Trace = serde_json::from_str(&serialized).unwrap();
- assert_eq!(deserialized, trace);
- }
-
- #[test]
- fn serialization_states() {
- let v: Vec<u8> = Vec::new();
- let buff = std::io::Cursor::new(v);
- let writer = Box::new(buff);
-
- let mut trace = make_trace();
- let pkt_hdr = make_pkt_hdr(PacketType::Handshake);
- let raw = Some(RawInfo {
- length: Some(1251),
- payload_length: Some(1224),
- data: None,
- });
-
- let frame1 = QuicFrame::Stream {
- stream_id: 40,
- offset: 40,
- length: 400,
- fin: true,
- raw: None,
- };
-
- let event_data1 = EventData::PacketSent {
- header: pkt_hdr.clone(),
- frames: Some(vec![frame1]),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: raw.clone(),
- datagram_id: None,
- };
-
- let event1 = Event::with_time(0.0, event_data1);
-
- trace.push_event(event1);
-
- let frame2 = QuicFrame::Stream {
- stream_id: 0,
- offset: 0,
- length: 100,
- fin: true,
- raw: None,
- };
-
- let frame3 = QuicFrame::Stream {
- stream_id: 0,
- offset: 0,
- length: 100,
- fin: true,
- raw: None,
- };
-
- let event_data2 = EventData::PacketSent {
- header: pkt_hdr.clone(),
- frames: Some(vec![]),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: None,
- supported_versions: None,
- raw: raw.clone(),
- datagram_id: None,
- };
-
- let event2 = Event::with_time(0.0, event_data2);
-
- let event_data3 = EventData::PacketSent {
- header: pkt_hdr,
- frames: Some(vec![]),
- is_coalesced: None,
- retry_token: None,
- stateless_reset_token: Some("reset_token".to_string()),
- supported_versions: None,
- raw: raw.clone(),
- datagram_id: None,
- };
-
- let event3 = Event::with_time(0.0, event_data3);
-
- let mut s = QlogStreamer::new(
- "version".to_string(),
- Some("title".to_string()),
- Some("description".to_string()),
- None,
- std::time::Instant::now(),
- trace,
- EventImportance::Base,
- writer,
- );
-
- // Before the log is started all other operations should fail.
- assert!(match s.add_event(event2.clone()) {
- Err(Error::InvalidState) => true,
- _ => false,
- });
- assert!(match s.add_frame(frame2.clone(), false) {
- Err(Error::InvalidState) => true,
- _ => false,
- });
- assert!(match s.finish_frames() {
- Err(Error::InvalidState) => true,
- _ => false,
- });
- assert!(match s.finish_log() {
- Err(Error::InvalidState) => true,
- _ => false,
- });
-
- // Once a log is started, can't write frames before an event.
- assert!(match s.start_log() {
- Ok(()) => true,
- _ => false,
- });
- assert!(match s.add_frame(frame2.clone(), true) {
- Err(Error::InvalidState) => true,
- _ => false,
- });
- assert!(match s.finish_frames() {
- Err(Error::InvalidState) => true,
- _ => false,
- });
-
- // Some events hold frames; can't write any more events until frame
- // writing is concluded.
- assert!(match s.add_event(event2.clone()) {
- Ok(true) => true,
- _ => false,
- });
- assert!(match s.add_event(event2.clone()) {
- Err(Error::InvalidState) => true,
- _ => false,
- });
-
- // While writing frames, can't write events.
- assert!(match s.add_frame(frame2.clone(), false) {
- Ok(()) => true,
- _ => false,
- });
-
- assert!(match s.add_event(event2.clone()) {
- Err(Error::InvalidState) => true,
- _ => false,
- });
- assert!(match s.finish_frames() {
- Ok(()) => true,
- _ => false,
- });
-
- // Adding an event that includes both frames and raw data should
- // be allowed.
- assert!(match s.add_event(event3.clone()) {
- Ok(true) => true,
- _ => false,
- });
- assert!(match s.add_frame(frame3.clone(), false) {
- Ok(()) => true,
- _ => false,
- });
- assert!(match s.finish_frames() {
- Ok(()) => true,
- _ => false,
- });
-
- // Adding an event with an external time should work too.
- // For tests, it will resolve to 0 but we care about proving the API
- // here, not timing specifics.
- let now = std::time::Instant::now();
-
- assert!(match s.add_event_with_instant(event3.clone(), now) {
- Ok(true) => true,
- _ => false,
- });
- assert!(match s.add_frame(frame3.clone(), false) {
- Ok(()) => true,
- _ => false,
- });
- assert!(match s.finish_frames() {
- Ok(()) => true,
- _ => false,
- });
-
- assert!(match s.finish_log() {
- Ok(()) => true,
- _ => false,
- });
-
- let r = s.writer();
- let w: &Box<std::io::Cursor<Vec<u8>>> = unsafe { std::mem::transmute(r) };
-
- let log_string = r#"{"qlog_version":"version","qlog_format":"JSON","title":"title","description":"description","traces":[{"vantage_point":{"type":"server"},"title":"Quiche qlog trace","description":"Quiche qlog trace description","configuration":{"time_offset":0.0},"events":[{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":40,"offset":40,"length":400,"fin":true}]}},{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":0,"offset":0,"length":100,"fin":true}]}},{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"stateless_reset_token":"reset_token","raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":0,"offset":0,"length":100,"fin":true}]}},{"time":0.0,"name":"transport:packet_sent","data":{"header":{"packet_type":"handshake","packet_number":0,"version":"1","scil":8,"dcil":8,"scid":"7e37e4dcc6682da8","dcid":"36ce104eee50101c"},"stateless_reset_token":"reset_token","raw":{"length":1251,"payload_length":1224},"frames":[{"frame_type":"stream","stream_id":0,"offset":0,"length":100,"fin":true}]}}]}]}"#;
-
- let written_string = std::str::from_utf8(w.as_ref().get_ref()).unwrap();
-
- assert_eq!(log_string, written_string);
- }
-}
diff --git a/tools/release.sh b/tools/release.sh
index 2763b72..e315e3f 100755
--- a/tools/release.sh
+++ b/tools/release.sh
@@ -14,9 +14,9 @@
VERSION=$1
-cargo package
+cargo package --package quiche
-sed -i "0,/^version/ s/version = \"\(.*\)\"/version = \"$VERSION\"/" Cargo.toml
-git add Cargo.toml
+sed -i "0,/^version/ s/version = \"\(.*\)\"/version = \"$VERSION\"/" quiche/Cargo.toml
+git add quiche/Cargo.toml
git commit -m $VERSION
git tag -a $VERSION -m "quiche $VERSION" --sign