Merge tag '0.10.0' into upgrade
quiche 0.10.0
* tag '0.10.0': (262 commits)
0.10.0
qlog: update docs and bump to 0.5.0
recovery: introduce pacing rate multiplier
recovery: name time conversion constant
frame: parse ACKs with ECN counts
Revert "zero-copy emit scatter API"
avoid counting reset streams' unsent data towards flow control limit
add method to check if stream can hold more data
http3_test: silence unused field warnings
nginx: link to libm to fix build failure
deps: bump BoringSSL submodule
build: drop support for building with Android NDK < 19
recovery: hystart++ draft 03
zero-copy emit scatter API
h3: rename max_header_list to max_field_section
examples: fix invalid pointer bugs
allow querying whether the connection timed out
expose local_error to applications
add method to return the server name requested by the client
qlog: update to qlog 02, support event importance levels
...
Change-Id: I205aa6aa81542ef169b0194d12e50a9d3400295b
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..db8017c
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,55 @@
+version: 2
+updates:
+- package-ecosystem: cargo
+ directory: "/"
+ schedule:
+ interval: daily
+ time: "13:00"
+ open-pull-requests-limit: 10
+ ignore:
+ - dependency-name: env_logger
+ versions:
+ - ">= 0.7.a, < 0.8"
+ - dependency-name: mio
+ versions:
+ - ">= 0.7.a, < 0.8"
+ - dependency-name: ring
+ versions:
+ - ">= 0.15.a, < 0.16"
+ - dependency-name: ring
+ versions:
+ - ">= 0.16.a, < 0.17"
+ - dependency-name: url
+ versions:
+ - ">= 2.a, < 3"
+- package-ecosystem: cargo
+ directory: "/tools/apps"
+ schedule:
+ interval: daily
+ time: "13:00"
+ open-pull-requests-limit: 10
+ ignore:
+ - dependency-name: env_logger
+ versions:
+ - ">= 0.7.a, < 0.8"
+ - dependency-name: env_logger
+ versions:
+ - ">= 0.8.a, < 0.9"
+ - dependency-name: mio
+ versions:
+ - ">= 0.7.a, < 0.8"
+ - dependency-name: url
+ versions:
+ - ">= 2.a, < 3"
+- package-ecosystem: cargo
+ directory: "/tools/qlog"
+ schedule:
+ interval: daily
+ time: "13:00"
+ open-pull-requests-limit: 10
+- package-ecosystem: cargo
+ directory: "/fuzz"
+ schedule:
+ interval: daily
+ time: "13:00"
+ open-pull-requests-limit: 10
diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml
new file mode 100644
index 0000000..9364a36
--- /dev/null
+++ b/.github/workflows/deploy.yml
@@ -0,0 +1,57 @@
+on:
+ push:
+ branches:
+ - master
+
+name: Deploy
+
+jobs:
+ docs:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: stable
+ override: true
+
+ - name: Run cargo doc
+ uses: actions-rs/cargo@v1
+ with:
+ command: doc
+ args: --no-deps
+
+ - name: Deploy to GitHub Pages
+ uses: crazy-max/ghaction-github-pages@v2
+ with:
+ target_branch: gh-pages
+ build_dir: target/doc
+ fqdn: docs.quic.tech
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ docker:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Build Docker images
+ run: make docker-build
+
+ - name: Login to DockerHub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_TOKEN }}
+
+ - name: Publish Docker images
+ run: make docker-publish
diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml
new file mode 100644
index 0000000..68b19c6
--- /dev/null
+++ b/.github/workflows/nightly.yml
@@ -0,0 +1,196 @@
+on: [push, pull_request]
+
+name: Nightly
+
+env:
+ RUSTFLAGS: "-D warnings"
+ TOOLCHAIN: "nightly"
+
+jobs:
+ quiche:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install nightly toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ components: rustfmt
+ override: true
+
+ - name: Run cargo fmt
+ uses: actions-rs/cargo@v1
+ with:
+ command: fmt
+ args: -- --check
+
+ - name: Install dependencies
+ run: sudo apt-get install libev-dev uthash-dev
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --tests --examples --verbose --features qlog
+
+ - name: Run cargo package
+ uses: actions-rs/cargo@v1
+ with:
+ command: package
+ args: --verbose --allow-dirty
+
+ - name: Run cargo doc
+ uses: actions-rs/cargo@v1
+ with:
+ command: doc
+ args: --no-deps
+
+ - name: Build C examples
+ run: make -C examples
+
+ apps:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install nightly toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ components: rustfmt
+ override: true
+
+ - name: Run cargo build
+ uses: actions-rs/cargo@v1
+ with:
+ command: build
+ args: --verbose --manifest-path=tools/apps/Cargo.toml
+
+ - name: Run cargo fmt
+ uses: actions-rs/cargo@v1
+ with:
+ command: fmt
+ args: --manifest-path=tools/apps/Cargo.toml -- --check
+
+ fuzz:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install nightly toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ components: rustfmt
+ override: true
+
+ - name: Install cargo-fuzz
+ run: cargo install cargo-fuzz
+
+ - name: Run cargo fuzz for packet_recv_client
+ uses: actions-rs/cargo@v1
+ with:
+ command: fuzz
+ args: run packet_recv_client -- -runs=1
+
+ - name: Run cargo fuzz for packet_recv_server
+ uses: actions-rs/cargo@v1
+ with:
+ command: fuzz
+ args: run packet_recv_server -- -runs=1
+
+ - name: Run cargo fuzz for qpack_decode
+ uses: actions-rs/cargo@v1
+ with:
+ command: fuzz
+ args: run qpack_decode -- -runs=1
+
+ - name: Run cargo fmt
+ uses: actions-rs/cargo@v1
+ with:
+ command: fmt
+ args: --manifest-path=fuzz/Cargo.toml -- --check
+
+ qlog:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install nightly toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ components: rustfmt
+ override: true
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --verbose --manifest-path=tools/qlog/Cargo.toml
+
+ - name: Run cargo fmt
+ uses: actions-rs/cargo@v1
+ with:
+ command: fmt
+ args: --manifest-path=tools/qlog/Cargo.toml -- --check
+
+ http3_test:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install nightly toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ components: rustfmt
+ override: true
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --no-run --verbose --manifest-path=tools/http3_test/Cargo.toml
+
+ - name: Run cargo fmt
+ uses: actions-rs/cargo@v1
+ with:
+ command: fmt
+ args: --manifest-path=tools/http3_test/Cargo.toml -- --check
diff --git a/.github/workflows/stable.yml b/.github/workflows/stable.yml
new file mode 100644
index 0000000..0429a74
--- /dev/null
+++ b/.github/workflows/stable.yml
@@ -0,0 +1,365 @@
+on: [push, pull_request]
+
+name: Stable
+
+env:
+ RUSTFLAGS: "-D warnings"
+ TOOLCHAIN: "stable"
+
+jobs:
+ quiche:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ components: clippy
+ override: true
+
+ - name: Install dependencies
+ run: sudo apt-get install libev-dev uthash-dev
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --tests --examples --verbose --features ffi,qlog
+
+ - name: Run cargo package
+ uses: actions-rs/cargo@v1
+ with:
+ command: package
+ args: --verbose --allow-dirty
+
+ - name: Run cargo clippy
+ uses: actions-rs/cargo@v1
+ with:
+ command: clippy
+ args: --examples -- -D warnings
+
+ - name: Run cargo doc
+ uses: actions-rs/cargo@v1
+ with:
+ command: doc
+ args: --no-deps
+
+ - name: Build C examples
+ run: make -C examples
+
+ quiche_macos:
+ runs-on: macos-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ override: true
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --tests --examples --verbose --features ffi,qlog
+
+ quiche_ios:
+ runs-on: macos-latest
+ strategy:
+ matrix:
+ target: ["x86_64-apple-ios", "aarch64-apple-ios"]
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ target: ${{ matrix.target }}
+ override: true
+
+ - name: Run cargo build
+ uses: actions-rs/cargo@v1
+ with:
+ command: build
+ args: --target=${{ matrix.target }} --verbose
+
+ quiche_windows:
+ runs-on: windows-latest
+ strategy:
+ matrix:
+ target: ["x86_64-pc-windows-msvc", "i686-pc-windows-msvc"]
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ target: ${{ matrix.target }}
+ override: true
+
+ - name: Install dependencies
+ uses: crazy-max/ghaction-chocolatey@v1
+ with:
+ args: install nasm
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --target=${{ matrix.target }} --tests --examples --verbose --features ffi,qlog
+
+ quiche_multiarch:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ target: ["aarch64-unknown-linux-gnu","armv7-unknown-linux-gnueabihf","i686-unknown-linux-gnu"]
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ target: ${{ matrix.target }}
+ override: true
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --target=${{ matrix.target }} --tests --examples --verbose --features ffi,qlog
+ use-cross: true
+
+ apps:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ override: true
+
+ - name: Run cargo build
+ uses: actions-rs/cargo@v1
+ with:
+ command: build
+ args: --verbose --manifest-path=tools/apps/Cargo.toml
+
+ - name: Run cargo clippy
+ uses: actions-rs/cargo@v1
+ with:
+ command: clippy
+ args: --manifest-path=tools/apps/Cargo.toml -- -D warnings
+
+ qlog:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ override: true
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --verbose --manifest-path=tools/qlog/Cargo.toml
+
+ - name: Run cargo clippy
+ uses: actions-rs/cargo@v1
+ with:
+ command: clippy
+ args: --manifest-path=tools/qlog/Cargo.toml -- -D warnings
+
+ http3_test:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ override: true
+
+ - name: Run cargo test
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --no-run --verbose --manifest-path=tools/http3_test/Cargo.toml
+
+ - name: Run cargo clippy
+ uses: actions-rs/cargo@v1
+ with:
+ command: clippy
+ args: --manifest-path=tools/http3_test/Cargo.toml -- -D warnings
+
+ nginx:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ version: ["1.16.1"]
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ override: true
+
+ - name: Install dependencies
+ run: sudo apt-get install libpcre3-dev zlib1g-dev
+
+ - name: Download NGINX sources
+ run: curl -O https://nginx.org/download/nginx-${{ matrix.version }}.tar.gz
+
+ - name: Extract NGINX sources
+ run: tar xzf nginx-${{ matrix.version }}.tar.gz
+
+ - name: Build NGINX
+ run: |
+ cd nginx-${{ matrix.version }} &&
+ patch -p01 < ../extras/nginx/nginx-1.16.patch &&
+ ./configure --with-http_ssl_module --with-http_v2_module --with-http_v3_module --with-openssl="../deps/boringssl" --with-quiche=".." --with-debug &&
+ make -j`nproc` &&
+ objs/nginx -V
+
+ docker:
+ runs-on: ubuntu-latest
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Build Docker images
+ run: make docker-build
+
+ android_ndk_lts:
+ runs-on: ubuntu-latest
+ env:
+ NDK_LTS_VER: "21"
+ strategy:
+ matrix:
+ target: ["aarch64-linux-android","armv7-linux-androideabi","x86_64-linux-android","i686-linux-android"]
+ include:
+ - target: "aarch64-linux-android"
+ arch: "arm64-v8a"
+ - target: "armv7-linux-androideabi"
+ arch: "armeabi-v7a"
+ - target: "x86_64-linux-android"
+ arch: "x86_64"
+ - target: "i686-linux-android"
+ arch: "x86"
+ # Only run on "pull_request" event for external PRs. This is to avoid
+ # duplicate builds for PRs created from internal branches.
+ if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v2
+ with:
+ submodules: 'recursive'
+
+ - name: Install stable toolchain for the target
+ uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: ${{ env.TOOLCHAIN }}
+ target: ${{ matrix.target }}
+ override: true
+
+ - name: Install cargo-ndk
+ uses: actions-rs/install@v0.1
+ with:
+ crate: cargo-ndk
+
+ - name: Download NDK
+ run: curl --http1.1 -O https://dl.google.com/android/repository/android-ndk-r${{ env.NDK_LTS_VER }}-linux-x86_64.zip
+
+ - name: Extract NDK
+ run: unzip -q android-ndk-r${{ env.NDK_LTS_VER }}-linux-x86_64.zip
+
+ - name: Run cargo ndk
+ uses: actions-rs/cargo@v1
+ with:
+ command: ndk
+ args: -t ${{ matrix.arch }} -p ${{ env.NDK_LTS_VER }} -- build --verbose --features ffi
+ env:
+ ANDROID_NDK_HOME: ${{ github.workspace }}/android-ndk-r${{ env.NDK_LTS_VER }}
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 636c574..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,177 +0,0 @@
-env:
- global:
- - secure: "Aa+7RLfRyfdH2ENH0fyjI7Waq7/9XSk1LP6sDwG0zG1uhmus/z0QaiaCd027mFY3V25Kzfd6TVzL92dp3U63+3Qn3hHle3oNdu1jqaSBHB1L7R+IS6tLW0Bny5Zd7mMRNbVO642qvmuZo8A5nuu/WHp3w91WI4rp1rhULm/uhQ/+7Sx7mDSR1vRJyIeB7mETzRpckz0cUiZU656AB1hRsMVzkm+no9urHcnEP6AcPukVlWGgsdcBloMoczwp8M/cnvrGg8SuroEnu82i3eY5TwmXBhtmFa06WUaiIafT8PcB+JVADwunS1nJ7tTGCjInVQY5qNogPJxdr97E9vuHJGMDpdCkDuvTFaF2FdnXuvli3nUr2w7mP3t991ocWQW9PMRUTJe+/pFJ935puy8zD797UKUR3d2GwZRHIpwbzmcP+QQ1wG3odoFk4i/tEUAHtKirh6VglQyp9BFIaIX7TuuqIyRqxcqHVsVfDVVouHBIYuy7uRDd7YjmoF3IqqIKsHEFBnMT9GSQeifjJAPYEOoPXfzoa1Ya8PbmSdYtVR9nyjIHHLv/xDYxG3ulBgmz1ieGe9pGNgqLmi6GSiyJ/Vj+5L+1bJGrVMDELHQR/bVdZMmoAp6y8f397hace2qHKDBVN4AVfDWeCiTs1NUtgrJVIpj3Xt788RrnIfvuhF4="
- - secure: "CE2MWJZ+EtmaOheaGBd5XNgV7A/AGLpQ1Lct2R4v8BG3pXFFAM9uJEvpyTXBFx40gzD8HTBHbNRw1Ae+hP/Hz3M8o96jrdVsPlojKaShaOqoalYCghnfxDSd6WsstJdTYUk1iKhU3bSin/PHhDHe5i/dpwTay4DqSCX4MWaJPpItRIwiPpd8ekVGrHb/DkuTpEWY35Dg+9oao9KnB5c/D1oHLvInRMCivtpPpKRxNWkznOGprQALGIPmnHupBQ0zu6H7+K7TE/DT/HmocQd1h6HEba6DGkKqXU79NgS3uz2EFfgdhnIvTlxWTZFtkET9lym5mqjyuvwkIDsEDfGmk7dGjT0MFe2/9RRMxeNjINA1dlat4riUZ+a5dWqj/7uSvcOgTv0lqT5qitHbtqI0Id/TMaZlVE9r1x+vSTy/7ISzwhVPOwFa6SZJFwrHVBqnPZKP8gajpCcDJMKdFjfuLNZMF/NlDhj78S9Hyr0CUUoCKcPYiCxyCPfV7LzNMtlaynKJG2A+z0vLqZ8YSlTrqBfjuVzrq8EDJyJWMRuik3rkNe/GOFzUgQ+nBxgOzPT0Y5CPlEQNYW991pRz2r5gXhR+MSpjF3ytSrMCYYfIwymwqxb9oX4EvPCHCrWCqT4lebQyfPO8Pp8tDeV3XeRBM5a6KjOD0WfZ5pRlZ/REWrM="
-
-addons:
- apt:
- packages: &linux_deps
- - libev-dev
- - uthash-dev
- - protobuf-compiler
-
-matrix:
- include:
- - name: "stable Linux x86_64/x86"
- language: rust
- rust: stable
- env:
- DEPLOY_BUILD=yes
- TARGET_32=i686-unknown-linux-gnu
- addons:
- apt:
- packages:
- - [*linux_deps, gcc-multilib, g++-multilib]
- install:
- - rustup component add clippy
- - rustup target add $TARGET_32
- script:
- - RUSTFLAGS="-D warnings" cargo test --verbose
- - RUSTFLAGS="-D warnings" cargo package --verbose --allow-dirty
- - cargo clippy --examples -- -D warnings
- - cargo doc --no-deps
- - make -C examples
- # http3_test
- - RUSTFLAGS="-D warnings" cargo test --no-run --verbose --manifest-path tools/http3_test/Cargo.toml
- - cargo clippy --manifest-path tools/http3_test/Cargo.toml -- -D warnings
- # qlog
- - RUSTFLAGS="-D warnings" cargo test --verbose --manifest-path tools/qlog/Cargo.toml
- - cargo clippy --manifest-path tools/qlog/Cargo.toml -- -D warnings
- # quiche-apps
- - RUSTFLAGS="-D warnings" cargo build --verbose --manifest-path tools/apps/Cargo.toml
- - cargo clippy --manifest-path tools/apps/Cargo.toml -- -D warnings
- # x86 cross build
- - RUSTFLAGS="-D warnings" cargo build --target=$TARGET_32
- # docker
- - make docker-build
- - name: "nightly Linux x86_64"
- language: rust
- rust: nightly
- addons:
- apt:
- packages:
- - [*linux_deps]
- install:
- - rustup component add rustfmt
- - cargo install cargo-fuzz
- script:
- - RUSTFLAGS="-D warnings" cargo test --verbose
- - RUSTFLAGS="-D warnings" cargo package --verbose --allow-dirty
- - cargo fmt -- --check
- - cargo doc --no-deps
- - make -C examples
- # fuzzers
- - RUSTFLAGS="-D warnings" cargo fuzz run packet_recv_client -- -runs=1
- - RUSTFLAGS="-D warnings" cargo fuzz run packet_recv_server -- -runs=1
- - RUSTFLAGS="-D warnings" cargo fuzz run qpack_decode -- -runs=1
- - cargo fmt --manifest-path fuzz/Cargo.toml -- --check
- # http3_test
- - RUSTFLAGS="-D warnings" cargo test --no-run --verbose --manifest-path tools/http3_test/Cargo.toml
- - cargo fmt --manifest-path tools/http3_test/Cargo.toml -- --check
- # qlog
- - RUSTFLAGS="-D warnings" cargo test --verbose --manifest-path tools/qlog/Cargo.toml
- - cargo fmt --manifest-path tools/qlog/Cargo.toml -- --check
- # quiche-apps
- - RUSTFLAGS="-D warnings" cargo build --verbose --manifest-path tools/apps/Cargo.toml
- - cargo fmt --manifest-path tools/apps/Cargo.toml -- --check
- - name: "stable macOS + iOS"
- language: rust
- rust: stable
- os: osx
- osx_image: xcode11.2
- install:
- - rustup target add aarch64-apple-ios x86_64-apple-ios
- script:
- # macOS
- - RUSTFLAGS="-D warnings" cargo build --verbose
- - RUSTFLAGS="-D warnings" cargo test --verbose
- # iOS
- - cargo install --force cargo-lipo
- - RUSTFLAGS="-D warnings" cargo lipo --verbose
- - name: "stable Windows x86_64/x86"
- language: rust
- rust: stable
- os: windows
- env:
- TARGET_64=x86_64-pc-windows-msvc
- TARGET_32=i686-pc-windows-msvc
- before_install:
- - choco install nasm
- # Update $PATH
- - export PATH="$(powershell -Command '("Process", "Machine" | % { [Environment]::GetEnvironmentVariable("PATH", $_) -Split ";" -Replace "\\$", "" } | Select -Unique | % { cygpath $_ }) -Join ":"')"
- install:
- - rustup target add $TARGET_32 $TARGET_64
- script:
- - RUSTFLAGS="-D warnings" cargo build --verbose --target=$TARGET_64
- - RUSTFLAGS="-D warnings" cargo test --verbose --target=$TARGET_64
- - RUSTFLAGS="-D warnings" cargo build --verbose --target=$TARGET_32
- - RUSTFLAGS="-D warnings" cargo test --verbose --target=$TARGET_32
- - name: "stable Android"
- language: rust
- rust: stable
- env:
- NDK_VER_OLD=r13b
- NDK_VER=r21
- install:
- - rustup target add aarch64-linux-android arm-linux-androideabi armv7-linux-androideabi i686-linux-android
- - cargo install cargo-ndk
- script:
- #
- # Old NDK. Here we use 13b
- #
- - NDK_URL=https://dl.google.com/android/repository/android-ndk-%s-linux-x86_64.zip
- - curl -ondk.zip -q $(printf $NDK_URL $NDK_VER_OLD)
- - unzip -q ndk.zip -d $HOME
- - export ANDROID_NDK_HOME=$HOME/android-ndk-$NDK_VER_OLD
- # Setup android toolchain
- - export TOOLCHAIN_DIR=$(pwd)/toolchain
- - mkdir -p $TOOLCHAIN_DIR
- - tools/android/setup_android.sh
- - tools/android/build_android.sh --verbose --features ndk-old-gcc
- - rm -fr $TOOLCHAIN_DIR && rm -f .cargo/config
- - cargo clean
- #
- # NDK 19 or higher. Here we use 21 (long term support)
- #
- - curl -ondk.zip -q $(printf $NDK_URL $NDK_VER)
- - unzip -q ndk.zip -d $HOME
- - export ANDROID_NDK_HOME=$HOME/android-ndk-$NDK_VER
- - tools/android/build_android_ndk19.sh --verbose
- - name: "NGINX"
- language: rust
- rust: stable
- env:
- NGINX_VER=1.16.1
- addons:
- apt:
- packages:
- - [*linux_deps]
- script:
- - curl -O https://nginx.org/download/nginx-$NGINX_VER.tar.gz
- - tar xzf nginx-$NGINX_VER.tar.gz
- - |
- cd nginx-$NGINX_VER &&
- patch -p01 < ../extras/nginx/nginx-1.16.patch &&
- ./configure --with-http_ssl_module --with-http_v2_module --with-http_v3_module --with-openssl="../deps/boringssl" --with-quiche=".." --with-debug &&
- make -j`nproc`
- - objs/nginx -V
-
-deploy:
- # publish docs
- - provider: pages
- fqdn: docs.quic.tech
- local-dir: target/doc
- skip-cleanup: true
- github-token: $GITHUB_TOKEN
- on:
- branch: master
- condition: $DEPLOY_BUILD = yes
- # publish Docker images
- - provider: script
- skip-cleanup: true
- script: >-
- echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin &&
- make docker-publish
- on:
- branch: master
- condition: $DEPLOY_BUILD = yes
diff --git a/BUILD.gn b/BUILD.gn
index f9d6869..28c2370 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -27,32 +27,33 @@
non_rust_deps = [ "//third_party/boringssl" ]
sources = [
+ "src/crypto.rs",
+ "src/dgram.rs",
+ "src/ffi.rs",
+ "src/frame.rs",
+ "src/h3/ffi.rs",
+ "src/h3/frame.rs",
+ "src/h3/mod.rs",
+ "src/h3/qpack/decoder.rs",
+ "src/h3/qpack/encoder.rs",
+ "src/h3/qpack/huffman/mod.rs",
+ "src/h3/qpack/huffman/table.rs",
+ "src/h3/qpack/mod.rs",
+ "src/h3/qpack/static_table.rs",
+ "src/h3/stream.rs",
+ "src/lib.rs",
+ "src/minmax.rs",
+ "src/octets.rs",
"src/packet.rs",
"src/rand.rs",
- "src/minmax.rs",
- "src/h3/qpack/huffman/table.rs",
- "src/stream.rs",
- "src/frame.rs",
- "src/h3/stream.rs",
- "src/recovery/reno.rs",
- "src/recovery/mod.rs",
- "src/recovery/cubic.rs",
- "src/octets.rs",
- "src/h3/qpack/encoder.rs",
- "src/h3/qpack/static_table.rs",
- "src/h3/qpack/mod.rs",
- "src/h3/ffi.rs",
- "src/tls.rs",
- "src/lib.rs",
- "src/h3/mod.rs",
- "src/recovery/hystart.rs",
- "src/h3/qpack/decoder.rs",
"src/ranges.rs",
- "src/crypto.rs",
- "src/h3/qpack/huffman/mod.rs",
- "src/ffi.rs",
- "src/dgram.rs",
- "src/h3/frame.rs",
+ "src/recovery/cubic.rs",
"src/recovery/delivery_rate.rs",
+ "src/recovery/hystart.rs",
+ "src/recovery/mod.rs",
+ "src/recovery/prr.rs",
+ "src/recovery/reno.rs",
+ "src/stream.rs",
+ "src/tls.rs",
]
}
diff --git a/Cargo.toml b/Cargo.toml
index 416f466..470dbce 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "quiche"
-version = "0.6.0"
+version = "0.10.0"
authors = ["Alessandro Ghedini <alessandro@ghedini.me>"]
edition = "2018"
build = "src/build.rs"
@@ -17,6 +17,7 @@
"/COPYING",
"/benches",
"/deps/boringssl/**/*.[chS]",
+ "/deps/boringssl/**/*.asm",
"/deps/boringssl/src/**/*.cc",
"/deps/boringssl/**/CMakeLists.txt",
"/deps/boringssl/**/sources.cmake",
@@ -39,8 +40,8 @@
# Equivalent to "--cfg fuzzing", but can also be checked in build.rs.
fuzzing = []
-# For building with Android NDK < 18 and GCC.
-ndk-old-gcc = []
+# Expose the FFI API.
+ffi = []
[package.metadata.docs.rs]
no-default-features = true
@@ -54,7 +55,8 @@
libm = "0.2"
ring = "0.16"
lazy_static = "1"
-qlog = { version = "0.3", path = "tools/qlog", optional = true }
+boring-sys = { version = "1.0.2", optional = true }
+qlog = { version = "0.5", path = "tools/qlog", optional = true }
[target."cfg(windows)".dependencies]
winapi = { version = "0.3", features = ["wincrypt"] }
diff --git a/Dockerfile b/Dockerfile
index cc7520c..9650ba3 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM rust:1.44.1 as build
+FROM rust:1.53 as build
WORKDIR /build
diff --git a/Makefile b/Makefile
index e86c434..cc67ac1 100644
--- a/Makefile
+++ b/Makefile
@@ -34,9 +34,9 @@
# build fuzzers
.PHONY: build-fuzz
build-fuzz:
- cargo +nightly fuzz run packet_recv_client -- -runs=1
- cargo +nightly fuzz run packet_recv_server -- -runs=1
- cargo +nightly fuzz run qpack_decode -- -runs=1
+ cargo +nightly fuzz build --release packet_recv_client
+ cargo +nightly fuzz build --release packet_recv_server
+ cargo +nightly fuzz build --release qpack_decode
# build fuzzing image
.PHONY: docker-fuzz
diff --git a/README.md b/README.md
index 99e2908..efc0985 100644
--- a/README.md
+++ b/README.md
@@ -3,17 +3,13 @@
[](https://crates.io/crates/quiche)
[](https://docs.rs/quiche)
[](https://opensource.org/licenses/BSD-2-Clause)
-[](https://travis-ci.com/cloudflare/quiche)
+
[quiche] is an implementation of the QUIC transport protocol and HTTP/3 as
specified by the [IETF]. It provides a low level API for processing QUIC packets
and handling connection state. The application is responsible for providing I/O
(e.g. sockets handling) as well as an event loop with support for timers.
-A live QUIC server based on quiche is available at ``https://quic.tech:4433/``,
-and an HTTP/3 one at ``https://quic.tech:8443/``, that can be used for
-experimentation.
-
For more information on how quiche came about and some insights into its design
you can read a [post] on Cloudflare's blog that goes into some more detail.
@@ -26,7 +22,9 @@
### Cloudflare
-quiche powers Cloudflare edge network's [HTTP/3 support][cloudflare-http3].
+quiche powers Cloudflare edge network's [HTTP/3 support][cloudflare-http3]. The
+[cloudflare-quic.com](https://cloudflare-quic.com) website can be used for
+testing and experimentation.
### curl
@@ -49,10 +47,10 @@
Before diving into the quiche API, here are a few examples on how to use the
quiche tools provided as part of the [quiche-apps](tools/apps/) crate.
-The client can be run as follows:
+After cloning the project as described in the [building](#building) section, the client can be run as follows:
```bash
- $ cargo run --manifest-path=tools/apps/Cargo.toml --bin quiche-client -- https://quic.tech:8443/
+ $ cargo run --manifest-path=tools/apps/Cargo.toml --bin quiche-client -- https://cloudflare-quic.com/
```
while the server can be run as follows:
@@ -99,9 +97,11 @@
```rust
loop {
- let read = socket.recv(&mut buf).unwrap();
+ let (read, from) = socket.recv_from(&mut buf).unwrap();
- let read = match conn.recv(&mut buf[..read]) {
+ let recv_info = quiche::RecvInfo { from };
+
+ let read = match conn.recv(&mut buf[..read], recv_info) {
Ok(v) => v,
Err(e) => {
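
The hunk above switches the README's ingress example from a connected socket to explicit addressing: `recv_from()` yields the peer address, which is handed to quiche through `quiche::RecvInfo`. A minimal self-contained sketch of the whole loop under the new signature (the function name and error handling are illustrative assumptions, not part of the diff):

```rust
use std::net::UdpSocket;

// Hedged sketch of the new ingress path; not the project's literal code.
fn read_packets(socket: &UdpSocket, conn: &mut quiche::Connection) {
    let mut buf = [0u8; 65535];

    loop {
        // recv_from() also yields the sender's address...
        let (read, from) = match socket.recv_from(&mut buf) {
            Ok(v) => v,

            // Stop once the socket has no more datagrams to hand out.
            Err(e) if e.kind() == std::io::ErrorKind::WouldBlock => break,

            Err(e) => panic!("recv_from() failed: {:?}", e),
        };

        // ...and that address now travels with the packet into quiche.
        let recv_info = quiche::RecvInfo { from };

        if let Err(e) = conn.recv(&mut buf[..read], recv_info) {
            eprintln!("{} recv failed: {:?}", conn.trace_id(), e);
            break;
        }
    }
}
```
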
@@ -119,7 +119,7 @@
```rust
loop {
- let write = match conn.send(&mut out) {
+ let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
@@ -133,7 +133,7 @@
},
};
- socket.send(&out[..write]).unwrap();
+ socket.send_to(&out[..write], &send_info.to).unwrap();
}
```
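
Correspondingly, the hunk above changes the egress side: `send()` now returns the packet length together with a `SendInfo` whose `to` field tells the application where to send the datagram. A hedged, compile-oriented sketch of the full loop (the function name and close/error handling are assumptions):

```rust
use std::net::UdpSocket;

// Hedged sketch of the new egress path; not the project's literal code.
fn flush_packets(socket: &UdpSocket, conn: &mut quiche::Connection) {
    let mut out = [0u8; 1350];

    loop {
        // send() now also reports where the datagram should go.
        let (write, send_info) = match conn.send(&mut out) {
            Ok(v) => v,

            // Done means there is nothing left to send for now.
            Err(quiche::Error::Done) => break,

            Err(e) => {
                eprintln!("{} send failed: {:?}", conn.trace_id(), e);
                conn.close(false, 0x1, b"fail").ok();
                break;
            },
        };

        // Route the packet explicitly instead of relying on a connected socket.
        socket
            .send_to(&out[..write], &send_info.to)
            .expect("send_to() failed");
    }
}
```
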
@@ -156,7 +156,7 @@
// Send more packets as needed after timeout.
loop {
- let write = match conn.send(&mut out) {
+ let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
@@ -170,7 +170,7 @@
},
};
- socket.send(&out[..write]).unwrap();
+ socket.send_to(&out[..write], &send_info.to).unwrap();
}
```
@@ -241,12 +241,15 @@
built automatically alongside the Rust one. This is fully stand-alone and can
be linked directly into C/C++ applications.
+Note that the FFI API is disabled by default; to enable it, the ``ffi`` feature
+must be turned on by passing ``--features ffi`` to ``cargo``.
+
[thin C API]: https://github.com/cloudflare/quiche/blob/master/include/quiche.h
Building
--------
-quiche requires Rust 1.39 or later to build. The latest stable Rust release can
+quiche requires Rust 1.53 or later to build. The latest stable Rust release can
be installed using [rustup](https://rustup.rs/).
Once the Rust build environment is set up, the quiche source code can be fetched
@@ -287,78 +290,45 @@
### Building for Android
-To build quiche for Android, you need the following:
+Building quiche for Android (NDK version 19 or higher, 21 recommended) can be
+done using [cargo-ndk] (v2.0 or later).
-- Install the [Android NDK] (13b or higher), using Android Studio or directly.
-- Set `ANDROID_NDK_HOME` environment variable to NDK path, e.g.
+First the [Android NDK] needs to be installed, either using Android Studio or
+directly, and the `ANDROID_NDK_HOME` environment variable needs to be set to the
+NDK installation path, e.g.:
```bash
$ export ANDROID_NDK_HOME=/usr/local/share/android-ndk
```
-- Install the Rust toolchain for Android architectures needed:
+Then the Rust toolchain for the Android architectures needed can be installed as
+follows:
```bash
- $ rustup target add aarch64-linux-android arm-linux-androideabi armv7-linux-androideabi i686-linux-android x86_64-linux-android
+ $ rustup target add aarch64-linux-android armv7-linux-androideabi i686-linux-android x86_64-linux-android
```
Note that the minimum API level is 21 for all target architectures.
-Depending on the NDK version used, you can take one of the following procedures:
-
-[Android NDK]: https://developer.android.com/ndk
-
-#### NDK version >= 19
-
-For NDK version 19 or higher (21 recommended), you can build in a simpler
-way using [cargo-ndk]. You need to install [cargo-ndk] first.
+[cargo-ndk] (v2.0 or later) also needs to be installed:
```bash
$ cargo install cargo-ndk
```
-You can build the quiche library using the following procedure. Note that
-`--target` and `--android-platform` are mandatory.
+Finally the quiche library can be built using the following procedure. Note that
+the `-t <architecture>` and `-p <NDK version>` options are mandatory.
```bash
- $ cargo ndk --target aarch64-linux-android --android-platform 21 -- build
+ $ cargo ndk -t arm64-v8a -p 21 -- build --features ffi
```
See [build_android_ndk19.sh] for more information.
-Note that building with NDK version 18 appears to be broken.
-
+[Android NDK]: https://developer.android.com/ndk
[cargo-ndk]: https://docs.rs/crate/cargo-ndk
[build_android_ndk19.sh]: https://github.com/cloudflare/quiche/blob/master/tools/android/build_android_ndk19.sh
-#### NDK version < 18
-
-If you need to use NDK version < 18 (gcc), you can build quiche in the following way.
-
-To prepare the cross-compiling toolchain, run the following command:
-
-```bash
- $ tools/android/setup_android.sh
-```
-
-It will create a standalone toolchain for arm64/arm/x86 architectures under the
-`$TOOLCHAIN_DIR/arch` directory. If you didn't set `TOOLCHAIN_DIR` environment
-variable, the current directory will be used.
-
-After it run successfully, run the following script to build libquiche:
-
-```bash
- $ tools/android/build_android.sh --features ndk-old-gcc
-```
-
-It will build binaries for aarch64, armv7 and i686. You can pass parameters to
-this script for cargo build. For example if you want to build a release binary
-with verbose logs, do the following:
-
-```bash
- $ tools/android/build_android.sh --features ndk-old-gcc --release -vv
-```
-
### Building for iOS
To build quiche for iOS, you need the following:
@@ -385,13 +355,13 @@
To build libquiche, run the following command:
```bash
- $ cargo lipo
+ $ cargo lipo --features ffi
```
or
```bash
- $ cargo lipo --release
+ $ cargo lipo --features ffi --release
```
iOS build is tested in Xcode 10.1 and Xcode 11.2.
diff --git a/deps/boringssl b/deps/boringssl
index 597b810..f1c7534 160000
--- a/deps/boringssl
+++ b/deps/boringssl
@@ -1 +1 @@
-Subproject commit 597b810379e126ae05d32c1d94b1a9464385acd0
+Subproject commit f1c75347daa2ea81a941e953f2263e0a4d970c8d
diff --git a/examples/Makefile b/examples/Makefile
index bb75e7d..82db2e4 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -17,7 +17,7 @@
LDFLAGS = -L$(LIBCRYPTO_DIR) -L$(LIBSSL_DIR) -L$(LIB_DIR)
-LIBS = $(LIB_DIR)/libquiche.a -lev -ldl -pthread
+LIBS = $(LIB_DIR)/libquiche.a -lev -ldl -pthread -lm
all: client server http3-client http3-server
@@ -34,7 +34,7 @@
$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ $(INCS) $(LIBS)
$(LIB_DIR)/libquiche.a: $(shell find $(SOURCE_DIR) -type f -name '*.rs')
- cd .. && cargo build --target-dir $(BUILD_DIR)
+ cd .. && cargo build --target-dir $(BUILD_DIR) --features ffi
clean:
- @$(RM) -rf client server http3-client http3-server build/
+ @$(RM) -rf client server http3-client http3-server build/ *.dSYM/
diff --git a/examples/client.c b/examples/client.c
index bca9781..0df9665 100644
--- a/examples/client.c
+++ b/examples/client.c
@@ -61,8 +61,11 @@
static void flush_egress(struct ev_loop *loop, struct conn_io *conn_io) {
static uint8_t out[MAX_DATAGRAM_SIZE];
+ quiche_send_info send_info;
+
while (1) {
- ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out));
+ ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out),
+ &send_info);
if (written == QUICHE_ERR_DONE) {
fprintf(stderr, "done writing\n");
@@ -74,7 +77,10 @@
return;
}
- ssize_t sent = send(conn_io->sock, out, written, 0);
+ ssize_t sent = sendto(conn_io->sock, out, written, 0,
+ (struct sockaddr *) &send_info.to,
+ send_info.to_len);
+
if (sent != written) {
perror("failed to send");
return;
@@ -96,7 +102,13 @@
static uint8_t buf[65535];
while (1) {
- ssize_t read = recv(conn_io->sock, buf, sizeof(buf), 0);
+ struct sockaddr_storage peer_addr;
+ socklen_t peer_addr_len = sizeof(peer_addr);
+ memset(&peer_addr, 0, peer_addr_len);
+
+ ssize_t read = recvfrom(conn_io->sock, buf, sizeof(buf), 0,
+ (struct sockaddr *) &peer_addr,
+ &peer_addr_len);
if (read < 0) {
if ((errno == EWOULDBLOCK) || (errno == EAGAIN)) {
@@ -108,7 +120,13 @@
return;
}
- ssize_t done = quiche_conn_recv(conn_io->conn, buf, read);
+ quiche_recv_info recv_info = {
+ (struct sockaddr *) &peer_addr,
+
+ peer_addr_len,
+ };
+
+ ssize_t done = quiche_conn_recv(conn_io->conn, buf, read, &recv_info);
if (done < 0) {
fprintf(stderr, "failed to process packet\n");
@@ -228,11 +246,6 @@
return -1;
}
- if (connect(sock, peer->ai_addr, peer->ai_addrlen) < 0) {
- perror("failed to connect socket");
- return -1;
- }
-
quiche_config *config = quiche_config_new(0xbabababa);
if (config == NULL) {
fprintf(stderr, "failed to create config\n");
@@ -240,10 +253,11 @@
}
quiche_config_set_application_protos(config,
- (uint8_t *) "\x05hq-29\x05hq-28\x05hq-27\x08http/0.9", 27);
+ (uint8_t *) "\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9", 38);
quiche_config_set_max_idle_timeout(config, 5000);
- quiche_config_set_max_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_recv_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_send_udp_payload_size(config, MAX_DATAGRAM_SIZE);
quiche_config_set_initial_max_data(config, 10000000);
quiche_config_set_initial_max_stream_data_bidi_local(config, 1000000);
quiche_config_set_initial_max_stream_data_uni(config, 1000000);
@@ -268,8 +282,9 @@
return -1;
}
- quiche_conn *conn = quiche_connect(host, (const uint8_t *) scid,
- sizeof(scid), config);
+ quiche_conn *conn = quiche_connect(host, (const uint8_t*) scid, sizeof(scid),
+ peer->ai_addr, peer->ai_addrlen, config);
+
if (conn == NULL) {
fprintf(stderr, "failed to create connection\n");
return -1;
diff --git a/examples/client.rs b/examples/client.rs
index 2e427f6..7f97b6d 100644
--- a/examples/client.rs
+++ b/examples/client.rs
@@ -69,7 +69,6 @@
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
- socket.connect(peer_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
@@ -87,11 +86,14 @@
config.verify_peer(false);
config
- .set_application_protos(b"\x05hq-29\x05hq-28\x05hq-27\x08http/0.9")
+ .set_application_protos(
+ b"\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9",
+ )
.unwrap();
config.set_max_idle_timeout(5000);
- config.set_max_udp_payload_size(MAX_DATAGRAM_SIZE as u64);
+ config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
+ config.set_max_send_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(10_000_000);
config.set_initial_max_stream_data_bidi_local(1_000_000);
config.set_initial_max_stream_data_bidi_remote(1_000_000);
@@ -103,8 +105,11 @@
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
+ let scid = quiche::ConnectionId::from_ref(&scid);
+
// Create a QUIC connection and initiate handshake.
- let mut conn = quiche::connect(url.domain(), &scid, &mut config).unwrap();
+ let mut conn =
+ quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
info!(
"connecting to {:} from {:} with scid {}",
@@ -113,9 +118,9 @@
hex_dump(&scid)
);
- let write = conn.send(&mut out).expect("initial send failed");
+ let (write, send_info) = conn.send(&mut out).expect("initial send failed");
- while let Err(e) = socket.send(&out[..write]) {
+ while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
@@ -146,7 +151,7 @@
break 'read;
}
- let len = match socket.recv(&mut buf) {
+ let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
@@ -163,8 +168,10 @@
debug!("got {} bytes", len);
+ let recv_info = quiche::RecvInfo { from };
+
// Process potentially coalesced packets.
- let read = match conn.recv(&mut buf[..len]) {
+ let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(e) => {
@@ -209,7 +216,7 @@
);
print!("{}", unsafe {
- std::str::from_utf8_unchecked(&stream_buf)
+ std::str::from_utf8_unchecked(stream_buf)
});
// The server reported that it has no more data to send, which
@@ -228,7 +235,7 @@
// Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
- let write = match conn.send(&mut out) {
+ let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
@@ -244,7 +251,7 @@
},
};
- if let Err(e) = socket.send(&out[..write]) {
+ if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
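
Taken together, the client.rs changes above amount to a new connection-setup sequence: the UDP socket is no longer `connect()`ed, the source connection ID is wrapped in `quiche::ConnectionId`, `quiche::connect()` receives the peer address, and the initial flight is sent with `send_to()` using `SendInfo`. A hedged, self-contained sketch condensing those pieces (the function name, bind address, and return shape are assumptions, not from the diff):

```rust
use ring::rand::{SecureRandom, SystemRandom};

// Hedged sketch of client setup under the new API; not the example's literal code.
fn connect_client(
    server_name: Option<&str>, peer_addr: std::net::SocketAddr,
    config: &mut quiche::Config,
) -> quiche::Result<(std::net::UdpSocket, quiche::Connection)> {
    // The socket stays unconnected; destination addresses are now explicit.
    let socket = std::net::UdpSocket::bind("0.0.0.0:0").unwrap();

    // Source connection IDs are wrapped in quiche::ConnectionId.
    let mut scid = [0; quiche::MAX_CONN_ID_LEN];
    SystemRandom::new().fill(&mut scid[..]).unwrap();
    let scid = quiche::ConnectionId::from_ref(&scid);

    // connect() takes the peer address so quiche can fill in SendInfo later.
    let mut conn = quiche::connect(server_name, &scid, peer_addr, config)?;

    // Initial flight: send() reports the destination via SendInfo.
    let mut out = [0u8; 1350];
    let (write, send_info) = conn.send(&mut out).expect("initial send failed");
    socket.send_to(&out[..write], &send_info.to).unwrap();

    Ok((socket, conn))
}
```
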
diff --git a/examples/http3-client.c b/examples/http3-client.c
index 8c75309..f41e51e 100644
--- a/examples/http3-client.c
+++ b/examples/http3-client.c
@@ -65,8 +65,11 @@
static void flush_egress(struct ev_loop *loop, struct conn_io *conn_io) {
static uint8_t out[MAX_DATAGRAM_SIZE];
+ quiche_send_info send_info;
+
while (1) {
- ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out));
+ ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out),
+ &send_info);
if (written == QUICHE_ERR_DONE) {
fprintf(stderr, "done writing\n");
@@ -78,7 +81,10 @@
return;
}
- ssize_t sent = send(conn_io->sock, out, written, 0);
+ ssize_t sent = sendto(conn_io->sock, out, written, 0,
+ (struct sockaddr *) &send_info.to,
+ send_info.to_len);
+
if (sent != written) {
perror("failed to send");
return;
@@ -109,7 +115,13 @@
static uint8_t buf[65535];
while (1) {
- ssize_t read = recv(conn_io->sock, buf, sizeof(buf), 0);
+ struct sockaddr_storage peer_addr;
+ socklen_t peer_addr_len = sizeof(peer_addr);
+ memset(&peer_addr, 0, peer_addr_len);
+
+ ssize_t read = recvfrom(conn_io->sock, buf, sizeof(buf), 0,
+ (struct sockaddr *) &peer_addr,
+ &peer_addr_len);
if (read < 0) {
if ((errno == EWOULDBLOCK) || (errno == EAGAIN)) {
@@ -121,7 +133,13 @@
return;
}
- ssize_t done = quiche_conn_recv(conn_io->conn, buf, read);
+ quiche_recv_info recv_info = {
+ (struct sockaddr *) &peer_addr,
+
+ peer_addr_len,
+ };
+
+ ssize_t done = quiche_conn_recv(conn_io->conn, buf, read, &recv_info);
if (done < 0) {
fprintf(stderr, "failed to process packet: %zd\n", done);
@@ -239,14 +257,18 @@
}
case QUICHE_H3_EVENT_DATA: {
- ssize_t len = quiche_h3_recv_body(conn_io->http3,
- conn_io->conn, s,
- buf, sizeof(buf));
- if (len <= 0) {
- break;
+ for (;;) {
+ ssize_t len = quiche_h3_recv_body(conn_io->http3,
+ conn_io->conn, s,
+ buf, sizeof(buf));
+
+ if (len <= 0) {
+ break;
+ }
+
+ printf("%.*s", (int) len, buf);
}
- printf("%.*s", (int) len, buf);
break;
}
@@ -256,9 +278,17 @@
}
break;
+ case QUICHE_H3_EVENT_RESET:
+ fprintf(stderr, "request was reset\n");
+
+ if (quiche_conn_close(conn_io->conn, true, 0, NULL, 0) < 0) {
+ fprintf(stderr, "failed to close connection\n");
+ }
+ break;
+
case QUICHE_H3_EVENT_DATAGRAM:
break;
-
+
case QUICHE_H3_EVENT_GOAWAY: {
fprintf(stderr, "got GOAWAY\n");
break;
@@ -322,11 +352,6 @@
return -1;
}
- if (connect(sock, peer->ai_addr, peer->ai_addrlen) < 0) {
- perror("failed to connect socket");
- return -1;
- }
-
quiche_config *config = quiche_config_new(0xbabababa);
if (config == NULL) {
fprintf(stderr, "failed to create config\n");
@@ -338,7 +363,8 @@
sizeof(QUICHE_H3_APPLICATION_PROTOCOL) - 1);
quiche_config_set_max_idle_timeout(config, 5000);
- quiche_config_set_max_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_recv_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_send_udp_payload_size(config, MAX_DATAGRAM_SIZE);
quiche_config_set_initial_max_data(config, 10000000);
quiche_config_set_initial_max_stream_data_bidi_local(config, 1000000);
quiche_config_set_initial_max_stream_data_bidi_remote(config, 1000000);
@@ -366,8 +392,9 @@
return -1;
}
- quiche_conn *conn = quiche_connect(host, (const uint8_t *) scid,
- sizeof(scid), config);
+ quiche_conn *conn = quiche_connect(host, (const uint8_t*) scid, sizeof(scid),
+ peer->ai_addr, peer->ai_addrlen, config);
+
if (conn == NULL) {
fprintf(stderr, "failed to create connection\n");
return -1;
diff --git a/examples/http3-client.rs b/examples/http3-client.rs
index a93d67e..973b5bc 100644
--- a/examples/http3-client.rs
+++ b/examples/http3-client.rs
@@ -67,7 +67,6 @@
// Create the UDP socket backing the QUIC connection, and register it with
// the event loop.
let socket = std::net::UdpSocket::bind(bind_addr).unwrap();
- socket.connect(peer_addr).unwrap();
let socket = mio::net::UdpSocket::from_socket(socket).unwrap();
poll.register(
@@ -89,7 +88,8 @@
.unwrap();
config.set_max_idle_timeout(5000);
- config.set_max_udp_payload_size(MAX_DATAGRAM_SIZE as u64);
+ config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
+ config.set_max_send_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(10_000_000);
config.set_initial_max_stream_data_bidi_local(1_000_000);
config.set_initial_max_stream_data_bidi_remote(1_000_000);
@@ -104,8 +104,11 @@
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
SystemRandom::new().fill(&mut scid[..]).unwrap();
+ let scid = quiche::ConnectionId::from_ref(&scid);
+
// Create a QUIC connection and initiate handshake.
- let mut conn = quiche::connect(url.domain(), &scid, &mut config).unwrap();
+ let mut conn =
+ quiche::connect(url.domain(), &scid, peer_addr, &mut config).unwrap();
info!(
"connecting to {:} from {:} with scid {}",
@@ -114,9 +117,9 @@
hex_dump(&scid)
);
- let write = conn.send(&mut out).expect("initial send failed");
+ let (write, send_info) = conn.send(&mut out).expect("initial send failed");
- while let Err(e) = socket.send(&out[..write]) {
+ while let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
continue;
@@ -138,11 +141,14 @@
}
let req = vec![
- quiche::h3::Header::new(":method", "GET"),
- quiche::h3::Header::new(":scheme", url.scheme()),
- quiche::h3::Header::new(":authority", url.host_str().unwrap()),
- quiche::h3::Header::new(":path", &path),
- quiche::h3::Header::new("user-agent", "quiche"),
+ quiche::h3::Header::new(b":method", b"GET"),
+ quiche::h3::Header::new(b":scheme", url.scheme().as_bytes()),
+ quiche::h3::Header::new(
+ b":authority",
+ url.host_str().unwrap().as_bytes(),
+ ),
+ quiche::h3::Header::new(b":path", path.as_bytes()),
+ quiche::h3::Header::new(b"user-agent", b"quiche"),
];
let req_start = std::time::Instant::now();
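
The hunk above reflects the `h3::Header` API now taking byte slices rather than `&str`. A short illustrative helper built on that change (the function and its parameters are assumptions; `send_request()` is the existing HTTP/3 request call, unchanged by this diff):

```rust
// Hedged sketch: building a GET request with the byte-slice Header API.
fn send_get(
    conn: &mut quiche::Connection, h3_conn: &mut quiche::h3::Connection,
    authority: &str, path: &str,
) -> Result<u64, quiche::h3::Error> {
    let req = vec![
        quiche::h3::Header::new(b":method", b"GET"),
        quiche::h3::Header::new(b":scheme", b"https"),
        quiche::h3::Header::new(b":authority", authority.as_bytes()),
        quiche::h3::Header::new(b":path", path.as_bytes()),
        quiche::h3::Header::new(b"user-agent", b"quiche"),
    ];

    // Only the header construction changed; sending the request is as before.
    h3_conn.send_request(conn, &req, true)
}
```
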
@@ -166,7 +172,7 @@
break 'read;
}
- let len = match socket.recv(&mut buf) {
+ let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
@@ -183,8 +189,10 @@
debug!("got {} bytes", len);
+ let recv_info = quiche::RecvInfo { from };
+
// Process potentially coalesced packets.
- let read = match conn.recv(&mut buf[..len]) {
+ let read = match conn.recv(&mut buf[..len], recv_info) {
Ok(v) => v,
Err(e) => {
@@ -235,7 +243,7 @@
},
Ok((stream_id, quiche::h3::Event::Data)) => {
- if let Ok(read) =
+ while let Ok(read) =
http3_conn.recv_body(&mut conn, stream_id, &mut buf)
{
debug!(
@@ -258,6 +266,15 @@
conn.close(true, 0x00, b"kthxbye").unwrap();
},
+ Ok((_stream_id, quiche::h3::Event::Reset(e))) => {
+ error!(
+ "request was reset by peer with {}, closing...",
+ e
+ );
+
+ conn.close(true, 0x00, b"kthxbye").unwrap();
+ },
+
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((goaway_id, quiche::h3::Event::GoAway)) => {
@@ -280,7 +297,7 @@
// Generate outgoing QUIC packets and send them on the UDP socket, until
// quiche reports that there are no more packets to be sent.
loop {
- let write = match conn.send(&mut out) {
+ let (write, send_info) = match conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
@@ -296,7 +313,7 @@
},
};
- if let Err(e) = socket.send(&out[..write]) {
+ if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
diff --git a/examples/http3-server.c b/examples/http3-server.c
index b6b3041..34e156d 100644
--- a/examples/http3-server.c
+++ b/examples/http3-server.c
@@ -89,8 +89,11 @@
static void flush_egress(struct ev_loop *loop, struct conn_io *conn_io) {
static uint8_t out[MAX_DATAGRAM_SIZE];
+ quiche_send_info send_info;
+
while (1) {
- ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out));
+ ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out),
+ &send_info);
if (written == QUICHE_ERR_DONE) {
fprintf(stderr, "done writing\n");
@@ -173,7 +176,9 @@
}
static struct conn_io *create_conn(uint8_t *scid, size_t scid_len,
- uint8_t *odcid, size_t odcid_len) {
+ uint8_t *odcid, size_t odcid_len,
+ struct sockaddr_storage *peer_addr,
+ socklen_t peer_addr_len) {
struct conn_io *conn_io = calloc(1, sizeof(*conn_io));
if (conn_io == NULL) {
fprintf(stderr, "failed to allocate connection IO\n");
@@ -187,7 +192,11 @@
memcpy(conn_io->cid, scid, LOCAL_CONN_ID_LEN);
quiche_conn *conn = quiche_accept(conn_io->cid, LOCAL_CONN_ID_LEN,
- odcid, odcid_len, config);
+ odcid, odcid_len,
+ (struct sockaddr *) peer_addr,
+ peer_addr_len,
+ config);
+
if (conn == NULL) {
fprintf(stderr, "failed to create connection\n");
return NULL;
@@ -196,6 +205,9 @@
conn_io->sock = conns->sock;
conn_io->conn = conn;
+ memcpy(&conn_io->peer_addr, peer_addr, peer_addr_len);
+ conn_io->peer_addr_len = peer_addr_len;
+
ev_init(&conn_io->timer, timeout_cb);
conn_io->timer.data = conn_io;
@@ -334,16 +346,21 @@
continue;
}
- conn_io = create_conn(dcid, dcid_len, odcid, odcid_len);
+ conn_io = create_conn(dcid, dcid_len, odcid, odcid_len,
+ &peer_addr, peer_addr_len);
+
if (conn_io == NULL) {
continue;
}
-
- memcpy(&conn_io->peer_addr, &peer_addr, peer_addr_len);
- conn_io->peer_addr_len = peer_addr_len;
}
- ssize_t done = quiche_conn_recv(conn_io->conn, buf, read);
+ quiche_recv_info recv_info = {
+ (struct sockaddr *) &peer_addr,
+
+ peer_addr_len,
+ };
+
+ ssize_t done = quiche_conn_recv(conn_io->conn, buf, read, &recv_info);
if (done < 0) {
fprintf(stderr, "failed to process packet: %zd\n", done);
@@ -425,6 +442,9 @@
case QUICHE_H3_EVENT_FINISHED:
break;
+ case QUICHE_H3_EVENT_RESET:
+ break;
+
case QUICHE_H3_EVENT_DATAGRAM:
break;
@@ -532,7 +552,8 @@
sizeof(QUICHE_H3_APPLICATION_PROTOCOL) - 1);
quiche_config_set_max_idle_timeout(config, 5000);
- quiche_config_set_max_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_recv_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_send_udp_payload_size(config, MAX_DATAGRAM_SIZE);
quiche_config_set_initial_max_data(config, 10000000);
quiche_config_set_initial_max_stream_data_bidi_local(config, 1000000);
quiche_config_set_initial_max_stream_data_bidi_remote(config, 1000000);
diff --git a/examples/http3-server.rs b/examples/http3-server.rs
index 4c41cbb..4846861 100644
--- a/examples/http3-server.rs
+++ b/examples/http3-server.rs
@@ -53,7 +53,7 @@
partial_responses: HashMap<u64, PartialResponse>,
}
-type ClientMap = HashMap<Vec<u8>, (net::SocketAddr, Client)>;
+type ClientMap = HashMap<quiche::ConnectionId<'static>, Client>;
fn main() {
let mut buf = [0; 65535];
@@ -100,7 +100,8 @@
.unwrap();
config.set_max_idle_timeout(5000);
- config.set_max_udp_payload_size(MAX_DATAGRAM_SIZE as u64);
+ config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
+ config.set_max_send_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(10_000_000);
config.set_initial_max_stream_data_bidi_local(1_000_000);
config.set_initial_max_stream_data_bidi_remote(1_000_000);
@@ -122,8 +123,7 @@
// Find the shorter timeout from all the active connections.
//
// TODO: use event loop that properly supports timers
- let timeout =
- clients.values().filter_map(|(_, c)| c.conn.timeout()).min();
+ let timeout = clients.values().filter_map(|c| c.conn.timeout()).min();
poll.poll(&mut events, timeout).unwrap();
@@ -136,12 +136,12 @@
if events.is_empty() {
debug!("timed out");
- clients.values_mut().for_each(|(_, c)| c.conn.on_timeout());
+ clients.values_mut().for_each(|c| c.conn.on_timeout());
break 'read;
}
- let (len, src) = match socket.recv_from(&mut buf) {
+ let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
@@ -177,11 +177,12 @@
let conn_id = ring::hmac::sign(&conn_id_seed, &hdr.dcid);
let conn_id = &conn_id.as_ref()[..quiche::MAX_CONN_ID_LEN];
+ let conn_id = conn_id.to_vec().into();
// Lookup a connection based on the packet's connection ID. If there
// is no connection matching, create a new one.
- let (_, client) = if !clients.contains_key(&hdr.dcid) &&
- !clients.contains_key(conn_id)
+ let client = if !clients.contains_key(&hdr.dcid) &&
+ !clients.contains_key(&conn_id)
{
if hdr.ty != quiche::Type::Initial {
error!("Packet is not Initial");
@@ -197,7 +198,7 @@
let out = &out[..len];
- if let Err(e) = socket.send_to(out, &src) {
+ if let Err(e) = socket.send_to(out, &from) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
@@ -211,6 +212,8 @@
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
scid.copy_from_slice(&conn_id);
+ let scid = quiche::ConnectionId::from_ref(&scid);
+
// Token is always present in Initial packets.
let token = hdr.token.as_ref().unwrap();
@@ -218,7 +221,7 @@
if token.is_empty() {
warn!("Doing stateless retry");
- let new_token = mint_token(&hdr, &src);
+ let new_token = mint_token(&hdr, &from);
let len = quiche::retry(
&hdr.scid,
@@ -232,7 +235,7 @@
let out = &out[..len];
- if let Err(e) = socket.send_to(out, &src) {
+ if let Err(e) = socket.send_to(out, &from) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
@@ -243,11 +246,11 @@
continue 'read;
}
- let odcid = validate_token(&src, token);
+ let odcid = validate_token(&from, token);
// The token was not valid, meaning the retry failed, so
// drop the packet.
- if odcid == None {
+ if odcid.is_none() {
error!("Invalid address validation token");
continue 'read;
}
@@ -257,17 +260,15 @@
continue 'read;
}
- // Reuse the source connection ID we sent in the Retry
- // packet, instead of changing it again.
- scid.copy_from_slice(&hdr.dcid);
+ // Reuse the source connection ID we sent in the Retry packet,
+ // instead of changing it again.
+ let scid = hdr.dcid.clone();
- debug!(
- "New connection: dcid={} scid={}",
- hex_dump(&hdr.dcid),
- hex_dump(&scid)
- );
+ debug!("New connection: dcid={:?} scid={:?}", hdr.dcid, scid);
- let conn = quiche::accept(&scid, odcid, &mut config).unwrap();
+ let conn =
+ quiche::accept(&scid, odcid.as_ref(), from, &mut config)
+ .unwrap();
let client = Client {
conn,
@@ -275,19 +276,21 @@
partial_responses: HashMap::new(),
};
- clients.insert(scid.to_vec(), (src, client));
+ clients.insert(scid.clone(), client);
- clients.get_mut(&scid[..]).unwrap()
+ clients.get_mut(&scid).unwrap()
} else {
match clients.get_mut(&hdr.dcid) {
Some(v) => v,
- None => clients.get_mut(conn_id).unwrap(),
+ None => clients.get_mut(&conn_id).unwrap(),
}
};
+ let recv_info = quiche::RecvInfo { from };
+
// Process potentially coalesced packets.
- let read = match client.conn.recv(pkt_buf) {
+ let read = match client.conn.recv(pkt_buf, recv_info) {
Ok(v) => v,
Err(e) => {
@@ -357,6 +360,8 @@
Ok((_stream_id, quiche::h3::Event::Finished)) => (),
+ Ok((_stream_id, quiche::h3::Event::Reset { .. })) => (),
+
Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
Ok((_goaway_id, quiche::h3::Event::GoAway)) => (),
@@ -382,9 +387,9 @@
// Generate outgoing QUIC packets for all active connections and send
// them on the UDP socket, until quiche reports that there are no more
// packets to be sent.
- for (peer, client) in clients.values_mut() {
+ for client in clients.values_mut() {
loop {
- let write = match client.conn.send(&mut out) {
+ let (write, send_info) = match client.conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
@@ -400,8 +405,7 @@
},
};
- // TODO: coalesce packets.
- if let Err(e) = socket.send_to(&out[..write], &peer) {
+ if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
@@ -415,7 +419,7 @@
}
// Garbage collect closed connections.
- clients.retain(|_, (_, ref mut c)| {
+ clients.retain(|_, ref mut c| {
debug!("Collecting garbage");
if c.conn.is_closed() {
@@ -464,7 +468,7 @@
/// authentication of the token. *It should not be used in a production system*.
fn validate_token<'a>(
src: &net::SocketAddr, token: &'a [u8],
-) -> Option<&'a [u8]> {
+) -> Option<quiche::ConnectionId<'a>> {
if token.len() < 6 {
return None;
}
@@ -484,9 +488,7 @@
return None;
}
- let token = &token[addr.len()..];
-
- Some(&token[..])
+ Some(quiche::ConnectionId::from_ref(&token[addr.len()..]))
}
/// Handles incoming HTTP/3 requests.
@@ -535,6 +537,8 @@
let written = match http3_conn.send_body(conn, stream_id, &body, true) {
Ok(v) => v,
+ Err(quiche::h3::Error::Done) => 0,
+
Err(e) => {
error!("{} stream send failed {:?}", conn.trace_id(), e);
return;
@@ -558,25 +562,24 @@
) -> (Vec<quiche::h3::Header>, Vec<u8>) {
let mut file_path = std::path::PathBuf::from(root);
let mut path = std::path::Path::new("");
- let mut method = "";
+ let mut method = None;
// Look for the request's path and method.
for hdr in request {
match hdr.name() {
- ":path" => {
- path = std::path::Path::new(hdr.value());
- },
+ b":path" =>
+ path = std::path::Path::new(
+ std::str::from_utf8(hdr.value()).unwrap(),
+ ),
- ":method" => {
- method = hdr.value();
- },
+ b":method" => method = Some(hdr.value()),
_ => (),
}
}
let (status, body) = match method {
- "GET" => {
+ Some(b"GET") => {
for c in path.components() {
if let std::path::Component::Normal(v) = c {
file_path.push(v)
@@ -594,9 +597,12 @@
};
let headers = vec![
- quiche::h3::Header::new(":status", &status.to_string()),
- quiche::h3::Header::new("server", "quiche"),
- quiche::h3::Header::new("content-length", &body.len().to_string()),
+ quiche::h3::Header::new(b":status", status.to_string().as_bytes()),
+ quiche::h3::Header::new(b"server", b"quiche"),
+ quiche::h3::Header::new(
+ b"content-length",
+ body.len().to_string().as_bytes(),
+ ),
];
(headers, body)
@@ -616,7 +622,7 @@
let resp = client.partial_responses.get_mut(&stream_id).unwrap();
if let Some(ref headers) = resp.headers {
- match http3_conn.send_response(conn, stream_id, &headers, false) {
+ match http3_conn.send_response(conn, stream_id, headers, false) {
Ok(_) => (),
Err(quiche::h3::Error::StreamBlocked) => {
@@ -637,7 +643,11 @@
let written = match http3_conn.send_body(conn, stream_id, body, true) {
Ok(v) => v,
+ Err(quiche::h3::Error::Done) => 0,
+
Err(e) => {
+ client.partial_responses.remove(&stream_id);
+
error!("{} stream send failed {:?}", conn.trace_id(), e);
return;
},
@@ -649,9 +659,3 @@
client.partial_responses.remove(&stream_id);
}
}
-
-fn hex_dump(buf: &[u8]) -> String {
- let vec: Vec<String> = buf.iter().map(|b| format!("{:02x}", b)).collect();
-
- vec.join("")
-}
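
The example-server hunks above all follow the same migration to the address-aware I/O API in this release: `Connection::recv()` now takes a `quiche::RecvInfo` carrying the datagram's source address, and `Connection::send()` returns the packet length together with a `quiche::SendInfo` whose `to` field is the destination, which is why the per-client peer address no longer needs to be stored. A minimal sketch of the new call pattern, assuming an already-accepted connection and a plain `std::net::UdpSocket` (the examples themselves use mio):

```
use std::net::UdpSocket;

// Sketch only: socket/connection setup and real error handling are elided.
fn pump(socket: &UdpSocket, conn: &mut quiche::Connection) -> quiche::Result<()> {
    let mut buf = [0u8; 65535];
    let mut out = [0u8; 1350];

    // Feed an incoming datagram to the connection, tagging it with the
    // address it was received from.
    let (len, from) = socket.recv_from(&mut buf).unwrap();
    conn.recv(&mut buf[..len], quiche::RecvInfo { from })?;

    // Drain outgoing packets; send() now also reports the destination.
    loop {
        let (write, send_info) = match conn.send(&mut out) {
            Ok(v) => v,
            Err(quiche::Error::Done) => break,
            Err(e) => return Err(e),
        };

        socket.send_to(&out[..write], send_info.to).unwrap();
    }

    Ok(())
}
```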
diff --git a/examples/qpack-decode.rs b/examples/qpack-decode.rs
index 8468a85..d2aaaa5 100644
--- a/examples/qpack-decode.rs
+++ b/examples/qpack-decode.rs
@@ -77,7 +77,9 @@
}
for hdr in dec.decode(&data[..len], std::u64::MAX).unwrap() {
- println!("{}\t{}", hdr.name(), hdr.value());
+ let name = std::str::from_utf8(hdr.name()).unwrap();
+ let value = std::str::from_utf8(hdr.value()).unwrap();
+ println!("{}\t{}", name, value);
}
println!();
diff --git a/examples/qpack-encode.rs b/examples/qpack-encode.rs
index e381227..5215fe4 100644
--- a/examples/qpack-encode.rs
+++ b/examples/qpack-encode.rs
@@ -83,6 +83,6 @@
let name = line.split('\t').next().unwrap();
let value = line.split('\t').last().unwrap();
- headers.push(h3::Header::new(name, value));
+ headers.push(h3::Header::new(name.as_bytes(), value.as_bytes()));
}
}
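
Both qpack example hunks reflect the switch of `quiche::h3::Header` to byte slices: `Header::new()` takes `&[u8]` for names and values, and `name()`/`value()` return `&[u8]`, so printable output needs an explicit UTF-8 conversion. A short sketch mirroring the usage above (the request headers are illustrative only):

```
fn print_headers(headers: &[quiche::h3::Header]) {
    for hdr in headers {
        // Header names and values are raw bytes now; convert for display.
        let name = std::str::from_utf8(hdr.name()).unwrap_or("<non-utf8>");
        let value = std::str::from_utf8(hdr.value()).unwrap_or("<non-utf8>");
        println!("{}\t{}", name, value);
    }
}

fn example_request() -> Vec<quiche::h3::Header> {
    vec![
        quiche::h3::Header::new(b":method", b"GET"),
        quiche::h3::Header::new(b":path", b"/index.html"),
    ]
}
```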
diff --git a/examples/server.c b/examples/server.c
index 025e1d5..b6ac1f2 100644
--- a/examples/server.c
+++ b/examples/server.c
@@ -86,8 +86,11 @@
static void flush_egress(struct ev_loop *loop, struct conn_io *conn_io) {
static uint8_t out[MAX_DATAGRAM_SIZE];
+ quiche_send_info send_info;
+
while (1) {
- ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out));
+ ssize_t written = quiche_conn_send(conn_io->conn, out, sizeof(out),
+ &send_info);
if (written == QUICHE_ERR_DONE) {
fprintf(stderr, "done writing\n");
@@ -100,8 +103,9 @@
}
ssize_t sent = sendto(conn_io->sock, out, written, 0,
- (struct sockaddr *) &conn_io->peer_addr,
- conn_io->peer_addr_len);
+ (struct sockaddr *) &send_info.to,
+ send_info.to_len);
+
if (sent != written) {
perror("failed to send");
return;
@@ -169,18 +173,28 @@
return cid;
}
-static struct conn_io *create_conn(uint8_t *dcid, size_t dcid_len, uint8_t *odcid,
- size_t odcid_len) {
- struct conn_io *conn_io = malloc(sizeof(*conn_io));
+static struct conn_io *create_conn(uint8_t *scid, size_t scid_len,
+ uint8_t *odcid, size_t odcid_len,
+ struct sockaddr_storage *peer_addr,
+ socklen_t peer_addr_len) {
+ struct conn_io *conn_io = calloc(1, sizeof(*conn_io));
if (conn_io == NULL) {
fprintf(stderr, "failed to allocate connection IO\n");
return NULL;
}
- memcpy(conn_io->cid, dcid, LOCAL_CONN_ID_LEN);
+ if (scid_len != LOCAL_CONN_ID_LEN) {
+ fprintf(stderr, "failed, scid length too short\n");
+ }
+
+ memcpy(conn_io->cid, scid, LOCAL_CONN_ID_LEN);
quiche_conn *conn = quiche_accept(conn_io->cid, LOCAL_CONN_ID_LEN,
- odcid, odcid_len, config);
+ odcid, odcid_len,
+ (struct sockaddr *) peer_addr,
+ peer_addr_len,
+ config);
+
if (conn == NULL) {
fprintf(stderr, "failed to create connection\n");
return NULL;
@@ -189,6 +203,9 @@
conn_io->sock = conns->sock;
conn_io->conn = conn;
+ memcpy(&conn_io->peer_addr, peer_addr, peer_addr_len);
+ conn_io->peer_addr_len = peer_addr_len;
+
ev_init(&conn_io->timer, timeout_cb);
conn_io->timer.data = conn_io;
@@ -318,16 +335,21 @@
continue;
}
- conn_io = create_conn(dcid, dcid_len, odcid, odcid_len);
+ conn_io = create_conn(dcid, dcid_len, odcid, odcid_len,
+ &peer_addr, peer_addr_len);
+
if (conn_io == NULL) {
continue;
}
-
- memcpy(&conn_io->peer_addr, &peer_addr, peer_addr_len);
- conn_io->peer_addr_len = peer_addr_len;
}
- ssize_t done = quiche_conn_recv(conn_io->conn, buf, read);
+ quiche_recv_info recv_info = {
+ (struct sockaddr *) &peer_addr,
+
+ peer_addr_len,
+ };
+
+ ssize_t done = quiche_conn_recv(conn_io->conn, buf, read, &recv_info);
if (done < 0) {
fprintf(stderr, "failed to process packet: %zd\n", done);
@@ -451,10 +473,11 @@
quiche_config_load_priv_key_from_pem_file(config, "./cert.key");
quiche_config_set_application_protos(config,
- (uint8_t *) "\x05hq-29\x05hq-28\x05hq-27\x08http/0.9", 27);
+ (uint8_t *) "\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9", 38);
quiche_config_set_max_idle_timeout(config, 5000);
- quiche_config_set_max_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_recv_udp_payload_size(config, MAX_DATAGRAM_SIZE);
+ quiche_config_set_max_send_udp_payload_size(config, MAX_DATAGRAM_SIZE);
quiche_config_set_initial_max_data(config, 10000000);
quiche_config_set_initial_max_stream_data_bidi_local(config, 1000000);
quiche_config_set_initial_max_stream_data_bidi_remote(config, 1000000);
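
The longer ALPN string passed to `quiche_config_set_application_protos()` uses the usual length-prefixed wire format: each protocol name is preceded by a single length byte, so adding "hq-interop" (1 + 10 bytes) grows the list from 27 to 38 bytes. A quick check of that arithmetic, with the same byte string written as a Rust constant:

```
// 1+10 ("hq-interop") + 1+5 ("hq-29") + 1+5 ("hq-28")
//   + 1+5 ("hq-27") + 1+8 ("http/0.9") = 38 bytes.
const ALPN: &[u8] = b"\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9";

fn main() {
    assert_eq!(ALPN.len(), 38);
}
```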
diff --git a/examples/server.rs b/examples/server.rs
index 8213d95..a632bbc 100644
--- a/examples/server.rs
+++ b/examples/server.rs
@@ -47,7 +47,7 @@
partial_responses: HashMap<u64, PartialResponse>,
}
-type ClientMap = HashMap<Vec<u8>, (net::SocketAddr, Client)>;
+type ClientMap = HashMap<quiche::ConnectionId<'static>, Client>;
fn main() {
let mut buf = [0; 65535];
@@ -90,11 +90,14 @@
.unwrap();
config
- .set_application_protos(b"\x05hq-29\x05hq-28\x05hq-27\x08http/0.9")
+ .set_application_protos(
+ b"\x0ahq-interop\x05hq-29\x05hq-28\x05hq-27\x08http/0.9",
+ )
.unwrap();
config.set_max_idle_timeout(5000);
- config.set_max_udp_payload_size(MAX_DATAGRAM_SIZE as u64);
+ config.set_max_recv_udp_payload_size(MAX_DATAGRAM_SIZE);
+ config.set_max_send_udp_payload_size(MAX_DATAGRAM_SIZE);
config.set_initial_max_data(10_000_000);
config.set_initial_max_stream_data_bidi_local(1_000_000);
config.set_initial_max_stream_data_bidi_remote(1_000_000);
@@ -114,8 +117,7 @@
// Find the shorter timeout from all the active connections.
//
// TODO: use event loop that properly supports timers
- let timeout =
- clients.values().filter_map(|(_, c)| c.conn.timeout()).min();
+ let timeout = clients.values().filter_map(|c| c.conn.timeout()).min();
poll.poll(&mut events, timeout).unwrap();
@@ -128,12 +130,12 @@
if events.is_empty() {
debug!("timed out");
- clients.values_mut().for_each(|(_, c)| c.conn.on_timeout());
+ clients.values_mut().for_each(|c| c.conn.on_timeout());
break 'read;
}
- let (len, src) = match socket.recv_from(&mut buf) {
+ let (len, from) = match socket.recv_from(&mut buf) {
Ok(v) => v,
Err(e) => {
@@ -169,11 +171,12 @@
let conn_id = ring::hmac::sign(&conn_id_seed, &hdr.dcid);
let conn_id = &conn_id.as_ref()[..quiche::MAX_CONN_ID_LEN];
+ let conn_id = conn_id.to_vec().into();
// Lookup a connection based on the packet's connection ID. If there
// is no connection matching, create a new one.
- let (_, client) = if !clients.contains_key(&hdr.dcid) &&
- !clients.contains_key(conn_id)
+ let client = if !clients.contains_key(&hdr.dcid) &&
+ !clients.contains_key(&conn_id)
{
if hdr.ty != quiche::Type::Initial {
error!("Packet is not Initial");
@@ -189,7 +192,7 @@
let out = &out[..len];
- if let Err(e) = socket.send_to(out, &src) {
+ if let Err(e) = socket.send_to(out, &from) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
@@ -203,6 +206,8 @@
let mut scid = [0; quiche::MAX_CONN_ID_LEN];
scid.copy_from_slice(&conn_id);
+ let scid = quiche::ConnectionId::from_ref(&scid);
+
// Token is always present in Initial packets.
let token = hdr.token.as_ref().unwrap();
@@ -210,7 +215,7 @@
if token.is_empty() {
warn!("Doing stateless retry");
- let new_token = mint_token(&hdr, &src);
+ let new_token = mint_token(&hdr, &from);
let len = quiche::retry(
&hdr.scid,
@@ -224,7 +229,7 @@
let out = &out[..len];
- if let Err(e) = socket.send_to(out, &src) {
+ if let Err(e) = socket.send_to(out, &from) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
@@ -235,11 +240,11 @@
continue 'read;
}
- let odcid = validate_token(&src, token);
+ let odcid = validate_token(&from, token);
// The token was not valid, meaning the retry failed, so
// drop the packet.
- if odcid == None {
+ if odcid.is_none() {
error!("Invalid address validation token");
continue 'read;
}
@@ -249,36 +254,36 @@
continue 'read;
}
- // Reuse the source connection ID we sent in the Retry
- // packet, instead of changing it again.
- scid.copy_from_slice(&hdr.dcid);
+ // Reuse the source connection ID we sent in the Retry packet,
+ // instead of changing it again.
+ let scid = hdr.dcid.clone();
- debug!(
- "New connection: dcid={} scid={}",
- hex_dump(&hdr.dcid),
- hex_dump(&scid)
- );
+ debug!("New connection: dcid={:?} scid={:?}", hdr.dcid, scid);
- let conn = quiche::accept(&scid, odcid, &mut config).unwrap();
+ let conn =
+ quiche::accept(&scid, odcid.as_ref(), from, &mut config)
+ .unwrap();
let client = Client {
conn,
partial_responses: HashMap::new(),
};
- clients.insert(scid.to_vec(), (src, client));
+ clients.insert(scid.clone(), client);
- clients.get_mut(&scid[..]).unwrap()
+ clients.get_mut(&scid).unwrap()
} else {
match clients.get_mut(&hdr.dcid) {
Some(v) => v,
- None => clients.get_mut(conn_id).unwrap(),
+ None => clients.get_mut(&conn_id).unwrap(),
}
};
+ let recv_info = quiche::RecvInfo { from };
+
// Process potentially coalesced packets.
- let read = match client.conn.recv(pkt_buf) {
+ let read = match client.conn.recv(pkt_buf, recv_info) {
Ok(v) => v,
Err(e) => {
@@ -325,9 +330,9 @@
// Generate outgoing QUIC packets for all active connections and send
// them on the UDP socket, until quiche reports that there are no more
// packets to be sent.
- for (peer, client) in clients.values_mut() {
+ for client in clients.values_mut() {
loop {
- let write = match client.conn.send(&mut out) {
+ let (write, send_info) = match client.conn.send(&mut out) {
Ok(v) => v,
Err(quiche::Error::Done) => {
@@ -343,8 +348,7 @@
},
};
- // TODO: coalesce packets.
- if let Err(e) = socket.send_to(&out[..write], &peer) {
+ if let Err(e) = socket.send_to(&out[..write], &send_info.to) {
if e.kind() == std::io::ErrorKind::WouldBlock {
debug!("send() would block");
break;
@@ -358,7 +362,7 @@
}
// Garbage collect closed connections.
- clients.retain(|_, (_, ref mut c)| {
+ clients.retain(|_, ref mut c| {
debug!("Collecting garbage");
if c.conn.is_closed() {
@@ -407,7 +411,7 @@
/// authentication of the token. *It should not be used in a production system*.
fn validate_token<'a>(
src: &net::SocketAddr, token: &'a [u8],
-) -> Option<&'a [u8]> {
+) -> Option<quiche::ConnectionId<'a>> {
if token.len() < 6 {
return None;
}
@@ -427,9 +431,7 @@
return None;
}
- let token = &token[addr.len()..];
-
- Some(&token[..])
+ Some(quiche::ConnectionId::from_ref(&token[addr.len()..]))
}
/// Handles incoming HTTP/0.9 requests.
@@ -497,12 +499,14 @@
let resp = client.partial_responses.get_mut(&stream_id).unwrap();
let body = &resp.body[resp.written..];
- let written = match conn.stream_send(stream_id, &body, true) {
+ let written = match conn.stream_send(stream_id, body, true) {
Ok(v) => v,
Err(quiche::Error::Done) => 0,
Err(e) => {
+ client.partial_responses.remove(&stream_id);
+
error!("{} stream send failed {:?}", conn.trace_id(), e);
return;
},
@@ -514,9 +518,3 @@
client.partial_responses.remove(&stream_id);
}
}
-
-fn hex_dump(buf: &[u8]) -> String {
- let vec: Vec<String> = buf.iter().map(|b| format!("{:02x}", b)).collect();
-
- vec.join("")
-}
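
Besides the recv/send changes, server.rs swaps the `Vec<u8>`-keyed client map for `quiche::ConnectionId<'static>` keys and drops the stored peer address, since `SendInfo` now supplies it per packet. A small sketch of the keying pattern, using the same `to_vec().into()` conversion as the hunks above (the `Client` struct is pared down to its connection for illustration):

```
use std::collections::HashMap;

struct Client {
    conn: quiche::Connection,
}

type ClientMap = HashMap<quiche::ConnectionId<'static>, Client>;

fn register(
    clients: &mut ClientMap, scid: &quiche::ConnectionId, conn: quiche::Connection,
) {
    // Build an owned ('static) connection ID so the key doesn't borrow the
    // buffer the packet header was parsed from.
    let key: quiche::ConnectionId<'static> = scid.to_vec().into();
    clients.insert(key, Client { conn });
}
```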
diff --git a/extras/nginx/README.md b/extras/nginx/README.md
index 505c028..56da61a 100644
--- a/extras/nginx/README.md
+++ b/extras/nginx/README.md
@@ -88,11 +88,8 @@
# Enable all TLS versions (TLSv1.3 is required for QUIC).
ssl_protocols TLSv1 TLSv1.1 TLSv1.2 TLSv1.3;
- # Request buffering in not currently supported for HTTP/3.
- proxy_request_buffering off;
-
# Add Alt-Svc header to negotiate HTTP/3.
- add_header alt-svc 'h3-29=":443"; ma=86400';
+ add_header alt-svc 'h3=":443"; ma=86400';
}
}
```
@@ -168,3 +165,25 @@
### $http3
"h3" if HTTP/3 was negotiated, or an empty string otherwise.
+
+0-RTT
+-----
+
+To support [0-RTT QUIC connection resumption](https://blog.cloudflare.com/even-faster-connection-establishment-with-quic-0-rtt-resumption/)
+from the client, you will need the following configuration:
+
+```
+http {
+ server {
+ ...
+ ssl_early_data on;
+ ssl_session_ticket_key <file>;
+ ...
+ }
+}
+```
+
+Please see
+[ssl_session_ticket_key](https://nginx.org/en/docs/http/ngx_http_ssl_module.html#ssl_session_ticket_key)
+on how to generate the secret file used for TLS session tickets. This is
+required when using multiple worker processes.
diff --git a/extras/nginx/nginx-1.16.patch b/extras/nginx/nginx-1.16.patch
index 62b54fc..0b6d27f 100644
--- a/extras/nginx/nginx-1.16.patch
+++ b/extras/nginx/nginx-1.16.patch
@@ -1,4 +1,4 @@
-From d736db24826262e2c9887c610818532d3b6e19c1 Mon Sep 17 00:00:00 2001
+From c466e7a968a130199adee030873dfd60d38bc664 Mon Sep 17 00:00:00 2001
From: Alessandro Ghedini <alessandro@cloudflare.com>
Date: Thu, 22 Oct 2020 12:28:02 +0100
Subject: [PATCH] Initial QUIC and HTTP/3 implementation using quiche
@@ -8,13 +8,13 @@
auto/lib/make | 4 +
auto/lib/openssl/make | 12 +-
auto/lib/quiche/conf | 23 +
- auto/lib/quiche/make | 22 +
+ auto/lib/quiche/make | 23 +
auto/make | 3 +-
auto/modules | 44 +
auto/options | 9 +
src/core/ngx_connection.h | 7 +
src/core/ngx_core.h | 3 +
- src/event/ngx_event_quic.c | 575 ++++++
+ src/event/ngx_event_quic.c | 620 +++++++
src/event/ngx_event_quic.h | 49 +
src/event/ngx_event_udp.c | 8 +
src/http/modules/ngx_http_ssl_module.c | 13 +-
@@ -22,16 +22,17 @@
src/http/ngx_http.h | 4 +
src/http/ngx_http_core_module.c | 7 +
src/http/ngx_http_core_module.h | 3 +
- src/http/ngx_http_request.c | 144 +-
+ src/http/ngx_http_request.c | 140 +-
src/http/ngx_http_request.h | 3 +
- src/http/ngx_http_request_body.c | 29 +
+ src/http/ngx_http_request_body.c | 33 +
src/http/ngx_http_upstream.c | 13 +
- src/http/v3/ngx_http_v3.c | 2234 +++++++++++++++++++++++
- src/http/v3/ngx_http_v3.h | 77 +
+ src/http/v3/ngx_http_v3.c | 2231 +++++++++++++++++++++++
+ src/http/v3/ngx_http_v3.h | 79 +
src/http/v3/ngx_http_v3_filter_module.c | 68 +
src/http/v3/ngx_http_v3_module.c | 286 +++
src/http/v3/ngx_http_v3_module.h | 34 +
- 27 files changed, 3700 insertions(+), 11 deletions(-)
+ src/os/unix/ngx_udp_sendmsg_chain.c | 1 +
+ 28 files changed, 3746 insertions(+), 11 deletions(-)
create mode 100644 auto/lib/quiche/conf
create mode 100644 auto/lib/quiche/make
create mode 100644 src/event/ngx_event_quic.c
@@ -117,7 +118,7 @@
+
+ CORE_INCS="$CORE_INCS $QUICHE/include"
+ CORE_DEPS="$CORE_DEPS $QUICHE/target/$QUICHE_BUILD_TARGET/libquiche.a"
-+ CORE_LIBS="$CORE_LIBS $QUICHE/target/$QUICHE_BUILD_TARGET/libquiche.a $NGX_LIBPTHREAD"
++ CORE_LIBS="$CORE_LIBS $QUICHE/target/$QUICHE_BUILD_TARGET/libquiche.a $NGX_LIBPTHREAD -lm"
+
+ if [ "$NGX_SYSTEM" = "Darwin" ]; then
+ CORE_LIBS+=" -framework Security"
@@ -126,20 +127,21 @@
+fi
diff --git a/auto/lib/quiche/make b/auto/lib/quiche/make
new file mode 100644
-index 000000000..1e8f8a9c0
+index 000000000..6e5ede5bc
--- /dev/null
+++ b/auto/lib/quiche/make
-@@ -0,0 +1,22 @@
+@@ -0,0 +1,23 @@
+
+# Copyright (C) Cloudflare, Inc.
+
++QUICHE_COMMON_FLAGS="--verbose --no-default-features --features ffi"
+
+# Default is release build
-+QUICHE_BUILD_FLAGS="--release --no-default-features"
++QUICHE_BUILD_FLAGS="$QUICHE_COMMON_FLAGS --release"
+QUICHE_BUILD_TARGET="release"
+
+if [ $NGX_DEBUG = YES ]; then
-+ QUICHE_BUILD_FLAGS="--no-default-features"
++ QUICHE_BUILD_FLAGS="$QUICHE_COMMON_FLAGS"
+ QUICHE_BUILD_TARGET="debug"
+fi
+
@@ -338,10 +340,10 @@
#include <ngx_module.h>
diff --git a/src/event/ngx_event_quic.c b/src/event/ngx_event_quic.c
new file mode 100644
-index 000000000..d9fc0b57a
+index 000000000..591a809e0
--- /dev/null
+++ b/src/event/ngx_event_quic.c
-@@ -0,0 +1,575 @@
+@@ -0,0 +1,620 @@
+
+/*
+ * Copyright (C) Cloudflare, Inc.
@@ -363,6 +365,8 @@
+static void ngx_quic_read_handler(ngx_event_t *ev);
+static void ngx_quic_write_handler(ngx_event_t *ev);
+
++static void ngx_quic_set_timer(ngx_connection_t *c);
++
+static void ngx_quic_handshake_completed(ngx_connection_t *c);
+
+static void ngx_quic_shutdown_handler(ngx_event_t *ev);
@@ -514,7 +518,8 @@
+ }
+#endif
+
-+ conn = quiche_conn_new_with_tls(scid, sizeof(scid), NULL, 0, quic->config,
++ conn = quiche_conn_new_with_tls(scid, sizeof(scid), NULL, 0,
++ c->sockaddr, c->socklen, quic->config,
+ c->ssl->connection, true);
+ if (conn == NULL) {
+ ngx_log_error(NGX_LOG_ERR, c->log, 0, "failed to create quic connection");
@@ -523,6 +528,7 @@
+
+ qc = ngx_pcalloc(c->pool, sizeof(ngx_quic_connection_t));
+ if (qc == NULL) {
++ quiche_conn_free(conn);
+ return NGX_ERROR;
+ }
+
@@ -543,12 +549,17 @@
+ size_t buf_len;
+ ssize_t done;
+
++ quiche_recv_info recv_info = {
++ c->sockaddr,
++ c->socklen,
++ };
++
+ /* Process the client's Initial packet, which was saved into c->buffer by
+ * ngx_event_recvmsg(). */
+ buf = c->buffer->pos;
+ buf_len = ngx_buf_size(c->buffer);
+
-+ done = quiche_conn_recv(c->quic->conn, buf, buf_len);
++ done = quiche_conn_recv(c->quic->conn, buf, buf_len, &recv_info);
+
+ if ((done < 0) && (done != QUICHE_ERR_DONE)) {
+ ngx_log_debug1(NGX_LOG_DEBUG_EVENT, c->log, 0,
@@ -582,7 +593,10 @@
+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0,
+ "quic connection timed out");
+
-+ ngx_quic_finalize_connection(c, NGX_QUIC_INTERNAL_ERROR);
++ if (c->quic->handler != NULL) {
++ c->quic->handler(c);
++ }
++
+ return;
+ }
+
@@ -597,7 +611,12 @@
+ return;
+ }
+
-+ ssize_t done = quiche_conn_recv(c->quic->conn, buf, n);
++ quiche_recv_info recv_info = {
++ c->sockaddr,
++ c->socklen,
++ };
++
++ ssize_t done = quiche_conn_recv(c->quic->conn, buf, n, &recv_info);
+
+ if (done == QUICHE_ERR_DONE) {
+ break;
@@ -636,7 +655,7 @@
+ngx_quic_write_handler(ngx_event_t *wev)
+{
+ ngx_connection_t *c;
-+ uint64_t expiry;
++ quiche_send_info send_info;
+ static uint8_t out[MAX_DATAGRAM_SIZE];
+
+ c = wev->data;
@@ -660,7 +679,8 @@
+ }
+
+ for (;;) {
-+ ssize_t written = quiche_conn_send(c->quic->conn, out, sizeof(out));
++ ssize_t written = quiche_conn_send(c->quic->conn, out, sizeof(out),
++ &send_info);
+
+ if (written == QUICHE_ERR_DONE) {
+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic done writing");
@@ -675,7 +695,13 @@
+ return;
+ }
+
-+ if (ngx_quic_send_udp_packet(c, out, written) == NGX_ERROR) {
++ int rc = ngx_quic_send_udp_packet(c, out, written);
++
++ if (rc == NGX_AGAIN) {
++ break;
++ }
++
++ if (rc == NGX_ERROR) {
+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0,
+ "failed to send quic packet");
+
@@ -684,6 +710,18 @@
+ }
+ }
+
++ ngx_quic_set_timer(c);
++}
++
++
++static void
++ngx_quic_set_timer(ngx_connection_t *c)
++{
++ uint64_t expiry;
++ ngx_event_t *wev;
++
++ wev = c->write;
++
+ expiry = quiche_conn_timeout_as_millis(c->quic->conn);
+ expiry = ngx_max(expiry, 1);
+
@@ -762,36 +800,42 @@
+ngx_int_t
+ngx_quic_shutdown(ngx_connection_t *c)
+{
-+ if (!quiche_conn_is_closed(c->quic->conn)) {
-+ /* We shouldn't free the connection state yet, as we need to wait for
-+ * the draining timeout to expire. Setup event handlers such that we
-+ * will try again when that happens (or when another event is
-+ * triggered). */
-+ c->read->handler = ngx_quic_shutdown_handler;
-+ c->write->handler = ngx_quic_shutdown_handler;
++ ssize_t written;
++ quiche_send_info send_info;
++ static uint8_t out[MAX_DATAGRAM_SIZE];
+
-+ /* We need to flush any remaining frames to the client (including
-+ * CONNECTION_CLOSE), so invoke the write handler. This also takes
-+ * care of setting up the draining timer. */
-+ ngx_quic_write_handler(c->write);
++ /* Connection is closed, free memory. */
++ if (quiche_conn_is_closed(c->quic->conn)) {
++ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "free quic connection");
+
-+ /* The QUIC connection might have already been freed inside the write
-+ * handler, in which case we are done. */
-+ if (c->destroyed) {
-+ return NGX_OK;
-+ }
++ quiche_conn_free(c->quic->conn);
+
-+ return NGX_AGAIN;
++ c->quic = NULL;
++ c->ssl = NULL;
++
++ return NGX_OK;
+ }
+
-+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "free quic connection");
++ /* We can't free the connection state yet, as we need to wait for the
++ * draining timeout to expire.
++ *
++ * Setup event handlers such that we will try again when that happens (or
++ * when another event is triggered). */
++ c->read->handler = ngx_quic_shutdown_handler;
++ c->write->handler = ngx_quic_shutdown_handler;
+
-+ quiche_conn_free(c->quic->conn);
++ /* Try sending a packet in order to flush pending frames (CONNECTION_CLOSE
++ * for example), but ignore errors as we are already closing the connection
++ * anyway. */
++ written = quiche_conn_send(c->quic->conn, out, sizeof(out), &send_info);
+
-+ c->quic = NULL;
-+ c->ssl = NULL;
++ if (written > 0) {
++ ngx_quic_send_udp_packet(c, out, written);
++ }
+
-+ return NGX_OK;
++ ngx_quic_set_timer(c);
++
++ return NGX_AGAIN;
+}
+
+
@@ -801,6 +845,8 @@
+ ngx_connection_t *c;
+ ngx_connection_handler_pt handler;
+
++ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0, "quic shutdown handler");
++
+ c = ev->data;
+ handler = c->quic->handler;
+
@@ -808,12 +854,8 @@
+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0, "quic alarm fired");
+
+ quiche_conn_on_timeout(c->quic->conn);
-+
-+ ev->timedout = 0;
+ }
+
-+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, 0, "quic shutdown handler");
-+
+ if (ngx_quic_shutdown(c) == NGX_AGAIN) {
+ return;
+ }
@@ -860,10 +902,6 @@
+ c->quic->handler = ngx_quic_close_connection;
+ return;
+ }
-+
-+ if (c->destroyed) {
-+ return;
-+ }
+ }
+
+#if (NGX_STAT_STUB)
@@ -892,8 +930,9 @@
+static ngx_int_t
+ngx_quic_send_udp_packet(ngx_connection_t *c, uint8_t *buf, size_t len)
+{
-+ ngx_buf_t out_buf = {0};
-+ ngx_chain_t out_chain = {0};
++ ngx_buf_t out_buf = {0};
++ ngx_chain_t out_chain = {0};
++ ngx_chain_t *cl;
+
+ /* The send_chain() API takes an ngx_chain_t parameter instead of a simple
+ * buffer, so we need to initialize the chain such that it contains only a
@@ -911,7 +950,15 @@
+ out_chain.buf = &out_buf;
+ out_chain.next = NULL;
+
-+ if (c->send_chain(c, &out_chain, 0) == NGX_CHAIN_ERROR) {
++ c->write->ready = 1;
++
++ cl = c->send_chain(c, &out_chain, 0);
++
++ if (cl != NULL) {
++ return NGX_AGAIN;
++ }
++
++ if (cl == NGX_CHAIN_ERROR) {
+ return NGX_ERROR;
+ }
+
@@ -1211,7 +1258,7 @@
diff --git a/src/http/ngx_http_request.c b/src/http/ngx_http_request.c
-index 80c19656f..313e5a88a 100644
+index 80c19656f..a4f396753 100644
--- a/src/http/ngx_http_request.c
+++ b/src/http/ngx_http_request.c
@@ -64,6 +64,10 @@ static void ngx_http_ssl_handshake(ngx_event_t *rev);
@@ -1412,7 +1459,7 @@
ngx_http_close_connection(c);
}
-@@ -3684,6 +3809,21 @@ ngx_http_close_connection(ngx_connection_t *c)
+@@ -3684,6 +3809,17 @@ ngx_http_close_connection(ngx_connection_t *c)
ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
"close http connection: %d", c->fd);
@@ -1423,10 +1470,6 @@
+ c->quic->handler = ngx_http_close_connection;
+ return;
+ }
-+
-+ if (c->destroyed) {
-+ return;
-+ }
+ }
+
+#endif
@@ -1463,49 +1506,46 @@
ngx_http_log_handler_pt log_handler;
diff --git a/src/http/ngx_http_request_body.c b/src/http/ngx_http_request_body.c
-index c4f092e59..2f8514418 100644
+index c4f092e59..220cd142f 100644
--- a/src/http/ngx_http_request_body.c
+++ b/src/http/ngx_http_request_body.c
-@@ -85,6 +85,13 @@ ngx_http_read_client_request_body(ngx_http_request_t *r,
- }
- #endif
+@@ -312,6 +312,12 @@ ngx_http_do_read_client_request_body(ngx_http_request_t *r)
+ ngx_del_timer(c->read);
+ }
+#if (NGX_HTTP_V3)
-+ if (r->qstream) {
-+ rc = ngx_http_v3_read_request_body(r);
-+ goto done;
-+ }
++ if (r->qstream) {
++ return NGX_AGAIN;
++ }
+#endif
+
- preread = r->header_in->last - r->header_in->pos;
-
- if (preread) {
-@@ -226,6 +233,18 @@ ngx_http_read_unbuffered_request_body(ngx_http_request_t *r)
- }
- #endif
+ if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+ return NGX_HTTP_INTERNAL_SERVER_ERROR;
+ }
+@@ -404,6 +410,12 @@ ngx_http_do_read_client_request_body(ngx_http_request_t *r)
+ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
+ ngx_add_timer(c->read, clcf->client_body_timeout);
+#if (NGX_HTTP_V3)
-+ if (r->qstream) {
-+ rc = ngx_http_v3_read_unbuffered_request_body(r);
-+
-+ if (rc == NGX_OK) {
-+ r->reading_body = 0;
-+ }
-+
-+ return rc;
-+ }
++ if (r->qstream) {
++ return NGX_AGAIN;
++ }
+#endif
+
- if (r->connection->read->timedout) {
- r->connection->timedout = 1;
- return NGX_HTTP_REQUEST_TIME_OUT;
-@@ -525,6 +544,13 @@ ngx_http_discard_request_body(ngx_http_request_t *r)
+ if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+ return NGX_HTTP_INTERNAL_SERVER_ERROR;
+ }
+@@ -525,6 +537,17 @@ ngx_http_discard_request_body(ngx_http_request_t *r)
}
#endif
+#if (NGX_HTTP_V3)
+ if (r->qstream) {
+ r->qstream->skip_data = 1;
++
++ /* disable stream read to avoid pointless data events */
++ ngx_http_v3_stop_stream_read(r->qstream, 0);
++
+ return NGX_OK;
+ }
+#endif
@@ -1513,7 +1553,7 @@
if (ngx_http_test_expect(r) != NGX_OK) {
return NGX_HTTP_INTERNAL_SERVER_ERROR;
}
-@@ -808,6 +834,9 @@ ngx_http_test_expect(ngx_http_request_t *r)
+@@ -808,6 +831,9 @@ ngx_http_test_expect(ngx_http_request_t *r)
|| r->http_version < NGX_HTTP_VERSION_11
#if (NGX_HTTP_V2)
|| r->stream != NULL
@@ -1523,6 +1563,20 @@
#endif
)
{
+@@ -848,6 +874,13 @@ ngx_http_test_expect(ngx_http_request_t *r)
+ static ngx_int_t
+ ngx_http_request_body_filter(ngx_http_request_t *r, ngx_chain_t *in)
+ {
++
++#if (NGX_HTTP_V3)
++ if (r->qstream) {
++ return ngx_http_v3_request_body_filter(r, in);
++ }
++#endif
++
+ if (r->headers_in.chunked) {
+ return ngx_http_request_body_chunked_filter(r, in);
+
diff --git a/src/http/ngx_http_upstream.c b/src/http/ngx_http_upstream.c
index a7391d09a..398af2797 100644
--- a/src/http/ngx_http_upstream.c
@@ -1556,10 +1610,10 @@
if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
diff --git a/src/http/v3/ngx_http_v3.c b/src/http/v3/ngx_http_v3.c
new file mode 100644
-index 000000000..b14af1570
+index 000000000..1a05d4e01
--- /dev/null
+++ b/src/http/v3/ngx_http_v3.c
-@@ -0,0 +1,2234 @@
+@@ -0,0 +1,2231 @@
+
+/*
+ * Copyright (C) Cloudflare, Inc.
@@ -1581,12 +1635,17 @@
+
+
+/* errors */
-+#define NGX_HTTP_V3_NO_ERROR 0x0
-+#define NGX_HTTP_V3_INTERNAL_ERROR 0x3
++#define NGX_HTTP_V3_NO_ERROR 0x0100
++#define NGX_HTTP_V3_PROTOCOL_ERROR 0x0101
++#define NGX_HTTP_V3_INTERNAL_ERROR 0x0102
+
+
+static void ngx_http_v3_handler(ngx_connection_t *c);
+
++static void ngx_http_v3_idle_handler(ngx_connection_t *c);
++
++static void ngx_http_v3_handle_connection(ngx_http_v3_connection_t *h3c);
++
+static ngx_http_v3_stream_t *ngx_http_v3_stream_lookup(
+ ngx_http_v3_connection_t *h3c, ngx_uint_t stream_id);
+static ngx_http_v3_stream_t *ngx_http_v3_create_stream(
@@ -1613,11 +1672,9 @@
+static ngx_int_t ngx_http_v3_construct_request_line(ngx_http_request_t *r);
+
+static void ngx_http_v3_run_request(ngx_http_request_t *r);
-+static ngx_int_t ngx_http_v3_process_request_body(ngx_http_request_t *r,
-+ ngx_uint_t do_read, ngx_uint_t last);
-+static ngx_int_t ngx_http_v3_filter_request_body(ngx_http_request_t *r);
-+static void ngx_http_v3_read_client_request_body_handler(ngx_http_request_t *r);
+
++static ssize_t ngx_http_v3_recv_body(ngx_connection_t *c, u_char *buf,
++ size_t size);
+static ngx_chain_t *ngx_http_v3_send_chain(ngx_connection_t *fc,
+ ngx_chain_t *in, off_t limit);
+
@@ -1865,53 +1922,6 @@
+}
+
+
-+static ngx_int_t
-+ngx_http_v3_process_data(ngx_connection_t *c, int64_t stream_id)
-+{
-+ int rc;
-+ ngx_http_request_t *r;
-+ ngx_http_v3_stream_t *stream;
-+ ngx_http_v3_connection_t *h3c;
-+
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 process data");
-+
-+ h3c = c->data;
-+
-+ stream = ngx_http_v3_stream_lookup(h3c, stream_id);
-+
-+ if (stream == NULL) {
-+
-+ return NGX_OK;
-+ }
-+
-+ if (stream->skip_data) {
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
-+ "skipping http3 DATA frame");
-+
-+ return NGX_OK;
-+ }
-+
-+ r = stream->request;
-+
-+ if (!r->request_body) {
-+ return NGX_AGAIN;
-+ }
-+
-+ rc = ngx_http_v3_process_request_body(r, 1, stream->in_closed);
-+
-+ if (rc == NGX_AGAIN) {
-+ return NGX_AGAIN;
-+ }
-+
-+ if (rc != NGX_OK) {
-+ stream->skip_data = 1;
-+ ngx_http_finalize_request(r, rc);
-+ }
-+
-+ return NGX_OK;
-+}
-+
-+
+static void
+ngx_http_v3_process_blocked_streams(ngx_http_v3_connection_t *h3c)
+{
@@ -1959,6 +1969,9 @@
+static void
+ngx_http_v3_handler(ngx_connection_t *c)
+{
++ ngx_chain_t out;
++ ngx_connection_t *fc;
++ ngx_http_request_t *r;
+ ngx_http_v3_connection_t *h3c;
+ ngx_http_v3_stream_t *stream;
+
@@ -1966,8 +1979,13 @@
+
+ h3c = c->data;
+
++ if (c->read->timedout) {
++ ngx_http_v3_finalize_connection(h3c, NGX_HTTP_V3_PROTOCOL_ERROR);
++ return;
++ }
++
+ if (c->error) {
-+ ngx_http_v3_finalize_connection(h3c, 0);
++ ngx_http_v3_finalize_connection(h3c, NGX_HTTP_V3_INTERNAL_ERROR);
+ return;
+ }
+
@@ -1977,10 +1995,15 @@
+ quiche_h3_event *ev;
+
+ int64_t stream_id = quiche_h3_conn_poll(h3c->h3, c->quic->conn, &ev);
-+ if (stream_id < 0) {
++ if (stream_id == QUICHE_H3_ERR_DONE) {
+ break;
+ }
+
++ if (stream_id < 0) {
++ ngx_http_v3_finalize_connection(h3c, NGX_HTTP_V3_PROTOCOL_ERROR);
++ return;
++ }
++
+ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, h3c->connection->log, 0,
+ "http3 event stream:%ui ev:%ui", stream_id,
+ quiche_h3_event_type(ev));
@@ -1992,9 +2015,16 @@
+ }
+
+ case QUICHE_H3_EVENT_DATA: {
-+ if (ngx_http_v3_process_data(c, stream_id) == NGX_AGAIN) {
-+ quiche_h3_event_free(ev);
-+ return;
++ /* Lookup stream. If there isn't one, it means it has already
++ * been closed, so ignore the event. */
++ stream = ngx_http_v3_stream_lookup(h3c, stream_id);
++
++ if (stream != NULL && !stream->in_closed) {
++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
++ "http3 data");
++
++ ngx_post_event(stream->request->connection->read,
++ &ngx_posted_events);
+ }
+
+ break;
@@ -2009,12 +2039,41 @@
+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
+ "http3 finished");
+
-+ stream->in_closed = 1;
-+
+ /* Flush request body that was buffered. */
+ if (stream->request->request_body) {
-+ ngx_http_v3_process_request_body(stream->request, 0, 1);
++ out.buf = stream->request->request_body->buf;
++ out.next = NULL;
++
++ ngx_http_v3_request_body_filter(stream->request, &out);
++
++ ngx_post_event(stream->request->connection->read,
++ &ngx_posted_events);
+ }
++
++ stream->in_closed = 1;
++ }
++
++ break;
++ }
++
++ case QUICHE_H3_EVENT_RESET: {
++ /* Lookup stream. If there isn't one, it means it has already
++ * been closed, so ignore the event. */
++ stream = ngx_http_v3_stream_lookup(h3c, stream_id);
++
++ if (stream != NULL && !stream->in_closed) {
++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0,
++ "http3 reset");
++
++ r = stream->request;
++ fc = r->connection;
++
++ fc->error = 1;
++
++ ngx_post_event(stream->request->connection->read,
++ &ngx_posted_events);
++
++ stream->in_closed = 1;
+ }
+
+ break;
@@ -2029,6 +2088,68 @@
+
+ quiche_h3_event_free(ev);
+ }
++
++ ngx_http_v3_handle_connection(h3c);
++}
++
++
++static void
++ngx_http_v3_idle_handler(ngx_connection_t *c)
++{
++ ngx_http_v3_connection_t *h3c;
++
++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 idle handler");
++
++ h3c = c->data;
++
++ if (c->read->timedout) {
++ ngx_http_v3_finalize_connection(h3c, NGX_HTTP_V3_NO_ERROR);
++ return;
++ }
++
++ if (c->error) {
++ ngx_http_v3_finalize_connection(h3c, NGX_HTTP_V3_INTERNAL_ERROR);
++ return;
++ }
++
++ if (!quiche_conn_is_readable(c->quic->conn)) {
++ return;
++ }
++
++ if (c->read->timer_set) {
++ ngx_del_timer(c->read);
++ }
++
++ c->quic->handler = ngx_http_v3_handler;
++
++ ngx_http_v3_handler(c);
++}
++
++
++static void
++ngx_http_v3_handle_connection(ngx_http_v3_connection_t *h3c)
++{
++ ngx_connection_t *c;
++ ngx_http_v3_srv_conf_t *h3scf;
++
++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, h3c->connection->log, 0,
++ "http3 handle connection");
++
++ c = h3c->connection;
++
++ if (h3c->processing || c->error) {
++ return;
++ }
++
++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, h3c->connection->log, 0,
++ "http3 connection is idle");
++
++ h3scf = ngx_http_get_module_srv_conf(h3c->http_connection->conf_ctx,
++ ngx_http_v3_module);
++
++ c->quic->handler = ngx_http_v3_idle_handler;
++
++ ngx_add_timer(c->read, h3scf->idle_timeout);
+}
+
+
@@ -2115,6 +2236,8 @@
+ fc->sndlowat = 1;
+ fc->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;
+
++ fc->recv = ngx_http_v3_recv_body;
++
+ fc->send_chain = ngx_http_v3_send_chain;
+ fc->need_last_buf = 1;
+
@@ -2748,390 +2871,188 @@
+}
+
+
++/* End of functions copied from HTTP/2 module. */
++
++
+ngx_int_t
-+ngx_http_v3_read_request_body(ngx_http_request_t *r)
++ngx_http_v3_request_body_filter(ngx_http_request_t *r, ngx_chain_t *in)
+{
-+ off_t len;
-+ ngx_http_v3_stream_t *stream;
++ size_t size;
++ ngx_int_t rc;
++ ngx_buf_t *b;
++ ngx_chain_t *cl, *tl, *out, **ll;
++ ngx_connection_t *c;
+ ngx_http_request_body_t *rb;
+ ngx_http_core_loc_conf_t *clcf;
+
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
-+ "http3 read request body");
++ c = r->qstream->connection->connection;
+
-+ stream = r->qstream;
+ rb = r->request_body;
+
-+ if (stream->skip_data) {
-+ r->request_body_no_buffering = 0;
-+ rb->post_handler(r);
-+ return NGX_OK;
-+ }
-+
+ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
+
-+ len = r->headers_in.content_length_n;
-+
-+ if (r->request_body_no_buffering && !stream->in_closed) {
-+
-+ if (len < 0 || len > (off_t) clcf->client_body_buffer_size) {
-+ len = clcf->client_body_buffer_size;
-+ }
-+
-+ rb->buf = ngx_create_temp_buf(r->pool, (size_t) len);
-+
-+ } else if (len >= 0 && len <= (off_t) clcf->client_body_buffer_size
-+ && !r->request_body_in_file_only)
-+ {
-+ rb->buf = ngx_create_temp_buf(r->pool, (size_t) len);
-+
-+ } else {
-+ rb->buf = ngx_calloc_buf(r->pool);
-+
-+ if (rb->buf != NULL) {
-+ rb->buf->sync = 1;
-+ }
-+ }
-+
-+ if (rb->buf == NULL) {
-+ stream->skip_data = 1;
-+ return NGX_HTTP_INTERNAL_SERVER_ERROR;
-+ }
-+
-+ rb->rest = 1;
-+
-+ if (stream->in_closed) {
-+ r->request_body_no_buffering = 0;
-+
-+ return ngx_http_v3_process_request_body(r, 0, 1);
-+ }
-+
-+ /* TODO: set timer */
-+ ngx_add_timer(r->connection->read, clcf->client_body_timeout);
-+
-+ r->read_event_handler = ngx_http_v3_read_client_request_body_handler;
-+ r->write_event_handler = ngx_http_request_empty_handler;
-+
-+ return NGX_AGAIN;
-+}
-+
-+
-+static ngx_int_t
-+ngx_http_v3_process_request_body(ngx_http_request_t *r, ngx_uint_t do_read,
-+ ngx_uint_t last)
-+{
-+ ssize_t len = 0;
-+ ngx_buf_t *buf;
-+ ngx_int_t rc;
-+ ngx_connection_t *c, *fc;
-+ ngx_http_v3_connection_t *h3c;
-+ ngx_http_request_body_t *rb;
-+ ngx_http_core_loc_conf_t *clcf;
-+
-+ fc = r->connection;
-+ h3c = r->qstream->connection;
-+ c = h3c->connection;
-+
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, c->log, 0, "http3 process request body");
-+
-+ rb = r->request_body;
-+ buf = rb->buf;
-+
-+ if (buf->sync) {
-+ buf->pos = buf->start;
-+ buf->last = buf->start;
-+
-+ r->request_body_in_file_only = 1;
-+ }
-+
-+ if (do_read) {
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
-+ "http3 reading %z bytes of request body",
-+ buf->end - buf->last);
-+
-+ if (buf->last == buf->end) {
-+ return NGX_AGAIN;
-+ }
-+
-+ len = quiche_h3_recv_body(h3c->h3, c->quic->conn, r->qstream->id,
-+ buf->last, buf->end - buf->last);
-+
-+ if (len == QUICHE_H3_ERR_DONE) {
-+ return NGX_AGAIN;
-+ }
-+
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
-+ "http3 read %z bytes of request body", len);
-+
-+ buf->last += len;
-+ }
-+
-+ if (last) {
-+ rb->rest = 0;
-+
-+ if (fc->read->timer_set) {
-+ ngx_del_timer(fc->read);
-+ }
-+
-+ if (r->request_body_no_buffering) {
-+ ngx_post_event(fc->read, &ngx_posted_events);
-+ return NGX_OK;
-+ }
-+
-+ rc = ngx_http_v3_filter_request_body(r);
-+
-+ if (rc != NGX_OK) {
-+ return rc;
-+ }
-+
-+ if (buf->sync) {
-+ /* prevent reusing this buffer in the upstream module */
-+ rb->buf = NULL;
-+ }
++ if (rb->rest == -1) {
++ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
++ "http3 request body filter");
+
+ if (r->headers_in.chunked) {
-+ r->headers_in.content_length_n = rb->received;
-+ }
-+
-+ r->read_event_handler = ngx_http_block_reading;
-+ rb->post_handler(r);
-+
-+ return NGX_OK;
-+ }
-+
-+ if (len == 0) {
-+ return NGX_OK;
-+ }
-+
-+ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
-+ ngx_add_timer(fc->read, clcf->client_body_timeout);
-+
-+ if (r->request_body_no_buffering) {
-+ ngx_post_event(fc->read, &ngx_posted_events);
-+ return NGX_AGAIN;
-+ }
-+
-+ if (buf->sync) {
-+ return ngx_http_v3_filter_request_body(r);
-+ }
-+
-+ return NGX_OK;
-+}
-+
-+
-+static ngx_int_t
-+ngx_http_v3_filter_request_body(ngx_http_request_t *r)
-+{
-+ ngx_buf_t *b, *buf;
-+ ngx_int_t rc;
-+ ngx_chain_t *cl;
-+ ngx_http_request_body_t *rb;
-+ ngx_http_core_loc_conf_t *clcf;
-+
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
-+ "http3 filter request body");
-+
-+ rb = r->request_body;
-+ buf = rb->buf;
-+
-+ if (buf->pos == buf->last && rb->rest) {
-+ cl = NULL;
-+ goto update;
-+ }
-+
-+ cl = ngx_chain_get_free_buf(r->pool, &rb->free);
-+ if (cl == NULL) {
-+ return NGX_HTTP_INTERNAL_SERVER_ERROR;
-+ }
-+
-+ b = cl->buf;
-+
-+ ngx_memzero(b, sizeof(ngx_buf_t));
-+
-+ if (buf->pos != buf->last) {
-+ r->request_length += buf->last - buf->pos;
-+ rb->received += buf->last - buf->pos;
-+
-+ if (r->headers_in.content_length_n != -1) {
-+ if (rb->received > r->headers_in.content_length_n) {
-+ ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
-+ "client intended to send body data "
-+ "larger than declared");
-+
-+ return NGX_HTTP_BAD_REQUEST;
-+ }
-+
++ rb->rest = clcf->client_body_buffer_size;
++ r->headers_in.content_length_n = 0;
+ } else {
-+ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
-+
-+ if (clcf->client_max_body_size
-+ && rb->received > clcf->client_max_body_size)
-+ {
-+ ngx_log_error(NGX_LOG_ERR, r->connection->log, 0,
-+ "client intended to send too large chunked body: "
-+ "%O bytes", rb->received);
-+
-+ return NGX_HTTP_REQUEST_ENTITY_TOO_LARGE;
-+ }
++ rb->rest = r->headers_in.content_length_n;
+ }
++ }
++
++ out = NULL;
++ ll = &out;
++
++ for (cl = in; cl; cl = cl->next) {
++
++ if (rb->rest == 0) {
++ break;
++ }
++
++ if (ngx_buf_size(cl->buf) == 0) {
++ continue;
++ }
++
++ tl = ngx_chain_get_free_buf(r->pool, &rb->free);
++ if (tl == NULL) {
++ return NGX_HTTP_INTERNAL_SERVER_ERROR;
++ }
++
++ b = tl->buf;
++
++ ngx_memzero(b, sizeof(ngx_buf_t));
+
+ b->temporary = 1;
-+ b->pos = buf->pos;
-+ b->last = buf->last;
-+ b->start = b->pos;
-+ b->end = b->last;
++ b->tag = (ngx_buf_tag_t) &ngx_http_read_client_request_body;
++ b->start = cl->buf->pos;
++ b->pos = cl->buf->pos;
++ b->last = cl->buf->last;
++ b->end = cl->buf->end;
++ b->flush = r->request_body_no_buffering;
+
-+ buf->pos = buf->last;
-+ }
++ size = cl->buf->last - cl->buf->pos;
+
-+ if (!rb->rest) {
-+ if (r->headers_in.content_length_n != -1
-+ && r->headers_in.content_length_n != rb->received)
-+ {
-+ ngx_log_error(NGX_LOG_INFO, r->connection->log, 0,
-+ "client prematurely closed stream: "
-+ "only %O out of %O bytes of request body received",
-+ rb->received, r->headers_in.content_length_n);
++ cl->buf->pos = cl->buf->last;
+
-+ return NGX_HTTP_BAD_REQUEST;
++ if (r->headers_in.chunked) {
++ r->headers_in.content_length_n += size;
+ }
+
-+ b->last_buf = 1;
++ if (quiche_conn_stream_finished(c->quic->conn, r->qstream->id)) {
++ rb->rest = 0;
++ b->last = cl->buf->pos;
++ b->last_buf = 1;
++ }
++
++ *ll = tl;
++ ll = &tl->next;
+ }
+
-+ b->tag = (ngx_buf_tag_t) &ngx_http_v3_filter_request_body;
-+ b->flush = r->request_body_no_buffering;
++ rc = ngx_http_top_request_body_filter(r, out);
+
-+update:
-+
-+ rc = ngx_http_top_request_body_filter(r, cl);
-+
-+ ngx_chain_update_chains(r->pool, &rb->free, &rb->busy, &cl,
-+ (ngx_buf_tag_t) &ngx_http_v3_filter_request_body);
++ ngx_chain_update_chains(r->pool, &rb->free, &rb->busy, &out,
++ (ngx_buf_tag_t) &ngx_http_read_client_request_body);
+
+ return rc;
+}
+
+
-+static void
-+ngx_http_v3_read_client_request_body_handler(ngx_http_request_t *r)
++size_t
++ngx_http_v3_get_headers_out_count(ngx_http_request_t *r)
+{
-+ ngx_connection_t *fc;
++ size_t headers_count;
++ ngx_uint_t i;
++ ngx_list_part_t *part;
++ ngx_table_elt_t *header;
+
-+ fc = r->connection;
++ headers_count = 1; /* :status */
+
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0,
-+ "http3 read client request body handler");
-+
-+ if (fc->read->timedout) {
-+ ngx_log_error(NGX_LOG_INFO, fc->log, NGX_ETIMEDOUT, "client timed out");
-+
-+ fc->timedout = 1;
-+ r->qstream->skip_data = 1;
-+
-+ ngx_http_finalize_request(r, NGX_HTTP_REQUEST_TIME_OUT);
-+ return;
++ if (r->headers_out.server == NULL) {
++ headers_count += 1;
+ }
+
-+ if (fc->error) {
-+ ngx_log_error(NGX_LOG_INFO, fc->log, 0,
-+ "client prematurely closed stream");
-+
-+ r->qstream->skip_data = 1;
-+
-+ ngx_http_finalize_request(r, NGX_HTTP_CLIENT_CLOSED_REQUEST);
-+ return;
++ if (r->headers_out.date == NULL) {
++ headers_count += 1;
+ }
++
++ if (r->headers_out.content_type.len) {
++ headers_count += 1;
++ }
++
++ if (r->headers_out.content_length == NULL
++ && r->headers_out.content_length_n >= 0)
++ {
++ headers_count += 1;
++ }
++
++ if (r->headers_out.last_modified == NULL
++ && r->headers_out.last_modified_time != -1)
++ {
++ headers_count += 1;
++ }
++
++ if (r->headers_out.location && r->headers_out.location->value.len) {
++ headers_count += 1;
++ }
++
++#if (NGX_HTTP_GZIP)
++ if (r->gzip_vary) {
++ headers_count += 1;
++ }
++#endif
++
++ part = &r->headers_out.headers.part;
++ header = part->elts;
++
++ for (i = 0; /* void */; i++) {
++
++ if (i >= part->nelts) {
++ if (part->next == NULL) {
++ break;
++ }
++
++ part = part->next;
++ header = part->elts;
++ i = 0;
++ }
++
++ if (header[i].hash == 0) {
++ continue;
++ }
++
++ headers_count += 1;
++ }
++
++ return headers_count;
+}
+
+
+ngx_int_t
-+ngx_http_v3_read_unbuffered_request_body(ngx_http_request_t *r)
++ngx_http_v3_push_response_headers(ngx_http_request_t *r)
+{
-+ ngx_buf_t *buf;
-+ ngx_int_t rc;
++ u_char *tmp;
++ size_t len, headers_count;
++ ngx_str_t host, location;
++ ngx_uint_t i, port;
++ ngx_list_part_t *part;
++ ngx_table_elt_t *header;
+ ngx_connection_t *fc;
-+ ngx_http_v3_stream_t *stream;
++ quiche_h3_header *h;
++ ngx_http_core_loc_conf_t *clcf;
++ ngx_http_core_srv_conf_t *cscf;
++ u_char addr[NGX_SOCKADDR_STRLEN];
+
-+ stream = r->qstream;
-+ fc = r->connection;
-+
-+ ngx_log_debug0(NGX_LOG_DEBUG_HTTP, fc->log, 0,
-+ "http3 read unbuffered request body");
-+
-+ if (fc->read->timedout) {
-+ stream->skip_data = 1;
-+ fc->timedout = 1;
-+
-+ return NGX_HTTP_REQUEST_TIME_OUT;
-+ }
-+
-+ if (fc->error) {
-+ stream->skip_data = 1;
-+ return NGX_HTTP_BAD_REQUEST;
-+ }
-+
-+ rc = ngx_http_v3_filter_request_body(r);
-+
-+ if (rc != NGX_OK) {
-+ stream->skip_data = 1;
-+ return rc;
-+ }
-+
-+ if (!r->request_body->rest) {
++ /* The list of response headers was already generated, so there's nothing
++ * more to do here. */
++ if (r->qstream->headers != NULL) {
+ return NGX_OK;
+ }
+
-+ if (r->request_body->busy != NULL) {
-+ return NGX_AGAIN;
-+ }
-+
-+ buf = r->request_body->buf;
-+
-+ buf->pos = buf->start;
-+ buf->last = buf->start;
-+
-+ ngx_post_event(stream->connection->connection->read, &ngx_posted_events);
-+
-+ return NGX_AGAIN;
-+}
-+
-+
-+/* End of functions copied from HTTP/2 module. */
-+
-+
-+ngx_int_t
-+ngx_http_v3_send_response(ngx_http_request_t *r)
-+{
-+ int rc;
-+ u_char *tmp;
-+ u_char status[3], content_len[NGX_OFF_T_LEN],
-+ last_modified[sizeof("Wed, 31 Dec 1986 18:00:00 GMT") - 1],
-+ addr[NGX_SOCKADDR_STRLEN];
-+ size_t len;
-+ ngx_array_t *headers;
-+ ngx_str_t host, location;
-+ ngx_uint_t i, port, fin;
-+ ngx_list_part_t *part;
-+ ngx_table_elt_t *header;
-+ ngx_connection_t *c, *fc;
-+ quiche_h3_header *h;
-+ ngx_http_v3_connection_t *h3c;
-+ ngx_http_core_loc_conf_t *clcf;
-+ ngx_http_core_srv_conf_t *cscf;
-+
-+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
-+ "http3 send response stream %ui", r->qstream->id);
-+
+ fc = r->connection;
+
-+ if (fc->error) {
-+ return NGX_ERROR;
-+ }
-+
-+ h3c = r->qstream->connection;
-+ c = h3c->connection;
-+
+ if (r->method == NGX_HTTP_HEAD) {
+ r->header_only = 1;
+ }
@@ -3165,14 +3086,18 @@
+ r->headers_out.last_modified = NULL;
+ }
+
-+ headers = ngx_array_create(r->pool, 1, sizeof(quiche_h3_header));
-+ if (headers == NULL) {
++ headers_count = ngx_http_v3_get_headers_out_count(r);
++
++ r->qstream->headers =
++ ngx_array_create(r->pool, headers_count, sizeof(quiche_h3_header));
++
++ if (r->qstream->headers == NULL) {
+ return NGX_ERROR;
+ }
+
+ /* Generate :status pseudo-header. */
+ {
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3180,16 +3105,20 @@
+ h->name = (u_char *) ":status";
+ h->name_len = sizeof(":status") - 1;
+
-+ h->value = status;
-+ h->value_len =
-+ ngx_sprintf(status, "%03ui", r->headers_out.status) - status;
++ tmp = ngx_pnalloc(r->pool, sizeof("418") - 1);
++ if (tmp == NULL) {
++ return NGX_ERROR;
++ }
++
++ h->value = tmp;
++ h->value_len = ngx_sprintf(tmp, "%03ui", r->headers_out.status) - tmp;
+ }
+
+ clcf = ngx_http_get_module_loc_conf(r, ngx_http_core_module);
+
+ /* Generate Server header.*/
+ if (r->headers_out.server == NULL) {
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3217,7 +3146,7 @@
+ "http3 output header: \"date: %V\"",
+ &ngx_cached_http_time);
+
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3231,7 +3160,7 @@
+
+ /* Generate Content-Type header. */
+ if (r->headers_out.content_type.len) {
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3276,7 +3205,7 @@
+ if (r->headers_out.content_length == NULL
+ && r->headers_out.content_length_n >= 0)
+ {
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3284,32 +3213,40 @@
+ h->name = (u_char *) "content-length";
+ h->name_len = sizeof("content-length") - 1;
+
-+ h->value = content_len;
++ tmp = ngx_pnalloc(r->pool, NGX_OFF_T_LEN);
++ if (tmp == NULL) {
++ return NGX_ERROR;
++ }
++
++ h->value = tmp;
+ h->value_len =
-+ ngx_sprintf(content_len, "%O", r->headers_out.content_length_n) -
-+ content_len;
++ ngx_sprintf(tmp, "%O", r->headers_out.content_length_n) - tmp;
+ }
+
+ /* Generate Last-Modified header. */
+ if (r->headers_out.last_modified == NULL
+ && r->headers_out.last_modified_time != -1)
+ {
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
+
-+ ngx_http_time(last_modified, r->headers_out.last_modified_time);
-+
-+ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, fc->log, 0,
-+ "http3 output header: \"last-modified: %*.s\"",
-+ sizeof(last_modified), last_modified);
-+
+ h->name = (u_char *) "last-modified";
+ h->name_len = sizeof("last-modified") - 1;
+
-+ h->value = last_modified;
-+ h->value_len = sizeof(last_modified);
++ tmp = ngx_pnalloc(r->pool, sizeof("Wed, 31 Dec 1986 18:00:00 GMT") - 1);
++ if (tmp == NULL) {
++ return NGX_ERROR;
++ }
++
++ h->value = tmp;
++ h->value_len =
++ ngx_http_time(tmp, r->headers_out.last_modified_time) - tmp;
++
++ ngx_log_debug2(NGX_LOG_DEBUG_HTTP, fc->log, 0,
++ "http3 output header: \"last-modified: %*.s\"",
++ h->value_len, h->value);
+ }
+
+ /* Generate Location header. */
@@ -3326,8 +3263,8 @@
+ host = r->headers_in.server;
+
+ } else {
-+ host.len = NGX_SOCKADDR_STRLEN;
+ host.data = addr;
++ host.len = NGX_SOCKADDR_STRLEN;
+
+ if (ngx_connection_local_sockaddr(fc, &host, 0) != NGX_OK) {
+ return NGX_ERROR;
@@ -3392,7 +3329,7 @@
+ "http3 output header: \"location: %V\"",
+ &r->headers_out.location->value);
+
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3407,7 +3344,7 @@
+#if (NGX_HTTP_GZIP)
+ /* Generate Vary header. */
+ if (r->gzip_vary) {
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3443,7 +3380,7 @@
+ continue;
+ }
+
-+ h = ngx_array_push(headers);
++ h = ngx_array_push(r->qstream->headers);
+ if (h == NULL) {
+ return NGX_ERROR;
+ }
@@ -3463,13 +3400,41 @@
+ h->value_len = header[i].value.len;
+ }
+
++ return NGX_OK;
++}
++
++
++ngx_int_t
++ngx_http_v3_send_response(ngx_http_request_t *r)
++{
++ int rc;
++ ngx_uint_t fin;
++ ngx_connection_t *c, *fc;
++ ngx_http_v3_connection_t *h3c;
++
++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
++ "http3 send response stream %ui", r->qstream->id);
++
++ fc = r->connection;
++
++ if (fc->error) {
++ return NGX_ERROR;
++ }
++
++ h3c = r->qstream->connection;
++ c = h3c->connection;
++
++ if (ngx_http_v3_push_response_headers(r) != NGX_OK) {
++ return NGX_ERROR;
++ }
++
+ fin = r->header_only
+ || (r->headers_out.content_length_n == 0 && !r->expect_trailers);
+
+ rc = quiche_h3_send_response(h3c->h3, c->quic->conn, r->qstream->id,
-+ headers->elts, headers->nelts, fin);
-+
-+ ngx_array_destroy(headers);
++ r->qstream->headers->elts,
++ r->qstream->headers->nelts,
++ fin);
+
+ if (rc == QUICHE_H3_ERR_STREAM_BLOCKED) {
+ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
@@ -3548,6 +3513,70 @@
+}
+
+
++static ssize_t
++ngx_http_v3_recv_body(ngx_connection_t *c, u_char *buf, size_t size)
++{
++ ssize_t n;
++ ngx_event_t *rev;
++ ngx_http_request_t *r;
++ ngx_http_v3_connection_t *h3c;
++
++ rev = c->read;
++
++ r = c->data;
++ h3c = r->qstream->connection;
++
++ if (c->error) {
++ rev->ready = 0;
++
++ return NGX_ERROR;
++ }
++
++ n = quiche_h3_recv_body(h3c->h3, c->quic->conn, r->qstream->id, buf, size);
++
++ ngx_log_debug2(NGX_LOG_DEBUG_EVENT, c->log, 0,
++ "http3 body recv: %z of %uz", n, size);
++
++ if (quiche_conn_stream_finished(c->quic->conn, r->qstream->id)) {
++ rev->ready = 0;
++
++ /* Re-schedule connection read event to poll for Finished event. */
++ ngx_post_event(h3c->connection->read, &ngx_posted_events);
++ }
++
++ if (n == 0) {
++ rev->ready = 0;
++
++ return 0;
++ }
++
++ if (n > 0) {
++
++ if ((size_t) n < size) {
++ rev->ready = 0;
++ }
++
++ return n;
++ }
++
++ if (n == QUICHE_H3_ERR_DONE) {
++ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, 0,
++ "quiche_h3_recv_body() not ready");
++
++ n = NGX_AGAIN;
++
++ } else {
++ rev->error = 1;
++
++ n = NGX_ERROR;
++ }
++
++ rev->ready = 0;
++
++ return n;
++}
++
++
+static ngx_chain_t *
+ngx_http_v3_send_chain(ngx_connection_t *fc, ngx_chain_t *in, off_t limit)
+{
@@ -3683,6 +3712,8 @@
+ h3c->free_fake_connections = fc;
+
+ h3c->processing--;
++
++ ngx_http_v3_handle_connection(h3c);
+}
+
+
@@ -3710,6 +3741,25 @@
+ ngx_http_v3_close_stream(r->qstream, 0);
+}
+
++void
++ngx_http_v3_stop_stream_read(ngx_http_v3_stream_t *stream, ngx_int_t rc)
++{
++ ngx_http_v3_connection_t *h3c;
++
++ if (!stream) {
++ return;
++ }
++
++ h3c = stream->connection;
++
++ ngx_log_debug1(NGX_LOG_DEBUG_HTTP, h3c->connection->log, 0,
++ "http3 stream shutdown read %ui", stream->id);
++
++ quiche_conn_stream_shutdown(h3c->connection->quic->conn,
++ stream->id,
++ QUICHE_SHUTDOWN_READ, rc);
++}
++
+
+static void
+ngx_http_v3_finalize_connection(ngx_http_v3_connection_t *h3c,
@@ -3737,18 +3787,17 @@
+ c->read->handler = ngx_http_empty_handler;
+ c->write->handler = ngx_http_empty_handler;
+
++ root = h3c->streams.root;
+ sentinel = h3c->streams.sentinel;
+
++ if (root != sentinel) {
++ node = ngx_rbtree_min(h3c->streams.root, sentinel);
++ } else {
++ node = NULL;
++ }
++
+ /* Close all pending streams / requests. */
-+ for ( ;; ) {
-+ root = h3c->streams.root;
-+
-+ if (root == sentinel) {
-+ break;
-+ }
-+
-+ node = ngx_rbtree_min(root, sentinel);
-+
++ while (node != NULL) {
+ stream = (ngx_http_v3_stream_t *) node;
+
+ r = stream->request;
@@ -3771,6 +3820,8 @@
+ ev = fc->read;
+ }
+
++ node = ngx_rbtree_next(&h3c->streams, node);
++
+ ev->eof = 1;
+ ev->handler(ev);
+ }
@@ -3796,10 +3847,10 @@
+}
diff --git a/src/http/v3/ngx_http_v3.h b/src/http/v3/ngx_http_v3.h
new file mode 100644
-index 000000000..45a55b898
+index 000000000..f8415a999
--- /dev/null
+++ b/src/http/v3/ngx_http_v3.h
-@@ -0,0 +1,77 @@
+@@ -0,0 +1,79 @@
+
+/*
+ * Copyright (C) Cloudflare, Inc.
@@ -3848,6 +3899,7 @@
+
+ ngx_http_v3_connection_t *connection;
+
++ ngx_array_t *headers;
+ ngx_array_t *cookies;
+
+ ngx_http_v3_stream_t *next;
@@ -3868,12 +3920,13 @@
+
+void ngx_http_v3_init(ngx_event_t *rev);
+
-+ngx_int_t ngx_http_v3_read_request_body(ngx_http_request_t *r);
-+ngx_int_t ngx_http_v3_read_unbuffered_request_body(ngx_http_request_t *r);
-+
+ngx_int_t ngx_http_v3_send_response(ngx_http_request_t *r);
+
+void ngx_http_v3_close_stream(ngx_http_v3_stream_t *stream, ngx_int_t rc);
++void ngx_http_v3_stop_stream_read(ngx_http_v3_stream_t *stream, ngx_int_t rc);
++
++ngx_int_t ngx_http_v3_request_body_filter(ngx_http_request_t *r,
++ ngx_chain_t *in);
+
+
+#endif /* _NGX_HTTP_V3_H_INCLUDED_ */
@@ -4183,8 +4236,8 @@
+ return NGX_CONF_ERROR;
+ }
+
-+ quiche_h3_config_set_max_header_list_size(conf->http3,
-+ conf->max_header_size);
++ quiche_h3_config_set_max_field_section_size(conf->http3,
++ conf->max_header_size);
+
+ cln = ngx_pool_cleanup_add(cf->pool, 0);
+ if (cln == NULL) {
@@ -4283,6 +4336,18 @@
+
+
+#endif /* _NGX_HTTP_V3_MODULE_H_INCLUDED_ */
+diff --git a/src/os/unix/ngx_udp_sendmsg_chain.c b/src/os/unix/ngx_udp_sendmsg_chain.c
+index 5399c7916..9b03ca536 100644
+--- a/src/os/unix/ngx_udp_sendmsg_chain.c
++++ b/src/os/unix/ngx_udp_sendmsg_chain.c
+@@ -315,6 +315,7 @@ eintr:
+
+ switch (err) {
+ case NGX_EAGAIN:
++ case ENOBUFS:
+ ngx_log_debug0(NGX_LOG_DEBUG_EVENT, c->log, err,
+ "sendmsg() not ready");
+ return NGX_AGAIN;
--
-2.28.0
+2.32.0
diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml
index 492e5ad..16c70fd 100644
--- a/fuzz/Cargo.toml
+++ b/fuzz/Cargo.toml
@@ -31,3 +31,8 @@
[[bin]]
name = "qpack_decode"
path = "src/qpack_decode.rs"
+
+[profile.release]
+debug = true
+debug-assertions = true
+overflow-checks = true
diff --git a/fuzz/Dockerfile b/fuzz/Dockerfile
index ecd2085..63e7693 100644
--- a/fuzz/Dockerfile
+++ b/fuzz/Dockerfile
@@ -9,6 +9,6 @@
COPY ./cert.crt ./
COPY ./cert.key ./
-COPY ./target/x86_64-unknown-linux-gnu/debug/packet_recv_client ./
-COPY ./target/x86_64-unknown-linux-gnu/debug/packet_recv_server ./
-COPY ./target/x86_64-unknown-linux-gnu/debug/qpack_decode ./
+COPY ./target/x86_64-unknown-linux-gnu/release/packet_recv_client ./
+COPY ./target/x86_64-unknown-linux-gnu/release/packet_recv_server ./
+COPY ./target/x86_64-unknown-linux-gnu/release/qpack_decode ./
diff --git a/fuzz/src/packet_recv_client.rs b/fuzz/src/packet_recv_client.rs
index 34c9f04..2823abd 100644
--- a/fuzz/src/packet_recv_client.rs
+++ b/fuzz/src/packet_recv_client.rs
@@ -6,6 +6,8 @@
#[macro_use]
extern crate lazy_static;
+use std::net::SocketAddr;
+
use std::sync::Mutex;
lazy_static! {
@@ -26,13 +28,23 @@
};
}
-static SCID: [u8; quiche::MAX_CONN_ID_LEN] = [0; quiche::MAX_CONN_ID_LEN];
+static SCID: quiche::ConnectionId<'static> =
+ quiche::ConnectionId::from_ref(&[0; quiche::MAX_CONN_ID_LEN]);
fuzz_target!(|data: &[u8]| {
- let mut buf = data.to_vec();
- let mut conn =
- quiche::connect(Some("quic.tech"), &SCID, &mut CONFIG.lock().unwrap())
- .unwrap();
+ let from: SocketAddr = "127.0.0.1:1234".parse().unwrap();
- conn.recv(&mut buf).ok();
+ let mut buf = data.to_vec();
+
+ let mut conn = quiche::connect(
+ Some("quic.tech"),
+ &SCID,
+ from.clone(),
+ &mut CONFIG.lock().unwrap(),
+ )
+ .unwrap();
+
+ let info = quiche::RecvInfo { from };
+
+ conn.recv(&mut buf, info).ok();
});
diff --git a/fuzz/src/packet_recv_server.rs b/fuzz/src/packet_recv_server.rs
index 51bfc26..7749ee7 100644
--- a/fuzz/src/packet_recv_server.rs
+++ b/fuzz/src/packet_recv_server.rs
@@ -6,6 +6,8 @@
#[macro_use]
extern crate lazy_static;
+use std::net::SocketAddr;
+
use std::sync::Mutex;
lazy_static! {
@@ -32,12 +34,18 @@
};
}
-static SCID: [u8; quiche::MAX_CONN_ID_LEN] = [0; quiche::MAX_CONN_ID_LEN];
+static SCID: quiche::ConnectionId<'static> =
+ quiche::ConnectionId::from_ref(&[0; quiche::MAX_CONN_ID_LEN]);
fuzz_target!(|data: &[u8]| {
- let mut buf = data.to_vec();
- let mut conn =
- quiche::accept(&SCID, None, &mut CONFIG.lock().unwrap()).unwrap();
+ let from: SocketAddr = "127.0.0.1:1234".parse().unwrap();
- conn.recv(&mut buf).ok();
+ let mut buf = data.to_vec();
+
+ let mut conn =
+ quiche::accept(&SCID, None, from, &mut CONFIG.lock().unwrap()).unwrap();
+
+ let info = quiche::RecvInfo { from };
+
+ conn.recv(&mut buf, info).ok();
});
diff --git a/include/quiche.h b/include/quiche.h
index 8a25975..54726ad 100644
--- a/include/quiche.h
+++ b/include/quiche.h
@@ -46,7 +46,7 @@
//
// The current QUIC wire version.
-#define QUICHE_PROTOCOL_VERSION 0xff00001d
+#define QUICHE_PROTOCOL_VERSION 0x00000001
// The maximum length of a connection ID.
#define QUICHE_MAX_CONN_ID_LEN 20
@@ -94,6 +94,12 @@
// The peer violated the local stream limits.
QUICHE_ERR_STREAM_LIMIT = -12,
+ // The specified stream was stopped by the peer.
+ QUICHE_ERR_STREAM_STOPPED = -15,
+
+ // The specified stream was reset by the peer.
+ QUICHE_ERR_STREAM_RESET = -16,
+
// The received data exceeds the stream's final size.
QUICHE_ERR_FINAL_SIZE = -13,
@@ -122,6 +128,10 @@
int quiche_config_load_priv_key_from_pem_file(quiche_config *config,
const char *path);
+// Specifies a file where trusted CA certificates are stored for the purposes of certificate verification.
+int quiche_config_load_verify_locations_from_file(quiche_config *config,
+ const char *path);
+
// Configures whether to verify the peer's certificate.
void quiche_config_verify_peer(quiche_config *config, bool v);
@@ -139,11 +149,15 @@
const uint8_t *protos,
size_t protos_len);
-// Sets the `max_idle_timeout` transport parameter.
+// Sets the `max_idle_timeout` transport parameter, in milliseconds. The
+// default is no timeout.
void quiche_config_set_max_idle_timeout(quiche_config *config, uint64_t v);
// Sets the `max_udp_payload_size transport` parameter.
-void quiche_config_set_max_udp_payload_size(quiche_config *config, uint64_t v);
+void quiche_config_set_max_recv_udp_payload_size(quiche_config *config, size_t v);
+
+// Sets the maximum outgoing UDP payload size.
+void quiche_config_set_max_send_udp_payload_size(quiche_config *config, size_t v);
// Sets the `initial_max_data` transport parameter.
void quiche_config_set_initial_max_data(quiche_config *config, uint64_t v);
@@ -205,11 +219,14 @@
// Creates a new server-side connection.
quiche_conn *quiche_accept(const uint8_t *scid, size_t scid_len,
const uint8_t *odcid, size_t odcid_len,
+ const struct sockaddr *from, size_t from_len,
quiche_config *config);
// Creates a new client-side connection.
-quiche_conn *quiche_connect(const char *server_name, const uint8_t *scid,
- size_t scid_len, quiche_config *config);
+quiche_conn *quiche_connect(const char *server_name,
+ const uint8_t *scid, size_t scid_len,
+ const struct sockaddr *to, size_t to_len,
+ quiche_config *config);
// Writes a version negotiation packet.
ssize_t quiche_negotiate_version(const uint8_t *scid, size_t scid_len,
@@ -228,6 +245,7 @@
quiche_conn *quiche_conn_new_with_tls(const uint8_t *scid, size_t scid_len,
const uint8_t *odcid, size_t odcid_len,
+ const struct sockaddr *peer, size_t peer_len,
quiche_config *config, void *ssl,
bool is_server);
@@ -245,14 +263,30 @@
void quiche_conn_set_qlog_fd(quiche_conn *conn, int fd, const char *log_title,
const char *log_desc);
+// Configures the given session for resumption.
+int quiche_conn_set_session(quiche_conn *conn, const uint8_t *buf, size_t buf_len);
+
+typedef struct {
+ struct sockaddr *from;
+ socklen_t from_len;
+} quiche_recv_info;
+
// Processes QUIC packets received from the peer.
-ssize_t quiche_conn_recv(quiche_conn *conn, uint8_t *buf, size_t buf_len);
+ssize_t quiche_conn_recv(quiche_conn *conn, uint8_t *buf, size_t buf_len,
+ const quiche_recv_info *info);
+
+typedef struct {
+ // The address the packet should be sent to.
+ struct sockaddr_storage to;
+ socklen_t to_len;
+
+ // The time to send the packet out.
+ struct timespec at;
+} quiche_send_info;
// Writes a single QUIC packet to be sent to the peer.
-ssize_t quiche_conn_send(quiche_conn *conn, uint8_t *out, size_t out_len);
-
-// Buffer holding data at a specific offset.
-typedef struct RangeBuf quiche_rangebuf;
+ssize_t quiche_conn_send(quiche_conn *conn, uint8_t *out, size_t out_len,
+ quiche_send_info *out_info);
// Reads contiguous data from a stream.
ssize_t quiche_conn_stream_recv(quiche_conn *conn, uint64_t stream_id,
@@ -267,12 +301,18 @@
QUICHE_SHUTDOWN_WRITE = 1,
};
+// Sets the priority for a stream.
+int quiche_conn_stream_priority(quiche_conn *conn, uint64_t stream_id,
+ uint8_t urgency, bool incremental);
+
// Shuts down reading or writing from/to the specified stream.
int quiche_conn_stream_shutdown(quiche_conn *conn, uint64_t stream_id,
enum quiche_shutdown direction, uint64_t err);
ssize_t quiche_conn_stream_capacity(quiche_conn *conn, uint64_t stream_id);
+bool quiche_conn_stream_readable(quiche_conn *conn, uint64_t stream_id);
+
// Returns true if all the data has been read from the specified stream.
bool quiche_conn_stream_finished(quiche_conn *conn, uint64_t stream_id);
@@ -284,6 +324,9 @@
// Returns an iterator over streams that can be written to.
quiche_stream_iter *quiche_conn_writable(quiche_conn *conn);
+// Returns the maximum possible size of egress UDP payloads.
+size_t quiche_conn_max_send_udp_payload_size(quiche_conn *conn);
+
// Returns the amount of time until the next timeout event, in nanoseconds.
uint64_t quiche_conn_timeout_as_nanos(quiche_conn *conn);
@@ -297,10 +340,22 @@
int quiche_conn_close(quiche_conn *conn, bool app, uint64_t err,
const uint8_t *reason, size_t reason_len);
+// Returns a string uniquely representing the connection.
+void quiche_conn_trace_id(quiche_conn *conn, const uint8_t **out, size_t *out_len);
+
+// Returns the source connection ID.
+void quiche_conn_source_id(quiche_conn *conn, const uint8_t **out, size_t *out_len);
+
+// Returns the destination connection ID.
+void quiche_conn_destination_id(quiche_conn *conn, const uint8_t **out, size_t *out_len);
+
// Returns the negotiated ALPN protocol.
void quiche_conn_application_proto(quiche_conn *conn, const uint8_t **out,
size_t *out_len);
+// Returns the serialized cryptographic session for the connection.
+void quiche_conn_session(quiche_conn *conn, const uint8_t **out, size_t *out_len);
+
// Returns true if the connection handshake is complete.
bool quiche_conn_is_established(quiche_conn *conn);
@@ -308,9 +363,42 @@
// enough to send or receive early data.
bool quiche_conn_is_in_early_data(quiche_conn *conn);
+// Returns whether there is stream or DATAGRAM data available to read.
+bool quiche_conn_is_readable(quiche_conn *conn);
+
+// Returns true if the connection is draining.
+bool quiche_conn_is_draining(quiche_conn *conn);
+
+// Returns the number of bidirectional streams that can be created
+// before the peer's stream count limit is reached.
+uint64_t quiche_conn_peer_streams_left_bidi(quiche_conn *conn);
+
+// Returns the number of unidirectional streams that can be created
+// before the peer's stream count limit is reached.
+uint64_t quiche_conn_peer_streams_left_uni(quiche_conn *conn);
+
// Returns true if the connection is closed.
bool quiche_conn_is_closed(quiche_conn *conn);
+// Returns true if the connection was closed due to the idle timeout.
+bool quiche_conn_is_timed_out(quiche_conn *conn);
+
+// Returns true if a connection error was received, and updates the provided
+// parameters accordingly.
+bool quiche_conn_peer_error(quiche_conn *conn,
+ bool *is_app,
+ uint64_t *error_code,
+ const uint8_t **reason,
+ size_t *reason_len);
+
+// Returns true if a connection error was queued or sent, and updates the provided
+// parameters accordingly.
+bool quiche_conn_local_error(quiche_conn *conn,
+ bool *is_app,
+ uint64_t *error_code,
+ const uint8_t **reason,
+ size_t *reason_len);
+
// Initializes the stream's application data.
//
// Stream data can only be initialized once. Additional calls to this method
@@ -341,14 +429,71 @@
// The number of QUIC packets that were lost.
size_t lost;
+ // The number of sent QUIC packets with retransmitted data.
+ size_t retrans;
+
// The estimated round-trip time of the connection (in nanoseconds).
uint64_t rtt;
// The size of the connection's congestion window in bytes.
size_t cwnd;
- // The estimated data delivery rate in bytes/s.
+ // The number of sent bytes.
+ uint64_t sent_bytes;
+
+ // The number of received bytes.
+ uint64_t recv_bytes;
+
+ // The number of bytes lost.
+ uint64_t lost_bytes;
+
+ // The number of stream bytes retransmitted.
+ uint64_t stream_retrans_bytes;
+
+ // The current PMTU for the connection.
+ size_t pmtu;
+
+ // The most recent data delivery rate estimate in bytes/s.
uint64_t delivery_rate;
+
+ // The maximum idle timeout.
+ uint64_t peer_max_idle_timeout;
+
+ // The maximum UDP payload size.
+ uint64_t peer_max_udp_payload_size;
+
+ // The initial flow control maximum data for the connection.
+ uint64_t peer_initial_max_data;
+
+ // The initial flow control maximum data for local bidirectional streams.
+ uint64_t peer_initial_max_stream_data_bidi_local;
+
+ // The initial flow control maximum data for remote bidirectional streams.
+ uint64_t peer_initial_max_stream_data_bidi_remote;
+
+ // The initial flow control maximum data for unidirectional streams.
+ uint64_t peer_initial_max_stream_data_uni;
+
+ // The initial maximum bidirectional streams.
+ uint64_t peer_initial_max_streams_bidi;
+
+ // The initial maximum unidirectional streams.
+ uint64_t peer_initial_max_streams_uni;
+
+ // The ACK delay exponent.
+ uint64_t peer_ack_delay_exponent;
+
+ // The max ACK delay.
+ uint64_t peer_max_ack_delay;
+
+ // Whether active migration is disabled.
+ bool peer_disable_active_migration;
+
+ // The active connection ID limit.
+ uint64_t peer_active_conn_id_limit;
+
+ // DATAGRAM frame extension parameter, if any.
+ ssize_t peer_max_datagram_frame_size;
} quiche_stats;
// Collects and returns statistics about the connection.
@@ -357,6 +502,21 @@
// Returns the maximum DATAGRAM payload that can be sent.
ssize_t quiche_conn_dgram_max_writable_len(quiche_conn *conn);
+// Returns the length of the first stored DATAGRAM.
+ssize_t quiche_conn_dgram_recv_front_len(quiche_conn *conn);
+
+// Returns the number of items in the DATAGRAM receive queue.
+ssize_t quiche_conn_dgram_recv_queue_len(quiche_conn *conn);
+
+// Returns the total size of all items in the DATAGRAM receive queue.
+ssize_t quiche_conn_dgram_recv_queue_byte_size(quiche_conn *conn);
+
+// Returns the number of items in the DATAGRAM send queue.
+ssize_t quiche_conn_dgram_send_queue_len(quiche_conn *conn);
+
+// Returns the total size of all items in the DATAGRAM send queue.
+ssize_t quiche_conn_dgram_send_queue_byte_size(quiche_conn *conn);
+
// Reads the first received DATAGRAM.
ssize_t quiche_conn_dgram_recv(quiche_conn *conn, uint8_t *buf,
size_t buf_len);
@@ -377,7 +537,7 @@
//
// List of ALPN tokens of supported HTTP/3 versions.
-#define QUICHE_H3_APPLICATION_PROTOCOL "\x05h3-29\x05h3-28\x05h3-27"
+#define QUICHE_H3_APPLICATION_PROTOCOL "\x02h3\x05h3-29\x05h3-28\x05h3-27"
enum quiche_h3_error {
/// There is no error or no work to do
@@ -423,6 +583,30 @@
/// The underlying QUIC stream (or connection) doesn't have enough capacity
/// for the operation to complete. The application should retry later on.
QUICHE_H3_ERR_STREAM_BLOCKED = -13,
+
+ /// Error in the payload of a SETTINGS frame.
+ QUICHE_H3_ERR_SETTINGS_ERROR = -14,
+
+ /// Server rejected request.
+ QUICHE_H3_ERR_REQUEST_REJECTED = -15,
+
+ /// Request or its response cancelled.
+ QUICHE_H3_ERR_REQUEST_CANCELLED = -16,
+
+ /// Client's request stream terminated without containing a fully-formed
+ /// request.
+ QUICHE_H3_ERR_REQUEST_INCOMPLETE = -17,
+
+ /// An HTTP message was malformed and cannot be processed.
+ QUICHE_H3_ERR_MESSAGE_ERROR = -18,
+
+ /// The TCP connection established in response to a CONNECT request was
+ /// reset or abnormally closed.
+ QUICHE_H3_ERR_CONNECT_ERROR = -19,
+
+ /// The requested operation cannot be served over HTTP/3. Peer should retry
+ /// over HTTP/1.1.
+ QUICHE_H3_ERR_VERSION_FALLBACK = -20,
};
// Stores configuration shared between multiple connections.
@@ -431,8 +615,8 @@
// Creates an HTTP/3 config object with default settings values.
quiche_h3_config *quiche_h3_config_new(void);
-// Sets the `SETTINGS_MAX_HEADER_LIST_SIZE` setting.
-void quiche_h3_config_set_max_header_list_size(quiche_h3_config *config, uint64_t v);
+// Sets the `SETTINGS_MAX_FIELD_SECTION_SIZE` setting.
+void quiche_h3_config_set_max_field_section_size(quiche_h3_config *config, uint64_t v);
// Sets the `SETTINGS_QPACK_MAX_TABLE_CAPACITY` setting.
void quiche_h3_config_set_qpack_max_table_capacity(quiche_h3_config *config, uint64_t v);
@@ -460,13 +644,14 @@
QUICHE_H3_EVENT_FINISHED,
QUICHE_H3_EVENT_DATAGRAM,
QUICHE_H3_EVENT_GOAWAY,
+ QUICHE_H3_EVENT_RESET,
};
typedef struct Http3Event quiche_h3_event;
// Processes HTTP/3 data received from the peer.
-int quiche_h3_conn_poll(quiche_h3_conn *conn, quiche_conn *quic_conn,
- quiche_h3_event **ev);
+int64_t quiche_h3_conn_poll(quiche_h3_conn *conn, quiche_conn *quic_conn,
+ quiche_h3_event **ev);
// Returns the type of the event.
enum quiche_h3_event_type quiche_h3_event_type(quiche_h3_event *ev);
@@ -522,13 +707,18 @@
ssize_t quiche_h3_recv_body(quiche_h3_conn *conn, quiche_conn *quic_conn,
uint64_t stream_id, uint8_t *out, size_t out_len);
+// Returns whether the peer enabled HTTP/3 DATAGRAM frame support.
+bool quiche_h3_dgram_enabled_by_peer(quiche_h3_conn *conn,
+ quiche_conn *quic_conn);
+
// Writes data to the DATAGRAM send queue.
ssize_t quiche_h3_send_dgram(quiche_h3_conn *conn, quiche_conn *quic_conn,
uint64_t flow_id, uint8_t *data, size_t data_len);
// Reads data from the DATAGRAM receive queue.
ssize_t quiche_h3_recv_dgram(quiche_h3_conn *conn, quiche_conn *quic_conn,
- uint64_t *flow_id, uint8_t *out, size_t out_len);
+ uint64_t *flow_id, size_t *flow_id_len,
+ uint8_t *out, size_t out_len);
// Frees the HTTP/3 connection object.
void quiche_h3_conn_free(quiche_h3_conn *conn);
diff --git a/rustfmt.toml b/rustfmt.toml
index 4d853aa..f0d9d93 100644
--- a/rustfmt.toml
+++ b/rustfmt.toml
@@ -18,7 +18,7 @@
where_single_line = false
imports_indent = "Block"
imports_layout = "Vertical"
-merge_imports = false
+imports_granularity = "Item"
reorder_imports = true
reorder_modules = true
reorder_impl_items = true
diff --git a/src/build.rs b/src/build.rs
index 98774aa..70697c9 100644
--- a/src/build.rs
+++ b/src/build.rs
@@ -1,26 +1,6 @@
// Additional parameters for Android build of BoringSSL.
//
-// Android NDK < 18 with GCC.
-const CMAKE_PARAMS_ANDROID_NDK_OLD_GCC: &[(&str, &[(&str, &str)])] = &[
- ("aarch64", &[(
- "ANDROID_TOOLCHAIN_NAME",
- "aarch64-linux-android-4.9",
- )]),
- ("arm", &[(
- "ANDROID_TOOLCHAIN_NAME",
- "arm-linux-androideabi-4.9",
- )]),
- ("x86", &[(
- "ANDROID_TOOLCHAIN_NAME",
- "x86-linux-android-4.9",
- )]),
- ("x86_64", &[(
- "ANDROID_TOOLCHAIN_NAME",
- "x86_64-linux-android-4.9",
- )]),
-];
-
-// Android NDK >= 19.
+// Requires Android NDK >= 19.
const CMAKE_PARAMS_ANDROID_NDK: &[(&str, &[(&str, &str)])] = &[
("aarch64", &[("ANDROID_ABI", "arm64-v8a")]),
("arm", &[("ANDROID_ABI", "armeabi-v7a")]),
@@ -28,6 +8,7 @@
("x86_64", &[("ANDROID_ABI", "x86_64")]),
];
+// iOS.
const CMAKE_PARAMS_IOS: &[(&str, &[(&str, &str)])] = &[
("aarch64", &[
("CMAKE_OSX_ARCHITECTURES", "arm64"),
@@ -39,6 +20,12 @@
]),
];
+// ARM Linux.
+const CMAKE_PARAMS_ARM_LINUX: &[(&str, &[(&str, &str)])] = &[
+ ("aarch64", &[("CMAKE_SYSTEM_PROCESSOR", "aarch64")]),
+ ("arm", &[("CMAKE_SYSTEM_PROCESSOR", "arm")]),
+];
+
/// Returns the platform-specific output path for lib.
///
/// MSVC generator on Windows place static libs in a target sub-folder,
@@ -90,20 +77,13 @@
// Add platform-specific parameters.
match os.as_ref() {
"android" => {
- let cmake_params_android = if cfg!(feature = "ndk-old-gcc") {
- CMAKE_PARAMS_ANDROID_NDK_OLD_GCC
- } else {
- CMAKE_PARAMS_ANDROID_NDK
- };
-
// We need ANDROID_NDK_HOME to be set properly.
let android_ndk_home = std::env::var("ANDROID_NDK_HOME")
.expect("Please set ANDROID_NDK_HOME for Android build");
let android_ndk_home = std::path::Path::new(&android_ndk_home);
- for (android_arch, params) in cmake_params_android {
+ for (android_arch, params) in CMAKE_PARAMS_ANDROID_NDK {
if *android_arch == arch {
for (name, value) in *params {
- eprintln!("android arch={} add {}={}", arch, name, value);
boringssl_cmake.define(name, value);
}
}
@@ -111,7 +91,6 @@
let toolchain_file =
android_ndk_home.join("build/cmake/android.toolchain.cmake");
let toolchain_file = toolchain_file.to_str().unwrap();
- eprintln!("android toolchain={}", toolchain_file);
boringssl_cmake.define("CMAKE_TOOLCHAIN_FILE", toolchain_file);
// 21 is the minimum level tested. You can give higher value.
@@ -125,7 +104,6 @@
for (ios_arch, params) in CMAKE_PARAMS_IOS {
if *ios_arch == arch {
for (name, value) in *params {
- eprintln!("ios arch={} add {}={}", arch, name, value);
boringssl_cmake.define(name, value);
}
}
@@ -149,6 +127,34 @@
boringssl_cmake
},
+ "linux" => match arch.as_ref() {
+ "aarch64" | "arm" => {
+ for (arm_arch, params) in CMAKE_PARAMS_ARM_LINUX {
+ if *arm_arch == arch {
+ for (name, value) in *params {
+ boringssl_cmake.define(name, value);
+ }
+ }
+ }
+ boringssl_cmake.define("CMAKE_SYSTEM_NAME", "Linux");
+ boringssl_cmake.define("CMAKE_SYSTEM_VERSION", "1");
+
+ boringssl_cmake
+ },
+
+ "x86" => {
+ boringssl_cmake.define(
+ "CMAKE_TOOLCHAIN_FILE",
+ pwd.join("deps/boringssl/src/util/32-bit-toolchain.cmake")
+ .as_os_str(),
+ );
+
+ boringssl_cmake
+ },
+
+ _ => boringssl_cmake,
+ },
+
_ => {
// Configure BoringSSL for building on 32-bit non-windows platforms.
if arch == "x86" && os != "windows" {
@@ -197,7 +203,7 @@
}
fn main() {
- if cfg!(feature = "boringssl-vendored") {
+ if cfg!(feature = "boringssl-vendored") && !cfg!(feature = "boring-sys") {
let bssl_dir = std::env::var("QUICHE_BSSL_PATH").unwrap_or_else(|_| {
let mut cfg = get_boringssl_cmake_config();
@@ -217,6 +223,11 @@
println!("cargo:rustc-link-lib=static=ssl");
}
+ if cfg!(feature = "boring-sys") {
+ println!("cargo:rustc-link-lib=static=crypto");
+ println!("cargo:rustc-link-lib=static=ssl");
+ }
+
// MacOS: Allow cdylib to link with undefined symbols
if cfg!(target_os = "macos") {
println!("cargo:rustc-cdylib-link-arg=-Wl,-undefined,dynamic_lookup");
diff --git a/src/crypto.rs b/src/crypto.rs
index b45f4a5..ca47421 100644
--- a/src/crypto.rs
+++ b/src/crypto.rs
@@ -24,9 +24,14 @@
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+use std::mem::MaybeUninit;
+
use ring::aead;
use ring::hkdf;
+use libc::c_int;
+use libc::c_void;
+
use crate::Error;
use crate::Result;
@@ -68,11 +73,13 @@
}
impl Algorithm {
- fn get_ring_aead(self) -> &'static aead::Algorithm {
+ fn get_evp_aead(self) -> *const EVP_AEAD {
match self {
- Algorithm::AES128_GCM => &aead::AES_128_GCM,
- Algorithm::AES256_GCM => &aead::AES_256_GCM,
- Algorithm::ChaCha20_Poly1305 => &aead::CHACHA20_POLY1305,
+ Algorithm::AES128_GCM => unsafe { EVP_aead_aes_128_gcm() },
+ Algorithm::AES256_GCM => unsafe { EVP_aead_aes_256_gcm() },
+ Algorithm::ChaCha20_Poly1305 => unsafe {
+ EVP_aead_chacha20_poly1305()
+ },
}
}
@@ -93,7 +100,11 @@
}
pub fn key_len(self) -> usize {
- self.get_ring_aead().key_len()
+ match self {
+ Algorithm::AES128_GCM => 16,
+ Algorithm::AES256_GCM => 32,
+ Algorithm::ChaCha20_Poly1305 => 32,
+ }
}
pub fn tag_len(self) -> usize {
@@ -101,20 +112,28 @@
return 0;
}
- self.get_ring_aead().tag_len()
+ match self {
+ Algorithm::AES128_GCM => 16,
+ Algorithm::AES256_GCM => 16,
+ Algorithm::ChaCha20_Poly1305 => 16,
+ }
}
pub fn nonce_len(self) -> usize {
- self.get_ring_aead().nonce_len()
+ match self {
+ Algorithm::AES128_GCM => 12,
+ Algorithm::AES256_GCM => 12,
+ Algorithm::ChaCha20_Poly1305 => 12,
+ }
}
}
pub struct Open {
alg: Algorithm,
- hp_key: aead::quic::HeaderProtectionKey,
+ ctx: EVP_AEAD_CTX,
- key: aead::LessSafeKey,
+ hp_key: aead::quic::HeaderProtectionKey,
nonce: Vec<u8>,
}
@@ -124,20 +143,17 @@
alg: Algorithm, key: &[u8], iv: &[u8], hp_key: &[u8],
) -> Result<Open> {
Ok(Open {
+ alg,
+
+ ctx: make_aead_ctx(alg, key)?,
+
hp_key: aead::quic::HeaderProtectionKey::new(
alg.get_ring_hp(),
hp_key,
)
.map_err(|_| Error::CryptoFail)?,
- key: aead::LessSafeKey::new(
- aead::UnboundKey::new(alg.get_ring_aead(), key)
- .map_err(|_| Error::CryptoFail)?,
- ),
-
nonce: Vec::from(iv),
-
- alg,
})
}
@@ -149,9 +165,9 @@
let mut iv = vec![0; nonce_len];
let mut pn_key = vec![0; key_len];
- derive_pkt_key(aead, &secret, &mut key)?;
- derive_pkt_iv(aead, &secret, &mut iv)?;
- derive_hdr_key(aead, &secret, &mut pn_key)?;
+ derive_pkt_key(aead, secret, &mut key)?;
+ derive_pkt_iv(aead, secret, &mut iv)?;
+ derive_hdr_key(aead, secret, &mut pn_key)?;
Open::new(aead, &key, &iv, &pn_key)
}
@@ -163,16 +179,34 @@
return Ok(buf.len());
}
+ let tag_len = self.alg().tag_len();
+
+ let mut out_len = buf.len() - tag_len;
+
+ let max_out_len = out_len;
+
let nonce = make_nonce(&self.nonce, counter);
- let ad = aead::Aad::from(ad);
+ let rc = unsafe {
+ EVP_AEAD_CTX_open(
+ &self.ctx, // ctx
+ buf.as_mut_ptr(), // out
+ &mut out_len, // out_len
+ max_out_len, // max_out_len
+ nonce[..].as_ptr(), // nonce
+ nonce.len(), // nonce_len
+ buf.as_ptr(), // inp
+ buf.len(), // in_len
+ ad.as_ptr(), // ad
+ ad.len(), // ad_len
+ )
+ };
- let plain = self
- .key
- .open_in_place(nonce, ad, buf)
- .map_err(|_| Error::CryptoFail)?;
+ if rc != 1 {
+ return Err(Error::CryptoFail);
+ }
- Ok(plain.len())
+ Ok(out_len)
}
pub fn new_mask(&self, sample: &[u8]) -> Result<[u8; 5]> {
@@ -196,9 +230,9 @@
pub struct Seal {
alg: Algorithm,
- hp_key: aead::quic::HeaderProtectionKey,
+ ctx: EVP_AEAD_CTX,
- key: aead::LessSafeKey,
+ hp_key: aead::quic::HeaderProtectionKey,
nonce: Vec<u8>,
}
@@ -208,20 +242,17 @@
alg: Algorithm, key: &[u8], iv: &[u8], hp_key: &[u8],
) -> Result<Seal> {
Ok(Seal {
+ alg,
+
+ ctx: make_aead_ctx(alg, key)?,
+
hp_key: aead::quic::HeaderProtectionKey::new(
alg.get_ring_hp(),
hp_key,
)
.map_err(|_| Error::CryptoFail)?,
- key: aead::LessSafeKey::new(
- aead::UnboundKey::new(alg.get_ring_aead(), key)
- .map_err(|_| Error::CryptoFail)?,
- ),
-
nonce: Vec::from(iv),
-
- alg,
})
}
@@ -233,40 +264,66 @@
let mut iv = vec![0; nonce_len];
let mut pn_key = vec![0; key_len];
- derive_pkt_key(aead, &secret, &mut key)?;
- derive_pkt_iv(aead, &secret, &mut iv)?;
- derive_hdr_key(aead, &secret, &mut pn_key)?;
+ derive_pkt_key(aead, secret, &mut key)?;
+ derive_pkt_iv(aead, secret, &mut iv)?;
+ derive_hdr_key(aead, secret, &mut pn_key)?;
Seal::new(aead, &key, &iv, &pn_key)
}
pub fn seal_with_u64_counter(
- &self, counter: u64, ad: &[u8], buf: &mut [u8],
- ) -> Result<()> {
+ &self, counter: u64, ad: &[u8], buf: &mut [u8], in_len: usize,
+ extra_in: Option<&[u8]>,
+ ) -> Result<usize> {
if cfg!(feature = "fuzzing") {
- return Ok(());
+ if let Some(extra) = extra_in {
+ buf[in_len..in_len + extra.len()].copy_from_slice(extra);
+ return Ok(in_len + extra.len());
+ }
+
+ return Ok(in_len);
+ }
+
+ let tag_len = self.alg().tag_len();
+
+ let mut out_tag_len = tag_len;
+
+ let (extra_in_ptr, extra_in_len) = match extra_in {
+ Some(v) => (v.as_ptr(), v.len()),
+
+ None => (std::ptr::null(), 0),
+ };
+
+ // Make sure all the outputs combined fit in the buffer.
+ if in_len + tag_len + extra_in_len > buf.len() {
+ return Err(Error::CryptoFail);
}
let nonce = make_nonce(&self.nonce, counter);
- let ad = aead::Aad::from(ad);
+ let rc = unsafe {
+ EVP_AEAD_CTX_seal_scatter(
+ &self.ctx, // ctx
+ buf.as_mut_ptr(), // out
+ buf[in_len..].as_mut_ptr(), // out_tag
+ &mut out_tag_len, // out_tag_len
+ tag_len + extra_in_len, // max_out_tag_len
+ nonce[..].as_ptr(), // nonce
+ nonce.len(), // nonce_len
+ buf.as_ptr(), // inp
+ in_len, // in_len
+ extra_in_ptr, // extra_in
+ extra_in_len, // extra_in_len
+ ad.as_ptr(), // ad
+ ad.len(), // ad_len
+ )
+ };
- let tag_len = self.alg().tag_len();
+ if rc != 1 {
+ return Err(Error::CryptoFail);
+ }
- let in_out_len =
- buf.len().checked_sub(tag_len).ok_or(Error::CryptoFail)?;
-
- let (in_out, tag_out) = buf.split_at_mut(in_out_len);
-
- let tag = self
- .key
- .seal_in_place_separate_tag(nonce, ad, in_out)
- .map_err(|_| Error::CryptoFail)?;
-
- // Append the AEAD tag to the end of the sealed buffer.
- tag_out.copy_from_slice(tag.as_ref());
-
- Ok(())
+ Ok(in_len + out_tag_len)
}
pub fn new_mask(&self, sample: &[u8]) -> Result<[u8; 5]> {
@@ -297,7 +354,7 @@
let key_len = aead.key_len();
let nonce_len = aead.nonce_len();
- let initial_secret = derive_initial_secret(&cid, version)?;
+ let initial_secret = derive_initial_secret(cid, version);
// Client.
let mut client_key = vec![0; key_len];
@@ -334,26 +391,33 @@
Ok((open, seal))
}
-fn derive_initial_secret(secret: &[u8], version: u32) -> Result<hkdf::Prk> {
+fn derive_initial_secret(secret: &[u8], version: u32) -> hkdf::Prk {
const INITIAL_SALT: [u8; 20] = [
+ 0x38, 0x76, 0x2c, 0xf7, 0xf5, 0x59, 0x34, 0xb3, 0x4d, 0x17, 0x9a, 0xe6,
+ 0xa4, 0xc8, 0x0c, 0xad, 0xcc, 0xbb, 0x7f, 0x0a,
+ ];
+
+ const INITIAL_SALT_DRAFT29: [u8; 20] = [
0xaf, 0xbf, 0xec, 0x28, 0x99, 0x93, 0xd2, 0x4c, 0x9e, 0x97, 0x86, 0xf1,
0x9c, 0x61, 0x11, 0xe0, 0x43, 0x90, 0xa8, 0x99,
];
- const INITIAL_SALT_OLD: [u8; 20] = [
+ const INITIAL_SALT_DRAFT27: [u8; 20] = [
0xc3, 0xee, 0xf7, 0x12, 0xc7, 0x2e, 0xbb, 0x5a, 0x11, 0xa7, 0xd2, 0x43,
0x2b, 0xb4, 0x63, 0x65, 0xbe, 0xf9, 0xf5, 0x02,
];
let salt = match version {
crate::PROTOCOL_VERSION_DRAFT27 | crate::PROTOCOL_VERSION_DRAFT28 =>
- &INITIAL_SALT_OLD,
+ &INITIAL_SALT_DRAFT27,
+
+ crate::PROTOCOL_VERSION_DRAFT29 => &INITIAL_SALT_DRAFT29,
_ => &INITIAL_SALT,
};
let salt = hkdf::Salt::new(hkdf::HKDF_SHA256, salt);
- Ok(salt.extract(secret))
+ salt.extract(secret)
}
fn derive_client_initial_secret(prk: &hkdf::Prk, out: &mut [u8]) -> Result<()> {
@@ -411,6 +475,31 @@
hkdf_expand_label(&secret, LABEL, &mut out[..nonce_len])
}
+fn make_aead_ctx(alg: Algorithm, key: &[u8]) -> Result<EVP_AEAD_CTX> {
+ let mut ctx = MaybeUninit::uninit();
+
+ let ctx = unsafe {
+ let aead = alg.get_evp_aead();
+
+ let rc = EVP_AEAD_CTX_init(
+ ctx.as_mut_ptr(),
+ aead,
+ key.as_ptr(),
+ alg.key_len(),
+ alg.tag_len(),
+ std::ptr::null_mut(),
+ );
+
+ if rc != 1 {
+ return Err(Error::CryptoFail);
+ }
+
+ ctx.assume_init()
+ };
+
+ Ok(ctx)
+}
+
fn hkdf_expand_label(
prk: &hkdf::Prk, label: &[u8], out: &mut [u8],
) -> Result<()> {
@@ -429,9 +518,9 @@
Ok(())
}
-fn make_nonce(iv: &[u8], counter: u64) -> aead::Nonce {
+fn make_nonce(iv: &[u8], counter: u64) -> [u8; aead::NONCE_LEN] {
let mut nonce = [0; aead::NONCE_LEN];
- nonce.copy_from_slice(&iv);
+ nonce.copy_from_slice(iv);
// XOR the last bytes of the IV with the counter. This is equivalent to
// left-padding the counter with zero bytes.
@@ -439,7 +528,7 @@
*a ^= b;
}
- aead::Nonce::assume_unique_for_key(nonce)
+ nonce
}
// The ring HKDF expand() API does not accept an arbitrary output length, so we
@@ -453,12 +542,55 @@
}
}
+#[allow(non_camel_case_types)]
+#[repr(transparent)]
+struct EVP_AEAD(c_void);
+
+// NOTE: This structure is copied from <openssl/aead.h> in order to be able to
+// statically allocate it. While it is not often modified upstream, it needs to
+// be kept in sync.
+#[repr(C)]
+struct EVP_AEAD_CTX {
+ aead: libc::uintptr_t,
+ opaque: [u8; 580],
+ alignment: u64,
+ tag_len: u8,
+}
+
+extern {
+ // EVP_AEAD
+ fn EVP_aead_aes_128_gcm() -> *const EVP_AEAD;
+
+ fn EVP_aead_aes_256_gcm() -> *const EVP_AEAD;
+
+ fn EVP_aead_chacha20_poly1305() -> *const EVP_AEAD;
+
+ // EVP_AEAD_CTX
+ fn EVP_AEAD_CTX_init(
+ ctx: *mut EVP_AEAD_CTX, aead: *const EVP_AEAD, key: *const u8,
+ key_len: usize, tag_len: usize, engine: *mut c_void,
+ ) -> c_int;
+
+ fn EVP_AEAD_CTX_open(
+ ctx: *const EVP_AEAD_CTX, out: *mut u8, out_len: *mut usize,
+ max_out_len: usize, nonce: *const u8, nonce_len: usize, inp: *const u8,
+ in_len: usize, ad: *const u8, ad_len: usize,
+ ) -> c_int;
+
+ fn EVP_AEAD_CTX_seal_scatter(
+ ctx: *const EVP_AEAD_CTX, out: *mut u8, out_tag: *mut u8,
+ out_tag_len: *mut usize, max_out_tag_len: usize, nonce: *const u8,
+ nonce_len: usize, inp: *const u8, in_len: usize, extra_in: *const u8,
+ extra_in_len: usize, ad: *const u8, ad_len: usize,
+ ) -> c_int;
+}
+
#[cfg(test)]
mod tests {
use super::*;
#[test]
- fn derive_initial_secrets() {
+ fn derive_initial_secrets_v1() {
let dcid = [0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08];
let mut secret = [0; 32];
@@ -469,7 +601,86 @@
let aead = Algorithm::AES128_GCM;
let initial_secret =
- derive_initial_secret(&dcid, crate::PROTOCOL_VERSION).unwrap();
+ derive_initial_secret(&dcid, crate::PROTOCOL_VERSION_V1);
+
+ // Client.
+ assert!(
+ derive_client_initial_secret(&initial_secret, &mut secret).is_ok()
+ );
+ let expected_client_initial_secret = [
+ 0xc0, 0x0c, 0xf1, 0x51, 0xca, 0x5b, 0xe0, 0x75, 0xed, 0x0e, 0xbf,
+ 0xb5, 0xc8, 0x03, 0x23, 0xc4, 0x2d, 0x6b, 0x7d, 0xb6, 0x78, 0x81,
+ 0x28, 0x9a, 0xf4, 0x00, 0x8f, 0x1f, 0x6c, 0x35, 0x7a, 0xea,
+ ];
+ assert_eq!(&secret, &expected_client_initial_secret);
+
+ assert!(derive_pkt_key(aead, &secret, &mut pkt_key).is_ok());
+ let expected_client_pkt_key = [
+ 0x1f, 0x36, 0x96, 0x13, 0xdd, 0x76, 0xd5, 0x46, 0x77, 0x30, 0xef,
+ 0xcb, 0xe3, 0xb1, 0xa2, 0x2d,
+ ];
+ assert_eq!(&pkt_key, &expected_client_pkt_key);
+
+ assert!(derive_pkt_iv(aead, &secret, &mut pkt_iv).is_ok());
+ let expected_client_pkt_iv = [
+ 0xfa, 0x04, 0x4b, 0x2f, 0x42, 0xa3, 0xfd, 0x3b, 0x46, 0xfb, 0x25,
+ 0x5c,
+ ];
+ assert_eq!(&pkt_iv, &expected_client_pkt_iv);
+
+ assert!(derive_hdr_key(aead, &secret, &mut hdr_key).is_ok());
+ let expected_client_hdr_key = [
+ 0x9f, 0x50, 0x44, 0x9e, 0x04, 0xa0, 0xe8, 0x10, 0x28, 0x3a, 0x1e,
+ 0x99, 0x33, 0xad, 0xed, 0xd2,
+ ];
+ assert_eq!(&hdr_key, &expected_client_hdr_key);
+
+ // Server.
+ assert!(
+ derive_server_initial_secret(&initial_secret, &mut secret).is_ok()
+ );
+ let expected_server_initial_secret = [
+ 0x3c, 0x19, 0x98, 0x28, 0xfd, 0x13, 0x9e, 0xfd, 0x21, 0x6c, 0x15,
+ 0x5a, 0xd8, 0x44, 0xcc, 0x81, 0xfb, 0x82, 0xfa, 0x8d, 0x74, 0x46,
+ 0xfa, 0x7d, 0x78, 0xbe, 0x80, 0x3a, 0xcd, 0xda, 0x95, 0x1b,
+ ];
+ assert_eq!(&secret, &expected_server_initial_secret);
+
+ assert!(derive_pkt_key(aead, &secret, &mut pkt_key).is_ok());
+ let expected_server_pkt_key = [
+ 0xcf, 0x3a, 0x53, 0x31, 0x65, 0x3c, 0x36, 0x4c, 0x88, 0xf0, 0xf3,
+ 0x79, 0xb6, 0x06, 0x7e, 0x37,
+ ];
+ assert_eq!(&pkt_key, &expected_server_pkt_key);
+
+ assert!(derive_pkt_iv(aead, &secret, &mut pkt_iv).is_ok());
+ let expected_server_pkt_iv = [
+ 0x0a, 0xc1, 0x49, 0x3c, 0xa1, 0x90, 0x58, 0x53, 0xb0, 0xbb, 0xa0,
+ 0x3e,
+ ];
+ assert_eq!(&pkt_iv, &expected_server_pkt_iv);
+
+ assert!(derive_hdr_key(aead, &secret, &mut hdr_key).is_ok());
+ let expected_server_hdr_key = [
+ 0xc2, 0x06, 0xb8, 0xd9, 0xb9, 0xf0, 0xf3, 0x76, 0x44, 0x43, 0x0b,
+ 0x49, 0x0e, 0xea, 0xa3, 0x14,
+ ];
+ assert_eq!(&hdr_key, &expected_server_hdr_key);
+ }
+
+ #[test]
+ fn derive_initial_secrets_draft29() {
+ let dcid = [0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08];
+
+ let mut secret = [0; 32];
+ let mut pkt_key = [0; 16];
+ let mut pkt_iv = [0; 12];
+ let mut hdr_key = [0; 16];
+
+ let aead = Algorithm::AES128_GCM;
+
+ let initial_secret =
+ derive_initial_secret(&dcid, crate::PROTOCOL_VERSION_DRAFT29);
// Client.
assert!(
@@ -537,7 +748,7 @@
}
#[test]
- fn derive_initial_secrets_old() {
+ fn derive_initial_secrets_draft27() {
let dcid = [0x83, 0x94, 0xc8, 0xf0, 0x3e, 0x51, 0x57, 0x08];
let mut secret = [0; 32];
@@ -548,8 +759,7 @@
let aead = Algorithm::AES128_GCM;
let initial_secret =
- derive_initial_secret(&dcid, crate::PROTOCOL_VERSION_DRAFT28)
- .unwrap();
+ derive_initial_secret(&dcid, crate::PROTOCOL_VERSION_DRAFT27);
// Client.
assert!(
diff --git a/src/dgram.rs b/src/dgram.rs
index 755d95f..5da185e 100644
--- a/src/dgram.rs
+++ b/src/dgram.rs
@@ -46,13 +46,14 @@
}
}
- pub fn push(&mut self, data: &[u8]) -> Result<()> {
+ pub fn push(&mut self, data: Vec<u8>) -> Result<()> {
if self.is_full() {
return Err(Error::Done);
}
- self.queue.push_back(data.to_vec());
self.queue_bytes_size += data.len();
+ self.queue.push_back(data);
+
Ok(())
}
@@ -99,6 +100,10 @@
self.queue.len() == self.queue_max_len
}
+ pub fn len(&self) -> usize {
+ self.queue.len()
+ }
+
pub fn byte_size(&self) -> usize {
self.queue_bytes_size
}
diff --git a/src/ffi.rs b/src/ffi.rs
index a3815a9..34fd68b 100644
--- a/src/ffi.rs
+++ b/src/ffi.rs
@@ -29,6 +29,8 @@
use std::slice;
use std::sync::atomic;
+use std::net::SocketAddr;
+
#[cfg(unix)]
use std::os::unix::io::FromRawFd;
@@ -36,7 +38,39 @@
use libc::c_int;
use libc::c_void;
use libc::size_t;
+use libc::sockaddr;
use libc::ssize_t;
+use libc::timespec;
+
+#[cfg(not(windows))]
+use libc::sockaddr_in;
+#[cfg(windows)]
+use winapi::shared::ws2def::SOCKADDR_IN as sockaddr_in;
+
+#[cfg(not(windows))]
+use libc::sockaddr_in6;
+#[cfg(windows)]
+use winapi::shared::ws2ipdef::SOCKADDR_IN6_LH as sockaddr_in6;
+
+#[cfg(not(windows))]
+use libc::sockaddr_storage;
+#[cfg(windows)]
+use winapi::shared::ws2def::SOCKADDR_STORAGE_LH as sockaddr_storage;
+
+#[cfg(windows)]
+use libc::c_int as socklen_t;
+#[cfg(not(windows))]
+use libc::socklen_t;
+
+#[cfg(not(windows))]
+use libc::AF_INET;
+#[cfg(windows)]
+use winapi::shared::ws2def::AF_INET;
+
+#[cfg(not(windows))]
+use libc::AF_INET6;
+#[cfg(windows)]
+use winapi::shared::ws2def::AF_INET6;
use crate::*;
@@ -116,6 +150,19 @@
}
#[no_mangle]
+pub extern fn quiche_config_load_verify_locations_from_file(
+ config: &mut Config, path: *const c_char,
+) -> c_int {
+ let path = unsafe { ffi::CStr::from_ptr(path).to_str().unwrap() };
+
+ match config.load_verify_locations_from_file(path) {
+ Ok(_) => 0,
+
+ Err(e) => e.to_c() as c_int,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_config_verify_peer(config: &mut Config, v: bool) {
config.verify_peer(v);
}
@@ -154,10 +201,10 @@
}
#[no_mangle]
-pub extern fn quiche_config_set_max_udp_payload_size(
- config: &mut Config, v: u64,
+pub extern fn quiche_config_set_max_recv_udp_payload_size(
+ config: &mut Config, v: size_t,
) {
- config.set_max_udp_payload_size(v);
+ config.set_max_recv_udp_payload_size(v);
}
#[no_mangle]
@@ -250,6 +297,13 @@
}
#[no_mangle]
+pub extern fn quiche_config_set_max_send_udp_payload_size(
+ config: &mut Config, v: size_t,
+) {
+ config.set_max_send_udp_payload_size(v);
+}
+
+#[no_mangle]
pub extern fn quiche_config_free(config: *mut Config) {
unsafe { Box::from_raw(config) };
}
@@ -322,17 +376,22 @@
#[no_mangle]
pub extern fn quiche_accept(
scid: *const u8, scid_len: size_t, odcid: *const u8, odcid_len: size_t,
- config: &mut Config,
+ from: &sockaddr, from_len: socklen_t, config: &mut Config,
) -> *mut Connection {
let scid = unsafe { slice::from_raw_parts(scid, scid_len) };
+ let scid = ConnectionId::from_ref(scid);
- let odcid = if !odcid.is_null() || odcid_len == 0 {
- Some(unsafe { slice::from_raw_parts(odcid, odcid_len) })
+ let odcid = if !odcid.is_null() && odcid_len > 0 {
+ Some(ConnectionId::from_ref(unsafe {
+ slice::from_raw_parts(odcid, odcid_len)
+ }))
} else {
None
};
- match accept(scid, odcid, config) {
+ let from = std_addr_from_c(from, from_len);
+
+ match accept(&scid, odcid.as_ref(), from, config) {
Ok(c) => Box::into_raw(Pin::into_inner(c)),
Err(_) => ptr::null_mut(),
@@ -341,8 +400,8 @@
#[no_mangle]
pub extern fn quiche_connect(
- server_name: *const c_char, scid: *const u8, scid_len: size_t,
- config: &mut Config,
+ server_name: *const c_char, scid: *const u8, scid_len: size_t, to: &sockaddr,
+ to_len: socklen_t, config: &mut Config,
) -> *mut Connection {
let server_name = if server_name.is_null() {
None
@@ -351,8 +410,11 @@
};
let scid = unsafe { slice::from_raw_parts(scid, scid_len) };
+ let scid = ConnectionId::from_ref(scid);
- match connect(server_name, scid, config) {
+ let to = std_addr_from_c(to, to_len);
+
+ match connect(server_name, &scid, to, config) {
Ok(c) => Box::into_raw(Pin::into_inner(c)),
Err(_) => ptr::null_mut(),
@@ -365,10 +427,14 @@
out: *mut u8, out_len: size_t,
) -> ssize_t {
let scid = unsafe { slice::from_raw_parts(scid, scid_len) };
+ let scid = ConnectionId::from_ref(scid);
+
let dcid = unsafe { slice::from_raw_parts(dcid, dcid_len) };
+ let dcid = ConnectionId::from_ref(dcid);
+
let out = unsafe { slice::from_raw_parts_mut(out, out_len) };
- match negotiate_version(scid, dcid, out) {
+ match negotiate_version(&scid, &dcid, out) {
Ok(v) => v as ssize_t,
Err(e) => e.to_c(),
@@ -387,12 +453,18 @@
token_len: size_t, version: u32, out: *mut u8, out_len: size_t,
) -> ssize_t {
let scid = unsafe { slice::from_raw_parts(scid, scid_len) };
+ let scid = ConnectionId::from_ref(scid);
+
let dcid = unsafe { slice::from_raw_parts(dcid, dcid_len) };
+ let dcid = ConnectionId::from_ref(dcid);
+
let new_scid = unsafe { slice::from_raw_parts(new_scid, new_scid_len) };
+ let new_scid = ConnectionId::from_ref(new_scid);
+
let token = unsafe { slice::from_raw_parts(token, token_len) };
let out = unsafe { slice::from_raw_parts_mut(out, out_len) };
- match retry(scid, dcid, new_scid, token, version, out) {
+ match retry(&scid, &dcid, &new_scid, token, version, out) {
Ok(v) => v as ssize_t,
Err(e) => e.to_c(),
@@ -402,19 +474,32 @@
#[no_mangle]
pub extern fn quiche_conn_new_with_tls(
scid: *const u8, scid_len: size_t, odcid: *const u8, odcid_len: size_t,
- config: &mut Config, ssl: *mut c_void, is_server: bool,
+ peer: &sockaddr, peer_len: socklen_t, config: &mut Config, ssl: *mut c_void,
+ is_server: bool,
) -> *mut Connection {
let scid = unsafe { slice::from_raw_parts(scid, scid_len) };
+ let scid = ConnectionId::from_ref(scid);
- let odcid = if !odcid.is_null() || odcid_len == 0 {
- Some(unsafe { slice::from_raw_parts(odcid, odcid_len) })
+ let odcid = if !odcid.is_null() && odcid_len > 0 {
+ Some(ConnectionId::from_ref(unsafe {
+ slice::from_raw_parts(odcid, odcid_len)
+ }))
} else {
None
};
+ let peer = std_addr_from_c(peer, peer_len);
+
let tls = unsafe { tls::Handshake::from_ptr(ssl) };
- match Connection::with_tls(scid, odcid, config, tls, is_server) {
+ match Connection::with_tls(
+ &scid,
+ odcid.as_ref(),
+ peer,
+ config,
+ tls,
+ is_server,
+ ) {
Ok(c) => Box::into_raw(Pin::into_inner(c)),
Err(_) => ptr::null_mut(),
@@ -503,8 +588,35 @@
}
#[no_mangle]
+pub extern fn quiche_conn_set_session(
+ conn: &mut Connection, buf: *const u8, buf_len: size_t,
+) -> c_int {
+ let buf = unsafe { slice::from_raw_parts(buf, buf_len) };
+
+ match conn.set_session(buf) {
+ Ok(_) => 0,
+
+ Err(e) => e.to_c() as c_int,
+ }
+}
+
+#[repr(C)]
+pub struct RecvInfo<'a> {
+ from: &'a sockaddr,
+ from_len: socklen_t,
+}
+
+impl<'a> From<&RecvInfo<'a>> for crate::RecvInfo {
+ fn from(info: &RecvInfo) -> crate::RecvInfo {
+ crate::RecvInfo {
+ from: std_addr_from_c(info.from, info.from_len),
+ }
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_conn_recv(
- conn: &mut Connection, buf: *mut u8, buf_len: size_t,
+ conn: &mut Connection, buf: *mut u8, buf_len: size_t, info: &RecvInfo,
) -> ssize_t {
if buf_len > <ssize_t>::max_value() as usize {
panic!("The provided buffer is too large");
@@ -512,16 +624,24 @@
let buf = unsafe { slice::from_raw_parts_mut(buf, buf_len) };
- match conn.recv(buf) {
+ match conn.recv(buf, info.into()) {
Ok(v) => v as ssize_t,
Err(e) => e.to_c(),
}
}
+#[repr(C)]
+pub struct SendInfo {
+ to: sockaddr_storage,
+ to_len: socklen_t,
+
+ at: timespec,
+}
+
#[no_mangle]
pub extern fn quiche_conn_send(
- conn: &mut Connection, out: *mut u8, out_len: size_t,
+ conn: &mut Connection, out: *mut u8, out_len: size_t, out_info: &mut SendInfo,
) -> ssize_t {
if out_len > <ssize_t>::max_value() as usize {
panic!("The provided buffer is too large");
@@ -530,7 +650,13 @@
let out = unsafe { slice::from_raw_parts_mut(out, out_len) };
match conn.send(out) {
- Ok(v) => v as ssize_t,
+ Ok((v, info)) => {
+ out_info.to_len = std_addr_to_c(&info.to, &mut out_info.to);
+
+ std_time_to_c(&info.at, &mut out_info.at);
+
+ v as ssize_t
+ },
Err(e) => e.to_c(),
}
@@ -577,6 +703,17 @@
}
#[no_mangle]
+pub extern fn quiche_conn_stream_priority(
+ conn: &mut Connection, stream_id: u64, urgency: u8, incremental: bool,
+) -> c_int {
+ match conn.stream_priority(stream_id, urgency, incremental) {
+ Ok(_) => 0,
+
+ Err(e) => e.to_c() as c_int,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_conn_stream_shutdown(
conn: &mut Connection, stream_id: u64, direction: Shutdown, err: u64,
) -> c_int {
@@ -599,6 +736,13 @@
}
#[no_mangle]
+pub extern fn quiche_conn_stream_readable(
+ conn: &mut Connection, stream_id: u64,
+) -> bool {
+ conn.stream_readable(stream_id)
+}
+
+#[no_mangle]
pub extern fn quiche_conn_stream_finished(
conn: &mut Connection, stream_id: u64,
) -> bool {
@@ -615,8 +759,19 @@
Box::into_raw(Box::new(conn.writable()))
}
+#[no_mangle]
+pub extern fn quiche_conn_max_send_udp_payload_size(conn: &Connection) -> usize {
+ conn.max_send_udp_payload_size()
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_is_readable(conn: &Connection) -> bool {
+ conn.is_readable()
+}
+
struct AppData(*mut c_void);
unsafe impl Send for AppData {}
+unsafe impl Sync for AppData {}
#[no_mangle]
pub extern fn quiche_conn_stream_init_application_data(
@@ -678,6 +833,37 @@
}
#[no_mangle]
+pub extern fn quiche_conn_trace_id(
+ conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
+) {
+ let trace_id = conn.trace_id();
+
+ *out = trace_id.as_ptr();
+ *out_len = trace_id.len();
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_source_id(
+ conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
+) {
+ let conn_id = conn.source_id();
+ let id = conn_id.as_ref();
+ *out = id.as_ptr();
+ *out_len = id.len();
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_destination_id(
+ conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
+) {
+ let conn_id = conn.destination_id();
+ let id = conn_id.as_ref();
+
+ *out = id.as_ptr();
+ *out_len = id.len();
+}
+
+#[no_mangle]
pub extern fn quiche_conn_application_proto(
conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
) {
@@ -688,6 +874,20 @@
}
#[no_mangle]
+pub extern fn quiche_conn_session(
+ conn: &mut Connection, out: &mut *const u8, out_len: &mut size_t,
+) {
+ match conn.session() {
+ Some(session) => {
+ *out = session.as_ptr();
+ *out_len = session.len();
+ },
+
+ None => *out_len = 0,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_conn_is_established(conn: &mut Connection) -> bool {
conn.is_established()
}
@@ -698,11 +898,59 @@
}
#[no_mangle]
+pub extern fn quiche_conn_is_draining(conn: &mut Connection) -> bool {
+ conn.is_draining()
+}
+
+#[no_mangle]
pub extern fn quiche_conn_is_closed(conn: &mut Connection) -> bool {
conn.is_closed()
}
#[no_mangle]
+pub extern fn quiche_conn_is_timed_out(conn: &mut Connection) -> bool {
+ conn.is_timed_out()
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_peer_error(
+ conn: &mut Connection, is_app: *mut bool, error_code: *mut u64,
+ reason: &mut *const u8, reason_len: &mut size_t,
+) -> bool {
+ match &conn.peer_error {
+ Some(conn_err) => unsafe {
+ *is_app = conn_err.is_app;
+ *error_code = conn_err.error_code;
+ *reason = conn_err.reason.as_ptr();
+ *reason_len = conn_err.reason.len();
+
+ true
+ },
+
+ None => false,
+ }
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_local_error(
+ conn: &mut Connection, is_app: *mut bool, error_code: *mut u64,
+ reason: &mut *const u8, reason_len: &mut size_t,
+) -> bool {
+ match &conn.local_error {
+ Some(conn_err) => unsafe {
+ *is_app = conn_err.is_app;
+ *error_code = conn_err.error_code;
+ *reason = conn_err.reason.as_ptr();
+ *reason_len = conn_err.reason.len();
+
+ true
+ },
+
+ None => false,
+ }
+}
+
+#[no_mangle]
pub extern fn quiche_stream_iter_next(
iter: &mut StreamIter, stream_id: *mut u64,
) -> bool {
@@ -721,12 +969,31 @@
#[repr(C)]
pub struct Stats {
- pub recv: usize,
- pub sent: usize,
- pub lost: usize,
- pub rtt: u64,
- pub cwnd: usize,
- pub delivery_rate: u64,
+ recv: usize,
+ sent: usize,
+ lost: usize,
+ retrans: usize,
+ rtt: u64,
+ cwnd: usize,
+ sent_bytes: u64,
+ lost_bytes: u64,
+ recv_bytes: u64,
+ stream_retrans_bytes: u64,
+ pmtu: usize,
+ delivery_rate: u64,
+ peer_max_idle_timeout: u64,
+ peer_max_udp_payload_size: u64,
+ peer_initial_max_data: u64,
+ peer_initial_max_stream_data_bidi_local: u64,
+ peer_initial_max_stream_data_bidi_remote: u64,
+ peer_initial_max_stream_data_uni: u64,
+ peer_initial_max_streams_bidi: u64,
+ peer_initial_max_streams_uni: u64,
+ peer_ack_delay_exponent: u64,
+ peer_max_ack_delay: u64,
+ peer_disable_active_migration: bool,
+ peer_active_conn_id_limit: u64,
+ peer_max_datagram_frame_size: ssize_t,
}
#[no_mangle]
@@ -736,9 +1003,34 @@
out.recv = stats.recv;
out.sent = stats.sent;
out.lost = stats.lost;
+ out.retrans = stats.retrans;
out.rtt = stats.rtt.as_nanos() as u64;
out.cwnd = stats.cwnd;
+ out.sent_bytes = stats.sent_bytes;
+ out.lost_bytes = stats.lost_bytes;
+ out.recv_bytes = stats.recv_bytes;
+ out.stream_retrans_bytes = stats.stream_retrans_bytes;
+ out.pmtu = stats.pmtu;
out.delivery_rate = stats.delivery_rate;
+ out.peer_max_idle_timeout = stats.peer_max_idle_timeout;
+ out.peer_max_udp_payload_size = stats.peer_max_udp_payload_size;
+ out.peer_initial_max_data = stats.peer_initial_max_data;
+ out.peer_initial_max_stream_data_bidi_local =
+ stats.peer_initial_max_stream_data_bidi_local;
+ out.peer_initial_max_stream_data_bidi_remote =
+ stats.peer_initial_max_stream_data_bidi_remote;
+ out.peer_initial_max_stream_data_uni = stats.peer_initial_max_stream_data_uni;
+ out.peer_initial_max_streams_bidi = stats.peer_initial_max_streams_bidi;
+ out.peer_initial_max_streams_uni = stats.peer_initial_max_streams_uni;
+ out.peer_ack_delay_exponent = stats.peer_ack_delay_exponent;
+ out.peer_max_ack_delay = stats.peer_max_ack_delay;
+ out.peer_disable_active_migration = stats.peer_disable_active_migration;
+ out.peer_active_conn_id_limit = stats.peer_active_conn_id_limit;
+ out.peer_max_datagram_frame_size = match stats.peer_max_datagram_frame_size {
+ None => Error::Done.to_c(),
+
+ Some(v) => v as ssize_t,
+ }
}
#[no_mangle]
@@ -751,6 +1043,39 @@
}
#[no_mangle]
+pub extern fn quiche_conn_dgram_recv_front_len(conn: &Connection) -> ssize_t {
+ match conn.dgram_recv_front_len() {
+ None => Error::Done.to_c(),
+
+ Some(v) => v as ssize_t,
+ }
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_dgram_recv_queue_len(conn: &Connection) -> ssize_t {
+ conn.dgram_recv_queue_len() as ssize_t
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_dgram_recv_queue_byte_size(
+ conn: &Connection,
+) -> ssize_t {
+ conn.dgram_recv_queue_byte_size() as ssize_t
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_dgram_send_queue_len(conn: &Connection) -> ssize_t {
+ conn.dgram_send_queue_len() as ssize_t
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_dgram_send_queue_byte_size(
+ conn: &Connection,
+) -> ssize_t {
+ conn.dgram_send_queue_byte_size() as ssize_t
+}
+
+#[no_mangle]
pub extern fn quiche_conn_dgram_send(
conn: &mut Connection, buf: *const u8, buf_len: size_t,
) -> ssize_t {
@@ -802,3 +1127,79 @@
pub extern fn quiche_conn_free(conn: *mut Connection) {
unsafe { Box::from_raw(conn) };
}
+
+#[no_mangle]
+pub extern fn quiche_conn_peer_streams_left_bidi(conn: &mut Connection) -> u64 {
+ conn.peer_streams_left_bidi()
+}
+
+#[no_mangle]
+pub extern fn quiche_conn_peer_streams_left_uni(conn: &mut Connection) -> u64 {
+ conn.peer_streams_left_uni()
+}
+
+fn std_addr_from_c(addr: &sockaddr, addr_len: socklen_t) -> SocketAddr {
+ unsafe {
+ match addr.sa_family as i32 {
+ AF_INET => {
+ assert!(addr_len as usize == std::mem::size_of::<sockaddr_in>());
+
+ SocketAddr::V4(
+ *(addr as *const _ as *const sockaddr_in as *const _),
+ )
+ },
+
+ AF_INET6 => {
+ assert!(addr_len as usize == std::mem::size_of::<sockaddr_in6>());
+
+ SocketAddr::V6(
+ *(addr as *const _ as *const sockaddr_in6 as *const _),
+ )
+ },
+
+ _ => unimplemented!("unsupported address type"),
+ }
+ }
+}
+
+fn std_addr_to_c(addr: &SocketAddr, out: &mut sockaddr_storage) -> socklen_t {
+ unsafe {
+ match addr {
+ SocketAddr::V4(addr) => {
+ let sa_len = std::mem::size_of::<sockaddr_in>();
+
+ let src = addr as *const _ as *const u8;
+ let dst = out as *mut _ as *mut u8;
+
+ std::ptr::copy_nonoverlapping(src, dst, sa_len);
+
+ sa_len as socklen_t
+ },
+
+ SocketAddr::V6(addr) => {
+ let sa_len = std::mem::size_of::<sockaddr_in6>();
+
+ let src = addr as *const _ as *const u8;
+ let dst = out as *mut _ as *mut u8;
+
+ std::ptr::copy_nonoverlapping(src, dst, sa_len);
+
+ sa_len as socklen_t
+ },
+ }
+ }
+}
+
+#[cfg(not(any(target_os = "macos", target_os = "ios", target_os = "windows")))]
+fn std_time_to_c(time: &std::time::Instant, out: &mut timespec) {
+ unsafe {
+ ptr::copy_nonoverlapping(time as *const _ as *const timespec, out, 1)
+ }
+}
+
+#[cfg(any(target_os = "macos", target_os = "ios", target_os = "windows"))]
+fn std_time_to_c(_time: &std::time::Instant, out: &mut timespec) {
+ // TODO: implement Instant conversion for systems that don't use timespec.
+ out.tv_sec = 0;
+ out.tv_nsec = 0;
+}
diff --git a/src/frame.rs b/src/frame.rs
index b191a42..e4cae0a 100644
--- a/src/frame.rs
+++ b/src/frame.rs
@@ -37,6 +37,13 @@
pub const MAX_STREAM_OVERHEAD: usize = 12;
pub const MAX_STREAM_SIZE: u64 = 1 << 62;
+#[derive(Clone, Debug, PartialEq)]
+pub struct EcnCounts {
+ ect0_count: u64,
+ ect1_count: u64,
+ ecn_ce_count: u64,
+}
+
#[derive(Clone, PartialEq)]
pub enum Frame {
Padding {
@@ -48,6 +55,7 @@
ACK {
ack_delay: u64,
ranges: ranges::RangeSet,
+ ecn_counts: Option<EcnCounts>,
},
ResetStream {
@@ -65,6 +73,11 @@
data: stream::RangeBuf,
},
+ CryptoHeader {
+ offset: u64,
+ length: usize,
+ },
+
NewToken {
token: Vec<u8>,
},
@@ -74,6 +87,13 @@
data: stream::RangeBuf,
},
+ StreamHeader {
+ stream_id: u64,
+ offset: u64,
+ length: usize,
+ fin: bool,
+ },
+
MaxData {
max: u64,
},
@@ -143,6 +163,10 @@
Datagram {
data: Vec<u8>,
},
+
+ DatagramHeader {
+ length: usize,
+ },
}
impl Frame {
@@ -168,7 +192,7 @@
0x01 => Frame::Ping,
- 0x02 => parse_ack_frame(frame_type, b)?,
+ 0x02..=0x03 => parse_ack_frame(frame_type, b)?,
0x04 => Frame::ResetStream {
stream_id: b.get_varint()?,
@@ -319,8 +343,16 @@
b.put_varint(0x01)?;
},
- Frame::ACK { ack_delay, ranges } => {
- b.put_varint(0x02)?;
+ Frame::ACK {
+ ack_delay,
+ ranges,
+ ecn_counts,
+ } => {
+ if ecn_counts.is_none() {
+ b.put_varint(0x02)?;
+ } else {
+ b.put_varint(0x03)?;
+ }
let mut it = ranges.iter().rev();
@@ -343,6 +375,12 @@
smallest_ack = block.start;
}
+
+ if let Some(ecn) = ecn_counts {
+ b.put_varint(ecn.ect0_count)?;
+ b.put_varint(ecn.ect1_count)?;
+ b.put_varint(ecn.ecn_ce_count)?;
+ }
},
Frame::ResetStream {
@@ -368,41 +406,34 @@
},
Frame::Crypto { data } => {
- b.put_varint(0x06)?;
+ encode_crypto_header(data.off() as u64, data.len() as u64, b)?;
- b.put_varint(data.off() as u64)?;
- b.put_varint(data.len() as u64)?;
- b.put_bytes(&data)?;
+ b.put_bytes(data)?;
},
+ Frame::CryptoHeader { .. } => (),
+
Frame::NewToken { token } => {
b.put_varint(0x07)?;
b.put_varint(token.len() as u64)?;
- b.put_bytes(&token)?;
+ b.put_bytes(token)?;
},
Frame::Stream { stream_id, data } => {
- let mut ty: u8 = 0x08;
+ encode_stream_header(
+ *stream_id,
+ data.off() as u64,
+ data.len() as u64,
+ data.fin(),
+ b,
+ )?;
- // Always encode offset
- ty |= 0x04;
-
- // Always encode length
- ty |= 0x02;
-
- if data.fin() {
- ty |= 0x01;
- }
-
- b.put_varint(u64::from(ty))?;
-
- b.put_varint(*stream_id)?;
- b.put_varint(data.off() as u64)?;
- b.put_varint(data.len() as u64)?;
- b.put_bytes(data.as_ref())?;
+ b.put_bytes(data)?;
},
+ Frame::StreamHeader { .. } => (),
+
Frame::MaxData { max } => {
b.put_varint(0x10)?;
@@ -512,16 +543,12 @@
},
Frame::Datagram { data } => {
- let mut ty: u8 = 0x30;
+ encode_dgram_header(data.len() as u64, b)?;
- // Always encode length
- ty |= 0x01;
-
- b.put_varint(u64::from(ty))?;
-
- b.put_varint(data.len() as u64)?;
b.put_bytes(data.as_ref())?;
},
+
+ Frame::DatagramHeader { .. } => (),
}
Ok(before - b.cap())
@@ -533,7 +560,11 @@
Frame::Ping => 1,
- Frame::ACK { ack_delay, ranges } => {
+ Frame::ACK {
+ ack_delay,
+ ranges,
+ ecn_counts,
+ } => {
let mut it = ranges.iter().rev();
let first = it.next().unwrap();
@@ -557,6 +588,12 @@
smallest_ack = block.start;
}
+ if let Some(ecn) = ecn_counts {
+ len += octets::varint_len(ecn.ect0_count) +
+ octets::varint_len(ecn.ect1_count) +
+ octets::varint_len(ecn.ecn_ce_count);
+ }
+
len
},
@@ -583,10 +620,17 @@
Frame::Crypto { data } => {
1 + // frame type
octets::varint_len(data.off() as u64) + // offset
- octets::varint_len(data.len() as u64) + // length
+ 2 + // length, always encode as 2-byte varint
data.len() // data
},
+ Frame::CryptoHeader { offset, length, .. } => {
+ 1 + // frame type
+ octets::varint_len(*offset) + // offset
+ 2 + // length, always encode as 2-byte varint
+ length // data
+ },
+
Frame::NewToken { token } => {
1 + // frame type
octets::varint_len(token.len() as u64) + // token length
@@ -597,10 +641,23 @@
1 + // frame type
octets::varint_len(*stream_id) + // stream_id
octets::varint_len(data.off() as u64) + // offset
- octets::varint_len(data.len() as u64) + // length
+ 2 + // length, always encode as 2-byte varint
data.len() // data
},
+ Frame::StreamHeader {
+ stream_id,
+ offset,
+ length,
+ ..
+ } => {
+ 1 + // frame type
+ octets::varint_len(*stream_id) + // stream_id
+ octets::varint_len(*offset) + // offset
+ 2 + // length, always encode as 2-byte varint
+ length // data
+ },
+
Frame::MaxData { max } => {
1 + // frame type
octets::varint_len(*max) // max
@@ -698,173 +755,210 @@
Frame::Datagram { data } => {
1 + // frame type
- octets::varint_len(data.len() as u64) + // length
+ 2 + // length, always encode as 2-byte varint
data.len() // data
},
+
+ Frame::DatagramHeader { length } => {
+ 1 + // frame type
+ 2 + // length, always encode as 2-byte varint
+ *length // data
+ },
}
}
pub fn ack_eliciting(&self) -> bool {
// Any other frame is ack-eliciting (note the `!`).
- !matches!(self, Frame::Padding { .. } |
- Frame::ACK { .. } |
- Frame::ApplicationClose { .. } |
- Frame::ConnectionClose { .. })
- }
-
- pub fn shrink_for_retransmission(&mut self) {
- if let Frame::Datagram { data } = self {
- *data = Vec::new();
- }
+ !matches!(
+ self,
+ Frame::Padding { .. } |
+ Frame::ACK { .. } |
+ Frame::ApplicationClose { .. } |
+ Frame::ConnectionClose { .. }
+ )
}
#[cfg(feature = "qlog")]
pub fn to_qlog(&self) -> qlog::QuicFrame {
match self {
- Frame::Padding { .. } => qlog::QuicFrame::padding(),
+ Frame::Padding { .. } => qlog::QuicFrame::Padding,
- Frame::Ping { .. } => qlog::QuicFrame::ping(),
+ Frame::Ping { .. } => qlog::QuicFrame::Ping,
- Frame::ACK { ack_delay, ranges } => {
+ Frame::ACK {
+ ack_delay,
+ ranges,
+ ecn_counts,
+ } => {
let ack_ranges =
ranges.iter().map(|r| (r.start, r.end - 1)).collect();
- qlog::QuicFrame::ack(
- Some(ack_delay.to_string()),
- Some(ack_ranges),
- None,
- None,
- None,
- )
+
+ let (ect0, ect1, ce) = match ecn_counts {
+ Some(ecn) => (
+ Some(ecn.ect0_count),
+ Some(ecn.ect1_count),
+ Some(ecn.ecn_ce_count),
+ ),
+
+ None => (None, None, None),
+ };
+
+ qlog::QuicFrame::Ack {
+ ack_delay: Some(*ack_delay as f32 / 1000.0),
+ acked_ranges: Some(ack_ranges),
+ ect1,
+ ect0,
+ ce,
+ }
},
Frame::ResetStream {
stream_id,
error_code,
final_size,
- } => qlog::QuicFrame::reset_stream(
- stream_id.to_string(),
- *error_code,
- final_size.to_string(),
- ),
+ } => qlog::QuicFrame::ResetStream {
+ stream_id: *stream_id,
+ error_code: *error_code,
+ final_size: *final_size,
+ },
Frame::StopSending {
stream_id,
error_code,
- } =>
- qlog::QuicFrame::stop_sending(stream_id.to_string(), *error_code),
+ } => qlog::QuicFrame::StopSending {
+ stream_id: *stream_id,
+ error_code: *error_code,
+ },
- Frame::Crypto { data } => qlog::QuicFrame::crypto(
- data.off().to_string(),
- data.len().to_string(),
- ),
+ Frame::Crypto { data } => qlog::QuicFrame::Crypto {
+ offset: data.off(),
+ length: data.len() as u64,
+ },
- Frame::NewToken { token } => qlog::QuicFrame::new_token(
- token.len().to_string(),
- "TODO: https://github.com/quiclog/internet-drafts/issues/36"
- .to_string(),
- ),
+ Frame::CryptoHeader { offset, length } => qlog::QuicFrame::Crypto {
+ offset: *offset,
+ length: *length as u64,
+ },
- Frame::Stream { stream_id, data } => qlog::QuicFrame::stream(
- stream_id.to_string(),
- data.off().to_string(),
- data.len().to_string(),
- data.fin(),
- None,
- ),
+ Frame::NewToken { token } => qlog::QuicFrame::NewToken {
+ length: token.len().to_string(),
+ token: "TODO: update to qlog-02 token format".to_string(),
+ },
- Frame::MaxData { max } => qlog::QuicFrame::max_data(max.to_string()),
+ Frame::Stream { stream_id, data } => qlog::QuicFrame::Stream {
+ stream_id: *stream_id,
+ offset: data.off() as u64,
+ length: data.len() as u64,
+ fin: data.fin(),
+ raw: None,
+ },
+
+ Frame::StreamHeader {
+ stream_id,
+ offset,
+ length,
+ fin,
+ } => qlog::QuicFrame::Stream {
+ stream_id: *stream_id,
+ offset: *offset,
+ length: *length as u64,
+ fin: *fin,
+ raw: None,
+ },
+
+ Frame::MaxData { max } => qlog::QuicFrame::MaxData { maximum: *max },
Frame::MaxStreamData { stream_id, max } =>
- qlog::QuicFrame::max_stream_data(
- stream_id.to_string(),
- max.to_string(),
- ),
+ qlog::QuicFrame::MaxStreamData {
+ stream_id: *stream_id,
+ maximum: *max,
+ },
- Frame::MaxStreamsBidi { max } => qlog::QuicFrame::max_streams(
- qlog::StreamType::Bidirectional,
- max.to_string(),
- ),
+ Frame::MaxStreamsBidi { max } => qlog::QuicFrame::MaxStreams {
+ stream_type: qlog::StreamType::Bidirectional,
+ maximum: *max,
+ },
- Frame::MaxStreamsUni { max } => qlog::QuicFrame::max_streams(
- qlog::StreamType::Unidirectional,
- max.to_string(),
- ),
+ Frame::MaxStreamsUni { max } => qlog::QuicFrame::MaxStreams {
+ stream_type: qlog::StreamType::Unidirectional,
+ maximum: *max,
+ },
Frame::DataBlocked { limit } =>
- qlog::QuicFrame::data_blocked(limit.to_string()),
+ qlog::QuicFrame::DataBlocked { limit: *limit },
Frame::StreamDataBlocked { stream_id, limit } =>
- qlog::QuicFrame::stream_data_blocked(
- stream_id.to_string(),
- limit.to_string(),
- ),
+ qlog::QuicFrame::StreamDataBlocked {
+ stream_id: *stream_id,
+ limit: *limit,
+ },
Frame::StreamsBlockedBidi { limit } =>
- qlog::QuicFrame::streams_blocked(
- qlog::StreamType::Bidirectional,
- limit.to_string(),
- ),
+ qlog::QuicFrame::StreamsBlocked {
+ stream_type: qlog::StreamType::Bidirectional,
+ limit: *limit,
+ },
Frame::StreamsBlockedUni { limit } =>
- qlog::QuicFrame::streams_blocked(
- qlog::StreamType::Unidirectional,
- limit.to_string(),
- ),
+ qlog::QuicFrame::StreamsBlocked {
+ stream_type: qlog::StreamType::Unidirectional,
+ limit: *limit,
+ },
Frame::NewConnectionId {
seq_num,
retire_prior_to,
conn_id,
..
- } => qlog::QuicFrame::new_connection_id(
- seq_num.to_string(),
- retire_prior_to.to_string(),
- conn_id.len() as u64,
- "TODO: https://github.com/quiclog/internet-drafts/issues/36"
- .to_string(),
- "TODO: https://github.com/quiclog/internet-drafts/issues/36"
- .to_string(),
- ),
+ } => qlog::QuicFrame::NewConnectionId {
+ sequence_number: *seq_num as u32,
+ retire_prior_to: *retire_prior_to as u32,
+ length: conn_id.len() as u64,
+ connection_id: "TODO: update to qlog-02 token format".to_string(),
+ reset_token: "TODO: update to qlog-02 token format".to_string(),
+ },
Frame::RetireConnectionId { seq_num } =>
- qlog::QuicFrame::retire_connection_id(seq_num.to_string()),
+ qlog::QuicFrame::RetireConnectionId {
+ sequence_number: *seq_num as u32,
+ },
- Frame::PathChallenge { .. } => qlog::QuicFrame::path_challenge(Some(
- "TODO: https://github.com/quiclog/internet-drafts/issues/36"
- .to_string(),
- )),
+ Frame::PathChallenge { .. } =>
+ qlog::QuicFrame::PathChallenge { data: None },
- Frame::PathResponse { .. } => qlog::QuicFrame::path_response(Some(
- "TODO: https://github.com/quiclog/internet-drafts/issues/36"
- .to_string(),
- )),
+ Frame::PathResponse { .. } =>
+ qlog::QuicFrame::PathResponse { data: None },
Frame::ConnectionClose {
error_code, reason, ..
- } => qlog::QuicFrame::connection_close(
- qlog::ErrorSpace::TransportError,
- *error_code,
- *error_code,
- String::from_utf8(reason.clone()).unwrap(),
- Some(
- "TODO: https://github.com/quiclog/internet-drafts/issues/36"
- .to_string(),
- ),
- ),
+ } => qlog::QuicFrame::ConnectionClose {
+ error_space: qlog::ErrorSpace::TransportError,
+ error_code: *error_code,
+ raw_error_code: None, // raw error is no different for us
+ reason: Some(String::from_utf8(reason.clone()).unwrap()),
+ trigger_frame_type: None, // don't know trigger type
+ },
Frame::ApplicationClose { error_code, reason } =>
- qlog::QuicFrame::connection_close(
- qlog::ErrorSpace::ApplicationError,
- *error_code,
- *error_code,
- String::from_utf8(reason.clone()).unwrap(),
- None, /* Application variant of the frame has no trigger
- * frame type */
- ),
+ qlog::QuicFrame::ConnectionClose {
+ error_space: qlog::ErrorSpace::ApplicationError,
+ error_code: *error_code,
+ raw_error_code: None, // raw error is no different for us
+ reason: Some(String::from_utf8(reason.clone()).unwrap()),
+ trigger_frame_type: None, // don't know trigger type
+ },
- Frame::HandshakeDone => qlog::QuicFrame::handshake_done(),
+ Frame::HandshakeDone => qlog::QuicFrame::HandshakeDone,
- Frame::Datagram { .. } => qlog::QuicFrame::unknown(0x30),
+ Frame::Datagram { data } => qlog::QuicFrame::Datagram {
+ length: data.len() as u64,
+ raw: None,
+ },
+
+ Frame::DatagramHeader { length } => qlog::QuicFrame::Datagram {
+ length: *length as u64,
+ raw: None,
+ },
}
}
}
@@ -880,8 +974,16 @@
write!(f, "PING")?;
},
- Frame::ACK { ack_delay, ranges } => {
- write!(f, "ACK delay={} blocks={:?}", ack_delay, ranges)?;
+ Frame::ACK {
+ ack_delay,
+ ranges,
+ ecn_counts,
+ } => {
+ write!(
+ f,
+ "ACK delay={} blocks={:?} ecn_counts={:?}",
+ ack_delay, ranges, ecn_counts
+ )?;
},
Frame::ResetStream {
@@ -911,6 +1013,10 @@
write!(f, "CRYPTO off={} len={}", data.off(), data.len())?;
},
+ Frame::CryptoHeader { offset, length } => {
+ write!(f, "CRYPTO off={} len={}", offset, length)?;
+ },
+
Frame::NewToken { .. } => {
write!(f, "NEW_TOKEN (TODO)")?;
},
@@ -926,6 +1032,19 @@
)?;
},
+ Frame::StreamHeader {
+ stream_id,
+ offset,
+ length,
+ fin,
+ } => {
+ write!(
+ f,
+ "STREAM id={} off={} len={} fin={}",
+ stream_id, offset, length, fin
+ )?;
+ },
+
Frame::MaxData { max } => {
write!(f, "MAX_DATA max={}", max)?;
},
@@ -1003,7 +1122,11 @@
},
Frame::Datagram { data } => {
- write!(f, "DATAGRAM len={}", data.len(),)?;
+ write!(f, "DATAGRAM len={}", data.len())?;
+ },
+
+ Frame::DatagramHeader { length } => {
+ write!(f, "DATAGRAM len={}", length)?;
},
}
@@ -1011,7 +1134,9 @@
}
}
-fn parse_ack_frame(_ty: u64, b: &mut octets::Octets) -> Result<Frame> {
+fn parse_ack_frame(ty: u64, b: &mut octets::Octets) -> Result<Frame> {
+ let first = ty as u8;
+
let largest_ack = b.get_varint()?;
let ack_delay = b.get_varint()?;
let block_count = b.get_varint()?;
@@ -1048,7 +1173,77 @@
ranges.insert(smallest_ack..largest_ack + 1);
}
- Ok(Frame::ACK { ack_delay, ranges })
+ let ecn_counts = if first & 0x01 != 0 {
+ let ecn = EcnCounts {
+ ect0_count: b.get_varint()?,
+ ect1_count: b.get_varint()?,
+ ecn_ce_count: b.get_varint()?,
+ };
+
+ Some(ecn)
+ } else {
+ None
+ };
+
+ Ok(Frame::ACK {
+ ack_delay,
+ ranges,
+ ecn_counts,
+ })
+}
+
+pub fn encode_crypto_header(
+ offset: u64, length: u64, b: &mut octets::OctetsMut,
+) -> Result<()> {
+ b.put_varint(0x06)?;
+
+ b.put_varint(offset)?;
+
+ // Always encode length field as 2-byte varint.
+ b.put_varint_with_len(length, 2)?;
+
+ Ok(())
+}
+
+pub fn encode_stream_header(
+ stream_id: u64, offset: u64, length: u64, fin: bool,
+ b: &mut octets::OctetsMut,
+) -> Result<()> {
+ let mut ty: u8 = 0x08;
+
+ // Always encode offset.
+ ty |= 0x04;
+
+ // Always encode length.
+ ty |= 0x02;
+
+ if fin {
+ ty |= 0x01;
+ }
+
+ b.put_varint(u64::from(ty))?;
+
+ b.put_varint(stream_id)?;
+ b.put_varint(offset)?;
+
+ // Always encode length field as 2-byte varint.
+ b.put_varint_with_len(length, 2)?;
+
+ Ok(())
+}
+
+pub fn encode_dgram_header(length: u64, b: &mut octets::OctetsMut) -> Result<()> {
+ let mut ty: u8 = 0x30;
+
+ // Always encode length
+ ty |= 0x01;
+
+ b.put_varint(u64::from(ty))?;
+
+ // Always encode length field as 2-byte varint.
+ b.put_varint_with_len(length, 2)?;
+
+ Ok(())
}
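The helpers above always reserve two bytes for the length field via put_varint_with_len(), which keeps the encoded header size fixed regardless of the payload length; the wire-length expectations later in this file each grow by one byte (18→19, 19→20, 23→24, 14→15) for the same reason. As a minimal standalone sketch of the 2-byte QUIC varint form being produced here (assuming the value fits in 14 bits, per the variable-length integer encoding):

fn put_varint2(v: u64, out: &mut [u8; 2]) {
    // A 2-byte QUIC varint carries at most 14 bits; the top two bits of the
    // first byte are the 0b01 length prefix.
    assert!(v < (1 << 14));

    out[0] = 0x40 | ((v >> 8) as u8);
    out[1] = (v & 0xff) as u8;
}

fn main() {
    let mut buf = [0u8; 2];

    // A 12-byte DATAGRAM payload length encodes as 0x40 0x0c, which is why
    // the datagram test below now expects 15 bytes on the wire
    // (1 type + 2 length + 12 payload) instead of 14.
    put_varint2(12, &mut buf);
    assert_eq!(buf, [0x40, 0x0c]);
}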
fn parse_stream_frame(ty: u64, b: &mut octets::Octets) -> Result<Frame> {
@@ -1166,6 +1361,7 @@
let frame = Frame::ACK {
ack_delay: 874_656_534,
ranges,
+ ecn_counts: None,
};
let wire_len = {
@@ -1189,6 +1385,48 @@
}
#[test]
+ fn ack_ecn() {
+ let mut d = [42; 128];
+
+ let mut ranges = ranges::RangeSet::default();
+ ranges.insert(4..7);
+ ranges.insert(9..12);
+ ranges.insert(15..19);
+ ranges.insert(3000..5000);
+
+ let ecn_counts = Some(EcnCounts {
+ ect0_count: 100,
+ ect1_count: 200,
+ ecn_ce_count: 300,
+ });
+
+ let frame = Frame::ACK {
+ ack_delay: 874_656_534,
+ ranges,
+ ecn_counts,
+ };
+
+ let wire_len = {
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+ frame.to_bytes(&mut b).unwrap()
+ };
+
+ assert_eq!(wire_len, 23);
+
+ let mut b = octets::Octets::with_slice(&d);
+ assert_eq!(Frame::from_bytes(&mut b, packet::Type::Short), Ok(frame));
+
+ let mut b = octets::Octets::with_slice(&d);
+ assert!(Frame::from_bytes(&mut b, packet::Type::Initial).is_ok());
+
+ let mut b = octets::Octets::with_slice(&d);
+ assert!(Frame::from_bytes(&mut b, packet::Type::ZeroRTT).is_err());
+
+ let mut b = octets::Octets::with_slice(&d);
+ assert!(Frame::from_bytes(&mut b, packet::Type::Handshake).is_ok());
+ }
+
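For reference, the 23-byte expectation in ack_ecn can be accounted for roughly as follows, assuming minimal varint sizes for each field (an illustrative breakdown, not code from the patch):

// Each term is the varint size of one encoded field.
const ACK_ECN_WIRE_LEN: usize = 1 // frame type (ACK with ECN counts)
    + 2  // largest acknowledged (4999)
    + 4  // ACK delay (874_656_534)
    + 1  // range count (3)
    + 2  // first range length (4999 - 3000 = 1999)
    + 7  // remaining gap/length pairs for 15..19, 9..12 and 4..7
    + 6; // ECN counts 100, 200 and 300, each a 2-byte varint

fn main() {
    assert_eq!(ACK_ECN_WIRE_LEN, 23);
}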
+ #[test]
fn reset_stream() {
let mut d = [42; 128];
@@ -1262,7 +1500,7 @@
frame.to_bytes(&mut b).unwrap()
};
- assert_eq!(wire_len, 18);
+ assert_eq!(wire_len, 19);
let mut b = octets::Octets::with_slice(&d);
assert_eq!(Frame::from_bytes(&mut b, packet::Type::Short), Ok(frame));
@@ -1321,7 +1559,7 @@
frame.to_bytes(&mut b).unwrap()
};
- assert_eq!(wire_len, 19);
+ assert_eq!(wire_len, 20);
let mut b = octets::Octets::with_slice(&d);
assert_eq!(Frame::from_bytes(&mut b, packet::Type::Short), Ok(frame));
@@ -1352,7 +1590,7 @@
frame.to_bytes(&mut b).unwrap()
};
- assert_eq!(wire_len, 23);
+ assert_eq!(wire_len, 24);
let mut b = octets::Octets::with_slice(&d);
assert_eq!(
@@ -1779,14 +2017,14 @@
let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
- let mut frame = Frame::Datagram { data: data.clone() };
+ let frame = Frame::Datagram { data: data.clone() };
let wire_len = {
let mut b = octets::OctetsMut::with_slice(&mut d);
frame.to_bytes(&mut b).unwrap()
};
- assert_eq!(wire_len, 14);
+ assert_eq!(wire_len, 15);
let mut b = octets::Octets::with_slice(&mut d);
assert_eq!(
@@ -1810,15 +2048,5 @@
};
assert_eq!(frame_data, data);
-
- frame.shrink_for_retransmission();
-
- let frame_data = match &frame {
- Frame::Datagram { data } => data.clone(),
-
- _ => unreachable!(),
- };
-
- assert_eq!(frame_data.len(), 0);
}
}
diff --git a/src/h3/ffi.rs b/src/h3/ffi.rs
index 2d278c7..a11184a 100644
--- a/src/h3/ffi.rs
+++ b/src/h3/ffi.rs
@@ -27,7 +27,6 @@
use std::ffi;
use std::ptr;
use std::slice;
-use std::str;
use libc::c_char;
use libc::c_int;
@@ -49,10 +48,10 @@
}
#[no_mangle]
-pub extern fn quiche_h3_config_set_max_header_list_size(
+pub extern fn quiche_h3_config_set_max_field_section_size(
config: &mut h3::Config, v: u64,
) {
- config.set_max_header_list_size(v);
+ config.set_max_field_section_size(v);
}
#[no_mangle]
@@ -115,6 +114,8 @@
h3::Event::Datagram { .. } => 3,
h3::Event::GoAway { .. } => 4,
+
+ h3::Event::Reset { .. } => 5,
}
}
@@ -264,6 +265,13 @@
}
#[no_mangle]
+pub extern fn quiche_h3_dgram_enabled_by_peer(
+ conn: &h3::Connection, quic_conn: &Connection,
+) -> bool {
+ conn.dgram_enabled_by_peer(quic_conn)
+}
+
+#[no_mangle]
pub extern fn quiche_h3_send_dgram(
conn: &mut h3::Connection, quic_conn: &mut Connection, flow_id: u64,
data: *const u8, data_len: size_t,
@@ -317,15 +325,8 @@
for h in headers {
out.push({
- let name = unsafe {
- let slice = slice::from_raw_parts(h.name, h.name_len);
- str::from_utf8_unchecked(slice)
- };
-
- let value = unsafe {
- let slice = slice::from_raw_parts(h.value, h.value_len);
- str::from_utf8_unchecked(slice)
- };
+ let name = unsafe { slice::from_raw_parts(h.name, h.name_len) };
+ let value = unsafe { slice::from_raw_parts(h.value, h.value_len) };
h3::HeaderRef::new(name, value)
});
diff --git a/src/h3/frame.rs b/src/h3/frame.rs
index bee2b3b..8b80024 100644
--- a/src/h3/frame.rs
+++ b/src/h3/frame.rs
@@ -37,8 +37,12 @@
pub const MAX_PUSH_FRAME_TYPE_ID: u64 = 0xD;
const SETTINGS_QPACK_MAX_TABLE_CAPACITY: u64 = 0x1;
-const SETTINGS_MAX_HEADER_LIST_SIZE: u64 = 0x6;
+const SETTINGS_MAX_FIELD_SECTION_SIZE: u64 = 0x6;
const SETTINGS_QPACK_BLOCKED_STREAMS: u64 = 0x7;
+const SETTINGS_H3_DATAGRAM: u64 = 0x276;
+
+// Permit between 16 maximally-encoded and 128 minimally-encoded SETTINGS.
+const MAX_SETTINGS_PAYLOAD_SIZE: usize = 256;
#[derive(Clone, PartialEq)]
pub enum Frame {
@@ -55,9 +59,10 @@
},
Settings {
- max_header_list_size: Option<u64>,
+ max_field_section_size: Option<u64>,
qpack_max_table_capacity: Option<u64>,
qpack_blocked_streams: Option<u64>,
+ h3_datagram: Option<u64>,
grease: Option<(u64, u64)>,
},
@@ -143,15 +148,16 @@
},
Frame::Settings {
- max_header_list_size,
+ max_field_section_size,
qpack_max_table_capacity,
qpack_blocked_streams,
+ h3_datagram,
grease,
} => {
let mut len = 0;
- if let Some(val) = max_header_list_size {
- len += octets::varint_len(SETTINGS_MAX_HEADER_LIST_SIZE);
+ if let Some(val) = max_field_section_size {
+ len += octets::varint_len(SETTINGS_MAX_FIELD_SECTION_SIZE);
len += octets::varint_len(*val);
}
@@ -165,6 +171,11 @@
len += octets::varint_len(*val);
}
+ if let Some(val) = h3_datagram {
+ len += octets::varint_len(SETTINGS_H3_DATAGRAM);
+ len += octets::varint_len(*val);
+ }
+
if let Some(val) = grease {
len += octets::varint_len(val.0);
len += octets::varint_len(val.1);
@@ -173,8 +184,8 @@
b.put_varint(SETTINGS_FRAME_TYPE_ID)?;
b.put_varint(len as u64)?;
- if let Some(val) = max_header_list_size {
- b.put_varint(SETTINGS_MAX_HEADER_LIST_SIZE)?;
+ if let Some(val) = max_field_section_size {
+ b.put_varint(SETTINGS_MAX_FIELD_SECTION_SIZE)?;
b.put_varint(*val as u64)?;
}
@@ -188,6 +199,11 @@
b.put_varint(*val as u64)?;
}
+ if let Some(val) = h3_datagram {
+ b.put_varint(SETTINGS_H3_DATAGRAM)?;
+ b.put_varint(*val as u64)?;
+ }
+
if let Some(val) = grease {
b.put_varint(val.0)?;
b.put_varint(val.1)?;
@@ -243,12 +259,12 @@
},
Frame::Settings {
- max_header_list_size,
+ max_field_section_size,
qpack_max_table_capacity,
qpack_blocked_streams,
..
} => {
- write!(f, "SETTINGS max_headers={:?}, qpack_max_table={:?}, qpack_blocked={:?} ", max_header_list_size, qpack_max_table_capacity, qpack_blocked_streams)?;
+ write!(f, "SETTINGS max_field_section={:?}, qpack_max_table={:?}, qpack_blocked={:?} ", max_field_section_size, qpack_max_table_capacity, qpack_blocked_streams)?;
},
Frame::PushPromise {
@@ -283,9 +299,15 @@
fn parse_settings_frame(
b: &mut octets::Octets, settings_length: usize,
) -> Result<Frame> {
- let mut max_header_list_size = None;
+ let mut max_field_section_size = None;
let mut qpack_max_table_capacity = None;
let mut qpack_blocked_streams = None;
+ let mut h3_datagram = None;
+
+ // Reject SETTINGS frames that are too long.
+ if settings_length > MAX_SETTINGS_PAYLOAD_SIZE {
+ return Err(super::Error::ExcessiveLoad);
+ }
while b.off() < settings_length {
let setting_ty = b.get_varint()?;
@@ -296,23 +318,36 @@
qpack_max_table_capacity = Some(settings_val);
},
- SETTINGS_MAX_HEADER_LIST_SIZE => {
- max_header_list_size = Some(settings_val);
+ SETTINGS_MAX_FIELD_SECTION_SIZE => {
+ max_field_section_size = Some(settings_val);
},
SETTINGS_QPACK_BLOCKED_STREAMS => {
qpack_blocked_streams = Some(settings_val);
},
+ SETTINGS_H3_DATAGRAM => {
+ if settings_val > 1 {
+ return Err(super::Error::SettingsError);
+ }
+
+ h3_datagram = Some(settings_val);
+ },
+
+ // Reserved values overlap with HTTP/2 and MUST be rejected
+ 0x0 | 0x2 | 0x3 | 0x4 | 0x5 =>
+ return Err(super::Error::SettingsError),
+
// Unknown Settings parameters must be ignored.
_ => (),
}
}
Ok(Frame::Settings {
- max_header_list_size,
+ max_field_section_size,
qpack_max_table_capacity,
qpack_blocked_streams,
+ h3_datagram,
grease: None,
})
}
@@ -422,13 +457,14 @@
let mut d = [42; 128];
let frame = Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: Some(0),
grease: None,
};
- let frame_payload_len = 6;
+ let frame_payload_len = 9;
let frame_header_len = 2;
let wire_len = {
@@ -454,21 +490,23 @@
let mut d = [42; 128];
let frame = Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: Some(0),
grease: Some((33, 33)),
};
// Frame parsing will always ignore GREASE values.
let frame_parsed = Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: Some(0),
grease: None,
};
- let frame_payload_len = 8;
+ let frame_payload_len = 11;
let frame_header_len = 2;
let wire_len = {
@@ -494,9 +532,10 @@
let mut d = [42; 128];
let frame = Frame::Settings {
- max_header_list_size: Some(1024),
+ max_field_section_size: Some(1024),
qpack_max_table_capacity: None,
qpack_blocked_streams: None,
+ h3_datagram: None,
grease: None,
};
@@ -522,13 +561,79 @@
}
#[test]
+ fn settings_h3_dgram_only() {
+ let mut d = [42; 128];
+
+ let frame = Frame::Settings {
+ max_field_section_size: None,
+ qpack_max_table_capacity: None,
+ qpack_blocked_streams: None,
+ h3_datagram: Some(1),
+ grease: None,
+ };
+
+ let frame_payload_len = 3;
+ let frame_header_len = 2;
+
+ let wire_len = {
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+ frame.to_bytes(&mut b).unwrap()
+ };
+
+ assert_eq!(wire_len, frame_header_len + frame_payload_len);
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len as u64,
+ &d[frame_header_len..]
+ )
+ .unwrap(),
+ frame
+ );
+ }
+
+ #[test]
+ fn settings_h3_dgram_bad() {
+ let mut d = [42; 128];
+
+ let frame = Frame::Settings {
+ max_field_section_size: None,
+ qpack_max_table_capacity: None,
+ qpack_blocked_streams: None,
+ h3_datagram: Some(5),
+ grease: None,
+ };
+
+ let frame_payload_len = 3;
+ let frame_header_len = 2;
+
+ let wire_len = {
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+ frame.to_bytes(&mut b).unwrap()
+ };
+
+ assert_eq!(wire_len, frame_header_len + frame_payload_len);
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len as u64,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::SettingsError)
+ );
+ }
+
+ #[test]
fn settings_qpack_only() {
let mut d = [42; 128];
let frame = Frame::Settings {
- max_header_list_size: None,
+ max_field_section_size: None,
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: None,
grease: None,
};
@@ -554,6 +659,99 @@
}
#[test]
+ fn settings_h2_prohibited() {
+ // We need to test the prohibited values (0x0 | 0x2 | 0x3 | 0x4 | 0x5)
+ // but the quiche API doesn't support that, so use a manually created
+ // frame data buffer where d[frame_header_len] is the SETTING type field.
+ let frame_payload_len = 2u64;
+ let frame_header_len = 2;
+ let mut d = [
+ SETTINGS_FRAME_TYPE_ID as u8,
+ frame_payload_len as u8,
+ 0x0,
+ 1,
+ ];
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::SettingsError)
+ );
+
+ d[frame_header_len] = 0x2;
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::SettingsError)
+ );
+
+ d[frame_header_len] = 0x3;
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::SettingsError)
+ );
+
+ d[frame_header_len] = 0x4;
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::SettingsError)
+ );
+
+ d[frame_header_len] = 0x5;
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::SettingsError)
+ );
+ }
+
+ #[test]
+ fn settings_too_big() {
+ // We need to test a SETTINGS frame that exceeds
+ // MAX_SETTINGS_PAYLOAD_SIZE, so just craft a special buffer that looks
+ // like the frame. The payload content doesn't matter since quiche
+ // should abort before reading it.
+ let frame_payload_len = MAX_SETTINGS_PAYLOAD_SIZE + 1;
+ let frame_header_len = 2;
+ let d = [
+ SETTINGS_FRAME_TYPE_ID as u8,
+ frame_payload_len as u8,
+ 0x1,
+ 1,
+ ];
+
+ assert_eq!(
+ Frame::from_bytes(
+ SETTINGS_FRAME_TYPE_ID,
+ frame_payload_len as u64,
+ &d[frame_header_len..]
+ ),
+ Err(crate::h3::Error::ExcessiveLoad)
+ );
+ }
+
+ #[test]
fn push_promise() {
let mut d = [42; 128];
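The SETTINGS changes above add the H3_DATAGRAM identifier (0x276), reject the reserved HTTP/2 identifiers and cap the payload at MAX_SETTINGS_PAYLOAD_SIZE. The updated payload lengths in the tests follow directly from the varint sizes involved; a sketch of the arithmetic for the four zero-valued settings (frame_payload_len = 9):

// Each setting is an identifier varint followed by a value varint.
const SETTINGS_PAYLOAD: usize = (1 + 1) // QPACK_MAX_TABLE_CAPACITY (0x1) = 0
    + (1 + 1)  // MAX_FIELD_SECTION_SIZE (0x6) = 0
    + (1 + 1)  // QPACK_BLOCKED_STREAMS (0x7) = 0
    + (2 + 1); // H3_DATAGRAM (0x276, a 2-byte varint identifier) = 0

fn main() {
    assert_eq!(SETTINGS_PAYLOAD, 9);

    // The GREASE pair (33, 33) adds two more 1-byte varints, matching the
    // 11-byte expectation in the grease test.
    assert_eq!(SETTINGS_PAYLOAD + 2, 11);
}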
diff --git a/src/h3/mod.rs b/src/h3/mod.rs
index 6248688..38adcc9 100644
--- a/src/h3/mod.rs
+++ b/src/h3/mod.rs
@@ -59,8 +59,9 @@
//!
//! ```no_run
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::connect(None, &scid, &mut config).unwrap();
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config).unwrap();
//! # let h3_config = quiche::h3::Config::new()?;
//! let h3_conn = quiche::h3::Connection::with_transport(&mut conn, &h3_config)?;
//! # Ok::<(), quiche::h3::Error>(())
@@ -74,16 +75,17 @@
//!
//! ```no_run
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::connect(None, &scid, &mut config).unwrap();
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let to = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::connect(None, &scid, to, &mut config).unwrap();
//! # let h3_config = quiche::h3::Config::new()?;
//! # let mut h3_conn = quiche::h3::Connection::with_transport(&mut conn, &h3_config)?;
//! let req = vec![
-//! quiche::h3::Header::new(":method", "GET"),
-//! quiche::h3::Header::new(":scheme", "https"),
-//! quiche::h3::Header::new(":authority", "quic.tech"),
-//! quiche::h3::Header::new(":path", "/"),
-//! quiche::h3::Header::new("user-agent", "quiche"),
+//! quiche::h3::Header::new(b":method", b"GET"),
+//! quiche::h3::Header::new(b":scheme", b"https"),
+//! quiche::h3::Header::new(b":authority", b"quic.tech"),
+//! quiche::h3::Header::new(b":path", b"/"),
+//! quiche::h3::Header::new(b"user-agent", b"quiche"),
//! ];
//!
//! h3_conn.send_request(&mut conn, &req, true)?;
@@ -95,16 +97,17 @@
//!
//! ```no_run
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::connect(None, &scid, &mut config).unwrap();
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let to = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::connect(None, &scid, to, &mut config).unwrap();
//! # let h3_config = quiche::h3::Config::new()?;
//! # let mut h3_conn = quiche::h3::Connection::with_transport(&mut conn, &h3_config)?;
//! let req = vec![
-//! quiche::h3::Header::new(":method", "GET"),
-//! quiche::h3::Header::new(":scheme", "https"),
-//! quiche::h3::Header::new(":authority", "quic.tech"),
-//! quiche::h3::Header::new(":path", "/"),
-//! quiche::h3::Header::new("user-agent", "quiche"),
+//! quiche::h3::Header::new(b":method", b"GET"),
+//! quiche::h3::Header::new(b":scheme", b"https"),
+//! quiche::h3::Header::new(b":authority", b"quic.tech"),
+//! quiche::h3::Header::new(b":path", b"/"),
+//! quiche::h3::Header::new(b"user-agent", b"quiche"),
//! ];
//!
//! let stream_id = h3_conn.send_request(&mut conn, &req, false)?;
@@ -125,8 +128,9 @@
//! use quiche::h3::NameValue;
//!
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config).unwrap();
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config).unwrap();
//! # let h3_config = quiche::h3::Config::new()?;
//! # let mut h3_conn = quiche::h3::Connection::with_transport(&mut conn, &h3_config)?;
//! loop {
@@ -135,15 +139,15 @@
//! let mut headers = list.into_iter();
//!
//! // Look for the request's method.
-//! let method = headers.find(|h| h.name() == ":method").unwrap();
+//! let method = headers.find(|h| h.name() == b":method").unwrap();
//!
//! // Look for the request's path.
-//! let path = headers.find(|h| h.name() == ":path").unwrap();
+//! let path = headers.find(|h| h.name() == b":path").unwrap();
//!
-//! if method.value() == "GET" && path.value() == "/" {
+//! if method.value() == b"GET" && path.value() == b"/" {
//! let resp = vec![
-//! quiche::h3::Header::new(":status", &200.to_string()),
-//! quiche::h3::Header::new("server", "quiche"),
+//! quiche::h3::Header::new(b":status", 200.to_string().as_bytes()),
+//! quiche::h3::Header::new(b"server", b"quiche"),
//! ];
//!
//! h3_conn.send_response(&mut conn, stream_id, &resp, false)?;
@@ -160,6 +164,10 @@
//! // Peer terminated stream, handle it.
//! },
//!
+//! Ok((stream_id, quiche::h3::Event::Reset(err))) => {
+//! // Peer reset the stream, handle it.
+//! },
+//!
//! Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
//!
//! Ok((goaway_id, quiche::h3::Event::GoAway)) => {
@@ -186,22 +194,25 @@
//! use quiche::h3::NameValue;
//!
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION).unwrap();
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::connect(None, &scid, &mut config).unwrap();
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let to = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::connect(None, &scid, to, &mut config).unwrap();
//! # let h3_config = quiche::h3::Config::new()?;
//! # let mut h3_conn = quiche::h3::Connection::with_transport(&mut conn, &h3_config)?;
//! loop {
//! match h3_conn.poll(&mut conn) {
//! Ok((stream_id, quiche::h3::Event::Headers{list, has_body})) => {
-//! let status = list.iter().find(|h| h.name() == ":status").unwrap();
+//! let status = list.iter().find(|h| h.name() == b":status").unwrap();
//! println!("Received {} response on stream {}",
-//! status.value(), stream_id);
+//! std::str::from_utf8(status.value()).unwrap(),
+//! stream_id);
//! },
//!
//! Ok((stream_id, quiche::h3::Event::Data)) => {
//! let mut body = vec![0; 4096];
//!
-//! if let Ok(read) =
+//! // Consume all body data received on the stream.
+//! while let Ok(read) =
//! h3_conn.recv_body(&mut conn, stream_id, &mut body)
//! {
//! println!("Received {} bytes of payload on stream {}",
@@ -213,6 +224,10 @@
//! // Peer terminated stream, handle it.
//! },
//!
+//! Ok((stream_id, quiche::h3::Event::Reset(err))) => {
+//! // Peer reset the stream, handle it.
+//! },
+//!
//! Ok((_flow_id, quiche::h3::Event::Datagram)) => (),
//!
//! Ok((goaway_id, quiche::h3::Event::GoAway)) => {
@@ -274,7 +289,7 @@
///
/// [`Config::set_application_protos()`]:
/// ../struct.Config.html#method.set_application_protos
-pub const APPLICATION_PROTOCOL: &[u8] = b"\x05h3-29\x05h3-28\x05h3-27";
+pub const APPLICATION_PROTOCOL: &[u8] = b"\x02h3\x05h3-29\x05h3-28\x05h3-27";
// The offset used when converting HTTP/3 urgency to quiche urgency.
const PRIORITY_URGENCY_OFFSET: u8 = 124;
@@ -333,6 +348,30 @@
/// The underlying QUIC stream (or connection) doesn't have enough capacity
/// for the operation to complete. The application should retry later on.
StreamBlocked,
+
+ /// Error in the payload of a SETTINGS frame.
+ SettingsError,
+
+ /// Server rejected request.
+ RequestRejected,
+
+ /// Request or its response cancelled.
+ RequestCancelled,
+
+ /// Client's request stream terminated without containing a fully-formed
+ /// request.
+ RequestIncomplete,
+
+ /// An HTTP message was malformed and cannot be processed.
+ MessageError,
+
+ /// The TCP connection established in response to a CONNECT request was
+ /// reset or abnormally closed.
+ ConnectError,
+
+ /// The requested operation cannot be served over HTTP/3. Peer should retry
+ /// over HTTP/1.1.
+ VersionFallback,
}
impl Error {
@@ -351,9 +390,17 @@
Error::BufferTooShort => 0x999,
Error::TransportError { .. } => 0xFF,
Error::StreamBlocked => 0xFF,
+ Error::SettingsError => 0x109,
+ Error::RequestRejected => 0x10B,
+ Error::RequestCancelled => 0x10C,
+ Error::RequestIncomplete => 0x10D,
+ Error::MessageError => 0x10E,
+ Error::ConnectError => 0x10F,
+ Error::VersionFallback => 0x110,
}
}
+ #[cfg(feature = "ffi")]
fn to_c(self) -> libc::ssize_t {
match self {
Error::Done => -1,
@@ -369,6 +416,13 @@
Error::QpackDecompressionFailed => -11,
Error::TransportError { .. } => -12,
Error::StreamBlocked => -13,
+ Error::SettingsError => -14,
+ Error::RequestRejected => -15,
+ Error::RequestCancelled => -16,
+ Error::RequestIncomplete => -17,
+ Error::MessageError => -18,
+ Error::ConnectError => -19,
+ Error::VersionFallback => -20,
}
}
}
@@ -403,7 +457,7 @@
/// An HTTP/3 configuration.
pub struct Config {
- max_header_list_size: Option<u64>,
+ max_field_section_size: Option<u64>,
qpack_max_table_capacity: Option<u64>,
qpack_blocked_streams: Option<u64>,
}
@@ -412,17 +466,23 @@
/// Creates a new configuration object with default settings.
pub fn new() -> Result<Config> {
Ok(Config {
- max_header_list_size: None,
+ max_field_section_size: None,
qpack_max_table_capacity: None,
qpack_blocked_streams: None,
})
}
- /// Sets the `SETTINGS_MAX_HEADER_LIST_SIZE` setting.
+ /// Sets the `SETTINGS_MAX_FIELD_SECTION_SIZE` setting.
///
- /// By default no limit is enforced.
- pub fn set_max_header_list_size(&mut self, v: u64) {
- self.max_header_list_size = Some(v);
+ /// By default no limit is enforced. When a request whose headers exceed
+ /// the limit set by the application is received, the call to the [`poll()`]
+ /// method will return the [`Error::ExcessiveLoad`] error, and the
+ /// connection will be closed.
+ ///
+ /// [`poll()`]: struct.Connection.html#method.poll
+ /// [`Error::ExcessiveLoad`]: enum.Error.html#variant.ExcessiveLoad
+ pub fn set_max_field_section_size(&mut self, v: u64) {
+ self.max_field_section_size = Some(v);
}
/// Sets the `SETTINGS_QPACK_MAX_TABLE_CAPACITY` setting.
@@ -443,52 +503,52 @@
/// A trait for types with associated string name and value.
pub trait NameValue {
/// Returns the object's name.
- fn name(&self) -> &str;
+ fn name(&self) -> &[u8];
/// Returns the object's value.
- fn value(&self) -> &str;
+ fn value(&self) -> &[u8];
}
/// An owned name-value pair representing a raw HTTP header.
#[derive(Clone, Debug, PartialEq)]
-pub struct Header(String, String);
+pub struct Header(Vec<u8>, Vec<u8>);
impl Header {
/// Creates a new header.
///
/// Both `name` and `value` will be cloned.
- pub fn new(name: &str, value: &str) -> Self {
- Self(String::from(name), String::from(value))
+ pub fn new(name: &[u8], value: &[u8]) -> Self {
+ Self(name.to_vec(), value.to_vec())
}
}
impl NameValue for Header {
- fn name(&self) -> &str {
+ fn name(&self) -> &[u8] {
&self.0
}
- fn value(&self) -> &str {
+ fn value(&self) -> &[u8] {
&self.1
}
}
/// A non-owned name-value pair representing a raw HTTP header.
#[derive(Clone, Debug, PartialEq)]
-pub struct HeaderRef<'a>(&'a str, &'a str);
+pub struct HeaderRef<'a>(&'a [u8], &'a [u8]);
impl<'a> HeaderRef<'a> {
/// Creates a new header.
- pub fn new(name: &'a str, value: &'a str) -> Self {
+ pub fn new(name: &'a [u8], value: &'a [u8]) -> Self {
Self(name, value)
}
}
impl<'a> NameValue for HeaderRef<'a> {
- fn name(&self) -> &str {
+ fn name(&self) -> &[u8] {
self.0
}
- fn value(&self) -> &str {
+ fn value(&self) -> &[u8] {
self.1
}
}
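A hedged sketch of the byte-oriented header API introduced above: names and values are now arbitrary byte strings rather than &str, so non-UTF-8 values can be carried and only decoded to text when the application actually needs it.

use quiche::h3::{Header, NameValue};

fn main() {
    let raw_value: &[u8] = &[0xde, 0xad, 0xbe, 0xef];

    let h = Header::new(b"x-raw", raw_value);

    assert_eq!(h.name(), b"x-raw");
    assert_eq!(h.value(), raw_value);

    // Decode to text only when needed, and only if it really is UTF-8.
    if let Ok(text) = std::str::from_utf8(h.value()) {
        println!("value as text: {}", text);
    }
}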
@@ -511,16 +571,33 @@
/// This indicates that the application can use the [`recv_body()`] method
/// to retrieve the data from the stream.
///
- /// This event will keep being reported until all the available data is
- /// retrieved by the application.
+ /// Note that [`recv_body()`] will need to be called repeatedly until the
+ /// [`Done`] value is returned, as the event will not be re-armed until all
+ /// buffered data is read.
///
/// [`recv_body()`]: struct.Connection.html#method.recv_body
+ /// [`Done`]: enum.Error.html#variant.Done
Data,
/// Stream was closed.
Finished,
+ /// Stream was reset.
+ ///
+ /// The associated data represents the error code sent by the peer.
+ Reset(u64),
+
/// DATAGRAM was received.
+ ///
+ /// This indicates that the application can use the [`recv_dgram()`] method
+ /// to retrieve the HTTP/3 DATAGRAM.
+ ///
+ /// Note that [`recv_dgram()`] will need to be called repeatedly until the
+ /// [`Done`] value is returned, as the event will not be re-armed until all
+ /// buffered DATAGRAMs with the same flow ID are read.
+ ///
+ /// [`recv_dgram()`]: struct.Connection.html#method.recv_dgram
+ /// [`Done`]: enum.Error.html#variant.Done
Datagram,
/// GOAWAY was received.
@@ -528,9 +605,10 @@
}
struct ConnectionSettings {
- pub max_header_list_size: Option<u64>,
+ pub max_field_section_size: Option<u64>,
pub qpack_max_table_capacity: Option<u64>,
pub qpack_blocked_streams: Option<u64>,
+ pub h3_datagram: Option<u64>,
}
struct QpackStreams {
@@ -556,6 +634,7 @@
qpack_encoder: qpack::Encoder,
qpack_decoder: qpack::Decoder,
+ #[allow(dead_code)]
local_qpack_streams: QpackStreams,
peer_qpack_streams: QpackStreams,
@@ -567,11 +646,17 @@
local_goaway_id: Option<u64>,
peer_goaway_id: Option<u64>,
+
+ dgram_event_triggered: bool,
}
impl Connection {
- fn new(config: &Config, is_server: bool) -> Result<Connection> {
+ #[allow(clippy::unnecessary_wraps)]
+ fn new(
+ config: &Config, is_server: bool, enable_dgram: bool,
+ ) -> Result<Connection> {
let initial_uni_stream_id = if is_server { 0x3 } else { 0x2 };
+ let h3_datagram = if enable_dgram { Some(1) } else { None };
Ok(Connection {
is_server,
@@ -583,15 +668,17 @@
streams: HashMap::new(),
local_settings: ConnectionSettings {
- max_header_list_size: config.max_header_list_size,
+ max_field_section_size: config.max_field_section_size,
qpack_max_table_capacity: config.qpack_max_table_capacity,
qpack_blocked_streams: config.qpack_blocked_streams,
+ h3_datagram,
},
peer_settings: ConnectionSettings {
- max_header_list_size: None,
+ max_field_section_size: None,
qpack_max_table_capacity: None,
qpack_blocked_streams: None,
+ h3_datagram: None,
},
control_stream_id: None,
@@ -618,6 +705,8 @@
local_goaway_id: None,
peer_goaway_id: None,
+
+ dgram_event_triggered: false,
})
}
@@ -625,12 +714,27 @@
///
/// This will also initiate the HTTP/3 handshake with the peer by opening
/// all control streams (including QPACK) and sending the local settings.
+ ///
+ /// On success the new connection is returned.
+ ///
+ /// The [`StreamLimit`] error is returned when the HTTP/3 control stream
+ /// cannot be created.
+ ///
+ /// [`StreamLimit`]: ../enum.Error.html#variant.StreamLimit
pub fn with_transport(
conn: &mut super::Connection, config: &Config,
) -> Result<Connection> {
- let mut http3_conn = Connection::new(config, conn.is_server)?;
+ let mut http3_conn =
+ Connection::new(config, conn.is_server, conn.dgram_enabled())?;
- http3_conn.send_settings(conn)?;
+ match http3_conn.send_settings(conn) {
+ Ok(_) => (),
+
+ Err(e) => {
+ conn.close(true, e.to_wire(), b"Error opening control stream")?;
+ return Err(e);
+ },
+ };
// Try opening QPACK streams, but ignore errors if it fails since we
// don't need them right now.
@@ -680,7 +784,11 @@
// stream_capacity() will fail. By writing a 0-length buffer, we force
// the creation of the QUIC stream state, without actually writing
// anything.
- conn.stream_send(stream_id, b"", false)?;
+ if let Err(e) = conn.stream_send(stream_id, b"", false) {
+ self.streams.remove(&stream_id);
+
+ return Err(e.into());
+ };
self.send_headers(conn, stream_id, headers, fin)?;
@@ -737,7 +845,7 @@
return Err(Error::FrameUnexpected);
}
- let mut urgency = 3;
+ let mut urgency = 3u8.saturating_add(PRIORITY_URGENCY_OFFSET);
let mut incremental = false;
for param in priority.split(',') {
@@ -755,11 +863,14 @@
// TODO: this also detects when u is not an sh-integer and
// clamps it in the same way. A real structured header parser
// would actually fail to parse.
- let mut u =
- i64::from_str_radix(param.rsplit('=').next().unwrap(), 10)
- .unwrap_or(7);
+ let mut u = param
+ .rsplit('=')
+ .next()
+ .unwrap()
+ .parse::<i64>()
+ .unwrap_or(7);
- if u < 0 || u > 7 {
+ if !(0..=7).contains(&u) {
u = 7;
}
@@ -786,7 +897,7 @@
let mut header_block = vec![0; headers_len];
let len = self
.qpack_encoder
- .encode(&headers, &mut header_block)
+ .encode(headers, &mut header_block)
.map_err(|_| Error::InternalError)?;
header_block.truncate(len);
@@ -806,16 +917,26 @@
self.frames_greased = true;
}
- let stream_cap = conn.stream_capacity(stream_id)?;
-
let header_block = self.encode_header_block(headers)?;
let overhead = octets::varint_len(frame::HEADERS_FRAME_TYPE_ID) +
octets::varint_len(header_block.len() as u64);
- if stream_cap < overhead + header_block.len() {
- return Err(Error::StreamBlocked);
- }
+ // Headers need to be sent atomically, so make sure the stream has
+ // enough capacity.
+ match conn.stream_writable(stream_id, overhead + header_block.len()) {
+ Ok(true) => (),
+
+ Ok(false) => return Err(Error::StreamBlocked),
+
+ Err(e) => {
+ if conn.stream_finished(stream_id) {
+ self.streams.remove(&stream_id);
+ }
+
+ return Err(e.into());
+ },
+ };
trace!(
"{} tx frm HEADERS stream={} len={} fin={}",
@@ -881,15 +1002,34 @@
},
};
+ // Avoid sending 0-length DATA frames when the fin flag is false.
+ if body.is_empty() && !fin {
+ return Err(Error::Done);
+ }
+
let overhead = octets::varint_len(frame::DATA_FRAME_TYPE_ID) +
octets::varint_len(body.len() as u64);
- let stream_cap = conn.stream_capacity(stream_id)?;
+ let stream_cap = match conn.stream_capacity(stream_id) {
+ Ok(v) => v,
- // Make sure there is enough capacity to send the frame header and at
- // least one byte of frame payload (this to avoid sending 0-length DATA
- // frames).
- if stream_cap <= overhead {
+ Err(e) => {
+ if conn.stream_finished(stream_id) {
+ self.streams.remove(&stream_id);
+ }
+
+ return Err(e.into());
+ },
+ };
+
+ if stream_cap < overhead + body.len() {
+ // Ensure the peer is notified that the connection or stream is
+ // blocked when the stream's capacity is limited by flow control.
+ let _ = conn.stream_writable(stream_id, overhead + body.len());
+ }
+
+ // Make sure there is enough capacity to send the DATA frame header.
+ if stream_cap < overhead {
return Err(Error::Done);
}
@@ -900,6 +1040,11 @@
// application can try again later.
let fin = if body_len != body.len() { false } else { fin };
+ // Again, avoid sending 0-length DATA frames when the fin flag is false.
+ if body_len == 0 && !fin {
+ return Err(Error::Done);
+ }
+
trace!(
"{} tx frm DATA stream={} len={} fin={}",
conn.trace_id(),
@@ -924,6 +1069,18 @@
Ok(written)
}
+ /// Returns whether the peer enabled HTTP/3 DATAGRAM frame support.
+ ///
+ /// Support is signalled by the peer's SETTINGS, so this method always
+ /// returns false until they have been processed using the [`poll()`]
+ /// method.
+ ///
+ /// [`poll()`]: struct.Connection.html#method.poll
+ pub fn dgram_enabled_by_peer(&self, conn: &super::Connection) -> bool {
+ self.peer_settings.h3_datagram == Some(1) &&
+ conn.dgram_max_writable_len().is_some()
+ }
+
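A hedged usage sketch for the new method: DATAGRAM support is only known once the peer's SETTINGS have been processed by poll(), and the transport must also have DATAGRAM support enabled (e.g. via config.enable_dgram(), as the test Session below does). The h3_conn and conn handles are assumed to come from the usual with_transport() setup.

// Only attempt HTTP/3 DATAGRAMs once the peer has advertised support.
fn maybe_send_dgram(
    h3_conn: &mut quiche::h3::Connection, conn: &mut quiche::Connection,
) -> quiche::h3::Result<()> {
    if h3_conn.dgram_enabled_by_peer(conn) {
        // Flow ID 0 and a tiny payload, purely for illustration.
        h3_conn.send_dgram(conn, 0, b"hello")?;
    }

    Ok(())
}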
/// Sends an HTTP/3 DATAGRAM with the specified flow ID.
pub fn send_dgram(
&mut self, conn: &mut super::Connection, flow_id: u64, buf: &[u8],
@@ -935,7 +1092,7 @@
b.put_varint(flow_id)?;
b.put_bytes(buf)?;
- conn.dgram_send(&d)?;
+ conn.dgram_send_vec(d)?;
Ok(())
}
@@ -977,6 +1134,35 @@
}
}
+ // A helper function for determining if there is a DATAGRAM event.
+ fn process_dgrams(
+ &mut self, conn: &mut super::Connection,
+ ) -> Result<(u64, Event)> {
+ let mut d = [0; 8];
+
+ match conn.dgram_recv_peek(&mut d, 8) {
+ Ok(_) => {
+ if self.dgram_event_triggered {
+ return Err(Error::Done);
+ }
+
+ self.dgram_event_triggered = true;
+
+ Ok((0, Event::Datagram))
+ },
+
+ Err(crate::Error::Done) => {
+ // The dgram recv queue is empty, so re-arm the Datagram event
+ // so it is issued next time a DATAGRAM is received.
+ self.dgram_event_triggered = false;
+
+ Err(Error::Done)
+ },
+
+ Err(e) => Err(Error::TransportError(e)),
+ }
+ }
+
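Since the Datagram event is now edge-triggered (re-armed only once the receive queue drains), applications are expected to keep reading after a single event; a hedged sketch of such a drain loop, with the handles assumed to come from the usual setup:

fn drain_dgrams(
    h3_conn: &mut quiche::h3::Connection, conn: &mut quiche::Connection,
) -> quiche::h3::Result<()> {
    let mut buf = [0; 65535];

    loop {
        match h3_conn.recv_dgram(conn, &mut buf) {
            // recv_dgram() returns the total length, the flow ID and the
            // number of bytes the flow ID occupies at the front of `buf`.
            Ok((len, flow_id, flow_id_len)) => println!(
                "flow {} carried {} payload bytes",
                flow_id,
                len - flow_id_len
            ),

            Err(quiche::h3::Error::Done) => return Ok(()),

            Err(e) => return Err(e),
        }
    }
}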
/// Reads request or response body data into the provided buffer.
///
/// Applications should call this method whenever the [`poll()`] method
@@ -991,32 +1177,78 @@
pub fn recv_body(
&mut self, conn: &mut super::Connection, stream_id: u64, out: &mut [u8],
) -> Result<usize> {
- let stream = self.streams.get_mut(&stream_id).ok_or(Error::Done)?;
+ let mut total = 0;
- if stream.state() != stream::State::Data {
- return Err(Error::Done);
+ // Try to consume all buffered data for the stream, even across multiple
+ // DATA frames.
+ while total < out.len() {
+ let stream = self.streams.get_mut(&stream_id).ok_or(Error::Done)?;
+
+ if stream.state() != stream::State::Data {
+ break;
+ }
+
+ let (read, fin) =
+ match stream.try_consume_data(conn, &mut out[total..]) {
+ Ok(v) => v,
+
+ Err(Error::Done) => break,
+
+ Err(e) => return Err(e),
+ };
+
+ total += read;
+
+ // No more data to read, we are done.
+ if read == 0 || fin {
+ break;
+ }
+
+ // Process incoming data from the stream. For example, if a whole
+ // DATA frame was consumed, and another one is queued behind it,
+ // this will ensure the additional data will also be returned to
+ // the application.
+ match self.process_readable_stream(conn, stream_id, false) {
+ Ok(_) => unreachable!(),
+
+ Err(Error::Done) => (),
+
+ Err(e) => return Err(e),
+ };
+
+ if conn.stream_finished(stream_id) {
+ break;
+ }
}
- let read = stream.try_consume_data(conn, out)?;
-
// While body is being received, the stream is marked as finished only
// when all data is read by the application.
if conn.stream_finished(stream_id) {
- self.finished_streams.push_back(stream_id);
+ self.process_finished_stream(stream_id);
}
- Ok(read)
+ if total == 0 {
+ return Err(Error::Done);
+ }
+
+ Ok(total)
}
/// Processes HTTP/3 data received from the peer.
///
- /// On success it returns an [`Event`] and an ID.
+ /// On success it returns an [`Event`] and an ID, or [`Done`] when there are
+ /// no events to report.
+ ///
+ /// Note that all events are edge-triggered, meaning that once reported they
+ /// will not be reported by subsequent calls to this method until the event
+ /// is re-armed.
///
/// The events [`Headers`], [`Data`] and [`Finished`] return a stream ID,
/// which is used in methods [`recv_body()`], [`send_response()`] or
/// [`send_body()`].
///
- /// The event [`Datagram`] returns a flow ID.
+ /// The event [`Datagram`] returns a dummy value of `0`, which should be
+ /// ignored by the application.
///
/// The event [`GoAway`] returns an ID that depends on the connection role.
/// A client receives the largest processed stream ID. A server receives the
@@ -1026,6 +1258,7 @@
/// the appropriate error code, using the transport's [`close()`] method.
///
/// [`Event`]: enum.Event.html
+ /// [`Done`]: enum.Error.html#variant.Done
/// [`Headers`]: enum.Event.html#variant.Headers
/// [`Data`]: enum.Event.html#variant.Data
/// [`Finished`]: enum.Event.html#variant.Finished
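As the edge-triggered semantics above describe, a Data event is reported once and only re-armed after the buffered body has been drained; a hedged sketch of the corresponding read loop (h3_conn, conn and stream_id assumed to come from the usual setup):

fn drain_body(
    h3_conn: &mut quiche::h3::Connection, conn: &mut quiche::Connection,
    stream_id: u64,
) -> quiche::h3::Result<()> {
    let mut buf = [0; 4096];

    // Keep reading until Done; the Data event will not fire again before then.
    loop {
        match h3_conn.recv_body(conn, stream_id, &mut buf) {
            Ok(read) =>
                println!("read {} body bytes on stream {}", read, stream_id),

            Err(quiche::h3::Error::Done) => return Ok(()),

            Err(e) => return Err(e),
        }
    }
}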
@@ -1040,7 +1273,7 @@
// When connection close is initiated by the local application (e.g. due
// to a protocol error), the connection itself might be in a broken
// state, so return early.
- if conn.error.is_some() || conn.app_error.is_some() {
+ if conn.local_error.is_some() {
return Err(Error::Done);
}
@@ -1080,44 +1313,49 @@
return Ok((finished, Event::Finished));
}
- // Process DATAGRAMs
- let mut d = [0; 8];
+ // Process queued DATAGRAMs if the poll threshold allows it.
+ match self.process_dgrams(conn) {
+ Ok(v) => return Ok(v),
- match conn.dgram_recv_peek(&mut d, 8) {
- Ok(_) => {
- let mut b = octets::Octets::with_slice(&d);
- let flow_id = b.get_varint()?;
- return Ok((flow_id, Event::Datagram));
- },
+ Err(Error::Done) => (),
- Err(crate::Error::Done) => (),
-
- Err(e) => return Err(Error::TransportError(e)),
+ Err(e) => return Err(e),
};
// Process HTTP/3 data from readable streams.
for s in conn.readable() {
trace!("{} stream id {} is readable", conn.trace_id(), s);
- let ev = match self.process_readable_stream(conn, s) {
+ let ev = match self.process_readable_stream(conn, s, true) {
Ok(v) => Some(v),
Err(Error::Done) => None,
+ // Return early if the stream was reset, to avoid returning
+ // a Finished event later as well.
+ Err(Error::TransportError(crate::Error::StreamReset(e))) =>
+ return Ok((s, Event::Reset(e))),
+
Err(e) => return Err(e),
};
if conn.stream_finished(s) {
- self.finished_streams.push_back(s);
+ self.process_finished_stream(s);
}
// TODO: check if stream is completed so it can be freed
-
if let Some(ev) = ev {
return Ok(ev);
}
}
+ // Process finished streams list once again, to make sure `Finished`
+ // events are returned when receiving empty stream frames with the fin
+ // flag set.
+ if let Some(finished) = self.finished_streams.pop_front() {
+ return Ok((finished, Event::Finished));
+ }
+
Err(Error::Done)
}
@@ -1135,9 +1373,13 @@
pub fn send_goaway(
&mut self, conn: &mut super::Connection, id: u64,
) -> Result<()> {
+ let mut id = id;
+
+ // TODO: server push
+ //
+ // In the meantime always send 0 from the client.
if !self.is_server {
- // TODO: server push
- return Ok(());
+ id = 0;
}
if self.is_server && id % 4 != 0 {
@@ -1237,7 +1479,17 @@
) -> Result<()> {
let mut d = [0; 8];
- let stream_cap = conn.stream_capacity(stream_id)?;
+ let stream_cap = match conn.stream_capacity(stream_id) {
+ Ok(v) => v,
+
+ Err(e) => {
+ if conn.stream_finished(stream_id) {
+ self.streams.remove(&stream_id);
+ }
+
+ return Err(e.into());
+ },
+ };
let grease_frame1 = grease_value();
let grease_frame2 = grease_value();
@@ -1283,7 +1535,7 @@
Ok(stream_id) => {
trace!("{} open GREASE stream {}", conn.trace_id(), stream_id);
- conn.stream_send(stream_id, b"GREASE is the word", false)?;
+ conn.stream_send(stream_id, b"GREASE is the word", true)?;
},
Err(Error::IdError) => {
@@ -1311,11 +1563,12 @@
};
let frame = frame::Frame::Settings {
- max_header_list_size: self.local_settings.max_header_list_size,
+ max_field_section_size: self.local_settings.max_field_section_size,
qpack_max_table_capacity: self
.local_settings
.qpack_max_table_capacity,
qpack_blocked_streams: self.local_settings.qpack_blocked_streams,
+ h3_datagram: self.local_settings.h3_datagram,
grease,
};
@@ -1346,7 +1599,7 @@
return Err(Error::ClosedCriticalStream);
}
- match self.process_readable_stream(conn, stream_id) {
+ match self.process_readable_stream(conn, stream_id, true) {
Ok(ev) => return Ok(ev),
Err(Error::Done) => (),
@@ -1368,7 +1621,7 @@
}
fn process_readable_stream(
- &mut self, conn: &mut super::Connection, stream_id: u64,
+ &mut self, conn: &mut super::Connection, stream_id: u64, polling: bool,
) -> Result<(u64, Event)> {
self.streams
.entry(stream_id)
@@ -1541,6 +1794,11 @@
},
stream::State::FramePayload => {
+ // Do not emit events when not polling.
+ if !polling {
+ break;
+ }
+
stream.try_fill_buffer(conn)?;
let frame = match stream.try_consume_frame() {
@@ -1569,6 +1827,15 @@
},
stream::State::Data => {
+ // Do not emit events when not polling.
+ if !polling {
+ break;
+ }
+
+ if !stream.try_trigger_data_event() {
+ break;
+ }
+
return Ok((stream_id, Event::Data));
},
@@ -1583,16 +1850,44 @@
stream::State::Drain => {
// Discard incoming data on the stream.
- conn.stream_shutdown(stream_id, crate::Shutdown::Read, 0)?;
+ conn.stream_shutdown(
+ stream_id,
+ crate::Shutdown::Read,
+ 0x100,
+ )?;
break;
},
+
+ stream::State::Finished => break,
}
}
Err(Error::Done)
}
+ fn process_finished_stream(&mut self, stream_id: u64) {
+ let stream = match self.streams.get_mut(&stream_id) {
+ Some(v) => v,
+
+ None => return,
+ };
+
+ if stream.state() == stream::State::Finished {
+ return;
+ }
+
+ match stream.ty() {
+ Some(stream::Type::Request) | Some(stream::Type::Push) => {
+ stream.finished();
+
+ self.finished_streams.push_back(stream_id);
+ },
+
+ _ => (),
+ };
+ }
+
fn process_frame(
&mut self, conn: &mut super::Connection, stream_id: u64,
frame: frame::Frame,
@@ -1606,16 +1901,31 @@
match frame {
frame::Frame::Settings {
- max_header_list_size,
+ max_field_section_size,
qpack_max_table_capacity,
qpack_blocked_streams,
+ h3_datagram,
..
} => {
self.peer_settings = ConnectionSettings {
- max_header_list_size,
+ max_field_section_size,
qpack_max_table_capacity,
qpack_blocked_streams,
+ h3_datagram,
};
+
+ if let Some(1) = h3_datagram {
+ // The peer MUST have also enabled DATAGRAM with a TP
+ if conn.dgram_max_writable_len().is_none() {
+ conn.close(
+ true,
+ Error::SettingsError.to_wire(),
+ b"H3_DATAGRAM sent with value 1 but max_datagram_frame_size TP not set.",
+ )?;
+
+ return Err(Error::SettingsError);
+ }
+ }
},
frame::Frame::Headers { header_block } => {
@@ -1629,21 +1939,32 @@
return Err(Error::FrameUnexpected);
}
- // Use "infinite" as default value for max_header_list_size if
+ // Use "infinite" as default value for max_field_section_size if
// it is not configured by the application.
let max_size = self
.local_settings
- .max_header_list_size
+ .max_field_section_size
.unwrap_or(std::u64::MAX);
- let headers = self
+ let headers = match self
.qpack_decoder
.decode(&header_block[..], max_size)
- .map_err(|e| match e {
- qpack::Error::HeaderListTooLarge => Error::ExcessiveLoad,
+ {
+ Ok(v) => v,
- _ => Error::QpackDecompressionFailed,
- })?;
+ Err(e) => {
+ let e = match e {
+ qpack::Error::HeaderListTooLarge =>
+ Error::ExcessiveLoad,
+
+ _ => Error::QpackDecompressionFailed,
+ };
+
+ conn.close(true, e.to_wire(), b"Error parsing headers.")?;
+
+ return Err(e);
+ },
+ };
let has_body = !conn.stream_finished(stream_id);
@@ -1814,8 +2135,6 @@
pub pipe: testing::Pipe,
pub client: Connection,
pub server: Connection,
-
- buf: [u8; 65535],
}
impl Session {
@@ -1831,6 +2150,8 @@
config.set_initial_max_streams_bidi(5);
config.set_initial_max_streams_uni(5);
config.verify_peer(false);
+ config.enable_dgram(true, 3, 3);
+ config.set_ack_delay_exponent(8);
let h3_config = Config::new()?;
Session::with_configs(&mut config, &h3_config)
@@ -1839,47 +2160,49 @@
pub fn with_configs(
config: &mut crate::Config, h3_config: &Config,
) -> Result<Session> {
+ let pipe = testing::Pipe::with_config(config)?;
+ let client_dgram = pipe.client.dgram_enabled();
+ let server_dgram = pipe.server.dgram_enabled();
Ok(Session {
- pipe: testing::Pipe::with_config(config)?,
- client: Connection::new(&h3_config, false)?,
- server: Connection::new(&h3_config, true)?,
- buf: [0; 65535],
+ pipe,
+ client: Connection::new(h3_config, false, client_dgram)?,
+ server: Connection::new(h3_config, true, server_dgram)?,
})
}
/// Do the HTTP/3 handshake so both ends are in sane initial state.
pub fn handshake(&mut self) -> Result<()> {
- self.pipe.handshake(&mut self.buf)?;
+ self.pipe.handshake()?;
// Client streams.
self.client.send_settings(&mut self.pipe.client)?;
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
self.client
.open_qpack_encoder_stream(&mut self.pipe.client)?;
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
self.client
.open_qpack_decoder_stream(&mut self.pipe.client)?;
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
if self.pipe.client.grease {
self.client.open_grease_stream(&mut self.pipe.client)?;
}
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
// Server streams.
self.server.send_settings(&mut self.pipe.server)?;
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
self.server
.open_qpack_encoder_stream(&mut self.pipe.server)?;
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
self.server
.open_qpack_decoder_stream(&mut self.pipe.server)?;
- self.pipe.advance(&mut self.buf).ok();
+ self.pipe.advance().ok();
if self.pipe.server.grease {
self.server.open_grease_stream(&mut self.pipe.server)?;
@@ -1900,7 +2223,7 @@
/// Advances the session pipe over the buffer.
pub fn advance(&mut self) -> crate::Result<()> {
- self.pipe.advance(&mut self.buf)
+ self.pipe.advance()
}
/// Polls the client for events.
@@ -1918,11 +2241,11 @@
/// On success it returns the newly allocated stream and the headers.
pub fn send_request(&mut self, fin: bool) -> Result<(u64, Vec<Header>)> {
let req = vec![
- Header::new(":method", "GET"),
- Header::new(":scheme", "https"),
- Header::new(":authority", "quic.tech"),
- Header::new(":path", "/test"),
- Header::new("user-agent", "quiche-test"),
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
+ Header::new(b"user-agent", b"quiche-test"),
];
let stream =
@@ -1940,8 +2263,8 @@
&mut self, stream: u64, fin: bool,
) -> Result<Vec<Header>> {
let resp = vec![
- Header::new(":status", "200"),
- Header::new("server", "quiche-test"),
+ Header::new(b":status", b"200"),
+ Header::new(b"server", b"quiche-test"),
];
self.server.send_response(
@@ -2024,6 +2347,54 @@
Ok(())
}
+ /// Sends an HTTP/3 DATAGRAM with default data from the client.
+ ///
+ /// On success it returns the data.
+ pub fn send_dgram_client(&mut self, flow_id: u64) -> Result<Vec<u8>> {
+ let bytes = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ self.client
+ .send_dgram(&mut self.pipe.client, flow_id, &bytes)?;
+
+ self.advance().ok();
+
+ Ok(bytes)
+ }
+
+ /// Receives an HTTP/3 DATAGRAM from the server.
+ ///
+ /// On success it returns the DATAGRAM length, flow ID and flow ID
+ /// length.
+ pub fn recv_dgram_client(
+ &mut self, buf: &mut [u8],
+ ) -> Result<(usize, u64, usize)> {
+ self.client.recv_dgram(&mut self.pipe.client, buf)
+ }
+
+ /// Sends an HTTP/3 DATAGRAM with default data from the server.
+ ///
+ /// On success it returns the data.
+ pub fn send_dgram_server(&mut self, flow_id: u64) -> Result<Vec<u8>> {
+ let bytes = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ self.server
+ .send_dgram(&mut self.pipe.server, flow_id, &bytes)?;
+
+ self.advance().ok();
+
+ Ok(bytes)
+ }
+
+ /// Receives an HTTP/3 DATAGRAM from the client.
+ ///
+ /// On success it returns the DATAGRAM length, flow ID and flow ID
+ /// length.
+ pub fn recv_dgram_server(
+ &mut self, buf: &mut [u8],
+ ) -> Result<(usize, u64, usize)> {
+ self.server.recv_dgram(&mut self.pipe.server, buf)
+ }
+
/// Sends a single HTTP/3 frame from the server.
pub fn send_frame_server(
&mut self, frame: frame::Frame, stream_id: u64, fin: bool,
@@ -2041,6 +2412,28 @@
Ok(())
}
+
+ /// Sends an arbitrary buffer of HTTP/3 stream data from the client.
+ pub fn send_arbitrary_stream_data_client(
+ &mut self, data: &[u8], stream_id: u64, fin: bool,
+ ) -> Result<()> {
+ self.pipe.client.stream_send(stream_id, data, fin)?;
+
+ self.advance().ok();
+
+ Ok(())
+ }
+
+ /// Sends an arbitrary buffer of HTTP/3 stream data from the server.
+ pub fn send_arbitrary_stream_data_server(
+ &mut self, data: &[u8], stream_id: u64, fin: bool,
+ ) -> Result<()> {
+ self.pipe.server.stream_send(stream_id, data, fin)?;
+
+ self.advance().ok();
+
+ Ok(())
+ }
}
}
@@ -2158,9 +2551,10 @@
};
assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_client(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
for _ in 0..total_data_frames {
- assert_eq!(s.poll_client(), Ok((stream, Event::Data)));
assert_eq!(s.recv_body_client(stream, &mut recv_buf), Ok(body.len()));
}
@@ -2227,9 +2621,10 @@
};
assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
for _ in 0..total_data_frames {
- assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
}
@@ -2288,7 +2683,8 @@
assert_eq!(ev, ev_headers);
assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
- assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
}
@@ -2320,6 +2716,46 @@
}
#[test]
+ /// Send a request with no body, get a response with one DATA frame and an
+ /// empty FIN after reception from the client.
+ fn request_no_body_response_one_chunk_empty_fin() {
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ let (stream, req) = s.send_request(true).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: false,
+ };
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+
+ let resp = s.send_response(stream, false).unwrap();
+
+ let body = s.send_body_server(stream, false).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: resp,
+ has_body: true,
+ };
+
+ assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+
+ assert_eq!(s.poll_client(), Ok((stream, Event::Data)));
+ assert_eq!(s.recv_body_client(stream, &mut recv_buf), Ok(body.len()));
+
+ assert_eq!(s.pipe.server.stream_send(stream, &[], true), Ok(0));
+ s.advance().ok();
+
+ assert_eq!(s.poll_client(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ }
+
+ #[test]
/// Try to send DATA frames before HEADERS.
fn body_response_before_headers() {
let mut s = Session::default().unwrap();
@@ -2620,12 +3056,12 @@
let mut s = Session::default().unwrap();
s.handshake().unwrap();
- s.client.send_goaway(&mut s.pipe.client, 1).unwrap();
+ s.client.send_goaway(&mut s.pipe.client, 100).unwrap();
s.advance().ok();
// TODO: server push
- assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.poll_server(), Ok((0, Event::GoAway)));
}
#[test]
@@ -2703,10 +3139,10 @@
fn uni_stream_local_counting() {
let config = Config::new().unwrap();
- let h3_cln = Connection::new(&config, false).unwrap();
+ let h3_cln = Connection::new(&config, false, false).unwrap();
assert_eq!(h3_cln.next_uni_stream_id, 2);
- let h3_srv = Connection::new(&config, true).unwrap();
+ let h3_srv = Connection::new(&config, true, false).unwrap();
assert_eq!(h3_srv.next_uni_stream_id, 3);
}
@@ -2842,10 +3278,32 @@
#[test]
/// Tests limits for the stream state buffer maximum size.
fn max_state_buf_size() {
- // DATA frames don't consume the state buffer, so can be of any size.
let mut s = Session::default().unwrap();
s.handshake().unwrap();
+ let req = vec![
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
+ Header::new(b"user-agent", b"quiche-test"),
+ ];
+
+ assert_eq!(
+ s.client.send_request(&mut s.pipe.client, &req, false),
+ Ok(0)
+ );
+
+ s.advance().ok();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ assert_eq!(s.server.poll(&mut s.pipe.server), Ok((0, ev_headers)));
+
+ // DATA frames don't consume the state buffer, so can be of any size.
let mut d = [42; 128];
let mut b = octets::OctetsMut::with_slice(&mut d);
@@ -2919,16 +3377,16 @@
};
assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
for _ in 0..total_data_frames {
- assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
assert_eq!(
s.recv_body_server(stream, &mut recv_buf),
Ok(bytes.len())
);
}
- assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
assert_eq!(
s.recv_body_server(stream, &mut recv_buf),
Ok(bytes.len() - 2)
@@ -2959,18 +3417,18 @@
config.verify_peer(false);
let mut h3_config = Config::new().unwrap();
- h3_config.set_max_header_list_size(65);
+ h3_config.set_max_field_section_size(65);
let mut s = Session::with_configs(&mut config, &mut h3_config).unwrap();
s.handshake().unwrap();
let req = vec![
- Header::new(":method", "GET"),
- Header::new(":scheme", "https"),
- Header::new(":authority", "quic.tech"),
- Header::new(":path", "/test"),
- Header::new("aaaaaaa", "aaaaaaaa"),
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
+ Header::new(b"aaaaaaa", b"aaaaaaaa"),
];
let stream = s
@@ -2983,6 +3441,11 @@
assert_eq!(stream, 0);
assert_eq!(s.poll_server(), Err(Error::ExcessiveLoad));
+
+ assert_eq!(
+ s.pipe.server.local_error.as_ref().unwrap().error_code,
+ Error::to_wire(Error::ExcessiveLoad)
+ );
}
#[test]
@@ -2992,11 +3455,11 @@
s.handshake().unwrap();
let req = vec![
- Header::new(":method", "GET"),
- Header::new(":scheme", "https"),
- Header::new(":authority", "quic.tech"),
- Header::new(":path", "/test"),
- Header::new("user-agent", "quiche-test"),
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
+ Header::new(b"user-agent", b"quiche-test"),
];
// We need to open all streams in the same flight, so we can't use the
@@ -3022,9 +3485,8 @@
}
#[test]
- /// Tests that calling poll() after an error occured does nothing.
- fn poll_after_error() {
- // DATA frames don't consume the state buffer, so can be of any size.
+ /// Tests that sending DATA before HEADERS causes an error.
+ fn data_before_headers() {
let mut s = Session::default().unwrap();
s.handshake().unwrap();
@@ -3034,16 +3496,22 @@
let frame_type = b.put_varint(frame::DATA_FRAME_TYPE_ID).unwrap();
s.pipe.client.stream_send(0, frame_type, false).unwrap();
- let frame_len = b.put_varint(1 << 24).unwrap();
+ let frame_len = b.put_varint(5).unwrap();
s.pipe.client.stream_send(0, frame_len, false).unwrap();
- s.pipe.client.stream_send(0, &d, false).unwrap();
+ s.pipe.client.stream_send(0, b"hello", false).unwrap();
s.advance().ok();
- assert_eq!(s.server.poll(&mut s.pipe.server), Ok((0, Event::Data)));
+ assert_eq!(
+ s.server.poll(&mut s.pipe.server),
+ Err(Error::FrameUnexpected)
+ );
+ }
- // GREASE frames consume the state buffer, so need to be limited.
+ #[test]
+ /// Tests that calling poll() after an error occurred does nothing.
+ fn poll_after_error() {
let mut s = Session::default().unwrap();
s.handshake().unwrap();
@@ -3092,10 +3560,10 @@
s.handshake().unwrap();
let req = vec![
- Header::new(":method", "GET"),
- Header::new(":scheme", "https"),
- Header::new(":authority", "quic.tech"),
- Header::new(":path", "/test"),
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
];
assert_eq!(s.client.send_request(&mut s.pipe.client, &req, true), Ok(0));
@@ -3113,6 +3581,61 @@
}
#[test]
+ /// Test handling of 0-length DATA writes with and without fin.
+ fn zero_length_data() {
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ let (stream, req) = s.send_request(false).unwrap();
+
+ assert_eq!(
+ s.client.send_body(&mut s.pipe.client, 0, b"", false),
+ Err(Error::Done)
+ );
+ assert_eq!(s.client.send_body(&mut s.pipe.client, 0, b"", true), Ok(0));
+
+ s.advance().ok();
+
+ let mut recv_buf = vec![0; 100];
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Err(Error::Done));
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ let resp = s.send_response(stream, false).unwrap();
+
+ assert_eq!(
+ s.server.send_body(&mut s.pipe.server, 0, b"", false),
+ Err(Error::Done)
+ );
+ assert_eq!(s.server.send_body(&mut s.pipe.server, 0, b"", true), Ok(0));
+
+ s.advance().ok();
+
+ let ev_headers = Event::Headers {
+ list: resp,
+ has_body: true,
+ };
+
+ assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+
+ assert_eq!(s.poll_client(), Ok((stream, Event::Data)));
+ assert_eq!(s.recv_body_client(stream, &mut recv_buf), Err(Error::Done));
+
+ assert_eq!(s.poll_client(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ }
+
+ #[test]
/// Tests that blocked 0-length DATA writes are reported correctly.
fn zero_length_data_blocked() {
let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
@@ -3123,7 +3646,7 @@
.load_priv_key_from_pem_file("examples/cert.key")
.unwrap();
config.set_application_protos(b"\x02h3").unwrap();
- config.set_initial_max_data(70);
+ config.set_initial_max_data(69);
config.set_initial_max_stream_data_bidi_local(150);
config.set_initial_max_stream_data_bidi_remote(150);
config.set_initial_max_stream_data_uni(150);
@@ -3138,10 +3661,10 @@
s.handshake().unwrap();
let req = vec![
- Header::new(":method", "GET"),
- Header::new(":scheme", "https"),
- Header::new(":authority", "quic.tech"),
- Header::new(":path", "/test"),
+ Header::new(b":method", b"GET"),
+ Header::new(b":scheme", b"https"),
+ Header::new(b":authority", b"quic.tech"),
+ Header::new(b":path", b"/test"),
];
assert_eq!(
@@ -3159,8 +3682,1014 @@
// Once the server gives flow control credits back, we can send the body.
assert_eq!(s.client.send_body(&mut s.pipe.client, 0, b"", true), Ok(0));
}
+
+ #[test]
+ /// Tests that receiving an H3_DATAGRAM setting is ok.
+ fn dgram_setting() {
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(70);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.enable_dgram(true, 1000, 1000);
+ config.verify_peer(false);
+
+ let h3_config = Config::new().unwrap();
+
+ let mut s = Session::with_configs(&mut config, &h3_config).unwrap();
+ assert_eq!(s.pipe.handshake(), Ok(()));
+
+ s.client.send_settings(&mut s.pipe.client).unwrap();
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ // Before processing SETTINGS (via poll), HTTP/3 DATAGRAMs are not
+ // enabled.
+ assert!(!s.server.dgram_enabled_by_peer(&s.pipe.server));
+
+ // When everything is ok, poll returns Done and DATAGRAM is enabled.
+ assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::Done));
+ assert!(s.server.dgram_enabled_by_peer(&s.pipe.server));
+
+ // Now perform the same checks on the client side.
+ s.server.send_settings(&mut s.pipe.server).unwrap();
+ assert_eq!(s.pipe.advance(), Ok(()));
+ assert!(!s.client.dgram_enabled_by_peer(&s.pipe.client));
+ assert_eq!(s.client.poll(&mut s.pipe.client), Err(Error::Done));
+ assert!(s.client.dgram_enabled_by_peer(&s.pipe.client));
+ }
+
+ #[test]
+ /// Tests that receiving an H3_DATAGRAM setting when no TP is set generates
+ /// an error.
+ fn dgram_setting_no_tp() {
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(70);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+
+ let h3_config = Config::new().unwrap();
+
+ let mut s = Session::with_configs(&mut config, &h3_config).unwrap();
+ assert_eq!(s.pipe.handshake(), Ok(()));
+
+ s.client.control_stream_id = Some(
+ s.client
+ .open_uni_stream(
+ &mut s.pipe.client,
+ stream::HTTP3_CONTROL_STREAM_TYPE_ID,
+ )
+ .unwrap(),
+ );
+
+ let settings = frame::Frame::Settings {
+ max_field_section_size: None,
+ qpack_max_table_capacity: None,
+ qpack_blocked_streams: None,
+ h3_datagram: Some(1),
+ grease: None,
+ };
+
+ s.send_frame_client(settings, s.client.control_stream_id.unwrap(), false)
+ .unwrap();
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::SettingsError));
+ }
+
+ #[test]
+ /// Tests that receiving SETTINGS with prohibited values generates an error.
+ fn settings_h2_prohibited() {
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(70);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+
+ let h3_config = Config::new().unwrap();
+
+ let mut s = Session::with_configs(&mut config, &h3_config).unwrap();
+ assert_eq!(s.pipe.handshake(), Ok(()));
+
+ s.client.control_stream_id = Some(
+ s.client
+ .open_uni_stream(
+ &mut s.pipe.client,
+ stream::HTTP3_CONTROL_STREAM_TYPE_ID,
+ )
+ .unwrap(),
+ );
+
+ s.server.control_stream_id = Some(
+ s.server
+ .open_uni_stream(
+ &mut s.pipe.server,
+ stream::HTTP3_CONTROL_STREAM_TYPE_ID,
+ )
+ .unwrap(),
+ );
+
+ let frame_payload_len = 2u64;
+ let settings = [
+ frame::SETTINGS_FRAME_TYPE_ID as u8,
+ frame_payload_len as u8,
+ 0x2, // 0x2 is a reserved setting type
+ 1,
+ ];
+
+ s.send_arbitrary_stream_data_client(
+ &settings,
+ s.client.control_stream_id.unwrap(),
+ false,
+ )
+ .unwrap();
+
+ s.send_arbitrary_stream_data_server(
+ &settings,
+ s.server.control_stream_id.unwrap(),
+ false,
+ )
+ .unwrap();
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ assert_eq!(s.server.poll(&mut s.pipe.server), Err(Error::SettingsError));
+
+ assert_eq!(s.client.poll(&mut s.pipe.client), Err(Error::SettingsError));
+ }
+
+ #[test]
+ /// Send a single DATAGRAM.
+ fn single_dgram() {
+ let mut buf = [0; 65535];
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ // We'll send default data of 10 bytes on flow ID 0.
+ let result = (11, 0, 1);
+
+ s.send_dgram_client(0).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ s.send_dgram_server(0).unwrap();
+ assert_eq!(s.poll_client(), Ok((0, Event::Datagram)));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(result));
+ }
+
+ #[test]
+ /// Send multiple DATAGRAMs.
+ fn multiple_dgram() {
+ let mut buf = [0; 65535];
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ // We'll send default data of 10 bytes on flow ID 0.
+ let result = (11, 0, 1);
+
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Err(Error::Done));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(0).unwrap();
+
+ assert_eq!(s.poll_client(), Ok((0, Event::Datagram)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Err(Error::Done));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Send more DATAGRAMs than the send queue allows.
+ fn multiple_dgram_overflow() {
+ let mut buf = [0; 65535];
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ // We'll send default data of 10 bytes on flow ID 0.
+ let result = (11, 0, 1);
+
+ // Five DATAGRAMs
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+
+ // Only 3 DATAGRAMs fit in the receive queue, so only 3 can be read.
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Err(Error::Done));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Send a single DATAGRAM and request. Ensure that poll continuously cycles
+ /// between the two types if the data is not read.
+ fn poll_yield_cycling() {
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(1500);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+ config.enable_dgram(true, 100, 100);
+
+ let mut h3_config = Config::new().unwrap();
+ let mut s = Session::with_configs(&mut config, &mut h3_config).unwrap();
+ s.handshake().unwrap();
+
+ // Send request followed by DATAGRAM on client side.
+ let (stream, req) = s.send_request(false).unwrap();
+
+ s.send_body_client(stream, true).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ s.send_dgram_client(0).unwrap();
+
+ // Now let's test the poll counts and yielding.
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Send a single DATAGRAM and request. Ensure that poll yields between
+ /// event types and cleanly exits once the data is read.
+ fn poll_yield_single_read() {
+ let mut buf = [0; 65535];
+
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(1500);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+ config.enable_dgram(true, 100, 100);
+
+ let mut h3_config = Config::new().unwrap();
+ let mut s = Session::with_configs(&mut config, &mut h3_config).unwrap();
+ s.handshake().unwrap();
+
+ // We'll send default data of 10 bytes on flow ID 0.
+ let result = (11, 0, 1);
+
+ // Send request followed by DATAGRAM on client side.
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let body = s.send_body_client(stream, true).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ s.send_dgram_client(0).unwrap();
+
+ // Now let's test the poll counts and yielding.
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(result));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Send response followed by DATAGRAM on server side
+ let resp = s.send_response(stream, false).unwrap();
+
+ let body = s.send_body_server(stream, true).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: resp,
+ has_body: true,
+ };
+
+ s.send_dgram_server(0).unwrap();
+
+ // Now let's test the poll counts and yielding.
+ assert_eq!(s.poll_client(), Ok((0, Event::Datagram)));
+
+ assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_client(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(result));
+
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_client(stream, &mut recv_buf), Ok(body.len()));
+
+ assert_eq!(s.poll_client(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Send multiple DATAGRAMs and requests. Ensure that poll yields between
+ /// event types and cleanly exits once the data is read.
+ fn poll_yield_multi_read() {
+ let mut buf = [0; 65535];
+
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(1500);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+ config.enable_dgram(true, 100, 100);
+
+ let mut h3_config = Config::new().unwrap();
+ let mut s = Session::with_configs(&mut config, &mut h3_config).unwrap();
+ s.handshake().unwrap();
+
+ // 10 bytes on flow ID 0 and 2.
+ let flow_0_result = (11, 0, 1);
+ let flow_2_result = (11, 2, 1);
+
+ // Send requests followed by DATAGRAMs on client side.
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let body = s.send_body_client(stream, true).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(2).unwrap();
+ s.send_dgram_client(2).unwrap();
+ s.send_dgram_client(2).unwrap();
+ s.send_dgram_client(2).unwrap();
+ s.send_dgram_client(2).unwrap();
+
+ // Now let's test the poll counts and yielding.
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Second cycle, start to read
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Third cycle.
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Send response followed by DATAGRAM on server side
+ let resp = s.send_response(stream, false).unwrap();
+
+ let body = s.send_body_server(stream, true).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: resp,
+ has_body: true,
+ };
+
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(0).unwrap();
+ s.send_dgram_server(2).unwrap();
+ s.send_dgram_server(2).unwrap();
+ s.send_dgram_server(2).unwrap();
+ s.send_dgram_server(2).unwrap();
+ s.send_dgram_server(2).unwrap();
+
+ assert_eq!(s.poll_client(), Ok((0, Event::Datagram)));
+
+ assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_client(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
+ // Second cycle, start to read
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_client(stream, &mut recv_buf), Ok(body.len()));
+ assert_eq!(s.poll_client(), Ok((stream, Event::Finished)));
+
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
+ // Third cycle.
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_client(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Tests that the Finished event is not issued for streams of unknown type
+ /// (e.g. GREASE).
+ fn finished_is_for_requests() {
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.client.open_grease_stream(&mut s.pipe.client), Ok(()));
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Tests that streams are marked as finished only once.
+ fn finished_once() {
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ let (stream, req) = s.send_request(false).unwrap();
+ let body = s.send_body_client(stream, true).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Err(Error::Done));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ }
+
+ #[test]
+ /// Tests that the Data event is properly re-armed.
+ fn data_event_rearm() {
+ let bytes = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let mut recv_buf = vec![0; bytes.len()];
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ // Manually send an incomplete DATA frame (i.e. the frame size is longer
+ // than the actual data sent).
+ {
+ let mut d = [42; 10];
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+
+ b.put_varint(frame::DATA_FRAME_TYPE_ID).unwrap();
+ b.put_varint(bytes.len() as u64).unwrap();
+ let off = b.off();
+ s.pipe.client.stream_send(stream, &d[..off], false).unwrap();
+
+ assert_eq!(
+ s.pipe.client.stream_send(stream, &bytes[..5], false),
+ Ok(5)
+ );
+
+ s.advance().ok();
+ }
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Read the available body data.
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(5));
+
+ // Send the remaining DATA payload.
+ assert_eq!(s.pipe.client.stream_send(stream, &bytes[5..], false), Ok(5));
+ s.advance().ok();
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Read the rest of the body data.
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(5));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Send more data.
+ let body = s.send_body_client(stream, false).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+
+ // Send more data, then HEADERS, then more data.
+ let body = s.send_body_client(stream, false).unwrap();
+
+ let trailers = vec![Header::new(b"hello", b"world")];
+
+ s.client
+ .send_headers(&mut s.pipe.client, stream, &trailers, false)
+ .unwrap();
+
+ let ev_trailers = Event::Headers {
+ list: trailers,
+ has_body: true,
+ };
+
+ s.advance().ok();
+
+ s.send_body_client(stream, false).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_trailers)));
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ // Manually send an incomplete DATA frame (i.e. only the header is sent).
+ {
+ let mut d = [42; 10];
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+
+ b.put_varint(frame::DATA_FRAME_TYPE_ID).unwrap();
+ b.put_varint(bytes.len() as u64).unwrap();
+ let off = b.off();
+ s.pipe.client.stream_send(stream, &d[..off], false).unwrap();
+
+ s.advance().ok();
+ }
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Err(Error::Done));
+
+ assert_eq!(s.pipe.client.stream_send(stream, &bytes[..5], false), Ok(5));
+
+ s.advance().ok();
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(5));
+
+ assert_eq!(s.pipe.client.stream_send(stream, &bytes[5..], false), Ok(5));
+ s.advance().ok();
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(5));
+
+ // Buffer multiple data frames.
+ let body = s.send_body_client(stream, false).unwrap();
+ s.send_body_client(stream, false).unwrap();
+ s.send_body_client(stream, false).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ {
+ let mut d = [42; 10];
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+
+ b.put_varint(frame::DATA_FRAME_TYPE_ID).unwrap();
+ b.put_varint(0).unwrap();
+ let off = b.off();
+ s.pipe.client.stream_send(stream, &d[..off], true).unwrap();
+
+ s.advance().ok();
+ }
+
+ let mut recv_buf = vec![0; bytes.len() * 3];
+
+ assert_eq!(
+ s.recv_body_server(stream, &mut recv_buf),
+ Ok(body.len() * 3)
+ );
+ }
+
+ #[test]
+ /// Tests that the Datagram event is properly re-armed.
+ fn dgram_event_rearm() {
+ let mut buf = [0; 65535];
+
+ let mut config = crate::Config::new(crate::PROTOCOL_VERSION).unwrap();
+ config
+ .load_cert_chain_from_pem_file("examples/cert.crt")
+ .unwrap();
+ config
+ .load_priv_key_from_pem_file("examples/cert.key")
+ .unwrap();
+ config.set_application_protos(b"\x02h3").unwrap();
+ config.set_initial_max_data(1500);
+ config.set_initial_max_stream_data_bidi_local(150);
+ config.set_initial_max_stream_data_bidi_remote(150);
+ config.set_initial_max_stream_data_uni(150);
+ config.set_initial_max_streams_bidi(100);
+ config.set_initial_max_streams_uni(5);
+ config.verify_peer(false);
+ config.enable_dgram(true, 100, 100);
+
+ let mut h3_config = Config::new().unwrap();
+ let mut s = Session::with_configs(&mut config, &mut h3_config).unwrap();
+ s.handshake().unwrap();
+
+ // 10 bytes on flow ID 0 and 2.
+ let flow_0_result = (11, 0, 1);
+ let flow_2_result = (11, 2, 1);
+
+ // Send requests followed by DATAGRAMs on client side.
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let body = s.send_body_client(stream, true).unwrap();
+
+ let mut recv_buf = vec![0; body.len()];
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(2).unwrap();
+ s.send_dgram_client(2).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Data)));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ s.send_dgram_client(0).unwrap();
+ s.send_dgram_client(2).unwrap();
+
+ assert_eq!(s.poll_server(), Ok((0, Event::Datagram)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_0_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_dgram_server(&mut buf), Ok(flow_2_result));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ assert_eq!(s.recv_body_server(stream, &mut recv_buf), Ok(body.len()));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+ }
+
+ #[test]
+ fn reset_stream() {
+ let mut buf = [0; 65535];
+
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ // Client sends request.
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ // Server sends response and closes stream.
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ let resp = s.send_response(stream, true).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: resp,
+ has_body: false,
+ };
+
+ assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_client(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+
+ // Client sends RESET_STREAM, closing stream.
+ let frames = [crate::frame::Frame::ResetStream {
+ stream_id: stream,
+ error_code: 42,
+ final_size: 68,
+ }];
+
+ let pkt_type = crate::packet::Type::Short;
+ assert_eq!(
+ s.pipe.send_pkt_to_server(pkt_type, &frames, &mut buf),
+ Ok(39)
+ );
+
+ // Server issues Reset event for the stream.
+ assert_eq!(s.poll_server(), Ok((stream, Event::Reset(42))));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Sending RESET_STREAM again shouldn't trigger another Reset event.
+ assert_eq!(
+ s.pipe.send_pkt_to_server(pkt_type, &frames, &mut buf),
+ Ok(39)
+ );
+
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ }
+
+ #[test]
+ fn reset_finished_at_server() {
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ // Client sends HEADERS and doesn't fin
+ let (stream, _req) = s.send_request(false).unwrap();
+
+ // ..then Client sends RESET_STREAM
+ assert_eq!(
+ s.pipe.client.stream_shutdown(0, crate::Shutdown::Write, 0),
+ Ok(())
+ );
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ // Server receives just a reset
+ assert_eq!(s.poll_server(), Ok((stream, Event::Reset(0))));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Client sends HEADERS and fin
+ let (stream, req) = s.send_request(true).unwrap();
+
+ // ..then Client sends RESET_STREAM
+ assert_eq!(
+ s.pipe.client.stream_shutdown(4, crate::Shutdown::Write, 0),
+ Ok(())
+ );
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: false,
+ };
+
+ // Server receives headers and fin.
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+ }
+
+ #[test]
+ fn reset_finished_at_client() {
+ let mut buf = [0; 65535];
+ let mut s = Session::default().unwrap();
+ s.handshake().unwrap();
+
+ // Client sends HEADERS and doesn't fin
+ let (stream, req) = s.send_request(false).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: true,
+ };
+
+ // Server receives headers.
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Server sends response and doesn't fin
+ s.send_response(stream, false).unwrap();
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ // .. then Server sends RESET_STREAM
+ assert_eq!(
+ s.pipe
+ .server
+ .stream_shutdown(stream, crate::Shutdown::Write, 0),
+ Ok(())
+ );
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ // Client receives Reset only
+ assert_eq!(s.poll_client(), Ok((stream, Event::Reset(0))));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Client sends headers and fin.
+ let (stream, req) = s.send_request(true).unwrap();
+
+ let ev_headers = Event::Headers {
+ list: req,
+ has_body: false,
+ };
+
+ // Server receives headers and fin.
+ assert_eq!(s.poll_server(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_server(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_server(), Err(Error::Done));
+
+ // Server sends response and fin
+ let resp = s.send_response(stream, true).unwrap();
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ // ..then Server sends RESET_STREAM
+ let frames = [crate::frame::Frame::ResetStream {
+ stream_id: stream,
+ error_code: 42,
+ final_size: 68,
+ }];
+
+ let pkt_type = crate::packet::Type::Short;
+ assert_eq!(
+ s.pipe.send_pkt_to_server(pkt_type, &frames, &mut buf),
+ Ok(39)
+ );
+
+ assert_eq!(s.pipe.advance(), Ok(()));
+
+ let ev_headers = Event::Headers {
+ list: resp,
+ has_body: false,
+ };
+
+ // Client receives headers and fin.
+ assert_eq!(s.poll_client(), Ok((stream, ev_headers)));
+ assert_eq!(s.poll_client(), Ok((stream, Event::Finished)));
+ assert_eq!(s.poll_client(), Err(Error::Done));
+ }
}
+#[cfg(feature = "ffi")]
mod ffi;
mod frame;
#[doc(hidden)]
diff --git a/src/h3/qpack/decoder.rs b/src/h3/qpack/decoder.rs
index 240ca69..1bc5755 100644
--- a/src/h3/qpack/decoder.rs
+++ b/src/h3/qpack/decoder.rs
@@ -149,9 +149,7 @@
name.to_vec()
};
- let name = String::from_utf8(name)
- .map_err(|_| Error::InvalidHeaderValue)?;
-
+ let name = name.to_vec();
let value = decode_str(&mut b)?;
trace!(
@@ -198,7 +196,7 @@
// Instead of calling Header::new(), create Header directly
// from `value`, which is already String, but clone `name`
// as it is just a reference.
- let hdr = Header(name.to_string(), value);
+ let hdr = Header(name.to_vec(), value);
out.push(hdr);
},
@@ -215,7 +213,7 @@
}
}
-fn lookup_static(idx: u64) -> Result<(&'static str, &'static str)> {
+fn lookup_static(idx: u64) -> Result<(&'static [u8], &'static [u8])> {
if idx >= super::static_table::STATIC_TABLE.len() as u64 {
return Err(Error::InvalidStaticTableIndex);
}
@@ -254,7 +252,7 @@
Err(Error::BufferTooShort)
}
-fn decode_str<'a>(b: &'a mut octets::Octets) -> Result<String> {
+fn decode_str(b: &mut octets::Octets) -> Result<Vec<u8>> {
let first = b.peek_u8()?;
let huff = first & 0x80 == 0x80;
@@ -269,7 +267,6 @@
val.to_vec()
};
- let val = String::from_utf8(val).map_err(|_| Error::InvalidHeaderValue)?;
Ok(val)
}
diff --git a/src/h3/qpack/encoder.rs b/src/h3/qpack/encoder.rs
index 1307df3..09c8b08 100644
--- a/src/h3/qpack/encoder.rs
+++ b/src/h3/qpack/encoder.rs
@@ -55,7 +55,7 @@
) -> Result<usize> {
let mut b = octets::OctetsMut::with_slice(out);
- // Request Insert Count.
+ // Required Insert Count.
encode_int(0, 0, 8, &mut b)?;
// Base.
@@ -80,14 +80,12 @@
None => {
// Encode as fully literal.
- let name_len = super::huffman::encode_output_length(
- h.name().as_bytes(),
- true,
- )?;
+ let name_len =
+ super::huffman::encode_output_length(h.name(), true)?;
encode_int(name_len as u64, LITERAL | 0x08, 3, &mut b)?;
- super::huffman::encode(h.name().as_bytes(), &mut b, true)?;
+ super::huffman::encode(h.name(), &mut b, true)?;
encode_str(h.value(), 7, &mut b)?;
},
@@ -151,12 +149,12 @@
Ok(())
}
-fn encode_str(v: &str, prefix: usize, b: &mut octets::OctetsMut) -> Result<()> {
- let len = super::huffman::encode_output_length(v.as_bytes(), false)?;
+fn encode_str(v: &[u8], prefix: usize, b: &mut octets::OctetsMut) -> Result<()> {
+ let len = super::huffman::encode_output_length(v, false)?;
encode_int(len as u64, 0x80, prefix, b)?;
- super::huffman::encode(v.as_bytes(), b, false)?;
+ super::huffman::encode(v, b, false)?;
Ok(())
}
diff --git a/src/h3/qpack/mod.rs b/src/h3/qpack/mod.rs
index 6f2bdda..0a23306 100644
--- a/src/h3/qpack/mod.rs
+++ b/src/h3/qpack/mod.rs
@@ -87,15 +87,15 @@
let mut encoded = [0u8; 240];
let headers = vec![
- h3::Header::new(":path", "/rsrc.php/v3/yn/r/rIPZ9Qkrdd9.png"),
- h3::Header::new("accept-encoding", "gzip, deflate, br"),
- h3::Header::new("accept-language", "en-US,en;q=0.9"),
- h3::Header::new("user-agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.70 Safari/537.36"),
- h3::Header::new("accept", "image/webp,image/apng,image/*,*/*;q=0.8"),
- h3::Header::new("referer", "https://static.xx.fbcdn.net/rsrc.php/v3/yT/l/0,cross/dzXGESIlGQQ.css"),
- h3::Header::new(":authority", "static.xx.fbcdn.net"),
- h3::Header::new(":scheme", "https"),
- h3::Header::new(":method", "GET"),
+ h3::Header::new(b":path", b"/rsrc.php/v3/yn/r/rIPZ9Qkrdd9.png"),
+ h3::Header::new(b"accept-encoding", b"gzip, deflate, br"),
+ h3::Header::new(b"accept-language", b"en-US,en;q=0.9"),
+ h3::Header::new(b"user-agent", b"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.70 Safari/537.36"),
+ h3::Header::new(b"accept", b"image/webp,image/apng,image/*,*/*;q=0.8"),
+ h3::Header::new(b"referer", b"https://static.xx.fbcdn.net/rsrc.php/v3/yT/l/0,cross/dzXGESIlGQQ.css"),
+ h3::Header::new(b":authority", b"static.xx.fbcdn.net"),
+ h3::Header::new(b":scheme", b"https"),
+ h3::Header::new(b":method", b"GET"),
];
let mut enc = Encoder::new();
@@ -110,20 +110,20 @@
let mut encoded = [0u8; 35];
let headers_expected = vec![
- crate::h3::Header::new(":status", "200"),
- crate::h3::Header::new(":path", "/HeLlO"),
- crate::h3::Header::new("woot", "woot"),
- crate::h3::Header::new("hello", "WorlD"),
- crate::h3::Header::new("foo", "BaR"),
+ crate::h3::Header::new(b":status", b"200"),
+ crate::h3::Header::new(b":path", b"/HeLlO"),
+ crate::h3::Header::new(b"woot", b"woot"),
+ crate::h3::Header::new(b"hello", b"WorlD"),
+ crate::h3::Header::new(b"foo", b"BaR"),
];
// Header.
let headers_in = vec![
- crate::h3::Header::new(":StAtUs", "200"),
- crate::h3::Header::new(":PaTh", "/HeLlO"),
- crate::h3::Header::new("WooT", "woot"),
- crate::h3::Header::new("hello", "WorlD"),
- crate::h3::Header::new("fOo", "BaR"),
+ crate::h3::Header::new(b":StAtUs", b"200"),
+ crate::h3::Header::new(b":PaTh", b"/HeLlO"),
+ crate::h3::Header::new(b"WooT", b"woot"),
+ crate::h3::Header::new(b"hello", b"WorlD"),
+ crate::h3::Header::new(b"fOo", b"BaR"),
];
let mut enc = Encoder::new();
@@ -136,11 +136,11 @@
// HeaderRef.
let headers_in = vec![
- crate::h3::HeaderRef::new(":StAtUs", "200"),
- crate::h3::HeaderRef::new(":PaTh", "/HeLlO"),
- crate::h3::HeaderRef::new("WooT", "woot"),
- crate::h3::HeaderRef::new("hello", "WorlD"),
- crate::h3::HeaderRef::new("fOo", "BaR"),
+ crate::h3::HeaderRef::new(b":StAtUs", b"200"),
+ crate::h3::HeaderRef::new(b":PaTh", b"/HeLlO"),
+ crate::h3::HeaderRef::new(b"WooT", b"woot"),
+ crate::h3::HeaderRef::new(b"hello", b"WorlD"),
+ crate::h3::HeaderRef::new(b"fOo", b"BaR"),
];
let mut enc = Encoder::new();
diff --git a/src/h3/qpack/static_table.rs b/src/h3/qpack/static_table.rs
index 4010d12..3cc10d4 100644
--- a/src/h3/qpack/static_table.rs
+++ b/src/h3/qpack/static_table.rs
@@ -24,113 +24,113 @@
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-pub const STATIC_TABLE: [(&str, &str); 99] = [
- (":authority", ""),
- (":path", "/"),
- ("age", "0"),
- ("content-disposition", ""),
- ("content-length", "0"),
- ("cookie", ""),
- ("date", ""),
- ("etag", ""),
- ("if-modified-since", ""),
- ("if-none-match", ""),
- ("last-modified", ""),
- ("link", ""),
- ("location", ""),
- ("referer", ""),
- ("set-cookie", ""),
- (":method", "CONNECT"),
- (":method", "DELETE"),
- (":method", "GET"),
- (":method", "HEAD"),
- (":method", "OPTIONS"),
- (":method", "POST"),
- (":method", "PUT"),
- (":scheme", "http"),
- (":scheme", "https"),
- (":status", "103"),
- (":status", "200"),
- (":status", "304"),
- (":status", "404"),
- (":status", "503"),
- ("accept", "*/*"),
- ("accept", "application/dns-message"),
- ("accept-encoding", "gzip, deflate, br"),
- ("accept-ranges", "bytes"),
- ("access-control-allow-headers", "cache-control"),
- ("access-control-allow-headers", "content-type"),
- ("access-control-allow-origin", "*"),
- ("cache-control", "max-age=0"),
- ("cache-control", "max-age=2592000"),
- ("cache-control", "max-age=604800"),
- ("cache-control", "no-cache"),
- ("cache-control", "no-store"),
- ("cache-control", "public, max-age=31536000"),
- ("content-encoding", "br"),
- ("content-encoding", "gzip"),
- ("content-type", "application/dns-message"),
- ("content-type", "application/javascript"),
- ("content-type", "application/json"),
- ("content-type", "application/x-www-form-urlencoded"),
- ("content-type", "image/gif"),
- ("content-type", "image/jpeg"),
- ("content-type", "image/png"),
- ("content-type", "text/css"),
- ("content-type", "text/html; charset=utf-8"),
- ("content-type", "text/plain"),
- ("content-type", "text/plain;charset=utf-8"),
- ("range", "bytes=0-"),
- ("strict-transport-security", "max-age=31536000"),
+pub const STATIC_TABLE: [(&[u8], &[u8]); 99] = [
+ (b":authority", b""),
+ (b":path", b"/"),
+ (b"age", b"0"),
+ (b"content-disposition", b""),
+ (b"content-length", b"0"),
+ (b"cookie", b""),
+ (b"date", b""),
+ (b"etag", b""),
+ (b"if-modified-since", b""),
+ (b"if-none-match", b""),
+ (b"last-modified", b""),
+ (b"link", b""),
+ (b"location", b""),
+ (b"referer", b""),
+ (b"set-cookie", b""),
+ (b":method", b"CONNECT"),
+ (b":method", b"DELETE"),
+ (b":method", b"GET"),
+ (b":method", b"HEAD"),
+ (b":method", b"OPTIONS"),
+ (b":method", b"POST"),
+ (b":method", b"PUT"),
+ (b":scheme", b"http"),
+ (b":scheme", b"https"),
+ (b":status", b"103"),
+ (b":status", b"200"),
+ (b":status", b"304"),
+ (b":status", b"404"),
+ (b":status", b"503"),
+ (b"accept", b"*/*"),
+ (b"accept", b"application/dns-message"),
+ (b"accept-encoding", b"gzip, deflate, br"),
+ (b"accept-ranges", b"bytes"),
+ (b"access-control-allow-headers", b"cache-control"),
+ (b"access-control-allow-headers", b"content-type"),
+ (b"access-control-allow-origin", b"*"),
+ (b"cache-control", b"max-age=0"),
+ (b"cache-control", b"max-age=2592000"),
+ (b"cache-control", b"max-age=604800"),
+ (b"cache-control", b"no-cache"),
+ (b"cache-control", b"no-store"),
+ (b"cache-control", b"public, max-age=31536000"),
+ (b"content-encoding", b"br"),
+ (b"content-encoding", b"gzip"),
+ (b"content-type", b"application/dns-message"),
+ (b"content-type", b"application/javascript"),
+ (b"content-type", b"application/json"),
+ (b"content-type", b"application/x-www-form-urlencoded"),
+ (b"content-type", b"image/gif"),
+ (b"content-type", b"image/jpeg"),
+ (b"content-type", b"image/png"),
+ (b"content-type", b"text/css"),
+ (b"content-type", b"text/html; charset=utf-8"),
+ (b"content-type", b"text/plain"),
+ (b"content-type", b"text/plain;charset=utf-8"),
+ (b"range", b"bytes=0-"),
+ (b"strict-transport-security", b"max-age=31536000"),
(
- "strict-transport-security",
- "max-age=31536000; includesubdomains",
+ b"strict-transport-security",
+ b"max-age=31536000; includesubdomains",
),
(
- "strict-transport-security",
- "max-age=31536000; includesubdomains; preload",
+ b"strict-transport-security",
+ b"max-age=31536000; includesubdomains; preload",
),
- ("vary", "accept-encoding"),
- ("vary", "origin"),
- ("x-content-type-options", "nosniff"),
- ("x-xss-protection", "1; mode=block"),
- (":status", "100"),
- (":status", "204"),
- (":status", "206"),
- (":status", "302"),
- (":status", "400"),
- (":status", "403"),
- (":status", "421"),
- (":status", "425"),
- (":status", "500"),
- ("accept-language", ""),
- ("access-control-allow-credentials", "FALSE"),
- ("access-control-allow-credentials", "TRUE"),
- ("access-control-allow-headers", "*"),
- ("access-control-allow-methods", "get"),
- ("access-control-allow-methods", "get, post, options"),
- ("access-control-allow-methods", "options"),
- ("access-control-expose-headers", "content-length"),
- ("access-control-request-headers", "content-type"),
- ("access-control-request-method", "get"),
- ("access-control-request-method", "post"),
- ("alt-svc", "clear"),
- ("authorization", ""),
+ (b"vary", b"accept-encoding"),
+ (b"vary", b"origin"),
+ (b"x-content-type-options", b"nosniff"),
+ (b"x-xss-protection", b"1; mode=block"),
+ (b":status", b"100"),
+ (b":status", b"204"),
+ (b":status", b"206"),
+ (b":status", b"302"),
+ (b":status", b"400"),
+ (b":status", b"403"),
+ (b":status", b"421"),
+ (b":status", b"425"),
+ (b":status", b"500"),
+ (b"accept-language", b""),
+ (b"access-control-allow-credentials", b"FALSE"),
+ (b"access-control-allow-credentials", b"TRUE"),
+ (b"access-control-allow-headers", b"*"),
+ (b"access-control-allow-methods", b"get"),
+ (b"access-control-allow-methods", b"get, post, options"),
+ (b"access-control-allow-methods", b"options"),
+ (b"access-control-expose-headers", b"content-length"),
+ (b"access-control-request-headers", b"content-type"),
+ (b"access-control-request-method", b"get"),
+ (b"access-control-request-method", b"post"),
+ (b"alt-svc", b"clear"),
+ (b"authorization", b""),
(
- "content-security-policy",
- "script-src 'none'; object-src 'none'; base-uri 'none'",
+ b"content-security-policy",
+ b"script-src 'none'; object-src 'none'; base-uri 'none'",
),
- ("early-data", "1"),
- ("expect-ct", ""),
- ("forwarded", ""),
- ("if-range", ""),
- ("origin", ""),
- ("purpose", "prefetch"),
- ("server", ""),
- ("timing-allow-origin", "*"),
- ("upgrade-insecure-requests", "1"),
- ("user-agent", ""),
- ("x-forwarded-for", ""),
- ("x-frame-options", "deny"),
- ("x-frame-options", "sameorigin"),
+ (b"early-data", b"1"),
+ (b"expect-ct", b""),
+ (b"forwarded", b""),
+ (b"if-range", b""),
+ (b"origin", b""),
+ (b"purpose", b"prefetch"),
+ (b"server", b""),
+ (b"timing-allow-origin", b"*"),
+ (b"upgrade-insecure-requests", b"1"),
+ (b"user-agent", b""),
+ (b"x-forwarded-for", b""),
+ (b"x-frame-options", b"deny"),
+ (b"x-frame-options", b"sameorigin"),
];
diff --git a/src/h3/stream.rs b/src/h3/stream.rs
index f2f8f0c..0f39414 100644
--- a/src/h3/stream.rs
+++ b/src/h3/stream.rs
@@ -73,6 +73,9 @@
/// Reading and discarding data.
Drain,
+
+ /// All data has been read.
+ Finished,
}
impl Type {
@@ -135,6 +138,9 @@
/// Whether the stream has been locally initialized.
local_initialized: bool,
+
+ /// Whether a `Data` event has been triggered for this stream.
+ data_event_triggered: bool,
}
impl Stream {
@@ -171,9 +177,15 @@
is_local,
remote_initialized: false,
local_initialized: false,
+
+ data_event_triggered: false,
}
}
+ pub fn ty(&self) -> Option<Type> {
+ self.ty
+ }
+
pub fn state(&self) -> State {
self.state
}
@@ -261,6 +273,9 @@
(frame::HEADERS_FRAME_TYPE_ID, false) =>
self.remote_initialized = true,
+ (frame::DATA_FRAME_TYPE_ID, false) =>
+ return Err(Error::FrameUnexpected),
+
(frame::CANCEL_PUSH_FRAME_TYPE_ID, _) =>
return Err(Error::FrameUnexpected),
@@ -275,9 +290,7 @@
// All other frames can be ignored regardless of stream
// state.
- (_, false) => (),
-
- (_, true) => (),
+ _ => (),
}
}
},
@@ -347,7 +360,18 @@
) -> Result<()> {
let buf = &mut self.state_buf[self.state_off..self.state_len];
- let (read, _) = conn.stream_recv(self.id, buf)?;
+ let read = match conn.stream_recv(self.id, buf) {
+ Ok((len, _)) => len,
+
+ Err(e) => {
+ // The stream is not readable anymore, so re-arm the Data event.
+ if e == crate::Error::Done {
+ self.reset_data_event();
+ }
+
+ return Err(e.into());
+ },
+ };
trace!(
"{} read {} bytes on stream {}",
@@ -359,6 +383,8 @@
self.state_off += read;
if !self.state_buffer_complete() {
+ self.reset_data_event();
+
return Err(Error::Done);
}
@@ -416,6 +442,9 @@
/// Tries to parse a frame from the state buffer.
pub fn try_consume_frame(&mut self) -> Result<frame::Frame> {
+ // Processing a frame other than DATA, so re-arm the Data event.
+ self.reset_data_event();
+
// TODO: properly propagate frame parsing errors.
let frame = frame::Frame::from_bytes(
self.frame_type.unwrap(),
@@ -431,18 +460,39 @@
/// Tries to read DATA payload from the transport stream.
pub fn try_consume_data(
&mut self, conn: &mut crate::Connection, out: &mut [u8],
- ) -> Result<usize> {
+ ) -> Result<(usize, bool)> {
let left = std::cmp::min(out.len(), self.state_len - self.state_off);
- let (len, _) = conn.stream_recv(self.id, &mut out[..left])?;
+ let (len, fin) = match conn.stream_recv(self.id, &mut out[..left]) {
+ Ok(v) => v,
+
+ Err(e) => {
+ // The stream is not readable anymore, so re-arm the Data event.
+ if e == crate::Error::Done {
+ self.reset_data_event();
+ }
+
+ return Err(e.into());
+ },
+ };
self.state_off += len;
+ // The stream is not readable anymore, so re-arm the Data event.
+ if !conn.stream_readable(self.id) {
+ self.reset_data_event();
+ }
+
if self.state_buffer_complete() {
self.state_transition(State::FrameType, 1, true)?;
}
- Ok(len)
+ Ok((len, fin))
+ }
+
+ /// Marks the stream as finished.
+ pub fn finished(&mut self) {
+ let _ = self.state_transition(State::Finished, 0, false);
}
/// Tries to read DATA payload from the given cursor.
@@ -466,6 +516,25 @@
Ok(len)
}
+ /// Tries to update the data triggered state for the stream.
+ ///
+ /// This returns `true` if a Data event was not already triggered before
+ /// the last reset, and updates the state. Returns `false` otherwise.
+ pub fn try_trigger_data_event(&mut self) -> bool {
+ if self.data_event_triggered {
+ return false;
+ }
+
+ self.data_event_triggered = true;
+
+ true
+ }
+
+ /// Resets the data triggered state.
+ fn reset_data_event(&mut self) {
+ self.data_event_triggered = false;
+ }
+
/// Returns true if the state buffer has enough data to complete the state.
fn state_buffer_complete(&self) -> bool {
self.state_off == self.state_len
@@ -511,9 +580,10 @@
let mut b = octets::OctetsMut::with_slice(&mut d);
let frame = frame::Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: None,
grease: None,
};
@@ -566,9 +636,10 @@
let mut b = octets::OctetsMut::with_slice(&mut d);
let frame = frame::Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: None,
grease: None,
};
@@ -630,9 +701,10 @@
let goaway = frame::Frame::GoAway { id: 0 };
let settings = frame::Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: None,
grease: None,
};
@@ -672,9 +744,10 @@
let hdrs = frame::Frame::Headers { header_block };
let settings = frame::Frame::Settings {
- max_header_list_size: Some(0),
+ max_field_section_size: Some(0),
qpack_max_table_capacity: Some(0),
qpack_blocked_streams: Some(0),
+ h3_datagram: None,
grease: None,
};
@@ -910,4 +983,28 @@
.unwrap();
assert_eq!(stream.state, State::Drain);
}
+
+ #[test]
+ fn data_before_headers() {
+ let mut stream = Stream::new(0, false);
+
+ let mut d = vec![42; 128];
+ let mut b = octets::OctetsMut::with_slice(&mut d);
+
+ let data = frame::Frame::Data {
+ payload: vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
+ };
+
+ data.to_bytes(&mut b).unwrap();
+
+ let mut cursor = std::io::Cursor::new(d);
+
+ // Parse the DATA frame type.
+ stream.try_fill_buffer_for_tests(&mut cursor).unwrap();
+
+ let frame_ty = stream.try_consume_varint().unwrap();
+ assert_eq!(frame_ty, frame::DATA_FRAME_TYPE_ID);
+
+ assert_eq!(stream.set_frame_type(frame_ty), Err(Error::FrameUnexpected));
+ }
}
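The stream changes above introduce a `data_event_triggered` flag so that at most
one Data event is reported per readable transition, and the flag is re-armed
whenever the stream stops being readable. A minimal sketch of that
edge-triggered pattern (the `DataEventGate` name is illustrative, not part of
the quiche API):

struct DataEventGate {
    triggered: bool,
}

impl DataEventGate {
    // Returns true only on the first call after a reset, so the caller
    // reports at most one Data event until the gate is re-armed.
    fn try_trigger(&mut self) -> bool {
        if self.triggered {
            return false;
        }

        self.triggered = true;

        true
    }

    // Called when the stream is no longer readable (e.g. stream_recv()
    // returned Done), so the next readable transition fires a new event.
    fn reset(&mut self) {
        self.triggered = false;
    }
}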
diff --git a/src/lib.rs b/src/lib.rs
index 1f7f98b..d8be83d 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -54,15 +54,25 @@
//! ```
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
//! # let server_name = "quic.tech";
-//! # let scid = [0xba; 16];
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let to = "127.0.0.1:1234".parse().unwrap();
//! // Client connection.
-//! let conn = quiche::connect(Some(&server_name), &scid, &mut config)?;
+//! let conn = quiche::connect(Some(&server_name), &scid, to, &mut config)?;
//!
//! // Server connection.
-//! let conn = quiche::accept(&scid, None, &mut config)?;
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! let conn = quiche::accept(&scid, None, from, &mut config)?;
//! # Ok::<(), quiche::Error>(())
//! ```
//!
+//! In both cases, the application is responsible for generating a new source
+//! connection ID that will be used to identify the new connection.
+//!
+//! The application also needs to pass the address of the remote peer of the
+//! connection: for a client that is the address of the server it is trying to
+//! connect to, and for a server that is the address of the client that
+//! initiated the connection.
+//!
//! ## Handling incoming packets
//!
//! Using the connection's [`recv()`] method the application can process
@@ -72,12 +82,15 @@
//! # let mut buf = [0; 512];
//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config)?;
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
//! loop {
-//! let read = socket.recv(&mut buf).unwrap();
+//! let (read, from) = socket.recv_from(&mut buf).unwrap();
//!
-//! let read = match conn.recv(&mut buf[..read]) {
+//! let recv_info = quiche::RecvInfo { from };
+//!
+//! let read = match conn.recv(&mut buf[..read], recv_info) {
//! Ok(v) => v,
//!
//! Err(quiche::Error::Done) => {
@@ -94,6 +107,10 @@
//! # Ok::<(), quiche::Error>(())
//! ```
//!
+//! The application has to pass a [`RecvInfo`] structure in order to provide
+//! additional information about the received packet (such as the address it
+//! was received from).
+//!
//! ## Generating outgoing packets
//!
//! Outgoing packet are generated using the connection's [`send()`] method
@@ -103,10 +120,11 @@
//! # let mut out = [0; 512];
//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config)?;
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
//! loop {
-//! let write = match conn.send(&mut out) {
+//! let (write, send_info) = match conn.send(&mut out) {
//! Ok(v) => v,
//!
//! Err(quiche::Error::Done) => {
@@ -120,19 +138,24 @@
//! },
//! };
//!
-//! socket.send(&out[..write]).unwrap();
+//! socket.send_to(&out[..write], &send_info.to).unwrap();
//! }
//! # Ok::<(), quiche::Error>(())
//! ```
//!
+//! Along with the packet, [`send()`] returns a [`SendInfo`] structure with
+//! additional information about the newly created packet (such as the address
+//! the packet should be sent to).
+//!
//! When packets are sent, the application is responsible for maintaining a
//! timer to react to time-based connection events. The timer expiration can be
//! obtained using the connection's [`timeout()`] method.
//!
//! ```
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config)?;
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
//! let timeout = conn.timeout();
//! # Ok::<(), quiche::Error>(())
//! ```
@@ -146,14 +169,15 @@
//! # let mut out = [0; 512];
//! # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config)?;
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
//! // Timeout expired, handle it.
//! conn.on_timeout();
//!
//! // Send more packets as needed after timeout.
//! loop {
-//! let write = match conn.send(&mut out) {
+//! let (write, send_info) = match conn.send(&mut out) {
//! Ok(v) => v,
//!
//! Err(quiche::Error::Done) => {
@@ -167,7 +191,7 @@
//! },
//! };
//!
-//! socket.send(&out[..write]).unwrap();
+//! socket.send_to(&out[..write], &send_info.to).unwrap();
//! }
//! # Ok::<(), quiche::Error>(())
//! ```
@@ -181,8 +205,9 @@
//!
//! ```no_run
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config)?;
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
//! if conn.is_established() {
//! // Handshake completed, send some data on stream 0.
//! conn.stream_send(0, b"hello", true)?;
@@ -200,8 +225,9 @@
//! ```no_run
//! # let mut buf = [0; 512];
//! # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
-//! # let scid = [0xba; 16];
-//! # let mut conn = quiche::accept(&scid, None, &mut config)?;
+//! # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+//! # let from = "127.0.0.1:1234".parse().unwrap();
+//! # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
//! if conn.is_established() {
//! // Iterate over readable streams.
//! for stream_id in conn.readable() {
@@ -222,7 +248,9 @@
//! [`connect()`]: fn.connect.html
//! [`accept()`]: fn.accept.html
//! [`recv()`]: struct.Connection.html#method.recv
+//! [`RecvInfo`]: struct.RecvInfo.html
//! [`send()`]: struct.Connection.html#method.send
+//! [`SendInfo`]: struct.SendInfo.html
//! [`timeout()`]: struct.Connection.html#method.timeout
//! [`on_timeout()`]: struct.Connection.html#method.on_timeout
//! [`stream_send()`]: struct.Connection.html#method.stream_send
@@ -260,6 +288,8 @@
//! [`CongestionControlAlgorithm`]: enum.CongestionControlAlgorithm.html
#![allow(improper_ctypes)]
+#![allow(clippy::suspicious_operation_groupings)]
+#![allow(clippy::upper_case_acronyms)]
#![warn(missing_docs)]
#[macro_use]
@@ -268,15 +298,20 @@
use std::cmp;
use std::time;
+use std::net::SocketAddr;
+
use std::pin::Pin;
use std::str::FromStr;
+use std::collections::VecDeque;
+
/// The current QUIC wire version.
-pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_DRAFT29;
+pub const PROTOCOL_VERSION: u32 = PROTOCOL_VERSION_V1;
/// Supported QUIC versions.
///
/// Note that the older ones might not be fully supported.
+const PROTOCOL_VERSION_V1: u32 = 0x0000_0001;
const PROTOCOL_VERSION_DRAFT27: u32 = 0xff00_001b;
const PROTOCOL_VERSION_DRAFT28: u32 = 0xff00_001c;
const PROTOCOL_VERSION_DRAFT29: u32 = 0xff00_001d;
@@ -306,6 +341,9 @@
// The highest possible stream ID allowed.
const MAX_STREAM_ID: u64 = 1 << 60;
+// The default max_datagram_size used in congestion control.
+const MAX_SEND_UDP_PAYLOAD_SIZE: usize = 1200;
+
// The default length of DATAGRAM queues.
const DEFAULT_MAX_DGRAM_QUEUE_LEN: usize = 0;
@@ -313,6 +351,14 @@
// frames size. We enforce the recommendation for forward compatibility.
const MAX_DGRAM_FRAME_SIZE: u64 = 65536;
+// The length of the payload length field.
+const PAYLOAD_LENGTH_LEN: usize = 2;
+
+// The number of undecryptable packets that can be buffered.
+const MAX_UNDECRYPTABLE_PACKETS: usize = 10;
+
+const RESERVED_VERSION_MASK: u32 = 0xfafafafa;
+
/// A specialized [`Result`] type for quiche operations.
///
/// This type is used throughout quiche's public API for any operation that
@@ -323,52 +369,65 @@
/// A QUIC error.
#[derive(Clone, Copy, Debug, PartialEq)]
-#[repr(C)]
pub enum Error {
/// There is no more work to do.
- Done = -1,
+ Done,
/// The provided buffer is too short.
- BufferTooShort = -2,
+ BufferTooShort,
/// The provided packet cannot be parsed because its version is unknown.
- UnknownVersion = -3,
+ UnknownVersion,
/// The provided packet cannot be parsed because it contains an invalid
/// frame.
- InvalidFrame = -4,
+ InvalidFrame,
/// The provided packet cannot be parsed.
- InvalidPacket = -5,
+ InvalidPacket,
/// The operation cannot be completed because the connection is in an
/// invalid state.
- InvalidState = -6,
+ InvalidState,
/// The operation cannot be completed because the stream is in an
/// invalid state.
- InvalidStreamState = -7,
+ ///
+ /// The stream ID is provided as associated data.
+ InvalidStreamState(u64),
/// The peer's transport params cannot be parsed.
- InvalidTransportParam = -8,
+ InvalidTransportParam,
/// A cryptographic operation failed.
- CryptoFail = -9,
+ CryptoFail,
/// The TLS handshake failed.
- TlsFail = -10,
+ TlsFail,
/// The peer violated the local flow control limits.
- FlowControl = -11,
+ FlowControl,
/// The peer violated the local stream limits.
- StreamLimit = -12,
+ StreamLimit,
+
+ /// The specified stream was stopped by the peer.
+ ///
+ /// The error code sent as part of the `STOP_SENDING` frame is provided as
+ /// associated data.
+ StreamStopped(u64),
+
+ /// The specified stream was reset by the peer.
+ ///
+ /// The error code sent as part of the `RESET_STREAM` frame is provided as
+ /// associated data.
+ StreamReset(u64),
/// The received data exceeds the stream's final size.
- FinalSize = -13,
+ FinalSize,
/// Error in congestion control.
- CongestionControl = -14,
+ CongestionControl,
}
impl Error {
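Since `InvalidStreamState`, `StreamStopped` and `StreamReset` now carry
associated data, callers can match on the payload directly. A hedged sketch of
how an application might handle the new variants when reading stream data
(buffer handling elided; this is not code from the patch):

fn read_stream(conn: &mut quiche::Connection, stream_id: u64, buf: &mut [u8]) {
    match conn.stream_recv(stream_id, buf) {
        Ok((len, fin)) => {
            // `len` bytes were read; `fin` signals the end of the stream.
            let _ = (len, fin);
        },

        Err(quiche::Error::Done) => {
            // Nothing to read right now.
        },

        Err(quiche::Error::StreamReset(code)) => {
            // The peer reset the stream; `code` is the error code carried by
            // the RESET_STREAM frame.
            let _ = code;
        },

        Err(_) => {
            // Treat everything else as fatal in this sketch.
        },
    }
}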
@@ -376,7 +435,7 @@
match self {
Error::Done => 0x0,
Error::InvalidFrame => 0x7,
- Error::InvalidStreamState => 0x5,
+ Error::InvalidStreamState(..) => 0x5,
Error::InvalidTransportParam => 0x8,
Error::FlowControl => 0x3,
Error::StreamLimit => 0x4,
@@ -385,8 +444,26 @@
}
}
+ #[cfg(feature = "ffi")]
fn to_c(self) -> libc::ssize_t {
- self as _
+ match self {
+ Error::Done => -1,
+ Error::BufferTooShort => -2,
+ Error::UnknownVersion => -3,
+ Error::InvalidFrame => -4,
+ Error::InvalidPacket => -5,
+ Error::InvalidState => -6,
+ Error::InvalidStreamState(_) => -7,
+ Error::InvalidTransportParam => -8,
+ Error::CryptoFail => -9,
+ Error::TlsFail => -10,
+ Error::FlowControl => -11,
+ Error::StreamLimit => -12,
+ Error::FinalSize => -13,
+ Error::CongestionControl => -14,
+ Error::StreamStopped { .. } => -15,
+ Error::StreamReset { .. } => -16,
+ }
}
}
@@ -408,6 +485,36 @@
}
}
+/// Ancillary information about incoming packets.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct RecvInfo {
+ /// The address the packet was received from.
+ pub from: SocketAddr,
+}
+
+/// Ancillary information about outgoing packets.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub struct SendInfo {
+ /// The address the packet should be sent to.
+ pub to: SocketAddr,
+
+ /// The time to send the packet out.
+ pub at: time::Instant,
+}
+
+/// Represents information carried by `CONNECTION_CLOSE` frames.
+#[derive(Clone, Debug, PartialEq)]
+pub struct ConnectionError {
+ /// Whether the error came from the application or the transport layer.
+ pub is_app: bool,
+
+ /// The error code carried by the `CONNECTION_CLOSE` frame.
+ pub error_code: u64,
+
+ /// The reason carried by the `CONNECTION_CLOSE` frame.
+ pub reason: Vec<u8>,
+}
+
/// The stream's side to shutdown.
///
/// This should be used when calling [`stream_shutdown()`].
@@ -422,6 +529,20 @@
Write = 1,
}
+/// Qlog logging level.
+#[repr(C)]
+#[cfg(feature = "qlog")]
+pub enum QlogLevel {
+ /// Logs any events of Core importance.
+ Core = 0,
+
+ /// Logs any events of Core and Base importance.
+ Base = 1,
+
+ /// Logs any events of Core, Base and Extra importance.
+ Extra = 2,
+}
+
/// Stores configuration shared between multiple connections.
pub struct Config {
local_transport_params: TransportParams,
@@ -440,6 +561,13 @@
dgram_recv_max_queue_len: usize,
dgram_send_max_queue_len: usize,
+
+ max_send_udp_payload_size: usize,
+}
+
+// See https://quicwg.org/base-drafts/rfc9000.html#section-15
+fn is_reserved_version(version: u32) -> bool {
+ version & RESERVED_VERSION_MASK == version
}
impl Config {
@@ -452,6 +580,10 @@
/// # Ok::<(), quiche::Error>(())
/// ```
pub fn new(version: u32) -> Result<Config> {
+ if !is_reserved_version(version) && !version_is_supported(version) {
+ return Err(Error::UnknownVersion);
+ }
+
let tls_ctx = tls::Context::new()?;
Ok(Config {
@@ -465,6 +597,8 @@
dgram_recv_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
dgram_send_max_queue_len: DEFAULT_MAX_DGRAM_QUEUE_LEN,
+
+ max_send_udp_payload_size: MAX_SEND_UDP_PAYLOAD_SIZE,
})
}
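`Config::new()` now rejects versions that are neither supported nor reserved
for greasing. A standalone sketch of the reserved-version test, with the
constant duplicated here for illustration:

const RESERVED_VERSION_MASK: u32 = 0xfafa_fafa;

fn is_reserved_version(version: u32) -> bool {
    // Accepts versions with no bits set outside the 0xfa pattern in each
    // byte, which covers the 0x?a?a?a?a greasing versions from RFC 9000.
    version & RESERVED_VERSION_MASK == version
}

fn main() {
    assert!(is_reserved_version(0x1a2a_3a4a)); // greasing version
    assert!(!is_reserved_version(0x0000_0001)); // QUIC v1, handled by version_is_supported()
}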
@@ -560,6 +694,20 @@
self.tls_ctx.enable_keylog();
}
+ /// Configures the session ticket key material.
+ ///
+ /// On the server this key will be used to encrypt and decrypt session
+ /// tickets, used to perform session resumption without server-side state.
+ ///
+ /// By default a key is generated internally, and rotated regularly, so
+ /// applications don't need to call this unless they need to use a
+ /// specific key (e.g. in order to support resumption across multiple
+ /// servers), in which case the application is also responsible for
+ /// rotating the key to provide forward secrecy.
+ pub fn set_ticket_key(&mut self, key: &[u8]) -> Result<()> {
+ self.tls_ctx.set_ticket_key(key)
+ }
+
/// Enables sending or receiving early data.
pub fn enable_early_data(&mut self) {
self.tls_ctx.set_early_data_enabled(true);
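For deployments where several servers must resume each other's sessions, the
new `set_ticket_key()` can be combined with `enable_early_data()`. A hedged
sketch; the key source is hypothetical, and the 48-byte length follows
BoringSSL's ticket key convention:

fn server_config_with_resumption(key: &[u8]) -> Result<quiche::Config, quiche::Error> {
    let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;

    // `key` is assumed to be 48 bytes of secret material shared across the
    // server fleet (e.g. from a key-management service) and rotated regularly.
    config.set_ticket_key(key)?;

    // Allow clients to send 0-RTT data when resuming a session.
    config.enable_early_data();

    Ok(config)
}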
@@ -586,7 +734,7 @@
/// # Ok::<(), quiche::Error>(())
/// ```
pub fn set_application_protos(&mut self, protos: &[u8]) -> Result<()> {
- let mut b = octets::Octets::with_slice(&protos);
+ let mut b = octets::Octets::with_slice(protos);
let mut protos_list = Vec::new();
@@ -599,7 +747,7 @@
self.tls_ctx.set_alpn(&self.application_protos)
}
- /// Sets the `max_idle_timeout` transport parameter.
+ /// Sets the `max_idle_timeout` transport parameter, in milliseconds.
///
/// The default value is infinite, that is, no timeout is used.
pub fn set_max_idle_timeout(&mut self, v: u64) {
@@ -609,8 +757,15 @@
/// Sets the `max_udp_payload_size` transport parameter.
///
/// The default value is `65527`.
- pub fn set_max_udp_payload_size(&mut self, v: u64) {
- self.local_transport_params.max_udp_payload_size = v;
+ pub fn set_max_recv_udp_payload_size(&mut self, v: usize) {
+ self.local_transport_params.max_udp_payload_size = v as u64;
+ }
+
+ /// Sets the maximum outgoing UDP payload size.
+ ///
+ /// The default and minimum value is `1200`.
+ pub fn set_max_send_udp_payload_size(&mut self, v: usize) {
+ self.max_send_udp_payload_size = cmp::max(v, MAX_SEND_UDP_PAYLOAD_SIZE);
}
/// Sets the `initial_max_data` transport parameter.
@@ -722,7 +877,7 @@
/// Sets the congestion control algorithm used by string.
///
- /// The default value is `reno`. On error `Error::CongestionControl`
+ /// The default value is `cubic`. On error `Error::CongestionControl`
/// will be returned.
///
/// ## Examples:
@@ -777,10 +932,10 @@
version: u32,
/// Peer's connection ID.
- dcid: Vec<u8>,
+ dcid: ConnectionId<'static>,
/// Local connection ID.
- scid: Vec<u8>,
+ scid: ConnectionId<'static>,
/// Unique opaque ID for the connection that can be used for logging.
trace_id: String,
@@ -797,9 +952,17 @@
/// TLS handshake state.
handshake: tls::Handshake,
+ /// Serialized TLS session buffer.
+ ///
+ /// This field is populated when a new session ticket is processed on the
+ /// client. On the server this is empty.
+ session: Option<Vec<u8>>,
+
/// Loss recovery and congestion control state.
recovery: recovery::Recovery,
+ peer_addr: SocketAddr,
+
/// List of supported application protocols.
application_protos: Vec<Vec<u8>>,
@@ -809,6 +972,9 @@
/// Total number of sent packets.
sent_count: usize,
+ /// Total number of packets sent with data retransmitted.
+ retrans_count: usize,
+
/// Total number of bytes received from the peer.
rx_data: u64,
@@ -822,6 +988,9 @@
/// Whether we send MAX_DATA frame.
almost_full: bool,
+ /// Number of stream data bytes that can be buffered.
+ tx_cap: usize,
+
/// Total number of bytes sent to the peer.
tx_data: u64,
@@ -832,28 +1001,37 @@
/// is verified.
max_send_bytes: usize,
+ /// Total number of bytes retransmitted over the connection.
+ /// This counts only STREAM and CRYPTO data.
+ stream_retrans_bytes: u64,
+
+ /// Total number of bytes sent over the connection.
+ sent_bytes: u64,
+
+ /// Total number of bytes received over the connection.
+ recv_bytes: u64,
+
/// Streams map, indexed by stream ID.
streams: stream::StreamMap,
/// Peer's original destination connection ID. Used by the client to
/// validate the server's transport parameter.
- odcid: Option<Vec<u8>>,
+ odcid: Option<ConnectionId<'static>>,
/// Peer's retry source connection ID. Used by the client during stateless
/// retry to validate the server's transport parameter.
- rscid: Option<Vec<u8>>,
+ rscid: Option<ConnectionId<'static>>,
/// Received address verification token.
token: Option<Vec<u8>>,
- /// Error code to be sent to the peer in CONNECTION_CLOSE.
- error: Option<u64>,
+ /// Error code and reason to be sent to the peer in a CONNECTION_CLOSE
+ /// frame.
+ local_error: Option<ConnectionError>,
- /// Error code to be sent to the peer in APPLICATION_CLOSE.
- app_error: Option<u64>,
-
- /// Error reason to be sent to the peer in APPLICATION_CLOSE.
- app_reason: Vec<u8>,
+ /// Error code and reason received from the peer in a CONNECTION_CLOSE
+ /// frame.
+ peer_error: Option<ConnectionError>,
/// Received path challenge.
challenge: Option<Vec<u8>>,
@@ -867,6 +1045,12 @@
/// Draining timeout expiration time.
draining_timer: Option<time::Instant>,
+ /// List of raw packets that were received before they could be decrypted.
+ undecryptable_pkts: VecDeque<(Vec<u8>, RecvInfo)>,
+
+ /// The negotiated ALPN protocol.
+ alpn: Vec<u8>,
+
/// Whether this is a server-side connection.
is_server: bool,
@@ -892,9 +1076,15 @@
/// Whether the peer's transport parameters were parsed.
parsed_peer_transport_params: bool,
- /// Whether the HANDSHAKE_DONE has been sent.
+ /// Whether the connection handshake has been completed.
+ handshake_completed: bool,
+
+ /// Whether the HANDSHAKE_DONE frame has been sent.
handshake_done_sent: bool,
+ /// Whether the HANDSHAKE_DONE frame has been acked.
+ handshake_done_acked: bool,
+
/// Whether the connection handshake has been confirmed.
handshake_confirmed: bool,
@@ -905,23 +1095,24 @@
/// Whether the connection is closed.
closed: bool,
+ /// Whether the connection was timed out.
+ timed_out: bool,
+
/// Whether to send GREASE.
grease: bool,
/// TLS keylog writer.
- keylog: Option<Box<dyn std::io::Write + Send>>,
+ keylog: Option<Box<dyn std::io::Write + Send + Sync>>,
- /// Qlog streaming output.
#[cfg(feature = "qlog")]
- qlog_streamer: Option<qlog::QlogStreamer>,
-
- /// Whether peer transport parameters were qlogged.
- #[cfg(feature = "qlog")]
- qlogged_peer_params: bool,
+ qlog: QlogInfo,
/// DATAGRAM queues.
dgram_recv_queue: dgram::DatagramQueue,
dgram_send_queue: dgram::DatagramQueue,
+
+ /// Whether to emit DATAGRAM frames in the next packet.
+ emit_dgram: bool,
}
/// Creates a new server-side connection.
@@ -937,14 +1128,17 @@
///
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
-/// # let scid = [0xba; 16];
-/// let conn = quiche::accept(&scid, None, &mut config)?;
+/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+/// # let from = "127.0.0.1:1234".parse().unwrap();
+/// let conn = quiche::accept(&scid, None, from, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
+#[inline]
pub fn accept(
- scid: &[u8], odcid: Option<&[u8]>, config: &mut Config,
+ scid: &ConnectionId, odcid: Option<&ConnectionId>, from: SocketAddr,
+ config: &mut Config,
) -> Result<Pin<Box<Connection>>> {
- let conn = Connection::new(scid, odcid, config, true)?;
+ let conn = Connection::new(scid, odcid, from, config, true)?;
Ok(conn)
}
@@ -960,14 +1154,17 @@
/// ```no_run
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let server_name = "quic.tech";
-/// # let scid = [0xba; 16];
-/// let conn = quiche::connect(Some(&server_name), &scid, &mut config)?;
+/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+/// # let to = "127.0.0.1:1234".parse().unwrap();
+/// let conn = quiche::connect(Some(&server_name), &scid, to, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
+#[inline]
pub fn connect(
- server_name: Option<&str>, scid: &[u8], config: &mut Config,
+ server_name: Option<&str>, scid: &ConnectionId, to: SocketAddr,
+ config: &mut Config,
) -> Result<Pin<Box<Connection>>> {
- let conn = Connection::new(scid, None, config, false)?;
+ let mut conn = Connection::new(scid, None, to, config, false)?;
if let Some(server_name) = server_name {
conn.handshake.set_host_name(server_name)?;
@@ -999,8 +1196,9 @@
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
+#[inline]
pub fn negotiate_version(
- scid: &[u8], dcid: &[u8], out: &mut [u8],
+ scid: &ConnectionId, dcid: &ConnectionId, out: &mut [u8],
) -> Result<usize> {
packet::negotiate_version(scid, dcid, out)
}
@@ -1026,12 +1224,12 @@
/// # let mut config = quiche::Config::new(0xbabababa)?;
/// # let mut buf = [0; 512];
/// # let mut out = [0; 512];
-/// # let scid = [0xba; 16];
+/// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # fn mint_token(hdr: &quiche::Header, src: &std::net::SocketAddr) -> Vec<u8> {
/// # vec![]
/// # }
-/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<&'a [u8]> {
+/// # fn validate_token<'a>(src: &std::net::SocketAddr, token: &'a [u8]) -> Option<quiche::ConnectionId<'a>> {
/// # None
/// # }
/// let (len, src) = socket.recv_from(&mut buf).unwrap();
@@ -1055,26 +1253,29 @@
/// // Client sent token, validate it.
/// let odcid = validate_token(&src, token);
///
-/// if odcid == None {
+/// if odcid.is_none() {
/// // Invalid address validation token.
/// return Ok(());
/// }
///
-/// let conn = quiche::accept(&scid, odcid, &mut config)?;
+/// let conn = quiche::accept(&scid, odcid.as_ref(), src, &mut config)?;
/// # Ok::<(), quiche::Error>(())
/// ```
+#[inline]
pub fn retry(
- scid: &[u8], dcid: &[u8], new_scid: &[u8], token: &[u8], version: u32,
- out: &mut [u8],
+ scid: &ConnectionId, dcid: &ConnectionId, new_scid: &ConnectionId,
+ token: &[u8], version: u32, out: &mut [u8],
) -> Result<usize> {
packet::retry(scid, dcid, new_scid, token, version, out)
}
/// Returns true if the given protocol version is supported.
+#[inline]
pub fn version_is_supported(version: u32) -> bool {
matches!(
version,
- PROTOCOL_VERSION_DRAFT27 |
+ PROTOCOL_VERSION_V1 |
+ PROTOCOL_VERSION_DRAFT27 |
PROTOCOL_VERSION_DRAFT28 |
PROTOCOL_VERSION_DRAFT29
)
@@ -1086,11 +1287,12 @@
/// there is no room to add the frame in the packet. You may retry to add the
/// frame later.
macro_rules! push_frame_to_pkt {
- ($frames:expr, $frame:expr, $payload_len: expr, $left:expr) => {{
+ ($out:expr, $frames:expr, $frame:expr, $left:expr) => {{
if $frame.wire_len() <= $left {
- $payload_len += $frame.wire_len();
$left -= $frame.wire_len();
+ $frame.to_bytes(&mut $out)?;
+
$frames.push($frame);
true
@@ -1100,32 +1302,87 @@
}};
}
-/// Conditional qlog action.
+/// Conditional qlog actions.
///
/// Executes the provided body if the qlog feature is enabled and quiche
-/// has been condifigured with a log writer.
+/// has been configured with a log writer.
macro_rules! qlog_with {
- ($qlog_streamer:expr, $qlog_streamer_ref:ident, $body:block) => {{
+ ($qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
#[cfg(feature = "qlog")]
{
- if let Some($qlog_streamer_ref) = &mut $qlog_streamer {
+ if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
$body
}
}
}};
}
+/// Executes the provided body if the qlog feature is enabled, quiche has been
+/// configured with a log writer, and the event's importance is within the
+/// configured level.
+macro_rules! qlog_with_type {
+ ($ty:expr, $qlog:expr, $qlog_streamer_ref:ident, $body:block) => {{
+ #[cfg(feature = "qlog")]
+ {
+ if qlog::EventImportance::from($ty).is_contained_in(&$qlog.level) {
+ if let Some($qlog_streamer_ref) = &mut $qlog.streamer {
+ $body
+ }
+ }
+ }
+ }};
+}
+
+#[cfg(feature = "qlog")]
+const QLOG_PARAMS_SET: qlog::EventType =
+ qlog::EventType::TransportEventType(qlog::TransportEventType::ParametersSet);
+
+#[cfg(feature = "qlog")]
+const QLOG_PACKET_RX: qlog::EventType =
+ qlog::EventType::TransportEventType(qlog::TransportEventType::PacketReceived);
+
+#[cfg(feature = "qlog")]
+const QLOG_PACKET_TX: qlog::EventType =
+ qlog::EventType::TransportEventType(qlog::TransportEventType::PacketSent);
+
+#[cfg(feature = "qlog")]
+const QLOG_DATA_MV: qlog::EventType =
+ qlog::EventType::TransportEventType(qlog::TransportEventType::DataMoved);
+
+#[cfg(feature = "qlog")]
+const QLOG_METRICS: qlog::EventType =
+ qlog::EventType::RecoveryEventType(qlog::RecoveryEventType::MetricsUpdated);
+
+#[cfg(feature = "qlog")]
+struct QlogInfo {
+ streamer: Option<qlog::QlogStreamer>,
+ logged_peer_params: bool,
+ level: qlog::EventImportance,
+}
+
+#[cfg(feature = "qlog")]
+impl Default for QlogInfo {
+ fn default() -> Self {
+ QlogInfo {
+ streamer: None,
+ logged_peer_params: false,
+ level: qlog::EventImportance::Base,
+ }
+ }
+}
+
impl Connection {
fn new(
- scid: &[u8], odcid: Option<&[u8]>, config: &mut Config, is_server: bool,
+ scid: &ConnectionId, odcid: Option<&ConnectionId>, peer: SocketAddr,
+ config: &mut Config, is_server: bool,
) -> Result<Pin<Box<Connection>>> {
let tls = config.tls_ctx.new_handshake()?;
- Connection::with_tls(scid, odcid, config, tls, is_server)
+ Connection::with_tls(scid, odcid, peer, config, tls, is_server)
}
fn with_tls(
- scid: &[u8], odcid: Option<&[u8]>, config: &mut Config,
- tls: tls::Handshake, is_server: bool,
+ scid: &ConnectionId, odcid: Option<&ConnectionId>, peer: SocketAddr,
+ config: &mut Config, tls: tls::Handshake, is_server: bool,
) -> Result<Pin<Box<Connection>>> {
let max_rx_data = config.local_transport_params.initial_max_data;
@@ -1135,8 +1392,8 @@
let mut conn = Box::pin(Connection {
version: config.version,
- dcid: Vec::new(),
- scid: scid.to_vec(),
+ dcid: ConnectionId::default(),
+ scid: scid.to_vec().into(),
trace_id: scid_as_hex.join(""),
@@ -1152,21 +1409,32 @@
handshake: tls,
- recovery: recovery::Recovery::new(&config),
+ session: None,
+
+ recovery: recovery::Recovery::new(config),
+
+ peer_addr: peer,
application_protos: config.application_protos.clone(),
recv_count: 0,
sent_count: 0,
+ retrans_count: 0,
+ sent_bytes: 0,
+ recv_bytes: 0,
rx_data: 0,
max_rx_data,
max_rx_data_next: max_rx_data,
almost_full: false,
+ tx_cap: 0,
+
tx_data: 0,
max_tx_data: 0,
+ stream_retrans_bytes: 0,
+
max_send_bytes: 0,
streams: stream::StreamMap::new(
@@ -1180,10 +1448,9 @@
token: None,
- error: None,
+ local_error: None,
- app_error: None,
- app_reason: Vec::new(),
+ peer_error: None,
challenge: None,
@@ -1193,6 +1460,10 @@
draining_timer: None,
+ undecryptable_pkts: VecDeque::new(),
+
+ alpn: Vec::new(),
+
is_server,
derived_initial_secrets: false,
@@ -1211,7 +1482,10 @@
parsed_peer_transport_params: false,
+ handshake_completed: false,
+
handshake_done_sent: false,
+ handshake_done_acked: false,
handshake_confirmed: false,
@@ -1219,15 +1493,14 @@
closed: false,
+ timed_out: false,
+
grease: config.grease,
keylog: None,
#[cfg(feature = "qlog")]
- qlog_streamer: None,
-
- #[cfg(feature = "qlog")]
- qlogged_peer_params: false,
+ qlog: Default::default(),
dgram_recv_queue: dgram::DatagramQueue::new(
config.dgram_recv_max_queue_len,
@@ -1236,22 +1509,28 @@
dgram_send_queue: dgram::DatagramQueue::new(
config.dgram_send_max_queue_len,
),
+
+ emit_dgram: true,
});
if let Some(odcid) = odcid {
conn.local_transport_params
- .original_destination_connection_id = Some(odcid.to_vec());
+ .original_destination_connection_id = Some(odcid.to_vec().into());
conn.local_transport_params.retry_source_connection_id =
- Some(scid.to_vec());
+ Some(scid.to_vec().into());
conn.did_retry = true;
}
conn.local_transport_params.initial_source_connection_id =
- Some(scid.to_vec());
+ Some(scid.to_vec().into());
- conn.handshake.init(&conn)?;
+ let conn_ptr = &conn as &Connection as *const Connection;
+ conn.handshake.init(conn_ptr, is_server)?;
+
+ conn.handshake
+ .use_legacy_codepoint(config.version != PROTOCOL_VERSION_V1);
conn.encode_transport_params()?;
@@ -1267,7 +1546,7 @@
conn.is_server,
)?;
- conn.dcid.extend_from_slice(&dcid);
+ conn.dcid = dcid.to_vec().into();
conn.pkt_num_spaces[packet::EPOCH_INITIAL].crypto_open =
Some(aead_open);
@@ -1286,27 +1565,56 @@
/// missing some early logs.
///
/// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
- pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send>) {
+ #[inline]
+ pub fn set_keylog(&mut self, writer: Box<dyn std::io::Write + Send + Sync>) {
self.keylog = Some(writer);
}
/// Sets qlog output to the designated [`Writer`].
///
+ /// Only events included in `QlogLevel::Base` are written.
+ ///
/// This needs to be called as soon as the connection is created, to avoid
/// missing some early logs.
///
/// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
#[cfg(feature = "qlog")]
pub fn set_qlog(
- &mut self, writer: Box<dyn std::io::Write + Send>, title: String,
+ &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
description: String,
) {
+ self.set_qlog_with_level(writer, title, description, QlogLevel::Base)
+ }
+
+ /// Sets qlog output to the designated [`Writer`].
+ ///
+ /// Only qlog events included in the specified `QlogLevel` are written.
+ ///
+ /// This needs to be called as soon as the connection is created, to avoid
+ /// missing some early logs.
+ ///
+ /// [`Writer`]: https://doc.rust-lang.org/std/io/trait.Write.html
+ #[cfg(feature = "qlog")]
+ pub fn set_qlog_with_level(
+ &mut self, writer: Box<dyn std::io::Write + Send + Sync>, title: String,
+ description: String, qlog_level: QlogLevel,
+ ) {
let vp = if self.is_server {
qlog::VantagePointType::Server
} else {
qlog::VantagePointType::Client
};
+ let level = match qlog_level {
+ QlogLevel::Core => qlog::EventImportance::Core,
+
+ QlogLevel::Base => qlog::EventImportance::Base,
+
+ QlogLevel::Extra => qlog::EventImportance::Extra,
+ };
+
+ self.qlog.level = level;
+
let trace = qlog::Trace::new(
qlog::VantagePoint {
name: None,
@@ -1316,8 +1624,7 @@
Some(title.to_string()),
Some(description.to_string()),
Some(qlog::Configuration {
- time_offset: Some("0".to_string()),
- time_units: Some(qlog::TimeUnits::Ms),
+ time_offset: Some(0.0),
original_uris: None,
}),
None,
@@ -1328,23 +1635,53 @@
Some(title),
Some(description),
None,
- std::time::Instant::now(),
+ time::Instant::now(),
trace,
+ self.qlog.level.clone(),
writer,
);
streamer.start_log().ok();
- let ev = self.local_transport_params.to_qlog(
- qlog::TransportOwner::Local,
- self.version,
- self.handshake.alpn_protocol(),
- self.handshake.cipher(),
- );
+ let ev_data = self
+ .local_transport_params
+ .to_qlog(qlog::TransportOwner::Local, self.handshake.cipher());
- streamer.add_event(ev).ok();
+ // This event occurs very early, so just mark the relative time as 0.0.
+ streamer
+ .add_event(qlog::Event::with_time(0.0, ev_data))
+ .ok();
- self.qlog_streamer = Some(streamer);
+ self.qlog.streamer = Some(streamer);
+ }
+
+ /// Configures the given session for resumption.
+ ///
+ /// On the client, this can be used to offer the given serialized session,
+ /// as returned by [`session()`], for resumption.
+ ///
+ /// This must only be called immediately after creating a connection, that
+ /// is, before any packet is sent or received.
+ ///
+ /// [`session()`]: struct.Connection.html#method.session
+ #[inline]
+ pub fn set_session(&mut self, session: &[u8]) -> Result<()> {
+ let mut b = octets::Octets::with_slice(session);
+
+ let session_len = b.get_u64()? as usize;
+ let session_bytes = b.get_bytes(session_len)?;
+
+ self.handshake.set_session(session_bytes.as_ref())?;
+
+ let raw_params_len = b.get_u64()? as usize;
+ let raw_params_bytes = b.get_bytes(raw_params_len)?;
+
+ let peer_params =
+ TransportParams::decode(raw_params_bytes.as_ref(), self.is_server)?;
+
+ self.process_peer_transport_params(peer_params);
+
+ Ok(())
}
/// Processes QUIC packets received from the peer.
@@ -1366,12 +1703,15 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// loop {
- /// let read = socket.recv(&mut buf).unwrap();
+ /// let (read, from) = socket.recv_from(&mut buf).unwrap();
///
- /// let read = match conn.recv(&mut buf[..read]) {
+ /// let recv_info = quiche::RecvInfo { from };
+ ///
+ /// let read = match conn.recv(&mut buf[..read], recv_info) {
/// Ok(v) => v,
///
/// Err(e) => {
@@ -1382,9 +1722,13 @@
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
- pub fn recv(&mut self, buf: &mut [u8]) -> Result<usize> {
+ pub fn recv(&mut self, buf: &mut [u8], info: RecvInfo) -> Result<usize> {
let len = buf.len();
+ if len == 0 {
+ return Err(Error::BufferTooShort);
+ }
+
// Keep track of how many bytes we received from the client, so we
// can limit bytes sent back before address validation, to a multiple
// of this. The limit needs to be increased early on, so that if there
@@ -1401,7 +1745,7 @@
// Process coalesced packets.
while left > 0 {
- let read = match self.recv_single(&mut buf[len - left..len]) {
+ let read = match self.recv_single(&mut buf[len - left..len], &info) {
Ok(v) => v,
Err(Error::Done) => left,
@@ -1418,6 +1762,25 @@
left -= read;
}
+ // Process previously undecryptable 0-RTT packets if the decryption key
+ // is now available.
+ if self.pkt_num_spaces[packet::EPOCH_APPLICATION]
+ .crypto_0rtt_open
+ .is_some()
+ {
+ while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
+ {
+ if let Err(e) = self.recv(&mut pkt, info) {
+ self.undecryptable_pkts.clear();
+
+ // Even though the packet was previously "accepted", it
+ // should be safe to forward the error, as it also comes
+ // from the `recv()` method.
+ return Err(e);
+ }
+ }
+ }
+
Ok(done)
}
@@ -1430,18 +1793,18 @@
/// On error, an error other than [`Done`] is returned.
///
/// [`Done`]: enum.Error.html#variant.Done
- fn recv_single(&mut self, buf: &mut [u8]) -> Result<usize> {
+ fn recv_single(&mut self, buf: &mut [u8], info: &RecvInfo) -> Result<usize> {
let now = time::Instant::now();
if buf.is_empty() {
return Err(Error::Done);
}
- if self.is_closed() || self.draining_timer.is_some() {
+ if self.is_closed() || self.is_draining() {
return Err(Error::Done);
}
- let is_closing = self.error.is_some() || self.app_error.is_some();
+ let is_closing = self.local_error.is_some();
if is_closing {
return Err(Error::Done);
@@ -1494,20 +1857,33 @@
return Err(Error::Done);
}
- match versions.iter().filter(|&&v| version_is_supported(v)).max() {
- Some(v) => self.version = *v,
+ let supported_versions =
+ versions.iter().filter(|&&v| version_is_supported(v));
- None => {
- // We don't support any of the versions offered.
- //
- // While a man-in-the-middle attacker might be able to
- // inject a version negotiation packet that triggers this
- // failure, the window of opportunity is very small and
- // this error is quite useful for debugging, so don't just
- // ignore the packet.
- return Err(Error::UnknownVersion);
- },
- };
+ let mut found_version = false;
+
+ for &v in supported_versions {
+ found_version = true;
+
+ // The final version takes precedence over draft ones.
+ if v == PROTOCOL_VERSION_V1 {
+ self.version = v;
+ break;
+ }
+
+ self.version = cmp::max(self.version, v);
+ }
+
+ if !found_version {
+ // We don't support any of the versions offered.
+ //
+ // While a man-in-the-middle attacker might be able to
+ // inject a version negotiation packet that triggers this
+ // failure, the window of opportunity is very small and
+ // this error is quite useful for debugging, so don't just
+ // ignore the packet.
+ return Err(Error::UnknownVersion);
+ }
self.did_version_negotiation = true;
@@ -1528,6 +1904,9 @@
self.pkt_num_spaces[packet::EPOCH_INITIAL].crypto_seal =
Some(aead_seal);
+ self.handshake
+ .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
+
// Encode transport parameters again, as the new version might be
// using a different format.
self.encode_transport_params()?;
@@ -1561,8 +1940,7 @@
// Remember peer's new connection ID.
self.odcid = Some(self.dcid.clone());
- self.dcid.resize(hdr.scid.len(), 0);
- self.dcid.copy_from_slice(&hdr.scid);
+ self.dcid = hdr.scid.clone();
self.rscid = Some(self.dcid.clone());
@@ -1594,6 +1972,9 @@
self.version = hdr.version;
self.did_version_negotiation = true;
+ self.handshake
+ .use_legacy_codepoint(self.version != PROTOCOL_VERSION_V1);
+
// Encode transport parameters again, as the new version might be
// using a different format.
self.encode_transport_params()?;
@@ -1620,6 +2001,17 @@
})? as usize
};
+ // Make sure the buffer is at least as large as an explicit
+ // payload length.
+ if payload_len > b.cap() {
+ return Err(drop_pkt_on_err(
+ Error::InvalidPacket,
+ self.recv_count,
+ self.is_server,
+ &self.trace_id,
+ ));
+ }
+
// Derive initial secrets on the server.
if !self.derived_initial_secrets {
let (aead_open, aead_seal) = crypto::derive_initial_key_material(
@@ -1640,28 +2032,49 @@
let epoch = hdr.ty.to_epoch()?;
// Select AEAD context used to open incoming packet.
- #[allow(clippy::or_fun_call)]
- let aead = (self.pkt_num_spaces[epoch].crypto_0rtt_open.as_ref())
+ let aead = if hdr.ty == packet::Type::ZeroRTT {
// Only use 0-RTT key if incoming packet is 0-RTT.
- .filter(|_| hdr.ty == packet::Type::ZeroRTT)
+ self.pkt_num_spaces[epoch].crypto_0rtt_open.as_ref()
+ } else {
// Otherwise use the packet number space's main key.
- .or(self.pkt_num_spaces[epoch].crypto_open.as_ref())
- // Finally, discard packet if no usable key is available.
- //
- // TODO: buffer 0-RTT/1-RTT packets instead of discarding when the
- // required key is not available yet, as an optimization.
- .ok_or_else(|| {
- drop_pkt_on_err(
+ self.pkt_num_spaces[epoch].crypto_open.as_ref()
+ };
+
+ // Finally, discard packet if no usable key is available.
+ let aead = match aead {
+ Some(v) => v,
+
+ None => {
+ if hdr.ty == packet::Type::ZeroRTT &&
+ self.undecryptable_pkts.len() < MAX_UNDECRYPTABLE_PACKETS &&
+ !self.is_established()
+ {
+ // Buffer 0-RTT packets when the required read key is not
+ // available yet, and process them later.
+ //
+ // TODO: in the future we might want to buffer other types
+ // of undecryptable packets as well.
+ let pkt_len = b.off() + payload_len;
+ let pkt = (b.buf()[..pkt_len]).to_vec();
+
+ self.undecryptable_pkts.push_back((pkt, *info));
+ return Ok(pkt_len);
+ }
+
+ let e = drop_pkt_on_err(
Error::CryptoFail,
self.recv_count,
self.is_server,
&self.trace_id,
- )
- })?;
+ );
+
+ return Err(e);
+ },
+ };
let aead_tag_len = aead.alg().tag_len();
- packet::decrypt_hdr(&mut b, &mut hdr, &aead).map_err(|e| {
+ packet::decrypt_hdr(&mut b, &mut hdr, aead).map_err(|e| {
drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
})?;
@@ -1681,28 +2094,35 @@
pn
);
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
let packet_size = b.len();
let qlog_pkt_hdr = qlog::PacketHeader::with_type(
hdr.ty.to_qlog(),
pn,
- Some(packet_size as u64),
- Some(payload_len as u64),
Some(hdr.version),
Some(&hdr.scid),
Some(&hdr.dcid),
);
- q.add_event(qlog::event::Event::packet_received(
- hdr.ty.to_qlog(),
- qlog_pkt_hdr,
- Some(Vec::new()),
- None,
- None,
- None,
- ))
- .ok();
+ let qlog_raw_info = qlog::RawInfo {
+ length: Some(packet_size as u64),
+ payload_length: Some(payload_len as u64),
+ data: None,
+ };
+
+ let ev_data = qlog::EventData::PacketReceived {
+ header: qlog_pkt_hdr,
+ frames: Some(vec![]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(qlog_raw_info),
+ datagram_id: None,
+ };
+
+ q.add_event_data_with_instant(ev_data, now).ok();
});
let mut payload = packet::decrypt_pkt(
@@ -1710,7 +2130,7 @@
pn,
pn_len,
payload_len,
- &aead,
+ aead,
)
.map_err(|e| {
drop_pkt_on_err(e, self.recv_count, self.is_server, &self.trace_id)
@@ -1721,6 +2141,11 @@
return Err(Error::Done);
}
+ // Packets with no frames are invalid.
+ if payload.cap() == 0 {
+ return Err(Error::InvalidPacket);
+ }
+
if !self.is_server && !self.got_peer_conn_id {
if self.odcid.is_none() {
self.odcid = Some(self.dcid.clone());
@@ -1728,18 +2153,21 @@
// Replace the randomly generated destination connection ID with
// the one supplied by the server.
- self.dcid.resize(hdr.scid.len(), 0);
- self.dcid.copy_from_slice(&hdr.scid);
+ self.dcid = hdr.scid.clone();
self.got_peer_conn_id = true;
}
if self.is_server && !self.got_peer_conn_id {
- self.dcid.extend_from_slice(&hdr.scid);
+ self.dcid = hdr.scid.clone();
- if !self.did_retry && self.version >= PROTOCOL_VERSION_DRAFT28 {
+ if !self.did_retry &&
+ (self.version >= PROTOCOL_VERSION_DRAFT28 ||
+ self.version == PROTOCOL_VERSION_V1)
+ {
self.local_transport_params
- .original_destination_connection_id = Some(hdr.dcid.to_vec());
+ .original_destination_connection_id =
+ Some(hdr.dcid.to_vec().into());
self.encode_transport_params()?;
}
@@ -1756,7 +2184,7 @@
while payload.cap() > 0 {
let frame = frame::Frame::from_bytes(&mut payload, hdr.ty)?;
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
q.add_frame(frame.to_qlog(), false).ok();
});
@@ -1765,7 +2193,7 @@
}
if let Err(e) = self.process_frame(frame, epoch, now) {
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
// Always conclude frame writing on error.
q.finish_frames().ok();
});
@@ -1774,32 +2202,31 @@
}
}
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
// Always conclude frame writing.
q.finish_frames().ok();
});
- qlog_with!(self.qlog_streamer, q, {
- let ev = self.recovery.to_qlog();
- q.add_event(ev).ok();
+ qlog_with_type!(QLOG_PACKET_RX, self.qlog, q, {
+ if let Some(ev_data) = self.recovery.maybe_qlog() {
+ q.add_event_data_with_instant(ev_data, now).ok();
+ }
});
// Only log the remote transport parameters once the connection is
// established (i.e. after frames have been fully parsed) and only
// once per connection.
if self.is_established() {
- qlog_with!(self.qlog_streamer, q, {
- if !self.qlogged_peer_params {
- let ev = self.peer_transport_params.to_qlog(
+ qlog_with_type!(QLOG_PARAMS_SET, self.qlog, q, {
+ if !self.qlog.logged_peer_params {
+ let ev_data = self.peer_transport_params.to_qlog(
qlog::TransportOwner::Remote,
- self.version,
- self.handshake.alpn_protocol(),
self.handshake.cipher(),
);
- q.add_event(ev).ok();
+ q.add_event_data_with_instant(ev_data, now).ok();
- self.qlogged_peer_params = true;
+ self.qlog.logged_peer_params = true;
}
});
}
@@ -1818,23 +2245,55 @@
}
},
- frame::Frame::Crypto { data } => {
+ frame::Frame::CryptoHeader { offset, length } => {
self.pkt_num_spaces[epoch]
.crypto_stream
.send
- .ack(data.off(), data.len());
+ .ack_and_drop(offset, length);
},
- frame::Frame::Stream { stream_id, data } => {
+ frame::Frame::StreamHeader {
+ stream_id,
+ offset,
+ length,
+ ..
+ } => {
let stream = match self.streams.get_mut(stream_id) {
Some(v) => v,
None => continue,
};
- stream.send.ack(data.off(), data.len());
+ stream.send.ack_and_drop(offset, length);
- if stream.is_complete() {
+ // Only collect the stream if it is complete and not
+ // readable. If it is readable, it will get collected when
+ // stream_recv() is used.
+ if stream.is_complete() && !stream.is_readable() {
+ let local = stream.local;
+ self.streams.collect(stream_id, local);
+ }
+ },
+
+ frame::Frame::HandshakeDone => {
+ // Explicitly set this to true, so that if the frame was
+ // already scheduled for retransmission, it is aborted.
+ self.handshake_done_sent = true;
+
+ self.handshake_done_acked = true;
+ },
+
+ frame::Frame::ResetStream { stream_id, .. } => {
+ let stream = match self.streams.get_mut(stream_id) {
+ Some(v) => v,
+
+ None => continue,
+ };
+
+ // Only collect the stream if it is complete and not
+ // readable. If it is readable, it will get collected when
+ // stream_recv() is used.
+ if stream.is_complete() && !stream.is_readable() {
let local = stream.local;
self.streams.collect(stream_id, local);
}
@@ -1864,10 +2323,15 @@
self.idle_timer = Some(now + idle_timeout);
}
+ // Update send capacity.
+ self.update_tx_cap();
+
self.recv_count += 1;
let read = b.off() + aead_tag_len;
+ self.recv_bytes += read as u64;
+
// A Handshake packet has been received from the client and has been
// successfully processed, so we can drop the initial state and consider
// the client's address to be verified.
@@ -1912,10 +2376,11 @@
/// # let mut out = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// loop {
- /// let write = match conn.send(&mut out) {
+ /// let (write, send_info) = match conn.send(&mut out) {
/// Ok(v) => v,
///
/// Err(quiche::Error::Done) => {
@@ -1929,47 +2394,166 @@
/// },
/// };
///
- /// socket.send(&out[..write]).unwrap();
+ /// socket.send_to(&out[..write], &send_info.to).unwrap();
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
- pub fn send(&mut self, out: &mut [u8]) -> Result<usize> {
+ pub fn send(&mut self, out: &mut [u8]) -> Result<(usize, SendInfo)> {
+ if out.is_empty() {
+ return Err(Error::BufferTooShort);
+ }
+
+ if self.is_closed() || self.is_draining() {
+ return Err(Error::Done);
+ }
+
+ if self.local_error.is_none() {
+ self.do_handshake()?;
+ }
+
+ // Process previously undecryptable 0-RTT packets if the decryption key
+ // is now available.
+ if self.pkt_num_spaces[packet::EPOCH_APPLICATION]
+ .crypto_0rtt_open
+ .is_some()
+ {
+ while let Some((mut pkt, info)) = self.undecryptable_pkts.pop_front()
+ {
+ if self.recv(&mut pkt, info).is_err() {
+ self.undecryptable_pkts.clear();
+
+ // Forwarding the error value here could confuse
+ // applications, as they may not expect to get a `recv()`
+ // error when calling `send()`.
+ //
+ // We simply fall through to sending packets, which should
+ // take care of terminating the connection as needed.
+ break;
+ }
+ }
+ }
+
+ // There's no point in trying to send a packet if the Initial secrets
+ // have not been derived yet, so return early.
+ if !self.derived_initial_secrets {
+ return Err(Error::Done);
+ }
+
+ let mut has_initial = false;
+
+ let mut done = 0;
+
+ // Limit output packet size to respect the sender and receiver's
+ // maximum UDP payload size limit.
+ let mut left = cmp::min(out.len(), self.max_send_udp_payload_size());
+
+ // Limit data sent by the server based on the amount of data received
+ // from the client before its address is validated.
+ if !self.verified_peer_address && self.is_server {
+ left = cmp::min(left, self.max_send_bytes);
+ }
+
+ // Generate coalesced packets.
+ while left > 0 {
+ let (ty, written) = match self
+ .send_single(&mut out[done..done + left], has_initial)
+ {
+ Ok(v) => v,
+
+ Err(Error::BufferTooShort) | Err(Error::Done) => break,
+
+ Err(e) => return Err(e),
+ };
+
+ done += written;
+ left -= written;
+
+ match ty {
+ packet::Type::Initial => has_initial = true,
+
+ // No more packets can be coalesced after a 1-RTT.
+ packet::Type::Short => break,
+
+ _ => (),
+ };
+
+ // When sending multiple PTO probes, don't coalesce them together,
+ // so they are sent on separate UDP datagrams.
+ if let Ok(epoch) = ty.to_epoch() {
+ if self.recovery.loss_probes[epoch] > 0 {
+ break;
+ }
+ }
+ }
+
+ if done == 0 {
+ return Err(Error::Done);
+ }
+
+ // Pad UDP datagram if it contains a QUIC Initial packet.
+ if has_initial && left > 0 && done < MIN_CLIENT_INITIAL_LEN {
+ let pad_len = cmp::min(left, MIN_CLIENT_INITIAL_LEN - done);
+
+ // Fill padding area with null bytes, to avoid leaking information
+ // in case the application reuses the packet buffer.
+ out[done..done + pad_len].fill(0);
+
+ done += pad_len;
+ }
+
+ let info = SendInfo {
+ to: self.peer_addr,
+
+ at: self
+ .recovery
+ .get_packet_send_time()
+ .unwrap_or_else(time::Instant::now),
+ };
+
+ Ok((done, info))
+ }
+
+ fn send_single(
+ &mut self, out: &mut [u8], has_initial: bool,
+ ) -> Result<(packet::Type, usize)> {
let now = time::Instant::now();
if out.is_empty() {
return Err(Error::BufferTooShort);
}
- if self.is_closed() || self.draining_timer.is_some() {
+ if self.is_draining() {
return Err(Error::Done);
}
- // If the Initial secrets have not been derived yet, there's no point
- // in trying to send a packet, so return early.
- if !self.derived_initial_secrets {
- return Err(Error::Done);
- }
-
- let is_closing = self.error.is_some() || self.app_error.is_some();
-
- if !is_closing {
- self.do_handshake()?;
- }
+ let is_closing = self.local_error.is_some();
let mut b = octets::OctetsMut::with_slice(out);
- let epoch = self.write_epoch()?;
+ let pkt_type = self.write_pkt_type()?;
- let pkt_type = packet::Type::from_epoch(epoch);
+ let epoch = pkt_type.to_epoch()?;
+
+ let stream_retrans_bytes = self.stream_retrans_bytes;
// Process lost frames.
for lost in self.recovery.lost[epoch].drain(..) {
match lost {
- frame::Frame::Crypto { data } => {
- self.pkt_num_spaces[epoch].crypto_stream.send.push(data)?;
+ frame::Frame::CryptoHeader { offset, length } => {
+ self.pkt_num_spaces[epoch]
+ .crypto_stream
+ .send
+ .retransmit(offset, length);
+
+ self.stream_retrans_bytes += length as u64;
},
- frame::Frame::Stream { stream_id, data } => {
+ frame::Frame::StreamHeader {
+ stream_id,
+ offset,
+ length,
+ fin,
+ } => {
let stream = match self.streams.get_mut(stream_id) {
Some(v) => v,
@@ -1978,9 +2562,9 @@
let was_flushable = stream.is_flushable();
- let empty_fin = data.is_empty() && data.fin();
+ let empty_fin = length == 0 && fin;
- stream.send.push(data)?;
+ stream.send.retransmit(offset, length);
// If the stream is now flushable push it to the flushable
// queue, but only if it wasn't already queued.
@@ -1996,13 +2580,27 @@
incremental,
);
}
+
+ self.stream_retrans_bytes += length as u64;
},
frame::Frame::ACK { .. } => {
self.pkt_num_spaces[epoch].ack_elicited = true;
},
- frame::Frame::HandshakeDone => {
+ frame::Frame::ResetStream {
+ stream_id,
+ error_code,
+ final_size,
+ } =>
+ if self.streams.get(stream_id).is_some() {
+ self.streams
+ .mark_reset(stream_id, true, error_code, final_size);
+ },
+
+ // Retransmit HANDSHAKE_DONE only if it hasn't been acked at
+ // least once already.
+ frame::Frame::HandshakeDone if !self.handshake_done_acked => {
self.handshake_done_sent = false;
},
@@ -2020,20 +2618,15 @@
}
}
- let mut left = b.cap();
+ if stream_retrans_bytes > self.stream_retrans_bytes {
+ self.retrans_count += 1;
+ }
- // Limit output packet size to respect peer's max_packet_size limit.
- left = cmp::min(left, self.max_send_udp_payload_len());
+ let mut left = b.cap();
// Limit output packet size by congestion window size.
left = cmp::min(left, self.recovery.cwnd_available());
- // Limit data sent by the server based on the amount of data received
- // from the client before its address is validated.
- if !self.verified_peer_address && self.is_server {
- left = cmp::min(left, self.max_send_bytes);
- }
-
let pn = self.pkt_num_spaces[epoch].next_pkt_num;
let pn_len = packet::pkt_num_len(pn)?;
@@ -2044,16 +2637,11 @@
let hdr = Header {
ty: pkt_type,
- version: self.version,
- dcid: self.dcid.clone(),
- // Don't needlessly clone the source connection ID for 1-RTT packets
- // as it is not used.
- scid: if pkt_type != packet::Type::Short {
- self.scid.clone()
- } else {
- Vec::new()
- },
+ version: self.version,
+
+ dcid: ConnectionId::from_ref(&self.dcid),
+ scid: ConnectionId::from_ref(&self.scid),
pkt_num: 0,
pkt_num_len: pn_len,
@@ -2080,10 +2668,10 @@
// We assume that the payload length, which is only present in long
// header packets, can always be encoded with a 2-byte varint.
if pkt_type != packet::Type::Short {
- overhead += 2;
+ overhead += PAYLOAD_LENGTH_LEN;
}
- // Make sure we have enough space left for the packet.
+ // Make sure we have enough space left for the packet overhead.
match left.checked_sub(overhead) {
Some(v) => left = v,
@@ -2099,13 +2687,31 @@
},
}
+ // Make sure there is enough space for the minimum payload length.
+ if left < PAYLOAD_MIN_LEN {
+ self.recovery.update_app_limited(false);
+ return Err(Error::Done);
+ }
+
let mut frames: Vec<frame::Frame> = Vec::new();
let mut ack_eliciting = false;
let mut in_flight = false;
let mut has_data = false;
- let mut payload_len = 0;
+ let header_offset = b.off();
+
+ // Reserve space for payload length in advance. Since we don't yet know
+ // what the final length will be, we reserve 2 bytes in all cases.
+ //
+ // Only long header packets have an explicit length field.
+ if pkt_type != packet::Type::Short {
+ b.skip(PAYLOAD_LENGTH_LEN)?;
+ }
+
+ packet::encode_pkt_num(pn, &mut b)?;
+
+ let payload_offset = b.off();
// Create ACK frame.
if self.pkt_num_spaces[epoch].recv_pkt_need_ack.len() > 0 &&
@@ -2123,22 +2729,20 @@
let frame = frame::Frame::ACK {
ack_delay,
ranges: self.pkt_num_spaces[epoch].recv_pkt_need_ack.clone(),
+ ecn_counts: None, // sending ECN is not supported at this time
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.pkt_num_spaces[epoch].ack_elicited = false;
}
}
if pkt_type == packet::Type::Short && !is_closing {
// Create HANDSHAKE_DONE frame.
- if self.is_established() &&
- !self.handshake_done_sent &&
- self.is_server
- {
+ if self.should_send_handshake_done() {
let frame = frame::Frame::HandshakeDone;
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.handshake_done_sent = true;
ack_eliciting = true;
@@ -2152,7 +2756,7 @@
max: self.streams.max_streams_bidi_next(),
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.streams.update_max_streams_bidi();
ack_eliciting = true;
@@ -2166,7 +2770,7 @@
max: self.streams.max_streams_uni_next(),
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.streams.update_max_streams_uni();
ack_eliciting = true;
@@ -2174,28 +2778,11 @@
}
}
- // Create MAX_DATA frame as needed.
- if self.almost_full {
- let frame = frame::Frame::MaxData {
- max: self.max_rx_data_next,
- };
-
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
- self.almost_full = false;
-
- // Commits the new max_rx_data limit.
- self.max_rx_data = self.max_rx_data_next;
-
- ack_eliciting = true;
- in_flight = true;
- }
- }
-
// Create DATA_BLOCKED frame.
if let Some(limit) = self.blocked_limit {
let frame = frame::Frame::DataBlocked { limit };
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.blocked_limit = None;
ack_eliciting = true;
@@ -2221,13 +2808,75 @@
max: stream.recv.max_data_next(),
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
stream.recv.update_max_data();
self.streams.mark_almost_full(stream_id, false);
ack_eliciting = true;
in_flight = true;
+
+ // Also send MAX_DATA when MAX_STREAM_DATA is sent, to avoid a
+ // potential race condition.
+ self.almost_full = true;
+ }
+ }
+
+ // Create MAX_DATA frame as needed.
+ if self.almost_full && self.max_rx_data < self.max_rx_data_next {
+ let frame = frame::Frame::MaxData {
+ max: self.max_rx_data_next,
+ };
+
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ self.almost_full = false;
+
+ // Commits the new max_rx_data limit.
+ self.max_rx_data = self.max_rx_data_next;
+
+ ack_eliciting = true;
+ in_flight = true;
+ }
+ }
+
+ // Create STOP_SENDING frames as needed.
+ for (stream_id, error_code) in self
+ .streams
+ .stopped()
+ .map(|(&k, &v)| (k, v))
+ .collect::<Vec<(u64, u64)>>()
+ {
+ let frame = frame::Frame::StopSending {
+ stream_id,
+ error_code,
+ };
+
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ self.streams.mark_stopped(stream_id, false, 0);
+
+ ack_eliciting = true;
+ in_flight = true;
+ }
+ }
+
+ // Create RESET_STREAM frames as needed.
+ for (stream_id, (error_code, final_size)) in self
+ .streams
+ .reset()
+ .map(|(&k, &v)| (k, v))
+ .collect::<Vec<(u64, (u64, u64))>>()
+ {
+ let frame = frame::Frame::ResetStream {
+ stream_id,
+ error_code,
+ final_size,
+ };
+
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ self.streams.mark_reset(stream_id, false, 0, 0);
+
+ ack_eliciting = true;
+ in_flight = true;
}
}
@@ -2240,7 +2889,7 @@
{
let frame = frame::Frame::StreamDataBlocked { stream_id, limit };
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.streams.mark_blocked(stream_id, false, 0);
ack_eliciting = true;
@@ -2250,30 +2899,32 @@
}
// Create CONNECTION_CLOSE frame.
- if let Some(err) = self.error {
- let frame = frame::Frame::ConnectionClose {
- error_code: err,
- frame_type: 0,
- reason: Vec::new(),
- };
+ if let Some(conn_err) = self.local_error.as_ref() {
+ if conn_err.is_app {
+ // Create ApplicationClose frame.
+ if pkt_type == packet::Type::Short {
+ let frame = frame::Frame::ApplicationClose {
+ error_code: conn_err.error_code,
+ reason: conn_err.reason.clone(),
+ };
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
- self.draining_timer = Some(now + (self.recovery.pto() * 3));
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ self.draining_timer =
+ Some(now + (self.recovery.pto() * 3));
- ack_eliciting = true;
- in_flight = true;
- }
- }
-
- // Create APPLICATION_CLOSE frame.
- if let Some(err) = self.app_error {
- if pkt_type == packet::Type::Short {
- let frame = frame::Frame::ApplicationClose {
- error_code: err,
- reason: self.app_reason.clone(),
+ ack_eliciting = true;
+ in_flight = true;
+ }
+ }
+ } else {
+ // Create ConnectionClose frame.
+ let frame = frame::Frame::ConnectionClose {
+ error_code: conn_err.error_code,
+ frame_type: 0,
+ reason: conn_err.reason.clone(),
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.draining_timer = Some(now + (self.recovery.pto() * 3));
ack_eliciting = true;
@@ -2288,7 +2939,7 @@
data: challenge.clone(),
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
self.challenge = None;
ack_eliciting = true;
@@ -2301,42 +2952,145 @@
left > frame::MAX_CRYPTO_OVERHEAD &&
!is_closing
{
- let crypto_len = left - frame::MAX_CRYPTO_OVERHEAD;
- let crypto_buf = self.pkt_num_spaces[epoch]
- .crypto_stream
- .send
- .pop(crypto_len)?;
+ let crypto_off =
+ self.pkt_num_spaces[epoch].crypto_stream.send.off_front();
- let frame = frame::Frame::Crypto { data: crypto_buf };
+ // Encode the frame.
+ //
+ // Instead of creating a `frame::Frame` object, encode the frame
+ // directly into the packet buffer.
+ //
+ // First we reserve some space in the output buffer for writing the
+ // frame header (we assume the length field is always a 2-byte
+ // varint as we don't know the value yet).
+ //
+ // Then we emit the data from the crypto stream's send buffer.
+ //
+ // Finally we go back and encode the frame header with the now
+ // available information.
+ let hdr_off = b.off();
+ let hdr_len = 1 + // frame type
+ octets::varint_len(crypto_off) + // offset
+ 2; // length, always encode as 2-byte varint
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
- ack_eliciting = true;
- in_flight = true;
- has_data = true;
+ if let Some(max_len) = left.checked_sub(hdr_len) {
+ let (mut crypto_hdr, mut crypto_payload) =
+ b.split_at(hdr_off + hdr_len)?;
+
+ // Write stream data into the packet buffer.
+ let (len, _) = self.pkt_num_spaces[epoch]
+ .crypto_stream
+ .send
+ .emit(&mut crypto_payload.as_mut()[..max_len])?;
+
+ // Encode the frame's header.
+ //
+ // Due to how `OctetsMut::split_at()` works, `crypto_hdr` starts
+ // from the initial offset of `b` (rather than the current
+ // offset), so it needs to be advanced to the
+ // initial frame offset.
+ crypto_hdr.skip(hdr_off)?;
+
+ frame::encode_crypto_header(
+ crypto_off,
+ len as u64,
+ &mut crypto_hdr,
+ )?;
+
+ // Advance the packet buffer's offset.
+ b.skip(hdr_len + len)?;
+
+ let frame = frame::Frame::CryptoHeader {
+ offset: crypto_off,
+ length: len,
+ };
+
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ ack_eliciting = true;
+ in_flight = true;
+ has_data = true;
+ }
}
}
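The comment above describes the encoding pattern now shared by CRYPTO, DATAGRAM and STREAM frames: reserve room for the frame header, emit the payload, then go back and fill in the length once it is known. A minimal, self-contained sketch of that idea follows; it uses a plain byte slice and a hard-coded 2-byte QUIC varint purely for illustration, not quiche's internal `octets` buffer or frame encoders.

fn write_length_prefixed(out: &mut [u8], payload: &[u8]) -> Option<usize> {
    // Reserve the length field up front; like the code above, assume it
    // is always encoded as a 2-byte varint (so values must be < 2^14).
    const LEN_FIELD: usize = 2;

    let max_len = out.len().checked_sub(LEN_FIELD)?;
    let len = payload.len().min(max_len);
    assert!(len < (1 << 14));

    // Emit the payload first, right after the reserved header space.
    out[LEN_FIELD..LEN_FIELD + len].copy_from_slice(&payload[..len]);

    // Go back and fill in the length now that it is known. A 2-byte QUIC
    // varint sets the two most significant bits to 0b01.
    let varint = (len as u16) | 0x4000;
    out[..LEN_FIELD].copy_from_slice(&varint.to_be_bytes());

    Some(LEN_FIELD + len)
}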
+ // The preference of data-bearing frame to include in a packet
+ // is managed by `self.emit_dgram`. However, whether any frames
+ // can be sent depends on the state of their buffers. In the case
+ // where one type is preferred but its buffer is empty, fall back
+ // to the other type in order not to waste this function call.
+ let mut dgram_emitted = false;
+ let dgrams_to_emit = self.dgram_max_writable_len().is_some();
+ let stream_to_emit = self.streams.has_flushable();
+
+ let mut do_dgram = self.emit_dgram && dgrams_to_emit;
+ let do_stream = !self.emit_dgram && stream_to_emit;
+
+ if !do_stream && dgrams_to_emit {
+ do_dgram = true;
+ }
+
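The block above boils down to a small scheduling decision: prefer whichever data-bearing frame type was not emitted last time, but fall back to the other one if the preferred source has nothing to send. A hedged, standalone sketch of that decision (the enum and function are illustrative names, not quiche APIs):

enum FrameKind {
    Datagram,
    Stream,
}

// `prefer_dgram` mirrors `self.emit_dgram`: it flips after every packet
// in which a DATAGRAM was actually emitted.
fn pick_frame_kind(
    prefer_dgram: bool, dgrams_queued: bool, streams_flushable: bool,
) -> Option<FrameKind> {
    match (prefer_dgram, dgrams_queued, streams_flushable) {
        (true, true, _) => Some(FrameKind::Datagram),
        (false, _, true) => Some(FrameKind::Stream),

        // The preferred source is empty: fall back to the other one so
        // this send opportunity isn't wasted.
        (_, true, _) => Some(FrameKind::Datagram),
        (_, _, true) => Some(FrameKind::Stream),

        _ => None,
    }
}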
// Create DATAGRAM frame.
- if pkt_type == packet::Type::Short &&
+ if (pkt_type == packet::Type::Short || pkt_type == packet::Type::ZeroRTT) &&
left > frame::MAX_DGRAM_OVERHEAD &&
- !is_closing
+ !is_closing &&
+ do_dgram
{
if let Some(max_dgram_payload) = self.dgram_max_writable_len() {
while let Some(len) = self.dgram_send_queue.peek_front_len() {
- if (len + frame::MAX_DGRAM_OVERHEAD) <= left {
- // Front of the queue fits this packet, send it
+ let hdr_off = b.off();
+ let hdr_len = 1 + // frame type
+ 2; // length, always encode as 2-byte varint
+
+ if (hdr_len + len) <= left {
+ // Front of the queue fits this packet, send it.
match self.dgram_send_queue.pop() {
Some(data) => {
- let frame = frame::Frame::Datagram { data };
+ // Encode the frame.
+ //
+ // Instead of creating a `frame::Frame` object,
+ // encode the frame directly into the packet
+ // buffer.
+ //
+ // First we reserve some space in the output
+ // buffer for writing the frame header (we
+ // assume the length field is always a 2-byte
+ // varint as we don't know the value yet).
+ //
+ // Then we emit the data from the DATAGRAM's
+ // buffer.
+ //
+ // Finally we go back and encode the frame
+ // header with the now available information.
+ let (mut dgram_hdr, mut dgram_payload) =
+ b.split_at(hdr_off + hdr_len)?;
- if push_frame_to_pkt!(
- frames,
- frame,
- payload_len,
- left
- ) {
+ dgram_payload.as_mut()[..len]
+ .copy_from_slice(&data);
+
+ // Encode the frame's header.
+ //
+ // Due to how `OctetsMut::split_at()` works,
+ // `dgram_hdr` starts from the initial offset
+ // of `b` (rather than the current offset), so
+ // it needs to be advanced to the initial frame
+ // offset.
+ dgram_hdr.skip(hdr_off)?;
+
+ frame::encode_dgram_header(
+ len as u64,
+ &mut dgram_hdr,
+ )?;
+
+ // Advance the packet buffer's offset.
+ b.skip(hdr_len + len)?;
+
+ let frame =
+ frame::Frame::DatagramHeader { length: len };
+
+ if push_frame_to_pkt!(b, frames, frame, left) {
ack_eliciting = true;
in_flight = true;
+ dgram_emitted = true;
}
},
@@ -2353,9 +3107,10 @@
}
// Create a single STREAM frame for the first stream that is flushable.
- if pkt_type == packet::Type::Short &&
+ if (pkt_type == packet::Type::Short || pkt_type == packet::Type::ZeroRTT) &&
left > frame::MAX_STREAM_OVERHEAD &&
- !is_closing
+ !is_closing &&
+ !dgram_emitted
{
while let Some(stream_id) = self.streams.pop_flushable() {
let stream = match self.streams.get_mut(stream_id) {
@@ -2364,34 +3119,75 @@
None => continue,
};
- let off = stream.send.off_front();
+ // Avoid sending frames for streams that were already stopped.
+ //
+ // This might happen if stream data was buffered but not yet
+ // flushed on the wire when a STOP_SENDING frame is received.
+ if stream.send.is_stopped() {
+ continue;
+ }
- // Try to accurately account for the STREAM frame's overhead,
- // such that we can fill as much of the packet buffer as
- // possible.
- let overhead = 1 +
- octets::varint_len(stream_id) +
- octets::varint_len(off) +
- octets::varint_len(left as u64);
+ let stream_off = stream.send.off_front();
- let max_len = match left.checked_sub(overhead) {
+ // Encode the frame.
+ //
+ // Instead of creating a `frame::Frame` object, encode the frame
+ // directly into the packet buffer.
+ //
+ // First we reserve some space in the output buffer for writing
+ // the frame header (we assume the length field is always a
+ // 2-byte varint as we don't know the value yet).
+ //
+ // Then we emit the data from the stream's send buffer.
+ //
+ // Finally we go back and encode the frame header with the now
+ // available information.
+ let hdr_off = b.off();
+ let hdr_len = 1 + // frame type
+ octets::varint_len(stream_id) + // stream_id
+ octets::varint_len(stream_off) + // offset
+ 2; // length, always encode as 2-byte varint
+
+ let max_len = match left.checked_sub(hdr_len) {
Some(v) => v,
None => continue,
};
- let stream_buf = stream.send.pop(max_len)?;
+ let (mut stream_hdr, mut stream_payload) =
+ b.split_at(hdr_off + hdr_len)?;
- if stream_buf.is_empty() && !stream_buf.fin() {
- continue;
- }
+ // Write stream data into the packet buffer.
+ let (len, fin) =
+ stream.send.emit(&mut stream_payload.as_mut()[..max_len])?;
- let frame = frame::Frame::Stream {
+ // Encode the frame's header.
+ //
+ // Due to how `OctetsMut::split_at()` works, `stream_hdr` starts
+ // from the initial offset of `b` (rather than the current
+ // offset), so it needs to be advanced to the initial frame
+ // offset.
+ stream_hdr.skip(hdr_off)?;
+
+ frame::encode_stream_header(
stream_id,
- data: stream_buf,
+ stream_off,
+ len as u64,
+ fin,
+ &mut stream_hdr,
+ )?;
+
+ // Advance the packet buffer's offset.
+ b.skip(hdr_len + len)?;
+
+ let frame = frame::Frame::StreamHeader {
+ stream_id,
+ offset: stream_off,
+ length: len,
+ fin,
};
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
ack_eliciting = true;
in_flight = true;
has_data = true;
@@ -2416,6 +3212,9 @@
}
}
+ // Alternate trying to send DATAGRAMs next time.
+ self.emit_dgram = !dgram_emitted;
+
// Create PING for PTO probe if no other ack-eliciting frame is sent.
if self.recovery.loss_probes[epoch] > 0 &&
!ack_eliciting &&
@@ -2424,7 +3223,7 @@
{
let frame = frame::Frame::Ping;
- if push_frame_to_pkt!(frames, frame, payload_len, left) {
+ if push_frame_to_pkt!(b, frames, frame, left) {
ack_eliciting = true;
in_flight = true;
}
@@ -2442,46 +3241,44 @@
return Err(Error::Done);
}
- // Pad the client's initial packet.
- if !self.is_server && pkt_type == packet::Type::Initial {
- let pkt_len = pn_len + payload_len + crypto_overhead;
+ // When coalescing a 1-RTT packet, we can't add padding in the UDP
+ // datagram, so use PADDING frames instead.
+ //
+ // This is only needed if an Initial packet has already been written to
+ // the UDP datagram, as Initial always requires padding.
+ if has_initial && pkt_type == packet::Type::Short && left >= 1 {
+ let frame = frame::Frame::Padding { len: left };
- let frame = frame::Frame::Padding {
- len: cmp::min(MIN_CLIENT_INITIAL_LEN - pkt_len, left),
- };
-
- payload_len += frame.wire_len();
-
- frames.push(frame);
-
- in_flight = true;
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ in_flight = true;
+ }
}
// Pad payload so that it's always at least 4 bytes.
- if payload_len < PAYLOAD_MIN_LEN {
+ if b.off() - payload_offset < PAYLOAD_MIN_LEN {
+ let payload_len = b.off() - payload_offset;
+
let frame = frame::Frame::Padding {
len: PAYLOAD_MIN_LEN - payload_len,
};
- payload_len += frame.wire_len();
-
- frames.push(frame);
-
- in_flight = true;
+ #[allow(unused_assignments)]
+ if push_frame_to_pkt!(b, frames, frame, left) {
+ in_flight = true;
+ }
}
- payload_len += crypto_overhead;
+ let payload_len = b.off() - payload_offset;
- // Only long header packets have an explicit length field.
+ // Fill in payload length.
if pkt_type != packet::Type::Short {
- let len = pn_len + payload_len;
- b.put_varint(len as u64)?;
+ let len = pn_len + payload_len + crypto_overhead;
+
+ let (_, mut payload_with_len) = b.split_at(header_offset)?;
+ payload_with_len
+ .put_varint_with_len(len as u64, PAYLOAD_LENGTH_LEN)?;
}
- packet::encode_pkt_num(pn, &mut b)?;
-
- let payload_offset = b.off();
-
trace!(
"{} tx pkt {:?} len={} pn={}",
self.trace_id,
@@ -2490,43 +3287,45 @@
pn
);
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
let qlog_pkt_hdr = qlog::PacketHeader::with_type(
hdr.ty.to_qlog(),
pn,
- Some(payload_len as u64 + payload_offset as u64),
- Some(payload_len as u64),
Some(hdr.version),
Some(&hdr.scid),
Some(&hdr.dcid),
);
+ let length = Some(payload_len as u64 + payload_offset as u64);
+ let payload_length = Some(payload_len as u64);
+ let qlog_raw_info = qlog::RawInfo {
+ length,
+ payload_length,
+ data: None,
+ };
- let packet_sent_ev = qlog::event::Event::packet_sent_min(
- hdr.ty.to_qlog(),
- qlog_pkt_hdr,
- Some(Vec::new()),
- );
+ let ev_data = qlog::EventData::PacketSent {
+ header: qlog_pkt_hdr,
+ frames: Some(vec![]),
+ is_coalesced: None,
+ retry_token: None,
+ stateless_reset_token: None,
+ supported_versions: None,
+ raw: Some(qlog_raw_info),
+ datagram_id: None,
+ };
- q.add_event(packet_sent_ev).ok();
+ q.add_event_data_with_instant(ev_data, now).ok();
});
- // Encode frames into the output packet.
for frame in &mut frames {
trace!("{} tx frm {:?}", self.trace_id, frame);
- frame.to_bytes(&mut b)?;
-
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
q.add_frame(frame.to_qlog(), false).ok();
});
-
- // Once frames have been serialized they are passed to the Recovery
- // module which manages retransmission. However, some frames do not
- // contain retransmittable data, so drop it here.
- frame.shrink_for_retransmission();
}
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with_type!(QLOG_PACKET_TX, self.qlog, q, {
q.finish_frames().ok();
});
@@ -2541,6 +3340,7 @@
pn_len,
payload_len,
payload_offset,
+ None,
aead,
)?;
@@ -2568,14 +3368,16 @@
&self.trace_id,
);
- qlog_with!(self.qlog_streamer, q, {
- let ev = self.recovery.to_qlog();
- q.add_event(ev).ok();
+ qlog_with_type!(QLOG_METRICS, self.qlog, q, {
+ if let Some(ev_data) = self.recovery.maybe_qlog() {
+ q.add_event_data_with_instant(ev_data, now).ok();
+ }
});
self.pkt_num_spaces[epoch].next_pkt_num += 1;
self.sent_count += 1;
+ self.sent_bytes += written as u64;
if self.dgram_send_queue.byte_size() > self.recovery.cwnd_available() {
self.recovery.update_app_limited(false);
@@ -2600,23 +3402,7 @@
self.ack_eliciting_sent = true;
}
- Ok(written)
- }
-
- // Returns the maximum len of a packet to be sent. This is max_packet_size
- // as sent by the peer, except during the handshake when we haven't parsed
- // transport parameters yet, so use a default value then.
- fn max_send_udp_payload_len(&self) -> usize {
- if self.is_established() {
- // We cap the maximum packet size to 16KB or so, so that it can be
- // always encoded with a 2-byte varint.
- cmp::min(16383, self.peer_transport_params.max_udp_payload_size)
- as usize
- } else {
- // Allow for 1200 bytes (minimum QUIC packet size) during the
- // handshake.
- MIN_CLIENT_INITIAL_LEN
- }
+ Ok((pkt_type, written))
}
/// Reads contiguous data from a stream into the provided slice.
@@ -2635,8 +3421,9 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// # let stream_id = 0;
/// while let Ok((read, fin)) = conn.stream_recv(stream_id, &mut buf) {
/// println!("Got {} bytes on stream {}", read, stream_id);
@@ -2650,22 +3437,39 @@
if !stream::is_bidi(stream_id) &&
stream::is_local(stream_id, self.is_server)
{
- return Err(Error::InvalidStreamState);
+ return Err(Error::InvalidStreamState(stream_id));
}
let stream = self
.streams
.get_mut(stream_id)
- .ok_or(Error::InvalidStreamState)?;
+ .ok_or(Error::InvalidStreamState(stream_id))?;
if !stream.is_readable() {
return Err(Error::Done);
}
+ let local = stream.local;
+
#[cfg(feature = "qlog")]
let offset = stream.recv.off_front();
- let (read, fin) = stream.recv.pop(out)?;
+ let (read, fin) = match stream.recv.emit(out) {
+ Ok(v) => v,
+
+ Err(e) => {
+ // Collect the stream if it is now complete. This can happen if
+ // we got a `StreamReset` error which will now be propagated to
+ // the application, so we don't need to keep the stream's state
+ // anymore.
+ if stream.is_complete() {
+ self.streams.collect(stream_id, local);
+ }
+
+ self.streams.mark_readable(stream_id, false);
+ return Err(e);
+ },
+ };
self.max_rx_data_next = self.max_rx_data_next.saturating_add(read as u64);
@@ -2673,8 +3477,6 @@
let complete = stream.is_complete();
- let local = stream.local;
-
if stream.recv.almost_full() {
self.streams.mark_almost_full(stream_id, true);
}
@@ -2687,16 +3489,18 @@
self.streams.collect(stream_id, local);
}
- qlog_with!(self.qlog_streamer, q, {
- let ev = qlog::event::Event::h3_data_moved(
- stream_id.to_string(),
- Some(offset.to_string()),
- Some(read as u64),
- Some(qlog::H3DataRecipient::Transport),
- None,
- None,
- );
- q.add_event(ev).ok();
+ qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
+ let ev_data = qlog::EventData::DataMoved {
+ stream_id: Some(stream_id),
+ offset: Some(offset),
+ length: Some(read as u64),
+ from: Some(qlog::DataRecipient::Transport),
+ to: Some(qlog::DataRecipient::Application),
+ data: None,
+ };
+
+ let now = time::Instant::now();
+ q.add_event_data_with_instant(ev_data, now).ok();
});
if self.should_update_max_data() {
@@ -2711,6 +3515,14 @@
/// On success the number of bytes written is returned, or [`Done`] if no
/// data was written (e.g. because the stream has no capacity).
///
+ /// Applications can provide a 0-length buffer with the fin flag set to
+ /// true. This will lead to a 0-length FIN STREAM frame being sent at the
+ /// latest offset. This is the only case where `Ok(0)` is returned.
+ ///
+ /// In addition, if the peer has signalled that it doesn't want to receive
+ /// any more data from this stream by sending the `STOP_SENDING` frame, the
+ /// [`StreamStopped`] error will be returned instead of any data.
+ ///
/// Note that in order to avoid buffering an infinite amount of data in the
/// stream's send buffer, streams are only allowed to buffer outgoing data
/// up to the amount that the peer allows it to send (that is, up to the
@@ -2726,6 +3538,7 @@
/// early data if enabled (whenever [`is_in_early_data()`] returns `true`).
///
/// [`Done`]: enum.Error.html#variant.Done
+ /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
/// [`is_established()`]: struct.Connection.html#method.is_established
/// [`is_in_early_data()`]: struct.Connection.html#method.is_in_early_data
///
@@ -2735,8 +3548,9 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// # let stream_id = 0;
/// conn.stream_send(stream_id, b"hello", true)?;
/// # Ok::<(), quiche::Error>(())
@@ -2748,7 +3562,7 @@
if !stream::is_bidi(stream_id) &&
!stream::is_local(stream_id, self.is_server)
{
- return Err(Error::InvalidStreamState);
+ return Err(Error::InvalidStreamState(stream_id));
}
// Mark the connection as blocked if the connection-level flow control
@@ -2762,7 +3576,10 @@
// Truncate the input buffer based on the connection's send capacity if
// necessary.
- let cap = self.send_capacity();
+ let cap = self.tx_cap;
+ if cap == 0 && !fin {
+ return Err(Error::Done);
+ }
let (buf, fin) = if cap < buf.len() {
(&buf[..cap], false)
@@ -2778,7 +3595,14 @@
let was_flushable = stream.is_flushable();
- let sent = stream.send.push_slice(buf, fin)?;
+ let sent = match stream.send.write(buf, fin) {
+ Ok(v) => v,
+
+ Err(e) => {
+ self.streams.mark_writable(stream_id, false);
+ return Err(e);
+ },
+ };
let urgency = stream.urgency;
let incremental = stream.incremental;
@@ -2810,20 +3634,24 @@
self.streams.mark_writable(stream_id, false);
}
+ self.tx_cap -= sent;
+
self.tx_data += sent as u64;
self.recovery.rate_check_app_limited();
- qlog_with!(self.qlog_streamer, q, {
- let ev = qlog::event::Event::h3_data_moved(
- stream_id.to_string(),
- Some(offset.to_string()),
- Some(sent as u64),
- None,
- Some(qlog::H3DataRecipient::Transport),
- None,
- );
- q.add_event(ev).ok();
+ qlog_with_type!(QLOG_DATA_MV, self.qlog, q, {
+ let ev_data = qlog::EventData::DataMoved {
+ stream_id: Some(stream_id),
+ offset: Some(offset),
+ length: Some(sent as u64),
+ from: Some(qlog::DataRecipient::Application),
+ to: Some(qlog::DataRecipient::Transport),
+ data: None,
+ };
+
+ let now = time::Instant::now();
+ q.add_event_data_with_instant(ev_data, now).ok();
});
Ok(sent)
@@ -2868,7 +3696,8 @@
/// data in the stream's receive buffer is dropped, and no additional data
/// is added to it. Data received after calling this method is still
/// validated and acked but not stored, and [`stream_recv()`] will not
- /// return it to the application.
+ /// return it to the application. In addition, a `STOP_SENDING` frame will
+ /// be sent to the peer to signal it to stop sending data.
///
/// When the `direction` argument is set to [`Shutdown::Write`], outstanding
/// data in the stream's send buffer is dropped, and no additional data
@@ -2880,23 +3709,34 @@
/// [`stream_recv()`]: struct.Connection.html#method.stream_recv
/// [`stream_send()`]: struct.Connection.html#method.stream_send
pub fn stream_shutdown(
- &mut self, stream_id: u64, direction: Shutdown, _err: u64,
+ &mut self, stream_id: u64, direction: Shutdown, err: u64,
) -> Result<()> {
// Get existing stream.
let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;
match direction {
- // TODO: send STOP_SENDING
Shutdown::Read => {
stream.recv.shutdown()?;
+ if !stream.recv.is_fin() {
+ self.streams.mark_stopped(stream_id, true, err);
+ }
+
// Once shutdown, the stream is guaranteed to be non-readable.
self.streams.mark_readable(stream_id, false);
},
- // TODO: send RESET_STREAM
Shutdown::Write => {
- stream.send.shutdown()?;
+ let (final_size, unsent) = stream.send.shutdown()?;
+
+ // Claw back some flow control allowance from data that was
+ // buffered but not actually sent before the stream was reset.
+ self.tx_data = self.tx_data.saturating_sub(unsent);
+
+ // Update send capacity.
+ self.update_tx_cap();
+
+ self.streams.mark_reset(stream_id, true, err, final_size);
// Once shutdown, the stream is guaranteed to be non-writable.
self.streams.mark_writable(stream_id, false);
@@ -2907,13 +3747,80 @@
}
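With the removed TODOs above resolved, shutting a stream down now actively notifies the peer. A short usage sketch (assumes `conn` is an established `quiche::Connection`; the stream id and error code are arbitrary):

// Stop reading: drops buffered receive data and queues STOP_SENDING.
conn.stream_shutdown(4, quiche::Shutdown::Read, 0x42)?;

// Stop writing: drops unsent data and queues RESET_STREAM.
conn.stream_shutdown(4, quiche::Shutdown::Write, 0x42)?;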
/// Returns the stream's send capacity in bytes.
+ ///
+ /// If the specified stream doesn't exist (including when it has already
+ /// been completed and closed), the [`InvalidStreamState`] error will be
+ /// returned.
+ ///
+ /// In addition, if the peer has signalled that it doesn't want to receive
+ /// any more data from this stream by sending the `STOP_SENDING` frame, the
+ /// [`StreamStopped`] error will be returned.
+ ///
+ /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
+ /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
+ #[inline]
pub fn stream_capacity(&self, stream_id: u64) -> Result<usize> {
if let Some(stream) = self.streams.get(stream_id) {
- let cap = cmp::min(self.send_capacity(), stream.send.cap());
+ let cap = cmp::min(self.tx_cap, stream.send.cap()?);
return Ok(cap);
};
- Err(Error::InvalidStreamState)
+ Err(Error::InvalidStreamState(stream_id))
+ }
+
+ /// Returns true if the stream has data that can be read.
+ pub fn stream_readable(&self, stream_id: u64) -> bool {
+ let stream = match self.streams.get(stream_id) {
+ Some(v) => v,
+
+ None => return false,
+ };
+
+ stream.is_readable()
+ }
+
+ /// Returns true if the stream has enough send capacity.
+ ///
+ /// When `len` more bytes can be buffered into the given stream's send
+ /// buffer, `true` will be returned, `false` otherwise.
+ ///
+ /// In the latter case, if the additional data can't be buffered due to
+ /// flow control limits, the peer will also be notified.
+ ///
+ /// If the specified stream doesn't exist (including when it has already
+ /// been completed and closed), the [`InvalidStreamState`] error will be
+ /// returned.
+ ///
+ /// In addition, if the peer has signalled that it doesn't want to receive
+ /// any more data from this stream by sending the `STOP_SENDING` frame, the
+ /// [`StreamStopped`] error will be returned.
+ ///
+ /// [`InvalidStreamState`]: enum.Error.html#variant.InvalidStreamState
+ /// [`StreamStopped`]: enum.Error.html#variant.StreamStopped
+ #[inline]
+ pub fn stream_writable(
+ &mut self, stream_id: u64, len: usize,
+ ) -> Result<bool> {
+ if self.stream_capacity(stream_id)? >= len {
+ return Ok(true);
+ }
+
+ let stream = match self.streams.get(stream_id) {
+ Some(v) => v,
+
+ None => return Err(Error::InvalidStreamState(stream_id)),
+ };
+
+ if self.max_tx_data - self.tx_data < len as u64 {
+ self.blocked_limit = Some(self.max_tx_data);
+ }
+
+ if stream.send.cap()? < len {
+ let max_off = stream.send.max_off();
+ self.streams.mark_blocked(stream_id, true, max_off);
+ }
+
+ Ok(false)
}
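A usage sketch for the new writability check (assumes `conn` and an open `stream_id`); note that a `false` return has the side effect of queuing the appropriate DATA_BLOCKED / STREAM_DATA_BLOCKED signal:

let body = b"hello world";

// Only submit the body once the stream can buffer all of it.
if conn.stream_writable(stream_id, body.len())? {
    conn.stream_send(stream_id, body, true)?;
}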
/// Returns true if all the data has been read from the specified stream.
@@ -2924,6 +3831,7 @@
///
/// Basically this returns true when the peer either set the `fin` flag
/// for the stream, or sent `RESET_STREAM`.
+ #[inline]
pub fn stream_finished(&self, stream_id: u64) -> bool {
let stream = match self.streams.get(stream_id) {
Some(v) => v,
@@ -2934,6 +3842,26 @@
stream.recv.is_fin()
}
+ /// Returns the number of bidirectional streams that can be created
+ /// before the peer's stream count limit is reached.
+ ///
+ /// This can be useful to know if it's possible to create a bidirectional
+ /// stream without trying it first.
+ #[inline]
+ pub fn peer_streams_left_bidi(&self) -> u64 {
+ self.streams.peer_streams_left_bidi()
+ }
+
+ /// Returns the number of unidirectional streams that can be created
+ /// before the peer's stream count limit is reached.
+ ///
+ /// This can be useful to know if it's possible to create a unidirectional
+ /// stream without trying it first.
+ #[inline]
+ pub fn peer_streams_left_uni(&self) -> u64 {
+ self.streams.peer_streams_left_uni()
+ }
+
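These counters make it possible to avoid a failed `stream_send()` on a brand new stream. A sketch, where the next-stream-id bookkeeping is the application's own (client-initiated bidirectional stream IDs go 0, 4, 8, ...):

if conn.peer_streams_left_bidi() > 0 {
    conn.stream_send(next_bidi_stream_id, b"request", true)?;
    next_bidi_stream_id += 4;
}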
/// Initializes the stream's application data.
///
/// This can be used by applications to store per-stream information without
@@ -2947,7 +3875,7 @@
&mut self, stream_id: u64, data: T,
) -> Result<()>
where
- T: std::any::Any + Send,
+ T: std::any::Any + Send + Sync,
{
// Get existing stream.
let stream = self.streams.get_mut(stream_id).ok_or(Error::Done)?;
@@ -2994,8 +3922,9 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// // Iterate over readable streams.
/// for stream_id in conn.readable() {
/// // Stream is readable, read until there's no more data.
@@ -3005,6 +3934,7 @@
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
+ #[inline]
pub fn readable(&self) -> StreamIter {
self.streams.readable()
}
@@ -3027,8 +3957,9 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// // Iterate over writable streams.
/// for stream_id in conn.writable() {
/// // Stream is writable, write some data.
@@ -3038,16 +3969,42 @@
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
+ #[inline]
pub fn writable(&self) -> StreamIter {
// If there is not enough connection-level send capacity, none of the
// streams are writable, so return an empty iterator.
- if self.send_capacity() == 0 {
+ if self.tx_cap == 0 {
return StreamIter::default();
}
self.streams.writable()
}
+ /// Returns the maximum possible size of egress UDP payloads.
+ ///
+ /// This is the maximum size of UDP payloads that can be sent, and depends
+ /// on both the configured maximum send payload size of the local endpoint
+ /// (as configured with [`set_max_send_udp_payload_size()`]), as well as
+ /// the transport parameter advertised by the remote peer.
+ ///
+ /// Note that this value can change during the lifetime of the connection,
+ /// but should remain stable across consecutive calls to [`send()`].
+ ///
+ /// [`set_max_send_udp_payload_size()`]:
+ /// struct.Config.html#method.set_max_send_udp_payload_size
+ /// [`send()`]: struct.Connection.html#method.send
+ pub fn max_send_udp_payload_size(&self) -> usize {
+ if self.is_established() {
+ // We cap the maximum packet size to 16KB or so, so that it can be
+ // always encoded with a 2-byte varint.
+ cmp::min(16383, self.recovery.max_datagram_size())
+ } else {
+ // Allow for 1200 bytes (minimum QUIC packet size) during the
+ // handshake.
+ MIN_CLIENT_INITIAL_LEN
+ }
+ }
+
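Callers can size their egress buffer from this value on each send pass, since it may grow once the peer's transport parameters arrive. A sketch of the usual send loop, assuming this quiche version's `send()` returns the number of bytes written:

let mut out = vec![0u8; conn.max_send_udp_payload_size()];

loop {
    let written = match conn.send(&mut out) {
        Ok(v) => v,

        Err(quiche::Error::Done) => break,

        Err(e) => panic!("send failed: {:?}", e),
    };

    socket.send(&out[..written]).unwrap();
}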
/// Reads the first received DATAGRAM.
///
/// On success the DATAGRAM's data is returned along with its size.
@@ -3066,14 +4023,16 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// let mut dgram_buf = [0; 512];
/// while let Ok(len) = conn.dgram_recv(&mut dgram_buf) {
/// println!("Got {} bytes of DATAGRAM", len);
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
+ #[inline]
pub fn dgram_recv(&mut self, buf: &mut [u8]) -> Result<usize> {
match self.dgram_recv_queue.pop() {
Some(d) => {
@@ -3089,6 +4048,21 @@
}
}
+ /// Reads the first received DATAGRAM.
+ ///
+ /// This is the same as [`dgram_recv()`] but returns the DATAGRAM as a
+ /// `Vec<u8>` instead of copying into the provided buffer.
+ ///
+ /// [`dgram_recv()`]: struct.Connection.html#method.dgram_recv
+ #[inline]
+ pub fn dgram_recv_vec(&mut self) -> Result<Vec<u8>> {
+ match self.dgram_recv_queue.pop() {
+ Some(d) => Ok(d),
+
+ None => Err(Error::Done),
+ }
+ }
+
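A sketch of draining the receive queue with the owned-buffer variant, which avoids picking a buffer size up front:

while let Ok(dgram) = conn.dgram_recv_vec() {
    println!("got {} bytes of DATAGRAM", dgram.len());
}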
/// Reads the first received DATAGRAM without removing it from the queue.
///
/// On success the DATAGRAM's data is returned along with the actual number
@@ -3102,15 +4076,41 @@
///
/// [`Done`]: enum.Error.html#variant.Done
/// [`BufferTooShort`]: enum.Error.html#variant.BufferTooShort
+ #[inline]
pub fn dgram_recv_peek(&self, buf: &mut [u8], len: usize) -> Result<usize> {
self.dgram_recv_queue.peek_front_bytes(buf, len)
}
/// Returns the length of the first stored DATAGRAM.
+ #[inline]
pub fn dgram_recv_front_len(&self) -> Option<usize> {
self.dgram_recv_queue.peek_front_len()
}
+ /// Returns the number of items in the DATAGRAM receive queue.
+ #[inline]
+ pub fn dgram_recv_queue_len(&self) -> usize {
+ self.dgram_recv_queue.len()
+ }
+
+ /// Returns the total size of all items in the DATAGRAM receive queue.
+ #[inline]
+ pub fn dgram_recv_queue_byte_size(&self) -> usize {
+ self.dgram_recv_queue.byte_size()
+ }
+
+ /// Returns the number of items in the DATAGRAM send queue.
+ #[inline]
+ pub fn dgram_send_queue_len(&self) -> usize {
+ self.dgram_send_queue.len()
+ }
+
+ /// Returns the total size of all items in the DATAGRAM send queue.
+ #[inline]
+ pub fn dgram_send_queue_byte_size(&self) -> usize {
+ self.dgram_send_queue.byte_size()
+ }
+
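These counters make application-level backpressure straightforward. A sketch, where the 64 KiB budget is an arbitrary application-chosen value:

// Only queue more DATAGRAMs while the send queue stays under a budget.
const DGRAM_QUEUE_BUDGET: usize = 64 * 1024;

if conn.dgram_send_queue_byte_size() < DGRAM_QUEUE_BUDGET {
    conn.dgram_send(b"telemetry sample")?;
}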
/// Sends data in a DATAGRAM frame.
///
/// [`Done`] is returned if no data was written.
@@ -3136,17 +4136,43 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// conn.dgram_send(b"hello")?;
/// # Ok::<(), quiche::Error>(())
/// ```
pub fn dgram_send(&mut self, buf: &[u8]) -> Result<()> {
let max_payload_len = match self.dgram_max_writable_len() {
- Some(v) => v as usize,
- None => {
- return Err(Error::InvalidState);
- },
+ Some(v) => v,
+
+ None => return Err(Error::InvalidState),
+ };
+
+ if buf.len() > max_payload_len {
+ return Err(Error::BufferTooShort);
+ }
+
+ self.dgram_send_queue.push(buf.to_vec())?;
+
+ if self.dgram_send_queue.byte_size() > self.recovery.cwnd_available() {
+ self.recovery.update_app_limited(false);
+ }
+
+ Ok(())
+ }
+
+ /// Sends data in a DATAGRAM frame.
+ ///
+ /// This is the same as [`dgram_send()`] but takes a `Vec<u8>` instead of
+ /// a slice.
+ ///
+ /// [`dgram_send()`]: struct.Connection.html#method.dgram_send
+ pub fn dgram_send_vec(&mut self, buf: Vec<u8>) -> Result<()> {
+ let max_payload_len = match self.dgram_max_writable_len() {
+ Some(v) => v,
+
+ None => return Err(Error::InvalidState),
};
if buf.len() > max_payload_len {
@@ -3170,12 +4196,14 @@
/// ```no_run
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// conn.dgram_send(b"hello")?;
/// conn.dgram_purge_outgoing(&|d: &[u8]| -> bool { d[0] == 0 });
/// # Ok::<(), quiche::Error>(())
/// ```
+ #[inline]
pub fn dgram_purge_outgoing<F: Fn(&[u8]) -> bool>(&mut self, f: F) {
self.dgram_send_queue.purge(f);
}
@@ -3191,8 +4219,9 @@
/// # let mut buf = [0; 512];
/// # let socket = std::net::UdpSocket::bind("127.0.0.1:0").unwrap();
/// # let mut config = quiche::Config::new(quiche::PROTOCOL_VERSION)?;
- /// # let scid = [0xba; 16];
- /// # let mut conn = quiche::accept(&scid, None, &mut config)?;
+ /// # let scid = quiche::ConnectionId::from_ref(&[0xba; 16]);
+ /// # let from = "127.0.0.1:1234".parse().unwrap();
+ /// # let mut conn = quiche::accept(&scid, None, from, &mut config)?;
/// if let Some(payload_size) = conn.dgram_max_writable_len() {
/// if payload_size > 5 {
/// conn.dgram_send(b"hello")?;
@@ -3200,12 +4229,13 @@
/// }
/// # Ok::<(), quiche::Error>(())
/// ```
+ #[inline]
pub fn dgram_max_writable_len(&self) -> Option<usize> {
match self.peer_transport_params.max_datagram_frame_size {
None => None,
Some(peer_frame_len) => {
// Start from the maximum packet size...
- let mut max_len = self.max_send_udp_payload_len();
+ let mut max_len = self.max_send_udp_payload_size();
// ...subtract the Short packet header overhead...
// (1 byte of pkt_len + len of dcid)
max_len = max_len.saturating_sub(1 + self.dcid.len());
@@ -3219,7 +4249,8 @@
// ...clamp to what peer can support...
max_len = cmp::min(peer_frame_len as usize, max_len);
// ...subtract frame overhead, checked for underflow.
- max_len.checked_sub(frame::MAX_DGRAM_OVERHEAD)
+ // (1 byte of frame type + len of length field)
+ max_len.checked_sub(1 + frame::MAX_DGRAM_OVERHEAD)
},
}
}
@@ -3227,7 +4258,7 @@
fn dgram_enabled(&self) -> bool {
self.local_transport_params
.max_datagram_frame_size
- .is_none()
+ .is_some()
}
/// Returns the amount of time until the next timeout event.
@@ -3241,7 +4272,7 @@
return None;
}
- let timeout = if self.draining_timer.is_some() {
+ let timeout = if self.is_draining() {
// Draining timer takes precedence over all other timers. If it is
// set it means the connection is closing so there's no point in
// processing the other timers.
@@ -3279,7 +4310,7 @@
if draining_timer <= now {
trace!("{} draining timeout expired", self.trace_id);
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with!(self.qlog, q, {
q.finish_log().ok();
});
@@ -3296,11 +4327,12 @@
if timer <= now {
trace!("{} idle timeout expired", self.trace_id);
- qlog_with!(self.qlog_streamer, q, {
+ qlog_with!(self.qlog, q, {
q.finish_log().ok();
});
self.closed = true;
+ self.timed_out = true;
return;
}
}
@@ -3315,9 +4347,10 @@
&self.trace_id,
);
- qlog_with!(self.qlog_streamer, q, {
- let ev = self.recovery.to_qlog();
- q.add_event(ev).ok();
+ qlog_with_type!(QLOG_METRICS, self.qlog, q, {
+ if let Some(ev_data) = self.recovery.maybe_qlog() {
+ q.add_event_data_with_instant(ev_data, now).ok();
+ }
});
return;
@@ -3333,29 +4366,30 @@
/// Returns [`Done`] if the connection had already been closed.
///
/// Note that the connection will not be closed immediately. An application
- /// should continue calling [`recv()`], [`send()`] and [`timeout()`] as
- /// normal, until the [`is_closed()`] method returns `true`.
+ /// should continue calling the [`recv()`], [`send()`], [`timeout()`] and
+ /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
+ /// returns `true`.
///
/// [`Done`]: enum.Error.html#variant.Done
/// [`recv()`]: struct.Connection.html#method.recv
/// [`send()`]: struct.Connection.html#method.send
/// [`timeout()`]: struct.Connection.html#method.timeout
+ /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
/// [`is_closed()`]: struct.Connection.html#method.is_closed
pub fn close(&mut self, app: bool, err: u64, reason: &[u8]) -> Result<()> {
- if self.is_closed() || self.draining_timer.is_some() {
+ if self.is_closed() || self.is_draining() {
return Err(Error::Done);
}
- if self.error.is_some() || self.app_error.is_some() {
+ if self.local_error.is_some() {
return Err(Error::Done);
}
- if app {
- self.app_error = Some(err);
- self.app_reason.extend_from_slice(reason);
- } else {
- self.error = Some(err);
- }
+ self.local_error = Some(ConnectionError {
+ is_app: app,
+ error_code: err,
+ reason: reason.to_vec(),
+ });
// When no packet was successfully processed close connection immediately.
if self.recv_count == 0 {
@@ -3369,6 +4403,7 @@
///
/// This can be used for logging purposes to differentiate between multiple
/// connections.
+ #[inline]
pub fn trace_id(&self) -> &str {
&self.trace_id
}
@@ -3376,47 +4411,184 @@
/// Returns the negotiated ALPN protocol.
///
/// If no protocol has been negotiated, the returned value is empty.
+ #[inline]
pub fn application_proto(&self) -> &[u8] {
- self.handshake.alpn_protocol()
+ self.alpn.as_ref()
+ }
+
+ /// Returns the server name requested by the client.
+ #[inline]
+ pub fn server_name(&self) -> Option<&str> {
+ self.handshake.server_name()
}
/// Returns the peer's leaf certificate (if any) as a DER-encoded buffer.
+ #[inline]
pub fn peer_cert(&self) -> Option<Vec<u8>> {
self.handshake.peer_cert()
}
+ /// Returns the serialized cryptographic session for the connection.
+ ///
+ /// This can be used by a client to cache a connection's session, and resume
+ /// it later using the [`set_session()`] method.
+ ///
+ /// [`set_session()`]: struct.Connection.html#method.set_session
+ #[inline]
+ pub fn session(&self) -> Option<Vec<u8>> {
+ self.session.clone()
+ }
+
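A sketch of the cache-and-resume flow this enables on the client side; file storage here is just an example, and `set_session()` is assumed to take the serialized bytes returned by `session()` and to be called before the new connection's handshake starts:

// After a successful handshake, stash the session for later reuse.
if let Some(session) = conn.session() {
    std::fs::write("session.bin", &session)?;
}

// On a later connection, restore it before the handshake begins in
// order to attempt resumption (and early data, if enabled).
let session = std::fs::read("session.bin")?;
new_conn.set_session(&session)?;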
+ /// Returns the source connection ID.
+ ///
+ /// Note that the value returned can change throughout the connection's
+ /// lifetime.
+ #[inline]
+ pub fn source_id(&self) -> ConnectionId {
+ ConnectionId::from_ref(self.scid.as_ref())
+ }
+
+ /// Returns the destination connection ID.
+ ///
+ /// Note that the value returned can change throughout the connection's
+ /// lifetime.
+ #[inline]
+ pub fn destination_id(&self) -> ConnectionId {
+ ConnectionId::from_ref(self.dcid.as_ref())
+ }
+
/// Returns true if the connection handshake is complete.
+ #[inline]
pub fn is_established(&self) -> bool {
- self.handshake.is_completed()
+ self.handshake_completed
}
/// Returns true if the connection is resumed.
+ #[inline]
pub fn is_resumed(&self) -> bool {
self.handshake.is_resumed()
}
/// Returns true if the connection has a pending handshake that has
/// progressed enough to send or receive early data.
+ #[inline]
pub fn is_in_early_data(&self) -> bool {
self.handshake.is_in_early_data()
}
+ /// Returns whether there is stream or DATAGRAM data available to read.
+ #[inline]
+ pub fn is_readable(&self) -> bool {
+ self.streams.has_readable() || self.dgram_recv_front_len().is_some()
+ }
+
+ /// Returns true if the connection is draining.
+ ///
+ /// If this returns true, the connection object cannot yet be dropped, but
+ /// no new application data can be sent or received. An application should
+ /// continue calling the [`recv()`], [`send()`], [`timeout()`], and
+ /// [`on_timeout()`] methods as normal, until the [`is_closed()`] method
+ /// returns `true`.
+ ///
+ /// [`recv()`]: struct.Connection.html#method.recv
+ /// [`send()`]: struct.Connection.html#method.send
+ /// [`timeout()`]: struct.Connection.html#method.timeout
+ /// [`on_timeout()`]: struct.Connection.html#method.on_timeout
+ /// [`is_closed()`]: struct.Connection.html#method.is_closed
+ #[inline]
+ pub fn is_draining(&self) -> bool {
+ self.draining_timer.is_some()
+ }
+
/// Returns true if the connection is closed.
///
/// If this returns true, the connection object can be dropped.
+ #[inline]
pub fn is_closed(&self) -> bool {
self.closed
}
+ /// Returns true if the connection was closed due to the idle timeout.
+ #[inline]
+ pub fn is_timed_out(&self) -> bool {
+ self.timed_out
+ }
+
+ /// Returns the error received from the peer, if any.
+ ///
+ /// Note that a `Some` return value does not necessarily imply
+ /// [`is_closed()`] or any other connection state.
+ ///
+ /// [`is_closed()`]: struct.Connection.html#method.is_closed
+ #[inline]
+ pub fn peer_error(&self) -> Option<&ConnectionError> {
+ self.peer_error.as_ref()
+ }
+
+ /// Returns the error [`close()`] was called with, or internally
+ /// created quiche errors, if any.
+ ///
+ /// Note that a `Some` return value does not necessarily imply
+ /// [`is_closed()`] or any other connection state.
+ /// `Some` also does not guarantee that the error has been sent to
+ /// or received by the peer.
+ ///
+ /// [`close()`]: struct.Connection.html#method.close
+ /// [`is_closed()`]: struct.Connection.html#method.is_closed
+ #[inline]
+ pub fn local_error(&self) -> Option<&ConnectionError> {
+ self.local_error.as_ref()
+ }
+
/// Collects and returns statistics about the connection.
+ #[inline]
pub fn stats(&self) -> Stats {
Stats {
recv: self.recv_count,
sent: self.sent_count,
lost: self.recovery.lost_count,
+ retrans: self.retrans_count,
cwnd: self.recovery.cwnd(),
rtt: self.recovery.rtt(),
+ sent_bytes: self.sent_bytes,
+ lost_bytes: self.recovery.bytes_lost,
+ recv_bytes: self.recv_bytes,
+ stream_retrans_bytes: self.stream_retrans_bytes,
+ pmtu: self.recovery.max_datagram_size(),
delivery_rate: self.recovery.delivery_rate(),
+ peer_max_idle_timeout: self.peer_transport_params.max_idle_timeout,
+ peer_max_udp_payload_size: self
+ .peer_transport_params
+ .max_udp_payload_size,
+ peer_initial_max_data: self.peer_transport_params.initial_max_data,
+ peer_initial_max_stream_data_bidi_local: self
+ .peer_transport_params
+ .initial_max_stream_data_bidi_local,
+ peer_initial_max_stream_data_bidi_remote: self
+ .peer_transport_params
+ .initial_max_stream_data_bidi_remote,
+ peer_initial_max_stream_data_uni: self
+ .peer_transport_params
+ .initial_max_stream_data_uni,
+ peer_initial_max_streams_bidi: self
+ .peer_transport_params
+ .initial_max_streams_bidi,
+ peer_initial_max_streams_uni: self
+ .peer_transport_params
+ .initial_max_streams_uni,
+ peer_ack_delay_exponent: self
+ .peer_transport_params
+ .ack_delay_exponent,
+ peer_max_ack_delay: self.peer_transport_params.max_ack_delay,
+ peer_disable_active_migration: self
+ .peer_transport_params
+ .disable_active_migration,
+ peer_active_conn_id_limit: self
+ .peer_transport_params
+ .active_conn_id_limit,
+ peer_max_datagram_frame_size: self
+ .peer_transport_params
+ .max_datagram_frame_size,
}
}
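A sketch of consuming the extended statistics, e.g. for periodic logging with the `log` macros:

let stats = conn.stats();

log::info!(
    "sent={} recv={} lost={} retrans={} rtt={:?} cwnd={}",
    stats.sent,
    stats.recv,
    stats.lost,
    stats.retrans,
    stats.rtt,
    stats.cwnd,
);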
@@ -3434,28 +4606,142 @@
Ok(())
}
+ fn parse_peer_transport_params(
+ &mut self, peer_params: TransportParams,
+ ) -> Result<()> {
+ if self.version >= PROTOCOL_VERSION_DRAFT28 ||
+ self.version == PROTOCOL_VERSION_V1
+ {
+ // Validate initial_source_connection_id.
+ match &peer_params.initial_source_connection_id {
+ Some(v) if v != &self.dcid =>
+ return Err(Error::InvalidTransportParam),
+
+ Some(_) => (),
+
+ // initial_source_connection_id must be sent by
+ // both endpoints.
+ None => return Err(Error::InvalidTransportParam),
+ }
+
+ // Validate original_destination_connection_id.
+ if let Some(odcid) = &self.odcid {
+ match &peer_params.original_destination_connection_id {
+ Some(v) if v != odcid =>
+ return Err(Error::InvalidTransportParam),
+
+ Some(_) => (),
+
+ // original_destination_connection_id must be
+ // sent by the server.
+ None if !self.is_server =>
+ return Err(Error::InvalidTransportParam),
+
+ None => (),
+ }
+ }
+
+ // Validate retry_source_connection_id.
+ if let Some(rscid) = &self.rscid {
+ match &peer_params.retry_source_connection_id {
+ Some(v) if v != rscid =>
+ return Err(Error::InvalidTransportParam),
+
+ Some(_) => (),
+
+ // retry_source_connection_id must be sent by
+ // the server.
+ None => return Err(Error::InvalidTransportParam),
+ }
+ }
+ } else {
+ // Legacy validation of the original connection ID when
+ // stateless retry is performed, for drafts < 28.
+ if self.did_retry &&
+ peer_params.original_destination_connection_id != self.odcid
+ {
+ return Err(Error::InvalidTransportParam);
+ }
+ }
+
+ self.process_peer_transport_params(peer_params);
+
+ self.parsed_peer_transport_params = true;
+
+ Ok(())
+ }
+
+ fn process_peer_transport_params(&mut self, peer_params: TransportParams) {
+ self.max_tx_data = peer_params.initial_max_data;
+
+ // Update send capacity.
+ self.update_tx_cap();
+
+ self.streams
+ .update_peer_max_streams_bidi(peer_params.initial_max_streams_bidi);
+ self.streams
+ .update_peer_max_streams_uni(peer_params.initial_max_streams_uni);
+
+ self.recovery.max_ack_delay =
+ time::Duration::from_millis(peer_params.max_ack_delay);
+
+ self.recovery
+ .update_max_datagram_size(peer_params.max_udp_payload_size as usize);
+
+ self.peer_transport_params = peer_params;
+ }
+
/// Continues the handshake.
///
/// If the connection is already established, it does nothing.
fn do_handshake(&mut self) -> Result<()> {
- // Handshake is already complete, there's nothing to do.
- if self.is_established() {
+ if self.handshake_completed {
+ // Handshake is already complete, nothing more to do.
return Ok(());
}
match self.handshake.do_handshake() {
Ok(_) => (),
- Err(Error::Done) => return Ok(()),
+ Err(Error::Done) => {
+ // Try to parse transport parameters as soon as the first flight
+ // of handshake data is processed.
+ //
+ // This is potentially dangerous as the handshake hasn't been
+ // completed yet, though it's required to be able to send data
+ // in 0.5 RTT.
+ let raw_params = self.handshake.quic_transport_params();
+
+ if !self.parsed_peer_transport_params && !raw_params.is_empty() {
+ let peer_params =
+ TransportParams::decode(raw_params, self.is_server)?;
+
+ self.parse_peer_transport_params(peer_params)?;
+ }
+
+ return Ok(());
+ },
Err(e) => return Err(e),
};
- if self.application_proto().is_empty() {
- // Send no_application_proto TLS alert when no protocol
- // can be negotiated.
- self.error = Some(0x178);
- return Err(Error::TlsFail);
+ self.handshake_completed = self.handshake.is_completed();
+
+ self.alpn = self.handshake.alpn_protocol().to_vec();
+
+ let raw_params = self.handshake.quic_transport_params();
+
+ if !self.parsed_peer_transport_params && !raw_params.is_empty() {
+ let peer_params =
+ TransportParams::decode(raw_params, self.is_server)?;
+
+ self.parse_peer_transport_params(peer_params)?;
+ }
+
+ // Once the handshake is completed there's no point in processing 0-RTT
+ // packets anymore, so clear the buffer now.
+ if self.handshake_completed {
+ self.undecryptable_pkts.clear();
}
trace!("{} connection established: proto={:?} cipher={:?} curve={:?} sigalg={:?} resumed={} {:?}",
@@ -3464,17 +4750,21 @@
self.handshake.cipher(),
self.handshake.curve(),
self.handshake.sigalg(),
- self.is_resumed(),
+ self.handshake.is_resumed(),
self.peer_transport_params);
Ok(())
}
- /// Selects the packet number space for outgoing packets.
- fn write_epoch(&self) -> Result<packet::Epoch> {
+ /// Selects the packet type for the next outgoing packet.
+ fn write_pkt_type(&self) -> Result<packet::Type> {
// On error send packet in the latest epoch available, but only send
// 1-RTT ones when the handshake is completed.
- if self.error.is_some() {
+ if self
+ .local_error
+ .as_ref()
+ .map_or(false, |conn_err| !conn_err.is_app)
+ {
let epoch = match self.handshake.write_level() {
crypto::Level::Initial => packet::EPOCH_INITIAL,
crypto::Level::ZeroRTT => unreachable!(),
@@ -3485,10 +4775,10 @@
if epoch == packet::EPOCH_APPLICATION && !self.is_established() {
// Downgrade the epoch to handshake as the handshake is not
// completed yet.
- return Ok(packet::EPOCH_HANDSHAKE);
+ return Ok(packet::Type::Handshake);
}
- return Ok(epoch);
+ return Ok(packet::Type::from_epoch(epoch));
}
for epoch in packet::EPOCH_INITIAL..packet::EPOCH_COUNT {
@@ -3499,33 +4789,44 @@
// We are ready to send data for this packet number space.
if self.pkt_num_spaces[epoch].ready() {
- return Ok(epoch);
+ return Ok(packet::Type::from_epoch(epoch));
}
// There are lost frames in this packet number space.
if !self.recovery.lost[epoch].is_empty() {
- return Ok(epoch);
+ return Ok(packet::Type::from_epoch(epoch));
}
// We need to send PTO probe packets.
if self.recovery.loss_probes[epoch] > 0 {
- return Ok(epoch);
+ return Ok(packet::Type::from_epoch(epoch));
}
}
// If there are flushable, almost full or blocked streams, use the
// Application epoch.
if (self.is_established() || self.is_in_early_data()) &&
- (self.almost_full ||
+ (self.should_send_handshake_done() ||
+ self.almost_full ||
self.blocked_limit.is_some() ||
self.dgram_send_queue.has_pending() ||
+ self.local_error
+ .as_ref()
+ .map_or(false, |conn_err| conn_err.is_app) ||
self.streams.should_update_max_streams_bidi() ||
self.streams.should_update_max_streams_uni() ||
self.streams.has_flushable() ||
self.streams.has_almost_full() ||
- self.streams.has_blocked())
+ self.streams.has_blocked() ||
+ self.streams.has_reset() ||
+ self.streams.has_stopped())
{
- return Ok(packet::EPOCH_APPLICATION);
+ // Only clients can send 0-RTT packets.
+ if !self.is_server && self.is_in_early_data() {
+ return Ok(packet::Type::ZeroRTT);
+ }
+
+ return Ok(packet::Type::Short);
}
Err(Error::Done)
@@ -3556,7 +4857,9 @@
frame::Frame::Ping => (),
- frame::Frame::ACK { ranges, ack_delay } => {
+ frame::Frame::ACK {
+ ranges, ack_delay, ..
+ } => {
let ack_delay = ack_delay
.checked_mul(2_u64.pow(
self.peer_transport_params.ack_delay_exponent as u32,
@@ -3592,14 +4895,61 @@
frame::Frame::ResetStream {
stream_id,
+ error_code,
final_size,
- ..
} => {
// Peer can't send on our unidirectional streams.
if !stream::is_bidi(stream_id) &&
stream::is_local(stream_id, self.is_server)
{
- return Err(Error::InvalidStreamState);
+ return Err(Error::InvalidStreamState(stream_id));
+ }
+
+ let max_rx_data_left = self.max_rx_data - self.rx_data;
+
+ // Get existing stream or create a new one, but if the stream
+ // has already been closed and collected, ignore the frame.
+ //
+ // This can happen if e.g. an ACK frame is lost, and the peer
+ // retransmits another frame before it realizes that the stream
+ // is gone.
+ //
+ // Note that this makes it impossible to check whether the frame is
+ // illegal, since the stream state is gone, but since we ignore the
+ // frame, it should be fine.
+ let stream = match self.get_or_create_stream(stream_id, false) {
+ Ok(v) => v,
+
+ Err(Error::Done) => return Ok(()),
+
+ Err(e) => return Err(e),
+ };
+
+ let was_readable = stream.is_readable();
+
+ let max_off_delta =
+ stream.recv.reset(error_code, final_size)? as u64;
+
+ if max_off_delta > max_rx_data_left {
+ return Err(Error::FlowControl);
+ }
+
+ if !was_readable && stream.is_readable() {
+ self.streams.mark_readable(stream_id, true);
+ }
+
+ self.rx_data += max_off_delta;
+ },
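
The connection-level flow control check for RESET_STREAM can be summarized as follows; this is a simplified sketch with hypothetical names, not the crate's code.

    // The stream's final size may advance its highest known offset by
    // `max_off_delta` bytes; that advance must fit within the remaining
    // connection-level flow control credit before it is accounted for.
    fn apply_reset(
        max_rx_data: u64, rx_data: &mut u64, max_off_delta: u64,
    ) -> Result<(), &'static str> {
        let max_rx_data_left = max_rx_data - *rx_data;

        if max_off_delta > max_rx_data_left {
            return Err("FlowControl");
        }

        *rx_data += max_off_delta;
        Ok(())
    }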
+
+ frame::Frame::StopSending {
+ stream_id,
+ error_code,
+ } => {
+ // STOP_SENDING on a receive-only stream is a fatal error.
+ if !stream::is_local(stream_id, self.is_server) &&
+ !stream::is_bidi(stream_id)
+ {
+ return Err(Error::InvalidStreamState(stream_id));
}
// Get existing stream or create a new one, but if the stream
@@ -3621,25 +4971,30 @@
Err(e) => return Err(e),
};
- self.rx_data += stream.recv.reset(final_size)? as u64;
+ let was_writable = stream.is_writable();
- if self.rx_data > self.max_rx_data {
- return Err(Error::FlowControl);
- }
- },
+ // Try stopping the stream.
+ if let Ok((final_size, unsent)) = stream.send.stop(error_code) {
+ // Claw back some flow control allowance from data that was
+ // buffered but not actually sent before the stream was
+ // reset.
+ //
+ // Note that `tx_cap` will be updated later on, so no need
+ // to touch it here.
+ self.tx_data = self.tx_data.saturating_sub(unsent);
- frame::Frame::StopSending { stream_id, .. } => {
- // STOP_SENDING on a receive-only stream is a fatal error.
- if !stream::is_local(stream_id, self.is_server) &&
- !stream::is_bidi(stream_id)
- {
- return Err(Error::InvalidStreamState);
+ self.streams
+ .mark_reset(stream_id, true, error_code, final_size);
+
+ if !was_writable {
+ self.streams.mark_writable(stream_id, true);
+ }
}
},
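
The clawback performed when a local send side is stopped amounts to subtracting the bytes that were buffered but never left the endpoint; a tiny hedged sketch:

    // Bytes queued for sending but never transmitted before the stream
    // was stopped are returned to the connection's flow control budget.
    // saturating_sub() guards against underflow.
    fn claw_back_unsent(tx_data: u64, unsent: u64) -> u64 {
        tx_data.saturating_sub(unsent)
    }

    // e.g. claw_back_unsent(10_000, 2_500) == 7_500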
frame::Frame::Crypto { data } => {
// Push the data to the stream so it can be re-ordered.
- self.pkt_num_spaces[epoch].crypto_stream.recv.push(data)?;
+ self.pkt_num_spaces[epoch].crypto_stream.recv.write(data)?;
// Feed crypto data to the TLS state, if there's data
// available at the expected offset.
@@ -3649,99 +5004,20 @@
let stream = &mut self.pkt_num_spaces[epoch].crypto_stream;
- while let Ok((read, _)) = stream.recv.pop(&mut crypto_buf) {
+ while let Ok((read, _)) = stream.recv.emit(&mut crypto_buf) {
let recv_buf = &crypto_buf[..read];
- self.handshake.provide_data(level, &recv_buf)?;
+ self.handshake.provide_data(level, recv_buf)?;
}
- self.do_handshake()?;
-
- // Try to parse transport parameters as soon as the first flight
- // of handshake data is processed.
- //
- // This is potentially dangerous as the handshake hasn't been
- // completed yet, though it's required to be able to send data
- // in 0.5 RTT.
- let raw_params = self.handshake.quic_transport_params();
-
- if !self.parsed_peer_transport_params && !raw_params.is_empty() {
- let peer_params =
- TransportParams::decode(&raw_params, self.is_server)?;
-
- if self.version >= PROTOCOL_VERSION_DRAFT28 {
- // Validate initial_source_connection_id.
- match &peer_params.initial_source_connection_id {
- Some(v) if v != &self.dcid =>
- return Err(Error::InvalidTransportParam),
-
- Some(_) => (),
-
- // initial_source_connection_id must be sent by
- // both endpoints.
- None => return Err(Error::InvalidTransportParam),
- }
-
- // Validate original_destination_connection_id.
- if let Some(odcid) = &self.odcid {
- match &peer_params.original_destination_connection_id
- {
- Some(v) if v != odcid =>
- return Err(Error::InvalidTransportParam),
-
- Some(_) => (),
-
- // original_destination_connection_id must be
- // sent by the server.
- None if !self.is_server =>
- return Err(Error::InvalidTransportParam),
-
- None => (),
- }
- }
-
- // Validate retry_source_connection_id.
- if let Some(rscid) = &self.rscid {
- match &peer_params.retry_source_connection_id {
- Some(v) if v != rscid =>
- return Err(Error::InvalidTransportParam),
-
- Some(_) => (),
-
- // retry_source_connection_id must be sent by
- // the server.
- None => return Err(Error::InvalidTransportParam),
- }
- }
- } else {
- // Legacy validation of the original connection ID when
- // stateless retry is performed, for drafts < 28.
- if self.did_retry &&
- peer_params.original_destination_connection_id !=
- self.odcid
- {
- return Err(Error::InvalidTransportParam);
- }
- }
-
- // Update flow control limits.
- self.max_tx_data = peer_params.initial_max_data;
-
- self.streams.update_peer_max_streams_bidi(
- peer_params.initial_max_streams_bidi,
- );
- self.streams.update_peer_max_streams_uni(
- peer_params.initial_max_streams_uni,
- );
-
- self.recovery.max_ack_delay =
- time::Duration::from_millis(peer_params.max_ack_delay);
-
- self.peer_transport_params = peer_params;
-
- self.parsed_peer_transport_params = true;
+ if self.is_established() {
+ self.handshake.process_post_handshake()?;
+ } else {
+ self.do_handshake()?;
}
},
+ frame::Frame::CryptoHeader { .. } => unreachable!(),
+
// TODO: implement stateless retry
frame::Frame::NewToken { .. } => (),
@@ -3750,7 +5026,7 @@
if !stream::is_bidi(stream_id) &&
stream::is_local(stream_id, self.is_server)
{
- return Err(Error::InvalidStreamState);
+ return Err(Error::InvalidStreamState(stream_id));
}
let max_rx_data_left = self.max_rx_data - self.rx_data;
@@ -3782,20 +5058,31 @@
return Err(Error::FlowControl);
}
- stream.recv.push(data)?;
+ let was_readable = stream.is_readable();
- if stream.is_readable() {
+ stream.recv.write(data)?;
+
+ if !was_readable && stream.is_readable() {
self.streams.mark_readable(stream_id, true);
}
self.rx_data += max_off_delta;
},
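
The was_readable pattern used above makes readability notifications edge-triggered: a stream is only (re)marked readable when it transitions from not-readable to readable. A hedged, standalone sketch of the idea:

    use std::collections::HashSet;

    // Mark the stream readable only on the not-readable -> readable edge,
    // so repeated STREAM frames don't trigger redundant bookkeeping.
    fn update_readable(
        readable_set: &mut HashSet<u64>, stream_id: u64,
        was_readable: bool, is_readable: bool,
    ) {
        if !was_readable && is_readable {
            readable_set.insert(stream_id);
        }
    }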
+ frame::Frame::StreamHeader { .. } => unreachable!(),
+
frame::Frame::MaxData { max } => {
self.max_tx_data = cmp::max(self.max_tx_data, max);
},
frame::Frame::MaxStreamData { stream_id, max } => {
+ // Peer can't receive on its own unidirectional streams.
+ if !stream::is_bidi(stream_id) &&
+ !stream::is_local(stream_id, self.is_server)
+ {
+ return Err(Error::InvalidStreamState(stream_id));
+ }
+
// Get existing stream or create a new one, but if the stream
// has already been closed and collected, ignore the frame.
//
@@ -3876,11 +5163,23 @@
frame::Frame::PathResponse { .. } => (),
- frame::Frame::ConnectionClose { .. } => {
+ frame::Frame::ConnectionClose {
+ error_code, reason, ..
+ } => {
+ self.peer_error = Some(ConnectionError {
+ is_app: false,
+ error_code,
+ reason,
+ });
self.draining_timer = Some(now + (self.recovery.pto() * 3));
},
- frame::Frame::ApplicationClose { .. } => {
+ frame::Frame::ApplicationClose { error_code, reason } => {
+ self.peer_error = Some(ConnectionError {
+ is_app: true,
+ error_code,
+ reason,
+ });
self.draining_timer = Some(now + (self.recovery.pto() * 3));
},
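
Both close frames now record the peer's error and start the draining period, which lasts three probe timeouts. A small hedged sketch of that arithmetic (pto stands in for the recovery module's current PTO estimate):

    use std::time::{Duration, Instant};

    // After receiving CONNECTION_CLOSE / APPLICATION_CLOSE the connection
    // drains for 3 * PTO before it can be discarded.
    fn draining_deadline(now: Instant, pto: Duration) -> Instant {
        now + pto * 3
    }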
@@ -3902,7 +5201,7 @@
// quiche always advertises support for 64K sized DATAGRAM
// frames, as recommended by the standard, so we don't need a
// size check.
- if self.dgram_enabled() {
+ if !self.dgram_enabled() {
return Err(Error::InvalidState);
}
@@ -3911,8 +5210,10 @@
self.dgram_recv_queue.pop();
}
- self.dgram_recv_queue.push(&data)?;
+ self.dgram_recv_queue.push(data)?;
},
+
+ frame::Frame::DatagramHeader { .. } => unreachable!(),
}
Ok(())
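
The DATAGRAM receive path makes room for new data by dropping the oldest queued datagram when the queue is full. A hedged stand-in using VecDeque (the real queue type is internal to the crate):

    use std::collections::VecDeque;

    // Drop-oldest policy: if the receive queue is at capacity, evict the
    // oldest datagram before pushing the newly received one.
    fn enqueue_dgram(queue: &mut VecDeque<Vec<u8>>, capacity: usize, data: &[u8]) {
        if queue.len() >= capacity {
            queue.pop_front();
        }

        queue.push_back(data.to_vec());
    }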
@@ -3946,6 +5247,11 @@
self.max_rx_data_next / 2 > self.max_rx_data - self.rx_data
}
+ /// Returns true if the HANDSHAKE_DONE frame needs to be sent.
+ fn should_send_handshake_done(&self) -> bool {
+ self.is_established() && !self.handshake_done_sent && self.is_server
+ }
+
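
For clarity, the predicate just added reduces to a conjunction of three conditions; an equivalent free-function form, for illustration only:

    // HANDSHAKE_DONE is only ever sent by a server, only once, and only
    // after the handshake has completed.
    fn needs_handshake_done(is_server: bool, established: bool, already_sent: bool) -> bool {
        is_server && established && !already_sent
    }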
/// Returns the idle timeout value.
///
/// `None` is returned if both end-points disabled the idle timeout.
@@ -3978,12 +5284,6 @@
Some(idle_timeout)
}
- /// Returns the connection's overall send capacity.
- fn send_capacity(&self) -> usize {
- let cap = self.max_tx_data - self.tx_data;
- cmp::min(cap, self.recovery.cwnd_available() as u64) as usize
- }
-
/// Returns the connection's handshake status for use in loss recovery.
fn handshake_status(&self) -> recovery::HandshakeStatus {
recovery::HandshakeStatus {
@@ -3995,6 +5295,14 @@
completed: self.is_established(),
}
}
+
+ /// Updates send capacity.
+ fn update_tx_cap(&mut self) {
+ self.tx_cap = cmp::min(
+ self.recovery.cwnd_available() as u64,
+ self.max_tx_data - self.tx_data,
+ ) as usize;
+ }
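
update_tx_cap() caches the connection's usable send capacity as the minimum of congestion window headroom and remaining connection-level flow control credit; a hedged standalone version of the same arithmetic:

    use std::cmp;

    // Send capacity is bounded both by congestion control (cwnd headroom)
    // and by the peer's connection-level flow control limit.
    fn compute_tx_cap(cwnd_available: usize, max_tx_data: u64, tx_data: u64) -> usize {
        cmp::min(cwnd_available as u64, max_tx_data - tx_data) as usize
    }

    // e.g. compute_tx_cap(12_000, 100_000, 95_000) == 5_000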
}
/// Maps an `Error` to `Error::Done`, or itself.
@@ -4034,48 +5342,167 @@
/// Statistics about the connection.
///
-/// A connections's statistics can be collected using the [`stats()`] method.
+/// A connection's statistics can be collected using the [`stats()`] method.
///
/// [`stats()`]: struct.Connection.html#method.stats
#[derive(Clone)]
pub struct Stats {
- /// The number of QUIC packets received on this connection.
+ /// The number of QUIC packets received.
pub recv: usize,
- /// The number of QUIC packets sent on this connection.
+ /// The number of QUIC packets sent.
pub sent: usize,
/// The number of QUIC packets that were lost.
pub lost: usize,
+ /// The number of sent QUIC packets with retransmitted data.
+ pub retrans: usize,
+
/// The estimated round-trip time of the connection.
pub rtt: time::Duration,
/// The size of the connection's congestion window in bytes.
pub cwnd: usize,
- /// The estimated data delivery rate in bytes/s.
+ /// The number of sent bytes.
+ pub sent_bytes: u64,
+
+ /// The number of received bytes.
+ pub recv_bytes: u64,
+
+ /// The number of bytes lost.
+ pub lost_bytes: u64,
+
+ /// The number of stream bytes retransmitted.
+ pub stream_retrans_bytes: u64,
+
+ /// The current PMTU for the connection.
+ pub pmtu: usize,
+
+ /// The most recent data delivery rate estimate in bytes/s.
pub delivery_rate: u64,
+
+ /// The maximum idle timeout.
+ pub peer_max_idle_timeout: u64,
+
+ /// The maximum UDP payload size.
+ pub peer_max_udp_payload_size: u64,
+
+ /// The initial flow control maximum data for the connection.
+ pub peer_initial_max_data: u64,
+
+ /// The initial flow control maximum data for local bidirectional streams.
+ pub peer_initial_max_stream_data_bidi_local: u64,
+
+ /// The initial flow control maximum data for remote bidirectional streams.
+ pub peer_initial_max_stream_data_bidi_remote: u64,
+
+ /// The initial flow control maximum data for unidirectional streams.
+ pub peer_initial_max_stream_data_uni: u64,
+
+ /// The initial maximum bidirectional streams.
+ pub peer_initial_max_streams_bidi: u64,
+
+ /// The initial maximum unidirectional streams.
+ pub peer_initial_max_streams_uni: u64,
+
+ /// The ACK delay exponent.
+ pub peer_ack_delay_exponent: u64,
+
+ /// The max ACK delay.
+ pub peer_max_ack_delay: u64,
+
+ /// Whether active migration is disabled.
+ pub peer_disable_active_migration: bool,
+
+ /// The active connection ID limit.
+ pub peer_active_conn_id_limit: u64,
+
+ /// DATAGRAM frame extension parameter, if any.
+ pub peer_max_datagram_frame_size: Option<u64>,
}
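
Once a connection is up, the expanded Stats snapshot is read through the existing stats() method; a hedged usage sketch, assuming conn is an established quiche::Connection:

    // `conn` is assumed to be an established quiche::Connection.
    let stats = conn.stats();

    println!(
        "recv={} sent={} lost={} retrans={} cwnd={} pmtu={}",
        stats.recv, stats.sent, stats.lost, stats.retrans, stats.cwnd, stats.pmtu,
    );

    println!("peer initial_max_data={}", stats.peer_initial_max_data);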
impl std::fmt::Debug for Stats {
+ #[inline]
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(
f,
- "recv={} sent={} lost={} rtt={:?} cwnd={} delivery_rate={}",
- self.recv,
- self.sent,
- self.lost,
- self.rtt,
- self.cwnd,
- self.delivery_rate
- )
+ "recv={} sent={} lost={} rtt={:?} cwnd={}",
+ self.recv, self.sent, self.lost, self.rtt, self.cwnd,
+ )?;
+
+ write!(f, " peer_tps={{")?;
+
+ write!(f, " max_idle_timeout={},", self.peer_max_idle_timeout,)?;
+
+ write!(
+ f,
+ " max_udp_payload_size={},",
+ self.peer_max_udp_payload_size,
+ )?;
+
+ write!(f, " initial_max_data={},", self.peer_initial_max_data,)?;
+
+ write!(
+ f,
+ " initial_max_stream_data_bidi_local={},",
+ self.peer_initial_max_stream_data_bidi_local,
+ )?;
+
+ write!(
+ f,
+ " initial_max_stream_data_bidi_remote={},",
+ self.peer_initial_max_stream_data_bidi_remote,
+ )?;
+
+ write!(
+ f,
+ " initial_max_stream_data_uni={},",
+ self.peer_initial_max_stream_data_uni,
+ )?;
+
+ write!(
+ f,
+ " initial_max_streams_bidi={},",
+ self.peer_initial_max_streams_bidi,
+ )?;
+
+ write!(
+ f,
+ " initial_max_streams_uni={},",
+ self.peer_initial_max_streams_uni,
+ )?;
+
+ write!(f, " ack_delay_exponent={},", self.peer_ack_delay_exponent,)?;
+
+ write!(f, " max_ack_delay={},", self.peer_max_ack_delay,)?;
+
+ write!(
+ f,
+ " disable_active_migration={},",
+ self.peer_disable_active_migration,
+ )?;
+
+ write!(
+ f,
+ " active_conn_id_limit={},",
+ self.peer_active_conn_id_limit,
+ )?;
+
+ write!(
+ f,
+ " max_datagram_frame_size={:?}",
+ self.peer_max_datagram_frame_size,
+ )?;
+
+ write!(f, " }}")
}
}
#[derive(Clone, Debug, PartialEq)]
struct TransportParams {
- pub original_destination_connection_id: Option<Vec<u8>>,