chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "b23a13e2b7ee73e15ba008cd9b19dcd2d3861957"
},
"path_in_vcs": ""
}

View File

@@ -0,0 +1,128 @@
name: CI
on:
pull_request:
push:
branches:
- master
env:
RUST_BACKTRACE: 1
jobs:
ci-pass:
name: CI is green
runs-on: ubuntu-latest
needs:
- style
- test
- msrv
- miri
- features
- semver
- doc
steps:
- run: exit 0
style:
name: Check Style
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- run: cargo fmt --all --check
test:
name: Test ${{ matrix.rust }} on ${{ matrix.os }}
needs: [style]
strategy:
matrix:
rust:
- stable
- beta
- nightly
os:
- ubuntu-latest
- windows-latest
- macos-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v6
- name: Install Rust (${{ matrix.rust }})
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.rust }}
- run: cargo test --all-features
msrv:
name: Check MSRV (${{ matrix.rust }}) on ${{ matrix.os }}
needs: [style]
strategy:
matrix:
rust: [ 1.64 ] # keep in sync with 'rust-version' in Cargo.toml
os:
- ubuntu-latest
- windows-latest
- macos-latest
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- uses: Swatinem/rust-cache@v2
- name: Resolve MSRV aware dependencies
run: |
cargo update
env:
CARGO_RESOLVER_INCOMPATIBLE_RUST_VERSIONS: fallback
- name: Install Rust (${{ matrix.rust }})
uses: dtolnay/rust-toolchain@master
with:
toolchain: ${{ matrix.rust }}
- run: cargo check --features full
miri:
name: Test with Miri
needs: [style]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@nightly
with:
components: miri
- name: Test
env:
# Can't enable tcp feature since Miri does not support the tokio runtime
MIRIFLAGS: "-Zmiri-disable-isolation"
run: cargo miri test --all-features
features:
name: features
needs: [style]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@stable
- uses: taiki-e/install-action@cargo-hack
- run: cargo hack --no-dev-deps check --feature-powerset --depth 2
semver:
name: semver
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- name: Check semver
uses: obi1kenobi/cargo-semver-checks-action@v2
with:
feature-group: only-explicit-features
features: full
release-type: minor
doc:
name: Build docs
needs: [style, test]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: dtolnay/rust-toolchain@nightly
- run: cargo rustdoc --features full -- --cfg docsrs -D rustdoc::broken_intra_doc_links

View File

@@ -0,0 +1,99 @@
name: Rustdoc PR Preview
on:
issue_comment:
types: [created]
pull_request:
types: [closed]
jobs:
rustdoc-preview:
# Only run on issue_comment, not on PR close
if: github.event_name == 'issue_comment' && github.event.issue.pull_request && contains(github.event.comment.body, '/rustdoc-preview')
runs-on: ubuntu-latest
steps:
- name: Check if commenter is a collaborator
id: collaborator-check
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const commenter = context.payload.comment.user.login;
const owner = context.repo.owner;
const repo = context.repo.repo;
try {
await github.rest.repos.checkCollaborator({
owner,
repo,
username: commenter
});
return true;
} catch (e) {
return false;
}
# Only continue if the check passes
- name: Fail if not collaborator
if: steps.collaborator-check.outputs.result != 'true'
run: |
echo "Commenter is not a collaborator. Skipping preview build."
exit 1
- name: Checkout PR branch
uses: actions/checkout@v6
with:
# Check out the PR's branch
ref: refs/pull/${{ github.event.issue.number }}/head
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@nightly
- name: Build rustdoc
run: cargo rustdoc --features full -- --cfg docsrs
- name: Deploy rustdoc to gh-pages/pr-<PR_NUMBER>
uses: peaceiris/actions-gh-pages@v3
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_dir: ./target/doc
# Publish to pr-<PR_NUMBER> subdir
destination_dir: pr-${{ github.event.issue.number }}
keep_files: true
- name: Comment preview link on PR
uses: actions/github-script@v7
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
script: |
const pr_number = context.issue.number;
const repo = context.repo.repo;
const owner = context.repo.owner;
const url = `https://${owner}.github.io/${repo}/pr-${pr_number}/hyper_util/`;
github.rest.issues.createComment({
issue_number: pr_number,
owner,
repo,
body: `📝 Rustdoc preview for this PR: [View docs](${url})`
});
rustdoc-preview-cleanup:
# Only run on PR close/merge
if: github.event_name == 'pull_request' && github.event.action == 'closed'
runs-on: ubuntu-latest
steps:
- name: Checkout gh-pages branch
uses: actions/checkout@v6
with:
ref: gh-pages
persist-credentials: true
- name: Remove PR preview directory
run: |
rm -rf pr-${{ github.event.pull_request.number }}
- name: Commit and push removal
run: |
git config user.name "github-actions"
git config user.email "github-actions@github.com"
git add .
git commit -m "Remove rustdoc preview for PR #${{ github.event.pull_request.number }}" || echo "Nothing to commit"
git push

151
vendor/hyper-util/CHANGELOG.md vendored Normal file
View File

@@ -0,0 +1,151 @@
# 0.1.20 (2026-02-02)
- Fix `proxy::Matcher` to properly match domains regardless of casing
- Fix system proxy matcher dependency on macOS when used in sandboxed environments.
- Increased MSRV to 1.64.
# 0.1.19 (2025-12-03)
- Add `client::pool` module for composable pools. Enable with the `client-pool` feature.
- Add `pool::singleton` for sharing a single cloneable connection.
- Add `pool::cache` for caching a list of connections.
- Add `pool::negotiate` for combining two pools with upgrade and fallback negotiation.
- Add `pool::map` for customizable mapping of keys and connections.
# 0.1.18 (2025-11-13)
- Fix `rt::TokioTimer` to support Tokio's paused time.
- Fix `client::proxy::match::Matcher` to parse auth without passwords.
# 0.1.17 (2025-09-15)
- Fix `legacy::Client` to allow absolute-form URIs when `Connected::proxy(true)` is passed and the scheme is `https`.
# 0.1.16 (2025-07-22)
- Add `impl Clone` for `proxy::Tunnel` service.
- Fix `proxy::Matcher` to detect SOCKS4 schemes.
- Fix `legacy::Client` pool idle checker to trigger less aggressively, saving CPU.
# 0.1.15 (2025-07-07)
- Add header casing options to `auto::Builder`.
- Fix `proxy::Socksv5` to check for enough bytes before parsing ipv6 responses.
- Fix including `client-proxy` in the `full` feature set.
# 0.1.14 (2025-06-04)
- Fix `HttpConnector` to defer address family order to resolver sort order.
- Fix `proxy::Matcher` to find HTTPS system proxies on Windows.
# 0.1.13 (2025-05-27)
- Fix `HttpConnector` to always prefer IPv6 addresses first, if happy eyeballs is enabled.
- Fix `legacy::Client` to return better errors if available on the connection.
# 0.1.12 (2025-05-19)
- Add `client::legacy::proxy::Tunnel` connector that wraps another connector with HTTP tunneling.
- Add `client::legacy::proxy::{SocksV4, SocksV5}` connectors that wraps another connector with SOCKS.
- Add `client::proxy::matcher::Matcher` type that can use environment variables to match proxy rules.
- Add `server::graceful::Watcher` type that can be sent to watch a connection in another task.
- Add `GracefulShutdown::count()` method to get number of currently watched connections.
- Fix missing `must_use` attributes on `Connection` futures.
- Fix tracing span in GAI resolver that can cause panics.
# 0.1.11 (2025-03-31)
- Add `tracing` crate feature with support in `TokioExecutor`.
- Add `HttpConnector::interface()` support for macOS and Solarish systems.
- Add `rt::WithHyperIo` and `rt::WithTokioIo` combinators.
- Add `auto_date_header()` for auto server builder.
- Add `max_local_error_reset_streams()` for auto server builder.
- Add `ignore_invalid_headers()` for auto server builder.
- Add methods to determine if auto server is configured for HTTP/1 or HTTP/2.
- Implement `Connection` for `UnixStream` and `NamedPipeClient`.
- Fix HTTP/2 websocket requests sent through `legacy::Client`.
# 0.1.10 (2024-10-28)
- Add `http2_max_header_list_size(num)` option to legacy client builder.
- Add `set_tcp_user_timeout(dur)` option to legacy `HttpConnector`.
# 0.1.9 (2024-09-24)
- Add support for `client::legacy` DNS resolvers to set non-zero ports on returned addresses.
- Fix `client::legacy` wrongly retrying pooled connections that were created successfully but failed immediately after, resulting in a retry loop.
# 0.1.8 (2024-09-09)
- Add `server::conn::auto::upgrade::downcast()` for use with auto connection upgrades.
# 0.1.7 (2024-08-06)
- Add `Connected::poison()` to `legacy` client, a port from hyper v0.14.x.
- Add `Error::connect_info()` to `legacy` client, a port from hyper v0.14.x.
# 0.1.6 (2024-07-01)
- Add support for AIX operating system to `legacy` client.
- Fix `legacy` client to better use dying pooled connections.
# 0.1.5 (2024-05-28)
- Add `server::graceful::GracefulShutdown` helper to coordinate over many connections.
- Add `server::conn::auto::Connection::into_owned()` to unlink lifetime from `Builder`.
- Allow `service` module to be available with only `service` feature enabled.
# 0.1.4 (2024-05-24)
- Add `initial_max_send_streams()` to `legacy` client builder
- Add `max_pending_accept_reset_streams()` to `legacy` client builder
- Add `max_headers(usize)` to `auto` server builder
- Add `http1_only()` and `http2_only()` to `auto` server builder
- Add connection capturing API to `legacy` client
- Add `impl Connection for TokioIo`
- Fix graceful shutdown hanging on reading the HTTP version
# 0.1.3 (2024-01-31)
### Added
- Add `Error::is_connect()` which returns true if error came from client `Connect`.
- Add timer support to `legacy` pool.
- Add support to enable http1/http2 parts of `auto::Builder` individually.
### Fixed
- Fix `auto` connection so it can handle requests shorter than the h2 preface.
- Fix `legacy::Client` to no longer error when keep-alive is disabled.
# 0.1.2 (2023-12-20)
### Added
- Add `graceful_shutdown()` method to `auto` connections.
- Add `rt::TokioTimer` type that implements `hyper::rt::Timer`.
- Add `service::TowerToHyperService` adapter, allowing using `tower::Service`s as a `hyper::service::Service`.
- Implement `Clone` for `auto::Builder`.
- Exports `legacy::{Builder, ResponseFuture}`.
### Fixed
- Enable HTTP/1 upgrades on the `legacy::Client`.
- Prevent divide by zero if DNS returns 0 addresses.
# 0.1.1 (2023-11-17)
### Added
- Make `server-auto` enable the `server` feature.
### Fixed
- Reduce `Send` bounds requirements for `auto` connections.
- Docs: enable all features when generating.
# 0.1.0 (2023-11-16)
Initial release.

869
vendor/hyper-util/Cargo.lock generated vendored Normal file
View File

@@ -0,0 +1,869 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "aho-corasick"
version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
dependencies = [
"memchr",
]
[[package]]
name = "async-stream"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476"
dependencies = [
"async-stream-impl",
"futures-core",
"pin-project-lite",
]
[[package]]
name = "async-stream-impl"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "atomic-waker"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
[[package]]
name = "base64"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
[[package]]
name = "bitflags"
version = "2.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
[[package]]
name = "bytes"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
[[package]]
name = "core-foundation"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"
[[package]]
name = "env_logger"
version = "0.10.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
dependencies = [
"humantime",
"is-terminal",
"log",
"regex",
"termcolor",
]
[[package]]
name = "equivalent"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f"
[[package]]
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "futures-channel"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
]
[[package]]
name = "futures-core"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-sink"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7"
[[package]]
name = "futures-task"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988"
[[package]]
name = "futures-util"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-core",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "h2"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386"
dependencies = [
"atomic-waker",
"bytes",
"fnv",
"futures-core",
"futures-sink",
"http",
"indexmap",
"slab",
"tokio",
"tokio-util",
"tracing",
]
[[package]]
name = "hashbrown"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
[[package]]
name = "hermit-abi"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "http"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565"
dependencies = [
"bytes",
"fnv",
"itoa",
]
[[package]]
name = "http-body"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
dependencies = [
"bytes",
"http",
]
[[package]]
name = "http-body-util"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
dependencies = [
"bytes",
"futures-core",
"http",
"http-body",
"pin-project-lite",
]
[[package]]
name = "httparse"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87"
[[package]]
name = "httpdate"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "humantime"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424"
[[package]]
name = "hyper"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1744436df46f0bde35af3eda22aeaba453aada65d8f1c171cd8a5f59030bd69f"
dependencies = [
"atomic-waker",
"bytes",
"futures-channel",
"futures-core",
"h2",
"http",
"http-body",
"httparse",
"httpdate",
"itoa",
"pin-project-lite",
"pin-utils",
"smallvec",
"tokio",
"want",
]
[[package]]
name = "hyper-util"
version = "0.1.20"
dependencies = [
"base64",
"bytes",
"futures-channel",
"futures-util",
"http",
"http-body",
"http-body-util",
"hyper",
"ipnet",
"libc",
"percent-encoding",
"pin-project-lite",
"pnet_datalink",
"pretty_env_logger",
"socket2",
"system-configuration",
"tokio",
"tokio-test",
"tower-layer",
"tower-service",
"tower-test",
"tracing",
"windows-registry",
]
[[package]]
name = "indexmap"
version = "2.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
dependencies = [
"equivalent",
"hashbrown",
]
[[package]]
name = "ipnet"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
[[package]]
name = "ipnetwork"
version = "0.20.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e"
dependencies = [
"serde",
]
[[package]]
name = "is-terminal"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46"
dependencies = [
"hermit-abi",
"libc",
"windows-sys 0.61.2",
]
[[package]]
name = "itoa"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
[[package]]
name = "libc"
version = "0.2.177"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"
[[package]]
name = "log"
version = "0.4.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
[[package]]
name = "memchr"
version = "2.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"
[[package]]
name = "mio"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873"
dependencies = [
"libc",
"wasi",
"windows-sys 0.61.2",
]
[[package]]
name = "no-std-net"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65"
[[package]]
name = "once_cell"
version = "1.21.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d"
[[package]]
name = "percent-encoding"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "pin-project"
version = "1.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a"
dependencies = [
"pin-project-internal",
]
[[package]]
name = "pin-project-internal"
version = "1.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "pin-project-lite"
version = "0.2.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pnet_base"
version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffc190d4067df16af3aba49b3b74c469e611cad6314676eaf1157f31aa0fb2f7"
dependencies = [
"no-std-net",
]
[[package]]
name = "pnet_datalink"
version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e79e70ec0be163102a332e1d2d5586d362ad76b01cec86f830241f2b6452a7b7"
dependencies = [
"ipnetwork",
"libc",
"pnet_base",
"pnet_sys",
"winapi",
]
[[package]]
name = "pnet_sys"
version = "0.35.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d4643d3d4db6b08741050c2f3afa9a892c4244c085a72fcda93c9c2c9a00f4b"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "pretty_env_logger"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c"
dependencies = [
"env_logger",
"log",
]
[[package]]
name = "proc-macro2"
version = "1.0.103"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.42"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f"
dependencies = [
"proc-macro2",
]
[[package]]
name = "regex"
version = "1.12.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843bc0191f75f3e22651ae5f1e72939ab2f72a4bc30fa80a066bd66edefc24d4"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
[[package]]
name = "serde"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
dependencies = [
"serde_core",
]
[[package]]
name = "serde_core"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.228"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "signal-hook-registry"
version = "1.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b"
dependencies = [
"libc",
]
[[package]]
name = "slab"
version = "0.4.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589"
[[package]]
name = "smallvec"
version = "1.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "socket2"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
dependencies = [
"libc",
"windows-sys 0.60.2",
]
[[package]]
name = "syn"
version = "2.0.110"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "system-configuration"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b"
dependencies = [
"bitflags",
"core-foundation",
"system-configuration-sys",
]
[[package]]
name = "system-configuration-sys"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "termcolor"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755"
dependencies = [
"winapi-util",
]
[[package]]
name = "tokio"
version = "1.48.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
dependencies = [
"bytes",
"libc",
"mio",
"pin-project-lite",
"signal-hook-registry",
"socket2",
"tokio-macros",
"windows-sys 0.61.2",
]
[[package]]
name = "tokio-macros"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "tokio-stream"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
dependencies = [
"futures-core",
"pin-project-lite",
"tokio",
]
[[package]]
name = "tokio-test"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2468baabc3311435b55dd935f702f42cd1b8abb7e754fb7dfb16bd36aa88f9f7"
dependencies = [
"async-stream",
"bytes",
"futures-core",
"tokio",
"tokio-stream",
]
[[package]]
name = "tokio-util"
version = "0.7.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594"
dependencies = [
"bytes",
"futures-core",
"futures-sink",
"pin-project-lite",
"tokio",
]
[[package]]
name = "tower-layer"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e"
[[package]]
name = "tower-service"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3"
[[package]]
name = "tower-test"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4546773ffeab9e4ea02b8872faa49bb616a80a7da66afc2f32688943f97efa7"
dependencies = [
"futures-util",
"pin-project",
"tokio",
"tokio-test",
"tower-layer",
"tower-service",
]
[[package]]
name = "tracing"
version = "0.1.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
dependencies = [
"pin-project-lite",
"tracing-core",
]
[[package]]
name = "tracing-core"
version = "0.1.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678"
dependencies = [
"once_cell",
]
[[package]]
name = "try-lock"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b"
[[package]]
name = "unicode-ident"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
[[package]]
name = "want"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e"
dependencies = [
"try-lock",
]
[[package]]
name = "wasi"
version = "0.11.1+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows-link"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-registry"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720"
dependencies = [
"windows-link",
"windows-result",
"windows-strings",
]
[[package]]
name = "windows-result"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-strings"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-sys"
version = "0.60.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-sys"
version = "0.61.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
dependencies = [
"windows-link",
]
[[package]]
name = "windows-targets"
version = "0.53.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
dependencies = [
"windows-link",
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_gnullvm",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
[[package]]
name = "windows_aarch64_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
[[package]]
name = "windows_i686_gnu"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
[[package]]
name = "windows_i686_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
[[package]]
name = "windows_i686_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
[[package]]
name = "windows_x86_64_gnu"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"

259
vendor/hyper-util/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,259 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.64"
name = "hyper-util"
version = "0.1.20"
authors = ["Sean McArthur <sean@seanmonstar.com>"]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "hyper utilities"
homepage = "https://hyper.rs"
documentation = "https://docs.rs/hyper-util"
readme = "README.md"
keywords = [
"http",
"hyper",
"hyperium",
]
categories = [
"network-programming",
"web-programming::http-client",
"web-programming::http-server",
]
license = "MIT"
repository = "https://github.com/hyperium/hyper-util"
[package.metadata.docs.rs]
features = ["full"]
rustdoc-args = [
"--cfg",
"docsrs",
]
[features]
__internal_happy_eyeballs_tests = []
client = [
"hyper/client",
"tokio/net",
"dep:tracing",
"dep:futures-channel",
"dep:tower-service",
]
client-legacy = [
"client",
"dep:socket2",
"tokio/sync",
"dep:libc",
"dep:futures-util",
]
client-pool = [
"client",
"dep:futures-util",
"dep:tower-layer",
]
client-proxy = [
"client",
"dep:base64",
"dep:ipnet",
"dep:percent-encoding",
]
client-proxy-system = [
"dep:system-configuration",
"dep:windows-registry",
]
default = []
full = [
"client",
"client-legacy",
"client-pool",
"client-proxy",
"client-proxy-system",
"server",
"server-auto",
"server-graceful",
"service",
"http1",
"http2",
"tokio",
"tracing",
]
http1 = ["hyper/http1"]
http2 = ["hyper/http2"]
server = ["hyper/server"]
server-auto = [
"server",
"http1",
"http2",
]
server-graceful = [
"server",
"tokio/sync",
]
service = ["dep:tower-service"]
tokio = [
"dep:tokio",
"tokio/rt",
"tokio/time",
]
tracing = ["dep:tracing"]
[lib]
name = "hyper_util"
path = "src/lib.rs"
[[example]]
name = "client"
path = "examples/client.rs"
required-features = [
"client-legacy",
"http1",
"tokio",
]
[[example]]
name = "server"
path = "examples/server.rs"
required-features = [
"server",
"http1",
"tokio",
]
[[example]]
name = "server_graceful"
path = "examples/server_graceful.rs"
required-features = [
"tokio",
"server-graceful",
"server-auto",
]
[[test]]
name = "legacy_client"
path = "tests/legacy_client.rs"
[[test]]
name = "proxy"
path = "tests/proxy.rs"
[dependencies.base64]
version = "0.22"
optional = true
[dependencies.bytes]
version = "1.7.1"
[dependencies.futures-channel]
version = "0.3"
optional = true
[dependencies.futures-util]
version = "0.3.16"
optional = true
default-features = false
[dependencies.http]
version = "1.0"
[dependencies.http-body]
version = "1.0.0"
[dependencies.hyper]
version = "1.8.0"
[dependencies.ipnet]
version = "2.9"
optional = true
[dependencies.libc]
version = "0.2"
optional = true
[dependencies.percent-encoding]
version = "2.3"
optional = true
[dependencies.pin-project-lite]
version = "0.2.4"
[dependencies.socket2]
version = ">=0.5.9, <0.7"
features = ["all"]
optional = true
[dependencies.tokio]
version = "1"
optional = true
default-features = false
[dependencies.tower-layer]
version = "0.3"
optional = true
[dependencies.tower-service]
version = "0.3"
optional = true
[dependencies.tracing]
version = "0.1"
features = ["std"]
optional = true
default-features = false
[dev-dependencies.bytes]
version = "1"
[dev-dependencies.futures-util]
version = "0.3.16"
features = ["alloc"]
default-features = false
[dev-dependencies.http-body-util]
version = "0.1.0"
[dev-dependencies.hyper]
version = "1.4.0"
features = ["full"]
[dev-dependencies.pretty_env_logger]
version = "0.5"
[dev-dependencies.tokio]
version = "1"
features = [
"macros",
"test-util",
"signal",
]
[dev-dependencies.tokio-test]
version = "0.4"
[dev-dependencies.tower-test]
version = "0.4"
[target.'cfg(any(target_os = "linux", target_os = "macos"))'.dev-dependencies.pnet_datalink]
version = "0.35.0"
[target.'cfg(target_os = "macos")'.dependencies.system-configuration]
version = "0.7"
optional = true
[target."cfg(windows)".dependencies.windows-registry]
version = ">=0.3, <0.7"
optional = true

19
vendor/hyper-util/LICENSE vendored Normal file
View File

@@ -0,0 +1,19 @@
Copyright (c) 2023-2025 Sean McArthur
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

11
vendor/hyper-util/README.md vendored Normal file
View File

@@ -0,0 +1,11 @@
# hyper-util
[![crates.io](https://img.shields.io/crates/v/hyper-util.svg)](https://crates.io/crates/hyper-util)
[![Released API docs](https://docs.rs/hyper-util/badge.svg)](https://docs.rs/hyper-util)
[![MIT licensed](https://img.shields.io/badge/license-MIT-blue.svg)](./LICENSE)
A collection of utilities to do common things with [hyper](https://hyper.rs).
## License
This project is licensed under the [MIT license](./LICENSE).

37
vendor/hyper-util/examples/client.rs vendored Normal file
View File

@@ -0,0 +1,37 @@
use std::env;
use http_body_util::Empty;
use hyper::Request;
use hyper_util::client::legacy::{connect::HttpConnector, Client};
// Single-threaded runtime is enough for this one-request example.
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // First CLI argument is the URL to fetch; print usage and exit cleanly if missing.
    let url = match env::args().nth(1) {
        Some(url) => url,
        None => {
            eprintln!("Usage: client <url>");
            return Ok(());
        }
    };
    // HTTPS requires picking a TLS implementation, so give a better
    // warning if the user tries to request an 'https' URL.
    let url = url.parse::<hyper::Uri>()?;
    if url.scheme_str() != Some("http") {
        eprintln!("This example only works with 'http' URLs.");
        return Ok(());
    }
    // Build a legacy pooling client over a plain-TCP HttpConnector,
    // driven by the Tokio executor.
    let client = Client::builder(hyper_util::rt::TokioExecutor::new()).build(HttpConnector::new());
    // GET request with an empty body.
    let req = Request::builder()
        .uri(url)
        .body(Empty::<bytes::Bytes>::new())?;
    let resp = client.request(req).await?;
    // Print the response metadata (version, status, headers) to stderr.
    eprintln!("{:?} {:?}", resp.version(), resp.status());
    eprintln!("{:#?}", resp.headers());
    Ok(())
}

75
vendor/hyper-util/examples/server.rs vendored Normal file
View File

@@ -0,0 +1,75 @@
//! This example runs a server that responds to any request with "Hello, world!"
use std::{convert::Infallible, error::Error};
use bytes::Bytes;
use http::{header::CONTENT_TYPE, Request, Response};
use http_body_util::{combinators::BoxBody, BodyExt, Full};
use hyper::{body::Incoming, service::service_fn};
use hyper_util::{
rt::{TokioExecutor, TokioIo},
server::conn::auto::Builder,
};
use tokio::{net::TcpListener, task::JoinSet};
/// Function from an incoming request to an outgoing response
///
/// This function gets turned into a [`hyper::service::Service`] later via
/// [`service_fn`]. Instead of doing this, you could also write a type that
/// implements [`hyper::service::Service`] directly and pass that in place of
/// writing a function like this and calling [`service_fn`].
///
/// This function could use [`Full`] as the body type directly since that's
/// the only type that can be returned in this case, but this uses [`BoxBody`]
/// anyway for demonstration purposes, since this is what's usually used when
/// writing a more complex webserver library.
async fn handle_request(
    _request: Request<Incoming>,
) -> Result<Response<BoxBody<Bytes, Infallible>>, Infallible> {
    // Build the fixed plain-text body first, then wrap it in a response.
    let body = Full::new(Bytes::from("Hello, world!\n")).boxed();
    Ok(Response::builder()
        .header(CONTENT_TYPE, "text/plain")
        .body(body)
        .expect("values provided to the builder should be valid"))
}
// Single-threaded runtime; connections are still served concurrently as tasks.
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn Error + Send + Sync + 'static>> {
    let listen_addr = "127.0.0.1:8000";
    let tcp_listener = TcpListener::bind(listen_addr).await?;
    println!("listening on http://{listen_addr}");
    // JoinSet keeps handles to every spawned connection task; see the
    // note at the bottom about draining it on shutdown.
    let mut join_set = JoinSet::new();
    loop {
        // Accept errors are logged and skipped rather than killing the server.
        let (stream, addr) = match tcp_listener.accept().await {
            Ok(x) => x,
            Err(e) => {
                eprintln!("failed to accept connection: {e}");
                continue;
            }
        };
        // Serve the whole connection (possibly many requests) in its own task.
        let serve_connection = async move {
            println!("handling a request from {addr}");
            let result = Builder::new(TokioExecutor::new())
                .serve_connection(TokioIo::new(stream), service_fn(handle_request))
                .await;
            if let Err(e) = result {
                eprintln!("error serving {addr}: {e}");
            }
            println!("handled a request from {addr}");
        };
        join_set.spawn(serve_connection);
    }
    // If you add a method for breaking the above loop (i.e. graceful shutdown),
    // then you may also want to wait for all existing connections to finish
    // being served before terminating the program, which can be done like this:
    //
    // while let Some(_) = join_set.join_next().await {}
}

View File

@@ -0,0 +1,64 @@
use bytes::Bytes;
use std::convert::Infallible;
use std::pin::pin;
use std::time::Duration;
use tokio::net::TcpListener;
/// Demonstrates graceful shutdown: accept connections until Ctrl-C, then
/// let in-flight requests finish (with a 10 second cap) before exiting.
#[tokio::main(flavor = "current_thread")]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let listener = TcpListener::bind("127.0.0.1:8080").await?;
    let server = hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new());
    // Tracks every watched connection so shutdown() can wait for them.
    let graceful = hyper_util::server::graceful::GracefulShutdown::new();
    let mut ctrl_c = pin!(tokio::signal::ctrl_c());
    loop {
        tokio::select! {
            conn = listener.accept() => {
                let (stream, peer_addr) = match conn {
                    Ok(conn) => conn,
                    Err(e) => {
                        // Accept errors are often transient (e.g. fd exhaustion);
                        // back off briefly instead of exiting.
                        eprintln!("accept error: {}", e);
                        tokio::time::sleep(Duration::from_secs(1)).await;
                        continue;
                    }
                };
                // Fix: log message previously read "incomming".
                eprintln!("incoming connection accepted: {}", peer_addr);
                let stream = hyper_util::rt::TokioIo::new(Box::pin(stream));
                let conn = server.serve_connection_with_upgrades(stream, hyper::service::service_fn(|_| async move {
                    tokio::time::sleep(Duration::from_secs(5)).await; // emulate slow request
                    let body = http_body_util::Full::<Bytes>::from("Hello World!".to_owned());
                    Ok::<_, Infallible>(http::Response::new(body))
                }));
                // Register the connection with the graceful-shutdown watcher
                // before spawning it.
                let conn = graceful.watch(conn.into_owned());
                tokio::spawn(async move {
                    if let Err(err) = conn.await {
                        eprintln!("connection error: {}", err);
                    }
                    eprintln!("connection dropped: {}", peer_addr);
                });
            },
            _ = ctrl_c.as_mut() => {
                // Stop accepting new connections immediately; existing ones
                // continue under the graceful watcher.
                drop(listener);
                eprintln!("Ctrl-C received, starting shutdown");
                break;
            }
        }
    }
    // Wait for in-flight connections, but don't hang forever.
    tokio::select! {
        _ = graceful.shutdown() => {
            eprintln!("Gracefully shutdown!");
        },
        _ = tokio::time::sleep(Duration::from_secs(10)) => {
            eprintln!("Waited 10 seconds for graceful shutdown, aborting...");
        }
    }
    Ok(())
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,187 @@
use std::{ops::Deref, sync::Arc};
use http::Request;
use tokio::sync::watch;
use super::Connected;
/// [`CaptureConnection`] allows callers to capture [`Connected`] information
///
/// To capture a connection for a request, use [`capture_connection`].
#[derive(Debug, Clone)]
pub struct CaptureConnection {
rx: watch::Receiver<Option<Connected>>,
}
/// Capture the connection for a given request
///
/// When making a request with Hyper, the underlying connection must implement the [`Connection`] trait.
/// [`capture_connection`] allows a caller to capture the returned [`Connected`] structure as soon
/// as the connection is established.
///
/// [`Connection`]: crate::client::legacy::connect::Connection
///
/// *Note*: If establishing a connection fails, [`CaptureConnection::connection_metadata`] will always return none.
///
/// # Examples
///
/// **Synchronous access**:
/// The [`CaptureConnection::connection_metadata`] method allows callers to check if a connection has been
/// established. This is ideal for situations where you are certain the connection has already
/// been established (e.g. after the response future has already completed).
/// ```rust
/// use hyper_util::client::legacy::connect::capture_connection;
/// let mut request = http::Request::builder()
/// .uri("http://foo.com")
/// .body(())
/// .unwrap();
///
/// let captured_connection = capture_connection(&mut request);
/// // some time later after the request has been sent...
/// let connection_info = captured_connection.connection_metadata();
/// println!("we are connected! {:?}", connection_info.as_ref());
/// ```
///
/// **Asynchronous access**:
/// The [`CaptureConnection::wait_for_connection_metadata`] method returns a future resolves as soon as the
/// connection is available.
///
/// ```rust
/// # #[cfg(feature = "tokio")]
/// # async fn example() {
/// use hyper_util::client::legacy::connect::capture_connection;
/// use hyper_util::client::legacy::Client;
/// use hyper_util::rt::TokioExecutor;
/// use bytes::Bytes;
/// use http_body_util::Empty;
/// let mut request = http::Request::builder()
/// .uri("http://foo.com")
/// .body(Empty::<Bytes>::new())
/// .unwrap();
///
/// let mut captured = capture_connection(&mut request);
/// tokio::task::spawn(async move {
/// let connection_info = captured.wait_for_connection_metadata().await;
/// println!("we are connected! {:?}", connection_info.as_ref());
/// });
///
/// let client = Client::builder(TokioExecutor::new()).build_http();
/// client.request(request).await.expect("request failed");
/// # }
/// ```
pub fn capture_connection<B>(request: &mut Request<B>) -> CaptureConnection {
let (tx, rx) = CaptureConnection::new();
request.extensions_mut().insert(tx);
rx
}
/// TxSide for [`CaptureConnection`]
///
/// This is inserted into `Extensions` to allow Hyper to back channel connection info
#[derive(Clone)]
pub(crate) struct CaptureConnectionExtension {
tx: Arc<watch::Sender<Option<Connected>>>,
}
impl CaptureConnectionExtension {
    /// Publish the connection info to every receiver, replacing any
    /// previously published value and waking waiting receivers.
    pub(crate) fn set(&self, connected: &Connected) {
        self.tx.send_replace(Some(connected.clone()));
    }
}
impl CaptureConnection {
    /// Internal API to create the tx and rx half of [`CaptureConnection`]
    pub(crate) fn new() -> (CaptureConnectionExtension, Self) {
        // Channel starts with `None`: no connection established yet.
        let (tx, rx) = watch::channel(None);
        (
            // Arc so the tx half can be cloned into request extensions.
            CaptureConnectionExtension { tx: Arc::new(tx) },
            CaptureConnection { rx },
        )
    }
    /// Retrieve the connection metadata, if available
    pub fn connection_metadata(&self) -> impl Deref<Target = Option<Connected>> + '_ {
        // Returns the watch channel's read guard; `None` until `set` was called.
        self.rx.borrow()
    }
    /// Wait for the connection to be established
    ///
    /// If a connection was established, this will always return `Some(...)`. If the request never
    /// successfully connected (e.g. DNS resolution failure), this method will never return.
    pub async fn wait_for_connection_metadata(
        &mut self,
    ) -> impl Deref<Target = Option<Connected>> + '_ {
        // Fast path: a value was already published, return it without waiting.
        if self.rx.borrow().is_some() {
            return self.rx.borrow();
        }
        // `changed()` errs if the sender was dropped; the result is ignored so
        // that case falls through and the final borrow yields `None`.
        let _ = self.rx.changed().await;
        self.rx.borrow()
    }
}
#[cfg(test)]
mod test {
use super::*;
#[test]
fn test_sync_capture_connection() {
let (tx, rx) = CaptureConnection::new();
assert!(
rx.connection_metadata().is_none(),
"connection has not been set"
);
tx.set(&Connected::new().proxy(true));
assert!(rx
.connection_metadata()
.as_ref()
.expect("connected should be set")
.is_proxied());
// ensure it can be called multiple times
assert!(rx
.connection_metadata()
.as_ref()
.expect("connected should be set")
.is_proxied());
}
#[tokio::test]
async fn async_capture_connection() {
let (tx, mut rx) = CaptureConnection::new();
assert!(
rx.connection_metadata().is_none(),
"connection has not been set"
);
let test_task = tokio::spawn(async move {
assert!(rx
.wait_for_connection_metadata()
.await
.as_ref()
.expect("connection should be set")
.is_proxied());
// can be awaited multiple times
assert!(
rx.wait_for_connection_metadata().await.is_some(),
"should be awaitable multiple times"
);
assert!(rx.connection_metadata().is_some());
});
// can't be finished, we haven't set the connection yet
assert!(!test_task.is_finished());
tx.set(&Connected::new().proxy(true));
assert!(test_task.await.is_ok());
}
#[tokio::test]
async fn capture_connection_sender_side_dropped() {
let (tx, mut rx) = CaptureConnection::new();
assert!(
rx.connection_metadata().is_none(),
"connection has not been set"
);
drop(tx);
assert!(rx.wait_for_connection_metadata().await.is_none());
}
}

View File

@@ -0,0 +1,360 @@
//! DNS Resolution used by the `HttpConnector`.
//!
//! This module contains:
//!
//! - A [`GaiResolver`] that is the default resolver for the `HttpConnector`.
//! - The `Name` type used as an argument to custom resolvers.
//!
//! # Resolvers are `Service`s
//!
//! A resolver is just a
//! `Service<Name, Response = impl Iterator<Item = SocketAddr>>`.
//!
//! A simple resolver that ignores the name and always returns a specific
//! address:
//!
//! ```rust,ignore
//! use std::{convert::Infallible, iter, net::SocketAddr};
//!
//! let resolver = tower::service_fn(|_name| async {
//! Ok::<_, Infallible>(iter::once(SocketAddr::from(([127, 0, 0, 1], 8080))))
//! });
//! ```
use std::error::Error;
use std::future::Future;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, ToSocketAddrs};
use std::pin::Pin;
use std::str::FromStr;
use std::task::{self, Poll};
use std::{fmt, io, vec};
use tokio::task::JoinHandle;
use tower_service::Service;
pub(super) use self::sealed::Resolve;
/// A domain name to resolve into IP addresses.
#[derive(Clone, Hash, Eq, PartialEq)]
pub struct Name {
host: Box<str>,
}
/// A resolver using blocking `getaddrinfo` calls in a threadpool.
#[derive(Clone)]
pub struct GaiResolver {
_priv: (),
}
/// An iterator of IP addresses returned from `getaddrinfo`.
pub struct GaiAddrs {
inner: SocketAddrs,
}
/// A future to resolve a name returned by `GaiResolver`.
pub struct GaiFuture {
inner: JoinHandle<Result<SocketAddrs, io::Error>>,
}
impl Name {
    // Crate-internal constructor; validation (if any) happens in `FromStr`.
    pub(super) fn new(host: Box<str>) -> Name {
        Name { host }
    }
    /// View the hostname as a string slice.
    pub fn as_str(&self) -> &str {
        &self.host
    }
}
impl fmt::Debug for Name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.host, f)
}
}
impl fmt::Display for Name {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.host, f)
}
}
impl FromStr for Name {
    type Err = InvalidNameError;
    // Currently infallible: every string is accepted as a name. The error
    // type exists so validation can be added without breaking the API.
    fn from_str(host: &str) -> Result<Self, Self::Err> {
        // Possibly add validation later
        Ok(Name::new(host.into()))
    }
}
/// Error indicating a given string was not a valid domain name.
#[derive(Debug)]
pub struct InvalidNameError(());
impl fmt::Display for InvalidNameError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Not a valid domain name")
}
}
impl Error for InvalidNameError {}
impl GaiResolver {
/// Construct a new `GaiResolver`.
pub fn new() -> Self {
GaiResolver { _priv: () }
}
}
impl Service<Name> for GaiResolver {
type Response = GaiAddrs;
type Error = io::Error;
type Future = GaiFuture;
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), io::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, name: Name) -> Self::Future {
let blocking = tokio::task::spawn_blocking(move || {
(&*name.host, 0)
.to_socket_addrs()
.map(|i| SocketAddrs { iter: i })
});
GaiFuture { inner: blocking }
}
}
impl fmt::Debug for GaiResolver {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiResolver")
}
}
impl Future for GaiFuture {
    type Output = Result<GaiAddrs, io::Error>;
    // Polls the spawn_blocking JoinHandle and flattens its nested Result.
    fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        Pin::new(&mut self.inner).poll(cx).map(|res| match res {
            // Task ran and resolution succeeded.
            Ok(Ok(addrs)) => Ok(GaiAddrs { inner: addrs }),
            // Task ran but getaddrinfo failed; surface the io::Error.
            Ok(Err(err)) => Err(err),
            Err(join_err) => {
                // Cancellation (see the Drop impl, which aborts the task) is
                // reported as Interrupted; any other join failure (panic) is a bug.
                if join_err.is_cancelled() {
                    Err(io::Error::new(io::ErrorKind::Interrupted, join_err))
                } else {
                    panic!("gai background task failed: {join_err:?}")
                }
            }
        })
    }
}
impl fmt::Debug for GaiFuture {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiFuture")
}
}
impl Drop for GaiFuture {
    fn drop(&mut self) {
        // Abort the background lookup so a dropped future doesn't leave a
        // blocking getaddrinfo task running to completion.
        self.inner.abort();
    }
}
impl Iterator for GaiAddrs {
type Item = SocketAddr;
fn next(&mut self) -> Option<Self::Item> {
self.inner.next()
}
}
impl fmt::Debug for GaiAddrs {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.pad("GaiAddrs")
}
}
pub(super) struct SocketAddrs {
iter: vec::IntoIter<SocketAddr>,
}
impl SocketAddrs {
    pub(super) fn new(addrs: Vec<SocketAddr>) -> Self {
        SocketAddrs {
            iter: addrs.into_iter(),
        }
    }
    // If `host` is already a literal IPv4/IPv6 address, build the address list
    // directly and skip DNS resolution entirely. Returns None for real hostnames.
    pub(super) fn try_parse(host: &str, port: u16) -> Option<SocketAddrs> {
        if let Ok(addr) = host.parse::<Ipv4Addr>() {
            let addr = SocketAddrV4::new(addr, port);
            return Some(SocketAddrs {
                iter: vec![SocketAddr::V4(addr)].into_iter(),
            });
        }
        if let Ok(addr) = host.parse::<Ipv6Addr>() {
            // flowinfo and scope_id are left at 0.
            let addr = SocketAddrV6::new(addr, port, 0, 0);
            return Some(SocketAddrs {
                iter: vec![SocketAddr::V6(addr)].into_iter(),
            });
        }
        None
    }
    #[inline]
    fn filter(self, predicate: impl FnMut(&SocketAddr) -> bool) -> SocketAddrs {
        SocketAddrs::new(self.iter.filter(predicate).collect())
    }
    // Split the resolved addresses into (preferred, fallback) lists:
    // - only a local IPv4 addr configured -> keep IPv4 only, no fallback;
    // - only a local IPv6 addr configured -> keep IPv6 only, no fallback;
    // - otherwise prefer whichever family the resolver listed first.
    pub(super) fn split_by_preference(
        self,
        local_addr_ipv4: Option<Ipv4Addr>,
        local_addr_ipv6: Option<Ipv6Addr>,
    ) -> (SocketAddrs, SocketAddrs) {
        match (local_addr_ipv4, local_addr_ipv6) {
            (Some(_), None) => (self.filter(SocketAddr::is_ipv4), SocketAddrs::new(vec![])),
            (None, Some(_)) => (self.filter(SocketAddr::is_ipv6), SocketAddrs::new(vec![])),
            _ => {
                // First address decides the preferred family (false if empty).
                let preferring_v6 = self
                    .iter
                    .as_slice()
                    .first()
                    .map(SocketAddr::is_ipv6)
                    .unwrap_or(false);
                let (preferred, fallback) = self
                    .iter
                    .partition::<Vec<_>, _>(|addr| addr.is_ipv6() == preferring_v6);
                (SocketAddrs::new(preferred), SocketAddrs::new(fallback))
            }
        }
    }
    pub(super) fn is_empty(&self) -> bool {
        self.iter.as_slice().is_empty()
    }
    pub(super) fn len(&self) -> usize {
        self.iter.as_slice().len()
    }
}
impl Iterator for SocketAddrs {
type Item = SocketAddr;
#[inline]
fn next(&mut self) -> Option<SocketAddr> {
self.iter.next()
}
}
mod sealed {
use std::future::Future;
use std::task::{self, Poll};
use super::{Name, SocketAddr};
use tower_service::Service;
// "Trait alias" for `Service<Name, Response = Addrs>`
pub trait Resolve {
type Addrs: Iterator<Item = SocketAddr>;
type Error: Into<Box<dyn std::error::Error + Send + Sync>>;
type Future: Future<Output = Result<Self::Addrs, Self::Error>>;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>>;
fn resolve(&mut self, name: Name) -> Self::Future;
}
impl<S> Resolve for S
where
S: Service<Name>,
S::Response: Iterator<Item = SocketAddr>,
S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
type Addrs = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
Service::poll_ready(self, cx)
}
fn resolve(&mut self, name: Name) -> Self::Future {
Service::call(self, name)
}
}
}
// Drive a `Resolve` service through its full readiness + call cycle:
// wait until the resolver is ready, then resolve `name` to addresses.
pub(super) async fn resolve<R>(resolver: &mut R, name: Name) -> Result<R::Addrs, R::Error>
where
    R: Resolve,
{
    std::future::poll_fn(|cx| resolver.poll_ready(cx)).await?;
    resolver.resolve(name).await
}
#[cfg(test)]
mod tests {
use super::*;
use std::net::{Ipv4Addr, Ipv6Addr};
#[test]
fn test_ip_addrs_split_by_preference() {
let ip_v4 = Ipv4Addr::new(127, 0, 0, 1);
let ip_v6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1);
let v4_addr = (ip_v4, 80).into();
let v6_addr = (ip_v6, 80).into();
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(None, None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v6_addr, v4_addr].into_iter(),
}
.split_by_preference(None, None);
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.next().unwrap().is_ipv6());
let (mut preferred, mut fallback) = SocketAddrs {
iter: vec![v6_addr, v4_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.next().unwrap().is_ipv4());
let (mut preferred, fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(Some(ip_v4), None);
assert!(preferred.next().unwrap().is_ipv4());
assert!(fallback.is_empty());
let (mut preferred, fallback) = SocketAddrs {
iter: vec![v4_addr, v6_addr].into_iter(),
}
.split_by_preference(None, Some(ip_v6));
assert!(preferred.next().unwrap().is_ipv6());
assert!(fallback.is_empty());
}
#[test]
fn test_name_from_str() {
const DOMAIN: &str = "test.example.com";
let name = Name::from_str(DOMAIN).expect("Should be a valid domain");
assert_eq!(name.as_str(), DOMAIN);
assert_eq!(name.to_string(), DOMAIN);
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,444 @@
//! Connectors used by the `Client`.
//!
//! This module contains:
//!
//! - A default [`HttpConnector`][] that does DNS resolution and establishes
//! connections over TCP.
//! - Types to build custom connectors.
//!
//! # Connectors
//!
//! A "connector" is a [`Service`][] that takes a [`Uri`][] destination, and
//! its `Response` is some type implementing [`Read`][], [`Write`][],
//! and [`Connection`][].
//!
//! ## Custom Connectors
//!
//! A simple connector that ignores the `Uri` destination and always returns
//! a TCP connection to the same address could be written like this:
//!
//! ```rust,ignore
//! let connector = tower::service_fn(|_dst| async {
//! tokio::net::TcpStream::connect("127.0.0.1:1337")
//! })
//! ```
//!
//! Or, fully written out:
//!
//! ```
//! use std::{future::Future, net::SocketAddr, pin::Pin, task::{self, Poll}};
//! use http::Uri;
//! use tokio::net::TcpStream;
//! use tower_service::Service;
//!
//! #[derive(Clone)]
//! struct LocalConnector;
//!
//! impl Service<Uri> for LocalConnector {
//! type Response = TcpStream;
//! type Error = std::io::Error;
//! // We can't "name" an `async` generated future.
//! type Future = Pin<Box<
//! dyn Future<Output = Result<Self::Response, Self::Error>> + Send
//! >>;
//!
//! fn poll_ready(&mut self, _: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
//! // This connector is always ready, but others might not be.
//! Poll::Ready(Ok(()))
//! }
//!
//! fn call(&mut self, _: Uri) -> Self::Future {
//! Box::pin(TcpStream::connect(SocketAddr::from(([127, 0, 0, 1], 1337))))
//! }
//! }
//! ```
//!
//! It's worth noting that for `TcpStream`s, the [`HttpConnector`][] is a
//! better starting place to extend from.
//!
//! [`HttpConnector`]: HttpConnector
//! [`Service`]: tower_service::Service
//! [`Uri`]: ::http::Uri
//! [`Read`]: hyper::rt::Read
//! [`Write`]: hyper::rt::Write
//! [`Connection`]: Connection
use std::{
fmt::{self, Formatter},
sync::{
atomic::{AtomicBool, Ordering},
Arc,
},
};
use ::http::Extensions;
#[cfg(feature = "tokio")]
pub use self::http::{HttpConnector, HttpInfo};
#[cfg(feature = "tokio")]
pub mod dns;
#[cfg(feature = "tokio")]
mod http;
pub mod proxy;
pub(crate) mod capture;
pub use capture::{capture_connection, CaptureConnection};
pub use self::sealed::Connect;
/// Describes a type returned by a connector.
pub trait Connection {
/// Return metadata describing the connection.
fn connected(&self) -> Connected;
}
/// Extra information about the connected transport.
///
/// This can be used to inform recipients about things like if ALPN
/// was used, or if connected to an HTTP proxy.
#[derive(Debug)]
pub struct Connected {
pub(super) alpn: Alpn,
pub(super) is_proxied: bool,
pub(super) extra: Option<Extra>,
pub(super) poisoned: PoisonPill,
}
#[derive(Clone)]
pub(crate) struct PoisonPill {
poisoned: Arc<AtomicBool>,
}
impl fmt::Debug for PoisonPill {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
// print the address of the pill—this makes debugging issues much easier
write!(
f,
"PoisonPill@{:p} {{ poisoned: {} }}",
self.poisoned,
self.poisoned.load(Ordering::Relaxed)
)
}
}
impl PoisonPill {
    /// A fresh, un-poisoned pill; cloning shares the same flag via `Arc`.
    pub(crate) fn healthy() -> Self {
        Self {
            poisoned: Arc::new(AtomicBool::new(false)),
        }
    }
    // One-way transition: once poisoned, the flag is never cleared.
    // Relaxed ordering suffices — this is a standalone flag, not a guard
    // for other shared data.
    pub(crate) fn poison(&self) {
        self.poisoned.store(true, Ordering::Relaxed)
    }
    pub(crate) fn poisoned(&self) -> bool {
        self.poisoned.load(Ordering::Relaxed)
    }
}
pub(super) struct Extra(Box<dyn ExtraInner>);
#[derive(Clone, Copy, Debug, PartialEq)]
pub(super) enum Alpn {
H2,
None,
}
impl Connected {
    /// Create new `Connected` type with empty metadata.
    pub fn new() -> Connected {
        Connected {
            alpn: Alpn::None,
            is_proxied: false,
            extra: None,
            poisoned: PoisonPill::healthy(),
        }
    }
    /// Set whether the connected transport is to an HTTP proxy.
    ///
    /// This setting will affect if HTTP/1 requests written on the transport
    /// will have the request-target in absolute-form or origin-form:
    ///
    /// - When `proxy(false)`:
    ///
    /// ```http
    /// GET /guide HTTP/1.1
    /// ```
    ///
    /// - When `proxy(true)`:
    ///
    /// ```http
    /// GET http://hyper.rs/guide HTTP/1.1
    /// ```
    ///
    /// Default is `false`.
    pub fn proxy(mut self, is_proxied: bool) -> Connected {
        self.is_proxied = is_proxied;
        self
    }
    /// Determines if the connected transport is to an HTTP proxy.
    pub fn is_proxied(&self) -> bool {
        self.is_proxied
    }
    /// Set extra connection information to be set in the extensions of every `Response`.
    pub fn extra<T: Clone + Send + Sync + 'static>(mut self, extra: T) -> Connected {
        // Repeated calls chain values so each distinct type T is preserved
        // and later copied into the response extensions.
        if let Some(prev) = self.extra {
            self.extra = Some(Extra(Box::new(ExtraChain(prev.0, extra))));
        } else {
            self.extra = Some(Extra(Box::new(ExtraEnvelope(extra))));
        }
        self
    }
    /// Copies the extra connection information into an `Extensions` map.
    pub fn get_extras(&self, extensions: &mut Extensions) {
        if let Some(extra) = &self.extra {
            extra.set(extensions);
        }
    }
    /// Set that the connected transport negotiated HTTP/2 as its next protocol.
    pub fn negotiated_h2(mut self) -> Connected {
        self.alpn = Alpn::H2;
        self
    }
    /// Determines if the connected transport negotiated HTTP/2 as its next protocol.
    pub fn is_negotiated_h2(&self) -> bool {
        self.alpn == Alpn::H2
    }
    /// Poison this connection
    ///
    /// A poisoned connection will not be reused for subsequent requests by the pool
    pub fn poison(&self) {
        self.poisoned.poison();
        tracing::debug!(
            poison_pill = ?self.poisoned, "connection was poisoned. this connection will not be reused for subsequent requests"
        );
    }
    // Don't public expose that `Connected` is `Clone`, unsure if we want to
    // keep that contract...
    // NOTE: the clone shares the same PoisonPill (Arc), so poisoning one
    // clone poisons them all.
    pub(super) fn clone(&self) -> Connected {
        Connected {
            alpn: self.alpn,
            is_proxied: self.is_proxied,
            extra: self.extra.clone(),
            poisoned: self.poisoned.clone(),
        }
    }
}
// ===== impl Extra =====
impl Extra {
pub(super) fn set(&self, res: &mut Extensions) {
self.0.set(res);
}
}
impl Clone for Extra {
fn clone(&self) -> Extra {
Extra(self.0.clone_box())
}
}
impl fmt::Debug for Extra {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Extra").finish()
}
}
trait ExtraInner: Send + Sync {
fn clone_box(&self) -> Box<dyn ExtraInner>;
fn set(&self, res: &mut Extensions);
}
// This indirection allows the `Connected` to have a type-erased "extra" value,
// while that type still knows its inner extra type. This allows the correct
// TypeId to be used when inserting into `res.extensions_mut()`.
#[derive(Clone)]
struct ExtraEnvelope<T>(T);
impl<T> ExtraInner for ExtraEnvelope<T>
where
    T: Clone + Send + Sync + 'static,
{
    fn clone_box(&self) -> Box<dyn ExtraInner> {
        Box::new(self.clone())
    }
    fn set(&self, res: &mut Extensions) {
        // Clone so the envelope can be reused for later responses.
        res.insert(self.0.clone());
    }
}
// A linked chain of extras: `0` holds everything set earlier, `1` the newest.
struct ExtraChain<T>(Box<dyn ExtraInner>, T);
impl<T: Clone> Clone for ExtraChain<T> {
    fn clone(&self) -> Self {
        ExtraChain(self.0.clone_box(), self.1.clone())
    }
}
impl<T> ExtraInner for ExtraChain<T>
where
    T: Clone + Send + Sync + 'static,
{
    fn clone_box(&self) -> Box<dyn ExtraInner> {
        Box::new(self.clone())
    }
    fn set(&self, res: &mut Extensions) {
        // Earlier extras are inserted first, then this one — so a later value
        // of the same type overrides an earlier one, matching
        // `Extensions::insert` semantics.
        self.0.set(res);
        res.insert(self.1.clone());
    }
}
pub(super) mod sealed {
    use std::error::Error as StdError;
    use std::future::Future;
    use ::http::Uri;
    use hyper::rt::{Read, Write};
    use super::Connection;
    /// Connect to a destination, returning an IO transport.
    ///
    /// A connector receives a [`Uri`](::http::Uri) and returns a `Future` of the
    /// ready connection.
    ///
    /// # Trait Alias
    ///
    /// This is really just an *alias* for the `tower::Service` trait, with
    /// additional bounds set for convenience *inside* hyper. You don't actually
    /// implement this trait, but `tower::Service<Uri>` instead.
    // The `Sized` bound is to prevent creating `dyn Connect`, since they cannot
    // fit the `Connect` bounds because of the blanket impl for `Service`.
    pub trait Connect: Sealed + Sized {
        #[doc(hidden)]
        type _Svc: ConnectSvc;
        #[doc(hidden)]
        fn connect(self, internal_only: Internal, dst: Uri) -> <Self::_Svc as ConnectSvc>::Future;
    }
    // The actual service contract behind `Connect`; kept separate so the
    // blanket impl can name the returned future type.
    pub trait ConnectSvc {
        type Connection: Read + Write + Connection + Unpin + Send + 'static;
        type Error: Into<Box<dyn StdError + Send + Sync>>;
        type Future: Future<Output = Result<Self::Connection, Self::Error>> + Unpin + Send + 'static;
        fn connect(self, internal_only: Internal, dst: Uri) -> Self::Future;
    }
    // Blanket impl: any compatible `tower_service::Service<Uri>` is a `Connect`.
    impl<S, T> Connect for S
    where
        S: tower_service::Service<Uri, Response = T> + Send + 'static,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        S::Future: Unpin + Send,
        T: Read + Write + Connection + Unpin + Send + 'static,
    {
        type _Svc = S;
        fn connect(self, _: Internal, dst: Uri) -> crate::service::Oneshot<S, Uri> {
            crate::service::Oneshot::new(self, dst)
        }
    }
    impl<S, T> ConnectSvc for S
    where
        S: tower_service::Service<Uri, Response = T> + Send + 'static,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        S::Future: Unpin + Send,
        T: Read + Write + Connection + Unpin + Send + 'static,
    {
        type Connection = T;
        type Error = S::Error;
        type Future = crate::service::Oneshot<S, Uri>;
        fn connect(self, _: Internal, dst: Uri) -> Self::Future {
            crate::service::Oneshot::new(self, dst)
        }
    }
    impl<S, T> Sealed for S
    where
        S: tower_service::Service<Uri, Response = T> + Send,
        S::Error: Into<Box<dyn StdError + Send + Sync>>,
        S::Future: Unpin + Send,
        T: Read + Write + Connection + Unpin + Send + 'static,
    {
    }
    // Prevents downstream implementations of `Connect`.
    pub trait Sealed {}
    // Zero-sized token proving a call originated inside this crate.
    #[allow(missing_debug_implementations)]
    pub struct Internal;
}
#[cfg(test)]
mod tests {
    use super::Connected;
    // Three distinct marker types so the type-keyed "extra" storage can be
    // exercised for both chaining and same-type override behavior.
    #[derive(Clone, Debug, PartialEq)]
    struct Ex1(usize);
    #[derive(Clone, Debug, PartialEq)]
    struct Ex2(&'static str);
    #[derive(Clone, Debug, PartialEq)]
    struct Ex3(&'static str);
    // A single extra is copied into an `Extensions` map as-is.
    #[test]
    fn test_connected_extra() {
        let c1 = Connected::new().extra(Ex1(41));
        let mut ex = ::http::Extensions::new();
        assert_eq!(ex.get::<Ex1>(), None);
        c1.extra.as_ref().expect("c1 extra").set(&mut ex);
        assert_eq!(ex.get::<Ex1>(), Some(&Ex1(41)));
    }
    #[test]
    fn test_connected_extra_chain() {
        // If a user composes connectors and at each stage, there's "extra"
        // info to attach, it shouldn't override the previous extras.
        let c1 = Connected::new()
            .extra(Ex1(45))
            .extra(Ex2("zoom"))
            .extra(Ex3("pew pew"));
        let mut ex1 = ::http::Extensions::new();
        assert_eq!(ex1.get::<Ex1>(), None);
        assert_eq!(ex1.get::<Ex2>(), None);
        assert_eq!(ex1.get::<Ex3>(), None);
        c1.extra.as_ref().expect("c1 extra").set(&mut ex1);
        assert_eq!(ex1.get::<Ex1>(), Some(&Ex1(45)));
        assert_eq!(ex1.get::<Ex2>(), Some(&Ex2("zoom")));
        assert_eq!(ex1.get::<Ex3>(), Some(&Ex3("pew pew")));
        // Just like extensions, inserting the same type overrides previous type.
        let c2 = Connected::new()
            .extra(Ex1(33))
            .extra(Ex2("hiccup"))
            .extra(Ex1(99));
        let mut ex2 = ::http::Extensions::new();
        c2.extra.as_ref().expect("c2 extra").set(&mut ex2);
        assert_eq!(ex2.get::<Ex1>(), Some(&Ex1(99)));
        assert_eq!(ex2.get::<Ex2>(), Some(&Ex2("hiccup")));
    }
}

View File

@@ -0,0 +1,6 @@
//! Proxy helpers
mod socks;
mod tunnel;
pub use self::socks::{SocksV4, SocksV5};
pub use self::tunnel::Tunnel;

View File

@@ -0,0 +1,154 @@
mod v5;
pub use v5::{SocksV5, SocksV5Error};
mod v4;
pub use v4::{SocksV4, SocksV4Error};
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::BytesMut;
use hyper::rt::Read;
/// Errors produced during a SOCKS handshake, generic over the wrapped
/// connector's error type `C`.
#[derive(Debug)]
pub enum SocksError<C> {
    /// The underlying connector failed before the handshake began.
    Inner(C),
    /// I/O failure while talking to the proxy.
    Io(std::io::Error),
    /// Local DNS resolution produced no usable address.
    DnsFailure,
    /// The destination had no host component.
    MissingHost,
    /// The destination had no port.
    MissingPort,
    /// SOCKSv4-specific protocol failure.
    V4(SocksV4Error),
    /// SOCKSv5-specific protocol failure.
    V5(SocksV5Error),
    /// The server's reply could not be parsed.
    Parsing(ParsingError),
    /// A request could not be serialized into the send buffer.
    Serialize(SerializeError),
}
/// Why parsing a server message failed.
#[derive(Debug)]
pub enum ParsingError {
    /// More bytes are needed; callers retry after reading more.
    Incomplete,
    /// The message would exceed the buffer's capacity.
    WouldOverflow,
    /// Malformed or unrecognized data.
    Other,
}
/// Why serializing a client message failed.
#[derive(Debug)]
pub enum SerializeError {
    /// The buffer does not have room for the message.
    WouldOverflow,
}
/// Reads from `conn` until `buf` holds a complete message `M`, then parses and
/// returns it.
///
/// `M::try_from` is retried after every read: `ParsingError::Incomplete` means
/// "need more bytes"; any other parse error is fatal.
async fn read_message<T, M, C>(mut conn: &mut T, buf: &mut BytesMut) -> Result<M, SocksError<C>>
where
    T: Read + Unpin,
    M: for<'a> TryFrom<&'a mut BytesMut, Error = ParsingError>,
{
    // 513 bytes matches the largest message handled by this module.
    let mut tmp = [0; 513];
    loop {
        let n = crate::rt::read(&mut conn, &mut tmp).await?;
        buf.extend_from_slice(&tmp[..n]);
        match M::try_from(buf) {
            Err(ParsingError::Incomplete) => {
                // EOF (n == 0) with an incomplete message is fatal: report
                // WouldOverflow when the buffer has no spare room left,
                // otherwise an unexpected EOF from the server.
                if n == 0 {
                    if buf.spare_capacity_mut().is_empty() {
                        return Err(SocksError::Parsing(ParsingError::WouldOverflow));
                    } else {
                        return Err(std::io::Error::new(
                            std::io::ErrorKind::UnexpectedEof,
                            "unexpected eof",
                        )
                        .into());
                    }
                }
            }
            Err(err) => return Err(err.into()),
            Ok(res) => return Ok(res),
        }
    }
}
impl<C> std::fmt::Display for SocksError<C> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Common prefix, then a variant-specific description.
        f.write_str("SOCKS error: ")?;
        match self {
            Self::Inner(_) => f.write_str("failed to create underlying connection"),
            Self::Io(_) => f.write_str("io error during SOCKS handshake"),
            Self::DnsFailure => f.write_str("could not resolve to acceptable address type"),
            Self::MissingHost => f.write_str("missing destination host"),
            Self::MissingPort => f.write_str("missing destination port"),
            Self::Parsing(_) => f.write_str("failed parsing server response"),
            Self::Serialize(_) => f.write_str("failed serialize request"),
            // Version-specific errors provide their own detail.
            Self::V4(e) => e.fmt(f),
            Self::V5(e) => e.fmt(f),
        }
    }
}
impl<C: std::fmt::Debug + std::fmt::Display> std::error::Error for SocksError<C> {}
// Conversions that let the handshake code use `?` freely.
impl<C> From<std::io::Error> for SocksError<C> {
    fn from(err: std::io::Error) -> Self {
        Self::Io(err)
    }
}
impl<C> From<ParsingError> for SocksError<C> {
    fn from(err: ParsingError) -> Self {
        Self::Parsing(err)
    }
}
impl<C> From<SerializeError> for SocksError<C> {
    fn from(err: SerializeError) -> Self {
        Self::Serialize(err)
    }
}
impl<C> From<SocksV4Error> for SocksError<C> {
    fn from(err: SocksV4Error) -> Self {
        Self::V4(err)
    }
}
impl<C> From<SocksV5Error> for SocksError<C> {
    fn from(err: SocksV5Error) -> Self {
        Self::V5(err)
    }
}
pin_project! {
    // Not publicly exported (so missing_docs doesn't trigger).
    //
    // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
    // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
    // (and thus we can change the type in the future).
    #[must_use = "futures do nothing unless polled"]
    #[allow(missing_debug_implementations)]
    pub struct Handshaking<F, T, E> {
        #[pin]
        fut: BoxHandshaking<T, E>,
        // `F` keeps the connector's future type in the signature for type
        // inference at the `Service` impl sites; nothing is stored.
        _marker: std::marker::PhantomData<F>
    }
}
type BoxHandshaking<T, E> = Pin<Box<dyn Future<Output = Result<T, SocksError<E>>> + Send>>;
impl<F, T, E> Future for Handshaking<F, T, E>
where
    F: Future<Output = Result<T, E>>,
{
    type Output = Result<T, SocksError<E>>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Delegates straight to the boxed handshake future.
        self.project().fut.poll(cx)
    }
}

View File

@@ -0,0 +1,22 @@
use super::Status;
/// SOCKSv4-specific handshake failures.
#[derive(Debug)]
pub enum SocksV4Error {
    /// SOCKSv4 cannot address IPv6 destinations.
    IpV6,
    /// The server rejected the CONNECT command.
    Command(Status),
}
impl From<Status> for SocksV4Error {
    fn from(err: Status) -> Self {
        Self::Command(err)
    }
}
impl std::fmt::Display for SocksV4Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::IpV6 => f.write_str("IPV6 is not supported"),
            // Reply statuses render their own description.
            Self::Command(status) => status.fmt(f),
        }
    }
}

View File

@@ -0,0 +1,131 @@
use super::super::{ParsingError, SerializeError};
use bytes::{Buf, BufMut, BytesMut};
use std::net::SocketAddrV4;
/// SOCKSv4(a) CONNECT request.
///
/// +-----+-----+---------+-------+----------+------+----------+------+
/// | VN  | CD  | DSTPORT | DSTIP |  USERID  | NULL |  DOMAIN  | NULL |
/// +-----+-----+---------+-------+----------+------+----------+------+
/// |  1  |  1  |    2    |   4   | Variable |  1   | Variable |  1   |
/// +-----+-----+---------+-------+----------+------+----------+------+
///                                                 ^^^^^^^^^^^^^^^^^^
///                                 optional: only if DSTIP is 0.0.0.X
#[derive(Debug)]
pub struct Request<'a>(pub &'a Address);
/// SOCKSv4 server reply.
///
/// +-----+-----+---------+-------+
/// | VN  | CD  | DSTPORT | DSTIP |
/// +-----+-----+---------+-------+
/// |  1  |  1  |    2    |   4   |
/// +-----+-----+---------+-------+
///             ^^^^^^^^^^^^^^^^^^
///             ignore: only for SOCKSv4 BIND
#[derive(Debug)]
pub struct Response(pub Status);
/// Destination carried in a SOCKSv4 request.
#[derive(Debug)]
pub enum Address {
    /// A literal IPv4 socket address.
    Socket(SocketAddrV4),
    /// Hostname plus port, resolved by the proxy (the SOCKSv4a extension).
    Domain(String, u16),
}
/// SOCKSv4 reply codes (CD field of the response).
#[derive(Debug, PartialEq)]
pub enum Status {
    Success = 90,
    Failed = 91,
    IdentFailure = 92,
    IdentMismatch = 93,
}
impl Request<'_> {
    /// Serializes the CONNECT request into `buf`, returning the number of
    /// bytes written, or `SerializeError::WouldOverflow` if `buf` lacks room.
    pub fn write_to_buf<B: BufMut>(&self, mut buf: B) -> Result<usize, SerializeError> {
        match self.0 {
            Address::Socket(socket) => {
                if buf.remaining_mut() < 10 {
                    return Err(SerializeError::WouldOverflow);
                }
                buf.put_u8(0x04); // Version
                buf.put_u8(0x01); // CONNECT
                buf.put_u16(socket.port()); // Port
                buf.put_slice(&socket.ip().octets()); // IP
                buf.put_u8(0x00); // USERID (empty)
                buf.put_u8(0x00); // NULL terminator
                Ok(10)
            }
            Address::Domain(domain, port) => {
                if buf.remaining_mut() < 10 + domain.len() + 1 {
                    return Err(SerializeError::WouldOverflow);
                }
                buf.put_u8(0x04); // Version
                buf.put_u8(0x01); // CONNECT
                buf.put_u16(*port); // Port
                // SOCKSv4a: a deliberately invalid IP of the form 0.0.0.x
                // signals that a NUL-terminated domain name follows.
                buf.put_slice(&[0x00, 0x00, 0x00, 0xFF]); // Invalid IP
                buf.put_u8(0x00); // USERID (empty)
                buf.put_u8(0x00); // NULL terminator
                buf.put_slice(domain.as_bytes()); // Domain
                buf.put_u8(0x00); // NULL terminator
                Ok(10 + domain.len() + 1)
            }
        }
    }
}
impl TryFrom<&mut BytesMut> for Response {
    type Error = ParsingError;
    /// Parses an 8-byte SOCKSv4 reply, consuming it from `buf`.
    fn try_from(buf: &mut BytesMut) -> Result<Self, Self::Error> {
        if buf.remaining() < 8 {
            return Err(ParsingError::Incomplete);
        }
        // Reply version (VN) must be 0.
        if buf.get_u8() != 0x00 {
            return Err(ParsingError::Other);
        }
        let status = buf.get_u8().try_into()?;
        // DSTPORT/DSTIP are only meaningful for BIND; consume and discard.
        let _addr = {
            let port = buf.get_u16();
            let mut ip = [0; 4];
            buf.copy_to_slice(&mut ip);
            SocketAddrV4::new(ip.into(), port)
        };
        Ok(Self(status))
    }
}
impl TryFrom<u8> for Status {
    type Error = ParsingError;

    /// Maps a raw SOCKSv4 reply code (90-93) onto a `Status`.
    fn try_from(byte: u8) -> Result<Self, Self::Error> {
        match byte {
            90 => Ok(Self::Success),
            91 => Ok(Self::Failed),
            92 => Ok(Self::IdentFailure),
            93 => Ok(Self::IdentMismatch),
            _ => Err(ParsingError::Other),
        }
    }
}
impl std::fmt::Display for Status {
    /// Human-readable description of a SOCKSv4 reply code.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Success => "success",
            Self::Failed => "server failed to execute command",
            Self::IdentFailure => "server ident service failed",
            Self::IdentMismatch => "server ident service did not recognise client identifier",
        })
    }
}

View File

@@ -0,0 +1,144 @@
mod errors;
pub use errors::*;
mod messages;
use messages::*;
use std::net::{IpAddr, SocketAddr, SocketAddrV4, ToSocketAddrs};
use std::task::{Context, Poll};
use http::Uri;
use hyper::rt::{Read, Write};
use tower_service::Service;
use bytes::BytesMut;
use super::{Handshaking, SocksError};
/// Tunnel Proxy via SOCKSv4
///
/// This is a connector that can be used by the `legacy::Client`. It wraps
/// another connector, and after getting an underlying connection, it establishes
/// a TCP tunnel over it using SOCKSv4.
#[derive(Debug, Clone)]
pub struct SocksV4<C> {
    // Connector used to reach the proxy server itself.
    inner: C,
    // Handshake configuration (proxy address, DNS behavior).
    config: SocksConfig,
}
#[derive(Debug, Clone)]
struct SocksConfig {
    // Address of the SOCKS proxy.
    proxy: Uri,
    // Resolve destination hostnames locally instead of on the proxy.
    local_dns: bool,
}
impl<C> SocksV4<C> {
    /// Create a new SOCKSv4 handshake service
    ///
    /// Wraps an underlying connector and stores the address of a tunneling
    /// proxying server.
    ///
    /// A `SocksV4` can then be called with any destination. The `dst` passed to
    /// `call` will not be used to create the underlying connection, but will
    /// be used in a SOCKS handshake with the proxy destination.
    pub fn new(proxy_dst: Uri, connector: C) -> Self {
        Self {
            inner: connector,
            config: SocksConfig::new(proxy_dst),
        }
    }
    /// Resolve domain names locally on the client, rather than on the proxy server.
    ///
    /// Disabled by default as local resolution of domain names can be detected as a
    /// DNS leak.
    pub fn local_dns(mut self, local_dns: bool) -> Self {
        self.config.local_dns = local_dns;
        self
    }
}
impl SocksConfig {
    pub fn new(proxy: Uri) -> Self {
        Self {
            proxy,
            local_dns: false,
        }
    }
    /// Performs the SOCKSv4 CONNECT handshake over `conn`.
    ///
    /// On success returns the same transport, ready to carry tunneled traffic.
    async fn execute<T, E>(self, mut conn: T, host: String, port: u16) -> Result<T, SocksError<E>>
    where
        T: Read + Write + Unpin,
    {
        let address = match host.parse::<IpAddr>() {
            // SOCKSv4 has no IPv6 address form.
            Ok(IpAddr::V6(_)) => return Err(SocksV4Error::IpV6.into()),
            Ok(IpAddr::V4(ip)) => Address::Socket(SocketAddrV4::new(ip, port)),
            Err(_) => {
                if self.local_dns {
                    // Resolve here and take the first IPv4 result; the Domain
                    // form is only used when the proxy should resolve.
                    (host, port)
                        .to_socket_addrs()?
                        .find_map(|s| {
                            if let SocketAddr::V4(v4) = s {
                                Some(Address::Socket(v4))
                            } else {
                                None
                            }
                        })
                        .ok_or(SocksError::DnsFailure)?
                } else {
                    Address::Domain(host, port)
                }
            }
        };
        let mut send_buf = BytesMut::with_capacity(1024);
        let mut recv_buf = BytesMut::with_capacity(1024);
        // Send Request
        let req = Request(&address);
        let n = req.write_to_buf(&mut send_buf)?;
        crate::rt::write_all(&mut conn, &send_buf[..n]).await?;
        // Read Response
        let res: Response = super::read_message(&mut conn, &mut recv_buf).await?;
        if res.0 == Status::Success {
            Ok(conn)
        } else {
            Err(SocksV4Error::Command(res.0).into())
        }
    }
}
impl<C> Service<Uri> for SocksV4<C>
where
    C: Service<Uri>,
    C::Future: Send + 'static,
    C::Response: Read + Write + Unpin + Send + 'static,
    C::Error: Send + 'static,
{
    type Response = C::Response;
    type Error = SocksError<C::Error>;
    type Future = Handshaking<C::Future, C::Response, C::Error>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is the inner connector's readiness.
        self.inner.poll_ready(cx).map_err(SocksError::Inner)
    }
    fn call(&mut self, dst: Uri) -> Self::Future {
        let config = self.config.clone();
        // Connect to the proxy itself; `dst` is only used inside the handshake.
        let connecting = self.inner.call(config.proxy.clone());
        let fut = async move {
            // Default to 443 when `dst` has no explicit port (tunneled
            // destinations are typically HTTPS).
            let port = dst.port().map(|p| p.as_u16()).unwrap_or(443);
            let host = dst.host().ok_or(SocksError::MissingHost)?.to_string();
            let conn = connecting.await.map_err(SocksError::Inner)?;
            config.execute(conn, host, port).await
        };
        Handshaking {
            fut: Box::pin(fut),
            _marker: Default::default(),
        }
    }
}

View File

@@ -0,0 +1,47 @@
use super::Status;
/// SOCKSv5-specific handshake failures.
#[derive(Debug)]
pub enum SocksV5Error {
    /// Hostnames are length-prefixed by a single byte, so 255 is the maximum.
    HostTooLong,
    /// A failure during the authentication phase.
    Auth(AuthError),
    /// The server rejected the CONNECT command.
    Command(Status),
}
/// Authentication-phase failures.
#[derive(Debug)]
pub enum AuthError {
    /// The server does not accept user/pass authentication.
    Unsupported,
    /// The server replied with a method other than the one negotiated.
    MethodMismatch,
    /// The server rejected the supplied credentials.
    Failed,
}
impl From<Status> for SocksV5Error {
    fn from(err: Status) -> Self {
        Self::Command(err)
    }
}
impl From<AuthError> for SocksV5Error {
    fn from(err: AuthError) -> Self {
        Self::Auth(err)
    }
}
impl std::fmt::Display for SocksV5Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::HostTooLong => f.write_str("host address is more than 255 characters"),
            // Nested errors render their own description.
            Self::Command(e) => e.fmt(f),
            Self::Auth(e) => e.fmt(f),
        }
    }
}
impl std::fmt::Display for AuthError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Unsupported => "server does not support user/pass authentication",
            Self::MethodMismatch => "server implements authentication incorrectly",
            Self::Failed => "credentials not accepted",
        })
    }
}

View File

@@ -0,0 +1,348 @@
use super::super::{ParsingError, SerializeError};
use bytes::{Buf, BufMut, BytesMut};
use std::net::SocketAddr;
/// Method negotiation request (client -> server).
///
/// +----+----------+----------+
/// |VER | NMETHODS | METHODS  |
/// +----+----------+----------+
/// | 1  |    1     | 1 to 255 |
/// +----+----------+----------+
#[derive(Debug)]
pub struct NegotiationReq<'a>(pub &'a AuthMethod);
/// Method selection reply (server -> client).
///
/// +----+--------+
/// |VER | METHOD |
/// +----+--------+
/// | 1  |   1    |
/// +----+--------+
#[derive(Debug)]
pub struct NegotiationRes(pub AuthMethod);
/// Username/password subnegotiation request (client -> server).
///
/// +----+------+----------+------+----------+
/// |VER | ULEN |  UNAME   | PLEN |  PASSWD  |
/// +----+------+----------+------+----------+
/// | 1  |  1   | 1 to 255 |  1   | 1 to 255 |
/// +----+------+----------+------+----------+
#[derive(Debug)]
pub struct AuthenticationReq<'a>(pub &'a str, pub &'a str);
/// Subnegotiation result; `true` means the credentials were accepted.
///
/// +----+--------+
/// |VER | STATUS |
/// +----+--------+
/// | 1  |   1    |
/// +----+--------+
#[derive(Debug)]
pub struct AuthenticationRes(pub bool);
/// CONNECT request (client -> server).
///
/// +----+-----+-------+------+----------+----------+
/// |VER | CMD |  RSV  | ATYP | DST.ADDR | DST.PORT |
/// +----+-----+-------+------+----------+----------+
/// | 1  |  1  | X'00' |  1   | Variable |    2     |
/// +----+-----+-------+------+----------+----------+
#[derive(Debug)]
pub struct ProxyReq<'a>(pub &'a Address);
/// CONNECT reply (server -> client).
///
/// +----+-----+-------+------+----------+----------+
/// |VER | REP |  RSV  | ATYP | BND.ADDR | BND.PORT |
/// +----+-----+-------+------+----------+----------+
/// | 1  |  1  | X'00' |  1   | Variable |    2     |
/// +----+-----+-------+------+----------+----------+
#[derive(Debug)]
pub struct ProxyRes(pub Status);
/// METHOD octets offered/selected during negotiation.
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum AuthMethod {
    /// No authentication required.
    NoAuth = 0x00,
    /// Username/password subnegotiation.
    UserPass = 0x02,
    /// The server accepts none of the offered methods.
    NoneAcceptable = 0xFF,
}
/// Destination address in a request / bound address in a reply.
#[derive(Debug)]
pub enum Address {
    Socket(SocketAddr),
    /// Hostname plus port, resolved by the proxy.
    Domain(String, u16),
}
/// REP field values of the server reply.
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum Status {
    Success,
    GeneralServerFailure,
    ConnectionNotAllowed,
    NetworkUnreachable,
    HostUnreachable,
    ConnectionRefused,
    TtlExpired,
    CommandNotSupported,
    AddressTypeNotSupported,
}
impl NegotiationReq<'_> {
    /// Serializes the 3-byte negotiation request (exactly one method is ever
    /// offered), returning the byte count.
    pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
        if buf.capacity() - buf.len() < 3 {
            return Err(SerializeError::WouldOverflow);
        }
        buf.put_u8(0x05); // Version
        buf.put_u8(0x01); // Number of authentication methods
        buf.put_u8(*self.0 as u8); // Authentication method
        Ok(3)
    }
}
impl TryFrom<&mut BytesMut> for NegotiationRes {
    type Error = ParsingError;
    /// Parses the server's 2-byte method selection, consuming it from `buf`.
    fn try_from(buf: &mut BytesMut) -> Result<Self, ParsingError> {
        if buf.remaining() < 2 {
            return Err(ParsingError::Incomplete);
        }
        // VER must be 5.
        if buf.get_u8() != 0x05 {
            return Err(ParsingError::Other);
        }
        let method = buf.get_u8().try_into()?;
        Ok(Self(method))
    }
}
impl AuthenticationReq<'_> {
    /// Serializes a username/password subnegotiation request, returning the
    /// byte count.
    pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
        if buf.capacity() - buf.len() < 3 + self.0.len() + self.1.len() {
            return Err(SerializeError::WouldOverflow);
        }
        buf.put_u8(0x01); // Subnegotiation version
        buf.put_u8(self.0.len() as u8); // Username length (guaranteed to be 255 or less)
        buf.put_slice(self.0.as_bytes()); // Username
        buf.put_u8(self.1.len() as u8); // Password length (guaranteed to be 255 or less)
        buf.put_slice(self.1.as_bytes()); // Password
        Ok(3 + self.0.len() + self.1.len())
    }
}
impl TryFrom<&mut BytesMut> for AuthenticationRes {
    type Error = ParsingError;
    /// Parses the 2-byte user/pass subnegotiation reply, consuming it from
    /// `buf`; the wrapped `bool` is `true` on success.
    fn try_from(buf: &mut BytesMut) -> Result<Self, ParsingError> {
        if buf.remaining() < 2 {
            return Err(ParsingError::Incomplete);
        }
        // Subnegotiation version must be 0x01.
        if buf.get_u8() != 0x01 {
            return Err(ParsingError::Other);
        }
        // A zero STATUS byte means the credentials were accepted.
        // (Replaces the redundant `if x { Ok(Self(true)) } else { Ok(Self(false)) }`.)
        Ok(Self(buf.get_u8() == 0))
    }
}
impl ProxyReq<'_> {
    /// Serializes a CONNECT proxy request, returning the byte count.
    pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
        // ATYP + address body + port, depending on the address form.
        let addr_len = match self.0 {
            Address::Socket(SocketAddr::V4(_)) => 1 + 4 + 2,
            Address::Socket(SocketAddr::V6(_)) => 1 + 16 + 2,
            Address::Domain(ref domain, _) => 1 + 1 + domain.len() + 2,
        };
        if buf.capacity() - buf.len() < 3 + addr_len {
            return Err(SerializeError::WouldOverflow);
        }
        buf.put_u8(0x05); // Version
        buf.put_u8(0x01); // TCP tunneling command
        buf.put_u8(0x00); // Reserved
        // Capacity was verified above, so the address write cannot overflow.
        let _ = self.0.write_to_buf(buf); // Address
        Ok(3 + addr_len)
    }
}
impl TryFrom<&mut BytesMut> for ProxyRes {
    type Error = ParsingError;
    /// Parses a server reply, consuming it (including BND.ADDR) from `buf`.
    fn try_from(buf: &mut BytesMut) -> Result<Self, ParsingError> {
        if buf.remaining() < 3 {
            return Err(ParsingError::Incomplete);
        }
        // VER
        if buf.get_u8() != 0x05 {
            return Err(ParsingError::Other);
        }
        // REP
        let status = buf.get_u8().try_into()?;
        // RSV
        if buf.get_u8() != 0x00 {
            return Err(ParsingError::Other);
        }
        // ATYP + ADDR: validated and consumed, but the bound address itself
        // is not needed by the caller.
        Address::try_from(buf)?;
        Ok(Self(status))
    }
}
impl Address {
    /// Serializes ATYP + address + port, returning the byte count.
    pub fn write_to_buf(&self, buf: &mut BytesMut) -> Result<usize, SerializeError> {
        match self {
            Self::Socket(SocketAddr::V4(v4)) => {
                if buf.capacity() - buf.len() < 1 + 4 + 2 {
                    return Err(SerializeError::WouldOverflow);
                }
                buf.put_u8(0x01);
                buf.put_slice(&v4.ip().octets());
                buf.put_u16(v4.port()); // Network Order/BigEndian for port
                Ok(7)
            }
            Self::Socket(SocketAddr::V6(v6)) => {
                if buf.capacity() - buf.len() < 1 + 16 + 2 {
                    return Err(SerializeError::WouldOverflow);
                }
                buf.put_u8(0x04);
                buf.put_slice(&v6.ip().octets());
                buf.put_u16(v6.port()); // Network Order/BigEndian for port
                Ok(19)
            }
            Self::Domain(domain, port) => {
                if buf.capacity() - buf.len() < 1 + 1 + domain.len() + 2 {
                    return Err(SerializeError::WouldOverflow);
                }
                buf.put_u8(0x03);
                buf.put_u8(domain.len() as u8); // Guaranteed to be less than 255
                buf.put_slice(domain.as_bytes());
                buf.put_u16(*port);
                Ok(4 + domain.len())
            }
        }
    }
}
impl TryFrom<&mut BytesMut> for Address {
    type Error = ParsingError;
    /// Parses ATYP + address + port from `buf`, consuming exactly the bytes
    /// that make up the address.
    fn try_from(buf: &mut BytesMut) -> Result<Self, Self::Error> {
        if buf.remaining() < 2 {
            return Err(ParsingError::Incomplete);
        }
        Ok(match buf.get_u8() {
            // IPv4: 4 address bytes + 2 port bytes
            0x01 => {
                let mut ip = [0; 4];
                if buf.remaining() < 6 {
                    return Err(ParsingError::Incomplete);
                }
                buf.copy_to_slice(&mut ip);
                let port = buf.get_u16();
                Self::Socket(SocketAddr::new(ip.into(), port))
            }
            // Domain: 1 length byte + domain + 2 port bytes
            0x03 => {
                let len = buf.get_u8();
                if len == 0 {
                    return Err(ParsingError::Other);
                } else if buf.remaining() < (len as usize) + 2 {
                    return Err(ParsingError::Incomplete);
                }
                let domain = std::str::from_utf8(&buf[..len as usize])
                    .map_err(|_| ParsingError::Other)?
                    .to_string();
                // BUG FIX: consume the domain bytes before reading the port.
                // Previously the cursor was never advanced past the domain,
                // so `get_u16` returned the first two domain bytes as the
                // port and left the buffer misaligned for later parsing.
                buf.advance(len as usize);
                let port = buf.get_u16();
                Self::Domain(domain, port)
            }
            // IPv6: 16 address bytes + 2 port bytes
            0x04 => {
                let mut ip = [0; 16];
                if buf.remaining() < 18 {
                    return Err(ParsingError::Incomplete);
                }
                buf.copy_to_slice(&mut ip);
                let port = buf.get_u16();
                Self::Socket(SocketAddr::new(ip.into(), port))
            }
            _ => return Err(ParsingError::Other),
        })
    }
}
impl TryFrom<u8> for Status {
    type Error = ParsingError;

    /// Decodes the REP octet of a SOCKSv5 server reply.
    fn try_from(byte: u8) -> Result<Self, Self::Error> {
        match byte {
            0x00 => Ok(Self::Success),
            0x01 => Ok(Self::GeneralServerFailure),
            0x02 => Ok(Self::ConnectionNotAllowed),
            0x03 => Ok(Self::NetworkUnreachable),
            0x04 => Ok(Self::HostUnreachable),
            0x05 => Ok(Self::ConnectionRefused),
            0x06 => Ok(Self::TtlExpired),
            0x07 => Ok(Self::CommandNotSupported),
            0x08 => Ok(Self::AddressTypeNotSupported),
            _ => Err(ParsingError::Other),
        }
    }
}

impl TryFrom<u8> for AuthMethod {
    type Error = ParsingError;

    /// Decodes a METHOD octet from the negotiation exchange.
    fn try_from(byte: u8) -> Result<Self, Self::Error> {
        match byte {
            0x00 => Ok(Self::NoAuth),
            0x02 => Ok(Self::UserPass),
            0xFF => Ok(Self::NoneAcceptable),
            _ => Err(ParsingError::Other),
        }
    }
}
impl std::fmt::Display for Status {
    /// Human-readable description of a SOCKSv5 reply code.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            Self::Success => "success",
            Self::GeneralServerFailure => "general server failure",
            Self::ConnectionNotAllowed => "connection not allowed",
            Self::NetworkUnreachable => "network unreachable",
            Self::HostUnreachable => "host unreachable",
            Self::ConnectionRefused => "connection refused",
            Self::TtlExpired => "ttl expired",
            Self::CommandNotSupported => "command not supported",
            Self::AddressTypeNotSupported => "address type not supported",
        })
    }
}

View File

@@ -0,0 +1,275 @@
mod errors;
pub use errors::*;
mod messages;
use messages::*;
use std::net::{IpAddr, SocketAddr, ToSocketAddrs};
use std::task::{Context, Poll};
use http::Uri;
use hyper::rt::{Read, Write};
use tower_service::Service;
use bytes::BytesMut;
use super::{Handshaking, SocksError};
/// Tunnel Proxy via SOCKSv5
///
/// This is a connector that can be used by the `legacy::Client`. It wraps
/// another connector, and after getting an underlying connection, it establishes
/// a TCP tunnel over it using SOCKSv5.
#[derive(Debug, Clone)]
pub struct SocksV5<C> {
    // Connector used to reach the proxy server itself.
    inner: C,
    // Handshake configuration.
    config: SocksConfig,
}
#[derive(Debug, Clone)]
pub struct SocksConfig {
    // Address of the SOCKS proxy.
    proxy: Uri,
    // Optional username/password for the user/pass subnegotiation.
    proxy_auth: Option<(String, String)>,
    // Resolve destination hostnames locally instead of on the proxy.
    local_dns: bool,
    // Send all handshake messages up front without waiting for replies.
    optimistic: bool,
}
// Handshake state machine phases; driven by `SocksConfig::execute`.
#[derive(Debug)]
enum State {
    SendingNegReq,
    ReadingNegRes,
    SendingAuthReq,
    ReadingAuthRes,
    SendingProxyReq,
    ReadingProxyRes,
}
impl<C> SocksV5<C> {
    /// Create a new SOCKSv5 handshake service.
    ///
    /// Wraps an underlying connector and stores the address of a tunneling
    /// proxying server.
    ///
    /// A `SocksV5` can then be called with any destination. The `dst` passed to
    /// `call` will not be used to create the underlying connection, but will
    /// be used in a SOCKS handshake with the proxy destination.
    pub fn new(proxy_dst: Uri, connector: C) -> Self {
        Self {
            inner: connector,
            config: SocksConfig::new(proxy_dst),
        }
    }
    /// Use User/Pass authentication method during handshake.
    ///
    /// Username and Password must be maximum of 255 characters each.
    /// 0 length strings are allowed despite RFC prohibiting it. This is done for
    /// compatibility with server implementations that use empty credentials
    /// to allow returning error codes during IP authentication.
    pub fn with_auth(mut self, user: String, pass: String) -> Self {
        self.config.proxy_auth = Some((user, pass));
        self
    }
    /// Resolve domain names locally on the client, rather than on the proxy server.
    ///
    /// Disabled by default as local resolution of domain names can be detected as a
    /// DNS leak.
    pub fn local_dns(mut self, local_dns: bool) -> Self {
        self.config.local_dns = local_dns;
        self
    }
    /// Send all messages of the handshake optimistically (without waiting for server response).
    ///
    /// A typical SOCKS handshake with user/pass authentication takes 3 round trips. Optimistic
    /// sending can reduce round trip times and dramatically increase speed of handshake at the
    /// cost of reduced portability; many server implementations do not support optimistic
    /// sending as it is not defined in the RFC.
    ///
    /// Recommended to ensure connector works correctly without optimistic sending before trying
    /// with optimistic sending.
    pub fn send_optimistically(mut self, optimistic: bool) -> Self {
        self.config.optimistic = optimistic;
        self
    }
}
impl SocksConfig {
    fn new(proxy: Uri) -> Self {
        Self {
            proxy,
            proxy_auth: None,
            local_dns: false,
            optimistic: false,
        }
    }
    /// Drives the SOCKSv5 handshake over `conn`.
    ///
    /// Returns the same transport once the proxy confirms the tunnel, or a
    /// `SocksError` describing which phase failed.
    async fn execute<T, E>(self, mut conn: T, host: String, port: u16) -> Result<T, SocksError<E>>
    where
        T: Read + Write + Unpin,
    {
        // Decide how the destination will be addressed in the proxy request.
        let address = match host.parse::<IpAddr>() {
            Ok(ip) => Address::Socket(SocketAddr::new(ip, port)),
            Err(_) if host.len() <= 255 => {
                if self.local_dns {
                    let socket = (host, port)
                        .to_socket_addrs()?
                        .next()
                        .ok_or(SocksError::DnsFailure)?;
                    Address::Socket(socket)
                } else {
                    Address::Domain(host, port)
                }
            }
            // Domain names are length-prefixed by one byte, so more than 255
            // characters cannot be encoded.
            Err(_) => return Err(SocksV5Error::HostTooLong.into()),
        };
        let method = if self.proxy_auth.is_some() {
            AuthMethod::UserPass
        } else {
            AuthMethod::NoAuth
        };
        // NOTE(review): these two sizing comments look swapped — the 513-byte
        // auth request is a message this client *sends* — and requests
        // accumulate in `send_buf` without being cleared, so the capacity
        // guards in `write_to_buf` could trip for long credentials. Worth
        // auditing the buffer sizing here.
        let mut recv_buf = BytesMut::with_capacity(513); // Max length of valid recievable message is 513 from Auth Request
        let mut send_buf = BytesMut::with_capacity(262); // Max length of valid sendable message is 262 from Auth Response
        // State machine: normally send/read alternate; in optimistic mode all
        // requests go out first, then the responses are read in the same order.
        let mut state = State::SendingNegReq;
        loop {
            match state {
                State::SendingNegReq => {
                    let req = NegotiationReq(&method);
                    // `send_buf` is append-only; slice from `start` to write
                    // only this message to the wire.
                    let start = send_buf.len();
                    req.write_to_buf(&mut send_buf)?;
                    crate::rt::write_all(&mut conn, &send_buf[start..]).await?;
                    if self.optimistic {
                        if method == AuthMethod::UserPass {
                            state = State::SendingAuthReq;
                        } else {
                            state = State::SendingProxyReq;
                        }
                    } else {
                        state = State::ReadingNegRes;
                    }
                }
                State::ReadingNegRes => {
                    let res: NegotiationRes = super::read_message(&mut conn, &mut recv_buf).await?;
                    if res.0 == AuthMethod::NoneAcceptable {
                        return Err(SocksV5Error::Auth(AuthError::Unsupported).into());
                    }
                    // The server must select exactly the method we offered.
                    if res.0 != method {
                        return Err(SocksV5Error::Auth(AuthError::MethodMismatch).into());
                    }
                    if self.optimistic {
                        if res.0 == AuthMethod::UserPass {
                            state = State::ReadingAuthRes;
                        } else {
                            state = State::ReadingProxyRes;
                        }
                    } else if res.0 == AuthMethod::UserPass {
                        state = State::SendingAuthReq;
                    } else {
                        state = State::SendingProxyReq;
                    }
                }
                State::SendingAuthReq => {
                    // Only reachable when `method == UserPass`, so auth is set.
                    let (user, pass) = self.proxy_auth.as_ref().unwrap();
                    let req = AuthenticationReq(user, pass);
                    let start = send_buf.len();
                    req.write_to_buf(&mut send_buf)?;
                    crate::rt::write_all(&mut conn, &send_buf[start..]).await?;
                    if self.optimistic {
                        state = State::SendingProxyReq;
                    } else {
                        state = State::ReadingAuthRes;
                    }
                }
                State::ReadingAuthRes => {
                    let res: AuthenticationRes =
                        super::read_message(&mut conn, &mut recv_buf).await?;
                    if !res.0 {
                        return Err(SocksV5Error::Auth(AuthError::Failed).into());
                    }
                    if self.optimistic {
                        state = State::ReadingProxyRes;
                    } else {
                        state = State::SendingProxyReq;
                    }
                }
                State::SendingProxyReq => {
                    let req = ProxyReq(&address);
                    let start = send_buf.len();
                    req.write_to_buf(&mut send_buf)?;
                    crate::rt::write_all(&mut conn, &send_buf[start..]).await?;
                    if self.optimistic {
                        // All requests are out; now read the queued responses
                        // starting with the negotiation reply.
                        state = State::ReadingNegRes;
                    } else {
                        state = State::ReadingProxyRes;
                    }
                }
                State::ReadingProxyRes => {
                    let res: ProxyRes = super::read_message(&mut conn, &mut recv_buf).await?;
                    if res.0 == Status::Success {
                        return Ok(conn);
                    } else {
                        return Err(SocksV5Error::Command(res.0).into());
                    }
                }
            }
        }
    }
}
impl<C> Service<Uri> for SocksV5<C>
where
    C: Service<Uri>,
    C::Future: Send + 'static,
    C::Response: Read + Write + Unpin + Send + 'static,
    C::Error: Send + 'static,
{
    type Response = C::Response;
    type Error = SocksError<C::Error>;
    type Future = Handshaking<C::Future, C::Response, C::Error>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is the inner connector's readiness.
        self.inner.poll_ready(cx).map_err(SocksError::Inner)
    }
    fn call(&mut self, dst: Uri) -> Self::Future {
        let config = self.config.clone();
        // Connect to the proxy itself; `dst` is only used inside the handshake.
        let connecting = self.inner.call(config.proxy.clone());
        let fut = async move {
            // Default to 443 when `dst` has no explicit port (tunneled
            // destinations are typically HTTPS).
            let port = dst.port().map(|p| p.as_u16()).unwrap_or(443);
            let host = dst.host().ok_or(SocksError::MissingHost)?.to_string();
            let conn = connecting.await.map_err(SocksError::Inner)?;
            config.execute(conn, host, port).await
        };
        Handshaking {
            fut: Box::pin(fut),
            _marker: Default::default(),
        }
    }
}

View File

@@ -0,0 +1,257 @@
use std::error::Error as StdError;
use std::future::Future;
use std::marker::{PhantomData, Unpin};
use std::pin::Pin;
use std::task::{self, ready, Poll};
use http::{HeaderMap, HeaderValue, Uri};
use hyper::rt::{Read, Write};
use pin_project_lite::pin_project;
use tower_service::Service;
/// Tunnel Proxy via HTTP CONNECT
///
/// This is a connector that can be used by the `legacy::Client`. It wraps
/// another connector, and after getting an underlying connection, it creates
/// an HTTP CONNECT tunnel over it.
#[derive(Debug, Clone)]
pub struct Tunnel<C> {
    // Headers to send with the CONNECT request.
    headers: Headers,
    // Connector used to reach the proxy itself.
    inner: C,
    // Address of the proxy server.
    proxy_dst: Uri,
}
// Compact representation of the CONNECT headers: most tunnels send either
// nothing or only `proxy-authorization`, so a full map is the rare case.
#[derive(Clone, Debug)]
enum Headers {
    Empty,
    Auth(HeaderValue),
    Extra(HeaderMap),
}
/// Errors that can occur while establishing the CONNECT tunnel.
#[derive(Debug)]
pub enum TunnelError {
    /// The underlying connector failed.
    ConnectFailed(Box<dyn StdError + Send + Sync>),
    /// I/O failure while exchanging the CONNECT request/response.
    Io(std::io::Error),
    /// The destination URI had no host component.
    MissingHost,
    /// The proxy demanded authentication.
    ProxyAuthRequired,
    /// The proxy's response headers exceeded the read buffer.
    ProxyHeadersTooLong,
    /// Connection closed before a complete response was read.
    TunnelUnexpectedEof,
    /// The proxy refused the tunnel.
    TunnelUnsuccessful,
}
pin_project! {
    // Not publicly exported (so missing_docs doesn't trigger).
    //
    // We return this `Future` instead of the `Pin<Box<dyn Future>>` directly
    // so that users don't rely on it fitting in a `Pin<Box<dyn Future>>` slot
    // (and thus we can change the type in the future).
    #[must_use = "futures do nothing unless polled"]
    #[allow(missing_debug_implementations)]
    pub struct Tunneling<F, T> {
        #[pin]
        fut: BoxTunneling<T>,
        // Keeps the connector's future type `F` in the signature for type
        // inference; nothing is stored.
        _marker: PhantomData<F>,
    }
}
type BoxTunneling<T> = Pin<Box<dyn Future<Output = Result<T, TunnelError>> + Send>>;
impl<C> Tunnel<C> {
    /// Create a new Tunnel service.
    ///
    /// This wraps an underlying connector, and stores the address of a
    /// tunneling proxy server.
    ///
    /// A `Tunnel` can then be called with any destination. The `dst` passed to
    /// `call` will not be used to create the underlying connection, but will
    /// be used in an HTTP CONNECT request sent to the proxy destination.
    pub fn new(proxy_dst: Uri, connector: C) -> Self {
        Self {
            headers: Headers::Empty,
            inner: connector,
            proxy_dst,
        }
    }
    /// Add `proxy-authorization` header value to the CONNECT request.
    pub fn with_auth(mut self, mut auth: HeaderValue) -> Self {
        // just in case the user forgot
        auth.set_sensitive(true);
        match self.headers {
            Headers::Empty => {
                self.headers = Headers::Auth(auth);
            }
            // A later call replaces any previously configured credential.
            Headers::Auth(ref mut existing) => {
                *existing = auth;
            }
            Headers::Extra(ref mut extra) => {
                extra.insert(http::header::PROXY_AUTHORIZATION, auth);
            }
        }
        self
    }
    /// Add extra headers to be sent with the CONNECT request.
    ///
    /// If existing headers have been set, these will be merged.
    pub fn with_headers(mut self, mut headers: HeaderMap) -> Self {
        match self.headers {
            Headers::Empty => {
                self.headers = Headers::Extra(headers);
            }
            // Keep a previously set credential unless the new map already
            // carries its own `proxy-authorization`.
            Headers::Auth(auth) => {
                headers
                    .entry(http::header::PROXY_AUTHORIZATION)
                    .or_insert(auth);
                self.headers = Headers::Extra(headers);
            }
            Headers::Extra(ref mut extra) => {
                extra.extend(headers);
            }
        }
        self
    }
}
impl<C> Service<Uri> for Tunnel<C>
where
    C: Service<Uri>,
    C::Future: Send + 'static,
    C::Response: Read + Write + Unpin + Send + 'static,
    C::Error: Into<Box<dyn StdError + Send + Sync>>,
{
    type Response = C::Response;
    type Error = TunnelError;
    type Future = Tunneling<C::Future, C::Response>;
    // Ready when the wrapped connector is ready; its error surfaces as
    // `ConnectFailed`.
    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
        ready!(self.inner.poll_ready(cx)).map_err(|e| TunnelError::ConnectFailed(e.into()))?;
        Poll::Ready(Ok(()))
    }
    // Connect to the proxy (not to `dst`), then issue a CONNECT for `dst`
    // over that transport.
    fn call(&mut self, dst: Uri) -> Self::Future {
        let connecting = self.inner.call(self.proxy_dst.clone());
        let headers = self.headers.clone();
        Tunneling {
            fut: Box::pin(async move {
                let conn = connecting
                    .await
                    .map_err(|e| TunnelError::ConnectFailed(e.into()))?;
                tunnel(
                    conn,
                    dst.host().ok_or(TunnelError::MissingHost)?,
                    // NOTE(review): defaults to 443 when `dst` has no explicit
                    // port — assumes an HTTPS destination; confirm.
                    dst.port().map(|p| p.as_u16()).unwrap_or(443),
                    &headers,
                )
                .await
            }),
            _marker: PhantomData,
        }
    }
}
impl<F, T, E> Future for Tunneling<F, T>
where
    F: Future<Output = Result<T, E>>,
{
    type Output = Result<T, TunnelError>;
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        // Delegate to the boxed tunnel future; `F` is only a type marker.
        let this = self.project();
        this.fut.poll(cx)
    }
}
// Write an HTTP CONNECT request for `host:port` (plus any configured
// headers) on `conn`, then read and validate the proxy's response. On a
// 200 reply the same transport is returned, now tunneling to the
// destination.
async fn tunnel<T>(mut conn: T, host: &str, port: u16, headers: &Headers) -> Result<T, TunnelError>
where
    T: Read + Write + Unpin,
{
    let mut buf = format!(
        "\
         CONNECT {host}:{port} HTTP/1.1\r\n\
         Host: {host}:{port}\r\n\
         "
    )
    .into_bytes();
    // Append the optional headers as raw bytes.
    match headers {
        Headers::Auth(auth) => {
            buf.extend_from_slice(b"Proxy-Authorization: ");
            buf.extend_from_slice(auth.as_bytes());
            buf.extend_from_slice(b"\r\n");
        }
        Headers::Extra(extra) => {
            for (name, value) in extra {
                buf.extend_from_slice(name.as_str().as_bytes());
                buf.extend_from_slice(b": ");
                buf.extend_from_slice(value.as_bytes());
                buf.extend_from_slice(b"\r\n");
            }
        }
        Headers::Empty => (),
    }
    // headers end
    buf.extend_from_slice(b"\r\n");
    crate::rt::write_all(&mut conn, &buf)
        .await
        .map_err(TunnelError::Io)?;
    // Read the proxy's response into a fixed 8 KiB buffer; responses that
    // don't fit are rejected rather than grown.
    let mut buf = [0; 8192];
    let mut pos = 0;
    loop {
        let n = crate::rt::read(&mut conn, &mut buf[pos..])
            .await
            .map_err(TunnelError::Io)?;
        if n == 0 {
            return Err(TunnelError::TunnelUnexpectedEof);
        }
        pos += n;
        let recvd = &buf[..pos];
        if recvd.starts_with(b"HTTP/1.1 200") || recvd.starts_with(b"HTTP/1.0 200") {
            // A success response is complete once it ends with the blank
            // line terminating the headers.
            if recvd.ends_with(b"\r\n\r\n") {
                return Ok(conn);
            }
            if pos == buf.len() {
                return Err(TunnelError::ProxyHeadersTooLong);
            }
            // else read more
        } else if recvd.starts_with(b"HTTP/1.1 407") {
            // NOTE(review): only the HTTP/1.1 form of 407 is detected; an
            // HTTP/1.0 407 falls through to `TunnelUnsuccessful`.
            return Err(TunnelError::ProxyAuthRequired);
        } else {
            return Err(TunnelError::TunnelUnsuccessful);
        }
    }
}
impl std::fmt::Display for TunnelError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Keep the message short; any underlying cause is exposed via
        // `Error::source`, not repeated here.
        let detail = match *self {
            TunnelError::MissingHost => "missing destination host",
            TunnelError::ProxyAuthRequired => "proxy authorization required",
            TunnelError::ProxyHeadersTooLong => "proxy response headers too long",
            TunnelError::TunnelUnexpectedEof => "unexpected end of file",
            TunnelError::TunnelUnsuccessful => "unsuccessful",
            TunnelError::ConnectFailed(_) => "failed to create underlying connection",
            TunnelError::Io(_) => "io error establishing tunnel",
        };
        write!(f, "tunnel error: {detail}")
    }
}
impl std::error::Error for TunnelError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // Only the wrapping variants carry an underlying cause.
        match *self {
            TunnelError::Io(ref e) => Some(e),
            TunnelError::ConnectFailed(ref e) => Some(&**e),
            TunnelError::MissingHost
            | TunnelError::ProxyAuthRequired
            | TunnelError::ProxyHeadersTooLong
            | TunnelError::TunnelUnexpectedEof
            | TunnelError::TunnelUnsuccessful => None,
        }
    }
}

View File

@@ -0,0 +1,10 @@
#[cfg(any(feature = "http1", feature = "http2"))]
mod client;
#[cfg(any(feature = "http1", feature = "http2"))]
pub use client::{Builder, Client, Error, ResponseFuture};
pub mod connect;
#[doc(hidden)]
// Publicly available, but just for legacy purposes. A better pool will be
// designed.
pub mod pool;

File diff suppressed because it is too large Load Diff

11
vendor/hyper-util/src/client/mod.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
//! HTTP client utilities
/// Legacy implementations of `connect` module and `Client`
#[cfg(feature = "client-legacy")]
pub mod legacy;
#[cfg(feature = "client-pool")]
pub mod pool;
#[cfg(feature = "client-proxy")]
pub mod proxy;

View File

@@ -0,0 +1,494 @@
//! A cache of services
//!
//! The cache is a single list of cached services, bundled with a `MakeService`.
//! Calling the cache returns either an existing service, or makes a new one.
//! The returned `impl Service` can be used to send requests, and when dropped,
//! it will try to be returned back to the cache.
pub use self::internal::builder;
#[cfg(docsrs)]
pub use self::internal::Builder;
#[cfg(docsrs)]
pub use self::internal::Cache;
#[cfg(docsrs)]
pub use self::internal::Cached;
// For now, nothing else in this module is nameable. We can always make things
// more public, but we can't change type shapes (generics) once things are
// public.
mod internal {
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex, Weak};
use std::task::{self, ready, Poll};
use futures_util::future;
use tokio::sync::oneshot;
use tower_service::Service;
use super::events;
    /// Start a builder to construct a `Cache` pool.
    pub fn builder() -> Builder<events::Ignore> {
        // The default event hook discards optional background work.
        Builder {
            events: events::Ignore,
        }
    }
    /// A cache pool of services from the inner make service.
    ///
    /// Created with [`builder()`].
    ///
    /// # Unnameable
    ///
    /// This type is normally unnameable, forbidding naming of the type within
    /// code. The type is exposed in the documentation to show which methods
    /// can be publicly called.
    #[derive(Debug)]
    pub struct Cache<M, Dst, Ev>
    where
        M: Service<Dst>,
    {
        // Make-service used when no idle service is available.
        connector: M,
        // Idle services plus waiters, shared with all `Cached` handles.
        shared: Arc<Mutex<Shared<M::Response>>>,
        // Hooks for optional background tasks (e.g. finishing a lost race).
        events: Ev,
    }
    /// A builder to configure a `Cache`.
    ///
    /// # Unnameable
    ///
    /// This type is normally unnameable, forbidding naming of the type within
    /// code. The type is exposed in the documentation to show which methods
    /// can be publicly called.
    #[derive(Debug)]
    pub struct Builder<Ev> {
        events: Ev,
    }
    /// A cached service returned from a [`Cache`].
    ///
    /// Implements `Service` by delegating to the inner service. Once dropped,
    /// tries to reinsert into the `Cache`.
    ///
    /// # Unnameable
    ///
    /// This type is normally unnameable, forbidding naming of the type within
    /// code. The type is exposed in the documentation to show which methods
    /// can be publicly called.
    pub struct Cached<S> {
        // Set when `poll_ready` reported an error; prevents returning a
        // broken service to the pool on drop.
        is_closed: bool,
        // Always `Some` until taken in `Drop`.
        inner: Option<S>,
        // Weak so an outstanding `Cached` doesn't keep a dropped pool alive.
        shared: Weak<Mutex<Shared<S>>>,
        // todo: on_idle
    }
    // Future returned by `Cache::call`.
    pub enum CacheFuture<M, Dst, Ev>
    where
        M: Service<Dst>,
    {
        // Racing a fresh connect against a waiter for a newly idle service.
        Racing {
            shared: Arc<Mutex<Shared<M::Response>>>,
            select: future::Select<oneshot::Receiver<M::Response>, M::Future>,
            events: Ev,
        },
        // The pool was dropped mid-race; only the connect remains.
        Connecting {
            // TODO: could be Weak even here...
            shared: Arc<Mutex<Shared<M::Response>>>,
            future: M::Future,
        },
        // An idle service was available immediately.
        Cached {
            svc: Option<Cached<M::Response>>,
        },
    }
    // shouldn't be pub
    #[derive(Debug)]
    pub struct Shared<S> {
        // Idle services ready for reuse.
        services: Vec<S>,
        // Pending callers waiting for the next idle service.
        waiters: Vec<oneshot::Sender<S>>,
    }
// impl Builder
    impl<Ev> Builder<Ev> {
        /// Provide a `Future` executor to be used by the `Cache`.
        ///
        /// The executor is used to handle some optional background tasks that
        /// can improve the behavior of the cache, such as reducing connection
        /// thrashing when a race is won. If not configured with an executor,
        /// the default behavior is to ignore any of these optional background
        /// tasks.
        ///
        /// The executor should implement [`hyper::rt::Executor`].
        ///
        /// # Example
        ///
        /// ```rust
        /// # #[cfg(feature = "tokio")]
        /// # fn run() {
        /// let builder = hyper_util::client::pool::cache::builder()
        ///     .executor(hyper_util::rt::TokioExecutor::new());
        /// # }
        /// ```
        pub fn executor<E>(self, exec: E) -> Builder<events::WithExecutor<E>> {
            Builder {
                events: events::WithExecutor(exec),
            }
        }
        /// Build a `Cache` pool around the `connector`.
        pub fn build<M, Dst>(self, connector: M) -> Cache<M, Dst, Ev>
        where
            M: Service<Dst>,
        {
            // A new pool starts with no idle services and no waiters.
            Cache {
                connector,
                events: self.events,
                shared: Arc::new(Mutex::new(Shared {
                    services: Vec::new(),
                    waiters: Vec::new(),
                })),
            }
        }
    }
// impl Cache
impl<M, Dst, Ev> Cache<M, Dst, Ev>
where
M: Service<Dst>,
{
/// Retain all cached services indicated by the predicate.
pub fn retain<F>(&mut self, predicate: F)
where
F: FnMut(&mut M::Response) -> bool,
{
self.shared.lock().unwrap().services.retain_mut(predicate);
}
/// Check whether this cache has no cached services.
pub fn is_empty(&self) -> bool {
self.shared.lock().unwrap().services.is_empty()
}
}
    impl<M, Dst, Ev> Service<Dst> for Cache<M, Dst, Ev>
    where
        M: Service<Dst>,
        M::Future: Unpin,
        M::Response: Unpin,
        Ev: events::Events<BackgroundConnect<M::Future, M::Response>> + Clone + Unpin,
    {
        type Response = Cached<M::Response>;
        type Error = M::Error;
        type Future = CacheFuture<M, Dst, Ev>;
        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
            // With an idle service on hand, `call` won't need the connector,
            // so don't require the connector's readiness.
            if !self.shared.lock().unwrap().services.is_empty() {
                Poll::Ready(Ok(()))
            } else {
                self.connector.poll_ready(cx)
            }
        }
        fn call(&mut self, target: Dst) -> Self::Future {
            // 1. If already cached, easy!
            let waiter = {
                let mut locked = self.shared.lock().unwrap();
                if let Some(found) = locked.take() {
                    return CacheFuture::Cached {
                        svc: Some(Cached::new(found, Arc::downgrade(&self.shared))),
                    };
                }
                // Register as a waiter so a service freed while we connect
                // can be handed to us directly.
                let (tx, rx) = oneshot::channel();
                locked.waiters.push(tx);
                rx
            };
            // 2. Otherwise, we start a new connect, and also listen for
            // any newly idle.
            CacheFuture::Racing {
                shared: self.shared.clone(),
                select: future::select(waiter, self.connector.call(target)),
                events: self.events.clone(),
            }
        }
    }
impl<M, Dst, Ev> Clone for Cache<M, Dst, Ev>
where
M: Service<Dst> + Clone,
Ev: Clone,
{
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
events: self.events.clone(),
shared: self.shared.clone(),
}
}
}
    impl<M, Dst, Ev> Future for CacheFuture<M, Dst, Ev>
    where
        M: Service<Dst>,
        M::Future: Unpin,
        M::Response: Unpin,
        Ev: events::Events<BackgroundConnect<M::Future, M::Response>> + Unpin,
    {
        type Output = Result<Cached<M::Response>, M::Error>;
        fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            loop {
                match &mut *self.as_mut() {
                    CacheFuture::Racing {
                        shared,
                        select,
                        events,
                    } => {
                        match ready!(Pin::new(select).poll(cx)) {
                            future::Either::Left((Err(_pool_closed), connecting)) => {
                                // pool was dropped, so we'll never get it from a waiter,
                                // but if this future still exists, then the user still
                                // wants a connection. just wait for the connecting
                                *self = CacheFuture::Connecting {
                                    shared: shared.clone(),
                                    future: connecting,
                                };
                            }
                            future::Either::Left((Ok(pool_got), connecting)) => {
                                // A peer returned a service first; hand the
                                // in-flight connect to the event hook so it
                                // may (optionally) be finished in background.
                                events.on_race_lost(BackgroundConnect {
                                    future: connecting,
                                    shared: Arc::downgrade(&shared),
                                });
                                return Poll::Ready(Ok(Cached::new(
                                    pool_got,
                                    Arc::downgrade(&shared),
                                )));
                            }
                            future::Either::Right((connected, _waiter)) => {
                                // The connect won; dropping `_waiter` removes
                                // us from the idle queue.
                                let inner = connected?;
                                return Poll::Ready(Ok(Cached::new(
                                    inner,
                                    Arc::downgrade(&shared),
                                )));
                            }
                        }
                    }
                    CacheFuture::Connecting { shared, future } => {
                        let inner = ready!(Pin::new(future).poll(cx))?;
                        return Poll::Ready(Ok(Cached::new(inner, Arc::downgrade(&shared))));
                    }
                    CacheFuture::Cached { svc } => {
                        // `svc` is `Some` until this single take on completion.
                        return Poll::Ready(Ok(svc.take().unwrap()));
                    }
                }
            }
        }
    }
    // impl Cached
    impl<S> Cached<S> {
        // Wrap a service with a weak link back to the pool it returns to.
        fn new(inner: S, shared: Weak<Mutex<Shared<S>>>) -> Self {
            Cached {
                is_closed: false,
                inner: Some(inner),
                shared,
            }
        }
        // TODO: inner()? looks like `tower` likes `get_ref()` and `get_mut()`.
        /// Get a reference to the inner service.
        pub fn inner(&self) -> &S {
            self.inner.as_ref().expect("inner only taken in drop")
        }
        /// Get a mutable reference to the inner service.
        pub fn inner_mut(&mut self) -> &mut S {
            self.inner.as_mut().expect("inner only taken in drop")
        }
    }
    impl<S, Req> Service<Req> for Cached<S>
    where
        S: Service<Req>,
    {
        type Response = S::Response;
        type Error = S::Error;
        type Future = S::Future;
        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
            // A readiness error marks this handle closed so `Drop` won't
            // return the broken service to the pool.
            self.inner.as_mut().unwrap().poll_ready(cx).map_err(|err| {
                self.is_closed = true;
                err
            })
        }
        fn call(&mut self, req: Req) -> Self::Future {
            self.inner.as_mut().unwrap().call(req)
        }
    }
impl<S> Drop for Cached<S> {
fn drop(&mut self) {
if self.is_closed {
return;
}
if let Some(value) = self.inner.take() {
if let Some(shared) = self.shared.upgrade() {
if let Ok(mut shared) = shared.lock() {
shared.put(value);
}
}
}
}
}
impl<S: fmt::Debug> fmt::Debug for Cached<S> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Cached")
.field(self.inner.as_ref().unwrap())
.finish()
}
}
    // impl Shared
    impl<V> Shared<V> {
        // Return an idle service, preferring to hand it directly to a
        // waiter; it is only parked in `services` if no waiter accepts it.
        fn put(&mut self, val: V) {
            let mut val = Some(val);
            while let Some(tx) = self.waiters.pop() {
                if !tx.is_closed() {
                    match tx.send(val.take().unwrap()) {
                        Ok(()) => break,
                        Err(v) => {
                            // Receiver went away between the closed check
                            // and the send; reclaim the value and keep going.
                            val = Some(v);
                        }
                    }
                }
            }
            if let Some(val) = val {
                self.services.push(val);
            }
        }
        fn take(&mut self) -> Option<V> {
            // TODO: take in a loop
            self.services.pop()
        }
    }
    // Future that finishes a connect whose race was lost, parking the
    // resulting service in the pool (if the pool still exists).
    pub struct BackgroundConnect<CF, S> {
        future: CF,
        shared: Weak<Mutex<Shared<S>>>,
    }
impl<CF, S, E> Future for BackgroundConnect<CF, S>
where
CF: Future<Output = Result<S, E>> + Unpin,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
match ready!(Pin::new(&mut self.future).poll(cx)) {
Ok(svc) => {
if let Some(shared) = self.shared.upgrade() {
if let Ok(mut locked) = shared.lock() {
locked.put(svc);
}
}
Poll::Ready(())
}
Err(_e) => Poll::Ready(()),
}
}
}
}
// Event hooks for the cache's optional background work.
mod events {
    // Hook that drops background work instead of running it.
    #[derive(Clone, Debug)]
    #[non_exhaustive]
    pub struct Ignore;
    // Hook that spawns background work on the provided executor.
    #[derive(Clone, Debug)]
    pub struct WithExecutor<E>(pub(super) E);
    pub trait Events<CF> {
        // Called with the still-pending connect future when a cached
        // service won the race instead.
        fn on_race_lost(&self, fut: CF);
    }
    impl<CF> Events<CF> for Ignore {
        fn on_race_lost(&self, _fut: CF) {}
    }
    impl<E, CF> Events<CF> for WithExecutor<E>
    where
        E: hyper::rt::Executor<CF>,
    {
        fn on_race_lost(&self, fut: CF) {
            self.0.execute(fut);
        }
    }
}
#[cfg(test)]
mod tests {
    use futures_util::future;
    use tower_service::Service;
    use tower_test::assert_request_eq;
    // An empty cache must drive the inner make-service to create one.
    #[tokio::test]
    async fn test_makes_svc_when_empty() {
        let (mock, mut handle) = tower_test::mock::pair();
        let mut cache = super::builder().build(mock);
        handle.allow(1);
        std::future::poll_fn(|cx| cache.poll_ready(cx))
            .await
            .unwrap();
        let f = cache.call(1);
        // Answer the mock request concurrently so the call can resolve.
        future::join(f, async move {
            assert_request_eq!(handle, 1).send_response("one");
        })
        .await
        .0
        .expect("call");
    }
    // Dropping a `Cached` returns the service, so a second call must not
    // hit the make-service again.
    #[tokio::test]
    async fn test_reuses_after_idle() {
        let (mock, mut handle) = tower_test::mock::pair();
        let mut cache = super::builder().build(mock);
        // only 1 connection should ever be made
        handle.allow(1);
        std::future::poll_fn(|cx| cache.poll_ready(cx))
            .await
            .unwrap();
        let f = cache.call(1);
        let cached = future::join(f, async {
            assert_request_eq!(handle, 1).send_response("one");
        })
        .await
        .0
        .expect("call");
        drop(cached);
        std::future::poll_fn(|cx| cache.poll_ready(cx))
            .await
            .unwrap();
        let f = cache.call(1);
        // No `handle.allow` here: this must be served from the cache.
        let cached = f.await.expect("call");
        drop(cached);
    }
}

226
vendor/hyper-util/src/client/pool/map.rs vendored Normal file
View File

@@ -0,0 +1,226 @@
//! Map pool utilities
//!
//! The map isn't a typical `Service`, but rather a stand-alone type that can
//! map requests to a key and service factory. This is because the service is
//! more of a router, and cannot determine which inner service to check for
//! backpressure since it's not known until the request is made.
//!
//! The map implementation allows customization of extracting a key, and how to
//! construct a MakeService for that key.
//!
//! # Example
//!
//! ```rust,ignore
//! # async fn run() {
//! # use hyper_util::client::pool;
//! # let req = http::Request::new(());
//! # let some_http1_connector = || {
//! # tower::service::service_fn(|_req| async { Ok::<_, &'static str>(()) })
//! # };
//! let mut map = pool::map::Map::builder()
//! .keys(|uri| (uri.scheme().clone(), uri.authority().clone()))
//! .values(|_uri| {
//! some_http1_connector()
//! })
//! .build();
//!
//! let resp = map.service(req.uri()).call(req).await;
//! # }
//! ```
use std::collections::HashMap;
// expose the documentation
#[cfg(docsrs)]
pub use self::builder::Builder;
/// A map caching `MakeService`s per key.
///
/// Create one with the [`Map::builder()`].
pub struct Map<T, Req>
where
    T: target::Target<Req>,
{
    // Cached services, one per extracted key.
    map: HashMap<T::Key, T::Service>,
    // User-provided key-extractor + service-factory pair.
    targeter: T,
}
// impl Map
impl Map<builder::StartHere, builder::StartHere> {
    /// Create a [`Builder`] to configure a new `Map`.
    // `StartHere` is an uninhabited placeholder; the real generics are
    // chosen by the builder's `keys`/`values` calls.
    pub fn builder<Dst>() -> builder::Builder<Dst, builder::WantsKeyer, builder::WantsServiceMaker>
    {
        builder::Builder::new()
    }
}
impl<T, Req> Map<T, Req>
where
    T: target::Target<Req>,
{
    // Construct an empty map around the targeter.
    fn new(targeter: T) -> Self {
        Map {
            map: HashMap::new(),
            targeter,
        }
    }
}
impl<T, Req> Map<T, Req>
where
T: target::Target<Req>,
T::Key: Eq + std::hash::Hash,
{
/// Get a service after extracting the key from `req`.
pub fn service(&mut self, req: &Req) -> &mut T::Service {
let key = self.targeter.key(req);
self.map
.entry(key)
.or_insert_with(|| self.targeter.service(req))
}
/// Retains only the services specified by the predicate.
pub fn retain<F>(&mut self, predicate: F)
where
F: FnMut(&T::Key, &mut T::Service) -> bool,
{
self.map.retain(predicate);
}
/// Clears the map, removing all key-value pairs.
pub fn clear(&mut self) {
self.map.clear();
}
}
// sealed and unnameable for now
mod target {
    // Strategy trait: extract a cache key for, and build a service for,
    // a destination `Dst`.
    pub trait Target<Dst> {
        type Key;
        type Service;
        fn key(&self, dst: &Dst) -> Self::Key;
        fn service(&self, dst: &Dst) -> Self::Service;
    }
}
// sealed and unnameable for now
mod builder {
use std::marker::PhantomData;
/// A builder to configure a `Map`.
///
/// # Unnameable
///
/// This type is normally unnameable, forbidding naming of the type within
/// code. The type is exposed in the documentation to show which methods
/// can be publicly called.
pub struct Builder<Dst, K, S> {
_dst: PhantomData<fn(Dst)>,
keys: K,
svcs: S,
}
pub struct WantsKeyer;
pub struct WantsServiceMaker;
pub enum StartHere {}
pub struct Built<K, S> {
keys: K,
svcs: S,
}
impl<Dst> Builder<Dst, WantsKeyer, WantsServiceMaker> {
pub(super) fn new() -> Self {
Builder {
_dst: PhantomData,
keys: WantsKeyer,
svcs: WantsServiceMaker,
}
}
}
impl<Dst, S> Builder<Dst, WantsKeyer, S> {
/// Provide a closure that extracts a pool key for the destination.
pub fn keys<K, KK>(self, keyer: K) -> Builder<Dst, K, S>
where
K: Fn(&Dst) -> KK,
{
Builder {
_dst: PhantomData,
keys: keyer,
svcs: self.svcs,
}
}
}
impl<Dst, K> Builder<Dst, K, WantsServiceMaker> {
/// Provide a closure to create a new `MakeService` for the destination.
pub fn values<S, SS>(self, svcs: S) -> Builder<Dst, K, S>
where
S: Fn(&Dst) -> SS,
{
Builder {
_dst: PhantomData,
keys: self.keys,
svcs,
}
}
}
impl<Dst, K, S> Builder<Dst, K, S>
where
Built<K, S>: super::target::Target<Dst>,
<Built<K, S> as super::target::Target<Dst>>::Key: Eq + std::hash::Hash,
{
/// Build the `Map` pool.
pub fn build(self) -> super::Map<Built<K, S>, Dst> {
super::Map::new(Built {
keys: self.keys,
svcs: self.svcs,
})
}
}
impl super::target::Target<StartHere> for StartHere {
type Key = StartHere;
type Service = StartHere;
fn key(&self, _: &StartHere) -> Self::Key {
match *self {}
}
fn service(&self, _: &StartHere) -> Self::Service {
match *self {}
}
}
impl<K, KK, S, SS, Dst> super::target::Target<Dst> for Built<K, S>
where
K: Fn(&Dst) -> KK,
S: Fn(&Dst) -> SS,
KK: Eq + std::hash::Hash,
{
type Key = KK;
type Service = SS;
fn key(&self, dst: &Dst) -> Self::Key {
(self.keys)(dst)
}
fn service(&self, dst: &Dst) -> Self::Service {
(self.svcs)(dst)
}
}
}
#[cfg(test)]
mod tests {
    // Building a map and requesting a service for a key must not panic.
    #[test]
    fn smoke() {
        let mut pool = super::Map::builder().keys(|_| "a").values(|_| "b").build();
        pool.service(&"hello");
    }
}

View File

@@ -0,0 +1,10 @@
//! Composable pool services
//!
//! This module contains various concepts of a connection pool separated into
//! their own concerns. This allows for users to compose the layers, along with
//! any other layers, when constructing custom connection pools.
pub mod cache;
pub mod map;
pub mod negotiate;
pub mod singleton;

View File

@@ -0,0 +1,638 @@
//! Negotiate a pool of services
//!
//! The negotiate pool allows for a service that can decide between two service
//! types based on an intermediate return value. It differs from typical
//! routing since it doesn't depend on the request, but the response.
//!
//! The original use case is to support ALPN upgrades to HTTP/2, with a fallback
//! to HTTP/1.
//!
//! # Example
//!
//! ```rust,ignore
//! # async fn run() -> Result<(), Box<dyn std::error::Error>> {
//! # struct Conn;
//! # impl Conn { fn negotiated_protocol(&self) -> &[u8] { b"h2" } }
//! # let some_tls_connector = tower::service::service_fn(|_| async move {
//! # Ok::<_, std::convert::Infallible>(Conn)
//! # });
//! # let http1_layer = tower::layer::layer_fn(|s| s);
//! # let http2_layer = tower::layer::layer_fn(|s| s);
//! let mut pool = hyper_util::client::pool::negotiate::builder()
//! .connect(some_tls_connector)
//! .inspect(|c| c.negotiated_protocol() == b"h2")
//! .fallback(http1_layer)
//! .upgrade(http2_layer)
//! .build();
//!
//! // connect
//! let mut svc = pool.call(http::Uri::from_static("https://hyper.rs")).await?;
//! svc.ready().await;
//!
//! // http1 or http2 is now set up
//! # let some_http_req = http::Request::new(());
//! let resp = svc.call(some_http_req).await?;
//! # Ok(())
//! # }
//! ```
pub use self::internal::builder;
#[cfg(docsrs)]
pub use self::internal::Builder;
#[cfg(docsrs)]
pub use self::internal::Negotiate;
#[cfg(docsrs)]
pub use self::internal::Negotiated;
mod internal {
use std::future::Future;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{self, ready, Poll};
use pin_project_lite::pin_project;
use tower_layer::Layer;
use tower_service::Service;
    type BoxError = Box<dyn std::error::Error + Send + Sync>;
    /// A negotiating pool over an inner make service.
    ///
    /// Created with [`builder()`].
    ///
    /// # Unnameable
    ///
    /// This type is normally unnameable, forbidding naming of the type within
    /// code. The type is exposed in the documentation to show which methods
    /// can be publicly called.
    #[derive(Clone)]
    pub struct Negotiate<L, R> {
        // Fallback path: the connector wrapped by the fallback layer.
        left: L,
        // Upgrade path: the slot reader wrapped by the upgrade layer.
        right: R,
    }
    /// A negotiated service returned by [`Negotiate`].
    ///
    /// # Unnameable
    ///
    /// This type is normally unnameable, forbidding naming of the type within
    /// code. The type is exposed in the documentation to show which methods
    /// can be publicly called.
    #[derive(Clone, Debug)]
    pub enum Negotiated<L, R> {
        #[doc(hidden)]
        Fallback(L),
        #[doc(hidden)]
        Upgraded(R),
    }
    pin_project! {
        // Future returned by `Negotiate::call`; walks the `State` machine
        // below until one side yields a service.
        pub struct Negotiating<Dst, L, R>
        where
            L: Service<Dst>,
            R: Service<()>,
        {
            #[pin]
            state: State<Dst, L::Future, R::Future>,
            left: L,
            right: R,
        }
    }
    pin_project! {
        #[project = StateProj]
        enum State<Dst, FL, FR> {
            // First attempt at the upgraded path; `dst` is kept so the
            // fallback can still be called with it.
            Eager {
                #[pin]
                future: FR,
                dst: Option<Dst>,
            },
            // Trying the fallback path.
            Fallback {
                #[pin]
                future: FL,
            },
            // Retrying the upgraded path after the fallback side signaled
            // an upgrade.
            Upgrade {
                #[pin]
                future: FR,
            }
        }
    }
    pin_project! {
        // Future for `Negotiated`'s `Service` impl; forwards to whichever
        // side produced it.
        #[project = NegotiatedProj]
        pub enum NegotiatedFuture<L, R> {
            Fallback {
                #[pin]
                future: L
            },
            Upgraded {
                #[pin]
                future: R
            },
        }
    }
/// A builder to configure a `Negotiate`.
///
/// # Unnameable
///
/// This type is normally unnameable, forbidding naming of the type within
/// code. The type is exposed in the documentation to show which methods
/// can be publicly called.
#[derive(Debug)]
pub struct Builder<C, I, L, R> {
connect: C,
inspect: I,
fallback: L,
upgrade: R,
}
#[derive(Debug)]
pub struct WantsConnect;
#[derive(Debug)]
pub struct WantsInspect;
#[derive(Debug)]
pub struct WantsFallback;
#[derive(Debug)]
pub struct WantsUpgrade;
/// Start a builder to construct a `Negotiate` pool.
pub fn builder() -> Builder<WantsConnect, WantsInspect, WantsFallback, WantsUpgrade> {
Builder {
connect: WantsConnect,
inspect: WantsInspect,
fallback: WantsFallback,
upgrade: WantsUpgrade,
}
}
    impl<C, I, L, R> Builder<C, I, L, R> {
        /// Provide the initial connector.
        pub fn connect<CC>(self, connect: CC) -> Builder<CC, I, L, R> {
            Builder {
                connect,
                inspect: self.inspect,
                fallback: self.fallback,
                upgrade: self.upgrade,
            }
        }
        /// Provide the inspector that determines the result of the negotiation.
        pub fn inspect<II>(self, inspect: II) -> Builder<C, II, L, R> {
            Builder {
                connect: self.connect,
                inspect,
                fallback: self.fallback,
                upgrade: self.upgrade,
            }
        }
        /// Provide the layer to fallback to if negotiation fails.
        pub fn fallback<LL>(self, fallback: LL) -> Builder<C, I, LL, R> {
            Builder {
                connect: self.connect,
                inspect: self.inspect,
                fallback,
                upgrade: self.upgrade,
            }
        }
        /// Provide the layer to upgrade to if negotiation succeeds.
        pub fn upgrade<RR>(self, upgrade: RR) -> Builder<C, I, L, RR> {
            Builder {
                connect: self.connect,
                inspect: self.inspect,
                fallback: self.fallback,
                upgrade,
            }
        }
        /// Build the `Negotiate` pool.
        pub fn build<Dst>(self) -> Negotiate<L::Service, R::Service>
        where
            C: Service<Dst>,
            C::Error: Into<BoxError>,
            L: Layer<Inspector<C, C::Response, I>>,
            L::Service: Service<Dst> + Clone,
            <L::Service as Service<Dst>>::Error: Into<BoxError>,
            R: Layer<Inspected<C::Response>>,
            R::Service: Service<()> + Clone,
            <R::Service as Service<()>>::Error: Into<BoxError>,
            I: Fn(&C::Response) -> bool + Clone,
        {
            let Builder {
                connect,
                inspect,
                fallback,
                upgrade,
            } = self;
            // The two paths communicate through a shared slot: the fallback
            // side's `Inspector` parks an "upgraded" connection there, and
            // the upgrade side's `Inspected` takes it back out.
            let slot = Arc::new(Mutex::new(None));
            let wrapped = Inspector {
                svc: connect,
                inspect,
                slot: slot.clone(),
            };
            let left = fallback.layer(wrapped);
            let right = upgrade.layer(Inspected { slot });
            Negotiate { left, right }
        }
    }
    // Accessors to the two layered services.
    impl<L, R> Negotiate<L, R> {
        /// Get a mutable reference to the fallback service.
        pub fn fallback_mut(&mut self) -> &mut L {
            &mut self.left
        }
        /// Get a mutable reference to the upgrade service.
        pub fn upgrade_mut(&mut self) -> &mut R {
            &mut self.right
        }
    }
    impl<L, R, Target> Service<Target> for Negotiate<L, R>
    where
        L: Service<Target> + Clone,
        L::Error: Into<BoxError>,
        R: Service<()> + Clone,
        R::Error: Into<BoxError>,
    {
        type Response = Negotiated<L::Response, R::Response>;
        type Error = BoxError;
        type Future = Negotiating<Target, L, R>;
        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
            // The fallback side wraps the real connector (see `build`), so
            // its readiness is the one that matters.
            self.left.poll_ready(cx).map_err(Into::into)
        }
        fn call(&mut self, dst: Target) -> Self::Future {
            let left = self.left.clone();
            Negotiating {
                state: State::Eager {
                    future: self.right.call(()),
                    dst: Some(dst),
                },
                // place clone, take original that we already polled-ready.
                left: std::mem::replace(&mut self.left, left),
                right: self.right.clone(),
            }
        }
    }
    impl<Dst, L, R> Future for Negotiating<Dst, L, R>
    where
        L: Service<Dst>,
        L::Error: Into<BoxError>,
        R: Service<()>,
        R::Error: Into<BoxError>,
    {
        type Output = Result<Negotiated<L::Response, R::Response>, BoxError>;
        fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            // States:
            // - `Eager`: try the "right" path first; on `UseOther` sentinel, fall back to left.
            // - `Fallback`: try the left path; on `UseOther` sentinel, upgrade back to right.
            // - `Upgrade`: retry the right path after a fallback.
            // If all fail, give up.
            let mut me = self.project();
            loop {
                match me.state.as_mut().project() {
                    StateProj::Eager { future, dst } => match ready!(future.poll(cx)) {
                        Ok(out) => return Poll::Ready(Ok(Negotiated::Upgraded(out))),
                        Err(err) => {
                            let err = err.into();
                            // Any error other than the sentinel is real and
                            // terminates the negotiation.
                            if UseOther::is(&*err) {
                                let dst = dst.take().unwrap();
                                let f = me.left.call(dst);
                                me.state.set(State::Fallback { future: f });
                                continue;
                            } else {
                                return Poll::Ready(Err(err));
                            }
                        }
                    },
                    StateProj::Fallback { future } => match ready!(future.poll(cx)) {
                        Ok(out) => return Poll::Ready(Ok(Negotiated::Fallback(out))),
                        Err(err) => {
                            let err = err.into();
                            if UseOther::is(&*err) {
                                let f = me.right.call(());
                                me.state.set(State::Upgrade { future: f });
                                continue;
                            } else {
                                return Poll::Ready(Err(err));
                            }
                        }
                    },
                    StateProj::Upgrade { future } => match ready!(future.poll(cx)) {
                        Ok(out) => return Poll::Ready(Ok(Negotiated::Upgraded(out))),
                        Err(err) => return Poll::Ready(Err(err.into())),
                    },
                }
            }
        }
    }
impl<L, R> Negotiated<L, R> {
// Could be useful?
#[cfg(test)]
pub(super) fn is_fallback(&self) -> bool {
matches!(self, Negotiated::Fallback(_))
}
#[cfg(test)]
pub(super) fn is_upgraded(&self) -> bool {
matches!(self, Negotiated::Upgraded(_))
}
// TODO: are these the correct methods? Or .as_ref().fallback(), etc?
/// Get a reference to the fallback service if this is it.
pub fn fallback_ref(&self) -> Option<&L> {
if let Negotiated::Fallback(ref left) = self {
Some(left)
} else {
None
}
}
/// Get a mutable reference to the fallback service if this is it.
pub fn fallback_mut(&mut self) -> Option<&mut L> {
if let Negotiated::Fallback(ref mut left) = self {
Some(left)
} else {
None
}
}
/// Get a reference to the upgraded service if this is it.
pub fn upgraded_ref(&self) -> Option<&R> {
if let Negotiated::Upgraded(ref right) = self {
Some(right)
} else {
None
}
}
/// Get a mutable reference to the upgraded service if this is it.
pub fn upgraded_mut(&mut self) -> Option<&mut R> {
if let Negotiated::Upgraded(ref mut right) = self {
Some(right)
} else {
None
}
}
}
    // Pure delegation: both sides must agree on request, response, and
    // error types, so behavior is uniform regardless of the variant.
    impl<L, R, Req, Res, E> Service<Req> for Negotiated<L, R>
    where
        L: Service<Req, Response = Res, Error = E>,
        R: Service<Req, Response = Res, Error = E>,
    {
        type Response = Res;
        type Error = E;
        type Future = NegotiatedFuture<L::Future, R::Future>;
        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
            match self {
                Negotiated::Fallback(ref mut s) => s.poll_ready(cx),
                Negotiated::Upgraded(ref mut s) => s.poll_ready(cx),
            }
        }
        fn call(&mut self, req: Req) -> Self::Future {
            match self {
                Negotiated::Fallback(ref mut s) => NegotiatedFuture::Fallback {
                    future: s.call(req),
                },
                Negotiated::Upgraded(ref mut s) => NegotiatedFuture::Upgraded {
                    future: s.call(req),
                },
            }
        }
    }
    impl<L, R, Out> Future for NegotiatedFuture<L, R>
    where
        L: Future<Output = Out>,
        R: Future<Output = Out>,
    {
        type Output = Out;
        // Forward the poll to whichever variant's future is held.
        fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            match self.project() {
                NegotiatedProj::Fallback { future } => future.poll(cx),
                NegotiatedProj::Upgraded { future } => future.poll(cx),
            }
        }
    }
    // ===== internal =====
    // Wraps the raw connector: after a connect, it runs the `inspect`
    // predicate and, on a positive result, parks the connection in `slot`
    // and yields the `UseOther` sentinel instead of the connection.
    pub struct Inspector<M, S, I> {
        svc: M,
        inspect: I,
        slot: Arc<Mutex<Option<S>>>,
    }
    pin_project! {
        // Future for `Inspector::call`.
        pub struct InspectFuture<F, S, I> {
            #[pin]
            future: F,
            inspect: I,
            slot: Arc<Mutex<Option<S>>>,
        }
    }
    // Manual impl: `S` need not be `Clone`, since it sits behind an `Arc`.
    impl<M: Clone, S, I: Clone> Clone for Inspector<M, S, I> {
        fn clone(&self) -> Self {
            Self {
                svc: self.svc.clone(),
                inspect: self.inspect.clone(),
                slot: self.slot.clone(),
            }
        }
    }
    impl<M, S, I, Target> Service<Target> for Inspector<M, S, I>
    where
        M: Service<Target, Response = S>,
        M::Error: Into<BoxError>,
        I: Clone + Fn(&S) -> bool,
    {
        type Response = M::Response;
        type Error = BoxError;
        type Future = InspectFuture<M::Future, S, I>;
        fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
            self.svc.poll_ready(cx).map_err(Into::into)
        }
        fn call(&mut self, dst: Target) -> Self::Future {
            // The inspector and slot ride along with the connect future so
            // the result can be checked when it resolves.
            InspectFuture {
                future: self.svc.call(dst),
                inspect: self.inspect.clone(),
                slot: self.slot.clone(),
            }
        }
    }
    impl<F, I, S, E> Future for InspectFuture<F, S, I>
    where
        F: Future<Output = Result<S, E>>,
        E: Into<BoxError>,
        I: Fn(&S) -> bool,
    {
        type Output = Result<S, BoxError>;
        fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            let me = self.project();
            let s = ready!(me.future.poll(cx)).map_err(Into::into)?;
            // If the inspector says "upgraded", stash the connection for the
            // upgrade side and signal the caller with the sentinel error.
            Poll::Ready(if (me.inspect)(&s) {
                *me.slot.lock().unwrap() = Some(s);
                Err(UseOther.into())
            } else {
                Ok(s)
            })
        }
    }
    // The upgrade side's "connector": it never dials anything, it only
    // yields a connection previously parked in the shared slot.
    pub struct Inspected<S> {
        slot: Arc<Mutex<Option<S>>>,
    }
    impl<S, Target> Service<Target> for Inspected<S> {
        type Response = S;
        type Error = BoxError;
        type Future = std::future::Ready<Result<S, BoxError>>;
        fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
            // An empty slot means negotiation must go through the fallback
            // path first; report that with the sentinel.
            if self.slot.lock().unwrap().is_some() {
                Poll::Ready(Ok(()))
            } else {
                Poll::Ready(Err(UseOther.into()))
            }
        }
        fn call(&mut self, _dst: Target) -> Self::Future {
            let s = self
                .slot
                .lock()
                .unwrap()
                .take()
                .ok_or_else(|| UseOther.into());
            std::future::ready(s)
        }
    }
    // Manual impl: `S` need not be `Clone`, since it sits behind an `Arc`.
    impl<S> Clone for Inspected<S> {
        fn clone(&self) -> Inspected<S> {
            Inspected {
                slot: self.slot.clone(),
            }
        }
    }
/// Sentinel error signaling "switch to the other negotiated branch".
#[derive(Debug)]
struct UseOther;

impl std::fmt::Display for UseOther {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "sentinel error; using other")
    }
}

impl std::error::Error for UseOther {}

impl UseOther {
    /// Returns true if `err`, or any error in its `source()` chain, is the
    /// `UseOther` sentinel.
    fn is(err: &(dyn std::error::Error + 'static)) -> bool {
        std::iter::successors(Some(err), |e| e.source()).any(|e| e.is::<UseOther>())
    }
}
}
#[cfg(test)]
mod tests {
    use futures_util::future;
    use tower_service::Service;
    use tower_test::assert_request_eq;

    // When `inspect` rejects the made service, the negotiated service must be
    // built through the fallback layer.
    #[tokio::test]
    async fn not_negotiated_falls_back_to_left() {
        let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>();
        let mut negotiate = super::builder()
            .connect(mock_svc)
            .inspect(|_: &&str| false)
            .fallback(layer_fn(|s| s))
            .upgrade(layer_fn(|s| s))
            .build();
        std::future::poll_fn(|cx| negotiate.poll_ready(cx))
            .await
            .unwrap();
        let fut = negotiate.call(());
        // Drive the call while answering the mock's single connect request.
        let nsvc = future::join(fut, async move {
            assert_request_eq!(handle, ()).send_response("one");
        })
        .await
        .0
        .expect("call");
        assert!(nsvc.is_fallback());
    }

    // When `inspect` accepts the made service, the negotiated service must be
    // built through the upgrade layer.
    #[tokio::test]
    async fn negotiated_uses_right() {
        let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>();
        let mut negotiate = super::builder()
            .connect(mock_svc)
            .inspect(|_: &&str| true)
            .fallback(layer_fn(|s| s))
            .upgrade(layer_fn(|s| s))
            .build();
        std::future::poll_fn(|cx| negotiate.poll_ready(cx))
            .await
            .unwrap();
        let fut = negotiate.call(());
        let nsvc = future::join(fut, async move {
            assert_request_eq!(handle, ()).send_response("one");
        })
        .await
        .0
        .expect("call");
        assert!(nsvc.is_upgraded());
    }

    // Minimal closure-based `Layer`, keeping the test layers inert (identity).
    fn layer_fn<F>(f: F) -> LayerFn<F> {
        LayerFn(f)
    }

    #[derive(Clone)]
    struct LayerFn<F>(F);

    impl<F, S, Out> tower_layer::Layer<S> for LayerFn<F>
    where
        F: Fn(S) -> Out,
    {
        type Service = Out;
        fn layer(&self, inner: S) -> Self::Service {
            (self.0)(inner)
        }
    }
}

View File

@@ -0,0 +1,492 @@
//! Singleton pools
//!
//! This ensures that only one active connection is made.
//!
//! The singleton pool wraps a `MakeService<T, Req>` so that it only produces a
//! single `Service<Req>`. It bundles all concurrent calls to it, so that only
//! one connection is made. All calls to the singleton will return a clone of
//! the inner service once established.
//!
//! This fits the HTTP/2 case well.
//!
//! ## Example
//!
//! ```rust,ignore
//! let mut pool = Singleton::new(some_make_svc);
//!
//! let svc1 = pool.call(some_dst).await?;
//!
//! let svc2 = pool.call(some_dst).await?;
//! // svc1 == svc2
//! ```
use std::sync::{Arc, Mutex};
use std::task::{self, Poll};
use tokio::sync::oneshot;
use tower_service::Service;
use self::internal::{DitchGuard, SingletonError, SingletonFuture, State};
type BoxError = Box<dyn std::error::Error + Send + Sync>;
#[cfg(docsrs)]
pub use self::internal::Singled;
/// A singleton pool over an inner service.
///
/// The singleton wraps an inner service maker, bundling all calls to ensure
/// only one service is created. Once made, it returns clones of the made
/// service.
#[derive(Debug)]
pub struct Singleton<M, Dst>
where
    M: Service<Dst>,
{
    // Inner make-service; asked for at most one in-flight make at a time.
    mk_svc: M,
    // Shared state machine: Empty -> Making(waiters) -> Made(service).
    state: Arc<Mutex<State<M::Response>>>,
}
impl<M, Target> Singleton<M, Target>
where
    M: Service<Target>,
    M::Response: Clone,
{
    /// Create a new singleton pool over an inner make service.
    pub fn new(mk_svc: M) -> Self {
        Singleton {
            state: Arc::new(Mutex::new(State::Empty)),
            mk_svc,
        }
    }
    // pub fn clear? cancel?
    /// Retains the inner made service if specified by the predicate.
    pub fn retain<F>(&mut self, mut predicate: F)
    where
        F: FnMut(&mut M::Response) -> bool,
    {
        let mut state = self.state.lock().unwrap();
        // Only a fully-made service can be evicted; `Empty` and `Making`
        // states are left untouched.
        if let State::Made(ref mut svc) = *state {
            if !predicate(svc) {
                *state = State::Empty;
            }
        }
    }
    /// Returns whether this singleton pool is empty.
    ///
    /// If this pool has created a shared instance, or is currently in the
    /// process of creating one, this returns false.
    pub fn is_empty(&self) -> bool {
        match *self.state.lock().unwrap() {
            State::Empty => true,
            State::Making(..) | State::Made(..) => false,
        }
    }
}
impl<M, Target> Service<Target> for Singleton<M, Target>
where
    M: Service<Target>,
    M::Response: Clone,
    M::Error: Into<BoxError>,
{
    type Response = internal::Singled<M::Response>;
    type Error = SingletonError;
    type Future = SingletonFuture<M::Future, M::Response>;
    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness only depends on the inner maker while nothing has been
        // made; a cached or in-flight service means we're always ready.
        if let State::Empty = *self.state.lock().unwrap() {
            return self
                .mk_svc
                .poll_ready(cx)
                .map_err(|e| SingletonError(e.into()));
        }
        Poll::Ready(Ok(()))
    }
    fn call(&mut self, dst: Target) -> Self::Future {
        let mut locked = self.state.lock().unwrap();
        match *locked {
            State::Empty => {
                // First caller becomes the driver of the inner make; the
                // DitchGuard resets state to Empty if the driver is dropped.
                let fut = self.mk_svc.call(dst);
                *locked = State::Making(Vec::new());
                SingletonFuture::Driving {
                    future: fut,
                    singleton: DitchGuard(Arc::downgrade(&self.state)),
                }
            }
            State::Making(ref mut waiters) => {
                // A make is already in flight: queue up to receive the
                // driver's broadcast of the made service.
                let (tx, rx) = oneshot::channel();
                waiters.push(tx);
                SingletonFuture::Waiting {
                    rx,
                    state: Arc::downgrade(&self.state),
                }
            }
            // Already made: hand out a clone immediately.
            State::Made(ref svc) => SingletonFuture::Made {
                svc: Some(svc.clone()),
                state: Arc::downgrade(&self.state),
            },
        }
    }
}
impl<M, Target> Clone for Singleton<M, Target>
where
M: Service<Target> + Clone,
{
fn clone(&self) -> Self {
Self {
mk_svc: self.mk_svc.clone(),
state: self.state.clone(),
}
}
}
// Holds some "pub" items that otherwise shouldn't be public.
mod internal {
use std::future::Future;
use std::pin::Pin;
use std::sync::{Mutex, Weak};
use std::task::{self, ready, Poll};
use pin_project_lite::pin_project;
use tokio::sync::oneshot;
use tower_service::Service;
use super::BoxError;
pin_project! {
    // Future returned by `Singleton::call`, in one of three roles:
    // driving the inner make, waiting for the driver's broadcast, or
    // immediately ready with a cached clone.
    #[project = SingletonFutureProj]
    pub enum SingletonFuture<F, S> {
        Driving {
            #[pin]
            future: F,
            // Resets the shared state if this driver is dropped early.
            singleton: DitchGuard<S>,
        },
        Waiting {
            // Receives the made service broadcast by the driver.
            rx: oneshot::Receiver<S>,
            state: Weak<Mutex<State<S>>>,
        },
        Made {
            // `Option` so the service can be moved out on completion.
            svc: Option<S>,
            state: Weak<Mutex<State<S>>>,
        },
    }
}
// XXX: pub because of the enum SingletonFuture
// State machine shared between the pool and its futures:
// Empty -> Making(queued waiters) -> Made(cached service).
#[derive(Debug)]
pub enum State<S> {
    Empty,
    Making(Vec<oneshot::Sender<S>>),
    Made(S),
}
// XXX: pub because of the enum SingletonFuture
// Guard held by the driving future; its `Drop` impl resets the shared state
// to `Empty` unless it was disarmed (Weak replaced) on successful completion.
pub struct DitchGuard<S>(pub(super) Weak<Mutex<State<S>>>);
/// A cached service returned from a [`Singleton`].
///
/// Implements `Service` by delegating to the inner service. If
/// `poll_ready` returns an error, this will clear the cache in the related
/// `Singleton`.
///
/// [`Singleton`]: super::Singleton
///
/// # Unnameable
///
/// This type is normally unnameable, forbidding naming of the type within
/// code. The type is exposed in the documentation to show which methods
/// can be publicly called.
#[derive(Debug)]
pub struct Singled<S> {
    // The (cloned) service produced by the singleton.
    inner: S,
    // Weak handle back to the pool state, used to clear the cache on error.
    state: Weak<Mutex<State<S>>>,
}
impl<F, S, E> Future for SingletonFuture<F, S>
where
    F: Future<Output = Result<S, E>>,
    E: Into<BoxError>,
    S: Clone,
{
    type Output = Result<Singled<S>, SingletonError>;
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        match self.project() {
            SingletonFutureProj::Driving { future, singleton } => {
                match ready!(future.poll(cx)) {
                    Ok(svc) => {
                        if let Some(state) = singleton.0.upgrade() {
                            let mut locked = state.lock().unwrap();
                            // Cache the service and broadcast it to everyone
                            // queued behind this driver.
                            match std::mem::replace(&mut *locked, State::Made(svc.clone())) {
                                State::Making(waiters) => {
                                    // Closed receivers are ignored (waiter dropped).
                                    for tx in waiters {
                                        let _ = tx.send(svc.clone());
                                    }
                                }
                                State::Empty | State::Made(_) => {
                                    // shouldn't happen! only the single driver
                                    // transitions the state out of Making.
                                    unreachable!()
                                }
                            }
                        }
                        // take out of the DitchGuard so it doesn't treat as "ditched"
                        let state = std::mem::replace(&mut singleton.0, Weak::new());
                        Poll::Ready(Ok(Singled::new(svc, state)))
                    }
                    Err(e) => {
                        // Make failed: reset to Empty so a later call can retry.
                        // Replacing the Making state drops its senders, which
                        // cancels all queued waiters. The guard is disarmed so
                        // its Drop doesn't touch the state again.
                        if let Some(state) = singleton.0.upgrade() {
                            let mut locked = state.lock().unwrap();
                            singleton.0 = Weak::new();
                            *locked = State::Empty;
                        }
                        Poll::Ready(Err(SingletonError(e.into())))
                    }
                }
            }
            SingletonFutureProj::Waiting { rx, state } => match ready!(Pin::new(rx).poll(cx)) {
                Ok(svc) => Poll::Ready(Ok(Singled::new(svc, state.clone()))),
                // The driver was dropped (or errored) before broadcasting.
                Err(_canceled) => Poll::Ready(Err(SingletonError(Canceled.into()))),
            },
            SingletonFutureProj::Made { svc, state } => {
                Poll::Ready(Ok(Singled::new(svc.take().unwrap(), state.clone())))
            }
        }
    }
}
impl<S> Drop for DitchGuard<S> {
    // Runs when the driving future is dropped. If the guard was not disarmed
    // (successful completion replaces the Weak with a fresh one), the pool is
    // reset to `Empty` so a later call can retry; dropping the `Making`
    // waiters cancels everyone queued behind this driver.
    fn drop(&mut self) {
        if let Some(state) = self.0.upgrade() {
            if let Ok(mut locked) = state.lock() {
                *locked = State::Empty;
            }
        }
    }
}
impl<S> Singled<S> {
    // Pair a made service with a weak handle back to the pool state.
    fn new(inner: S, state: Weak<Mutex<State<S>>>) -> Self {
        Singled { inner, state }
    }
}
// Delegates to the inner service, additionally clearing the singleton's
// cache when the cached service reports itself broken via `poll_ready`.
impl<S, Req> Service<Req> for Singled<S>
where
    S: Service<Req>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;
    fn poll_ready(&mut self, cx: &mut task::Context<'_>) -> Poll<Result<(), Self::Error>> {
        // We notice if the cached service dies, and clear the singleton cache.
        match self.inner.poll_ready(cx) {
            Poll::Ready(Err(err)) => {
                // If the pool still exists, evict the dead service so the
                // next `Singleton::call` makes a fresh one.
                if let Some(state) = self.state.upgrade() {
                    *state.lock().unwrap() = State::Empty;
                }
                Poll::Ready(Err(err))
            }
            other => other,
        }
    }
    fn call(&mut self, req: Req) -> Self::Future {
        self.inner.call(req)
    }
}
// An opaque error type. By not exposing the type, nor being specifically
// Box<dyn Error>, we can _change_ the type once we no longer need the Canceled
// error type. This will be possible with the refactor to baton passing.
#[derive(Debug)]
pub struct SingletonError(pub(super) BoxError);

impl std::fmt::Display for SingletonError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("singleton connection error")
    }
}

impl std::error::Error for SingletonError {
    // Expose the underlying make/cancel error as the source.
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&*self.0)
    }
}
/// Error delivered to waiters when the driving future is dropped before the
/// service finishes being made.
#[derive(Debug)]
struct Canceled;

impl std::fmt::Display for Canceled {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "singleton connection canceled")
    }
}

impl std::error::Error for Canceled {}
}
#[cfg(test)]
mod tests {
    use std::future::Future;
    use std::pin::Pin;
    use std::task::Poll;
    use tower_service::Service;
    use super::Singleton;

    // Two concurrent calls must yield exactly one inner request: the first
    // drives the make, the second waits for the broadcast.
    #[tokio::test]
    async fn first_call_drives_subsequent_wait() {
        let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>();
        let mut singleton = Singleton::new(mock_svc);
        handle.allow(1);
        std::future::poll_fn(|cx| singleton.poll_ready(cx))
            .await
            .unwrap();
        // First call: should go into Driving
        let fut1 = singleton.call(());
        // Second call: should go into Waiting
        let fut2 = singleton.call(());
        // Expect exactly one request to the inner service
        let ((), send_response) = handle.next_request().await.unwrap();
        send_response.send_response("svc");
        // Both futures should resolve to the same value
        fut1.await.unwrap();
        fut2.await.unwrap();
    }

    // Once made, later calls resolve from the cache without a new request.
    #[tokio::test]
    async fn made_state_returns_immediately() {
        let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>();
        let mut singleton = Singleton::new(mock_svc);
        handle.allow(1);
        std::future::poll_fn(|cx| singleton.poll_ready(cx))
            .await
            .unwrap();
        // Drive first call to completion
        let fut1 = singleton.call(());
        let ((), send_response) = handle.next_request().await.unwrap();
        send_response.send_response("svc");
        fut1.await.unwrap();
        // Second call should not hit inner service
        singleton.call(()).await.unwrap();
    }

    // A cached service failing `poll_ready` must evict itself, so the next
    // call makes a brand new service.
    #[tokio::test]
    async fn cached_service_poll_ready_error_clears_singleton() {
        // Outer mock returns an inner mock service
        let (outer, mut outer_handle) =
            tower_test::mock::pair::<(), tower_test::mock::Mock<(), &'static str>>();
        let mut singleton = Singleton::new(outer);
        // Allow the singleton to be made
        outer_handle.allow(2);
        std::future::poll_fn(|cx| singleton.poll_ready(cx))
            .await
            .unwrap();
        // First call produces an inner mock service
        let fut1 = singleton.call(());
        let ((), send_inner) = outer_handle.next_request().await.unwrap();
        let (inner, mut inner_handle) = tower_test::mock::pair::<(), &'static str>();
        send_inner.send_response(inner);
        let mut cached = fut1.await.unwrap();
        // Now: allow readiness on the inner mock, then inject error
        inner_handle.allow(1);
        // Inject error so next poll_ready fails
        inner_handle.send_error(std::io::Error::new(
            std::io::ErrorKind::Other,
            "cached poll_ready failed",
        ));
        // Drive poll_ready on cached service
        let err = std::future::poll_fn(|cx| cached.poll_ready(cx))
            .await
            .err()
            .expect("expected poll_ready error");
        assert_eq!(err.to_string(), "cached poll_ready failed");
        // After error, the singleton should be cleared, so a new call drives outer again
        outer_handle.allow(1);
        std::future::poll_fn(|cx| singleton.poll_ready(cx))
            .await
            .unwrap();
        let fut2 = singleton.call(());
        let ((), send_inner2) = outer_handle.next_request().await.unwrap();
        let (inner2, mut inner_handle2) = tower_test::mock::pair::<(), &'static str>();
        send_inner2.send_response(inner2);
        let mut cached2 = fut2.await.unwrap();
        // The new cached service should still work
        inner_handle2.allow(1);
        std::future::poll_fn(|cx| cached2.poll_ready(cx))
            .await
            .expect("expected poll_ready");
        let cfut2 = cached2.call(());
        let ((), send_cached2) = inner_handle2.next_request().await.unwrap();
        send_cached2.send_response("svc2");
        cfut2.await.unwrap();
    }

    // Dropping one waiter must not disturb the driver or other callers.
    #[tokio::test]
    async fn cancel_waiter_does_not_affect_others() {
        let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>();
        let mut singleton = Singleton::new(mock_svc);
        std::future::poll_fn(|cx| singleton.poll_ready(cx))
            .await
            .unwrap();
        let fut1 = singleton.call(());
        let fut2 = singleton.call(());
        drop(fut2); // cancel one waiter
        let ((), send_response) = handle.next_request().await.unwrap();
        send_response.send_response("svc");
        fut1.await.unwrap();
    }

    // TODO: this should be able to be improved with a cooperative baton refactor
    // Dropping the driver cancels everyone waiting behind it (DitchGuard).
    #[tokio::test]
    async fn cancel_driver_cancels_all() {
        let (mock_svc, mut handle) = tower_test::mock::pair::<(), &'static str>();
        let mut singleton = Singleton::new(mock_svc);
        std::future::poll_fn(|cx| singleton.poll_ready(cx))
            .await
            .unwrap();
        let mut fut1 = singleton.call(());
        let fut2 = singleton.call(());
        // poll driver just once, and then drop
        std::future::poll_fn(move |cx| {
            let _ = Pin::new(&mut fut1).poll(cx);
            Poll::Ready(())
        })
        .await;
        let ((), send_response) = handle.next_request().await.unwrap();
        send_response.send_response("svc");
        assert_eq!(
            fut2.await.unwrap_err().0.to_string(),
            "singleton connection canceled"
        );
    }
}

View File

@@ -0,0 +1,929 @@
//! Proxy matchers
//!
//! This module contains different matchers to configure rules for when a proxy
//! should be used, and if so, with what arguments.
//!
//! A [`Matcher`] can be constructed either using environment variables, or
//! a [`Matcher::builder()`].
//!
//! Once constructed, the `Matcher` can be asked if it intercepts a `Uri` by
//! calling [`Matcher::intercept()`].
//!
//! An [`Intercept`] includes the destination for the proxy, and any parsed
//! authentication to be used.
use std::fmt;
use std::net::IpAddr;
use http::header::HeaderValue;
use ipnet::IpNet;
use percent_encoding::percent_decode_str;
#[cfg(docsrs)]
pub use self::builder::IntoValue;
#[cfg(not(docsrs))]
use self::builder::IntoValue;
/// A proxy matcher, usually built from environment variables.
pub struct Matcher {
    // Proxy to use for plain-HTTP destinations, if configured.
    http: Option<Intercept>,
    // Proxy to use for HTTPS destinations, if configured.
    https: Option<Intercept>,
    // Hosts that bypass the proxy entirely (NO_PROXY rules).
    no: NoProxy,
}
/// A matched proxy.
///
/// This is returned by a matcher if a proxy should be used.
#[derive(Clone)]
pub struct Intercept {
    // The proxy's own URI (scheme + authority; path normalized to "/").
    uri: http::Uri,
    // Credentials parsed from the proxy URI's userinfo, if any.
    auth: Auth,
}
/// A builder to create a [`Matcher`].
///
/// Construct with [`Matcher::builder()`].
#[derive(Default)]
pub struct Builder {
    // True when running under CGI (REQUEST_METHOD set); see `build()`.
    is_cgi: bool,
    // Raw, unparsed proxy values; an empty string means "not set".
    all: String,
    http: String,
    https: String,
    no: String,
}
// Authorization parsed out of a proxy URI's userinfo.
#[derive(Clone)]
enum Auth {
    // No credentials present.
    Empty,
    // Pre-encoded `Basic` header value, used for http/https proxies.
    Basic(http::header::HeaderValue),
    // Raw (user, password) pair, used for non-HTTP schemes (e.g. socks).
    Raw(String, String),
}
/// A filter for proxy matchers.
///
/// This type is based off the `NO_PROXY` rules used by curl.
#[derive(Clone, Debug, Default)]
struct NoProxy {
    ips: IpMatcher,
    domains: DomainMatcher,
}
// Domain-name rules parsed from a NO_PROXY list.
#[derive(Clone, Debug, Default)]
struct DomainMatcher(Vec<String>);
// IP address/network rules parsed from a NO_PROXY list.
#[derive(Clone, Debug, Default)]
struct IpMatcher(Vec<Ip>);
// A single NO_PROXY IP rule: an exact address or a CIDR network.
#[derive(Clone, Debug)]
enum Ip {
    Address(IpAddr),
    Network(IpNet),
}
// ===== impl Matcher =====
impl Matcher {
    /// Create a matcher reading the current environment variables.
    ///
    /// This checks for values in the following variables, treating them the
    /// same as curl does:
    ///
    /// - `ALL_PROXY`/`all_proxy`
    /// - `HTTPS_PROXY`/`https_proxy`
    /// - `HTTP_PROXY`/`http_proxy`
    /// - `NO_PROXY`/`no_proxy`
    pub fn from_env() -> Self {
        Builder::from_env().build()
    }
    /// Create a matcher from the environment or system.
    ///
    /// This checks the same environment variables as `from_env()`, and if not
    /// set, checks the system configuration for values for the OS.
    ///
    /// This constructor is always available, but if the `client-proxy-system`
    /// feature is enabled, it will check more configuration. Use this
    /// constructor if you want to allow users to optionally enable more, or
    /// use `from_env` if you do not want the values to change based on an
    /// enabled feature.
    pub fn from_system() -> Self {
        Builder::from_system().build()
    }
    /// Start a builder to configure a matcher.
    pub fn builder() -> Builder {
        Builder::default()
    }
    /// Check if the destination should be intercepted by a proxy.
    ///
    /// If the proxy rules match the destination, a new `Uri` will be returned
    /// to connect to.
    pub fn intercept(&self, dst: &http::Uri) -> Option<Intercept> {
        // TODO(perf): don't need to check `no` if below doesn't match...
        // A destination without a host never matches (`?` short-circuits).
        if self.no.contains(dst.host()?) {
            return None;
        }
        // Only http/https destinations are proxied; other schemes get `None`.
        match dst.scheme_str() {
            Some("http") => self.http.clone(),
            Some("https") => self.https.clone(),
            _ => None,
        }
    }
}
impl fmt::Debug for Matcher {
    // Only fields that are actually configured are included in the output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut dbg = f.debug_struct("Matcher");
        if let Some(http) = &self.http {
            dbg.field("http", http);
        }
        if let Some(https) = &self.https {
            dbg.field("https", https);
        }
        if !self.no.is_empty() {
            dbg.field("no", &self.no);
        }
        dbg.finish()
    }
}
// ===== impl Intercept =====
impl Intercept {
    /// Get the `http::Uri` for the target proxy.
    pub fn uri(&self) -> &http::Uri {
        &self.uri
    }
    /// Get any configured basic authorization.
    ///
    /// This should usually be used with a `Proxy-Authorization` header, to
    /// send in Basic format.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use hyper_util::client::proxy::matcher::Matcher;
    /// # let uri = http::Uri::from_static("https://hyper.rs");
    /// let m = Matcher::builder()
    ///     .all("https://Aladdin:opensesame@localhost:8887")
    ///     .build();
    ///
    /// let proxy = m.intercept(&uri).expect("example");
    /// let auth = proxy.basic_auth().expect("example");
    /// assert_eq!(auth, "Basic QWxhZGRpbjpvcGVuc2VzYW1l");
    /// ```
    pub fn basic_auth(&self) -> Option<&HeaderValue> {
        match self.auth {
            Auth::Basic(ref val) => Some(val),
            _ => None,
        }
    }
    /// Get any configured raw authorization.
    ///
    /// If not detected as another scheme, this is the username and password
    /// that should be sent with whatever protocol the proxy handshake uses.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use hyper_util::client::proxy::matcher::Matcher;
    /// # let uri = http::Uri::from_static("https://hyper.rs");
    /// let m = Matcher::builder()
    ///     .all("socks5h://Aladdin:opensesame@localhost:8887")
    ///     .build();
    ///
    /// let proxy = m.intercept(&uri).expect("example");
    /// let auth = proxy.raw_auth().expect("example");
    /// assert_eq!(auth, ("Aladdin", "opensesame"));
    /// ```
    pub fn raw_auth(&self) -> Option<(&str, &str)> {
        match self.auth {
            Auth::Raw(ref user, ref pass) => Some((user.as_str(), pass.as_str())),
            _ => None,
        }
    }
}
impl fmt::Debug for Intercept {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Intercept")
            .field("uri", &self.uri)
            // don't output auth, it's sensitive (credentials)
            .finish()
    }
}
// ===== impl Builder =====
impl Builder {
    // Read the conventional curl-style environment variables into raw strings.
    fn from_env() -> Self {
        Builder {
            // REQUEST_METHOD is set by CGI servers; in that context proxy
            // variables may originate from the request itself — presumably
            // the httpoxy mitigation — so `build()` disables everything.
            is_cgi: std::env::var_os("REQUEST_METHOD").is_some(),
            all: get_first_env(&["ALL_PROXY", "all_proxy"]),
            http: get_first_env(&["HTTP_PROXY", "http_proxy"]),
            https: get_first_env(&["HTTPS_PROXY", "https_proxy"]),
            no: get_first_env(&["NO_PROXY", "no_proxy"]),
        }
    }
    // Like `from_env`, but fills unset values from OS configuration when the
    // `client-proxy-system` feature is enabled.
    fn from_system() -> Self {
        #[allow(unused_mut)]
        let mut builder = Self::from_env();
        #[cfg(all(feature = "client-proxy-system", target_os = "macos"))]
        mac::with_system(&mut builder);
        #[cfg(all(feature = "client-proxy-system", windows))]
        win::with_system(&mut builder);
        builder
    }
    /// Set the target proxy for all destinations.
    pub fn all<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.all = val.into_value();
        self
    }
    /// Set the target proxy for HTTP destinations.
    pub fn http<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.http = val.into_value();
        self
    }
    /// Set the target proxy for HTTPS destinations.
    pub fn https<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.https = val.into_value();
        self
    }
    /// Set the "no" proxy filter.
    ///
    /// The rules are as follows:
    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
    ///   for example "`192.168.1.0/24`").
    /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
    /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
    ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
    ///
    /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
    /// (and therefore would bypass the proxy):
    /// * `http://google.com/`
    /// * `http://www.google.com/`
    /// * `http://192.168.1.42/`
    ///
    /// The URL `http://notgoogle.com/` would not match.
    pub fn no<S>(mut self, val: S) -> Self
    where
        S: IntoValue,
    {
        self.no = val.into_value();
        self
    }
    /// Construct a [`Matcher`] using the configured values.
    pub fn build(self) -> Matcher {
        if self.is_cgi {
            // In a CGI context, ignore all proxy configuration.
            return Matcher {
                http: None,
                https: None,
                no: NoProxy::empty(),
            };
        }
        let all = parse_env_uri(&self.all);
        Matcher {
            // Scheme-specific values win; `all` is the fallback for both.
            http: parse_env_uri(&self.http).or_else(|| all.clone()),
            https: parse_env_uri(&self.https).or(all),
            no: NoProxy::from_string(&self.no),
        }
    }
}
// Return the value of the first environment variable in `names` that is set
// (and valid UTF-8), or an empty string if none are.
fn get_first_env(names: &[&str]) -> String {
    names
        .iter()
        .find_map(|name| std::env::var(name).ok())
        .unwrap_or_default()
}
// Parse one proxy environment value (e.g. `HTTP_PROXY`) into an `Intercept`.
//
// Returns `None` if the value is empty/unparseable as a URI, uses an
// unsupported scheme, or has no authority.
fn parse_env_uri(val: &str) -> Option<Intercept> {
    use std::borrow::Cow;
    let uri = val.parse::<http::Uri>().ok()?;
    let mut builder = http::Uri::builder();
    let mut is_httpish = false;
    let mut auth = Auth::Empty;
    builder = builder.scheme(match uri.scheme() {
        Some(s) => {
            if s == &http::uri::Scheme::HTTP || s == &http::uri::Scheme::HTTPS {
                is_httpish = true;
                s.clone()
            } else if matches!(s.as_str(), "socks4" | "socks4a" | "socks5" | "socks5h") {
                s.clone()
            } else {
                // can't use this proxy scheme
                return None;
            }
        }
        // if no scheme provided, assume they meant 'http'
        None => {
            is_httpish = true;
            http::uri::Scheme::HTTP
        }
    });
    let authority = uri.authority()?;
    if let Some((userinfo, host_port)) = authority.as_str().split_once('@') {
        let (user, pass) = match userinfo.split_once(':') {
            Some((user, pass)) => (user, Some(pass)),
            None => (userinfo, None),
        };
        // Userinfo is percent-encoded in the URI; decode before use.
        let user = percent_decode_str(user).decode_utf8_lossy();
        let pass = pass.map(|pass| percent_decode_str(pass).decode_utf8_lossy());
        if is_httpish {
            // http/https proxies get a ready-to-send Basic header value.
            auth = Auth::Basic(encode_basic_auth(&user, pass.as_deref()));
        } else {
            // socks schemes keep the raw credentials for the handshake.
            auth = Auth::Raw(
                user.into_owned(),
                pass.map_or_else(String::new, Cow::into_owned),
            );
        }
        // The rebuilt URI keeps only host:port — credentials are stripped.
        builder = builder.authority(host_port);
    } else {
        builder = builder.authority(authority.clone());
    }
    // removing any path, but we MUST specify one or the builder errors
    builder = builder.path_and_query("/");
    let dst = builder.build().ok()?;
    Some(Intercept { uri: dst, auth })
}
// Build a `Basic <base64(user:pass)>` header value from plain credentials.
// The value is marked sensitive so Debug output won't print it.
fn encode_basic_auth(user: &str, pass: Option<&str>) -> HeaderValue {
    use base64::prelude::BASE64_STANDARD;
    use base64::write::EncoderWriter;
    use std::io::Write;
    let mut buf = b"Basic ".to_vec();
    {
        // Scope the encoder so its final base64 block is flushed into `buf`
        // before the buffer is read below.
        let mut encoder = EncoderWriter::new(&mut buf, &BASE64_STANDARD);
        let _ = write!(encoder, "{user}:");
        if let Some(password) = pass {
            let _ = write!(encoder, "{password}");
        }
    }
    let mut header = HeaderValue::from_bytes(&buf).expect("base64 is always valid HeaderValue");
    header.set_sensitive(true);
    header
}
impl NoProxy {
    /*
    fn from_env() -> NoProxy {
        let raw = std::env::var("NO_PROXY")
            .or_else(|_| std::env::var("no_proxy"))
            .unwrap_or_default();
        Self::from_string(&raw)
    }
    */
    // A filter that matches nothing.
    fn empty() -> NoProxy {
        NoProxy {
            ips: IpMatcher(Vec::new()),
            domains: DomainMatcher(Vec::new()),
        }
    }
    /// Returns a new no-proxy configuration based on a `no_proxy` string (or `None` if no variables
    /// are set)
    /// The rules are as follows:
    /// * The environment variable `NO_PROXY` is checked, if it is not set, `no_proxy` is checked
    /// * If neither environment variable is set, `None` is returned
    /// * Entries are expected to be comma-separated (whitespace between entries is ignored)
    /// * IP addresses (both IPv4 and IPv6) are allowed, as are optional subnet masks (by adding /size,
    ///   for example "`192.168.1.0/24`").
    /// * An entry "`*`" matches all hostnames (this is the only wildcard allowed)
    /// * Any other entry is considered a domain name (and may contain a leading dot, for example `google.com`
    ///   and `.google.com` are equivalent) and would match both that domain AND all subdomains.
    ///
    /// For example, if `"NO_PROXY=google.com, 192.168.1.0/24"` was set, all of the following would match
    /// (and therefore would bypass the proxy):
    /// * `http://google.com/`
    /// * `http://www.google.com/`
    /// * `http://192.168.1.42/`
    ///
    /// The URL `http://notgoogle.com/` would not match.
    pub fn from_string(no_proxy_list: &str) -> Self {
        let mut ips = Vec::new();
        let mut domains = Vec::new();
        let parts = no_proxy_list.split(',').map(str::trim);
        for part in parts {
            match part.parse::<IpNet>() {
                // If we can parse an IP net or address, then use it, otherwise, assume it is a domain
                Ok(ip) => ips.push(Ip::Network(ip)),
                Err(_) => match part.parse::<IpAddr>() {
                    Ok(addr) => ips.push(Ip::Address(addr)),
                    Err(_) => {
                        // Empty entries (e.g. from "a,,b" or an empty string)
                        // are silently skipped.
                        if !part.trim().is_empty() {
                            domains.push(part.to_owned())
                        }
                    }
                },
            }
        }
        NoProxy {
            ips: IpMatcher(ips),
            domains: DomainMatcher(domains),
        }
    }
    /// Return true if this matches the host (domain or IP).
    pub fn contains(&self, host: &str) -> bool {
        // According to RFC3986, raw IPv6 hosts will be wrapped in []. So we need to strip those off
        // the end in order to parse correctly
        let host = if host.starts_with('[') {
            let x: &[_] = &['[', ']'];
            host.trim_matches(x)
        } else {
            host
        };
        match host.parse::<IpAddr>() {
            // If we can parse an IP addr, then use it, otherwise, assume it is a domain
            Ok(ip) => self.ips.contains(ip),
            Err(_) => self.domains.contains(host),
        }
    }
    // True when no rules are configured at all.
    fn is_empty(&self) -> bool {
        self.ips.0.is_empty() && self.domains.0.is_empty()
    }
}
impl IpMatcher {
    // True if `addr` equals any listed address or lies within any listed
    // network.
    fn contains(&self, addr: IpAddr) -> bool {
        self.0.iter().any(|ip| match ip {
            Ip::Address(address) => *address == addr,
            Ip::Network(net) => net.contains(&addr),
        })
    }
}
impl DomainMatcher {
    // The following links may be useful to understand the origin of these rules:
    // * https://curl.se/libcurl/c/CURLOPT_NOPROXY.html
    // * https://github.com/curl/curl/issues/1208
    //
    // Returns true if `domain` equals a configured entry or is a subdomain of
    // one. Comparisons are ASCII-case-insensitive; a lone "*" matches all.
    fn contains(&self, domain: &str) -> bool {
        let domain_len = domain.len();
        for d in &self.0 {
            // Exact match, with or without a leading dot on the entry.
            if d.eq_ignore_ascii_case(domain)
                || d.strip_prefix('.')
                    .map_or(false, |s| s.eq_ignore_ascii_case(domain))
            {
                return true;
            } else if domain
                .get(domain_len.saturating_sub(d.len())..)
                .map_or(false, |s| s.eq_ignore_ascii_case(d))
            {
                // `d` is a case-insensitive suffix of `domain`; now decide
                // whether the boundary falls on a label separator.
                if d.starts_with('.') {
                    // If the first character of d is a dot, that means the first character of domain
                    // must also be a dot, so we are looking at a subdomain of d and that matches
                    return true;
                } else if domain.as_bytes().get(domain_len - d.len() - 1) == Some(&b'.') {
                    // Given that d is a suffix of domain, if the prior character in domain is a dot
                    // then that means we must be matching a subdomain of d, and that matches.
                    // (`domain_len - d.len()` can't underflow here: an equal-length
                    // suffix match would have hit the first branch instead.)
                    return true;
                }
            } else if d == "*" {
                return true;
            }
        }
        false
    }
}
mod builder {
    /// A type that can be used as a `Builder` value.
    ///
    /// Private and sealed, only visible in docs.
    pub trait IntoValue {
        #[doc(hidden)]
        fn into_value(self) -> String;
    }

    impl IntoValue for String {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self
        }
    }

    impl IntoValue for &String {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self.clone()
        }
    }

    impl IntoValue for &str {
        #[doc(hidden)]
        fn into_value(self) -> String {
            self.to_owned()
        }
    }
}
#[cfg(feature = "client-proxy-system")]
#[cfg(target_os = "macos")]
mod mac {
    use system_configuration::core_foundation::base::CFType;
    use system_configuration::core_foundation::dictionary::CFDictionary;
    use system_configuration::core_foundation::number::CFNumber;
    use system_configuration::core_foundation::string::{CFString, CFStringRef};
    use system_configuration::dynamic_store::SCDynamicStoreBuilder;
    use system_configuration::sys::schema_definitions::{
        kSCPropNetProxiesHTTPEnable, kSCPropNetProxiesHTTPPort, kSCPropNetProxiesHTTPProxy,
        kSCPropNetProxiesHTTPSEnable, kSCPropNetProxiesHTTPSPort, kSCPropNetProxiesHTTPSProxy,
    };
    // Fill any unset http/https proxy values from the macOS dynamic store.
    // Environment-derived values already in `builder` always take precedence.
    pub(super) fn with_system(builder: &mut super::Builder) {
        let store = if let Some(store) = SCDynamicStoreBuilder::new("hyper-util").build() {
            store
        } else {
            return;
        };
        let proxies_map = if let Some(proxies_map) = store.get_proxies() {
            proxies_map
        } else {
            return;
        };
        if builder.http.is_empty() {
            // The `kSC*` keys are extern statics from the SystemConfiguration
            // framework; reading them requires `unsafe`.
            let http_proxy_config = parse_setting_from_dynamic_store(
                &proxies_map,
                unsafe { kSCPropNetProxiesHTTPEnable },
                unsafe { kSCPropNetProxiesHTTPProxy },
                unsafe { kSCPropNetProxiesHTTPPort },
            );
            if let Some(http) = http_proxy_config {
                builder.http = http;
            }
        }
        if builder.https.is_empty() {
            let https_proxy_config = parse_setting_from_dynamic_store(
                &proxies_map,
                unsafe { kSCPropNetProxiesHTTPSEnable },
                unsafe { kSCPropNetProxiesHTTPSProxy },
                unsafe { kSCPropNetProxiesHTTPSPort },
            );
            if let Some(https) = https_proxy_config {
                builder.https = https;
            }
        }
    }
    // Extract a "host:port" (or bare host) proxy string from the proxies
    // dictionary, but only if the matching enable flag is set to 1.
    fn parse_setting_from_dynamic_store(
        proxies_map: &CFDictionary<CFString, CFType>,
        enabled_key: CFStringRef,
        host_key: CFStringRef,
        port_key: CFStringRef,
    ) -> Option<String> {
        let proxy_enabled = proxies_map
            .find(enabled_key)
            .and_then(|flag| flag.downcast::<CFNumber>())
            .and_then(|flag| flag.to_i32())
            .unwrap_or(0)
            == 1;
        if proxy_enabled {
            let proxy_host = proxies_map
                .find(host_key)
                .and_then(|host| host.downcast::<CFString>())
                .map(|host| host.to_string());
            let proxy_port = proxies_map
                .find(port_key)
                .and_then(|port| port.downcast::<CFNumber>())
                .and_then(|port| port.to_i32());
            // A port without a host is useless, so it yields `None`.
            return match (proxy_host, proxy_port) {
                (Some(proxy_host), Some(proxy_port)) => Some(format!("{proxy_host}:{proxy_port}")),
                (Some(proxy_host), None) => Some(proxy_host),
                (None, Some(_)) => None,
                (None, None) => None,
            };
        }
        None
    }
}
#[cfg(feature = "client-proxy-system")]
#[cfg(windows)]
mod win {
    // Fill any unset proxy values from the Windows registry's Internet
    // Settings. Environment-derived values in `builder` take precedence.
    pub(super) fn with_system(builder: &mut super::Builder) {
        let settings = if let Ok(settings) = windows_registry::CURRENT_USER
            .open("Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings")
        {
            settings
        } else {
            return;
        };
        // ProxyEnable == 0 means the system proxy is turned off entirely.
        if settings.get_u32("ProxyEnable").unwrap_or(0) == 0 {
            return;
        }
        if let Ok(val) = settings.get_string("ProxyServer") {
            if builder.http.is_empty() {
                builder.http = val.clone();
            }
            if builder.https.is_empty() {
                builder.https = val;
            }
        }
        if builder.no.is_empty() {
            if let Ok(val) = settings.get_string("ProxyOverride") {
                // ProxyOverride is a ';'-separated list; convert it to the
                // comma-separated NO_PROXY form. "*." prefixes are dropped
                // since the domain matcher already matches subdomains.
                builder.no = val
                    .split(';')
                    .map(|s| s.trim())
                    .collect::<Vec<&str>>()
                    .join(",")
                    .replace("*.", "");
            }
        }
    }
}
// Unit tests for the proxy matcher: domain suffix matching, NO_PROXY
// parsing (domains, CIDR ranges, wildcard), and proxy-selection precedence.
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_domain_matcher() {
        let domains = vec![".foo.bar".into(), "bar.foo".into()];
        let matcher = DomainMatcher(domains);
        // domains match with leading `.`
        assert!(matcher.contains("foo.bar"));
        assert!(matcher.contains("FOO.BAR"));
        // subdomains match with leading `.`
        assert!(matcher.contains("www.foo.bar"));
        assert!(matcher.contains("WWW.FOO.BAR"));
        // domains match with no leading `.`
        assert!(matcher.contains("bar.foo"));
        assert!(matcher.contains("Bar.foo"));
        // subdomains match with no leading `.`
        assert!(matcher.contains("www.bar.foo"));
        assert!(matcher.contains("WWW.BAR.FOO"));
        // non-subdomain string prefixes don't match
        assert!(!matcher.contains("notfoo.bar"));
        assert!(!matcher.contains("notbar.foo"));
    }
    #[test]
    fn test_no_proxy_wildcard() {
        // A lone `*` bypasses the proxy for every host.
        let no_proxy = NoProxy::from_string("*");
        assert!(no_proxy.contains("any.where"));
    }
    #[test]
    fn test_no_proxy_ip_ranges() {
        // Mixed entry list: domain suffixes, exact domains, IPv4/IPv6
        // literals, and CIDR ranges, with assorted whitespace.
        let no_proxy =
            NoProxy::from_string(".foo.bar, bar.baz,10.42.1.1/24,::1,10.124.7.8,2001::/17");
        let should_not_match = [
            // random url, not in no_proxy
            "hyper.rs",
            // make sure that random non-subdomain string prefixes don't match
            "notfoo.bar",
            // make sure that random non-subdomain string prefixes don't match
            "notbar.baz",
            // ipv4 address out of range
            "10.43.1.1",
            // ipv4 address out of range
            "10.124.7.7",
            // ipv6 address out of range
            "[ffff:db8:a0b:12f0::1]",
            // ipv6 address out of range
            "[2005:db8:a0b:12f0::1]",
        ];
        for host in &should_not_match {
            assert!(!no_proxy.contains(host), "should not contain {host:?}");
        }
        let should_match = [
            // make sure subdomains (with leading .) match
            "hello.foo.bar",
            // make sure exact matches (without leading .) match (also makes sure spaces between entries work)
            "bar.baz",
            // make sure subdomains (without leading . in no_proxy) match
            "foo.bar.baz",
            // make sure subdomains (without leading . in no_proxy) match - this differs from cURL
            "foo.bar",
            // ipv4 address match within range
            "10.42.1.100",
            // ipv6 address exact match
            "[::1]",
            // ipv6 address match within range
            "[2001:db8:a0b:12f0::1]",
            // ipv4 address exact match
            "10.124.7.8",
        ];
        for host in &should_match {
            assert!(no_proxy.contains(host), "should contain {host:?}");
        }
    }
    // Build a `Matcher` from named `Builder` fields, leaving the rest default.
    macro_rules! p {
        ($($n:ident = $v:expr,)*) => ({Builder {
            $($n: $v.into(),)*
            ..Builder::default()
        }.build()});
    }
    // Helper: run the matcher against a URL and unwrap the proxy decision.
    fn intercept(p: &Matcher, u: &str) -> Intercept {
        p.intercept(&u.parse().unwrap()).unwrap()
    }
    #[test]
    fn test_all_proxy() {
        // `all` applies to both http and https targets.
        let p = p! {
            all = "http://om.nom",
        };
        assert_eq!("http://om.nom", intercept(&p, "http://example.com").uri());
        assert_eq!("http://om.nom", intercept(&p, "https://example.com").uri());
    }
    #[test]
    fn test_specific_overrides_all() {
        let p = p! {
            all = "http://no.pe",
            http = "http://y.ep",
        };
        assert_eq!("http://no.pe", intercept(&p, "https://example.com").uri());
        // the http rule is "more specific" than the all rule
        assert_eq!("http://y.ep", intercept(&p, "http://example.com").uri());
    }
    #[test]
    fn test_parse_no_scheme_defaults_to_http() {
        // Proxy values without an explicit scheme are treated as http://.
        let p = p! {
            https = "y.ep",
            http = "127.0.0.1:8887",
        };
        assert_eq!(intercept(&p, "https://example.local").uri(), "http://y.ep");
        assert_eq!(
            intercept(&p, "http://example.local").uri(),
            "http://127.0.0.1:8887"
        );
    }
    #[test]
    fn test_parse_http_auth() {
        // userinfo in the proxy URL becomes a Basic auth header value
        // (base64 of "Aladdin:opensesame").
        let p = p! {
            all = "http://Aladdin:opensesame@y.ep",
        };
        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjpvcGVuc2VzYW1l"
        );
    }
    #[test]
    fn test_parse_http_auth_without_password() {
        // A username alone still produces Basic auth ("Aladdin:" encoded).
        let p = p! {
            all = "http://Aladdin@y.ep",
        };
        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjo="
        );
    }
    #[test]
    fn test_parse_http_auth_without_scheme() {
        let p = p! {
            all = "Aladdin:opensesame@y.ep",
        };
        let proxy = intercept(&p, "https://example.local");
        assert_eq!(proxy.uri(), "http://y.ep");
        assert_eq!(
            proxy.basic_auth().expect("basic_auth"),
            "Basic QWxhZGRpbjpvcGVuc2VzYW1l"
        );
    }
    #[test]
    fn test_dont_parse_http_when_is_cgi() {
        // In CGI environments HTTP_PROXY is attacker-controlled, so the
        // http value must be ignored entirely.
        let mut builder = Matcher::builder();
        builder.is_cgi = true;
        builder.http = "http://never.gonna.let.you.go".into();
        let m = builder.build();
        assert!(m.intercept(&"http://rick.roll".parse().unwrap()).is_none());
    }
    #[test]
    fn test_domain_matcher_case_insensitive() {
        let domains = vec![".foo.bar".into()];
        let matcher = DomainMatcher(domains);
        assert!(matcher.contains("foo.bar"));
        assert!(matcher.contains("FOO.BAR"));
        assert!(matcher.contains("Foo.Bar"));
        assert!(matcher.contains("www.foo.bar"));
        assert!(matcher.contains("WWW.FOO.BAR"));
        assert!(matcher.contains("Www.Foo.Bar"));
    }
    #[test]
    fn test_no_proxy_case_insensitive() {
        let p = p! {
            all = "http://proxy.local",
            no = ".example.com",
        };
        // should bypass proxy (case insensitive match)
        assert!(p
            .intercept(&"http://example.com".parse().unwrap())
            .is_none());
        assert!(p
            .intercept(&"http://EXAMPLE.COM".parse().unwrap())
            .is_none());
        assert!(p
            .intercept(&"http://Example.com".parse().unwrap())
            .is_none());
        // subdomain should bypass proxy (case insensitive match)
        assert!(p
            .intercept(&"http://www.example.com".parse().unwrap())
            .is_none());
        assert!(p
            .intercept(&"http://WWW.EXAMPLE.COM".parse().unwrap())
            .is_none());
        assert!(p
            .intercept(&"http://Www.Example.Com".parse().unwrap())
            .is_none());
    }
}

View File

@@ -0,0 +1,3 @@
//! Proxy utilities
pub mod matcher;

53
vendor/hyper-util/src/common/exec.rs vendored Normal file
View File

@@ -0,0 +1,53 @@
#![allow(dead_code)]
use hyper::rt::Executor;
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::sync::Arc;
// Boxed, type-erased `'static` future so a user-supplied executor can be
// stored as a trait object.
pub(crate) type BoxSendFuture = Pin<Box<dyn Future<Output = ()> + Send>>;
// Either the user provides an executor for background tasks, or we use
// `tokio::spawn`.
#[derive(Clone)]
pub(crate) enum Exec {
    Executor(Arc<dyn Executor<BoxSendFuture> + Send + Sync>),
}
// ===== impl Exec =====
impl Exec {
    /// Wrap a user-provided executor; clones share it via `Arc`.
    pub(crate) fn new<E>(inner: E) -> Self
    where
        E: Executor<BoxSendFuture> + Send + Sync + 'static,
    {
        Exec::Executor(Arc::new(inner))
    }
    /// Box the future and hand it to the underlying executor.
    pub(crate) fn execute<F>(&self, fut: F)
    where
        F: Future<Output = ()> + Send + 'static,
    {
        match *self {
            Exec::Executor(ref e) => {
                e.execute(Box::pin(fut));
            }
        }
    }
}
impl fmt::Debug for Exec {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The inner executor is an opaque trait object; print only the name.
        f.debug_struct("Exec").finish()
    }
}
// Let `Exec` itself be used anywhere an `Executor` is expected.
impl<F> hyper::rt::Executor<F> for Exec
where
    F: Future<Output = ()> + Send + 'static,
{
    fn execute(&self, fut: F) {
        Exec::execute(self, fut);
    }
}

78
vendor/hyper-util/src/common/lazy.rs vendored Normal file
View File

@@ -0,0 +1,78 @@
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{self, Poll};
// A future that is additionally able to report whether it has begun running.
pub(crate) trait Started: Future {
    fn started(&self) -> bool;
}
/// Wrap `func` so it is only invoked (producing the real future) on the
/// first poll of the returned `Lazy`.
pub(crate) fn lazy<F, R>(func: F) -> Lazy<F, R>
where
    F: FnOnce() -> R,
    R: Future + Unpin,
{
    Lazy {
        inner: Inner::Init { func },
    }
}
// FIXME: allow() required due to `impl Trait` leaking types to this lint
pin_project! {
    #[allow(missing_debug_implementations)]
    pub(crate) struct Lazy<F, R> {
        #[pin]
        inner: Inner<F, R>,
    }
}
// Three-state machine: not started / running / consumed.
// `Empty` only exists transiently while swapping `Init` out for `Fut`.
pin_project! {
    #[project = InnerProj]
    #[project_replace = InnerProjReplace]
    enum Inner<F, R> {
        Init { func: F },
        Fut { #[pin] fut: R },
        Empty,
    }
}
impl<F, R> Started for Lazy<F, R>
where
    F: FnOnce() -> R,
    R: Future,
{
    fn started(&self) -> bool {
        match self.inner {
            Inner::Init { .. } => false,
            Inner::Fut { .. } | Inner::Empty => true,
        }
    }
}
impl<F, R> Future for Lazy<F, R>
where
    F: FnOnce() -> R,
    R: Future,
{
    type Output = R::Output;
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Fast path: the future has already been created; just poll it.
        if let InnerProj::Fut { fut } = this.inner.as_mut().project() {
            return fut.poll(cx);
        }
        // First poll: move `func` out (leaving `Empty`), build the future,
        // store it pinned, then poll it once.
        match this.inner.as_mut().project_replace(Inner::Empty) {
            InnerProjReplace::Init { func } => {
                this.inner.set(Inner::Fut { fut: func() });
                if let InnerProj::Fut { fut } = this.inner.project() {
                    return fut.poll(cx);
                }
                // We just set `Fut`, so this projection cannot fail.
                unreachable!()
            }
            // `Empty` is unobservable here: it is always replaced by `Fut`
            // before `poll` returns.
            _ => unreachable!("lazy state wrong"),
        }
    }
}

19
vendor/hyper-util/src/common/mod.rs vendored Normal file
View File

@@ -0,0 +1,19 @@
#![allow(missing_docs)]
pub(crate) mod exec;
#[cfg(feature = "client-legacy")]
mod lazy;
#[cfg(feature = "server")]
// #[cfg(feature = "server-auto")]
pub(crate) mod rewind;
#[cfg(feature = "client-legacy")]
mod sync;
pub(crate) mod timer;
#[cfg(feature = "client-legacy")]
pub(crate) use exec::Exec;
#[cfg(feature = "client-legacy")]
pub(crate) use lazy::{lazy, Started as Lazy};
#[cfg(feature = "client-legacy")]
pub(crate) use sync::SyncWrapper;

137
vendor/hyper-util/src/common/rewind.rs vendored Normal file
View File

@@ -0,0 +1,137 @@
use std::{cmp, io};
use bytes::{Buf, Bytes};
use hyper::rt::{Read, ReadBufCursor, Write};
use std::{
pin::Pin,
task::{self, Poll},
};
/// Combine a buffer with an IO, rewinding reads to use the buffer.
#[derive(Debug)]
pub(crate) struct Rewind<T> {
    // Bytes already read from `inner` that should be served again before
    // any further reads hit the underlying IO.
    pub(crate) pre: Option<Bytes>,
    pub(crate) inner: T,
}
impl<T> Rewind<T> {
    /// Create a `Rewind` that will yield `buf` before reading from `io`.
    #[cfg(all(feature = "server", any(feature = "http1", feature = "http2")))]
    pub(crate) fn new_buffered(io: T, buf: Bytes) -> Self {
        Rewind {
            pre: Some(buf),
            inner: io,
        }
    }
}
impl<T> Read for Rewind<T>
where
    T: Read + Unpin,
{
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        mut buf: ReadBufCursor<'_>,
    ) -> Poll<io::Result<()>> {
        // Drain the rewind buffer first; only fall through to the inner IO
        // once it is exhausted.
        if let Some(mut prefix) = self.pre.take() {
            // If there are no remaining bytes, let the bytes get dropped.
            if !prefix.is_empty() {
                let copy_len = cmp::min(prefix.len(), buf.remaining());
                buf.put_slice(&prefix[..copy_len]);
                prefix.advance(copy_len);
                // Put back what's left
                if !prefix.is_empty() {
                    self.pre = Some(prefix);
                }
                return Poll::Ready(Ok(()));
            }
        }
        Pin::new(&mut self.inner).poll_read(cx, buf)
    }
}
// Writes are unaffected by rewinding: forward everything to the inner IO.
impl<T> Write for Rewind<T>
where
    T: Write + Unpin,
{
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.inner).poll_write(cx, buf)
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut task::Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        Pin::new(&mut self.inner).poll_write_vectored(cx, bufs)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_flush(cx)
    }
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<io::Result<()>> {
        Pin::new(&mut self.inner).poll_shutdown(cx)
    }
    fn is_write_vectored(&self) -> bool {
        self.inner.is_write_vectored()
    }
}
/*
#[cfg(test)]
mod tests {
use super::Rewind;
use bytes::Bytes;
use tokio::io::AsyncReadExt;
#[cfg(not(miri))]
#[tokio::test]
async fn partial_rewind() {
let underlying = [104, 101, 108, 108, 111];
let mock = tokio_test::io::Builder::new().read(&underlying).build();
let mut stream = Rewind::new(mock);
// Read off some bytes, ensure we filled o1
let mut buf = [0; 2];
stream.read_exact(&mut buf).await.expect("read1");
// Rewind the stream so that it is as if we never read in the first place.
stream.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
// At this point we should have read everything that was in the MockStream
assert_eq!(&buf, &underlying);
}
#[cfg(not(miri))]
#[tokio::test]
async fn full_rewind() {
let underlying = [104, 101, 108, 108, 111];
let mock = tokio_test::io::Builder::new().read(&underlying).build();
let mut stream = Rewind::new(mock);
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
// Rewind the stream so that it is as if we never read in the first place.
stream.rewind(Bytes::copy_from_slice(&buf[..]));
let mut buf = [0; 5];
stream.read_exact(&mut buf).await.expect("read1");
}
}
*/

67
vendor/hyper-util/src/common/sync.rs vendored Normal file
View File

@@ -0,0 +1,67 @@
/// A wrapper that makes any `T: Send` also `Sync` by only permitting
/// access through `&mut self` or by value — never through `&self`.
pub(crate) struct SyncWrapper<T>(T);
impl<T> SyncWrapper<T> {
    /// Creates a new SyncWrapper containing the given value.
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use hyper::common::sync_wrapper::SyncWrapper;
    ///
    /// let wrapped = SyncWrapper::new(42);
    /// ```
    pub(crate) fn new(value: T) -> Self {
        Self(value)
    }
    /// Acquires a reference to the protected value.
    ///
    /// This is safe because it requires an exclusive reference to the wrapper. Therefore this method
    /// neither panics nor does it return an error. This is in contrast to [`Mutex::get_mut`] which
    /// returns an error if another thread panicked while holding the lock. It is not recommended
    /// to send an exclusive reference to a potentially damaged value to another thread for further
    /// processing.
    ///
    /// [`Mutex::get_mut`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.get_mut
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use hyper::common::sync_wrapper::SyncWrapper;
    ///
    /// let mut wrapped = SyncWrapper::new(42);
    /// let value = wrapped.get_mut();
    /// *value = 0;
    /// assert_eq!(*wrapped.get_mut(), 0);
    /// ```
    pub(crate) fn get_mut(&mut self) -> &mut T {
        &mut self.0
    }
    /// Consumes this wrapper, returning the underlying data.
    ///
    /// This is safe because it requires ownership of the wrapper, therefore this method will neither
    /// panic nor does it return an error. This is in contrast to [`Mutex::into_inner`] which
    /// returns an error if another thread panicked while holding the lock. It is not recommended
    /// to send an exclusive reference to a potentially damaged value to another thread for further
    /// processing.
    ///
    /// [`Mutex::into_inner`]: https://doc.rust-lang.org/std/sync/struct.Mutex.html#method.into_inner
    ///
    /// # Examples
    ///
    /// ```ignore
    /// use hyper::common::sync_wrapper::SyncWrapper;
    ///
    /// let mut wrapped = SyncWrapper::new(42);
    /// assert_eq!(wrapped.into_inner(), 42);
    /// ```
    #[allow(dead_code)]
    pub(crate) fn into_inner(self) -> T {
        self.0
    }
}
// SAFETY: this is safe because the only operations permitted on this data
// structure require exclusive access or ownership, so no shared-reference
// access to the inner `T` can ever occur across threads.
unsafe impl<T: Send> Sync for SyncWrapper<T> {}

42
vendor/hyper-util/src/common/timer.rs vendored Normal file
View File

@@ -0,0 +1,42 @@
#![allow(dead_code)]
use std::fmt;
use std::pin::Pin;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;
use hyper::rt::Sleep;
// Type-erased, cheaply-cloneable handle to a user-provided `hyper::rt::Timer`.
#[derive(Clone)]
pub(crate) struct Timer(Arc<dyn hyper::rt::Timer + Send + Sync>);
// =====impl Timer=====
impl Timer {
    /// Wrap a concrete timer implementation; clones share it via `Arc`.
    pub(crate) fn new<T>(inner: T) -> Self
    where
        T: hyper::rt::Timer + Send + Sync + 'static,
    {
        Self(Arc::new(inner))
    }
}
impl fmt::Debug for Timer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The inner timer is an opaque trait object; print only the name.
        f.debug_struct("Timer").finish()
    }
}
// Forward the `Timer` trait to the wrapped implementation.
// NOTE(review): `reset` is not forwarded here, so the trait's default
// implementation is used for it — confirm that is intentional.
impl hyper::rt::Timer for Timer {
    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
        self.0.sleep(duration)
    }
    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
        self.0.sleep_until(deadline)
    }
    fn now(&self) -> Instant {
        self.0.now()
    }
}

14
vendor/hyper-util/src/error.rs vendored Normal file
View File

@@ -0,0 +1,14 @@
/*
use std::error::Error;
pub(crate) fn find<'a, E: Error + 'static>(top: &'a (dyn Error + 'static)) -> Option<&'a E> {
let mut err = Some(top);
while let Some(src) = err {
if src.is::<E>() {
return src.downcast_ref();
}
err = src.source();
}
None
}
*/

18
vendor/hyper-util/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,18 @@
#![deny(missing_docs)]
#![cfg_attr(docsrs, feature(doc_cfg))]
//! Utilities for working with hyper.
//!
//! This crate is less-stable than [`hyper`](https://docs.rs/hyper). However,
//! does respect Rust's semantic version regarding breaking changes.
#[cfg(feature = "client")]
pub mod client;
mod common;
pub mod rt;
#[cfg(feature = "server")]
pub mod server;
#[cfg(any(feature = "service", feature = "client-legacy"))]
pub mod service;
mod error;

33
vendor/hyper-util/src/rt/io.rs vendored Normal file
View File

@@ -0,0 +1,33 @@
use std::marker::Unpin;
use std::pin::Pin;
use std::task::{ready, Poll};
use hyper::rt::{Read, ReadBuf, Write};
use std::future::poll_fn;
/// Perform a single read from `io` into `buf`, returning the number of
/// bytes read (0 indicates EOF).
pub(crate) async fn read<T>(io: &mut T, buf: &mut [u8]) -> Result<usize, std::io::Error>
where
    T: Read + Unpin,
{
    poll_fn(move |cx| {
        // A fresh ReadBuf is built per poll; bytes are only counted on the
        // poll that returns Ready, so nothing is lost across Pending polls.
        let mut buf = ReadBuf::new(buf);
        ready!(Pin::new(&mut *io).poll_read(cx, buf.unfilled()))?;
        Poll::Ready(Ok(buf.filled().len()))
    })
    .await
}
/// Write the entirety of `buf` to `io`, driving `poll_write` until every
/// byte has been accepted.
///
/// # Errors
///
/// Returns any I/O error from the writer. Mirroring
/// [`std::io::Write::write_all`], a successful write of zero bytes while
/// data remains is reported as [`std::io::ErrorKind::WriteZero`] — the
/// original loop would otherwise spin forever on such a writer.
pub(crate) async fn write_all<T>(io: &mut T, buf: &[u8]) -> Result<(), std::io::Error>
where
    T: Write + Unpin,
{
    // `n` tracks how many bytes have been accepted so far; it persists
    // across polls because the closure owns it.
    let mut n = 0;
    poll_fn(move |cx| {
        while n < buf.len() {
            let written = ready!(Pin::new(&mut *io).poll_write(cx, &buf[n..])?);
            if written == 0 {
                // The writer can no longer accept bytes; bail out instead
                // of looping forever.
                return Poll::Ready(Err(std::io::Error::new(
                    std::io::ErrorKind::WriteZero,
                    "failed to write whole buffer",
                )));
            }
            n += written;
        }
        Poll::Ready(Ok(()))
    })
    .await
}

12
vendor/hyper-util/src/rt/mod.rs vendored Normal file
View File

@@ -0,0 +1,12 @@
//! Runtime utilities
#[cfg(feature = "client-legacy")]
mod io;
#[cfg(feature = "client-legacy")]
pub(crate) use self::io::{read, write_all};
#[cfg(feature = "tokio")]
pub mod tokio;
#[cfg(feature = "tokio")]
pub use self::tokio::{TokioExecutor, TokioIo, TokioTimer};

342
vendor/hyper-util/src/rt/tokio.rs vendored Normal file
View File

@@ -0,0 +1,342 @@
//! [`tokio`] runtime components integration for [`hyper`].
//!
//! [`hyper::rt`] exposes a set of traits to allow hyper to be agnostic to
//! its underlying asynchronous runtime. This submodule provides glue for
//! [`tokio`] users to bridge those types to [`hyper`]'s interfaces.
//!
//! # IO
//!
//! [`hyper`] abstracts over asynchronous readers and writers using [`Read`]
//! and [`Write`], while [`tokio`] abstracts over this using [`AsyncRead`]
//! and [`AsyncWrite`]. This submodule provides a collection of IO adaptors
//! to bridge these two IO ecosystems together: [`TokioIo<I>`],
//! [`WithHyperIo<I>`], and [`WithTokioIo<I>`].
//!
//! To compare and constrast these IO adaptors and to help explain which
//! is the proper choice for your needs, here is a table showing which IO
//! traits these implement, given two types `T` and `H` which implement
//! Tokio's and Hyper's corresponding IO traits:
//!
//! | | [`AsyncRead`] | [`AsyncWrite`] | [`Read`] | [`Write`] |
//! |--------------------|------------------|-------------------|--------------|--------------|
//! | `T` | ✅ **true** | ✅ **true** | ❌ **false** | ❌ **false** |
//! | `H` | ❌ **false** | ❌ **false** | ✅ **true** | ✅ **true** |
//! | [`TokioIo<T>`] | ❌ **false** | ❌ **false** | ✅ **true** | ✅ **true** |
//! | [`TokioIo<H>`] | ✅ **true** | ✅ **true** | ❌ **false** | ❌ **false** |
//! | [`WithHyperIo<T>`] | ✅ **true** | ✅ **true** | ✅ **true** | ✅ **true** |
//! | [`WithHyperIo<H>`] | ❌ **false** | ❌ **false** | ❌ **false** | ❌ **false** |
//! | [`WithTokioIo<T>`] | ❌ **false** | ❌ **false** | ❌ **false** | ❌ **false** |
//! | [`WithTokioIo<H>`] | ✅ **true** | ✅ **true** | ✅ **true** | ✅ **true** |
//!
//! For most situations, [`TokioIo<I>`] is the proper choice. This should be
//! constructed, wrapping some underlying [`hyper`] or [`tokio`] IO, at the
//! call-site of a function like [`hyper::client::conn::http1::handshake`].
//!
//! [`TokioIo<I>`] switches across these ecosystems, but notably does not
//! preserve the existing IO trait implementations of its underlying IO. If
//! one wishes to _extend_ IO with additional implementations,
//! [`WithHyperIo<I>`] and [`WithTokioIo<I>`] are the correct choice.
//!
//! For example, a Tokio reader/writer can be wrapped in [`WithHyperIo<I>`].
//! That will implement _both_ sets of IO traits. Conversely,
//! [`WithTokioIo<I>`] will implement both sets of IO traits given a
//! reader/writer that implements Hyper's [`Read`] and [`Write`].
//!
//! See [`tokio::io`] and ["_Asynchronous IO_"][tokio-async-docs] for more
//! information.
//!
//! [`AsyncRead`]: tokio::io::AsyncRead
//! [`AsyncWrite`]: tokio::io::AsyncWrite
//! [`Read`]: hyper::rt::Read
//! [`Write`]: hyper::rt::Write
//! [tokio-async-docs]: https://docs.rs/tokio/latest/tokio/#asynchronous-io
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
time::{Duration, Instant},
};
use hyper::rt::{Executor, Sleep, Timer};
use pin_project_lite::pin_project;
#[cfg(feature = "tracing")]
use tracing::instrument::Instrument;
pub use self::{with_hyper_io::WithHyperIo, with_tokio_io::WithTokioIo};
mod with_hyper_io;
mod with_tokio_io;
/// Future executor that utilises `tokio` threads.
#[non_exhaustive]
#[derive(Default, Debug, Clone)]
pub struct TokioExecutor {}
pin_project! {
    /// A wrapper that implements Tokio's IO traits for an inner type that
    /// implements hyper's IO traits, or vice versa (implements hyper's IO
    /// traits for a type that implements Tokio's IO traits).
    #[derive(Debug)]
    pub struct TokioIo<T> {
        #[pin]
        inner: T,
    }
}
/// A Timer that uses the tokio runtime.
#[non_exhaustive]
#[derive(Default, Clone, Debug)]
pub struct TokioTimer;
// Use TokioSleep to get tokio::time::Sleep to implement Unpin.
// see https://docs.rs/tokio/latest/tokio/time/struct.Sleep.html
pin_project! {
    // Private newtype over `tokio::time::Sleep` so it can be boxed as a
    // `dyn hyper::rt::Sleep` and reset through downcasting.
    #[derive(Debug)]
    struct TokioSleep {
        #[pin]
        inner: tokio::time::Sleep,
    }
}
// ===== impl TokioExecutor =====
impl<Fut> Executor<Fut> for TokioExecutor
where
    Fut: Future + Send + 'static,
    Fut::Output: Send + 'static,
{
    fn execute(&self, fut: Fut) {
        // With the `tracing` feature, propagate the current span into the
        // spawned task; otherwise spawn the future as-is.
        #[cfg(feature = "tracing")]
        tokio::spawn(fut.in_current_span());
        #[cfg(not(feature = "tracing"))]
        tokio::spawn(fut);
    }
}
impl TokioExecutor {
    /// Create new executor that relies on [`tokio::spawn`] to execute futures.
    pub fn new() -> Self {
        Self {}
    }
}
// ==== impl TokioIo =====
impl<T> TokioIo<T> {
    /// Wrap a type implementing Tokio's or hyper's IO traits.
    pub fn new(inner: T) -> Self {
        Self { inner }
    }
    /// Borrow the inner type.
    pub fn inner(&self) -> &T {
        &self.inner
    }
    /// Mut borrow the inner type.
    pub fn inner_mut(&mut self) -> &mut T {
        &mut self.inner
    }
    /// Consume this wrapper and get the inner type.
    pub fn into_inner(self) -> T {
        self.inner
    }
}
// hyper's `Read` in terms of Tokio's `AsyncRead`: bridge the two buffer
// types over the uninitialized portion of hyper's cursor.
impl<T> hyper::rt::Read for TokioIo<T>
where
    T: tokio::io::AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: hyper::rt::ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // SAFETY: `tbuf` is built over `buf`'s uninitialized region; only
        // bytes the inner reader actually filled (`n`) are marked advanced.
        let n = unsafe {
            let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
            match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) {
                Poll::Ready(Ok(())) => tbuf.filled().len(),
                other => return other,
            }
        };
        unsafe {
            buf.advance(n);
        }
        Poll::Ready(Ok(()))
    }
}
// hyper's `Write` in terms of Tokio's `AsyncWrite`: straight forwarding.
impl<T> hyper::rt::Write for TokioIo<T>
where
    T: tokio::io::AsyncWrite,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf)
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        tokio::io::AsyncWrite::poll_flush(self.project().inner, cx)
    }
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx)
    }
    fn is_write_vectored(&self) -> bool {
        tokio::io::AsyncWrite::is_write_vectored(&self.inner)
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs)
    }
}
// Tokio's `AsyncRead` in terms of hyper's `Read` (the reverse bridge).
impl<T> tokio::io::AsyncRead for TokioIo<T>
where
    T: hyper::rt::Read,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        tbuf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        //let init = tbuf.initialized().len();
        let filled = tbuf.filled().len();
        // SAFETY: the hyper ReadBuf is built over `tbuf`'s unfilled region;
        // only bytes the inner reader reported as filled are later marked
        // initialized and filled on `tbuf`.
        let sub_filled = unsafe {
            let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut());
            match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) {
                Poll::Ready(Ok(())) => buf.filled().len(),
                other => return other,
            }
        };
        let n_filled = filled + sub_filled;
        // At least sub_filled bytes had to have been initialized.
        let n_init = sub_filled;
        unsafe {
            tbuf.assume_init(n_init);
            tbuf.set_filled(n_filled);
        }
        Poll::Ready(Ok(()))
    }
}
// Tokio's `AsyncWrite` in terms of hyper's `Write`: straight forwarding.
impl<T> tokio::io::AsyncWrite for TokioIo<T>
where
    T: hyper::rt::Write,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        hyper::rt::Write::poll_write(self.project().inner, cx, buf)
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Write::poll_flush(self.project().inner, cx)
    }
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Write::poll_shutdown(self.project().inner, cx)
    }
    fn is_write_vectored(&self) -> bool {
        hyper::rt::Write::is_write_vectored(&self.inner)
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs)
    }
}
// ==== impl TokioTimer =====
impl Timer for TokioTimer {
    fn sleep(&self, duration: Duration) -> Pin<Box<dyn Sleep>> {
        Box::pin(TokioSleep {
            inner: tokio::time::sleep(duration),
        })
    }
    fn sleep_until(&self, deadline: Instant) -> Pin<Box<dyn Sleep>> {
        Box::pin(TokioSleep {
            inner: tokio::time::sleep_until(deadline.into()),
        })
    }
    fn reset(&self, sleep: &mut Pin<Box<dyn Sleep>>, new_deadline: Instant) {
        // Only sleeps created by this timer can be reset in place; a sleep
        // from a different Timer implementation is silently left unchanged.
        if let Some(sleep) = sleep.as_mut().downcast_mut_pin::<TokioSleep>() {
            sleep.reset(new_deadline)
        }
    }
    fn now(&self) -> Instant {
        tokio::time::Instant::now().into()
    }
}
impl TokioTimer {
    /// Create a new TokioTimer
    pub fn new() -> Self {
        Self {}
    }
}
impl Future for TokioSleep {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().inner.poll(cx)
    }
}
impl Sleep for TokioSleep {}
impl TokioSleep {
    // Move the deadline of the underlying tokio sleep without reallocating.
    fn reset(self: Pin<&mut Self>, deadline: Instant) {
        self.project().inner.as_mut().reset(deadline.into());
    }
}
#[cfg(test)]
mod tests {
    use crate::rt::TokioExecutor;
    use hyper::rt::Executor;
    use tokio::sync::oneshot;
    // Smoke test: a future handed to TokioExecutor actually runs to
    // completion on the tokio runtime.
    #[tokio::test]
    async fn simple_execute() -> Result<(), Box<dyn std::error::Error>> {
        let (tx, rx) = oneshot::channel();
        let executor = TokioExecutor::new();
        executor.execute(async move {
            tx.send(()).unwrap();
        });
        rx.await.map_err(Into::into)
    }
}

View File

@@ -0,0 +1,170 @@
use pin_project_lite::pin_project;
use std::{
pin::Pin,
task::{Context, Poll},
};
pin_project! {
    /// Extends an underlying [`tokio`] I/O with [`hyper`] I/O implementations.
    ///
    /// This implements [`Read`] and [`Write`] given an inner type that implements [`AsyncRead`]
    /// and [`AsyncWrite`], respectively.
    #[derive(Debug)]
    pub struct WithHyperIo<I> {
        #[pin]
        inner: I,
    }
}
// ==== impl WithHyperIo =====
impl<I> WithHyperIo<I> {
    /// Wraps the inner I/O in an [`WithHyperIo<I>`]
    pub fn new(inner: I) -> Self {
        Self { inner }
    }
    /// Returns a reference to the inner type.
    pub fn inner(&self) -> &I {
        &self.inner
    }
    /// Returns a mutable reference to the inner type.
    pub fn inner_mut(&mut self) -> &mut I {
        &mut self.inner
    }
    /// Consumes this wrapper and returns the inner type.
    pub fn into_inner(self) -> I {
        self.inner
    }
}
/// [`WithHyperIo<I>`] is [`Read`] if `I` is [`AsyncRead`].
///
/// [`AsyncRead`]: tokio::io::AsyncRead
/// [`Read`]: hyper::rt::Read
impl<I> hyper::rt::Read for WithHyperIo<I>
where
    I: tokio::io::AsyncRead,
{
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        mut buf: hyper::rt::ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        // SAFETY: the tokio ReadBuf is built over `buf`'s uninitialized
        // region; only bytes the inner reader filled (`n`) are advanced.
        let n = unsafe {
            let mut tbuf = tokio::io::ReadBuf::uninit(buf.as_mut());
            match tokio::io::AsyncRead::poll_read(self.project().inner, cx, &mut tbuf) {
                Poll::Ready(Ok(())) => tbuf.filled().len(),
                other => return other,
            }
        };
        unsafe {
            buf.advance(n);
        }
        Poll::Ready(Ok(()))
    }
}
/// [`WithHyperIo<I>`] is [`Write`] if `I` is [`AsyncWrite`].
///
/// [`AsyncWrite`]: tokio::io::AsyncWrite
/// [`Write`]: hyper::rt::Write
impl<I> hyper::rt::Write for WithHyperIo<I>
where
    I: tokio::io::AsyncWrite,
{
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        tokio::io::AsyncWrite::poll_write(self.project().inner, cx, buf)
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        tokio::io::AsyncWrite::poll_flush(self.project().inner, cx)
    }
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        tokio::io::AsyncWrite::poll_shutdown(self.project().inner, cx)
    }
    fn is_write_vectored(&self) -> bool {
        tokio::io::AsyncWrite::is_write_vectored(&self.inner)
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        tokio::io::AsyncWrite::poll_write_vectored(self.project().inner, cx, bufs)
    }
}
/// [`WithHyperIo<I>`] exposes its inner `I`'s [`AsyncRead`] implementation.
///
/// [`AsyncRead`]: tokio::io::AsyncRead
impl<I> tokio::io::AsyncRead for WithHyperIo<I>
where
    I: tokio::io::AsyncRead,
{
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_read(cx, buf)
    }
}
/// [`WithHyperIo<I>`] exposes its inner `I`'s [`AsyncWrite`] implementation.
///
/// [`AsyncWrite`]: tokio::io::AsyncWrite
impl<I> tokio::io::AsyncWrite for WithHyperIo<I>
where
    I: tokio::io::AsyncWrite,
{
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        self.project().inner.poll_write(cx, buf)
    }
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_flush(cx)
    }
    #[inline]
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_shutdown(cx)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        self.inner.is_write_vectored()
    }
    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        self.project().inner.poll_write_vectored(cx, bufs)
    }
}

View File

@@ -0,0 +1,178 @@
use pin_project_lite::pin_project;
use std::{
pin::Pin,
task::{Context, Poll},
};
pin_project! {
/// Extends an underlying [`hyper`] I/O with [`tokio`] I/O implementations.
///
/// This implements [`AsyncRead`] and [`AsyncWrite`] given an inner type that implements
/// [`Read`] and [`Write`], respectively.
#[derive(Debug)]
pub struct WithTokioIo<I> {
#[pin]
inner: I,
}
}
// ==== impl WithTokioIo =====
/// [`WithTokioIo<I>`] is [`AsyncRead`] if `I` is [`Read`].
///
/// [`AsyncRead`]: tokio::io::AsyncRead
/// [`Read`]: hyper::rt::Read
impl<I> tokio::io::AsyncRead for WithTokioIo<I>
where
I: hyper::rt::Read,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
tbuf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<Result<(), std::io::Error>> {
//let init = tbuf.initialized().len();
let filled = tbuf.filled().len();
let sub_filled = unsafe {
let mut buf = hyper::rt::ReadBuf::uninit(tbuf.unfilled_mut());
match hyper::rt::Read::poll_read(self.project().inner, cx, buf.unfilled()) {
Poll::Ready(Ok(())) => buf.filled().len(),
other => return other,
}
};
let n_filled = filled + sub_filled;
// At least sub_filled bytes had to have been initialized.
let n_init = sub_filled;
unsafe {
tbuf.assume_init(n_init);
tbuf.set_filled(n_filled);
}
Poll::Ready(Ok(()))
}
}
/// [`WithTokioIo<I>`] is [`AsyncWrite`] if `I` is [`Write`].
///
/// [`AsyncWrite`]: tokio::io::AsyncWrite
/// [`Write`]: hyper::rt::Write
impl<I> tokio::io::AsyncWrite for WithTokioIo<I>
where
I: hyper::rt::Write,
{
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
hyper::rt::Write::poll_write(self.project().inner, cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
hyper::rt::Write::poll_flush(self.project().inner, cx)
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
hyper::rt::Write::poll_shutdown(self.project().inner, cx)
}
fn is_write_vectored(&self) -> bool {
hyper::rt::Write::is_write_vectored(&self.inner)
}
fn poll_write_vectored(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<Result<usize, std::io::Error>> {
hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs)
}
}
/// [`WithTokioIo<I>`] exposes its inner `I`'s [`Write`] implementation.
///
/// Each method forwards directly to the wrapped `I`, written here with
/// fully-qualified trait syntax to make the dispatch target explicit.
///
/// [`Write`]: hyper::rt::Write
impl<I> hyper::rt::Write for WithTokioIo<I>
where
    I: hyper::rt::Write,
{
    #[inline]
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        hyper::rt::Write::poll_write(self.project().inner, cx, buf)
    }
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Write::poll_flush(self.project().inner, cx)
    }
    #[inline]
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Write::poll_shutdown(self.project().inner, cx)
    }
    #[inline]
    fn is_write_vectored(&self) -> bool {
        hyper::rt::Write::is_write_vectored(&self.inner)
    }
    #[inline]
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        hyper::rt::Write::poll_write_vectored(self.project().inner, cx, bufs)
    }
}
impl<I> WithTokioIo<I> {
/// Wraps the inner I/O in an [`WithTokioIo<I>`]
pub fn new(inner: I) -> Self {
Self { inner }
}
/// Returns a reference to the inner type.
pub fn inner(&self) -> &I {
&self.inner
}
/// Returns a mutable reference to the inner type.
pub fn inner_mut(&mut self) -> &mut I {
&mut self.inner
}
/// Consumes this wrapper and returns the inner type.
pub fn into_inner(self) -> I {
self.inner
}
}
/// [`WithTokioIo<I>`] exposes its inner `I`'s [`Read`] implementation.
///
/// The read is forwarded verbatim to the wrapped `I`; the cursor is handed
/// straight through, so no copying occurs in this adapter.
///
/// [`Read`]: hyper::rt::Read
impl<I> hyper::rt::Read for WithTokioIo<I>
where
    I: hyper::rt::Read,
{
    #[inline]
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: hyper::rt::ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Read::poll_read(self.project().inner, cx, buf)
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,68 @@
//! Upgrade utilities.
use bytes::{Bytes, BytesMut};
use hyper::{
rt::{Read, Write},
upgrade::Upgraded,
};
use crate::common::rewind::Rewind;
/// Tries to downcast the internal trait object to the type passed.
///
/// On success, returns the downcasted parts. On error, returns the Upgraded back.
/// This is a kludge to work around the fact that the machinery provided by
/// [`hyper_util::server::conn::auto`] wraps the inner `T` with a private type
/// that is not reachable from outside the crate.
///
/// [`hyper_util::server::conn::auto`]: crate::server::conn::auto
///
/// This kludge will be removed when this machinery is added back to the main
/// `hyper` code.
pub fn downcast<T>(upgraded: Upgraded) -> Result<Parts<T>, Upgraded>
where
    T: Read + Write + Unpin + 'static,
{
    // Peel off hyper's own parts first; `?` hands the `Upgraded` back to the
    // caller when the concrete type is not `Rewind<T>`.
    let hyper::upgrade::Parts {
        io: rewind,
        mut read_buf,
        ..
    } = upgraded.downcast::<Rewind<T>>()?;
    // If the Rewind wrapper was still holding pre-read bytes, append them
    // after whatever hyper had already buffered.
    match rewind.pre {
        Some(pre) if read_buf.is_empty() => read_buf = pre,
        Some(pre) => {
            let mut merged = BytesMut::from(read_buf);
            merged.extend_from_slice(&pre);
            read_buf = merged.freeze();
        }
        None => {}
    }
    Ok(Parts {
        io: rewind.inner,
        read_buf,
    })
}
/// The deconstructed parts of an [`Upgraded`] type.
///
/// Includes the original IO type, and a read buffer of bytes that the
/// HTTP state machine may have already read before completing an upgrade.
///
/// Marked `#[non_exhaustive]` so fields can be added without a breaking
/// change; construct via [`downcast`] rather than a struct literal.
#[derive(Debug)]
#[non_exhaustive]
pub struct Parts<T> {
    /// The original IO object used before the upgrade.
    pub io: T,
    /// A buffer of bytes that have been read but not processed as HTTP.
    ///
    /// For instance, if the `Connection` is used for an HTTP upgrade request,
    /// it is possible the server sent back the first bytes of the new protocol
    /// along with the response upgrade.
    ///
    /// You will want to check for any existing bytes if you plan to continue
    /// communicating on the IO object.
    pub read_buf: Bytes,
}

View File

@@ -0,0 +1,4 @@
//! Connection utilities.
#[cfg(any(feature = "http1", feature = "http2"))]
pub mod auto;

488
vendor/hyper-util/src/server/graceful.rs vendored Normal file
View File

@@ -0,0 +1,488 @@
//! Utility to gracefully shutdown a server.
//!
//! This module provides a [`GracefulShutdown`] type,
//! which can be used to gracefully shutdown a server.
//!
//! See <https://github.com/hyperium/hyper-util/blob/master/examples/server_graceful.rs>
//! for an example of how to use this.
use std::{
fmt::{self, Debug},
future::Future,
pin::Pin,
task::{self, Poll},
};
use pin_project_lite::pin_project;
use tokio::sync::watch;
/// A graceful shutdown utility
// Purposefully not `Clone`, see `watcher()` method for why.
pub struct GracefulShutdown {
    // Sender half of a watch channel; each watched connection holds a
    // receiver, so `tx.closed()` resolves once all of them are done.
    tx: watch::Sender<()>,
}
/// A watcher side of the graceful shutdown.
///
/// This type can only watch a connection, it cannot trigger a shutdown.
///
/// Call [`GracefulShutdown::watcher()`] to construct one of these.
pub struct Watcher {
    // Receiver kept alive for the duration of the watched connection.
    rx: watch::Receiver<()>,
}
impl GracefulShutdown {
/// Create a new graceful shutdown helper.
pub fn new() -> Self {
let (tx, _) = watch::channel(());
Self { tx }
}
/// Wrap a future for graceful shutdown watching.
pub fn watch<C: GracefulConnection>(&self, conn: C) -> impl Future<Output = C::Output> {
self.watcher().watch(conn)
}
/// Create an owned type that can watch a connection.
///
/// This method allows created an owned type that can be sent onto another
/// task before calling [`Watcher::watch()`].
// Internal: this function exists because `Clone` allows footguns.
// If the `tx` were cloned (or the `rx`), race conditions can happens where
// one task starting a shutdown is scheduled and interwined with a task
// starting to watch a connection, and the "watch version" is one behind.
pub fn watcher(&self) -> Watcher {
let rx = self.tx.subscribe();
Watcher { rx }
}
/// Signal shutdown for all watched connections.
///
/// This returns a `Future` which will complete once all watched
/// connections have shutdown.
pub async fn shutdown(self) {
let Self { tx } = self;
// signal all the watched futures about the change
let _ = tx.send(());
// and then wait for all of them to complete
tx.closed().await;
}
/// Returns the number of the watching connections.
pub fn count(&self) -> usize {
self.tx.receiver_count()
}
}
impl Debug for GracefulShutdown {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("GracefulShutdown").finish()
}
}
impl Default for GracefulShutdown {
fn default() -> Self {
Self::new()
}
}
impl Watcher {
    /// Wrap a future for graceful shutdown watching.
    pub fn watch<C: GracefulConnection>(self, conn: C) -> impl Future<Output = C::Output> {
        let mut rx = self.rx;
        GracefulConnectionFuture::new(conn, async move {
            let _ = rx.changed().await;
            // Return (and thereby keep alive) the receiver until the watched
            // connection itself completes.
            rx
        })
    }
}
impl Debug for Watcher {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): prints "GracefulWatcher", not "Watcher" — preserved
        // as-is since callers may match on the debug output.
        f.write_str("GracefulWatcher")
    }
}
pin_project! {
    // Combines a connection future with a cancellation future: once `cancel`
    // resolves, the connection is asked to shut down gracefully and is then
    // driven to completion.
    struct GracefulConnectionFuture<C, F: Future> {
        #[pin]
        conn: C,
        #[pin]
        cancel: F,
        #[pin]
        // If cancelled, this is held until the inner conn is done.
        cancelled_guard: Option<F::Output>,
    }
}
impl<C, F: Future> GracefulConnectionFuture<C, F> {
    // Build the combinator in its initial (not-yet-cancelled) state.
    fn new(conn: C, cancel: F) -> Self {
        Self {
            conn,
            cancel,
            cancelled_guard: None,
        }
    }
}
impl<C, F: Future> Debug for GracefulConnectionFuture<C, F> {
    // Neither `C` nor `F` is required to be `Debug`, so only the type name
    // is printed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("GracefulConnectionFuture").finish()
    }
}
impl<C, F> Future for GracefulConnectionFuture<C, F>
where
    C: GracefulConnection,
    F: Future,
{
    type Output = C::Output;
    fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Poll `cancel` only while no guard is stored, so it is never polled
        // again after completion and `graceful_shutdown` fires exactly once.
        if this.cancelled_guard.is_none() {
            if let Poll::Ready(guard) = this.cancel.poll(cx) {
                // Storing the output keeps it alive (e.g. the watch receiver)
                // until the connection itself finishes.
                this.cancelled_guard.set(Some(guard));
                this.conn.as_mut().graceful_shutdown();
            }
        }
        // Always drive the connection, cancelled or not.
        this.conn.poll(cx)
    }
}
/// An internal utility trait as an umbrella target for all (hyper) connection
/// types that the [`GracefulShutdown`] can watch.
// Sealed (via `private::Sealed`) so only the connection types listed in this
// module can implement it.
pub trait GracefulConnection: Future<Output = Result<(), Self::Error>> + private::Sealed {
    /// The error type returned by the connection when used as a future.
    type Error;
    /// Start a graceful shutdown process for this connection.
    fn graceful_shutdown(self: Pin<&mut Self>);
}
// Each impl below simply forwards `graceful_shutdown` to the inherent method
// of the corresponding hyper (or crate-local) connection type.
#[cfg(feature = "http1")]
impl<I, B, S> GracefulConnection for hyper::server::conn::http1::Connection<I, S>
where
    S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
    B: hyper::body::Body + 'static,
    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
{
    type Error = hyper::Error;
    fn graceful_shutdown(self: Pin<&mut Self>) {
        hyper::server::conn::http1::Connection::graceful_shutdown(self);
    }
}
#[cfg(feature = "http2")]
impl<I, B, S, E> GracefulConnection for hyper::server::conn::http2::Connection<I, S, E>
where
    S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
    B: hyper::body::Body + 'static,
    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
{
    type Error = hyper::Error;
    fn graceful_shutdown(self: Pin<&mut Self>) {
        hyper::server::conn::http2::Connection::graceful_shutdown(self);
    }
}
#[cfg(feature = "server-auto")]
impl<I, B, S, E> GracefulConnection for crate::server::conn::auto::Connection<'_, I, S, E>
where
    S: hyper::service::Service<http::Request<hyper::body::Incoming>, Response = http::Response<B>>,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    S::Future: 'static,
    I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
    B: hyper::body::Body + 'static,
    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
{
    type Error = Box<dyn std::error::Error + Send + Sync>;
    fn graceful_shutdown(self: Pin<&mut Self>) {
        crate::server::conn::auto::Connection::graceful_shutdown(self);
    }
}
#[cfg(feature = "server-auto")]
impl<I, B, S, E> GracefulConnection
    for crate::server::conn::auto::UpgradeableConnection<'_, I, S, E>
where
    S: hyper::service::Service<http::Request<hyper::body::Incoming>, Response = http::Response<B>>,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    S::Future: 'static,
    // `Send` is additionally required here because the upgradeable connection
    // may move the IO onto another task on upgrade.
    I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
    B: hyper::body::Body + 'static,
    B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
{
    type Error = Box<dyn std::error::Error + Send + Sync>;
    fn graceful_shutdown(self: Pin<&mut Self>) {
        crate::server::conn::auto::UpgradeableConnection::graceful_shutdown(self);
    }
}
// Sealed-trait pattern: `Sealed` lives in a private module, so downstream
// crates cannot implement `GracefulConnection` for their own types. The impl
// list here mirrors the `GracefulConnection` impls above (plus the http1
// `UpgradeableConnection`).
mod private {
    pub trait Sealed {}
    #[cfg(feature = "http1")]
    impl<I, B, S> Sealed for hyper::server::conn::http1::Connection<I, S>
    where
        S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
        S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
        B: hyper::body::Body + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
    }
    #[cfg(feature = "http1")]
    impl<I, B, S> Sealed for hyper::server::conn::http1::UpgradeableConnection<I, S>
    where
        S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
        S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
        B: hyper::body::Body + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
    }
    #[cfg(feature = "http2")]
    impl<I, B, S, E> Sealed for hyper::server::conn::http2::Connection<I, S, E>
    where
        S: hyper::service::HttpService<hyper::body::Incoming, ResBody = B>,
        S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
        B: hyper::body::Body + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
    {
    }
    #[cfg(feature = "server-auto")]
    impl<I, B, S, E> Sealed for crate::server::conn::auto::Connection<'_, I, S, E>
    where
        S: hyper::service::Service<
            http::Request<hyper::body::Incoming>,
            Response = http::Response<B>,
        >,
        S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        S::Future: 'static,
        I: hyper::rt::Read + hyper::rt::Write + Unpin + 'static,
        B: hyper::body::Body + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
    {
    }
    #[cfg(feature = "server-auto")]
    impl<I, B, S, E> Sealed for crate::server::conn::auto::UpgradeableConnection<'_, I, S, E>
    where
        S: hyper::service::Service<
            http::Request<hyper::body::Incoming>,
            Response = http::Response<B>,
        >,
        S::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        S::Future: 'static,
        I: hyper::rt::Read + hyper::rt::Write + Unpin + Send + 'static,
        B: hyper::body::Body + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        E: hyper::rt::bounds::Http2ServerConnExec<S::Future, B>,
    {
    }
}
#[cfg(test)]
mod test {
    use super::*;
    use pin_project_lite::pin_project;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    pin_project! {
        // Test double for a connection: completes when `future` does, and
        // counts how many times graceful shutdown was requested.
        #[derive(Debug)]
        struct DummyConnection<F> {
            #[pin]
            future: F,
            shutdown_counter: Arc<AtomicUsize>,
        }
    }
    impl<F> private::Sealed for DummyConnection<F> {}
    impl<F: Future> GracefulConnection for DummyConnection<F> {
        type Error = ();
        fn graceful_shutdown(self: Pin<&mut Self>) {
            self.shutdown_counter.fetch_add(1, Ordering::SeqCst);
        }
    }
    impl<F: Future> Future for DummyConnection<F> {
        type Output = Result<(), ()>;
        fn poll(self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> Poll<Self::Output> {
            match self.project().future.poll(cx) {
                Poll::Ready(_) => Poll::Ready(Ok(())),
                Poll::Pending => Poll::Pending,
            }
        }
    }
    // Connections that finish promptly after the signal: shutdown() resolves
    // and every connection saw exactly one graceful_shutdown call.
    #[cfg(not(miri))]
    #[tokio::test]
    async fn test_graceful_shutdown_ok() {
        let graceful = GracefulShutdown::new();
        let shutdown_counter = Arc::new(AtomicUsize::new(0));
        let (dummy_tx, _) = tokio::sync::broadcast::channel(1);
        for i in 1..=3 {
            let mut dummy_rx = dummy_tx.subscribe();
            let shutdown_counter = shutdown_counter.clone();
            let future = async move {
                tokio::time::sleep(std::time::Duration::from_millis(i * 10)).await;
                let _ = dummy_rx.recv().await;
            };
            let dummy_conn = DummyConnection {
                future,
                shutdown_counter,
            };
            let conn = graceful.watch(dummy_conn);
            tokio::spawn(async move {
                conn.await.unwrap();
            });
        }
        assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
        let _ = dummy_tx.send(());
        tokio::select! {
            _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => {
                panic!("timeout")
            },
            _ = graceful.shutdown() => {
                assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3);
            }
        }
    }
    // Connections that are still running when shutdown is signalled:
    // shutdown() must wait for all of them.
    #[cfg(not(miri))]
    #[tokio::test]
    async fn test_graceful_shutdown_delayed_ok() {
        let graceful = GracefulShutdown::new();
        let shutdown_counter = Arc::new(AtomicUsize::new(0));
        for i in 1..=3 {
            let shutdown_counter = shutdown_counter.clone();
            //tokio::time::sleep(std::time::Duration::from_millis(i * 5)).await;
            let future = async move {
                tokio::time::sleep(std::time::Duration::from_millis(i * 50)).await;
            };
            let dummy_conn = DummyConnection {
                future,
                shutdown_counter,
            };
            let conn = graceful.watch(dummy_conn);
            tokio::spawn(async move {
                conn.await.unwrap();
            });
        }
        assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
        tokio::select! {
            _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => {
                panic!("timeout")
            },
            _ = graceful.shutdown() => {
                assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3);
            }
        }
    }
    // A single watcher-derived GracefulShutdown watching several connections
    // at once (1 + 2 + 3 = 6 graceful_shutdown calls expected).
    #[cfg(not(miri))]
    #[tokio::test]
    async fn test_graceful_shutdown_multi_per_watcher_ok() {
        let graceful = GracefulShutdown::new();
        let shutdown_counter = Arc::new(AtomicUsize::new(0));
        for i in 1..=3 {
            let shutdown_counter = shutdown_counter.clone();
            let mut futures = Vec::new();
            for u in 1..=i {
                let future = tokio::time::sleep(std::time::Duration::from_millis(u * 50));
                let dummy_conn = DummyConnection {
                    future,
                    shutdown_counter: shutdown_counter.clone(),
                };
                let conn = graceful.watch(dummy_conn);
                futures.push(conn);
            }
            tokio::spawn(async move {
                futures_util::future::join_all(futures).await;
            });
        }
        assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
        tokio::select! {
            _ = tokio::time::sleep(std::time::Duration::from_millis(200)) => {
                panic!("timeout")
            },
            _ = graceful.shutdown() => {
                assert_eq!(shutdown_counter.load(Ordering::SeqCst), 6);
            }
        }
    }
    // One connection never completes: shutdown() must NOT resolve, but all
    // three connections should still have been asked to shut down.
    #[cfg(not(miri))]
    #[tokio::test]
    async fn test_graceful_shutdown_timeout() {
        let graceful = GracefulShutdown::new();
        let shutdown_counter = Arc::new(AtomicUsize::new(0));
        for i in 1..=3 {
            let shutdown_counter = shutdown_counter.clone();
            let future = async move {
                if i == 1 {
                    std::future::pending::<()>().await
                } else {
                    std::future::ready(()).await
                }
            };
            let dummy_conn = DummyConnection {
                future,
                shutdown_counter,
            };
            let conn = graceful.watch(dummy_conn);
            tokio::spawn(async move {
                conn.await.unwrap();
            });
        }
        assert_eq!(shutdown_counter.load(Ordering::SeqCst), 0);
        tokio::select! {
            _ = tokio::time::sleep(std::time::Duration::from_millis(100)) => {
                assert_eq!(shutdown_counter.load(Ordering::SeqCst), 3);
            },
            _ = graceful.shutdown() => {
                panic!("shutdown should not be completed: as not all our conns finish")
            }
        }
    }
}

6
vendor/hyper-util/src/server/mod.rs vendored Normal file
View File

@@ -0,0 +1,6 @@
//! Server utilities.
pub mod conn;
#[cfg(feature = "server-graceful")]
pub mod graceful;

72
vendor/hyper-util/src/service/glue.rs vendored Normal file
View File

@@ -0,0 +1,72 @@
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use super::Oneshot;
/// A tower [`Service`][tower-svc] converted into a hyper [`Service`][hyper-svc].
///
/// This wraps an inner tower service `S` in a [`hyper::service::Service`] implementation. See
/// the module-level documentation of [`service`][crate::service] for more information about using
/// [`tower`][tower] services and middleware with [`hyper`].
///
/// [hyper-svc]: hyper::service::Service
/// [tower]: https://docs.rs/tower/latest/tower/
/// [tower-svc]: https://docs.rs/tower/latest/tower/trait.Service.html
#[derive(Debug, Copy, Clone)]
pub struct TowerToHyperService<S> {
    // The wrapped tower service; cloned per request (see the Service impl).
    service: S,
}
impl<S> TowerToHyperService<S> {
/// Create a new [`TowerToHyperService`] from a tower service.
pub fn new(tower_service: S) -> Self {
Self {
service: tower_service,
}
}
}
impl<S, R> hyper::service::Service<R> for TowerToHyperService<S>
where
    S: tower_service::Service<R> + Clone,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = TowerToHyperServiceFuture<S, R>;
    fn call(&self, req: R) -> Self::Future {
        // hyper's `call` takes `&self`, while tower's takes `&mut self`:
        // clone the inner service and let `Oneshot` drive the clone through
        // readiness and the call.
        let future = Oneshot::new(self.service.clone(), req);
        TowerToHyperServiceFuture { future }
    }
}
pin_project! {
    /// Response future for [`TowerToHyperService`].
    ///
    /// This future is acquired by [`call`][hyper::service::Service::call]ing a
    /// [`TowerToHyperService`].
    pub struct TowerToHyperServiceFuture<S, R>
    where
        S: tower_service::Service<R>,
    {
        #[pin]
        future: Oneshot<S, R>,
    }
}
impl<S, R> Future for TowerToHyperServiceFuture<S, R>
where
    S: tower_service::Service<R>,
{
    type Output = Result<S::Response, S::Error>;
    // Pure delegation to the inner `Oneshot` via pin projection.
    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().future.poll(cx)
    }
}

32
vendor/hyper-util/src/service/mod.rs vendored Normal file
View File

@@ -0,0 +1,32 @@
//! Service utilities.
//!
//! [`hyper::service`] provides a [`Service`][hyper-svc] trait, representing an asynchronous
//! function from a `Request` to a `Response`. This provides an interface allowing middleware for
//! network application to be written in a modular and reusable way.
//!
//! This submodule provides an assortment of utilities for working with [`Service`][hyper-svc]s.
//! See the module-level documentation of [`hyper::service`] for more information.
//!
//! # Tower
//!
//! While [`hyper`] uses its own notion of a [`Service`][hyper-svc] internally, many other
//! libraries use a library such as [`tower`][tower] to provide the fundamental model of an
//! asynchronous function.
//!
//! The [`TowerToHyperService`] type provided by this submodule can be used to bridge these
//! ecosystems together. By wrapping a [`tower::Service`][tower-svc] in [`TowerToHyperService`],
//! it can be passed into [`hyper`] interfaces that expect a [`hyper::service::Service`].
//!
//! [hyper-svc]: hyper::service::Service
//! [tower]: https://docs.rs/tower/latest/tower/
//! [tower-svc]: https://docs.rs/tower/latest/tower/trait.Service.html
#[cfg(feature = "service")]
mod glue;
#[cfg(any(feature = "client-legacy", feature = "service"))]
mod oneshot;
#[cfg(feature = "service")]
pub use self::glue::{TowerToHyperService, TowerToHyperServiceFuture};
#[cfg(any(feature = "client-legacy", feature = "service"))]
pub(crate) use self::oneshot::Oneshot;

View File

@@ -0,0 +1,62 @@
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use tower_service::Service;
// Vendored from tower::util to reduce dependencies, the code is small enough.
// Not really pub, but used in a trait for bounds
pin_project! {
    // State machine: NotReady (waiting for `poll_ready`) -> Called (driving
    // the call's future) -> Done (terminal; polling again panics).
    #[project = OneshotProj]
    #[derive(Debug)]
    pub enum Oneshot<S: Service<Req>, Req> {
        NotReady {
            svc: S,
            // `Option` so the request can be moved out when the service
            // becomes ready.
            req: Option<Req>,
        },
        Called {
            #[pin]
            fut: S::Future,
        },
        Done,
    }
}
impl<S, Req> Oneshot<S, Req>
where
    S: Service<Req>,
{
    // Start in `NotReady`, holding the service and the pending request.
    pub(crate) const fn new(svc: S, req: Req) -> Self {
        Oneshot::NotReady {
            svc,
            req: Some(req),
        }
    }
}
impl<S, Req> Future for Oneshot<S, Req>
where
    S: Service<Req>,
{
    type Output = Result<S::Response, S::Error>;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Loop so a state transition is followed immediately by polling the
        // new state instead of returning Pending.
        loop {
            let this = self.as_mut().project();
            match this {
                OneshotProj::NotReady { svc, req } => {
                    // Errors from poll_ready/the call are propagated via `?`.
                    ready!(svc.poll_ready(cx))?;
                    // `expect` is safe: `req` is only taken on this one
                    // NotReady -> Called transition.
                    let fut = svc.call(req.take().expect("already called"));
                    self.set(Oneshot::Called { fut });
                }
                OneshotProj::Called { fut } => {
                    let res = ready!(fut.poll(cx))?;
                    self.set(Oneshot::Done);
                    return Poll::Ready(Ok(res));
                }
                // Polling a completed future is a caller bug.
                OneshotProj::Done => panic!("polled after complete"),
            }
        }
    }
}

Binary file not shown.

File diff suppressed because one or more lines are too long

1484
vendor/hyper-util/tests/legacy_client.rs vendored Normal file

File diff suppressed because it is too large Load Diff

478
vendor/hyper-util/tests/proxy.rs vendored Normal file
View File

@@ -0,0 +1,478 @@
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tower_service::Service;
use hyper_util::client::legacy::connect::proxy::{SocksV4, SocksV5, Tunnel};
use hyper_util::client::legacy::connect::HttpConnector;
// Verifies the `Tunnel` connector sends a well-formed HTTP CONNECT request
// and completes once the proxy answers 200 OK.
#[cfg(not(miri))]
#[tokio::test]
async fn test_tunnel_works() {
    let tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let addr = tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{addr}").parse().expect("uri");
    let mut connector = Tunnel::new(proxy_dst, HttpConnector::new());
    // Client side: drive the connector against the fake proxy below.
    let t1 = tokio::spawn(async move {
        let _conn = connector
            .call("https://hyper.rs".parse().unwrap())
            .await
            .expect("tunnel");
    });
    // Fake proxy: assert the exact CONNECT bytes, then reply 200.
    let t2 = tokio::spawn(async move {
        let (mut io, _) = tcp.accept().await.expect("accept");
        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(
            &buf[..n],
            b"CONNECT hyper.rs:443 HTTP/1.1\r\nHost: hyper.rs:443\r\n\r\n"
        );
        io.write_all(b"HTTP/1.1 200 OK\r\n\r\n")
            .await
            .expect("write 1");
    });
    t1.await.expect("task 1");
    t2.await.expect("task 2");
}
// End-to-end SOCKS5 handshake without authentication (method 0x00), then a
// blind byte tunnel between client and target.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_without_auth_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
    let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let target_addr = target_tcp.local_addr().expect("local_addr");
    let target_dst = format!("http://{target_addr}").parse().expect("uri");
    let mut connector = SocksV5::new(proxy_dst, HttpConnector::new());
    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let conn = connector.call(target_dst).await.expect("tunnel");
        let mut tcp = conn.into_inner();
        tcp.write_all(b"Hello World!").await.expect("write 1");
        let mut buf = [0u8; 64];
        let n = tcp.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Goodbye!");
    });
    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];
        // negotiation req/res: VER=5, 1 method, NO AUTH (0x00)
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x00]);
        to_client.write_all(&[0x05, 0x00]).await.expect("write 1");
        // command req/rs: CONNECT (0x01) to IPv4 127.0.0.1:target_port
        let [p1, p2] = target_addr.port().to_be_bytes();
        let [ip1, ip2, ip3, ip4] = [0x7f, 0x00, 0x00, 0x01];
        let message = [0x05, 0x01, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        let n = to_client.read(&mut buf).await.expect("read 2");
        assert_eq!(&buf[..n], message);
        let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
        // reply: succeeded (0x00)
        let message = [0x05, 0x00, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        to_client.write_all(&message).await.expect("write 2");
        // 12 = len("Hello World!"), 8 = len("Goodbye!")
        let (from_client, from_target) =
            tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
                .await
                .expect("proxy");
        assert_eq!(from_client, 12);
        assert_eq!(from_target, 8)
    });
    // Target server
    //
    // Will accept connection from proxy server
    // Will receive "Hello World!" from the client and return "Goodbye!"
    let t3 = tokio::spawn(async move {
        let (mut io, _) = target_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Hello World!");
        io.write_all(b"Goodbye!").await.expect("write 1");
    });
    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
    t3.await.expect("task - target");
}
// Same as the no-auth test but negotiating username/password auth
// (method 0x02, RFC 1929 subnegotiation) before the CONNECT.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_with_auth_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
    let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let target_addr = target_tcp.local_addr().expect("local_addr");
    let target_dst = format!("http://{target_addr}").parse().expect("uri");
    let mut connector =
        SocksV5::new(proxy_dst, HttpConnector::new()).with_auth("user".into(), "pass".into());
    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let conn = connector.call(target_dst).await.expect("tunnel");
        let mut tcp = conn.into_inner();
        tcp.write_all(b"Hello World!").await.expect("write 1");
        let mut buf = [0u8; 64];
        let n = tcp.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Goodbye!");
    });
    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];
        // negotiation req/res: client offers USERNAME/PASSWORD (0x02)
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
        to_client.write_all(&[0x05, 0x02]).await.expect("write 1");
        // auth req/res: VER=1, len-prefixed username and password
        let n = to_client.read(&mut buf).await.expect("read 2");
        let [u1, u2, u3, u4] = b"user";
        let [p1, p2, p3, p4] = b"pass";
        let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
        assert_eq!(&buf[..n], message);
        to_client.write_all(&[0x01, 0x00]).await.expect("write 2");
        // command req/res: CONNECT to IPv4 127.0.0.1:target_port
        let n = to_client.read(&mut buf).await.expect("read 3");
        let [p1, p2] = target_addr.port().to_be_bytes();
        let [ip1, ip2, ip3, ip4] = [0x7f, 0x00, 0x00, 0x01];
        let message = [0x05, 0x01, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        assert_eq!(&buf[..n], message);
        let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
        let message = [0x05, 0x00, 0x00, 0x01, ip1, ip2, ip3, ip4, p1, p2];
        to_client.write_all(&message).await.expect("write 3");
        // 12 = len("Hello World!"), 8 = len("Goodbye!")
        let (from_client, from_target) =
            tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
                .await
                .expect("proxy");
        assert_eq!(from_client, 12);
        assert_eq!(from_target, 8)
    });
    // Target server
    //
    // Will accept connection from proxy server
    // Will receive "Hello World!" from the client and return "Goodbye!"
    let t3 = tokio::spawn(async move {
        let (mut io, _) = target_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Hello World!");
        io.write_all(b"Goodbye!").await.expect("write 1");
    });
    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
    t3.await.expect("task - target");
}
// With `local_dns(false)` the connector must pass the hostname through to
// the proxy (ATYP 0x03, domain), letting the proxy resolve it.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_with_server_resolved_domain_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_addr = format!("http://{proxy_addr}").parse().expect("uri");
    let mut connector = SocksV5::new(proxy_addr, HttpConnector::new())
        .with_auth("user".into(), "pass".into())
        .local_dns(false);
    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    let t1 = tokio::spawn(async move {
        let _conn = connector
            .call("https://hyper.rs:443".try_into().unwrap())
            .await
            .expect("tunnel");
    });
    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];
        // negotiation req/res
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
        to_client.write_all(&[0x05, 0x02]).await.expect("write 1");
        // auth req/res
        let n = to_client.read(&mut buf).await.expect("read 2");
        let [u1, u2, u3, u4] = b"user";
        let [p1, p2, p3, p4] = b"pass";
        let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
        assert_eq!(&buf[..n], message);
        to_client.write_all(&[0x01, 0x00]).await.expect("write 2");
        // command req/res: ATYP 0x03 = len-prefixed domain name
        let n = to_client.read(&mut buf).await.expect("read 3");
        let host = "hyper.rs";
        let port: u16 = 443;
        let mut message = vec![0x05, 0x01, 0x00, 0x03, host.len() as u8];
        message.extend(host.bytes());
        message.extend(port.to_be_bytes());
        assert_eq!(&buf[..n], message);
        let mut message = vec![0x05, 0x00, 0x00, 0x03, host.len() as u8];
        message.extend(host.bytes());
        message.extend(port.to_be_bytes());
        to_client.write_all(&message).await.expect("write 3");
    });
    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
}
// With `local_dns(true)` the connector resolves the hostname itself and must
// send an IP address (ATYP 0x01 or 0x04) in the CONNECT command.
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_with_locally_resolved_domain_works() {
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_addr = format!("http://{proxy_addr}").parse().expect("uri");
    let mut connector = SocksV5::new(proxy_addr, HttpConnector::new())
        .with_auth("user".into(), "pass".into())
        .local_dns(true);
    // Client
    //
    // Will use `SocksV5` to establish proxy tunnel.
    let t1 = tokio::spawn(async move {
        let _conn = connector
            .call("https://hyper.rs:443".try_into().unwrap())
            .await
            .expect("tunnel");
    });
    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 513];
        // negotiation req/res
        let n = to_client.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], [0x05, 0x01, 0x02]);
        to_client.write_all(&[0x05, 0x02]).await.expect("write 1");
        // auth req/res
        let n = to_client.read(&mut buf).await.expect("read 2");
        let [u1, u2, u3, u4] = b"user";
        let [p1, p2, p3, p4] = b"pass";
        let message = [0x01, 0x04, *u1, *u2, *u3, *u4, 0x04, *p1, *p2, *p3, *p4];
        assert_eq!(&buf[..n], message);
        to_client.write_all(&[0x01, 0x00]).await.expect("write 2");
        // command req/res
        let n = to_client.read(&mut buf).await.expect("read 3");
        let message = [0x05, 0x01, 0x00];
        assert_eq!(&buf[..3], message);
        assert!(buf[3] == 0x01 || buf[3] == 0x04); // IPv4 or IPv6
        // total length = 4 header bytes + 4 (IPv4) or 16 (IPv6) + 2 port bytes
        assert_eq!(n, 4 + 4 * (buf[3] as usize) + 2);
        let message = vec![0x05, 0x00, 0x00, 0x01, 0, 0, 0, 0, 0, 0];
        to_client.write_all(&message).await.expect("write 3");
    });
    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
}
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v4_works() {
    // End-to-end SOCKSv4 happy path: the client tunnels through the proxy to
    // a real local target, then application bytes flow in both directions.
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
    let target_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let target_addr = target_tcp.local_addr().expect("local_addr");
    let target_dst = format!("http://{target_addr}").parse().expect("uri");
    let mut connector = SocksV4::new(proxy_dst, HttpConnector::new());
    // Client
    //
    // Will use `SocksV4` to establish proxy tunnel.
    // Will send "Hello World!" to the target and receive "Goodbye!" back.
    let t1 = tokio::spawn(async move {
        let conn = connector.call(target_dst).await.expect("tunnel");
        let mut tcp = conn.into_inner();
        tcp.write_all(b"Hello World!").await.expect("write 1");
        let mut buf = [0u8; 64];
        let n = tcp.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Goodbye!");
    });
    // Proxy
    //
    // Will receive CONNECT command from client.
    // Will connect to target and send success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 512];
        // Expected SOCKSv4 request: VN=4, CD=1 (CONNECT), DSTPORT, DSTIP,
        // then an empty USERID terminated by NUL.
        let [p1, p2] = target_addr.port().to_be_bytes();
        let [ip1, ip2, ip3, ip4] = [127, 0, 0, 1];
        let message = [4, 0x01, p1, p2, ip1, ip2, ip3, ip4, 0, 0];
        let n = to_client.read(&mut buf).await.expect("read");
        assert_eq!(&buf[..n], message);
        let mut to_target = TcpStream::connect(target_addr).await.expect("connect");
        // Reply: VN=0, CD=90 (request granted), port and address echoed back.
        let message = [0, 90, p1, p2, ip1, ip2, ip3, ip4];
        to_client.write_all(&message).await.expect("write");
        let (from_client, from_target) =
            tokio::io::copy_bidirectional(&mut to_client, &mut to_target)
                .await
                .expect("proxy");
        // "Hello World!" is 12 bytes; "Goodbye!" is 8 bytes.
        assert_eq!(from_client, 12);
        assert_eq!(from_target, 8);
    });
    // Target server
    //
    // Will accept connection from proxy server
    // Will receive "Hello World!" from the client and return "Goodbye!"
    let t3 = tokio::spawn(async move {
        let (mut io, _) = target_tcp.accept().await.expect("accept");
        let mut buf = [0u8; 64];
        let n = io.read(&mut buf).await.expect("read 1");
        assert_eq!(&buf[..n], b"Hello World!");
        io.write_all(b"Goodbye!").await.expect("write 1");
    });
    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
    t3.await.expect("task - target");
}
#[cfg(not(miri))]
#[tokio::test]
async fn test_socks_v5_optimistic_works() {
    // Verifies `send_optimistically(true)`: the client writes negotiation,
    // auth, and CONNECT messages up front, without waiting for each reply —
    // the proxy below reads all 22 request bytes in a single `read_exact`.
    let proxy_tcp = TcpListener::bind("127.0.0.1:0").await.expect("bind");
    let proxy_addr = proxy_tcp.local_addr().expect("local_addr");
    let proxy_dst = format!("http://{proxy_addr}").parse().expect("uri");
    // The target is never connected to; only the handshake is exercised.
    let target_addr = std::net::SocketAddr::new([127, 0, 0, 1].into(), 1234);
    let target_dst = format!("http://{target_addr}").parse().expect("uri");
    let mut connector = SocksV5::new(proxy_dst, HttpConnector::new())
        .with_auth("ABC".into(), "XYZ".into())
        .send_optimistically(true);
    // Client
    //
    // Will use `SocksV5` to establish the proxy tunnel, sending the whole
    // handshake optimistically; no application data is exchanged.
    let t1 = tokio::spawn(async move {
        let _ = connector.call(target_dst).await.expect("tunnel");
    });
    // Proxy
    //
    // Will receive SOCKS handshake from client.
    // Will connect to target and success code back to client.
    // Will blindly tunnel between client and target.
    let t2 = tokio::spawn(async move {
        let (mut to_client, _) = proxy_tcp.accept().await.expect("accept");
        let [p1, p2] = target_addr.port().to_be_bytes();
        // 22 bytes = 3 (negotiation) + 9 (auth) + 10 (CONNECT).
        let mut buf = [0; 22];
        let request = vec![
            5, 1, 2, // Negotiation
            1, 3, 65, 66, 67, 3, 88, 89, 90, // Auth ("ABC"/"XYZ")
            5, 1, 0, 1, 127, 0, 0, 1, p1, p2, // Reply
        ];
        let response = vec![
            5, 2, // Negotiation,
            1, 0, // Auth,
            5, 0, 0, 1, 127, 0, 0, 1, p1, p2, // Reply
        ];
        // Accept all handshake messages in one read — this only succeeds if
        // the client really did send them without waiting for replies.
        to_client.read_exact(&mut buf).await.expect("read");
        assert_eq!(request.as_slice(), buf);
        // Send all handshake messages back
        to_client
            .write_all(response.as_slice())
            .await
            .expect("write");
        to_client.flush().await.expect("flush");
    });
    t1.await.expect("task - client");
    t2.await.expect("task - proxy");
}

View File

@@ -0,0 +1,175 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use futures_channel::mpsc;
use futures_util::TryFutureExt;
use hyper::Uri;
use tokio::io::{self, AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use hyper::rt::ReadBufCursor;
use hyper_util::client::legacy::connect::HttpConnector;
use hyper_util::client::legacy::connect::{Connected, Connection};
use hyper_util::rt::TokioIo;
/// Test connector wrapping [`HttpConnector`] that records observable side
/// effects (connection counts, stream drops) so tests can assert on them.
#[derive(Clone)]
pub struct DebugConnector {
    /// Underlying connector that performs the real TCP connect.
    pub http: HttpConnector,
    /// Each produced `DebugStream` signals on this channel when dropped.
    pub closes: mpsc::Sender<()>,
    /// Count of `call` invocations; shared across clones of the connector.
    pub connects: Arc<AtomicUsize>,
    /// When set, produced streams report `Connected::proxy(true)`.
    pub is_proxy: bool,
    /// When set, produced streams report HTTP/2 as ALPN-negotiated.
    pub alpn_h2: bool,
}
impl DebugConnector {
    /// Builds a connector over a default `HttpConnector`, with a detached
    /// close-notification channel (its receiver is dropped immediately, so
    /// drop signals are simply discarded).
    pub fn new() -> DebugConnector {
        let (closes_tx, _closes_rx) = mpsc::channel(10);
        Self::with_http_and_closes(HttpConnector::new(), closes_tx)
    }

    /// Wraps `http`, reporting every stream drop on `closes`. Counters start
    /// at zero and both proxy/ALPN flags start disabled.
    pub fn with_http_and_closes(http: HttpConnector, closes: mpsc::Sender<()>) -> DebugConnector {
        DebugConnector {
            http,
            closes,
            connects: Arc::new(AtomicUsize::new(0)),
            is_proxy: false,
            alpn_h2: false,
        }
    }

    /// Marks connections produced by this connector as proxied.
    pub fn proxy(mut self) -> Self {
        self.is_proxy = true;
        self
    }
}
impl tower_service::Service<Uri> for DebugConnector {
    type Response = DebugStream;
    type Error = <HttpConnector as tower_service::Service<Uri>>::Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // don't forget to check inner service is ready :)
        tower_service::Service::<Uri>::poll_ready(&mut self.http, cx)
    }

    /// Counts the connection attempt, then delegates to the inner
    /// `HttpConnector`, wrapping the resulting stream in a `DebugStream`
    /// that carries this connector's flags and drop-notification channel.
    fn call(&mut self, dst: Uri) -> Self::Future {
        self.connects.fetch_add(1, Ordering::SeqCst);
        let on_drop = self.closes.clone();
        let proxied = self.is_proxy;
        let alpn_h2 = self.alpn_h2;
        let fut = self.http.call(dst).map_ok(move |tcp| DebugStream {
            tcp,
            on_drop,
            is_alpn_h2: alpn_h2,
            is_proxy: proxied,
        });
        Box::pin(fut)
    }
}
/// Connection stream returned by [`DebugConnector`]: delegates all I/O to
/// the wrapped TCP stream and signals `on_drop` when dropped.
pub struct DebugStream {
    // The real transport; all read/write/shutdown calls forward here.
    tcp: TokioIo<TcpStream>,
    // Notified (best-effort) from `Drop` so tests can observe closes.
    on_drop: mpsc::Sender<()>,
    // Reported via `Connected::negotiated_h2()` when set.
    is_alpn_h2: bool,
    // Reported via `Connected::proxy(...)`.
    is_proxy: bool,
}
impl Drop for DebugStream {
    fn drop(&mut self) {
        // Best-effort close notification: ignore errors when the receiver
        // is gone or the channel is full — dropping must never fail.
        let _ = self.on_drop.try_send(());
    }
}
impl Connection for DebugStream {
    /// Reports connection metadata: the underlying stream's info with the
    /// proxy flag applied, plus HTTP/2 ALPN when configured.
    fn connected(&self) -> Connected {
        let base = self.tcp.connected().proxy(self.is_proxy);
        match self.is_alpn_h2 {
            true => base.negotiated_h2(),
            false => base,
        }
    }
}
// hyper's own I/O traits: forward straight to the wrapped `TokioIo<TcpStream>`.
impl hyper::rt::Read for DebugStream {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: ReadBufCursor<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Read::poll_read(Pin::new(&mut self.tcp), cx, buf)
    }
}
// Pure delegation of hyper's write trait to the wrapped `TokioIo<TcpStream>`.
impl hyper::rt::Write for DebugStream {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        hyper::rt::Write::poll_write(Pin::new(&mut self.tcp), cx, buf)
    }
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Write::poll_flush(Pin::new(&mut self.tcp), cx)
    }
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        hyper::rt::Write::poll_shutdown(Pin::new(&mut self.tcp), cx)
    }
    // Forward vectored-write support so hyper can use it when available.
    fn is_write_vectored(&self) -> bool {
        hyper::rt::Write::is_write_vectored(&self.tcp)
    }
    fn poll_write_vectored(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        hyper::rt::Write::poll_write_vectored(Pin::new(&mut self.tcp), cx, bufs)
    }
}
// Tokio's write trait, so tests can use the stream directly with tokio I/O
// utilities; `inner_mut()` unwraps `TokioIo` to reach the raw `TcpStream`.
impl AsyncWrite for DebugStream {
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), io::Error>> {
        Pin::new(self.tcp.inner_mut()).poll_shutdown(cx)
    }
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), io::Error>> {
        Pin::new(self.tcp.inner_mut()).poll_flush(cx)
    }
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>> {
        Pin::new(self.tcp.inner_mut()).poll_write(cx, buf)
    }
}
// Tokio's read trait: delegate to the raw `TcpStream` inside `TokioIo`.
impl AsyncRead for DebugStream {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        Pin::new(self.tcp.inner_mut()).poll_read(cx, buf)
    }
}