chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

21
vendor/tower/src/balance/error.rs vendored Normal file
View File

@@ -0,0 +1,21 @@
//! Error types for the [`tower::balance`] middleware.
//!
//! [`tower::balance`]: crate::balance
use std::fmt;
/// The balancer's endpoint discovery stream failed.
///
/// Wraps the underlying boxed error produced by the discovery stream; the
/// cause is available via [`std::error::Error::source`].
#[derive(Debug)]
pub struct Discover(pub(crate) crate::BoxError);
impl fmt::Display for Discover {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "load balancer discovery error: {}", self.0)
}
}
impl std::error::Error for Discover {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&*self.0)
}
}

50
vendor/tower/src/balance/mod.rs vendored Normal file
View File

@@ -0,0 +1,50 @@
//! Middleware that allows balancing load among multiple services.
//!
//! In larger systems, multiple endpoints are often available for a given service. As load
//! increases, you want to ensure that that load is spread evenly across the available services.
//! Otherwise, clients could see spikes in latency if their request goes to a particularly loaded
//! service, even when spare capacity is available to handle that request elsewhere.
//!
//! This module provides the [`p2c`] middleware, which implements the "[Power of
//! Two Random Choices]" algorithm. This is a simple but robust technique for
//! spreading load across services with only inexact load measurements. Use this
//! if the set of available services is not within your control, and you simply
//! want to spread load among that set of services.
//!
//! [Power of Two Random Choices]: http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
//!
//! # Examples
//!
//! ```rust
//! # #[cfg(feature = "util")]
//! # #[cfg(feature = "load")]
//! # fn warnings_are_errors() {
//! use tower::balance::p2c::Balance;
//! use tower::load::Load;
//! use tower::{Service, ServiceExt};
//! use futures_util::pin_mut;
//! # use futures_core::Stream;
//! # use futures_util::StreamExt;
//!
//! async fn spread<Req, S: Service<Req> + Load>(svc1: S, svc2: S, reqs: impl Stream<Item = Req>)
//! where
//! S::Error: Into<tower::BoxError>,
//! # // this bound is pretty unfortunate, and the compiler does _not_ help
//! S::Metric: std::fmt::Debug,
//! {
//! // Spread load evenly across the two services
//! let p2c = Balance::new(tower::discover::ServiceList::new(vec![svc1, svc2]));
//!
//! // Issue all the requests that come in.
//! // Some will go to svc1, some will go to svc2.
//! pin_mut!(reqs);
//! let mut responses = p2c.call_all(reqs);
//! while let Some(rsp) = responses.next().await {
//! // ...
//! }
//! }
//! # }
//! ```
pub mod error;
pub mod p2c;

60
vendor/tower/src/balance/p2c/layer.rs vendored Normal file
View File

@@ -0,0 +1,60 @@
use super::MakeBalance;
use std::{fmt, marker::PhantomData};
use tower_layer::Layer;
/// Construct load balancers ([`Balance`]) over dynamic service sets ([`Discover`]) produced by the
/// "inner" service in response to requests coming from the "outer" service.
///
/// This construction may seem a little odd at first glance. This is not a layer that takes
/// requests and produces responses in the traditional sense. Instead, it is more like
/// [`MakeService`] in that it takes service _descriptors_ (see `Target` on [`MakeService`])
/// and produces _services_. Since [`Balance`] spreads requests across a _set_ of services,
/// the inner service should produce a [`Discover`], not just a single
/// [`Service`], given a service descriptor.
///
/// See the [module-level documentation](crate::balance) for details on load balancing.
///
/// [`Balance`]: crate::balance::p2c::Balance
/// [`Discover`]: crate::discover::Discover
/// [`MakeService`]: crate::MakeService
/// [`Service`]: crate::Service
pub struct MakeBalanceLayer<D, Req> {
    // The layer itself is stateless; `PhantomData<fn(D, Req)>` records the
    // type parameters without owning values of those types, keeping the
    // marker `Send + Sync` regardless of `D` and `Req`.
    _marker: PhantomData<fn(D, Req)>,
}
impl<D, Req> MakeBalanceLayer<D, Req> {
    /// Build balancers using operating system entropy.
    ///
    /// This is a `const fn`, so the layer can be created in constant/static
    /// contexts.
    pub const fn new() -> Self {
        Self {
            _marker: PhantomData,
        }
    }
}
impl<D, Req> Default for MakeBalanceLayer<D, Req> {
fn default() -> Self {
Self::new()
}
}
impl<D, Req> Clone for MakeBalanceLayer<D, Req> {
fn clone(&self) -> Self {
Self {
_marker: PhantomData,
}
}
}
impl<S, Req> Layer<S> for MakeBalanceLayer<S, Req> {
    type Service = MakeBalance<S, Req>;
    /// Wraps the inner `make_discover` service in a [`MakeBalance`], which
    /// will turn each discover stream it produces into a balancer.
    fn layer(&self, make_discover: S) -> Self::Service {
        MakeBalance::new(make_discover)
    }
}
impl<D, Req> fmt::Debug for MakeBalanceLayer<D, Req> {
    // Hand-written so `Debug` is available without `D: Debug`/`Req: Debug`;
    // the only field is a marker, so just the type name is printed.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MakeBalanceLayer").finish()
    }
}

124
vendor/tower/src/balance/p2c/make.rs vendored Normal file
View File

@@ -0,0 +1,124 @@
use super::Balance;
use crate::discover::Discover;
use pin_project_lite::pin_project;
use std::hash::Hash;
use std::marker::PhantomData;
use std::{
fmt,
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
/// Constructs load balancers over dynamic service sets produced by a wrapped "inner" service.
///
/// This is effectively an implementation of [`MakeService`] except that it forwards the service
/// descriptors (`Target`) to an inner service (`S`), and expects that service to produce a
/// service set in the form of a [`Discover`]. It then wraps the service set in a [`Balance`]
/// before returning it as the "made" service.
///
/// See the [module-level documentation](crate::balance) for details on load balancing.
///
/// [`MakeService`]: crate::MakeService
/// [`Discover`]: crate::discover::Discover
/// [`Balance`]: crate::balance::p2c::Balance
pub struct MakeBalance<S, Req> {
    // The inner "make" service that yields a discover stream per target.
    inner: S,
    // `fn(Req)` records the request type without owning one, so the struct
    // stays `Send + Sync` regardless of `Req`.
    _marker: PhantomData<fn(Req)>,
}
pin_project! {
    /// A [`Balance`] in the making.
    ///
    /// Resolves once the wrapped future produces a discover stream, which is
    /// then wrapped in a [`Balance`].
    ///
    /// [`Balance`]: crate::balance::p2c::Balance
    pub struct MakeFuture<F, Req> {
        // Future returned by the inner make-service's `call`.
        #[pin]
        inner: F,
        _marker: PhantomData<fn(Req)>,
    }
}
impl<S, Req> MakeBalance<S, Req> {
    /// Build balancers using operating system entropy.
    ///
    /// `make_discover` is the inner service that, given a target, produces
    /// the discover stream of endpoints each balancer will spread load over.
    pub const fn new(make_discover: S) -> Self {
        Self {
            inner: make_discover,
            _marker: PhantomData,
        }
    }
}
impl<S, Req> Clone for MakeBalance<S, Req>
where
    S: Clone,
{
    // Manual impl: deriving `Clone` would also demand `Req: Clone`, which is
    // unnecessary since `Req` only appears inside `PhantomData`.
    fn clone(&self) -> Self {
        let inner = self.inner.clone();
        Self {
            inner,
            _marker: PhantomData,
        }
    }
}
impl<S, Target, Req> Service<Target> for MakeBalance<S, Req>
where
    S: Service<Target>,
    S::Response: Discover,
    <S::Response as Discover>::Key: Hash,
    <S::Response as Discover>::Service: Service<Req>,
    <<S::Response as Discover>::Service as Service<Req>>::Error: Into<crate::BoxError>,
{
    type Response = Balance<S::Response, Req>;
    type Error = S::Error;
    type Future = MakeFuture<S::Future, Req>;
    // Readiness is delegated entirely to the inner make-service.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }
    // Ask the inner service for a discover stream for `target`; the returned
    // future wraps the stream in a `Balance` once it resolves.
    fn call(&mut self, target: Target) -> Self::Future {
        MakeFuture {
            inner: self.inner.call(target),
            _marker: PhantomData,
        }
    }
}
impl<S, Req> fmt::Debug for MakeBalance<S, Req>
where
    S: fmt::Debug,
{
    // `Req` deliberately carries no `Debug` bound; only the inner service
    // is rendered.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MakeBalance")
            .field("inner", &self.inner)
            .finish()
    }
}
impl<F, T, E, Req> Future for MakeFuture<F, Req>
where
    F: Future<Output = Result<T, E>>,
    T: Discover,
    <T as Discover>::Key: Hash,
    <T as Discover>::Service: Service<Req>,
    <<T as Discover>::Service as Service<Req>>::Error: Into<crate::BoxError>,
{
    type Output = Result<Balance<T, Req>, E>;

    /// Waits for the inner future's discover stream, then wraps it in a
    /// [`Balance`]. Errors from the inner future are propagated unchanged.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let discover = ready!(self.project().inner.poll(cx))?;
        Poll::Ready(Ok(Balance::new(discover)))
    }
}
impl<F, Req> fmt::Debug for MakeFuture<F, Req>
where
    F: fmt::Debug,
{
    // Only the wrapped future is printed; the `Req` marker carries no data.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("MakeFuture")
            .field("inner", &self.inner)
            .finish()
    }
}

41
vendor/tower/src/balance/p2c/mod.rs vendored Normal file
View File

@@ -0,0 +1,41 @@
//! This module implements the "[Power of Two Random Choices]" load balancing algorithm.
//!
//! It is a simple but robust technique for spreading load across services with only inexact load
//! measurements. As its name implies, whenever a request comes in, it samples two ready services
//! at random, and issues the request to whichever service is less loaded. How loaded a service is
//! is determined by the return value of [`Load`](crate::load::Load).
//!
//! As described in the [Finagle Guide][finagle]:
//!
//! > The algorithm randomly picks two services from the set of ready endpoints and
//! > selects the least loaded of the two. By repeatedly using this strategy, we can
//! > expect a manageable upper bound on the maximum load of any server.
//! >
//! > The maximum load variance between any two servers is bound by `ln(ln(n))` where
//! > `n` is the number of servers in the cluster.
//!
//! The balance service and layer implementations rely on _service discovery_ to provide the
//! underlying set of services to balance requests across. This happens through the
//! [`Discover`](crate::discover::Discover) trait, which is essentially a [`Stream`] that indicates
//! when services become available or go away. If you have a fixed set of services, consider using
//! [`ServiceList`](crate::discover::ServiceList).
//!
//! Since the load balancer needs to perform _random_ choices, the constructors in this module
//! usually come in two forms: one that uses randomness provided by the operating system, and one
//! that lets you specify the random seed to use. Usually the former is what you'll want, though
//! the latter may come in handy for reproducibility or to reduce reliance on the operating system.
//!
//! [Power of Two Random Choices]: http://www.eecs.harvard.edu/~michaelm/postscripts/handbook2001.pdf
//! [finagle]: https://twitter.github.io/finagle/guide/Clients.html#power-of-two-choices-p2c-least-loaded
//! [`Stream`]: https://docs.rs/futures/0.3/futures/stream/trait.Stream.html
mod layer;
mod make;
mod service;
#[cfg(test)]
mod test;
pub use layer::MakeBalanceLayer;
pub use make::{MakeBalance, MakeFuture};
pub use service::Balance;

258
vendor/tower/src/balance/p2c/service.rs vendored Normal file
View File

@@ -0,0 +1,258 @@
use super::super::error;
use crate::discover::{Change, Discover};
use crate::load::Load;
use crate::ready_cache::{error::Failed, ReadyCache};
use crate::util::rng::{sample_floyd2, HasherRng, Rng};
use futures_util::future::{self, TryFutureExt};
use std::hash::Hash;
use std::marker::PhantomData;
use std::{
fmt,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
use tracing::{debug, trace};
/// Efficiently distributes requests across an arbitrary number of services.
///
/// See the [module-level documentation](..) for details.
///
/// Note that [`Balance`] requires that the [`Discover`] you use is [`Unpin`] in order to implement
/// [`Service`]. This is because it needs to be accessed from [`Service::poll_ready`], which takes
/// `&mut self`. You can achieve this easily by wrapping your [`Discover`] in [`Box::pin`] before you
/// construct the [`Balance`] instance. For more details, see [#319].
///
/// [`Box::pin`]: std::boxed::Box::pin()
/// [#319]: https://github.com/tower-rs/tower/issues/319
pub struct Balance<D, Req>
where
    D: Discover,
    D::Key: Hash,
{
    // Stream of endpoint insertions and removals.
    discover: D,
    // Tracks discovered services, separating ready from still-pending ones.
    services: ReadyCache<D::Key, D::Service, Req>,
    // Index into the ready set chosen by the last successful `poll_ready`,
    // consumed by the next `call`.
    ready_index: Option<usize>,
    // Randomness source for the power-of-two-choices sampling.
    rng: Box<dyn Rng + Send + Sync>,
    _req: PhantomData<Req>,
}
impl<D: Discover, Req> fmt::Debug for Balance<D, Req>
where
    D: fmt::Debug,
    D::Key: Hash + fmt::Debug,
    D::Service: fmt::Debug,
{
    // `rng`, `ready_index`, and the `Req` marker are intentionally omitted
    // from the output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Balance")
            .field("discover", &self.discover)
            .field("services", &self.services)
            .finish()
    }
}
impl<D, Req> Balance<D, Req>
where
D: Discover,
D::Key: Hash,
D::Service: Service<Req>,
<D::Service as Service<Req>>::Error: Into<crate::BoxError>,
{
/// Constructs a load balancer that uses operating system entropy.
pub fn new(discover: D) -> Self {
Self::from_rng(discover, HasherRng::default())
}
/// Constructs a load balancer seeded with the provided random number generator.
pub fn from_rng<R: Rng + Send + Sync + 'static>(discover: D, rng: R) -> Self {
let rng = Box::new(rng);
Self {
rng,
discover,
services: ReadyCache::default(),
ready_index: None,
_req: PhantomData,
}
}
/// Returns the number of endpoints currently tracked by the balancer.
pub fn len(&self) -> usize {
self.services.len()
}
/// Returns whether or not the balancer is empty.
pub fn is_empty(&self) -> bool {
self.services.is_empty()
}
}
impl<D, Req> Balance<D, Req>
where
    D: Discover + Unpin,
    D::Key: Hash + Clone,
    D::Error: Into<crate::BoxError>,
    D::Service: Service<Req> + Load,
    <D::Service as Load>::Metric: std::fmt::Debug,
    <D::Service as Service<Req>>::Error: Into<crate::BoxError>,
{
    /// Polls `discover` for updates, adding new items to `not_ready`.
    ///
    /// Removals may alter the order of either `ready` or `not_ready`.
    fn update_pending_from_discover(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<(), error::Discover>>> {
        debug!("updating from discover");
        // Drain every change the discover stream has available; `ready!`
        // bails out with `Pending` once there is nothing more to consume.
        loop {
            match ready!(Pin::new(&mut self.discover).poll_discover(cx))
                .transpose()
                .map_err(|e| error::Discover(e.into()))?
            {
                // The discover stream is exhausted.
                None => return Poll::Ready(None),
                Some(Change::Remove(key)) => {
                    trace!("remove");
                    self.services.evict(&key);
                }
                Some(Change::Insert(key, svc)) => {
                    trace!("insert");
                    // If this service already existed in the set, it will be
                    // replaced as the new one becomes ready.
                    self.services.push(key, svc);
                }
            }
        }
    }
    /// Drives all pending services, promoting any that become ready.
    fn promote_pending_to_ready(&mut self, cx: &mut Context<'_>) {
        loop {
            match self.services.poll_pending(cx) {
                Poll::Ready(Ok(())) => {
                    // There are no remaining pending services.
                    debug_assert_eq!(self.services.pending_len(), 0);
                    break;
                }
                Poll::Pending => {
                    // None of the pending services are ready.
                    debug_assert!(self.services.pending_len() > 0);
                    break;
                }
                Poll::Ready(Err(error)) => {
                    // An individual service was lost; continue processing
                    // pending services.
                    debug!(%error, "dropping failed endpoint");
                }
            }
        }
        trace!(
            ready = %self.services.ready_len(),
            pending = %self.services.pending_len(),
            "poll_unready"
        );
    }
    /// Performs P2C on inner services to find a suitable endpoint.
    fn p2c_ready_index(&mut self) -> Option<usize> {
        match self.services.ready_len() {
            // No ready endpoints: nothing to choose from.
            0 => None,
            // A single ready endpoint: no sampling needed.
            1 => Some(0),
            len => {
                // Get two distinct random indexes (in a random order) and
                // compare the loads of the service at each index.
                let [aidx, bidx] = sample_floyd2(&mut self.rng, len as u64);
                debug_assert_ne!(aidx, bidx, "random indices must be distinct");
                let aload = self.ready_index_load(aidx as usize);
                let bload = self.ready_index_load(bidx as usize);
                // Ties go to `a`, which was itself sampled at random.
                let chosen = if aload <= bload { aidx } else { bidx };
                trace!(
                    a.index = aidx,
                    a.load = ?aload,
                    b.index = bidx,
                    b.load = ?bload,
                    chosen = if chosen == aidx { "a" } else { "b" },
                    "p2c",
                );
                Some(chosen as usize)
            }
        }
    }
    /// Accesses a ready endpoint by index and returns its current load.
    fn ready_index_load(&self, index: usize) -> <D::Service as Load>::Metric {
        let (_, svc) = self.services.get_ready_index(index).expect("invalid index");
        svc.load()
    }
}
impl<D, Req> Service<Req> for Balance<D, Req>
where
    D: Discover + Unpin,
    D::Key: Hash + Clone,
    D::Error: Into<crate::BoxError>,
    D::Service: Service<Req> + Load,
    <D::Service as Load>::Metric: std::fmt::Debug,
    <D::Service as Service<Req>>::Error: Into<crate::BoxError>,
{
    type Response = <D::Service as Service<Req>>::Response;
    // Endpoint errors are boxed so that discovery and endpoint failures
    // share one error type.
    type Error = crate::BoxError;
    type Future = future::MapErr<
        <D::Service as Service<Req>>::Future,
        fn(<D::Service as Service<Req>>::Error) -> crate::BoxError,
    >;
    /// Becomes ready when at least one endpoint is ready, recording the p2c
    /// choice in `ready_index` for the subsequent `call`.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // `ready_index` may have already been set by a prior invocation. These
        // updates cannot disturb the order of existing ready services.
        let _ = self.update_pending_from_discover(cx)?;
        self.promote_pending_to_ready(cx);
        loop {
            // If a service has already been selected, ensure that it is ready.
            // This ensures that the underlying service is ready immediately
            // before a request is dispatched to it (i.e. in the same task
            // invocation). If, e.g., a failure detector has changed the state
            // of the service, it may be evicted from the ready set so that
            // another service can be selected.
            if let Some(index) = self.ready_index.take() {
                match self.services.check_ready_index(cx, index) {
                    Ok(true) => {
                        // The service remains ready.
                        self.ready_index = Some(index);
                        return Poll::Ready(Ok(()));
                    }
                    Ok(false) => {
                        // The service is no longer ready. Try to find a new one.
                        trace!("ready service became unavailable");
                    }
                    Err(Failed(_, error)) => {
                        // The ready endpoint failed, so log the error and try
                        // to find a new one.
                        debug!(%error, "endpoint failed");
                    }
                }
            }
            // Select a new service by comparing two at random and using the
            // lesser-loaded service.
            self.ready_index = self.p2c_ready_index();
            if self.ready_index.is_none() {
                debug_assert_eq!(self.services.ready_len(), 0);
                // We have previously registered interest in updates from
                // discover and pending services.
                return Poll::Pending;
            }
        }
    }
    /// Dispatches `request` to the endpoint selected by the last `poll_ready`.
    ///
    /// Panics if called without a prior successful `poll_ready` (standard
    /// `Service` contract).
    fn call(&mut self, request: Req) -> Self::Future {
        let index = self.ready_index.take().expect("called before ready");
        self.services
            .call_ready_index(index, request)
            .map_err(Into::into)
    }
}

125
vendor/tower/src/balance/p2c/test.rs vendored Normal file
View File

@@ -0,0 +1,125 @@
use crate::discover::ServiceList;
use crate::load;
use futures_util::pin_mut;
use std::task::Poll;
use tokio_test::{assert_pending, assert_ready, assert_ready_ok, task};
use tower_test::{assert_request_eq, mock};
use super::*;
#[tokio::test]
async fn empty() {
    // With no endpoints in the discover stream, the balancer can never
    // become ready.
    let empty: Vec<load::Constant<mock::Mock<(), &'static str>, usize>> = vec![];
    let disco = ServiceList::new(empty);
    let mut svc = mock::Spawn::new(Balance::new(disco));
    assert_pending!(svc.poll_ready());
}
#[tokio::test]
async fn single_endpoint() {
    let (mut svc, mut handle) = mock::spawn_with(|s| {
        // Constant load of 0: with one endpoint, p2c degenerates to always
        // picking it.
        let mock = load::Constant::new(s, 0);
        let disco = ServiceList::new(vec![mock].into_iter());
        Balance::new(disco)
    });
    // Endpoint is discovered but not yet ready.
    handle.allow(0);
    assert_pending!(svc.poll_ready());
    assert_eq!(
        svc.get_ref().len(),
        1,
        "balancer must have discovered endpoint"
    );
    // Once the endpoint admits a request, the balancer becomes ready and
    // forwards the call.
    handle.allow(1);
    assert_ready_ok!(svc.poll_ready());
    let mut fut = task::spawn(svc.call(()));
    assert_request_eq!(handle, ()).send_response(1);
    assert_eq!(assert_ready_ok!(fut.poll()), 1);
    handle.allow(1);
    assert_ready_ok!(svc.poll_ready());
    // A failing endpoint must be evicted from the balancer's set.
    handle.send_error("endpoint lost");
    assert_pending!(svc.poll_ready());
    assert!(
        svc.get_ref().is_empty(),
        "balancer must drop failed endpoints"
    );
}
#[tokio::test]
async fn two_endpoints_with_equal_load() {
    let (mock_a, handle_a) = mock::pair();
    let (mock_b, handle_b) = mock::pair();
    // Equal constant loads: either endpoint is a valid p2c choice.
    let mock_a = load::Constant::new(mock_a, 1);
    let mock_b = load::Constant::new(mock_b, 1);
    pin_mut!(handle_a);
    pin_mut!(handle_b);
    let disco = ServiceList::new(vec![mock_a, mock_b].into_iter());
    let mut svc = mock::Spawn::new(Balance::new(disco));
    // Neither endpoint admits requests yet.
    handle_a.allow(0);
    handle_b.allow(0);
    assert_pending!(svc.poll_ready());
    assert_eq!(
        svc.get_ref().len(),
        2,
        "balancer must have discovered both endpoints"
    );
    // Only `a` is ready: the request must be routed to it.
    handle_a.allow(1);
    handle_b.allow(0);
    assert_ready_ok!(
        svc.poll_ready(),
        "must be ready when one of two services is ready"
    );
    {
        let mut fut = task::spawn(svc.call(()));
        assert_request_eq!(handle_a, ()).send_response("a");
        assert_eq!(assert_ready_ok!(fut.poll()), "a");
    }
    // Now only `b` is ready: the request must be routed to it.
    handle_a.allow(0);
    handle_b.allow(1);
    assert_ready_ok!(
        svc.poll_ready(),
        "must be ready when both endpoints are ready"
    );
    {
        let mut fut = task::spawn(svc.call(()));
        assert_request_eq!(handle_b, ()).send_response("b");
        assert_eq!(assert_ready_ok!(fut.poll()), "b");
    }
    // Both ready: the choice is random, so answer whichever endpoint
    // actually receives each request.
    handle_a.allow(1);
    handle_b.allow(1);
    for _ in 0..2 {
        assert_ready_ok!(
            svc.poll_ready(),
            "must be ready when both endpoints are ready"
        );
        let mut fut = task::spawn(svc.call(()));
        for (ref mut h, c) in &mut [(&mut handle_a, "a"), (&mut handle_b, "b")] {
            if let Poll::Ready(Some((_, tx))) = h.as_mut().poll_request() {
                tracing::info!("using {}", c);
                tx.send_response(c);
                h.allow(0);
            }
        }
        assert_ready_ok!(fut.poll());
    }
    // Losing one endpoint must shrink the set without killing the balancer.
    handle_a.send_error("endpoint lost");
    assert_pending!(svc.poll_ready());
    assert_eq!(
        svc.get_ref().len(),
        1,
        "balancer must drop failed endpoints",
    );
}

68
vendor/tower/src/buffer/error.rs vendored Normal file
View File

@@ -0,0 +1,68 @@
//! Error types for the `Buffer` middleware.
use crate::BoxError;
use std::{fmt, sync::Arc};
/// An error produced by a [`Service`] wrapped by a [`Buffer`]
///
/// [`Service`]: crate::Service
/// [`Buffer`]: crate::buffer::Buffer
#[derive(Debug)]
pub struct ServiceError {
    // Shared via `Arc` so the same underlying failure can be reported to
    // every caller that had a request queued when the service died.
    inner: Arc<BoxError>,
}
/// An error produced when a buffer's worker closes unexpectedly.
pub struct Closed {
    // Private unit field prevents construction outside this module.
    _p: (),
}
// ===== impl ServiceError =====
impl ServiceError {
    /// Wraps `inner` in a shareable `ServiceError`.
    pub(crate) fn new(inner: BoxError) -> ServiceError {
        ServiceError {
            inner: Arc::new(inner),
        }
    }

    // Private to avoid exposing `Clone` trait as part of the public API
    pub(crate) fn clone(&self) -> ServiceError {
        ServiceError {
            inner: Arc::clone(&self.inner),
        }
    }
}
impl fmt::Display for ServiceError {
    /// Renders the shared underlying error with an identifying prefix.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let Self { inner } = self;
        write!(fmt, "buffered service failed: {}", inner)
    }
}
impl std::error::Error for ServiceError {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
Some(&**self.inner)
}
}
// ===== impl Closed =====
impl Closed {
pub(crate) fn new() -> Self {
Closed { _p: () }
}
}
impl fmt::Debug for Closed {
    // Hand-written so the private `_p` field is not shown.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_tuple("Closed").finish()
    }
}
impl fmt::Display for Closed {
    /// Fixed message: this error carries no further detail.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        write!(fmt, "buffer's worker closed unexpectedly")
    }
}
impl std::error::Error for Closed {}

78
vendor/tower/src/buffer/future.rs vendored Normal file
View File

@@ -0,0 +1,78 @@
//! Future types for the [`Buffer`] middleware.
//!
//! [`Buffer`]: crate::buffer::Buffer
use super::{error::Closed, message};
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
pin_project! {
    /// Future that completes when the buffered service eventually services the submitted request.
    #[derive(Debug)]
    pub struct ResponseFuture<T> {
        // State machine: waiting for the worker's reply, driving the inner
        // response future, or reporting an up-front failure.
        #[pin]
        state: ResponseState<T>,
    }
}
pin_project! {
    #[project = ResponseStateProj]
    #[derive(Debug)]
    enum ResponseState<T> {
        // The request could never be enqueued; yield this error once.
        Failed {
            error: Option<crate::BoxError>,
        },
        // Waiting on the oneshot for the worker to send back the inner
        // service's response future.
        Rx {
            #[pin]
            rx: message::Rx<T>,
        },
        // Driving the inner service's response future to completion.
        Poll {
            #[pin]
            fut: T,
        },
    }
}
impl<T> ResponseFuture<T> {
pub(crate) fn new(rx: message::Rx<T>) -> Self {
ResponseFuture {
state: ResponseState::Rx { rx },
}
}
pub(crate) fn failed(err: crate::BoxError) -> Self {
ResponseFuture {
state: ResponseState::Failed { error: Some(err) },
}
}
}
impl<F, T, E> Future for ResponseFuture<F>
where
    F: Future<Output = Result<T, E>>,
    E: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Drive the state machine: each arm either returns a final result or
        // transitions to the next state and loops.
        loop {
            match this.state.as_mut().project() {
                ResponseStateProj::Failed { error } => {
                    return Poll::Ready(Err(error.take().expect("polled after error")));
                }
                // First wait for the worker to hand back the inner service's
                // response future...
                ResponseStateProj::Rx { rx } => match ready!(rx.poll(cx)) {
                    Ok(Ok(fut)) => this.state.set(ResponseState::Poll { fut }),
                    // The inner service failed while handling this request.
                    Ok(Err(e)) => return Poll::Ready(Err(e.into())),
                    // The worker dropped the sender without replying.
                    Err(_) => return Poll::Ready(Err(Closed::new().into())),
                },
                // ...then drive that future to completion.
                ResponseStateProj::Poll { fut } => return fut.poll(cx).map_err(Into::into),
            }
        }
    }
}

72
vendor/tower/src/buffer/layer.rs vendored Normal file
View File

@@ -0,0 +1,72 @@
use super::service::Buffer;
use std::{fmt, marker::PhantomData};
use tower_layer::Layer;
use tower_service::Service;
/// Adds an mpsc buffer in front of an inner service.
///
/// The default Tokio executor is used to run the given service,
/// which means that this layer can only be used on the Tokio runtime.
///
/// See the module documentation for more details.
pub struct BufferLayer<Request> {
    // Channel capacity handed to each `Buffer` this layer constructs.
    bound: usize,
    // `fn(Request)` records the request type without owning one, keeping
    // the layer `Send + Sync` regardless of `Request`.
    _p: PhantomData<fn(Request)>,
}
impl<Request> BufferLayer<Request> {
    /// Creates a new [`BufferLayer`] with the provided `bound`.
    ///
    /// `bound` gives the maximal number of requests that can be queued for the service before
    /// backpressure is applied to callers.
    ///
    /// # A note on choosing a `bound`
    ///
    /// When [`Buffer`]'s implementation of [`poll_ready`] returns [`Poll::Ready`], it reserves a
    /// slot in the channel for the forthcoming [`call`]. However, if this call doesn't arrive,
    /// this reserved slot may be held up for a long time. As a result, it's advisable to set
    /// `bound` to be at least the maximum number of concurrent requests the [`Buffer`] will see.
    /// If you do not, all the slots in the buffer may be held up by futures that have just called
    /// [`poll_ready`] but will not issue a [`call`], which prevents other senders from issuing new
    /// requests.
    ///
    /// [`Poll::Ready`]: std::task::Poll::Ready
    /// [`call`]: crate::Service::call
    /// [`poll_ready`]: crate::Service::poll_ready
    pub const fn new(bound: usize) -> Self {
        // `const fn` so the layer can be built in constant/static contexts.
        BufferLayer {
            bound,
            _p: PhantomData,
        }
    }
}
impl<S, Request> Layer<S> for BufferLayer<Request>
where
    S: Service<Request> + Send + 'static,
    S::Future: Send,
    S::Error: Into<crate::BoxError> + Send + Sync,
    Request: Send + 'static,
{
    type Service = Buffer<Request, S::Future>;
    /// Wraps `service` in a [`Buffer`] with this layer's `bound`, spawning
    /// the background worker (the bounds above mirror `Buffer::new`'s).
    fn layer(&self, service: S) -> Self::Service {
        Buffer::new(service, self.bound)
    }
}
impl<Request> fmt::Debug for BufferLayer<Request> {
    // Hand-written so `Debug` is available without `Request: Debug`; only
    // the bound is meaningful to print.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("BufferLayer")
            .field("bound", &self.bound)
            .finish()
    }
}
impl<Request> Clone for BufferLayer<Request> {
fn clone(&self) -> Self {
*self
}
}
impl<Request> Copy for BufferLayer<Request> {}

16
vendor/tower/src/buffer/message.rs vendored Normal file
View File

@@ -0,0 +1,16 @@
use super::error::ServiceError;
use tokio::sync::oneshot;
/// Message sent over buffer
#[derive(Debug)]
pub(crate) struct Message<Request, Fut> {
    // The caller's request, forwarded verbatim to the inner service.
    pub(crate) request: Request,
    // One-shot channel on which the worker reports this call's outcome.
    pub(crate) tx: Tx<Fut>,
    // The caller's tracing span, so worker-side events are attributed to
    // the originating request.
    pub(crate) span: tracing::Span,
}
/// Response sender
pub(crate) type Tx<Fut> = oneshot::Sender<Result<Fut, ServiceError>>;
/// Response receiver
pub(crate) type Rx<Fut> = oneshot::Receiver<Result<Fut, ServiceError>>;

47
vendor/tower/src/buffer/mod.rs vendored Normal file
View File

@@ -0,0 +1,47 @@
//! Middleware that provides a buffered mpsc channel to a service.
//!
//! Sometimes you want to give out multiple handles to a single service, and allow each handle to
//! enqueue requests. That is, you want a [`Service`] to be [`Clone`]. This module allows you to do
//! that by placing the service behind a multi-producer, single-consumer buffering channel. Clients
//! enqueue requests by sending on the channel from any of the handles ([`Buffer`]), and the single
//! service running elsewhere (usually spawned) receives and services the requests one by one. Each
//! request is enqueued alongside a response channel that allows the service to report the result
//! of the request back to the caller.
//!
//! # Examples
//!
//! ```rust
//! # #[cfg(feature = "util")]
//! use tower::buffer::Buffer;
//! # #[cfg(feature = "util")]
//! use tower::{Service, ServiceExt};
//! # #[cfg(feature = "util")]
//! async fn mass_produce<S: Service<usize>>(svc: S)
//! where
//! S: 'static + Send,
//! S::Error: Send + Sync + std::error::Error,
//! S::Future: Send
//! {
//! let svc = Buffer::new(svc, 10 /* buffer length */);
//! for _ in 0..10 {
//! let mut svc = svc.clone();
//! tokio::spawn(async move {
//! for i in 0usize.. {
//! svc.ready().await.expect("service crashed").call(i).await;
//! }
//! });
//! }
//! }
//! ```
//!
//! [`Service`]: crate::Service
pub mod error;
pub mod future;
mod layer;
mod message;
mod service;
mod worker;
pub use self::layer::BufferLayer;
pub use self::service::Buffer;

144
vendor/tower/src/buffer/service.rs vendored Normal file
View File

@@ -0,0 +1,144 @@
use super::{
future::ResponseFuture,
message::Message,
worker::{Handle, Worker},
};
use std::{
future::Future,
task::{Context, Poll},
};
use tokio::sync::{mpsc, oneshot};
use tokio_util::sync::PollSender;
use tower_service::Service;
/// Adds an mpsc buffer in front of an inner service.
///
/// See the module documentation for more details.
#[derive(Debug)]
pub struct Buffer<Req, F> {
    // Pollable sender half of the channel to the background worker.
    tx: PollSender<Message<Req, F>>,
    // Shared handle used to retrieve the worker's failure, if any.
    handle: Handle,
}
impl<Req, F> Buffer<Req, F>
where
    F: 'static,
{
    /// Creates a new [`Buffer`] wrapping `service`.
    ///
    /// `bound` gives the maximal number of requests that can be queued for the service before
    /// backpressure is applied to callers.
    ///
    /// The default Tokio executor is used to run the given service, which means that this method
    /// must be called while on the Tokio runtime.
    ///
    /// # A note on choosing a `bound`
    ///
    /// When [`Buffer`]'s implementation of [`poll_ready`] returns [`Poll::Ready`], it reserves a
    /// slot in the channel for the forthcoming [`call`]. However, if this call doesn't arrive,
    /// this reserved slot may be held up for a long time. As a result, it's advisable to set
    /// `bound` to be at least the maximum number of concurrent requests the [`Buffer`] will see.
    /// If you do not, all the slots in the buffer may be held up by futures that have just called
    /// [`poll_ready`] but will not issue a [`call`], which prevents other senders from issuing new
    /// requests.
    ///
    /// [`Poll::Ready`]: std::task::Poll::Ready
    /// [`call`]: crate::Service::call
    /// [`poll_ready`]: crate::Service::poll_ready
    pub fn new<S>(service: S, bound: usize) -> Self
    where
        S: Service<Req, Future = F> + Send + 'static,
        F: Send,
        S::Error: Into<crate::BoxError> + Send + Sync,
        Req: Send + 'static,
    {
        let (service, worker) = Self::pair(service, bound);
        // Run the worker on the ambient Tokio runtime (see doc note above).
        tokio::spawn(worker);
        service
    }
    /// Creates a new [`Buffer`] wrapping `service`, but returns the background worker.
    ///
    /// This is useful if you do not want to spawn directly onto the tokio runtime
    /// but instead want to use your own executor. This will return the [`Buffer`] and
    /// the background `Worker` that you can then spawn.
    pub fn pair<S>(service: S, bound: usize) -> (Self, Worker<S, Req>)
    where
        S: Service<Req, Future = F> + Send + 'static,
        F: Send,
        S::Error: Into<crate::BoxError> + Send + Sync,
        Req: Send + 'static,
    {
        let (tx, rx) = mpsc::channel(bound);
        // The handle lets every Buffer clone observe the worker's failure.
        let (handle, worker) = Worker::new(service, rx);
        let buffer = Self {
            // `PollSender` lets `poll_ready` reserve a send permit up front.
            tx: PollSender::new(tx),
            handle,
        };
        (buffer, worker)
    }
    // Fetch the failure the worker recorded, boxed for the Service error type.
    fn get_worker_error(&self) -> crate::BoxError {
        self.handle.get_error_on_closed()
    }
}
impl<Req, Rsp, F, E> Service<Req> for Buffer<Req, F>
where
    F: Future<Output = Result<Rsp, E>> + Send + 'static,
    E: Into<crate::BoxError>,
    Req: Send + 'static,
{
    type Response = Rsp;
    type Error = crate::BoxError;
    type Future = ResponseFuture<F>;
    /// Ready once a channel slot has been reserved for the next `call`.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // First, check if the worker is still alive.
        if self.tx.is_closed() {
            // If the inner service has errored, then we error here.
            return Poll::Ready(Err(self.get_worker_error()));
        }
        // Poll the sender to acquire a permit.
        self.tx
            .poll_reserve(cx)
            .map_err(|_| self.get_worker_error())
    }
    /// Enqueues `request` for the worker using the permit reserved by
    /// `poll_ready`, returning a future for the eventual response.
    fn call(&mut self, request: Req) -> Self::Future {
        tracing::trace!("sending request to buffer worker");
        // get the current Span so that we can explicitly propagate it to the worker
        // if we didn't do this, events on the worker related to this span wouldn't be counted
        // towards that span since the worker would have no way of entering it.
        let span = tracing::Span::current();
        // If we've made it here, then a channel permit has already been
        // acquired, so we can freely allocate a oneshot.
        let (tx, rx) = oneshot::channel();
        match self.tx.send_item(Message { request, span, tx }) {
            Ok(_) => ResponseFuture::new(rx),
            // If the channel is closed, propagate the error from the worker.
            Err(_) => {
                tracing::trace!("buffer channel closed");
                ResponseFuture::failed(self.get_worker_error())
            }
        }
    }
}
impl<Req, F> Clone for Buffer<Req, F>
where
Req: Send + 'static,
F: Send + 'static,
{
fn clone(&self) -> Self {
Self {
handle: self.handle.clone(),
tx: self.tx.clone(),
}
}
}

226
vendor/tower/src/buffer/worker.rs vendored Normal file
View File

@@ -0,0 +1,226 @@
use super::{
error::{Closed, ServiceError},
message::Message,
};
use std::sync::{Arc, Mutex};
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::sync::mpsc;
use tower_service::Service;
pin_project_lite::pin_project! {
    /// Task that handles processing the buffer. This type should not be used
    /// directly, instead `Buffer` requires an `Executor` that can accept this task.
    ///
    /// The struct is `pub` in the private module and the type is *not* re-exported
    /// as part of the public API. This is the "sealed" pattern to include "private"
    /// types in public traits that are not meant for consumers of the library to
    /// implement (only call).
    #[derive(Debug)]
    pub struct Worker<T, Request>
    where
        T: Service<Request>,
    {
        // A request pulled off the channel that could not yet be dispatched
        // because the service wasn't ready; retried first on the next poll.
        current_message: Option<Message<Request, T::Future>>,
        // Receiving side of the channel that `Buffer` handles send requests on.
        rx: mpsc::Receiver<Message<Request, T::Future>>,
        // The inner service this worker drives.
        service: T,
        // Set once the channel is exhausted; subsequent polls complete
        // immediately.
        finish: bool,
        // The error that stopped this worker, if any; replayed to callers
        // whose requests are still queued.
        failed: Option<ServiceError>,
        // Shared slot through which `failed` is exposed to `Buffer` handles.
        handle: Handle,
    }
}
/// Shared handle through which `Buffer` handles can retrieve the error that
/// caused the worker to shut down, if any.
#[derive(Debug)]
pub(crate) struct Handle {
    // `None` while the worker is healthy; set (exactly once, by
    // `Worker::failed`) to the error that stopped the worker.
    inner: Arc<Mutex<Option<ServiceError>>>,
}
impl<T, Request> Worker<T, Request>
where
    T: Service<Request>,
    T::Error: Into<crate::BoxError>,
{
    /// Create a worker that drives `service` with the requests received on
    /// `rx`, plus a `Handle` through which any worker error can be observed
    /// by the `Buffer` handles.
    pub(crate) fn new(
        service: T,
        rx: mpsc::Receiver<Message<Request, T::Future>>,
    ) -> (Handle, Worker<T, Request>) {
        let handle = Handle {
            // No error yet; `failed` fills this slot at most once.
            inner: Arc::new(Mutex::new(None)),
        };
        let worker = Worker {
            current_message: None,
            finish: false,
            failed: None,
            rx,
            service,
            handle: handle.clone(),
        };
        (handle, worker)
    }
    /// Return the next queued Message that hasn't been canceled.
    ///
    /// If a `Message` is returned, the `bool` is true if this is the first time we received this
    /// message, and false otherwise (i.e., we tried to forward it to the backing service before).
    fn poll_next_msg(
        &mut self,
        cx: &mut Context<'_>,
    ) -> Poll<Option<(Message<Request, T::Future>, bool)>> {
        if self.finish {
            // We've already received None and are shutting down
            return Poll::Ready(None);
        }
        tracing::trace!("worker polling for next message");
        // Prefer the stashed message (one we already dequeued but couldn't
        // dispatch) over pulling a new one off the channel.
        if let Some(msg) = self.current_message.take() {
            // If the oneshot sender is closed, then the receiver is dropped,
            // and nobody cares about the response. If this is the case, we
            // should continue to the next request.
            if !msg.tx.is_closed() {
                tracing::trace!("resuming buffered request");
                return Poll::Ready(Some((msg, false)));
            }
            tracing::trace!("dropping cancelled buffered request");
        }
        // Get the next request
        while let Some(msg) = ready!(Pin::new(&mut self.rx).poll_recv(cx)) {
            if !msg.tx.is_closed() {
                tracing::trace!("processing new request");
                return Poll::Ready(Some((msg, true)));
            }
            // Otherwise, request is canceled, so pop the next one.
            tracing::trace!("dropping cancelled request");
        }
        Poll::Ready(None)
    }
    /// Record `error` as the reason this worker is shutting down and make it
    /// observable to all current and future `Buffer` handles.
    fn failed(&mut self, error: crate::BoxError) {
        // The underlying service failed when we called `poll_ready` on it with the given `error`. We
        // need to communicate this to all the `Buffer` handles. To do so, we wrap up the error in
        // an `Arc`, send that `Arc<E>` to all pending requests, and store it so that subsequent
        // requests will also fail with the same error.
        // Note that we need to handle the case where some handle is concurrently trying to send us
        // a request. We need to make sure that *either* the send of the request fails *or* it
        // receives an error on the `oneshot` it constructed. Specifically, we want to avoid the
        // case where we send errors to all outstanding requests, and *then* the caller sends its
        // request. We do this by *first* exposing the error, *then* closing the channel used to
        // send more requests (so the client will see the error when the send fails), and *then*
        // sending the error to all outstanding requests.
        let error = ServiceError::new(error);
        let mut inner = self.handle.inner.lock().unwrap();
        if inner.is_some() {
            // Future::poll was called after we've already errored out!
            return;
        }
        *inner = Some(error.clone());
        drop(inner);
        self.rx.close();
        // By closing the mpsc::Receiver, we know that poll_next_msg will soon return Ready(None),
        // which will trigger the `self.finish == true` phase. We just need to make sure that any
        // requests that we receive before we've exhausted the receiver receive the error:
        self.failed = Some(error);
    }
}
impl<T, Request> Future for Worker<T, Request>
where
    T: Service<Request>,
    T::Error: Into<crate::BoxError>,
{
    type Output = ();
    // Drive loop: repeatedly pull the next live request, wait for the inner
    // service to become ready, and hand the resulting response future back
    // through the request's oneshot.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if self.finish {
            return Poll::Ready(());
        }
        loop {
            match ready!(self.poll_next_msg(cx)) {
                Some((msg, first)) => {
                    // Enter the request's span so events emitted here are
                    // attributed to the caller that sent this message.
                    let _guard = msg.span.enter();
                    if let Some(ref failed) = self.failed {
                        tracing::trace!("notifying caller about worker failure");
                        let _ = msg.tx.send(Err(failed.clone()));
                        continue;
                    }
                    // Wait for the service to be ready
                    tracing::trace!(
                        resumed = !first,
                        message = "worker received request; waiting for service readiness"
                    );
                    match self.service.poll_ready(cx) {
                        Poll::Ready(Ok(())) => {
                            tracing::debug!(service.ready = true, message = "processing request");
                            let response = self.service.call(msg.request);
                            // Send the response future back to the sender.
                            //
                            // An error means the request had been canceled in-between
                            // our calls, the response future will just be dropped.
                            tracing::trace!("returning response future");
                            let _ = msg.tx.send(Ok(response));
                        }
                        Poll::Pending => {
                            tracing::trace!(service.ready = false, message = "delay");
                            // Put our current message back in its slot. The
                            // span guard must be dropped first, because the
                            // message owns the span.
                            drop(_guard);
                            self.current_message = Some(msg);
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(e)) => {
                            let error = e.into();
                            tracing::debug!({ %error }, "service failed");
                            drop(_guard);
                            self.failed(error);
                            // `failed` stores the error before returning (or
                            // one was already stored), so this expect holds.
                            let _ = msg.tx.send(Err(self
                                .failed
                                .as_ref()
                                .expect("Worker::failed did not set self.failed?")
                                .clone()));
                        }
                    }
                }
                None => {
                    // No more requests _ever_ -- the channel is exhausted.
                    self.finish = true;
                    return Poll::Ready(());
                }
            }
        }
    }
}
impl Handle {
    /// Return the error that caused the worker to shut down, or a generic
    /// `Closed` error if the worker terminated without recording one.
    pub(crate) fn get_error_on_closed(&self) -> crate::BoxError {
        let guard = self.inner.lock().unwrap();
        match guard.as_ref() {
            Some(svc_err) => svc_err.clone().into(),
            None => Closed::new().into(),
        }
    }
}
impl Clone for Handle {
fn clone(&self) -> Handle {
Handle {
inner: self.inner.clone(),
}
}
}

871
vendor/tower/src/builder/mod.rs vendored Normal file
View File

@@ -0,0 +1,871 @@
//! Builder types to compose layers and services
use tower_layer::{Identity, Layer, Stack};
use tower_service::Service;
use std::fmt;
/// Declaratively construct [`Service`] values.
///
/// [`ServiceBuilder`] provides a [builder-like interface][builder] for composing
/// layers to be applied to a [`Service`].
///
/// # Service
///
/// A [`Service`] is a trait representing an asynchronous function of a request
/// to a response. It is similar to `async fn(Request) -> Result<Response, Error>`.
///
/// A [`Service`] is typically bound to a single transport, such as a TCP
/// connection. It defines how _all_ inbound or outbound requests are handled
/// by that connection.
///
/// [builder]: https://doc.rust-lang.org/1.0.0/style/ownership/builders.html
///
/// # Order
///
/// The order in which layers are added impacts how requests are handled. Layers
/// that are added first will be called with the request first. The argument to
/// `service` will be last to see the request.
///
/// ```
/// # // this (and other) doctest is ignored because we don't have a way
/// # // to say that it should only be run with cfg(feature = "...")
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// # #[cfg(all(feature = "buffer", feature = "limit"))]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .buffer(100)
/// .concurrency_limit(10)
/// .service(svc)
/// # ;
/// # }
/// ```
///
/// In the above example, the buffer layer receives the request first followed
/// by `concurrency_limit`. `buffer` enables up to 100 request to be in-flight
/// **on top of** the requests that have already been forwarded to the next
/// layer. Combined with `concurrency_limit`, this allows up to 110 requests to be
/// in-flight.
///
/// ```
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// # #[cfg(all(feature = "buffer", feature = "limit"))]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .concurrency_limit(10)
/// .buffer(100)
/// .service(svc)
/// # ;
/// # }
/// ```
///
/// The above example is similar, but the order of layers is reversed. Now,
/// `concurrency_limit` applies first and only allows 10 requests to be in-flight
/// total.
///
/// # Examples
///
/// A [`Service`] stack with a single layer:
///
/// ```
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// # #[cfg(feature = "limit")]
/// # use tower::limit::concurrency::ConcurrencyLimitLayer;
/// # #[cfg(feature = "limit")]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .concurrency_limit(5)
/// .service(svc);
/// # ;
/// # }
/// ```
///
/// A [`Service`] stack with _multiple_ layers that contain rate limiting,
/// in-flight request limits, and a channel-backed, clonable [`Service`]:
///
/// ```
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// # use std::time::Duration;
/// # #[cfg(all(feature = "buffer", feature = "limit"))]
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// ServiceBuilder::new()
/// .buffer(5)
/// .concurrency_limit(5)
/// .rate_limit(5, Duration::from_secs(1))
/// .service(svc);
/// # ;
/// # }
/// ```
///
/// [`Service`]: crate::Service
#[derive(Clone)]
pub struct ServiceBuilder<L> {
    // The layers composed so far; `Identity` for an empty builder (see `new`).
    layer: L,
}
impl Default for ServiceBuilder<Identity> {
fn default() -> Self {
Self::new()
}
}
impl ServiceBuilder<Identity> {
    /// Create a new [`ServiceBuilder`].
    pub const fn new() -> Self {
        // Start from the no-op `Identity` layer; `layer()` stacks onto it.
        Self {
            layer: Identity::new(),
        }
    }
}
impl<L> ServiceBuilder<L> {
/// Add a new layer `T` into the [`ServiceBuilder`].
///
/// This wraps the inner service with the service provided by a user-defined
/// [`Layer`]. The provided layer must implement the [`Layer`] trait.
///
/// [`Layer`]: crate::Layer
pub fn layer<T>(self, layer: T) -> ServiceBuilder<Stack<T, L>> {
ServiceBuilder {
layer: Stack::new(layer, self.layer),
}
}
/// Optionally add a new layer `T` into the [`ServiceBuilder`].
///
/// ```
/// # use std::time::Duration;
/// # use tower::Service;
/// # use tower::builder::ServiceBuilder;
/// # use tower::timeout::TimeoutLayer;
/// # async fn wrap<S>(svc: S) where S: Service<(), Error = &'static str> + 'static + Send, S::Future: Send {
/// # let timeout = Some(Duration::new(10, 0));
/// // Apply a timeout if configured
/// ServiceBuilder::new()
/// .option_layer(timeout.map(TimeoutLayer::new))
/// .service(svc)
/// # ;
/// # }
/// ```
#[cfg(feature = "util")]
pub fn option_layer<T>(
self,
layer: Option<T>,
) -> ServiceBuilder<Stack<crate::util::Either<T, Identity>, L>> {
self.layer(crate::util::option_layer(layer))
}
/// Add a [`Layer`] built from a function that accepts a service and returns another service.
///
/// See the documentation for [`layer_fn`] for more details.
///
/// [`layer_fn`]: crate::layer::layer_fn
pub fn layer_fn<F>(self, f: F) -> ServiceBuilder<Stack<crate::layer::LayerFn<F>, L>> {
self.layer(crate::layer::layer_fn(f))
}
/// Buffer requests when the next layer is not ready.
///
/// This wraps the inner service with an instance of the [`Buffer`]
/// middleware.
///
/// [`Buffer`]: crate::buffer
#[cfg(feature = "buffer")]
pub fn buffer<Request>(
self,
bound: usize,
) -> ServiceBuilder<Stack<crate::buffer::BufferLayer<Request>, L>> {
self.layer(crate::buffer::BufferLayer::new(bound))
}
/// Limit the max number of in-flight requests.
///
/// A request is in-flight from the time the request is received until the
/// response future completes. This includes the time spent in the next
/// layers.
///
/// This wraps the inner service with an instance of the
/// [`ConcurrencyLimit`] middleware.
///
/// [`ConcurrencyLimit`]: crate::limit::concurrency
#[cfg(feature = "limit")]
pub fn concurrency_limit(
self,
max: usize,
) -> ServiceBuilder<Stack<crate::limit::ConcurrencyLimitLayer, L>> {
self.layer(crate::limit::ConcurrencyLimitLayer::new(max))
}
/// Drop requests when the next layer is unable to respond to requests.
///
/// Usually, when a service or middleware does not have capacity to process a
/// request (i.e., [`poll_ready`] returns [`Pending`]), the caller waits until
/// capacity becomes available.
///
/// [`LoadShed`] immediately responds with an error when the next layer is
/// out of capacity.
///
/// This wraps the inner service with an instance of the [`LoadShed`]
/// middleware.
///
/// [`LoadShed`]: crate::load_shed
/// [`poll_ready`]: crate::Service::poll_ready
/// [`Pending`]: std::task::Poll::Pending
#[cfg(feature = "load-shed")]
pub fn load_shed(self) -> ServiceBuilder<Stack<crate::load_shed::LoadShedLayer, L>> {
self.layer(crate::load_shed::LoadShedLayer::new())
}
/// Limit requests to at most `num` per the given duration.
///
/// This wraps the inner service with an instance of the [`RateLimit`]
/// middleware.
///
/// [`RateLimit`]: crate::limit::rate
#[cfg(feature = "limit")]
pub fn rate_limit(
self,
num: u64,
per: std::time::Duration,
) -> ServiceBuilder<Stack<crate::limit::RateLimitLayer, L>> {
self.layer(crate::limit::RateLimitLayer::new(num, per))
}
/// Retry failed requests according to the given [retry policy][policy].
///
/// `policy` determines which failed requests will be retried. It must
/// implement the [`retry::Policy`][policy] trait.
///
/// This wraps the inner service with an instance of the [`Retry`]
/// middleware.
///
/// [`Retry`]: crate::retry
/// [policy]: crate::retry::Policy
#[cfg(feature = "retry")]
pub fn retry<P>(self, policy: P) -> ServiceBuilder<Stack<crate::retry::RetryLayer<P>, L>> {
self.layer(crate::retry::RetryLayer::new(policy))
}
/// Fail requests that take longer than `timeout`.
///
/// If the next layer takes more than `timeout` to respond to a request,
/// processing is terminated and an error is returned.
///
/// This wraps the inner service with an instance of the [`timeout`]
/// middleware.
///
/// [`timeout`]: crate::timeout
#[cfg(feature = "timeout")]
pub fn timeout(
self,
timeout: std::time::Duration,
) -> ServiceBuilder<Stack<crate::timeout::TimeoutLayer, L>> {
self.layer(crate::timeout::TimeoutLayer::new(timeout))
}
/// Conditionally reject requests based on `predicate`.
///
/// `predicate` must implement the [`Predicate`] trait.
///
/// This wraps the inner service with an instance of the [`Filter`]
/// middleware.
///
/// [`Filter`]: crate::filter
/// [`Predicate`]: crate::filter::Predicate
#[cfg(feature = "filter")]
pub fn filter<P>(
self,
predicate: P,
) -> ServiceBuilder<Stack<crate::filter::FilterLayer<P>, L>> {
self.layer(crate::filter::FilterLayer::new(predicate))
}
/// Conditionally reject requests based on an asynchronous `predicate`.
///
/// `predicate` must implement the [`AsyncPredicate`] trait.
///
/// This wraps the inner service with an instance of the [`AsyncFilter`]
/// middleware.
///
/// [`AsyncFilter`]: crate::filter::AsyncFilter
/// [`AsyncPredicate`]: crate::filter::AsyncPredicate
#[cfg(feature = "filter")]
pub fn filter_async<P>(
self,
predicate: P,
) -> ServiceBuilder<Stack<crate::filter::AsyncFilterLayer<P>, L>> {
self.layer(crate::filter::AsyncFilterLayer::new(predicate))
}
/// Map one request type to another.
///
/// This wraps the inner service with an instance of the [`MapRequest`]
/// middleware.
///
/// # Examples
///
/// Changing the type of a request:
///
/// ```rust
/// use tower::ServiceBuilder;
/// use tower::ServiceExt;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), ()> {
/// // Suppose we have some `Service` whose request type is `String`:
/// let string_svc = tower::service_fn(|request: String| async move {
/// println!("request: {}", request);
/// Ok(())
/// });
///
/// // ...but we want to call that service with a `usize`. What do we do?
///
/// let usize_svc = ServiceBuilder::new()
/// // Add a middleware that converts the request type to a `String`:
/// .map_request(|request: usize| format!("{}", request))
/// // ...and wrap the string service with that middleware:
/// .service(string_svc);
///
/// // Now, we can call that service with a `usize`:
/// usize_svc.oneshot(42).await?;
/// # Ok(())
/// # }
/// ```
///
/// Modifying the request value:
///
/// ```rust
/// use tower::ServiceBuilder;
/// use tower::ServiceExt;
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), ()> {
/// // A service that takes a number and returns it:
/// let svc = tower::service_fn(|request: usize| async move {
/// Ok(request)
/// });
///
/// let svc = ServiceBuilder::new()
/// // Add a middleware that adds 1 to each request
/// .map_request(|request: usize| request + 1)
/// .service(svc);
///
/// let response = svc.oneshot(1).await?;
/// assert_eq!(response, 2);
/// # Ok(())
/// # }
/// ```
///
/// [`MapRequest`]: crate::util::MapRequest
#[cfg(feature = "util")]
pub fn map_request<F, R1, R2>(
self,
f: F,
) -> ServiceBuilder<Stack<crate::util::MapRequestLayer<F>, L>>
where
F: FnMut(R1) -> R2 + Clone,
{
self.layer(crate::util::MapRequestLayer::new(f))
}
/// Map one response type to another.
///
/// This wraps the inner service with an instance of the [`MapResponse`]
/// middleware.
///
/// See the documentation for the [`map_response` combinator] for details.
///
/// [`MapResponse`]: crate::util::MapResponse
/// [`map_response` combinator]: crate::util::ServiceExt::map_response
#[cfg(feature = "util")]
pub fn map_response<F>(
self,
f: F,
) -> ServiceBuilder<Stack<crate::util::MapResponseLayer<F>, L>> {
self.layer(crate::util::MapResponseLayer::new(f))
}
/// Map one error type to another.
///
/// This wraps the inner service with an instance of the [`MapErr`]
/// middleware.
///
/// See the documentation for the [`map_err` combinator] for details.
///
/// [`MapErr`]: crate::util::MapErr
/// [`map_err` combinator]: crate::util::ServiceExt::map_err
#[cfg(feature = "util")]
pub fn map_err<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapErrLayer<F>, L>> {
self.layer(crate::util::MapErrLayer::new(f))
}
/// Composes a function that transforms futures produced by the service.
///
/// This wraps the inner service with an instance of the [`MapFutureLayer`] middleware.
///
/// See the documentation for the [`map_future`] combinator for details.
///
/// [`MapFutureLayer`]: crate::util::MapFutureLayer
/// [`map_future`]: crate::util::ServiceExt::map_future
#[cfg(feature = "util")]
pub fn map_future<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapFutureLayer<F>, L>> {
self.layer(crate::util::MapFutureLayer::new(f))
}
/// Apply an asynchronous function after the service, regardless of whether the future
/// succeeds or fails.
///
/// This wraps the inner service with an instance of the [`Then`]
/// middleware.
///
/// This is similar to the [`map_response`] and [`map_err`] functions,
/// except that the *same* function is invoked when the service's future
/// completes, whether it completes successfully or fails. This function
/// takes the [`Result`] returned by the service's future, and returns a
/// [`Result`].
///
/// See the documentation for the [`then` combinator] for details.
///
/// [`Then`]: crate::util::Then
/// [`then` combinator]: crate::util::ServiceExt::then
/// [`map_response`]: ServiceBuilder::map_response
/// [`map_err`]: ServiceBuilder::map_err
#[cfg(feature = "util")]
pub fn then<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::ThenLayer<F>, L>> {
self.layer(crate::util::ThenLayer::new(f))
}
/// Executes a new future after this service's future resolves. This does
/// not alter the behaviour of the [`poll_ready`] method.
///
/// This method can be used to change the [`Response`] type of the service
/// into a different type. You can use this method to chain along a computation once the
/// service's response has been resolved.
///
/// This wraps the inner service with an instance of the [`AndThen`]
/// middleware.
///
/// See the documentation for the [`and_then` combinator] for details.
///
/// [`Response`]: crate::Service::Response
/// [`poll_ready`]: crate::Service::poll_ready
/// [`and_then` combinator]: crate::util::ServiceExt::and_then
/// [`AndThen`]: crate::util::AndThen
#[cfg(feature = "util")]
pub fn and_then<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::AndThenLayer<F>, L>> {
self.layer(crate::util::AndThenLayer::new(f))
}
/// Maps this service's result type (`Result<Self::Response, Self::Error>`)
/// to a different value, regardless of whether the future succeeds or
/// fails.
///
/// This wraps the inner service with an instance of the [`MapResult`]
/// middleware.
///
/// See the documentation for the [`map_result` combinator] for details.
///
/// [`map_result` combinator]: crate::util::ServiceExt::map_result
/// [`MapResult`]: crate::util::MapResult
#[cfg(feature = "util")]
pub fn map_result<F>(self, f: F) -> ServiceBuilder<Stack<crate::util::MapResultLayer<F>, L>> {
self.layer(crate::util::MapResultLayer::new(f))
}
/// Returns the underlying `Layer` implementation.
pub fn into_inner(self) -> L {
self.layer
}
/// Wrap the service `S` with the middleware provided by this
/// [`ServiceBuilder`]'s [`Layer`]'s, returning a new [`Service`].
///
/// [`Layer`]: crate::Layer
/// [`Service`]: crate::Service
pub fn service<S>(&self, service: S) -> L::Service
where
L: Layer<S>,
{
self.layer.layer(service)
}
/// Wrap the async function `F` with the middleware provided by this [`ServiceBuilder`]'s
/// [`Layer`]s, returning a new [`Service`].
///
/// This is a convenience method which is equivalent to calling
/// [`ServiceBuilder::service`] with a [`service_fn`], like this:
///
/// ```rust
/// # use tower::{ServiceBuilder, service_fn};
/// # async fn handler_fn(_: ()) -> Result<(), ()> { Ok(()) }
/// # let _ = {
/// ServiceBuilder::new()
/// // ...
/// .service(service_fn(handler_fn))
/// # };
/// ```
///
/// # Example
///
/// ```rust
/// use std::time::Duration;
/// use tower::{ServiceBuilder, ServiceExt, BoxError, service_fn};
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), BoxError> {
/// async fn handle(request: &'static str) -> Result<&'static str, BoxError> {
/// Ok(request)
/// }
///
/// let svc = ServiceBuilder::new()
/// .buffer(1024)
/// .timeout(Duration::from_secs(10))
/// .service_fn(handle);
///
/// let response = svc.oneshot("foo").await?;
///
/// assert_eq!(response, "foo");
/// # Ok(())
/// # }
/// ```
///
/// [`Layer`]: crate::Layer
/// [`Service`]: crate::Service
/// [`service_fn`]: crate::service_fn
#[cfg(feature = "util")]
pub fn service_fn<F>(self, f: F) -> L::Service
where
L: Layer<crate::util::ServiceFn<F>>,
{
self.service(crate::util::service_fn(f))
}
/// Check that the builder implements `Clone`.
///
/// This can be useful when debugging type errors in `ServiceBuilder`s with lots of layers.
///
/// Doesn't actually change the builder but serves as a type check.
///
/// # Example
///
/// ```rust
/// use tower::ServiceBuilder;
///
/// let builder = ServiceBuilder::new()
/// // Do something before processing the request
/// .map_request(|request: String| {
/// println!("got request!");
/// request
/// })
/// // Ensure our `ServiceBuilder` can be cloned
/// .check_clone()
/// // Do something after processing the request
/// .map_response(|response: String| {
/// println!("got response!");
/// response
/// });
/// ```
#[inline]
pub fn check_clone(self) -> Self
where
Self: Clone,
{
self
}
/// Check that the builder when given a service of type `S` produces a service that implements
/// `Clone`.
///
/// This can be useful when debugging type errors in `ServiceBuilder`s with lots of layers.
///
/// Doesn't actually change the builder but serves as a type check.
///
/// # Example
///
/// ```rust
/// use tower::ServiceBuilder;
///
/// # #[derive(Clone)]
/// # struct MyService;
/// #
/// let builder = ServiceBuilder::new()
/// // Do something before processing the request
/// .map_request(|request: String| {
/// println!("got request!");
/// request
/// })
/// // Ensure that the service produced when given a `MyService` implements
/// .check_service_clone::<MyService>()
/// // Do something after processing the request
/// .map_response(|response: String| {
/// println!("got response!");
/// response
/// });
/// ```
#[inline]
pub fn check_service_clone<S>(self) -> Self
where
L: Layer<S>,
L::Service: Clone,
{
self
}
/// Check that the builder when given a service of type `S` produces a service with the given
/// request, response, and error types.
///
/// This can be useful when debugging type errors in `ServiceBuilder`s with lots of layers.
///
/// Doesn't actually change the builder but serves as a type check.
///
/// # Example
///
/// ```rust
/// use tower::ServiceBuilder;
/// use std::task::{Poll, Context};
/// use tower::{Service, ServiceExt};
///
/// // An example service
/// struct MyService;
///
/// impl Service<Request> for MyService {
/// type Response = Response;
/// type Error = Error;
/// type Future = std::future::Ready<Result<Response, Error>>;
///
/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
/// // ...
/// # todo!()
/// }
///
/// fn call(&mut self, request: Request) -> Self::Future {
/// // ...
/// # todo!()
/// }
/// }
///
/// struct Request;
/// struct Response;
/// struct Error;
///
/// struct WrappedResponse(Response);
///
/// let builder = ServiceBuilder::new()
/// // At this point in the builder if given a `MyService` it produces a service that
/// // accepts `Request`s, produces `Response`s, and fails with `Error`s
/// .check_service::<MyService, Request, Response, Error>()
/// // Wrap responses in `WrappedResponse`
/// .map_response(|response: Response| WrappedResponse(response))
/// // Now the response type will be `WrappedResponse`
/// .check_service::<MyService, _, WrappedResponse, _>();
/// ```
#[inline]
pub fn check_service<S, T, U, E>(self) -> Self
where
L: Layer<S>,
L::Service: Service<T, Response = U, Error = E>,
{
self
}
/// This wraps the inner service with the [`Layer`] returned by [`BoxService::layer()`].
///
/// See that method for more details.
///
/// # Example
///
/// ```
/// use tower::{Service, ServiceBuilder, BoxError, util::BoxService};
/// use std::time::Duration;
/// #
/// # struct Request;
/// # struct Response;
/// # impl Response {
/// # fn new() -> Self { Self }
/// # }
///
/// let service: BoxService<Request, Response, BoxError> = ServiceBuilder::new()
/// .boxed()
/// .load_shed()
/// .concurrency_limit(64)
/// .timeout(Duration::from_secs(10))
/// .service_fn(|req: Request| async {
/// Ok::<_, BoxError>(Response::new())
/// });
/// # let service = assert_service(service);
/// # fn assert_service<S, R>(svc: S) -> S
/// # where S: Service<R> { svc }
/// ```
///
/// [`BoxService::layer()`]: crate::util::BoxService::layer()
#[cfg(feature = "util")]
pub fn boxed<S, R>(
self,
) -> ServiceBuilder<
Stack<
tower_layer::LayerFn<
fn(
L::Service,
) -> crate::util::BoxService<
R,
<L::Service as Service<R>>::Response,
<L::Service as Service<R>>::Error,
>,
>,
L,
>,
>
where
L: Layer<S>,
L::Service: Service<R> + Send + 'static,
<L::Service as Service<R>>::Future: Send + 'static,
{
self.layer(crate::util::BoxService::layer())
}
/// This wraps the inner service with the [`Layer`] returned by [`BoxCloneService::layer()`].
///
/// This is similar to the [`boxed`] method, but it requires that `Self` implement
/// [`Clone`], and the returned boxed service implements [`Clone`].
///
/// See [`BoxCloneService`] for more details.
///
/// # Example
///
/// ```
/// use tower::{Service, ServiceBuilder, BoxError, util::BoxCloneService};
/// use std::time::Duration;
/// #
/// # struct Request;
/// # struct Response;
/// # impl Response {
/// # fn new() -> Self { Self }
/// # }
///
/// let service: BoxCloneService<Request, Response, BoxError> = ServiceBuilder::new()
/// .boxed_clone()
/// .load_shed()
/// .concurrency_limit(64)
/// .timeout(Duration::from_secs(10))
/// .service_fn(|req: Request| async {
/// Ok::<_, BoxError>(Response::new())
/// });
/// # let service = assert_service(service);
///
/// // The boxed service can still be cloned.
/// service.clone();
/// # fn assert_service<S, R>(svc: S) -> S
/// # where S: Service<R> { svc }
/// ```
///
/// [`BoxCloneService::layer()`]: crate::util::BoxCloneService::layer()
/// [`BoxCloneService`]: crate::util::BoxCloneService
/// [`boxed`]: Self::boxed
#[cfg(feature = "util")]
pub fn boxed_clone<S, R>(
self,
) -> ServiceBuilder<
Stack<
tower_layer::LayerFn<
fn(
L::Service,
) -> crate::util::BoxCloneService<
R,
<L::Service as Service<R>>::Response,
<L::Service as Service<R>>::Error,
>,
>,
L,
>,
>
where
L: Layer<S>,
L::Service: Service<R> + Clone + Send + 'static,
<L::Service as Service<R>>::Future: Send + 'static,
{
self.layer(crate::util::BoxCloneService::layer())
}
/// This wraps the inner service with the [`Layer`] returned by [`BoxCloneSyncServiceLayer`].
///
/// This is similar to the [`boxed_clone`] method, but it requires that `Self` implement
/// [`Sync`], and the returned boxed service implements [`Sync`].
///
/// See [`BoxCloneSyncService`] for more details.
///
/// # Example
///
/// ```
/// use tower::{Service, ServiceBuilder, BoxError, util::BoxCloneSyncService};
/// use std::time::Duration;
/// #
/// # struct Request;
/// # struct Response;
/// # impl Response {
/// # fn new() -> Self { Self }
/// # }
///
/// let service: BoxCloneSyncService<Request, Response, BoxError> = ServiceBuilder::new()
/// .load_shed()
/// .concurrency_limit(64)
/// .timeout(Duration::from_secs(10))
/// .boxed_clone_sync()
/// .service_fn(|req: Request| async {
/// Ok::<_, BoxError>(Response::new())
/// });
/// # let service = assert_service(service);
///
/// // The boxed service can still be cloned.
/// service.clone();
/// # fn assert_service<S, R>(svc: S) -> S
/// # where S: Service<R> { svc }
/// ```
///
/// [`BoxCloneSyncServiceLayer`]: crate::util::BoxCloneSyncServiceLayer
/// [`BoxCloneSyncService`]: crate::util::BoxCloneSyncService
/// [`boxed_clone`]: Self::boxed_clone
#[cfg(feature = "util")]
pub fn boxed_clone_sync<S, R>(
self,
) -> ServiceBuilder<
Stack<
crate::util::BoxCloneSyncServiceLayer<
S,
R,
<L::Service as Service<R>>::Response,
<L::Service as Service<R>>::Error,
>,
Identity,
>,
>
where
L: Layer<S> + Send + Sync + 'static,
L::Service: Service<R> + Clone + Send + Sync + 'static,
<L::Service as Service<R>>::Future: Send + Sync + 'static,
{
let layer = self.into_inner();
ServiceBuilder::new().layer(crate::util::BoxCloneSyncServiceLayer::new(layer))
}
}
impl<L: fmt::Debug> fmt::Debug for ServiceBuilder<L> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Render as a tuple, `ServiceBuilder(<layer stack>)` -- the layer
        // stack is the builder's only state.
        f.debug_tuple("ServiceBuilder").field(&self.layer).finish()
    }
}
// A `ServiceBuilder` is itself a `Layer`: applying it wraps `inner` with the
// builder's accumulated layer stack.
impl<S, L> Layer<S> for ServiceBuilder<L>
where
    L: Layer<S>,
{
    type Service = L::Service;
    fn layer(&self, inner: S) -> Self::Service {
        self.layer.layer(inner)
    }
}

62
vendor/tower/src/discover/list.rs vendored Normal file
View File

@@ -0,0 +1,62 @@
use super::Change;
use futures_core::Stream;
use pin_project_lite::pin_project;
use std::convert::Infallible;
use std::iter::{Enumerate, IntoIterator};
use std::{
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
pin_project! {
    /// Static service discovery based on a predetermined list of services.
    ///
    /// [`ServiceList`] is created with an initial list of services. The discovery
    /// process will yield this list once and do nothing after.
    #[derive(Debug)]
    pub struct ServiceList<T>
    where
        T: IntoIterator,
    {
        // The remaining services, each paired with the index that serves as
        // its discovery key.
        inner: Enumerate<T::IntoIter>,
    }
}
impl<T, U> ServiceList<T>
where
    T: IntoIterator<Item = U>,
{
    /// Build a [`ServiceList`] from an initial collection of services.
    #[allow(missing_docs)]
    pub fn new<Request>(services: T) -> ServiceList<T>
    where
        U: Service<Request>,
    {
        // Assign each service a distinct index up front; the index doubles
        // as the key yielded with each `Change::Insert`.
        let indexed = services.into_iter().enumerate();
        ServiceList { inner: indexed }
    }
}
impl<T, U> Stream for ServiceList<T>
where
    T: IntoIterator<Item = U>,
{
    type Item = Result<Change<usize, U>, Infallible>;
    /// Always ready: each remaining service is yielded as an `Insert`
    /// change, and the stream ends once the backing iterator is exhausted.
    fn poll_next(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let next = self
            .project()
            .inner
            .next()
            .map(|(key, svc)| Ok(Change::Insert(key, svc)));
        Poll::Ready(next)
    }
}
// check that List can be directly over collections
// These aliases only need to type-check: they verify that `ServiceList` can
// wrap both an owned collection and its iterator type.
#[cfg(test)]
#[allow(dead_code)]
type ListVecTest<T> = ServiceList<Vec<T>>;
#[cfg(test)]
#[allow(dead_code)]
type ListVecIterTest<T> = ServiceList<::std::vec::IntoIter<T>>;

107
vendor/tower/src/discover/mod.rs vendored Normal file
View File

@@ -0,0 +1,107 @@
//! Service discovery
//!
//! This module provides the [`Change`] enum, which indicates the arrival or departure of a service
//! from a collection of similar services. Most implementations should use the [`Discover`] trait
//! in their bounds to indicate that they can handle services coming and going. [`Discover`] itself
//! is primarily a convenience wrapper around [`TryStream<Ok = Change>`][`TryStream`].
//!
//! Every discovered service is assigned an identifier that is distinct among the currently active
//! services. If that service later goes away, a [`Change::Remove`] is yielded with that service's
//! identifier. From that point forward, the identifier may be re-used.
//!
//! # Examples
//!
//! ```rust
//! use std::future::poll_fn;
//! use futures_util::pin_mut;
//! use tower::discover::{Change, Discover};
//! async fn services_monitor<D: Discover>(services: D) {
//! pin_mut!(services);
//! while let Some(Ok(change)) = poll_fn(|cx| services.as_mut().poll_discover(cx)).await {
//! match change {
//! Change::Insert(key, svc) => {
//! // a new service with identifier `key` was discovered
//! # let _ = (key, svc);
//! }
//! Change::Remove(key) => {
//! // the service with identifier `key` has gone away
//! # let _ = (key);
//! }
//! }
//! }
//! }
//! ```
//!
//! [`TryStream`]: https://docs.rs/futures/latest/futures/stream/trait.TryStream.html
mod list;
pub use self::list::ServiceList;
use crate::sealed::Sealed;
use futures_core::TryStream;
use std::{
pin::Pin,
task::{Context, Poll},
};
/// A dynamically changing set of related services.
///
/// As new services arrive and old services are retired,
/// [`Change`]s are returned which provide unique identifiers
/// for the services.
///
/// See the module documentation for more details.
pub trait Discover: Sealed<Change<(), ()>> {
    /// A unique identifier for each active service.
    ///
    /// An identifier can be re-used once a [`Change::Remove`] has been yielded for its service.
    type Key: Eq;
    /// The type of [`Service`] yielded by this [`Discover`].
    ///
    /// [`Service`]: crate::Service
    type Service;
    /// Error produced during discovery
    type Error;
    /// Yields the next discovery change set.
    fn poll_discover(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Change<Self::Key, Self::Service>, Self::Error>>>;
}
// Seal `Discover` so it can only be implemented via the blanket impl below:
// any `TryStream` of `Change`s qualifies, and nothing else can.
impl<K, S, E, D: ?Sized> Sealed<Change<(), ()>> for D
where
    D: TryStream<Ok = Change<K, S>, Error = E>,
    K: Eq,
{
}
// Blanket impl: every fallible stream of `Change`s is a `Discover`, making
// `Discover` effectively an alias for `TryStream<Ok = Change<K, S>>`.
impl<K, S, E, D: ?Sized> Discover for D
where
    D: TryStream<Ok = Change<K, S>, Error = E>,
    K: Eq,
{
    type Key = K;
    type Service = S;
    type Error = E;
    fn poll_discover(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<D::Ok, D::Error>>> {
        // Discovery is just a rename of the underlying stream's poll.
        TryStream::try_poll_next(self, cx)
    }
}
/// A change in the service set.
#[derive(Debug, Clone)]
pub enum Change<K, V> {
    /// A new service identified by key `K` was identified.
    Insert(K, V),
    /// The service identified by key `K` disappeared.
    Remove(K),
}

97
vendor/tower/src/filter/future.rs vendored Normal file
View File

@@ -0,0 +1,97 @@
//! Future types
use super::AsyncPredicate;
use crate::BoxError;
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
pin_project! {
    /// Filtered response future from [`AsyncFilter`] services.
    ///
    /// [`AsyncFilter`]: crate::filter::AsyncFilter
    #[derive(Debug)]
    pub struct AsyncResponseFuture<P, S, Request>
    where
        P: AsyncPredicate<Request>,
        S: Service<P::Request>,
    {
        // Tracks whether we are still awaiting the predicate or already
        // awaiting the inner service's response.
        #[pin]
        state: State<P::Future, S::Future>,
        // Inner service
        service: S,
    }
}
opaque_future! {
    /// Filtered response future from [`Filter`] services.
    ///
    /// [`Filter`]: crate::filter::Filter
    pub type ResponseFuture<R, F> =
        futures_util::future::Either<
            std::future::Ready<Result<R, crate::BoxError>>,
            futures_util::future::ErrInto<F, crate::BoxError>
        >;
}
pin_project! {
    #[project = StateProj]
    #[derive(Debug)]
    enum State<F, G> {
        /// Waiting for the predicate future
        Check {
            #[pin]
            check: F
        },
        /// Waiting for the response future
        WaitResponse {
            #[pin]
            response: G
        },
    }
}
impl<P, S, Request> AsyncResponseFuture<P, S, Request>
where
    P: AsyncPredicate<Request>,
    S: Service<P::Request>,
    S::Error: Into<BoxError>,
{
    /// Builds a future that first awaits `check` and, on success, dispatches
    /// the resulting request to `service`.
    pub(crate) fn new(check: P::Future, service: S) -> Self {
        let state = State::Check { check };
        Self { state, service }
    }
}
impl<P, S, Request> Future for AsyncResponseFuture<P, S, Request>
where
    P: AsyncPredicate<Request>,
    S: Service<P::Request>,
    S::Error: Into<crate::BoxError>,
{
    type Output = Result<S::Response, crate::BoxError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Two-phase state machine: first await the predicate, then (if the
        // predicate accepted the request) await the inner service's response.
        loop {
            match this.state.as_mut().project() {
                StateProj::Check { mut check } => {
                    // A predicate error propagates immediately via `?`, and
                    // the inner service is never invoked.
                    let request = ready!(check.as_mut().poll(cx))?;
                    let response = this.service.call(request);
                    this.state.set(State::WaitResponse { response });
                }
                StateProj::WaitResponse { response } => {
                    return response.poll(cx).map_err(Into::into);
                }
            }
        }
    }
}

72
vendor/tower/src/filter/layer.rs vendored Normal file
View File

@@ -0,0 +1,72 @@
use super::{AsyncFilter, Filter};
use tower_layer::Layer;
/// Conditionally dispatch requests to the inner service based on a synchronous
/// [predicate].
///
/// This [`Layer`] produces instances of the [`Filter`] service.
///
/// [predicate]: crate::filter::Predicate
/// [`Layer`]: crate::Layer
/// [`Filter`]: crate::filter::Filter
#[derive(Debug, Clone)]
pub struct FilterLayer<U> {
    // The synchronous predicate cloned into every `Filter` this layer builds.
    predicate: U,
}
/// Conditionally dispatch requests to the inner service based on an asynchronous
/// [predicate].
///
/// This [`Layer`] produces instances of the [`AsyncFilter`] service.
///
/// [predicate]: crate::filter::AsyncPredicate
/// [`Layer`]: crate::Layer
/// [`AsyncFilter`]: crate::filter::AsyncFilter
#[derive(Debug, Clone)]
pub struct AsyncFilterLayer<U> {
    // The asynchronous predicate cloned into every `AsyncFilter` this layer
    // builds.
    predicate: U,
}
// === impl FilterLayer ===
impl<U> FilterLayer<U> {
/// Returns a new layer that produces [`Filter`] services with the given
/// [`Predicate`].
///
/// [`Predicate`]: crate::filter::Predicate
/// [`Filter`]: crate::filter::Filter
pub const fn new(predicate: U) -> Self {
Self { predicate }
}
}
impl<U: Clone, S> Layer<S> for FilterLayer<U> {
    type Service = Filter<S, U>;
    fn layer(&self, service: S) -> Self::Service {
        // Each wrapped service gets its own clone of the predicate.
        Filter::new(service, self.predicate.clone())
    }
}
// === impl AsyncFilterLayer ===
impl<U> AsyncFilterLayer<U> {
/// Returns a new layer that produces [`AsyncFilter`] services with the given
/// [`AsyncPredicate`].
///
/// [`AsyncPredicate`]: crate::filter::AsyncPredicate
/// [`Filter`]: crate::filter::Filter
pub const fn new(predicate: U) -> Self {
Self { predicate }
}
}
impl<U: Clone, S> Layer<S> for AsyncFilterLayer<U> {
    type Service = AsyncFilter<S, U>;
    fn layer(&self, service: S) -> Self::Service {
        // Each wrapped service gets its own clone of the predicate.
        AsyncFilter::new(service, self.predicate.clone())
    }
}

191
vendor/tower/src/filter/mod.rs vendored Normal file
View File

@@ -0,0 +1,191 @@
//! Conditionally dispatch requests to the inner service based on the result of
//! a predicate.
//!
//! A predicate takes some request type and returns a `Result<Request, Error>`.
//! If the predicate returns [`Ok`], the inner service is called with the request
//! returned by the predicate &mdash; which may be the original request or a
//! modified one. If the predicate returns [`Err`], the request is rejected and
//! the inner service is not called.
//!
//! Predicates may either be synchronous (simple functions from a `Request` to
//! a [`Result`]) or asynchronous (functions returning [`Future`]s). Separate
//! traits, [`Predicate`] and [`AsyncPredicate`], represent these two types of
//! predicate. Note that when it is not necessary to await some other
//! asynchronous operation in the predicate, the synchronous predicate should be
//! preferred, as it introduces less overhead.
//!
//! The predicate traits are implemented for closures and function pointers.
//! However, users may also implement them for other types, such as when the
//! predicate requires some state carried between requests. For example,
//! [`Predicate`] could be implemented for a type that rejects a fixed set of
//! requests by checking if they are contained by a [`HashSet`] or other
//! collection.
//!
//! [`Future`]: std::future::Future
//! [`HashSet`]: std::collections::HashSet
pub mod future;
mod layer;
mod predicate;
pub use self::{
layer::{AsyncFilterLayer, FilterLayer},
predicate::{AsyncPredicate, Predicate},
};
use self::future::{AsyncResponseFuture, ResponseFuture};
use crate::BoxError;
use futures_util::{future::Either, TryFutureExt};
use std::task::{Context, Poll};
use tower_service::Service;
/// Conditionally dispatch requests to the inner service based on a [predicate].
///
/// [predicate]: Predicate
#[derive(Clone, Debug)]
pub struct Filter<T, U> {
    // The wrapped service, invoked only for requests the predicate accepts.
    inner: T,
    // Synchronous predicate applied to every request.
    predicate: U,
}
/// Conditionally dispatch requests to the inner service based on an
/// [asynchronous predicate].
///
/// [asynchronous predicate]: AsyncPredicate
#[derive(Clone, Debug)]
pub struct AsyncFilter<T, U> {
    // The wrapped service, invoked only for requests the predicate accepts.
    inner: T,
    // Asynchronous predicate applied to every request.
    predicate: U,
}
// ==== impl Filter ====
impl<T, U> Filter<T, U> {
    /// Returns a new [`Filter`] service wrapping `inner`.
    pub const fn new(inner: T, predicate: U) -> Self {
        Self { inner, predicate }
    }
    /// Returns a new [`Layer`] that wraps services with a [`Filter`] service
    /// with the given [`Predicate`].
    ///
    /// [`Layer`]: crate::Layer
    pub fn layer(predicate: U) -> FilterLayer<U> {
        FilterLayer::new(predicate)
    }
    /// Check a `Request` value against this filter's predicate.
    pub fn check<R>(&mut self, request: R) -> Result<U::Request, BoxError>
    where
        U: Predicate<R>,
    {
        self.predicate.check(request)
    }
    /// Get a reference to the inner service
    pub fn get_ref(&self) -> &T {
        &self.inner
    }
    /// Get a mutable reference to the inner service
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.inner
    }
    /// Consume `self`, returning the inner service
    pub fn into_inner(self) -> T {
        self.inner
    }
}
impl<T, U, Request> Service<Request> for Filter<T, U>
where
    U: Predicate<Request>,
    T: Service<U::Request>,
    T::Error: Into<BoxError>,
{
    type Response = T::Response;
    type Error = BoxError;
    type Future = ResponseFuture<T::Response, T::Future>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // Run the synchronous predicate up front: a rejected request becomes
        // an immediately-ready error future; an accepted (possibly rewritten)
        // request is dispatched to the inner service.
        let future = match self.predicate.check(request) {
            Err(rejection) => Either::Left(std::future::ready(Err(rejection))),
            Ok(accepted) => Either::Right(self.inner.call(accepted).err_into()),
        };
        ResponseFuture::new(future)
    }
}
// ==== impl AsyncFilter ====
impl<T, U> AsyncFilter<T, U> {
/// Returns a new [`AsyncFilter`] service wrapping `inner`.
pub const fn new(inner: T, predicate: U) -> Self {
Self { inner, predicate }
}
/// Returns a new [`Layer`] that wraps services with an [`AsyncFilter`]
/// service with the given [`AsyncPredicate`].
///
/// [`Layer`]: crate::Layer
pub fn layer(predicate: U) -> FilterLayer<U> {
FilterLayer::new(predicate)
}
/// Check a `Request` value against this filter's predicate.
pub async fn check<R>(&mut self, request: R) -> Result<U::Request, BoxError>
where
U: AsyncPredicate<R>,
{
self.predicate.check(request).await
}
/// Get a reference to the inner service
pub fn get_ref(&self) -> &T {
&self.inner
}
/// Get a mutable reference to the inner service
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
/// Consume `self`, returning the inner service
pub fn into_inner(self) -> T {
self.inner
}
}
impl<T, U, Request> Service<Request> for AsyncFilter<T, U>
where
    U: AsyncPredicate<Request>,
    T: Service<U::Request> + Clone,
    T::Error: Into<BoxError>,
{
    type Response = T::Response;
    type Error = BoxError;
    type Future = AsyncResponseFuture<U, T, Request>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated straight to the inner service; the predicate
        // itself requires no readiness.
        self.inner.poll_ready(cx).map_err(Into::into)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        use std::mem;
        let inner = self.inner.clone();
        // In case the inner service has state that's driven to readiness and
        // not tracked by clones (such as `Buffer`), pass the version we have
        // already called `poll_ready` on into the future, and leave its clone
        // behind.
        let inner = mem::replace(&mut self.inner, inner);
        // Check the request
        let check = self.predicate.check(request);
        AsyncResponseFuture::new(check, inner)
    }
}

65
vendor/tower/src/filter/predicate.rs vendored Normal file
View File

@@ -0,0 +1,65 @@
use crate::BoxError;
use std::future::Future;
/// Checks a request asynchronously.
pub trait AsyncPredicate<Request> {
    /// The future returned by [`check`].
    ///
    /// [`check`]: crate::filter::AsyncPredicate::check
    type Future: Future<Output = Result<Self::Request, BoxError>>;
    /// The type of requests returned by [`check`].
    ///
    /// This request is forwarded to the inner service if the predicate
    /// succeeds.
    ///
    /// [`check`]: crate::filter::AsyncPredicate::check
    type Request;
    /// Check whether the given request should be forwarded.
    ///
    /// If the future resolves with [`Ok`], the request is forwarded to the inner service.
    fn check(&mut self, request: Request) -> Self::Future;
}
/// Checks a request synchronously.
pub trait Predicate<Request> {
    /// The type of requests returned by [`check`].
    ///
    /// This request is forwarded to the inner service if the predicate
    /// succeeds.
    ///
    /// [`check`]: crate::filter::Predicate::check
    type Request;
    /// Check whether the given request should be forwarded.
    ///
    /// If this returns [`Ok`], the request is forwarded to the inner service.
    fn check(&mut self, request: Request) -> Result<Self::Request, BoxError>;
}
impl<F, T, U, R, E> AsyncPredicate<T> for F
where
    F: FnMut(T) -> U,
    U: Future<Output = Result<R, E>>,
    E: Into<BoxError>,
{
    type Future = futures_util::future::ErrInto<U, BoxError>;
    type Request = R;
    // Invokes the closure and adapts its error type into a `BoxError`.
    fn check(&mut self, request: T) -> Self::Future {
        use futures_util::TryFutureExt;
        let future = self(request);
        future.err_into()
    }
}
impl<F, T, R, E> Predicate<T> for F
where
    F: FnMut(T) -> Result<R, E>,
    E: Into<BoxError>,
{
    type Request = R;
    // Invokes the closure, boxing any error it produces.
    fn check(&mut self, request: T) -> Result<Self::Request, BoxError> {
        match self(request) {
            Ok(accepted) => Ok(accepted),
            Err(rejection) => Err(rejection.into()),
        }
    }
}

125
vendor/tower/src/hedge/delay.rs vendored Normal file
View File

@@ -0,0 +1,125 @@
use pin_project_lite::pin_project;
use std::time::Duration;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
use crate::util::Oneshot;
/// A policy which specifies how long each request should be delayed for.
pub trait Policy<Request> {
    /// Returns the duration to wait before dispatching `req`.
    fn delay(&self, req: &Request) -> Duration;
}
/// A middleware which delays sending the request to the underlying service
/// for an amount of time specified by the policy.
#[derive(Debug)]
pub struct Delay<P, S> {
    // Decides the per-request delay.
    policy: P,
    // The wrapped service; cloned into each response future.
    service: S,
}
pin_project! {
    /// Response future for [`Delay`]: sleeps, then drives the inner service
    /// to completion as a [`Oneshot`].
    #[derive(Debug)]
    pub struct ResponseFuture<Request, S>
    where
        S: Service<Request>,
    {
        // Taken (moved into the `Oneshot`) when the delay elapses.
        service: Option<S>,
        #[pin]
        state: State<Request, Oneshot<S, Request>>,
    }
}
pin_project! {
    #[project = StateProj]
    #[derive(Debug)]
    enum State<Request, F> {
        // Sleeping before dispatch; `req` is taken when the timer fires.
        Delaying {
            #[pin]
            delay: tokio::time::Sleep,
            req: Option<Request>,
        },
        // Inner oneshot call in flight.
        Called {
            #[pin]
            fut: F,
        },
    }
}
impl<Request, F> State<Request, F> {
fn delaying(delay: tokio::time::Sleep, req: Option<Request>) -> Self {
Self::Delaying { delay, req }
}
fn called(fut: F) -> Self {
Self::Called { fut }
}
}
impl<P, S> Delay<P, S> {
    /// Creates a new [`Delay`] middleware that holds each request for the
    /// duration chosen by `policy` before dispatching it to `service`.
    pub const fn new<Request>(policy: P, service: S) -> Self
    where
        P: Policy<Request>,
        S: Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
    {
        Self { policy, service }
    }
}
impl<Request, P, S> Service<Request> for Delay<P, S>
where
    P: Policy<Request>,
    S: Service<Request> + Clone,
    S::Error: Into<crate::BoxError>,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<Request, S>;
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Calling self.service.poll_ready would reserve a slot for the delayed request,
        // potentially well in advance of actually making it. Instead, signal readiness here and
        // treat the service as a Oneshot in the future.
        Poll::Ready(Ok(()))
    }
    fn call(&mut self, request: Request) -> Self::Future {
        let pause = self.policy.delay(&request);
        let service = Some(self.service.clone());
        ResponseFuture {
            service,
            state: State::delaying(tokio::time::sleep(pause), Some(request)),
        }
    }
}
impl<Request, S, T, E> Future for ResponseFuture<Request, S>
where
    E: Into<crate::BoxError>,
    S: Service<Request, Response = T, Error = E>,
{
    type Output = Result<T, crate::BoxError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Two-phase state machine: wait out the delay, then drive the inner
        // service to completion as a `Oneshot`.
        loop {
            match this.state.as_mut().project() {
                StateProj::Delaying { delay, req } => {
                    ready!(delay.poll(cx));
                    // Both request and service are `Option`s solely so they
                    // can be moved out exactly once when the timer fires.
                    let req = req.take().expect("Missing request in delay");
                    let svc = this.service.take().expect("Missing service in delay");
                    let fut = Oneshot::new(svc, req);
                    this.state.set(State::called(fut));
                }
                StateProj::Called { fut } => {
                    return fut.poll(cx).map_err(Into::into);
                }
            };
        }
    }
}

88
vendor/tower/src/hedge/latency.rs vendored Normal file
View File

@@ -0,0 +1,88 @@
use pin_project_lite::pin_project;
use std::time::Duration;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::time::Instant;
use tower_service::Service;
/// Record is the interface for accepting request latency measurements. When
/// a request completes, record is called with the elapsed duration between
/// when the service was called and when the future completed.
pub trait Record {
    /// Reports one completed request's elapsed time.
    fn record(&mut self, latency: Duration);
}
/// Latency is a middleware that measures request latency and records it to the
/// provided Record instance.
#[derive(Clone, Debug)]
pub struct Latency<R, S> {
    // Sink for latency measurements; cloned into each response future.
    rec: R,
    // The wrapped service.
    service: S,
}
pin_project! {
    /// Response future for [`Latency`] that records elapsed time on success.
    #[derive(Debug)]
    pub struct ResponseFuture<R, F> {
        // When the request was dispatched.
        start: Instant,
        rec: R,
        #[pin]
        inner: F,
    }
}
impl<S, R> Latency<R, S>
where
    R: Record + Clone,
{
    /// Creates a new [`Latency`] middleware that reports the elapsed time of
    /// every successfully completed request to `rec`.
    pub const fn new<Request>(rec: R, service: S) -> Self
    where
        S: Service<Request>,
        S::Error: Into<crate::BoxError>,
    {
        Self { rec, service }
    }
}
impl<S, R, Request> Service<Request> for Latency<R, S>
where
    S: Service<Request>,
    S::Error: Into<crate::BoxError>,
    R: Record + Clone,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<R, S::Future>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx).map_err(Into::into)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // Capture the start time before dispatching so time spent inside the
        // inner call is included in the measurement.
        let start = Instant::now();
        let rec = self.rec.clone();
        let inner = self.service.call(request);
        ResponseFuture { start, rec, inner }
    }
}
impl<R, F, T, E> Future for ResponseFuture<R, F>
where
R: Record,
F: Future<Output = Result<T, E>>,
E: Into<crate::BoxError>,
{
type Output = Result<T, crate::BoxError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
let rsp = ready!(this.inner.poll(cx)).map_err(Into::into)?;
let duration = Instant::now().saturating_duration_since(*this.start);
this.rec.record(duration);
Poll::Ready(Ok(rsp))
}
}

267
vendor/tower/src/hedge/mod.rs vendored Normal file
View File

@@ -0,0 +1,267 @@
//! Pre-emptively retry requests which have been outstanding for longer
//! than a given latency percentile.
#![warn(missing_debug_implementations, missing_docs, unreachable_pub)]
use crate::filter::AsyncFilter;
use futures_util::future::Either;
use pin_project_lite::pin_project;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::{
future,
pin::Pin,
task::{Context, Poll},
};
use tracing::error;
mod delay;
mod latency;
mod rotating_histogram;
mod select;
use delay::Delay;
use latency::Latency;
use rotating_histogram::RotatingHistogram;
use select::Select;
// Shared, rotating latency histogram used by both copies of the service.
type Histo = Arc<Mutex<RotatingHistogram>>;
// The full middleware stack backing `Hedge`: service A records latencies;
// service B additionally filters on the retry policy and delays dispatch by a
// latency percentile; `Select` races the two.
type Service<S, P> = select::Select<
    SelectPolicy<P>,
    Latency<Histo, S>,
    Delay<DelayPolicy, AsyncFilter<Latency<Histo, S>, PolicyPredicate<P>>>,
>;
/// A middleware that pre-emptively retries requests which have been outstanding
/// for longer than a given latency percentile. If either of the original
/// future or the retry future completes, that value is used.
#[derive(Debug)]
pub struct Hedge<S, P>(Service<S, P>);
pin_project! {
    /// The [`Future`] returned by the [`Hedge`] service.
    ///
    /// [`Future`]: std::future::Future
    #[derive(Debug)]
    pub struct Future<S, Request>
    where
        S: tower_service::Service<Request>,
    {
        #[pin]
        inner: S::Future,
    }
}
/// A policy which describes which requests can be cloned and then whether those
/// requests should be retried.
pub trait Policy<Request> {
    /// Called when the request is first received to determine if the request is retryable.
    fn clone_request(&self, req: &Request) -> Option<Request>;
    /// Called after the hedge timeout to determine if the hedge retry should be issued.
    fn can_retry(&self, req: &Request) -> bool;
}
// NOTE: these are pub only because they appear inside a Future<F>
// Adapts a `Policy` into the `AsyncPredicate` used by the hedge filter.
#[doc(hidden)]
#[derive(Clone, Debug)]
pub struct PolicyPredicate<P>(P);
// Delays the hedge request by a percentile of the recorded latency histogram.
#[doc(hidden)]
#[derive(Debug)]
pub struct DelayPolicy {
    histo: Histo,
    latency_percentile: f32,
}
// Gates hedging on the policy and on having enough recorded data points.
#[doc(hidden)]
#[derive(Debug)]
pub struct SelectPolicy<P> {
    policy: P,
    histo: Histo,
    min_data_points: u64,
}
impl<S, P> Hedge<S, P> {
    /// Create a new hedge middleware.
    pub fn new<Request>(
        service: S,
        policy: P,
        min_data_points: u64,
        latency_percentile: f32,
        period: Duration,
    ) -> Hedge<S, P>
    where
        S: tower_service::Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
        P: Policy<Request> + Clone,
    {
        let histo = Arc::new(Mutex::new(RotatingHistogram::new(period)));
        Self::new_with_histo(service, policy, min_data_points, latency_percentile, histo)
    }
    /// A hedge middleware with a prepopulated latency histogram. This is useful
    /// for integration tests.
    pub fn new_with_mock_latencies<Request>(
        service: S,
        policy: P,
        min_data_points: u64,
        latency_percentile: f32,
        period: Duration,
        latencies_ms: &[u64],
    ) -> Hedge<S, P>
    where
        S: tower_service::Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
        P: Policy<Request> + Clone,
    {
        let histo = Arc::new(Mutex::new(RotatingHistogram::new(period)));
        {
            // Seed the *read* side so the mock latencies are visible to the
            // delay policy immediately, without waiting for a rotation.
            let mut locked = histo.lock().unwrap();
            for latency in latencies_ms.iter() {
                locked.read().record(*latency).unwrap();
            }
        }
        Self::new_with_histo(service, policy, min_data_points, latency_percentile, histo)
    }
    // Assembles the full middleware stack around a shared histogram.
    fn new_with_histo<Request>(
        service: S,
        policy: P,
        min_data_points: u64,
        latency_percentile: f32,
        histo: Histo,
    ) -> Hedge<S, P>
    where
        S: tower_service::Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
        P: Policy<Request> + Clone,
    {
        // Clone the underlying service and wrap both copies in a middleware that
        // records the latencies in a rotating histogram.
        let recorded_a = Latency::new(histo.clone(), service.clone());
        let recorded_b = Latency::new(histo.clone(), service);
        // Check policy to see if the hedge request should be issued.
        let filtered = AsyncFilter::new(recorded_b, PolicyPredicate(policy.clone()));
        // Delay the second request by a percentile of the recorded request latency
        // histogram.
        let delay_policy = DelayPolicy {
            histo: histo.clone(),
            latency_percentile,
        };
        let delayed = Delay::new(delay_policy, filtered);
        // If the request is retryable, issue two requests -- the second one delayed
        // by a latency percentile. Use the first result to complete.
        let select_policy = SelectPolicy {
            policy,
            histo,
            min_data_points,
        };
        Hedge(Select::new(select_policy, recorded_a, delayed))
    }
}
impl<S, P, Request> tower_service::Service<Request> for Hedge<S, P>
where
    S: tower_service::Service<Request> + Clone,
    S::Error: Into<crate::BoxError>,
    P: Policy<Request> + Clone,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = Future<Service<S, P>, Request>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated entirely to the composed inner stack.
        self.0.poll_ready(cx)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        let inner = self.0.call(request);
        Future { inner }
    }
}
impl<S, Request> std::future::Future for Future<S, Request>
where
    S: tower_service::Service<Request>,
    S::Error: Into<crate::BoxError>,
{
    type Output = Result<S::Response, crate::BoxError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Forward to the inner future, boxing any error it produces.
        let this = self.project();
        this.inner.poll(cx).map_err(Into::into)
    }
}
// NOTE: `Duration::as_millis` is stable but truncates (and returns `u128`);
// this helper instead rounds the sub-second component *up* and saturates at
// `u64::MAX`, so it cannot be replaced directly.
const NANOS_PER_MILLI: u32 = 1_000_000;
const MILLIS_PER_SEC: u64 = 1_000;
/// Converts `duration` to whole milliseconds, rounding up and saturating.
fn millis(duration: Duration) -> u64 {
    let whole_secs_ms = duration.as_secs().saturating_mul(MILLIS_PER_SEC);
    // Round the fractional part up so sub-millisecond durations count as 1ms.
    let subsec_ms = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
    whole_secs_ms.saturating_add(u64::from(subsec_ms))
}
impl latency::Record for Histo {
    fn record(&mut self, latency: Duration) {
        let mut locked = self.lock().unwrap();
        // Recording fails only if the histogram rejects the value; log and
        // drop the sample rather than propagating the error.
        locked.write().record(millis(latency)).unwrap_or_else(|e| {
            error!("Failed to write to hedge histogram: {:?}", e);
        })
    }
}
impl<P, Request> crate::filter::AsyncPredicate<Request> for PolicyPredicate<P>
where
    P: Policy<Request>,
{
    type Future = Either<
        future::Ready<Result<Request, crate::BoxError>>,
        future::Pending<Result<Request, crate::BoxError>>,
    >;
    type Request = Request;
    fn check(&mut self, request: Request) -> Self::Future {
        if !self.0.can_retry(&request) {
            // If the hedge retry should not be issued, we simply want to wait
            // for the result of the original request. Therefore we don't want
            // to return an error here. Instead, we use future::pending to
            // ensure that the original request wins the select.
            return Either::Right(future::pending());
        }
        Either::Left(future::ready(Ok(request)))
    }
}
impl<Request> delay::Policy<Request> for DelayPolicy {
    fn delay(&self, _req: &Request) -> Duration {
        // The hedge delay tracks the configured percentile of the recorded
        // latency distribution.
        let quantile = f64::from(self.latency_percentile);
        let mut guard = self.histo.lock().unwrap();
        Duration::from_millis(guard.read().value_at_quantile(quantile))
    }
}
impl<P, Request> select::Policy<Request> for SelectPolicy<P>
where
    P: Policy<Request>,
{
    // Delegates to the user policy, but suppresses hedging until the
    // histogram holds enough samples to make the percentile meaningful.
    fn clone_request(&self, req: &Request) -> Option<Request> {
        self.policy.clone_request(req).filter(|_| {
            let mut locked = self.histo.lock().unwrap();
            // Do not attempt a retry if there are insufficiently many data
            // points in the histogram.
            locked.read().len() >= self.min_data_points
        })
    }
}

View File

@@ -0,0 +1,73 @@
use hdrhistogram::Histogram;
use std::time::Duration;
use tokio::time::Instant;
use tracing::trace;
/// This represents a "rotating" histogram which stores two histograms, one which
/// should be read and one which should be written to. Every period, the read
/// histogram is discarded and replaced by the write histogram. The idea here
/// is that the read histogram should always contain a full period (the previous
/// period) of write operations.
#[derive(Debug)]
pub struct RotatingHistogram {
    read: Histogram<u64>,
    write: Histogram<u64>,
    // When the current write window began.
    last_rotation: Instant,
    period: Duration,
}
impl RotatingHistogram {
    /// Creates a pair of empty histograms that rotate every `period`.
    pub fn new(period: Duration) -> RotatingHistogram {
        RotatingHistogram {
            // Use an auto-resizing histogram to avoid choosing
            // a maximum latency bound for all users.
            read: Histogram::<u64>::new(3).expect("Invalid histogram params"),
            write: Histogram::<u64>::new(3).expect("Invalid histogram params"),
            last_rotation: Instant::now(),
            period,
        }
    }
    /// Returns the read-side histogram, rotating first if the period elapsed.
    pub fn read(&mut self) -> &mut Histogram<u64> {
        self.maybe_rotate();
        &mut self.read
    }
    /// Returns the write-side histogram, rotating first if the period elapsed.
    pub fn write(&mut self) -> &mut Histogram<u64> {
        self.maybe_rotate();
        &mut self.write
    }
    // Rotates when exactly one period elapsed, or clears both sides when two
    // or more periods elapsed (even the write histogram is then stale).
    fn maybe_rotate(&mut self) {
        let delta = Instant::now().saturating_duration_since(self.last_rotation);
        // TODO: replace with delta.duration_div when it becomes stable.
        let rotations = (nanos(delta) / nanos(self.period)) as u32;
        if rotations >= 2 {
            trace!("Time since last rotation is {:?}. clearing!", delta);
            self.clear();
        } else if rotations == 1 {
            trace!("Time since last rotation is {:?}. rotating!", delta);
            self.rotate();
        }
        // Advance by whole periods so window boundaries stay aligned.
        self.last_rotation += self.period * rotations;
    }
    fn rotate(&mut self) {
        std::mem::swap(&mut self.read, &mut self.write);
        trace!("Rotated {:?} points into read", self.read.len());
        self.write.clear();
    }
    fn clear(&mut self) {
        self.read.clear();
        self.write.clear();
    }
}
const NANOS_PER_SEC: u64 = 1_000_000_000;
/// Total nanoseconds in `duration`, saturating at `u64::MAX` instead of
/// overflowing (a `Duration` can exceed `u64` nanoseconds).
fn nanos(duration: Duration) -> u64 {
    let whole_secs = duration.as_secs().saturating_mul(NANOS_PER_SEC);
    whole_secs.saturating_add(u64::from(duration.subsec_nanos()))
}

105
vendor/tower/src/hedge/select.rs vendored Normal file
View File

@@ -0,0 +1,105 @@
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
/// A policy which decides which requests can be cloned and sent to the B
/// service.
pub trait Policy<Request> {
    /// Returns a clone of `req` if it may be hedged, or `None` to send it
    /// only to the A service.
    fn clone_request(&self, req: &Request) -> Option<Request>;
}
/// Select is a middleware which attempts to clone the request and sends the
/// original request to the A service and, if the request was able to be cloned,
/// the cloned request to the B service. Both resulting futures will be polled
/// and whichever future completes first will be used as the result.
#[derive(Debug)]
pub struct Select<P, A, B> {
    policy: P,
    a: A,
    b: B,
}
pin_project! {
    /// Response future for [`Select`]: races the A-side future against the
    /// optional B-side future.
    #[derive(Debug)]
    pub struct ResponseFuture<AF, BF> {
        #[pin]
        a_fut: AF,
        // `None` when the policy declined to clone the request.
        #[pin]
        b_fut: Option<BF>,
    }
}
impl<P, A, B> Select<P, A, B> {
    /// Creates a new [`Select`] middleware from `policy` and the two services
    /// to race against each other.
    pub const fn new<Request>(policy: P, a: A, b: B) -> Self
    where
        P: Policy<Request>,
        A: Service<Request>,
        A::Error: Into<crate::BoxError>,
        B: Service<Request, Response = A::Response>,
        B::Error: Into<crate::BoxError>,
    {
        Self { policy, a, b }
    }
}
impl<P, A, B, Request> Service<Request> for Select<P, A, B>
where
    P: Policy<Request>,
    A: Service<Request>,
    A::Error: Into<crate::BoxError>,
    B: Service<Request, Response = A::Response>,
    B::Error: Into<crate::BoxError>,
{
    type Response = A::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<A::Future, B::Future>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Both inner services are always polled so neither is starved of
        // readiness; service A's error takes precedence when both fail.
        match (self.a.poll_ready(cx), self.b.poll_ready(cx)) {
            (Poll::Ready(Err(e)), _) => Poll::Ready(Err(e.into())),
            (_, Poll::Ready(Err(e))) => Poll::Ready(Err(e.into())),
            (Poll::Ready(Ok(())), Poll::Ready(Ok(()))) => Poll::Ready(Ok(())),
            _ => Poll::Pending,
        }
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // Only requests the policy can clone get a hedged B-side call.
        let b_fut = self
            .policy
            .clone_request(&request)
            .map(|cloned| self.b.call(cloned));
        ResponseFuture {
            a_fut: self.a.call(request),
            b_fut,
        }
    }
}
impl<AF, BF, T, AE, BE> Future for ResponseFuture<AF, BF>
where
    AF: Future<Output = Result<T, AE>>,
    AE: Into<crate::BoxError>,
    BF: Future<Output = Result<T, BE>>,
    BE: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        // Race the two futures; A is polled first, so it wins ties. The first
        // completion (success *or* error) is returned, and the losing future
        // is dropped with `self`.
        if let Poll::Ready(r) = this.a_fut.poll(cx) {
            return Poll::Ready(Ok(r.map_err(Into::into)?));
        }
        if let Some(b_fut) = this.b_fut.as_pin_mut() {
            if let Poll::Ready(r) = b_fut.poll(cx) {
                return Poll::Ready(Ok(r.map_err(Into::into)?));
            }
        }
        Poll::Pending
    }
}

14
vendor/tower/src/layer.rs vendored Normal file
View File

@@ -0,0 +1,14 @@
//! A collection of [`Layer`] based tower services
//!
//! [`Layer`]: crate::Layer
pub use tower_layer::{layer_fn, Layer, LayerFn};
/// Utilities for combining layers
///
/// [`Identity`]: crate::layer::util::Identity
/// [`Layer`]: crate::Layer
/// [`Stack`]: crate::layer::util::Stack
pub mod util {
    // Re-exported from `tower-layer` so users need only depend on `tower`.
    pub use tower_layer::{Identity, Stack};
}

228
vendor/tower/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,228 @@
#![warn(
missing_debug_implementations,
missing_docs,
rust_2018_idioms,
unreachable_pub
)]
#![forbid(unsafe_code)]
#![allow(elided_lifetimes_in_paths, clippy::type_complexity)]
#![cfg_attr(test, allow(clippy::float_cmp))]
#![cfg_attr(docsrs, feature(doc_cfg))]
// `rustdoc::broken_intra_doc_links` is checked on CI
//! `async fn(Request) -> Result<Response, Error>`
//!
//! # Overview
//!
//! Tower is a library of modular and reusable components for building
//! robust networking clients and servers.
//!
//! Tower provides a simple core abstraction, the [`Service`] trait, which
//! represents an asynchronous function taking a request and returning either a
//! response or an error. This abstraction can be used to model both clients and
//! servers.
//!
//! Generic components, like [`timeout`], [rate limiting], and [load balancing],
//! can be modeled as [`Service`]s that wrap some inner service and apply
//! additional behavior before or after the inner service is called. This allows
//! implementing these components in a protocol-agnostic, composable way. Typically,
//! such services are referred to as _middleware_.
//!
//! An additional abstraction, the [`Layer`] trait, is used to compose
//! middleware with [`Service`]s. If a [`Service`] can be thought of as an
//! asynchronous function from a request type to a response type, a [`Layer`] is
//! a function taking a [`Service`] of one type and returning a [`Service`] of a
//! different type. The [`ServiceBuilder`] type is used to add middleware to a
//! service by composing it with multiple [`Layer`]s.
//!
//! ## The Tower Ecosystem
//!
//! Tower is made up of the following crates:
//!
//! * [`tower`] (this crate)
//! * [`tower-service`]
//! * [`tower-layer`]
//! * [`tower-test`]
//!
//! Since the [`Service`] and [`Layer`] traits are important integration points
//! for all libraries using Tower, they are kept as stable as possible, and
//! breaking changes are made rarely. Therefore, they are defined in separate
//! crates, [`tower-service`] and [`tower-layer`]. This crate contains
//! re-exports of those core traits, implementations of commonly-used
//! middleware, and [utilities] for working with [`Service`]s and [`Layer`]s.
//! Finally, the [`tower-test`] crate provides tools for testing programs using
//! Tower.
//!
//! # Usage
//!
//! Tower provides an abstraction layer, and generic implementations of various
//! middleware. This means that the `tower` crate on its own does *not* provide
//! a working implementation of a network client or server. Instead, Tower's
//! [`Service` trait][`Service`] provides an integration point between
//! application code, libraries providing middleware implementations, and
//! libraries that implement servers and/or clients for various network
//! protocols.
//!
//! Depending on your particular use case, you might use Tower in several ways:
//!
//! * **Implementing application logic** for a networked program. You might
//! use the [`Service`] trait to model your application's behavior, and use
//! the middleware [provided by this crate](#modules) and by other libraries
//! to add functionality to clients and servers provided by one or more
//! protocol implementations.
//! * **Implementing middleware** to add custom behavior to network clients and
//! servers in a reusable manner. This might be general-purpose middleware
//! (and if it is, please consider releasing your middleware as a library for
//! other Tower users!) or application-specific behavior that needs to be
//! shared between multiple clients or servers.
//! * **Implementing a network protocol**. Libraries that implement network
//! protocols (such as HTTP) can depend on `tower-service` to use the
//! [`Service`] trait as an integration point between the protocol and user
//! code. For example, a client for some protocol might implement [`Service`],
//! allowing users to add arbitrary Tower middleware to those clients.
//! Similarly, a server might be created from a user-provided [`Service`].
//!
//! Additionally, when a network protocol requires functionality already
//! provided by existing Tower middleware, a protocol implementation might use
//! Tower middleware internally, as well as an integration point.
//!
//! ## Library Support
//!
//! A number of third-party libraries support Tower and the [`Service`] trait.
//! The following is an incomplete list of such libraries:
//!
//! * [`hyper`]: A fast and correct low-level HTTP implementation.
//! * [`tonic`]: A [gRPC-over-HTTP/2][grpc] implementation built on top of
//! [`hyper`]. See [here][tonic-examples] for examples of using [`tonic`] with
//! Tower.
//! * [`warp`]: A lightweight, composable web framework. See
//! [here][warp-service] for details on using [`warp`] with Tower.
//! * [`tower-lsp`]: implementations of the [Language
//! Server Protocol][lsp] based on Tower.
//!
//! [`hyper`]: https://crates.io/crates/hyper
//! [`tonic`]: https://crates.io/crates/tonic
//! [tonic-examples]: https://github.com/hyperium/tonic/tree/master/examples/src/tower
//! [grpc]: https://grpc.io
//! [`warp`]: https://crates.io/crates/warp
//! [warp-service]: https://docs.rs/warp/0.2.5/warp/fn.service.html
//! [`tower-lsp`]: https://crates.io/crates/tower-lsp
//! [lsp]: https://microsoft.github.io/language-server-protocol/
//!
//! If you're the maintainer of a crate that supports Tower, we'd love to add
//! your crate to this list! Please [open a PR] adding a brief description of
//! your library!
//!
//! ## Getting Started
//!
//! If you're brand new to Tower and want to start with the basics, we recommend you
//! check out some of our [guides].
//!
//! The various middleware implementations provided by this crate are feature
//! flagged, so that users can only compile the parts of Tower they need. By
//! default, all the optional middleware are disabled.
//!
//! To get started using all of Tower's optional middleware, add this to your
//! `Cargo.toml`:
//!
//! ```toml
//! tower = { version = "0.4", features = ["full"] }
//! ```
//!
//! Alternatively, you can only enable some features. For example, to enable
//! only the [`retry`] and [`timeout`] middleware, write:
//!
//! ```toml
//! tower = { version = "0.4", features = ["retry", "timeout"] }
//! ```
//!
//! See [here](#modules) for a complete list of all middleware provided by
//! Tower.
//!
//!
//! ## Supported Rust Versions
//!
//! Tower will keep a rolling MSRV (minimum supported Rust version) policy of **at
//! least** 6 months. When increasing the MSRV, the new Rust version must have been
//! released at least six months ago. The current MSRV is 1.64.0.
//!
//! [`Service`]: crate::Service
//! [`Layer`]: crate::Layer
//! [rate limiting]: crate::limit::rate
//! [load balancing]: crate::balance
//! [`ServiceBuilder`]: crate::ServiceBuilder
//! [utilities]: crate::ServiceExt
//! [`tower`]: https://crates.io/crates/tower
//! [`tower-service`]: https://crates.io/crates/tower-service
//! [`tower-layer`]: https://crates.io/crates/tower-layer
//! [`tower-test`]: https://crates.io/crates/tower-test
//! [`retry`]: crate::retry
//! [open a PR]: https://github.com/tower-rs/tower/compare
//! [guides]: https://github.com/tower-rs/tower/tree/master/guides
#[macro_use]
pub(crate) mod macros;
// Middleware modules. Each is gated behind a Cargo feature of the same
// (kebab-case) name, so users compile only the parts of Tower they need.
#[cfg(feature = "balance")]
pub mod balance;
#[cfg(feature = "buffer")]
pub mod buffer;
#[cfg(feature = "discover")]
pub mod discover;
#[cfg(feature = "filter")]
pub mod filter;
#[cfg(feature = "hedge")]
pub mod hedge;
#[cfg(feature = "limit")]
pub mod limit;
#[cfg(feature = "load")]
pub mod load;
#[cfg(feature = "load-shed")]
pub mod load_shed;
#[cfg(feature = "make")]
pub mod make;
#[cfg(feature = "ready-cache")]
pub mod ready_cache;
#[cfg(feature = "reconnect")]
pub mod reconnect;
#[cfg(feature = "retry")]
pub mod retry;
#[cfg(feature = "spawn-ready")]
pub mod spawn_ready;
#[cfg(feature = "steer")]
pub mod steer;
#[cfg(feature = "timeout")]
pub mod timeout;
#[cfg(feature = "util")]
pub mod util;
// These modules are always available, independent of feature flags.
pub mod builder;
pub mod layer;
// Convenience re-exports of the most commonly used items.
#[cfg(feature = "util")]
#[doc(inline)]
#[cfg_attr(docsrs, doc(cfg(feature = "util")))]
pub use self::util::{service_fn, ServiceExt};
#[doc(inline)]
pub use crate::builder::ServiceBuilder;
#[cfg(feature = "make")]
#[doc(inline)]
#[cfg_attr(docsrs, doc(cfg(feature = "make")))]
pub use crate::make::MakeService;
#[doc(inline)]
pub use tower_layer::Layer;
#[doc(inline)]
pub use tower_service::Service;
#[allow(unreachable_pub)]
#[cfg(any(feature = "balance", feature = "discover", feature = "make"))]
mod sealed {
    // `Sealed` is public within a private module: downstream crates can see
    // it in trait bounds but cannot name or implement it themselves.
    pub trait Sealed<T> {}
}
/// Alias for a type-erased error type.
///
/// A boxed [`std::error::Error`] that is also [`Send`] and [`Sync`], used by
/// middleware in this crate that must erase the concrete error type of the
/// services it wraps.
pub type BoxError = Box<dyn std::error::Error + Send + Sync>;

View File

@@ -0,0 +1,40 @@
//! [`Future`] types
//!
//! [`Future`]: std::future::Future
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tokio::sync::OwnedSemaphorePermit;
pin_project! {
    /// Future for the [`ConcurrencyLimit`] service.
    ///
    /// [`ConcurrencyLimit`]: crate::limit::ConcurrencyLimit
    #[derive(Debug)]
    pub struct ResponseFuture<T> {
        // The wrapped future produced by the inner service.
        #[pin]
        inner: T,
        // Keep this around so that it is dropped when the future completes
        _permit: OwnedSemaphorePermit,
    }
}
impl<T> ResponseFuture<T> {
    // Wraps `inner`, holding on to `_permit` so the semaphore slot is
    // released only when this future is dropped.
    pub(crate) fn new(inner: T, _permit: OwnedSemaphorePermit) -> ResponseFuture<T> {
        ResponseFuture { inner, _permit }
    }
}
impl<F, T, E> Future for ResponseFuture<F>
where
    F: Future<Output = Result<T, E>>,
{
    type Output = Result<T, E>;
    // Polling forwards to the wrapped future unchanged; the permit rides
    // along and is dropped together with `self`.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().inner.poll(cx)
    }
}

View File

@@ -0,0 +1,60 @@
use std::sync::Arc;
use super::ConcurrencyLimit;
use tokio::sync::Semaphore;
use tower_layer::Layer;
/// Enforces a limit on the concurrent number of requests the underlying
/// service can handle.
#[derive(Debug, Clone)]
pub struct ConcurrencyLimitLayer {
    // Maximum number of in-flight requests for each wrapped service.
    max: usize,
}
impl ConcurrencyLimitLayer {
    /// Create a new concurrency limit layer.
    ///
    /// `max` is the maximum number of requests each service built by this
    /// layer will process concurrently.
    pub const fn new(max: usize) -> Self {
        ConcurrencyLimitLayer { max }
    }
}
impl<S> Layer<S> for ConcurrencyLimitLayer {
    type Service = ConcurrencyLimit<S>;
    // Each call wraps `service` in a fresh `ConcurrencyLimit`, which
    // allocates its own semaphore (see `ConcurrencyLimit::new`), so the
    // limit is enforced per-service rather than globally.
    fn layer(&self, service: S) -> Self::Service {
        ConcurrencyLimit::new(service, self.max)
    }
}
/// Enforces a limit on the concurrent number of requests the underlying
/// service can handle.
///
/// Unlike [`ConcurrencyLimitLayer`], which enforces a per-service concurrency
/// limit, this layer accepts an owned semaphore (`Arc<Semaphore>`) which can be
/// shared across multiple services.
///
/// Cloning this layer will not create a new semaphore.
#[derive(Debug, Clone)]
pub struct GlobalConcurrencyLimitLayer {
    // Shared across every service built by this layer (and its clones).
    semaphore: Arc<Semaphore>,
}
impl GlobalConcurrencyLimitLayer {
    /// Create a new `GlobalConcurrencyLimitLayer`.
    ///
    /// Allocates a fresh semaphore with `max` permits.
    pub fn new(max: usize) -> Self {
        Self::with_semaphore(Arc::new(Semaphore::new(max)))
    }
    /// Create a new `GlobalConcurrencyLimitLayer` from an `Arc<Semaphore>`
    pub fn with_semaphore(semaphore: Arc<Semaphore>) -> Self {
        GlobalConcurrencyLimitLayer { semaphore }
    }
}
impl<S> Layer<S> for GlobalConcurrencyLimitLayer {
    type Service = ConcurrencyLimit<S>;
    // All wrapped services share this layer's semaphore, so the limit is
    // global across them rather than per-service.
    fn layer(&self, service: S) -> Self::Service {
        ConcurrencyLimit::with_semaphore(service, self.semaphore.clone())
    }
}

View File

@@ -0,0 +1,10 @@
//! Limit the max number of requests being concurrently processed.
pub mod future;
mod layer;
mod service;
// Flatten the public API: the layer and service types are re-exported at
// the module root; `layer` and `service` stay private implementation modules.
pub use self::{
    layer::{ConcurrencyLimitLayer, GlobalConcurrencyLimitLayer},
    service::ConcurrencyLimit,
};

View File

@@ -0,0 +1,117 @@
use super::future::ResponseFuture;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
use tokio_util::sync::PollSemaphore;
use tower_service::Service;
use std::{
sync::Arc,
task::{ready, Context, Poll},
};
/// Enforces a limit on the concurrent number of requests the underlying
/// service can handle.
#[derive(Debug)]
pub struct ConcurrencyLimit<T> {
    // The wrapped service.
    inner: T,
    // Pollable wrapper around the (possibly shared) semaphore that bounds
    // the number of in-flight requests.
    semaphore: PollSemaphore,
    /// The currently acquired semaphore permit, if there is sufficient
    /// concurrency to send a new request.
    ///
    /// The permit is acquired in `poll_ready`, and taken in `call` when sending
    /// a new request.
    permit: Option<OwnedSemaphorePermit>,
}
impl<T> ConcurrencyLimit<T> {
    /// Create a new concurrency limiter.
    ///
    /// `max` is the number of requests that may be in flight at once; the
    /// backing semaphore is private to this service.
    pub fn new(inner: T, max: usize) -> Self {
        Self::with_semaphore(inner, Arc::new(Semaphore::new(max)))
    }
    /// Create a new concurrency limiter with a provided shared semaphore
    pub fn with_semaphore(inner: T, semaphore: Arc<Semaphore>) -> Self {
        ConcurrencyLimit {
            inner,
            semaphore: PollSemaphore::new(semaphore),
            permit: None,
        }
    }
    /// Get a reference to the inner service
    pub fn get_ref(&self) -> &T {
        &self.inner
    }
    /// Get a mutable reference to the inner service
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.inner
    }
    /// Consume `self`, returning the inner service
    pub fn into_inner(self) -> T {
        self.inner
    }
}
impl<S, Request> Service<Request> for ConcurrencyLimit<S>
where
    S: Service<Request>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = ResponseFuture<S::Future>;
    // Readiness requires both a free concurrency slot (a semaphore permit)
    // and the inner service itself being ready.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // If we haven't already acquired a permit from the semaphore, try to
        // acquire one first.
        if self.permit.is_none() {
            self.permit = ready!(self.semaphore.poll_acquire(cx));
            debug_assert!(
                self.permit.is_some(),
                "ConcurrencyLimit semaphore is never closed, so `poll_acquire` \
                should never fail",
            );
        }
        // Once we've acquired a permit (or if we already had one), poll the
        // inner service.
        self.inner.poll_ready(cx)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // Take the permit
        let permit = self
            .permit
            .take()
            .expect("max requests in-flight; poll_ready must be called first");
        // Call the inner service
        let future = self.inner.call(request);
        // The permit travels with the response future and frees its slot
        // when that future is dropped.
        ResponseFuture::new(future, permit)
    }
}
impl<T: Clone> Clone for ConcurrencyLimit<T> {
fn clone(&self) -> Self {
// Since we hold an `OwnedSemaphorePermit`, we can't derive `Clone`.
// Instead, when cloning the service, create a new service with the
// same semaphore, but with the permit in the un-acquired state.
Self {
inner: self.inner.clone(),
semaphore: self.semaphore.clone(),
permit: None,
}
}
}
#[cfg(feature = "load")]
impl<S> crate::load::Load for ConcurrencyLimit<S>
where
    S: crate::load::Load,
{
    type Metric = S::Metric;
    // Pass through the inner service's load measurement unchanged.
    fn load(&self) -> Self::Metric {
        self.inner.load()
    }
}

9
vendor/tower/src/limit/mod.rs vendored Normal file
View File

@@ -0,0 +1,9 @@
//! Tower middleware for limiting requests.
pub mod concurrency;
pub mod rate;
// Re-export the most commonly used limiter types at the module root.
pub use self::{
    concurrency::{ConcurrencyLimit, ConcurrencyLimitLayer, GlobalConcurrencyLimitLayer},
    rate::{RateLimit, RateLimitLayer},
};

26
vendor/tower/src/limit/rate/layer.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
use super::{Rate, RateLimit};
use std::time::Duration;
use tower_layer::Layer;
/// Enforces a rate limit on the number of requests the underlying
/// service can handle over a period of time.
#[derive(Debug, Clone)]
pub struct RateLimitLayer {
    // The requests-per-period budget applied to each wrapped service.
    rate: Rate,
}
impl RateLimitLayer {
    /// Create new rate limit layer.
    ///
    /// Allows up to `num` requests per `per` interval.
    pub const fn new(num: u64, per: Duration) -> Self {
        let rate = Rate::new(num, per);
        RateLimitLayer { rate }
    }
}
impl<S> Layer<S> for RateLimitLayer {
    type Service = RateLimit<S>;
    // `Rate` is `Copy`, so each wrapped service receives an independent
    // budget with the same configuration.
    fn layer(&self, service: S) -> Self::Service {
        RateLimit::new(service, self.rate)
    }
}

8
vendor/tower/src/limit/rate/mod.rs vendored Normal file
View File

@@ -0,0 +1,8 @@
//! Limit the rate at which requests are processed.
mod layer;
#[allow(clippy::module_inception)]
mod rate;
mod service;
// Public API: the layer, the `Rate` configuration type, and the service.
pub use self::{layer::RateLimitLayer, rate::Rate, service::RateLimit};

30
vendor/tower/src/limit/rate/rate.rs vendored Normal file
View File

@@ -0,0 +1,30 @@
use std::time::Duration;
/// A rate of requests per time period.
#[derive(Debug, Copy, Clone)]
pub struct Rate {
    num: u64,
    per: Duration,
}

impl Rate {
    /// Create a new rate.
    ///
    /// Allows up to `num` requests per `per` interval.
    ///
    /// # Panics
    ///
    /// This function panics if `num` or `per` is 0.
    pub const fn new(num: u64, per: Duration) -> Self {
        assert!(num > 0);
        assert!(per.as_nanos() > 0);
        Self { num, per }
    }

    /// The length of one period.
    pub(crate) fn per(&self) -> Duration {
        self.per
    }

    /// The number of requests allowed per period.
    pub(crate) fn num(&self) -> u64 {
        self.num
    }
}

129
vendor/tower/src/limit/rate/service.rs vendored Normal file
View File

@@ -0,0 +1,129 @@
use super::Rate;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tokio::time::{Instant, Sleep};
use tower_service::Service;
/// Enforces a rate limit on the number of requests the underlying
/// service can handle over a period of time.
#[derive(Debug)]
pub struct RateLimit<T> {
    // The wrapped service.
    inner: T,
    // Configured requests-per-period budget.
    rate: Rate,
    // Where we are within the current period.
    state: State,
    // Timer used while rate limited; created eagerly and reset in place
    // (see `new`) rather than re-allocated each period.
    sleep: Pin<Box<Sleep>>,
}
#[derive(Debug)]
enum State {
    // The service has hit its limit
    Limited,
    // Within the current period: `rem` calls remain until `until`.
    Ready { until: Instant, rem: u64 },
}
impl<T> RateLimit<T> {
    /// Create a new rate limiter
    pub fn new(inner: T, rate: Rate) -> Self {
        let now = Instant::now();
        RateLimit {
            inner,
            // Start with a full budget in a period that has already expired,
            // so the first `call` resets the window from "now".
            state: State::Ready {
                until: now,
                rem: rate.num(),
            },
            // The sleep won't actually be used with this duration, but
            // we create it eagerly so that we can reset it in place rather than
            // `Box::pin`ning a new `Sleep` every time we need one.
            sleep: Box::pin(tokio::time::sleep_until(now)),
            rate,
        }
    }

    /// Get a reference to the inner service
    pub fn get_ref(&self) -> &T {
        &self.inner
    }

    /// Get a mutable reference to the inner service
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.inner
    }

    /// Consume `self`, returning the inner service
    pub fn into_inner(self) -> T {
        self.inner
    }
}
impl<S, Request> Service<Request> for RateLimit<S>
where
    S: Service<Request>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        match self.state {
            // Budget remains in the current period; only the inner
            // service's readiness matters.
            State::Ready { .. } => return self.inner.poll_ready(cx),
            State::Limited => {
                // Stay unready until the period's sleep timer elapses.
                if Pin::new(&mut self.sleep).poll(cx).is_pending() {
                    tracing::trace!("rate limit exceeded; sleeping.");
                    return Poll::Pending;
                }
            }
        }
        // The sleep completed: start a fresh period with a full budget.
        self.state = State::Ready {
            until: Instant::now() + self.rate.per(),
            rem: self.rate.num(),
        };
        self.inner.poll_ready(cx)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        match self.state {
            State::Ready { mut until, mut rem } => {
                let now = Instant::now();
                // If the period has elapsed, reset it.
                if now >= until {
                    until = now + self.rate.per();
                    rem = self.rate.num();
                }
                if rem > 1 {
                    rem -= 1;
                    self.state = State::Ready { until, rem };
                } else {
                    // This request consumes the last unit of budget.
                    // The service is disabled until further notice
                    // Reset the sleep future in place, so that we don't have to
                    // deallocate the existing box and allocate a new one.
                    self.sleep.as_mut().reset(until);
                    self.state = State::Limited;
                }
                // Call the inner future
                self.inner.call(request)
            }
            State::Limited => panic!("service not ready; poll_ready must be called first"),
        }
    }
}
#[cfg(feature = "load")]
impl<S> crate::load::Load for RateLimit<S>
where
    S: crate::load::Load,
{
    type Metric = S::Metric;
    // Pass through the inner service's load measurement unchanged.
    fn load(&self) -> Self::Metric {
        self.inner.load()
    }
}

94
vendor/tower/src/load/completion.rs vendored Normal file
View File

@@ -0,0 +1,94 @@
//! Application-specific request completion semantics.
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
/// Attaches `H`-typed completion tracker to `V` typed values.
///
/// Handles (of type `H`) are intended to be RAII guards that primarily implement [`Drop`] and update
/// load metric state as they are dropped. This trait allows implementors to "forward" the handle
/// to later parts of the request-handling pipeline, so that the handle is only dropped when the
/// request has truly completed.
///
/// This utility allows load metrics to have a protocol-agnostic means to track streams past their
/// initial response future. For example, if `V` represents an HTTP response type, an
/// implementation could add `H`-typed handles to each response's extensions to detect when all the
/// response's extensions have been dropped.
///
/// A base `impl<H, V> TrackCompletion<H, V> for CompleteOnResponse` is provided to drop the handle
/// once the response future is resolved. This is appropriate when a response is discrete and
/// cannot comprise multiple messages.
///
/// In many cases, the `Output` type is simply `V`. However, [`TrackCompletion`] may alter the type
/// in order to instrument it appropriately. For example, an HTTP [`TrackCompletion`] may modify
/// the body type: so a [`TrackCompletion`] that takes values of type
/// [`http::Response<A>`][response] may output values of type [`http::Response<B>`][response].
///
/// [response]: https://docs.rs/http/latest/http/response/struct.Response.html
pub trait TrackCompletion<H, V>: Clone {
    /// The instrumented value type.
    type Output;
    /// Attaches a `H`-typed handle to a `V`-typed value.
    ///
    /// Implementations decide where the handle lives; it should be dropped
    /// only once the request has truly completed.
    fn track_completion(&self, handle: H, value: V) -> Self::Output;
}
/// A [`TrackCompletion`] implementation that considers the request completed when the response
/// future is resolved.
///
/// This is the simplest tracker: the handle is dropped as soon as the
/// response value is produced.
#[derive(Clone, Copy, Debug, Default)]
#[non_exhaustive]
pub struct CompleteOnResponse;
pin_project! {
    /// Attaches a `C`-typed completion tracker to the result of an `F`-typed [`Future`].
    #[derive(Debug)]
    pub struct TrackCompletionFuture<F, C, H> {
        // The wrapped response future.
        #[pin]
        future: F,
        // `Some` until the future resolves successfully, at which point the
        // handle is taken and forwarded to the completion tracker.
        handle: Option<H>,
        // The tracker that receives the handle and the response value.
        completion: C,
    }
}
// ===== impl TrackCompletionFuture =====
impl<F, C, H> TrackCompletionFuture<F, C, H> {
    /// Wraps a future, propagating the tracker into its value if successful.
    pub const fn new(completion: C, handle: H, future: F) -> Self {
        TrackCompletionFuture {
            future,
            completion,
            handle: Some(handle),
        }
    }
}
impl<F, C, H, T, E> Future for TrackCompletionFuture<F, C, H>
where
    F: Future<Output = Result<T, E>>,
    C: TrackCompletion<H, T>,
{
    type Output = Result<C::Output, E>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();

        match this.future.poll(cx) {
            Poll::Pending => Poll::Pending,
            // Errors pass through untouched; the handle stays in place and
            // is dropped together with the future itself.
            Poll::Ready(Err(e)) => Poll::Ready(Err(e)),
            // On success, hand the handle and the response to the tracker.
            Poll::Ready(Ok(rsp)) => {
                let h = this.handle.take().expect("handle");
                Poll::Ready(Ok(this.completion.track_completion(h, rsp)))
            }
        }
    }
}
// ===== CompleteOnResponse =====
impl<H, V> TrackCompletion<H, V> for CompleteOnResponse {
    type Output = V;
    // Drop the handle immediately: for this tracker, producing the response
    // value is by definition the end of the request.
    fn track_completion(&self, handle: H, value: V) -> V {
        drop(handle);
        value
    }
}

79
vendor/tower/src/load/constant.rs vendored Normal file
View File

@@ -0,0 +1,79 @@
//! A constant [`Load`] implementation.
#[cfg(feature = "discover")]
use crate::discover::{Change, Discover};
#[cfg(feature = "discover")]
use futures_core::Stream;
#[cfg(feature = "discover")]
use std::{pin::Pin, task::ready};
use super::Load;
use pin_project_lite::pin_project;
use std::task::{Context, Poll};
use tower_service::Service;
pin_project! {
    #[derive(Debug)]
    /// Wraps a type so that it implements [`Load`] and returns a constant load metric.
    ///
    /// This load estimator is primarily useful for testing.
    pub struct Constant<T, M> {
        // The wrapped service (or, with the `discover` feature, stream).
        inner: T,
        // The fixed metric reported by every `load()` call.
        load: M,
    }
}
// ===== impl Constant =====
impl<T, M: Copy> Constant<T, M> {
    /// Wraps a `T`-typed service with a constant `M`-typed load metric.
    pub const fn new(inner: T, load: M) -> Self {
        Self { inner, load }
    }
}
impl<T, M: Copy + PartialOrd> Load for Constant<T, M> {
    type Metric = M;
    // Always reports the same value, regardless of the service's state.
    fn load(&self) -> M {
        self.load
    }
}
impl<S, M, Request> Service<Request> for Constant<S, M>
where
    S: Service<Request>,
    M: Copy,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;
    // `Constant` is transparent as a service: both readiness and calls are
    // forwarded to the inner service unchanged.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }
    fn call(&mut self, req: Request) -> Self::Future {
        self.inner.call(req)
    }
}
/// Proxies [`Discover`] such that all changes are wrapped with a constant load.
#[cfg(feature = "discover")]
impl<D: Discover + Unpin, M: Copy> Stream for Constant<D, M> {
    type Item = Result<Change<D::Key, Constant<D::Service, M>>, D::Error>;
    /// Yields the next discovery change set.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        use self::Change::*;
        let this = self.project();
        let change = match ready!(Pin::new(this.inner).poll_discover(cx)).transpose()? {
            // The underlying discovery stream is exhausted.
            None => return Poll::Ready(None),
            // Newly inserted services get the same constant metric.
            Some(Insert(k, svc)) => Insert(k, Constant::new(svc, *this.load)),
            // Removals pass through untouched.
            Some(Remove(k)) => Remove(k),
        };
        Poll::Ready(Some(Ok(change)))
    }
}

89
vendor/tower/src/load/mod.rs vendored Normal file
View File

@@ -0,0 +1,89 @@
//! Service load measurement
//!
//! This module provides the [`Load`] trait, which allows measuring how loaded a service is.
//! It also provides several wrapper types that measure load in different ways:
//!
//! - [`Constant`] — Always returns the same constant load value for a service.
//! - [`PendingRequests`] — Measures load by tracking the number of in-flight requests.
//! - [`PeakEwma`] — Measures load using a moving average of the peak latency for the service.
//!
//! In general, you will want to use one of these when using the types in [`tower::balance`] which
//! balance services depending on their load. Which load metric to use depends on your exact
//! use-case, but the ones above should get you quite far!
//!
//! When the `discover` feature is enabled, wrapper types for [`Discover`] that
//! wrap the discovered services with the given load estimator are also provided.
//!
//! # When does a request complete?
//!
//! For many applications, the request life-cycle is relatively simple: when a service responds to
//! a request, that request is done, and the system can forget about it. However, for some
//! applications, the service may respond to the initial request while other parts of the system
//! are still acting on that request. In such an application, the system load must take these
//! requests into account as well, or risk the system underestimating its own load.
//!
//! To support these use-cases, the load estimators in this module are parameterized by the
//! [`TrackCompletion`] trait, with [`CompleteOnResponse`] as the default type. The behavior of
//! [`CompleteOnResponse`] is what you would normally expect for a request-response cycle: when the
//! response is produced, the request is considered "finished", and load goes down. This can be
//! overridden by your own user-defined type to track more complex request completion semantics. See
//! the documentation for [`completion`] for more details.
//!
//! # Examples
//!
//! ```rust
//! # #[cfg(feature = "util")]
//! use tower::util::ServiceExt;
//! # #[cfg(feature = "util")]
//! use tower::{load::Load, Service};
//! # #[cfg(feature = "util")]
//! async fn simple_balance<S1, S2, R>(
//! svc1: &mut S1,
//! svc2: &mut S2,
//! request: R
//! ) -> Result<S1::Response, S1::Error>
//! where
//! S1: Load + Service<R>,
//! S2: Load<Metric = S1::Metric> + Service<R, Response = S1::Response, Error = S1::Error>
//! {
//! if svc1.load() < svc2.load() {
//! svc1.ready().await?.call(request).await
//! } else {
//! svc2.ready().await?.call(request).await
//! }
//! }
//! ```
//!
//! [`tower::balance`]: crate::balance
//! [`Discover`]: crate::discover::Discover
//! [`CompleteOnResponse`]: crate::load::completion::CompleteOnResponse
// TODO: a custom completion example would be good here
pub mod completion;
mod constant;
pub mod peak_ewma;
pub mod pending_requests;
// `Constant` is only re-exported at the module root; the other estimators
// also keep public modules for their auxiliary types.
pub use self::{
    completion::{CompleteOnResponse, TrackCompletion},
    constant::Constant,
    peak_ewma::PeakEwma,
    pending_requests::PendingRequests,
};
// The discover-wrapping variants only exist with the `discover` feature.
#[cfg(feature = "discover")]
pub use self::{peak_ewma::PeakEwmaDiscover, pending_requests::PendingRequestsDiscover};
/// Types that implement this trait can give an estimate of how loaded they are.
///
/// See the module documentation for more details.
pub trait Load {
    /// A comparable load metric.
    ///
    /// Lesser values indicate that the service is less loaded, and should be preferred for new
    /// requests over another service with a higher value.
    type Metric: PartialOrd;
    /// Estimate the service's current load.
    fn load(&self) -> Self::Metric;
}

405
vendor/tower/src/load/peak_ewma.rs vendored Normal file
View File

@@ -0,0 +1,405 @@
//! A `Load` implementation that measures load using the PeakEWMA response latency.
#[cfg(feature = "discover")]
use crate::discover::{Change, Discover};
#[cfg(feature = "discover")]
use futures_core::Stream;
#[cfg(feature = "discover")]
use pin_project_lite::pin_project;
#[cfg(feature = "discover")]
use std::{pin::Pin, task::ready};
use super::completion::{CompleteOnResponse, TrackCompletion, TrackCompletionFuture};
use super::Load;
use std::task::{Context, Poll};
use std::{
sync::{Arc, Mutex},
time::Duration,
};
use tokio::time::Instant;
use tower_service::Service;
use tracing::trace;
/// Measures the load of the underlying service using Peak-EWMA load measurement.
///
/// [`PeakEwma`] implements [`Load`] with the [`Cost`] metric that estimates the amount of
/// pending work to an endpoint. Work is calculated by multiplying the
/// exponentially-weighted moving average (EWMA) of response latencies by the number of
/// pending requests. The Peak-EWMA algorithm is designed to be especially sensitive to
/// worst-case latencies. Over time, the peak latency value decays towards the moving
/// average of latencies to the endpoint.
///
/// When no latency information has been measured for an endpoint, an arbitrary default
/// RTT of 1 second is used to prevent the endpoint from being overloaded before a
/// meaningful baseline can be established.
///
/// ## Note
///
/// This is derived from [Finagle][finagle], which is distributed under the Apache V2
/// license. Copyright 2017, Twitter Inc.
///
/// [finagle]:
/// https://github.com/twitter/finagle/blob/9cc08d15216497bb03a1cafda96b7266cfbbcff1/finagle-core/src/main/scala/com/twitter/finagle/loadbalancer/PeakEwma.scala
#[derive(Debug)]
pub struct PeakEwma<S, C = CompleteOnResponse> {
    // The wrapped service.
    service: S,
    // EWMA decay period, in nanoseconds.
    decay_ns: f64,
    // Shared with one `Handle` per in-flight request (see `handle()`), so
    // the `Arc` strong count doubles as a pending-request counter.
    rtt_estimate: Arc<Mutex<RttEstimate>>,
    // Strategy deciding when a request counts as complete.
    completion: C,
}
#[cfg(feature = "discover")]
pin_project! {
    /// Wraps a `D`-typed stream of discovered services with `PeakEwma`.
    #[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
    #[derive(Debug)]
    pub struct PeakEwmaDiscover<D, C = CompleteOnResponse> {
        // The wrapped discovery stream.
        #[pin]
        discover: D,
        // Decay period applied to every discovered service, in nanoseconds.
        decay_ns: f64,
        // Initial RTT estimate given to each newly inserted service.
        default_rtt: Duration,
        // Completion strategy cloned into each discovered service.
        completion: C,
    }
}
/// Represents the relative cost of communicating with a service.
///
/// The underlying value estimates the amount of pending work to a service: the Peak-EWMA
/// latency estimate multiplied by the number of pending requests.
#[derive(Copy, Clone, Debug, PartialEq, PartialOrd)]
pub struct Cost(f64);
/// Tracks an in-flight request and updates the RTT-estimate on Drop.
#[derive(Debug)]
pub struct Handle {
    // When the tracked request was dispatched.
    sent_at: Instant,
    // Copied from the owning `PeakEwma` at dispatch time.
    decay_ns: f64,
    // Shared estimate; holding this clone also keeps the `Arc` strong count
    // in sync with the number of in-flight requests.
    rtt_estimate: Arc<Mutex<RttEstimate>>,
}
/// Holds the current RTT estimate and the last time this value was updated.
#[derive(Debug)]
struct RttEstimate {
    update_at: Instant,
    rtt_ns: f64,
}
// Conversion factor between milliseconds and the `f64` nanosecond domain
// used by the estimator.
const NANOS_PER_MILLI: f64 = 1_000_000.0;
// ===== impl PeakEwma =====
impl<S, C> PeakEwma<S, C> {
    /// Wraps an `S`-typed service so that its load is tracked by the EWMA of its peak latency.
    ///
    /// `default_rtt` seeds the estimate before any responses have been
    /// observed; `decay_ns` controls how quickly peaks decay toward the
    /// moving average.
    pub fn new(service: S, default_rtt: Duration, decay_ns: f64, completion: C) -> Self {
        debug_assert!(decay_ns > 0.0, "decay_ns must be positive");
        Self {
            service,
            decay_ns,
            rtt_estimate: Arc::new(Mutex::new(RttEstimate::new(nanos(default_rtt)))),
            completion,
        }
    }
    // Creates the per-request tracker; cloning the `Arc` here is what makes
    // `Arc::strong_count` usable as an in-flight request count in `load()`.
    fn handle(&self) -> Handle {
        Handle {
            decay_ns: self.decay_ns,
            sent_at: Instant::now(),
            rtt_estimate: self.rtt_estimate.clone(),
        }
    }
}
impl<S, C, Request> Service<Request> for PeakEwma<S, C>
where
    S: Service<Request>,
    C: TrackCompletion<Handle, S::Response>,
{
    type Response = C::Output;
    type Error = S::Error;
    type Future = TrackCompletionFuture<S::Future, C, Handle>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }
    // Each call attaches a fresh `Handle` to the response future, so the
    // RTT estimate is updated when the request completes.
    fn call(&mut self, req: Request) -> Self::Future {
        TrackCompletionFuture::new(
            self.completion.clone(),
            self.handle(),
            self.service.call(req),
        )
    }
}
impl<S, C> Load for PeakEwma<S, C> {
    type Metric = Cost;
    fn load(&self) -> Self::Metric {
        // Each in-flight request holds a clone of `rtt_estimate` (via its
        // `Handle`), so the strong count minus our own reference counts the
        // pending requests.
        let pending = Arc::strong_count(&self.rtt_estimate) as u32 - 1;
        // Update the RTT estimate to account for decay since the last update.
        // If an estimate has not been established, a default is provided
        let estimate = self.update_estimate();
        // `pending + 1` keeps the cost nonzero even for an idle endpoint.
        let cost = Cost(estimate * f64::from(pending + 1));
        trace!(
            "load estimate={:.0}ms pending={} cost={:?}",
            estimate / NANOS_PER_MILLI,
            pending,
            cost,
        );
        cost
    }
}
impl<S, C> PeakEwma<S, C> {
    // Decays the shared estimate up to "now" and returns it, in nanoseconds.
    fn update_estimate(&self) -> f64 {
        let mut rtt = self.rtt_estimate.lock().expect("peak ewma prior_estimate");
        rtt.decay(self.decay_ns)
    }
}
// ===== impl PeakEwmaDiscover =====
#[cfg(feature = "discover")]
impl<D, C> PeakEwmaDiscover<D, C> {
    /// Wraps a `D`-typed [`Discover`] so that services have a [`PeakEwma`] load metric.
    ///
    /// The provided `default_rtt` is used as the default RTT estimate for newly
    /// added services.
    ///
    /// The `decay` value determines over what time period a RTT estimate should
    /// decay.
    pub fn new<Request>(discover: D, default_rtt: Duration, decay: Duration, completion: C) -> Self
    where
        D: Discover,
        D::Service: Service<Request>,
        C: TrackCompletion<Handle, <D::Service as Service<Request>>::Response>,
    {
        PeakEwmaDiscover {
            discover,
            decay_ns: nanos(decay),
            default_rtt,
            completion,
        }
    }
}
#[cfg(feature = "discover")]
impl<D, C> Stream for PeakEwmaDiscover<D, C>
where
D: Discover,
C: Clone,
{
type Item = Result<Change<D::Key, PeakEwma<D::Service, C>>, D::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.project();
let change = match ready!(this.discover.poll_discover(cx)).transpose()? {
None => return Poll::Ready(None),
Some(Change::Remove(k)) => Change::Remove(k),
Some(Change::Insert(k, svc)) => {
let peak_ewma = PeakEwma::new(
svc,
*this.default_rtt,
*this.decay_ns,
this.completion.clone(),
);
Change::Insert(k, peak_ewma)
}
};
Poll::Ready(Some(Ok(change)))
}
}
// ===== impl RttEstimate =====
impl RttEstimate {
    // Creates an estimate seeded with `rtt_ns` (the configured default RTT).
    fn new(rtt_ns: f64) -> Self {
        debug_assert!(0.0 < rtt_ns, "rtt must be positive");
        Self {
            rtt_ns,
            update_at: Instant::now(),
        }
    }
    /// Decays the RTT estimate with a decay period of `decay_ns`.
    fn decay(&mut self, decay_ns: f64) -> f64 {
        // Updates with a 0 duration so that the estimate decays towards 0.
        let now = Instant::now();
        self.update(now, now, decay_ns)
    }
    /// Updates the Peak-EWMA RTT estimate.
    ///
    /// The elapsed time from `sent_at` to `recv_at` is added
    fn update(&mut self, sent_at: Instant, recv_at: Instant, decay_ns: f64) -> f64 {
        debug_assert!(
            sent_at <= recv_at,
            "recv_at={:?} after sent_at={:?}",
            recv_at,
            sent_at
        );
        let rtt = nanos(recv_at.saturating_duration_since(sent_at));
        let now = Instant::now();
        debug_assert!(
            self.update_at <= now,
            "update_at={:?} in the future",
            self.update_at
        );
        self.rtt_ns = if self.rtt_ns < rtt {
            // For Peak-EWMA, always use the worst-case (peak) value as the estimate for
            // subsequent requests.
            trace!(
                "update peak rtt={}ms prior={}ms",
                rtt / NANOS_PER_MILLI,
                self.rtt_ns / NANOS_PER_MILLI,
            );
            rtt
        } else {
            // When an RTT is observed that is less than the estimated RTT, we decay the
            // prior estimate according to how much time has elapsed since the last
            // update. The inverse of the decay is used to scale the estimate towards the
            // observed RTT value.
            let elapsed = nanos(now.saturating_duration_since(self.update_at));
            let decay = (-elapsed / decay_ns).exp();
            let recency = 1.0 - decay;
            let next_estimate = (self.rtt_ns * decay) + (rtt * recency);
            // NOTE(review): the value logged as "decay" below is the estimate
            // delta (prior - next), not the decay factor itself.
            trace!(
                "update rtt={:03.0}ms decay={:06.0}ns; next={:03.0}ms",
                rtt / NANOS_PER_MILLI,
                self.rtt_ns - next_estimate,
                next_estimate / NANOS_PER_MILLI,
            );
            next_estimate
        };
        self.update_at = now;
        self.rtt_ns
    }
}
// ===== impl Handle =====
impl Drop for Handle {
    // Records the elapsed request time into the shared RTT estimate when the
    // in-flight request completes (i.e. when this guard is dropped).
    fn drop(&mut self) {
        // Capture the completion time before acquiring the lock.
        let completed_at = Instant::now();
        // If the mutex is poisoned (another thread panicked while holding it),
        // silently skip the sample rather than panicking inside `drop`.
        if let Ok(mut estimate) = self.rtt_estimate.lock() {
            estimate.update(self.sent_at, completed_at, self.decay_ns);
        }
    }
}
// ===== impl Cost =====
// Utility that converts durations to nanos in f64.
//
// Due to a lossy transformation, the maximum value that can be represented is ~585 years,
// which, I hope, is more than enough to represent request latencies.
fn nanos(d: Duration) -> f64 {
    const NANOS_PER_SEC: u64 = 1_000_000_000;
    // Saturate the whole-second product in u64 *before* the lossy f64 cast so
    // that extreme durations clamp instead of wrapping.
    let whole_secs_ns = d.as_secs().saturating_mul(NANOS_PER_SEC) as f64;
    let frac_ns = f64::from(d.subsec_nanos());
    whole_secs_ns + frac_ns
}
#[cfg(test)]
mod tests {
    use std::{future, time::Duration};
    use tokio::time;
    use tokio_test::{assert_ready, assert_ready_ok, task};
    use super::*;
    // A service that responds immediately; used to exercise load tracking only.
    struct Svc;
    impl Service<()> for Svc {
        type Response = ();
        type Error = ();
        type Future = future::Ready<Result<(), ()>>;
        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), ()>> {
            Poll::Ready(Ok(()))
        }
        fn call(&mut self, (): ()) -> Self::Future {
            future::ready(Ok(()))
        }
    }
    /// The default RTT estimate decays, so that new nodes are considered if the
    /// default RTT is too high.
    #[tokio::test]
    async fn default_decay() {
        // Pause tokio's clock so `advance` controls elapsed time deterministically.
        time::pause();
        let svc = PeakEwma::new(
            Svc,
            Duration::from_millis(10),
            NANOS_PER_MILLI * 1_000.0,
            CompleteOnResponse,
        );
        let Cost(load) = svc.load();
        assert_eq!(load, 10.0 * NANOS_PER_MILLI);
        time::advance(Duration::from_millis(100)).await;
        let Cost(load) = svc.load();
        assert!(9.0 * NANOS_PER_MILLI < load && load < 10.0 * NANOS_PER_MILLI);
        time::advance(Duration::from_millis(100)).await;
        let Cost(load) = svc.load();
        assert!(8.0 * NANOS_PER_MILLI < load && load < 9.0 * NANOS_PER_MILLI);
    }
    // The default RTT estimate decays, so that new nodes are considered if the default RTT is too
    // high.
    #[tokio::test]
    async fn compound_decay() {
        time::pause();
        let mut svc = PeakEwma::new(
            Svc,
            Duration::from_millis(20),
            NANOS_PER_MILLI * 1_000.0,
            CompleteOnResponse,
        );
        assert_eq!(svc.load(), Cost(20.0 * NANOS_PER_MILLI));
        time::advance(Duration::from_millis(100)).await;
        let mut rsp0 = task::spawn(svc.call(()));
        assert!(svc.load() > Cost(20.0 * NANOS_PER_MILLI));
        time::advance(Duration::from_millis(100)).await;
        let mut rsp1 = task::spawn(svc.call(()));
        assert!(svc.load() > Cost(40.0 * NANOS_PER_MILLI));
        time::advance(Duration::from_millis(100)).await;
        // Completing a response records its RTT and removes it from pending.
        let () = assert_ready_ok!(rsp0.poll());
        assert_eq!(svc.load(), Cost(400_000_000.0));
        time::advance(Duration::from_millis(100)).await;
        let () = assert_ready_ok!(rsp1.poll());
        assert_eq!(svc.load(), Cost(200_000_000.0));
        // Check that values decay as time elapses
        time::advance(Duration::from_secs(1)).await;
        assert!(svc.load() < Cost(100_000_000.0));
        time::advance(Duration::from_secs(10)).await;
        assert!(svc.load() < Cost(100_000.0));
    }
    #[test]
    fn nanos() {
        assert_eq!(super::nanos(Duration::new(0, 0)), 0.0);
        assert_eq!(super::nanos(Duration::new(0, 123)), 123.0);
        assert_eq!(super::nanos(Duration::new(1, 23)), 1_000_000_023.0);
        assert_eq!(
            super::nanos(Duration::new(u64::MAX, 999_999_999)),
            18446744074709553000.0
        );
    }
}

View File

@@ -0,0 +1,219 @@
//! A [`Load`] implementation that measures load using the number of in-flight requests.
#[cfg(feature = "discover")]
use crate::discover::{Change, Discover};
#[cfg(feature = "discover")]
use futures_core::Stream;
#[cfg(feature = "discover")]
use pin_project_lite::pin_project;
#[cfg(feature = "discover")]
use std::{pin::Pin, task::ready};
use super::completion::{CompleteOnResponse, TrackCompletion, TrackCompletionFuture};
use super::Load;
use std::sync::Arc;
use std::task::{Context, Poll};
use tower_service::Service;
/// Measures the load of the underlying service using the number of currently-pending requests.
#[derive(Debug)]
pub struct PendingRequests<S, C = CompleteOnResponse> {
    // The wrapped service.
    service: S,
    // Shared counter; each in-flight request holds one extra reference.
    ref_count: RefCount,
    // Strategy for deciding when a dispatched request counts as complete.
    completion: C,
}
/// Shared between instances of [`PendingRequests`] and [`Handle`] to track active references.
// The `Arc`'s strong count is the metric itself; no data is stored.
#[derive(Clone, Debug, Default)]
struct RefCount(Arc<()>);
#[cfg(feature = "discover")]
pin_project! {
/// Wraps a `D`-typed stream of discovered services with [`PendingRequests`].
#[cfg_attr(docsrs, doc(cfg(feature = "discover")))]
#[derive(Debug)]
pub struct PendingRequestsDiscover<D, C = CompleteOnResponse> {
#[pin]
discover: D,
completion: C,
}
}
/// Represents the number of currently-pending requests to a given service.
#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Ord, Eq)]
pub struct Count(usize);
/// Tracks an in-flight request by reference count.
// The field is never read; only its `Drop` (releasing the Arc clone) matters.
#[derive(Debug)]
#[allow(dead_code)]
pub struct Handle(RefCount);
// ===== impl PendingRequests =====
impl<S, C> PendingRequests<S, C> {
    /// Wraps an `S`-typed service so that its load is tracked by the number of pending requests.
    pub fn new(service: S, completion: C) -> Self {
        Self {
            ref_count: RefCount::default(),
            service,
            completion,
        }
    }
    // Creates a drop-guard whose clone of the shared `Arc` keeps the pending
    // count elevated until the request completes.
    fn handle(&self) -> Handle {
        Handle(RefCount::clone(&self.ref_count))
    }
}
impl<S, C> Load for PendingRequests<S, C> {
    type Metric = Count;
    fn load(&self) -> Count {
        // Count the number of references that aren't `self`.
        Count(self.ref_count.ref_count() - 1)
    }
}
impl<S, C, Request> Service<Request> for PendingRequests<S, C>
where
    S: Service<Request>,
    C: TrackCompletion<Handle, S::Response>,
{
    type Response = C::Output;
    type Error = S::Error;
    type Future = TrackCompletionFuture<S::Future, C, Handle>;
    // Readiness is delegated entirely to the inner service.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.service.poll_ready(cx)
    }
    // Attach a `Handle` so the pending count drops when the completion
    // strategy declares this request finished.
    fn call(&mut self, req: Request) -> Self::Future {
        TrackCompletionFuture::new(
            self.completion.clone(),
            self.handle(),
            self.service.call(req),
        )
    }
}
// ===== impl PendingRequestsDiscover =====
#[cfg(feature = "discover")]
impl<D, C> PendingRequestsDiscover<D, C> {
    /// Wraps a [`Discover`], wrapping all of its services with [`PendingRequests`].
    pub const fn new<Request>(discover: D, completion: C) -> Self
    where
        D: Discover,
        D::Service: Service<Request>,
        C: TrackCompletion<Handle, <D::Service as Service<Request>>::Response>,
    {
        Self {
            discover,
            completion,
        }
    }
}
#[cfg(feature = "discover")]
impl<D, C> Stream for PendingRequestsDiscover<D, C>
where
D: Discover,
C: Clone,
{
type Item = Result<Change<D::Key, PendingRequests<D::Service, C>>, D::Error>;
/// Yields the next discovery change set.
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
use self::Change::*;
let this = self.project();
let change = match ready!(this.discover.poll_discover(cx)).transpose()? {
None => return Poll::Ready(None),
Some(Insert(k, svc)) => Insert(k, PendingRequests::new(svc, this.completion.clone())),
Some(Remove(k)) => Remove(k),
};
Poll::Ready(Some(Ok(change)))
}
}
// ==== RefCount ====
impl RefCount {
    // Total number of live references, including the one held by the service.
    pub(crate) fn ref_count(&self) -> usize {
        Arc::strong_count(&self.0)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::{
        future,
        task::{Context, Poll},
    };
    // A service that responds immediately; used to exercise load counting only.
    struct Svc;
    impl Service<()> for Svc {
        type Response = ();
        type Error = ();
        type Future = future::Ready<Result<(), ()>>;
        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), ()>> {
            Poll::Ready(Ok(()))
        }
        fn call(&mut self, (): ()) -> Self::Future {
            future::ready(Ok(()))
        }
    }
    // With `CompleteOnResponse`, the count drops as soon as the response resolves.
    #[test]
    fn default() {
        let mut svc = PendingRequests::new(Svc, CompleteOnResponse);
        assert_eq!(svc.load(), Count(0));
        let rsp0 = svc.call(());
        assert_eq!(svc.load(), Count(1));
        let rsp1 = svc.call(());
        assert_eq!(svc.load(), Count(2));
        let () = tokio_test::block_on(rsp0).unwrap();
        assert_eq!(svc.load(), Count(1));
        let () = tokio_test::block_on(rsp1).unwrap();
        assert_eq!(svc.load(), Count(0));
    }
    // With a custom tracker that returns the `Handle`, the count persists until
    // the caller drops the handle.
    #[test]
    fn with_completion() {
        #[derive(Clone)]
        struct IntoHandle;
        impl TrackCompletion<Handle, ()> for IntoHandle {
            type Output = Handle;
            fn track_completion(&self, i: Handle, (): ()) -> Handle {
                i
            }
        }
        let mut svc = PendingRequests::new(Svc, IntoHandle);
        assert_eq!(svc.load(), Count(0));
        let rsp = svc.call(());
        assert_eq!(svc.load(), Count(1));
        let i0 = tokio_test::block_on(rsp).unwrap();
        assert_eq!(svc.load(), Count(1));
        let rsp = svc.call(());
        assert_eq!(svc.load(), Count(2));
        let i1 = tokio_test::block_on(rsp).unwrap();
        assert_eq!(svc.load(), Count(2));
        drop(i1);
        assert_eq!(svc.load(), Count(1));
        drop(i0);
        assert_eq!(svc.load(), Count(0));
    }
}

34
vendor/tower/src/load_shed/error.rs vendored Normal file
View File

@@ -0,0 +1,34 @@
//! Error types
use std::fmt;
/// An error returned by [`LoadShed`] when the underlying service
/// is not ready to handle any requests at the time of being
/// called.
///
/// [`LoadShed`]: crate::load_shed::LoadShed
#[derive(Default)]
pub struct Overloaded {
    // Private unit field keeps the struct non-constructible outside this crate.
    _p: (),
}
impl Overloaded {
    /// Construct a new overloaded error
    pub const fn new() -> Self {
        Self { _p: () }
    }
}
impl fmt::Debug for Overloaded {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Overloaded")
    }
}
impl fmt::Display for Overloaded {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "service overloaded")
    }
}
impl std::error::Error for Overloaded {}

70
vendor/tower/src/load_shed/future.rs vendored Normal file
View File

@@ -0,0 +1,70 @@
//! Future types
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use pin_project_lite::pin_project;
use super::error::Overloaded;
pin_project! {
    /// Future for the [`LoadShed`] service.
    ///
    /// [`LoadShed`]: crate::load_shed::LoadShed
    pub struct ResponseFuture<F> {
        #[pin]
        state: ResponseState<F>,
    }
}
pin_project! {
    // Either drives the inner service's future, or resolves immediately with
    // an `Overloaded` error (when the service was not ready at call time).
    #[project = ResponseStateProj]
    enum ResponseState<F> {
        Called {
            #[pin]
            fut: F
        },
        Overloaded,
    }
}
impl<F> ResponseFuture<F> {
    // The inner service accepted the request: drive its future to completion.
    pub(crate) fn called(fut: F) -> Self {
        Self {
            state: ResponseState::Called { fut },
        }
    }
    // The inner service was not ready: resolve immediately with `Overloaded`.
    pub(crate) fn overloaded() -> Self {
        Self {
            state: ResponseState::Overloaded,
        }
    }
}
impl<F, T, E> Future for ResponseFuture<F>
where
    F: Future<Output = Result<T, E>>,
    E: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.project().state.project() {
            // Delegate to the inner future, boxing its error type.
            ResponseStateProj::Called { fut } => fut.poll(cx).map_err(Into::into),
            // Shed: fail immediately without ever having called the service.
            ResponseStateProj::Overloaded => Poll::Ready(Err(Overloaded::new().into())),
        }
    }
}
impl<F> fmt::Debug for ResponseFuture<F>
where
    // bounds for future-proofing...
    F: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The internal state is intentionally opaque.
        write!(f, "ResponseFuture")
    }
}

33
vendor/tower/src/load_shed/layer.rs vendored Normal file
View File

@@ -0,0 +1,33 @@
use std::fmt;
use tower_layer::Layer;
use super::LoadShed;
/// A [`Layer`] to wrap services in [`LoadShed`] middleware.
///
/// [`Layer`]: crate::Layer
#[derive(Clone, Default)]
pub struct LoadShedLayer {
    // Private unit field reserves the right to add configuration later.
    _p: (),
}
impl LoadShedLayer {
    /// Creates a new layer.
    pub const fn new() -> Self {
        Self { _p: () }
    }
}
impl<S> Layer<S> for LoadShedLayer {
    type Service = LoadShed<S>;
    // Wraps the given service in load-shedding middleware.
    fn layer(&self, service: S) -> Self::Service {
        LoadShed::new(service)
    }
}
impl fmt::Debug for LoadShedLayer {
    // Hand-written so the private `_p` field stays out of the output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("LoadShedLayer").finish()
    }
}

76
vendor/tower/src/load_shed/mod.rs vendored Normal file
View File

@@ -0,0 +1,76 @@
//! Middleware for shedding load when inner services aren't ready.
use std::task::{Context, Poll};
use tower_service::Service;
pub mod error;
pub mod future;
mod layer;
use self::future::ResponseFuture;
pub use self::layer::LoadShedLayer;
/// A [`Service`] that sheds load when the inner service isn't ready.
///
/// [`Service`]: crate::Service
#[derive(Debug)]
pub struct LoadShed<S> {
    inner: S,
    // Result of the most recent `poll_ready` on `inner`; consumed by `call`.
    is_ready: bool,
}
// ===== impl LoadShed =====
impl<S> LoadShed<S> {
/// Wraps a service in [`LoadShed`] middleware.
pub const fn new(inner: S) -> Self {
LoadShed {
inner,
is_ready: false,
}
}
}
impl<S, Req> Service<Req> for LoadShed<S>
where
    S: Service<Req>,
    S::Error: Into<crate::BoxError>,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<S::Future>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // We check for readiness here, so that we can know in `call` if
        // the inner service is overloaded or not.
        self.is_ready = match self.inner.poll_ready(cx) {
            // Inner-service errors are surfaced immediately, boxed.
            Poll::Ready(Err(e)) => return Poll::Ready(Err(e.into())),
            r => r.is_ready(),
        };
        // But we always report Ready, so that layers above don't wait until
        // the inner service is ready (the entire point of this layer!)
        Poll::Ready(Ok(()))
    }
    fn call(&mut self, req: Req) -> Self::Future {
        if self.is_ready {
            // readiness only counts once, you need to check again!
            self.is_ready = false;
            ResponseFuture::called(self.inner.call(req))
        } else {
            // Shed the request: fail fast with an `Overloaded` error.
            ResponseFuture::overloaded()
        }
    }
}
impl<S: Clone> Clone for LoadShed<S> {
fn clone(&self) -> Self {
LoadShed {
inner: self.inner.clone(),
// new clones shouldn't carry the readiness state, as a cloneable
// inner service likely tracks readiness per clone.
is_ready: false,
}
}
}

42
vendor/tower/src/macros.rs vendored Normal file
View File

@@ -0,0 +1,42 @@
// Generates a newtype future that hides its concrete inner future type,
// forwarding `Future::poll` and providing an opaque `Debug` impl.
#[cfg(any(
    feature = "util",
    feature = "spawn-ready",
    feature = "filter",
    feature = "make"
))]
macro_rules! opaque_future {
    ($(#[$m:meta])* pub type $name:ident<$($param:ident),+> = $actual:ty;) => {
        pin_project_lite::pin_project! {
            $(#[$m])*
            pub struct $name<$($param),+> {
                #[pin]
                inner: $actual
            }
        }
        impl<$($param),+> $name<$($param),+> {
            pub(crate) fn new(inner: $actual) -> Self {
                Self {
                    inner
                }
            }
        }
        // Opaque Debug: the inner future type is an implementation detail.
        impl<$($param),+> std::fmt::Debug for $name<$($param),+> {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                f.debug_tuple(stringify!($name)).field(&format_args!("...")).finish()
            }
        }
        impl<$($param),+> std::future::Future for $name<$($param),+>
        where
            $actual: std::future::Future,
        {
            type Output = <$actual as std::future::Future>::Output;
            #[inline]
            fn poll(self: std::pin::Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> std::task::Poll<Self::Output> {
                self.project().inner.poll(cx)
            }
        }
    }
}

View File

@@ -0,0 +1,47 @@
use crate::sealed::Sealed;
use std::future::Future;
use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite};
use tower_service::Service;
/// The [`MakeConnection`] trait is used to create transports.
///
/// The goal of this service is to allow composable methods for creating
/// `AsyncRead + AsyncWrite` transports. This could mean creating a TLS
/// based connection or using some other method to authenticate the connection.
// Sealed: implemented via the blanket impl below for any matching `Service`.
pub trait MakeConnection<Target>: Sealed<(Target,)> {
    /// The transport provided by this service
    type Connection: AsyncRead + AsyncWrite;
    /// Errors produced by the connecting service
    type Error;
    /// The future that eventually produces the transport
    type Future: Future<Output = Result<Self::Connection, Self::Error>>;
    /// Returns `Poll::Ready(Ok(()))` when it is able to make more connections.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
    /// Connect and return a transport asynchronously
    fn make_connection(&mut self, target: Target) -> Self::Future;
}
impl<S, Target> Sealed<(Target,)> for S where S: Service<Target> {}
// Blanket impl: any `Service` whose response is a transport is a `MakeConnection`.
impl<C, Target> MakeConnection<Target> for C
where
    C: Service<Target>,
    C::Response: AsyncRead + AsyncWrite,
{
    type Connection = C::Response;
    type Error = C::Error;
    type Future = C::Future;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Service::poll_ready(self, cx)
    }
    fn make_connection(&mut self, target: Target) -> Self::Future {
        Service::call(self, target)
    }
}

251
vendor/tower/src/make/make_service.rs vendored Normal file
View File

@@ -0,0 +1,251 @@
//! Contains [`MakeService`] which is a trait alias for a [`Service`] of [`Service`]s.
use crate::sealed::Sealed;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::task::{Context, Poll};
use tower_service::Service;
pub(crate) mod shared;
/// Creates new [`Service`] values.
///
/// Acts as a service factory. This is useful for cases where new [`Service`]
/// values must be produced. One case is a TCP server listener. The listener
/// accepts new TCP streams, obtains a new [`Service`] value using the
/// [`MakeService`] trait, and uses that new [`Service`] value to process inbound
/// requests on that new TCP stream.
///
/// This is essentially a trait alias for a [`Service`] of [`Service`]s.
// Sealed: implemented via the blanket impl below for any `Service` of `Service`s.
pub trait MakeService<Target, Request>: Sealed<(Target, Request)> {
    /// Responses given by the service
    type Response;
    /// Errors produced by the service
    type Error;
    /// The [`Service`] value created by this factory
    type Service: Service<Request, Response = Self::Response, Error = Self::Error>;
    /// Errors produced while building a service.
    type MakeError;
    /// The future of the [`Service`] instance.
    type Future: Future<Output = Result<Self::Service, Self::MakeError>>;
    /// Returns [`Poll::Ready`] when the factory is able to create more services.
    ///
    /// If the service is at capacity, then [`Poll::Pending`] is returned and the task
    /// is notified when the service becomes ready again. This function is
    /// expected to be called while on a task.
    ///
    /// [`Poll::Ready`]: std::task::Poll::Ready
    /// [`Poll::Pending`]: std::task::Poll::Pending
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::MakeError>>;
    /// Create and return a new service value asynchronously.
    fn make_service(&mut self, target: Target) -> Self::Future;
    /// Consume this [`MakeService`] and convert it into a [`Service`].
    ///
    /// # Example
    /// ```
    /// use std::convert::Infallible;
    /// use tower::Service;
    /// use tower::make::MakeService;
    /// use tower::service_fn;
    ///
    /// # fn main() {
    /// # async {
    /// // A `MakeService`
    /// let make_service = service_fn(|make_req: ()| async {
    ///     Ok::<_, Infallible>(service_fn(|req: String| async {
    ///         Ok::<_, Infallible>(req)
    ///     }))
    /// });
    ///
    /// // Convert the `MakeService` into a `Service`
    /// let mut svc = make_service.into_service();
    ///
    /// // Make a new service
    /// let mut new_svc = svc.call(()).await.unwrap();
    ///
    /// // Call the service
    /// let res = new_svc.call("foo".to_string()).await.unwrap();
    /// # };
    /// # }
    /// ```
    fn into_service(self) -> IntoService<Self, Request>
    where
        Self: Sized,
    {
        IntoService {
            make: self,
            _marker: PhantomData,
        }
    }
    /// Convert this [`MakeService`] into a [`Service`] without consuming the original [`MakeService`].
    ///
    /// # Example
    /// ```
    /// use std::convert::Infallible;
    /// use tower::Service;
    /// use tower::make::MakeService;
    /// use tower::service_fn;
    ///
    /// # fn main() {
    /// # async {
    /// // A `MakeService`
    /// let mut make_service = service_fn(|make_req: ()| async {
    ///     Ok::<_, Infallible>(service_fn(|req: String| async {
    ///         Ok::<_, Infallible>(req)
    ///     }))
    /// });
    ///
    /// // Convert the `MakeService` into a `Service`
    /// let mut svc = make_service.as_service();
    ///
    /// // Make a new service
    /// let mut new_svc = svc.call(()).await.unwrap();
    ///
    /// // Call the service
    /// let res = new_svc.call("foo".to_string()).await.unwrap();
    ///
    /// // The original `MakeService` is still accessible
    /// let new_svc = make_service.make_service(()).await.unwrap();
    /// # };
    /// # }
    /// ```
    fn as_service(&mut self) -> AsService<'_, Self, Request>
    where
        Self: Sized,
    {
        AsService {
            make: self,
            _marker: PhantomData,
        }
    }
}
impl<M, S, Target, Request> Sealed<(Target, Request)> for M
where
    M: Service<Target, Response = S>,
    S: Service<Request>,
{
}
// Blanket impl: any `Service` that produces a `Service` is a `MakeService`.
impl<M, S, Target, Request> MakeService<Target, Request> for M
where
    M: Service<Target, Response = S>,
    S: Service<Request>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Service = S;
    type MakeError = M::Error;
    type Future = M::Future;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::MakeError>> {
        Service::poll_ready(self, cx)
    }
    fn make_service(&mut self, target: Target) -> Self::Future {
        Service::call(self, target)
    }
}
/// Service returned by [`MakeService::into_service`][into].
///
/// See the documentation on [`into_service`][into] for details.
///
/// [into]: MakeService::into_service
pub struct IntoService<M, Request> {
    make: M,
    // Pins down the `Request` type parameter without storing a `Request`.
    _marker: PhantomData<Request>,
}
impl<M, Request> Clone for IntoService<M, Request>
where
M: Clone,
{
fn clone(&self) -> Self {
Self {
make: self.make.clone(),
_marker: PhantomData,
}
}
}
// Hand-written so `Request` need not be `Debug`; the marker is omitted.
impl<M, Request> fmt::Debug for IntoService<M, Request>
where
    M: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IntoService")
            .field("make", &self.make)
            .finish()
    }
}
impl<M, S, Target, Request> Service<Target> for IntoService<M, Request>
where
    M: Service<Target, Response = S>,
    S: Service<Request>,
{
    type Response = M::Response;
    type Error = M::Error;
    type Future = M::Future;
    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.make.poll_ready(cx)
    }
    // Calling the adapter forwards the target to the wrapped factory.
    #[inline]
    fn call(&mut self, target: Target) -> Self::Future {
        self.make.make_service(target)
    }
}
/// Service returned by [`MakeService::as_service`][as].
///
/// See the documentation on [`as_service`][as] for details.
///
/// [as]: MakeService::as_service
pub struct AsService<'a, M, Request> {
    // Borrowed factory; the original `MakeService` remains usable afterwards.
    make: &'a mut M,
    // Pins down the `Request` type parameter without storing a `Request`.
    _marker: PhantomData<Request>,
}
// Hand-written so `Request` need not be `Debug`; the marker is omitted.
impl<M, Request> fmt::Debug for AsService<'_, M, Request>
where
    M: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AsService")
            .field("make", &self.make)
            .finish()
    }
}
impl<M, S, Target, Request> Service<Target> for AsService<'_, M, Request>
where
    M: Service<Target, Response = S>,
    S: Service<Request>,
{
    type Response = M::Response;
    type Error = M::Error;
    type Future = M::Future;
    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.make.poll_ready(cx)
    }
    // Calling the adapter forwards the target to the borrowed factory.
    #[inline]
    fn call(&mut self, target: Target) -> Self::Future {
        self.make.make_service(target)
    }
}

View File

@@ -0,0 +1,146 @@
use std::convert::Infallible;
use std::task::{Context, Poll};
use tower_service::Service;
/// A [`MakeService`] that produces services by cloning an inner service.
///
/// [`MakeService`]: super::MakeService
///
/// # Example
///
/// ```
/// # use std::task::{Context, Poll};
/// # use std::pin::Pin;
/// # use std::convert::Infallible;
/// use std::future::{Ready, ready};
/// use tower::make::{MakeService, Shared};
/// use tower::buffer::Buffer;
/// use tower::Service;
///
/// // An example connection type
/// struct Connection {}
///
/// // An example request type
/// struct Request {}
///
/// // An example response type
/// struct Response {}
///
/// // Some service that doesn't implement `Clone`
/// struct MyService;
///
/// impl Service<Request> for MyService {
///     type Response = Response;
///     type Error = Infallible;
///     type Future = Ready<Result<Response, Infallible>>;
///
///     fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
///         Poll::Ready(Ok(()))
///     }
///
///     fn call(&mut self, req: Request) -> Self::Future {
///         ready(Ok(Response {}))
///     }
/// }
///
/// // Example function that runs a service by accepting new connections and using
/// // `Make` to create new services that might be bound to the connection.
/// //
/// // This is similar to what you might find in hyper.
/// async fn serve_make_service<Make>(make: Make)
/// where
///     Make: MakeService<Connection, Request>
/// {
///     // ...
/// }
///
/// # async {
/// // Our service
/// let svc = MyService;
///
/// // Make it `Clone` by putting a channel in front
/// let buffered = Buffer::new(svc, 1024);
///
/// // Convert it into a `MakeService`
/// let make = Shared::new(buffered);
///
/// // Run the service and just ignore the `Connection`s as `MyService` doesn't need them
/// serve_make_service(make).await;
/// # };
/// ```
#[derive(Debug, Clone, Copy)]
pub struct Shared<S> {
    // The template service; each `call` hands out a clone of it.
    service: S,
}
impl<S> Shared<S> {
/// Create a new [`Shared`] from a service.
pub const fn new(service: S) -> Self {
Self { service }
}
}
impl<S, T> Service<T> for Shared<S>
where
    S: Clone,
{
    type Response = S;
    type Error = Infallible;
    type Future = SharedFuture<S>;
    // Cloning never blocks, so the factory is always ready.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }
    // The target is ignored; every caller receives a clone of the same service.
    fn call(&mut self, _target: T) -> Self::Future {
        SharedFuture::new(std::future::ready(Ok(self.service.clone())))
    }
}
// Wraps the always-ready clone future in an opaque named future type.
opaque_future! {
    /// Response future from [`Shared`] services.
    pub type SharedFuture<S> = std::future::Ready<Result<S, Infallible>>;
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::make::MakeService;
    use crate::service_fn;
    use std::future::poll_fn;
    // Identity service used as the cloneable template.
    async fn echo<R>(req: R) -> Result<R, Infallible> {
        Ok(req)
    }
    // `Shared` usable directly through the `MakeService` trait.
    #[tokio::test]
    async fn as_make_service() {
        let mut shared = Shared::new(service_fn(echo::<&'static str>));
        poll_fn(|cx| MakeService::<(), _>::poll_ready(&mut shared, cx))
            .await
            .unwrap();
        let mut svc = shared.make_service(()).await.unwrap();
        poll_fn(|cx| svc.poll_ready(cx)).await.unwrap();
        let res = svc.call("foo").await.unwrap();
        assert_eq!(res, "foo");
    }
    // `Shared` usable after conversion via `MakeService::into_service`.
    #[tokio::test]
    async fn as_make_service_into_service() {
        let shared = Shared::new(service_fn(echo::<&'static str>));
        let mut shared = MakeService::<(), _>::into_service(shared);
        poll_fn(|cx| Service::<()>::poll_ready(&mut shared, cx))
            .await
            .unwrap();
        let mut svc = shared.call(()).await.unwrap();
        poll_fn(|cx| svc.poll_ready(cx)).await.unwrap();
        let res = svc.call("foo").await.unwrap();
        assert_eq!(res, "foo");
    }
}

14
vendor/tower/src/make/mod.rs vendored Normal file
View File

@@ -0,0 +1,14 @@
//! Trait aliases for Services that produce specific types of Responses.
mod make_connection;
mod make_service;
pub use self::make_connection::MakeConnection;
pub use self::make_service::shared::Shared;
pub use self::make_service::{AsService, IntoService, MakeService};
pub mod future {
    //! Future types
    // Re-exported here so the future type lives in a conventional location.
    pub use super::make_service::shared::SharedFuture;
}

503
vendor/tower/src/ready_cache/cache.rs vendored Normal file
View File

@@ -0,0 +1,503 @@
//! A cache of services.
use super::error;
use futures_core::Stream;
use futures_util::{stream::FuturesUnordered, task::AtomicWaker};
pub use indexmap::Equivalent;
use indexmap::IndexMap;
use std::fmt;
use std::future::Future;
use std::hash::Hash;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use tower_service::Service;
use tracing::{debug, trace};
/// Drives readiness over a set of services.
///
/// The cache maintains two internal data structures:
///
/// * a set of _pending_ services that have not yet become ready; and
/// * a set of _ready_ services that have previously polled ready.
///
/// As each `S` typed [`Service`] is added to the cache via [`ReadyCache::push`], it
/// is added to the _pending set_. As [`ReadyCache::poll_pending`] is invoked,
/// pending services are polled and added to the _ready set_.
///
/// [`ReadyCache::call_ready`] (or [`ReadyCache::call_ready_index`]) dispatches a
/// request to the specified service, but panics if the specified service is not
/// in the ready set. The `ReadyCache::check_*` functions can be used to ensure
/// that a service is ready before dispatching a request.
///
/// The ready set can hold services for an arbitrarily long time. During this
/// time, the runtime may process events that invalidate that ready state (for
/// instance, if a keepalive detects a lost connection). In such cases, callers
/// should use [`ReadyCache::check_ready`] (or [`ReadyCache::check_ready_index`])
/// immediately before dispatching a request to ensure that the service has not
/// become unavailable.
///
/// Once `ReadyCache::call_ready*` is invoked, the service is placed back into
/// the _pending_ set to be driven to readiness again.
///
/// When `ReadyCache::check_ready*` returns `false`, it indicates that the
/// specified service is _not_ ready. If an error is returned, this indicates that
/// the server failed and has been removed from the cache entirely.
///
/// [`ReadyCache::evict`] can be used to remove a service from the cache (by key),
/// though the service may not be dropped (if it is currently pending) until
/// [`ReadyCache::poll_pending`] is invoked.
///
/// Note that the by-index accessors are provided to support use cases (like
/// power-of-two-choices load balancing) where the caller does not care to keep
/// track of each service's key. Instead, it needs only to access _some_ ready
/// service. In such a case, it should be noted that calls to
/// [`ReadyCache::poll_pending`] and [`ReadyCache::evict`] may perturb the order of
/// the ready set, so any cached indexes should be discarded after such a call.
pub struct ReadyCache<K, S, Req>
where
    K: Eq + Hash,
{
    /// A stream of services that are not yet ready.
    pending: FuturesUnordered<Pending<K, S, Req>>,
    /// An index of cancelation handles for pending streams.
    pending_cancel_txs: IndexMap<K, CancelTx>,
    /// Services that have previously become ready. Readiness can become stale,
    /// so a given service should be polled immediately before use.
    ///
    /// The cancelation oneshot is preserved (though unused) while the service is
    /// ready so that it need not be reallocated each time a request is
    /// dispatched.
    ready: IndexMap<K, (S, CancelPair)>,
}
// Safety: This is safe because we do not use `Pin::new_unchecked`.
// (`ReadyCache` only pins its `FuturesUnordered` via `Pin::new` in
// `poll_pending`, which requires `Unpin`; no field is structurally pinned.)
impl<S, K: Eq + Hash, Req> Unpin for ReadyCache<K, S, Req> {}
/// Shared cancelation state between a `CancelTx` and its `CancelRx`.
#[derive(Debug)]
struct Cancel {
    /// Wakes the pending task when cancelation is triggered.
    waker: AtomicWaker,
    /// Set to `true` once `CancelTx::cancel` has fired.
    canceled: AtomicBool,
}
/// Receiver half: observed by `Pending` while polling for readiness.
#[derive(Debug)]
struct CancelRx(Arc<Cancel>);
/// Sender half: held in `pending_cancel_txs` to trigger cancelation.
#[derive(Debug)]
struct CancelTx(Arc<Cancel>);
type CancelPair = (CancelTx, CancelRx);
/// Why a `Pending` future failed to produce a ready service.
#[derive(Debug)]
enum PendingError<K, E> {
    /// The service was canceled (e.g. evicted) before becoming ready.
    Canceled(K),
    /// The service's `poll_ready` returned an error.
    Inner(K, E),
}
pin_project_lite::pin_project! {
    /// A [`Future`] that becomes satisfied when an `S`-typed service is ready.
    ///
    /// May fail due to cancelation, i.e. if the service is evicted from the balancer.
    struct Pending<K, S, Req> {
        // All three `Option`s hold `Some` until the future completes; they
        // are `take()`n when yielding the output (see the `Future` impl).
        key: Option<K>,
        cancel: Option<CancelRx>,
        ready: Option<S>,
        // Ties down the `Req` type parameter required by the `Service` bound.
        _pd: std::marker::PhantomData<Req>,
    }
}
// === ReadyCache ===
impl<K, S, Req> Default for ReadyCache<K, S, Req>
where
    K: Eq + Hash,
    S: Service<Req>,
{
    /// Builds an empty cache: nothing pending, nothing ready.
    fn default() -> Self {
        Self {
            pending: FuturesUnordered::new(),
            pending_cancel_txs: IndexMap::default(),
            ready: IndexMap::default(),
        }
    }
}
impl<K, S, Req> fmt::Debug for ReadyCache<K, S, Req>
where
    K: fmt::Debug + Eq + Hash,
    S: fmt::Debug,
{
    /// Formats the cache as a struct exposing its three internal collections.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ReadyCache")
            .field("pending", &self.pending)
            .field("pending_cancel_txs", &self.pending_cancel_txs)
            .field("ready", &self.ready)
            .finish()
    }
}
impl<K, S, Req> ReadyCache<K, S, Req>
where
    K: Eq + Hash,
{
    /// Returns the total number of services in the cache.
    pub fn len(&self) -> usize {
        self.ready.len() + self.pending.len()
    }

    /// Returns whether or not there are any services in the cache.
    pub fn is_empty(&self) -> bool {
        self.ready.is_empty() && self.pending.is_empty()
    }

    /// Returns the number of services in the ready set.
    pub fn ready_len(&self) -> usize {
        self.ready.len()
    }

    /// Returns the number of services in the unready set.
    pub fn pending_len(&self) -> usize {
        self.pending.len()
    }

    /// Returns true iff the given key is in the unready set.
    pub fn pending_contains<Q: Hash + Equivalent<K>>(&self, key: &Q) -> bool {
        self.pending_cancel_txs.contains_key(key)
    }

    /// Obtains a reference to a service in the ready set by key.
    pub fn get_ready<Q: Hash + Equivalent<K>>(&self, key: &Q) -> Option<(usize, &K, &S)> {
        let (index, key, (svc, _)) = self.ready.get_full(key)?;
        Some((index, key, svc))
    }

    /// Obtains a mutable reference to a service in the ready set by key.
    pub fn get_ready_mut<Q: Hash + Equivalent<K>>(
        &mut self,
        key: &Q,
    ) -> Option<(usize, &K, &mut S)> {
        let (index, key, (svc, _)) = self.ready.get_full_mut(key)?;
        Some((index, key, svc))
    }

    /// Obtains a reference to a service in the ready set by index.
    pub fn get_ready_index(&self, idx: usize) -> Option<(&K, &S)> {
        let (key, (svc, _)) = self.ready.get_index(idx)?;
        Some((key, svc))
    }

    /// Obtains a mutable reference to a service in the ready set by index.
    pub fn get_ready_index_mut(&mut self, idx: usize) -> Option<(&K, &mut S)> {
        let (key, (svc, _)) = self.ready.get_index_mut(idx)?;
        Some((key, svc))
    }

    /// Returns an iterator over the ready keys and services.
    pub fn iter_ready(&self) -> impl Iterator<Item = (&K, &S)> {
        self.ready.iter().map(|(key, entry)| (key, &entry.0))
    }

    /// Returns a mutable iterator over the ready keys and services.
    pub fn iter_ready_mut(&mut self) -> impl Iterator<Item = (&K, &mut S)> {
        self.ready.iter_mut().map(|(key, entry)| (key, &mut entry.0))
    }

    /// Evicts an item from the cache.
    ///
    /// Returns true if a service was marked for eviction.
    ///
    /// Services are dropped from the ready set immediately. Services in the
    /// pending set are marked for cancellation, but [`ReadyCache::poll_pending`]
    /// must be called to cause the service to be dropped.
    pub fn evict<Q: Hash + Equivalent<K>>(&mut self, key: &Q) -> bool {
        let mut evicted = false;
        // Cancel the pending entry for this key (if any)...
        if let Some(cancel_tx) = self.pending_cancel_txs.swap_remove(key) {
            cancel_tx.cancel();
            evicted = true;
        }
        // ...and drop the ready entry (if any). Both removals always run.
        if self.ready.swap_remove_full(key).is_some() {
            evicted = true;
        }
        evicted
    }
}
impl<K, S, Req> ReadyCache<K, S, Req>
where
K: Clone + Eq + Hash,
S: Service<Req>,
<S as Service<Req>>::Error: Into<crate::BoxError>,
S::Error: Into<crate::BoxError>,
{
/// Pushes a new service onto the pending set.
///
/// The service will be promoted to the ready set as [`poll_pending`] is invoked.
///
/// Note that this does **not** remove services from the ready set. Once the
/// old service is used, it will be dropped instead of being added back to
/// the pending set; OR, when the new service becomes ready, it will replace
/// the prior service in the ready set.
///
/// [`poll_pending`]: crate::ready_cache::cache::ReadyCache::poll_pending
pub fn push(&mut self, key: K, svc: S) {
let cancel = cancelable();
self.push_pending(key, svc, cancel);
}
fn push_pending(&mut self, key: K, svc: S, (cancel_tx, cancel_rx): CancelPair) {
if let Some(c) = self.pending_cancel_txs.insert(key.clone(), cancel_tx) {
// If there is already a service for this key, cancel it.
c.cancel();
}
self.pending.push(Pending {
key: Some(key),
cancel: Some(cancel_rx),
ready: Some(svc),
_pd: std::marker::PhantomData,
});
}
/// Polls services pending readiness, adding ready services to the ready set.
///
/// Returns [`Poll::Ready`] when there are no remaining unready services.
/// [`poll_pending`] should be called again after [`push`] or
/// [`call_ready_index`] are invoked.
///
/// Failures indicate that an individual pending service failed to become
/// ready (and has been removed from the cache). In such a case,
/// [`poll_pending`] should typically be called again to continue driving
/// pending services to readiness.
///
/// [`poll_pending`]: crate::ready_cache::cache::ReadyCache::poll_pending
/// [`push`]: crate::ready_cache::cache::ReadyCache::push
/// [`call_ready_index`]: crate::ready_cache::cache::ReadyCache::call_ready_index
pub fn poll_pending(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), error::Failed<K>>> {
loop {
match Pin::new(&mut self.pending).poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Ready(Some(Ok((key, svc, cancel_rx)))) => {
trace!("endpoint ready");
let cancel_tx = self.pending_cancel_txs.swap_remove(&key);
if let Some(cancel_tx) = cancel_tx {
// Keep track of the cancelation so that it need not be
// recreated after the service is used.
self.ready.insert(key, (svc, (cancel_tx, cancel_rx)));
} else {
assert!(
cancel_tx.is_some(),
"services that become ready must have a pending cancelation"
);
}
}
Poll::Ready(Some(Err(PendingError::Canceled(_)))) => {
debug!("endpoint canceled");
// The cancellation for this service was removed in order to
// cause this cancellation.
}
Poll::Ready(Some(Err(PendingError::Inner(key, e)))) => {
let cancel_tx = self.pending_cancel_txs.swap_remove(&key);
assert!(
cancel_tx.is_some(),
"services that return an error must have a pending cancelation"
);
return Err(error::Failed(key, e.into())).into();
}
}
}
}
/// Checks whether the referenced endpoint is ready.
///
/// Returns true if the endpoint is ready and false if it is not. An error is
/// returned if the endpoint fails.
pub fn check_ready<Q: Hash + Equivalent<K>>(
&mut self,
cx: &mut Context<'_>,
key: &Q,
) -> Result<bool, error::Failed<K>> {
match self.ready.get_full_mut(key) {
Some((index, _, _)) => self.check_ready_index(cx, index),
None => Ok(false),
}
}
/// Checks whether the referenced endpoint is ready.
///
/// If the service is no longer ready, it is moved back into the pending set
/// and `false` is returned.
///
/// If the service errors, it is removed and dropped and the error is returned.
pub fn check_ready_index(
&mut self,
cx: &mut Context<'_>,
index: usize,
) -> Result<bool, error::Failed<K>> {
let svc = match self.ready.get_index_mut(index) {
None => return Ok(false),
Some((_, (svc, _))) => svc,
};
match svc.poll_ready(cx) {
Poll::Ready(Ok(())) => Ok(true),
Poll::Pending => {
// became unready; so move it back there.
let (key, (svc, cancel)) = self
.ready
.swap_remove_index(index)
.expect("invalid ready index");
// If a new version of this service has been added to the
// unready set, don't overwrite it.
if !self.pending_contains(&key) {
self.push_pending(key, svc, cancel);
}
Ok(false)
}
Poll::Ready(Err(e)) => {
// failed, so drop it.
let (key, _) = self
.ready
.swap_remove_index(index)
.expect("invalid ready index");
Err(error::Failed(key, e.into()))
}
}
}
/// Calls a ready service by key.
///
/// # Panics
///
/// If the specified key does not exist in the ready
pub fn call_ready<Q: Hash + Equivalent<K>>(&mut self, key: &Q, req: Req) -> S::Future {
let (index, _, _) = self
.ready
.get_full_mut(key)
.expect("check_ready was not called");
self.call_ready_index(index, req)
}
/// Calls a ready service by index.
///
/// # Panics
///
/// If the specified index is out of range.
pub fn call_ready_index(&mut self, index: usize, req: Req) -> S::Future {
let (key, (mut svc, cancel)) = self
.ready
.swap_remove_index(index)
.expect("check_ready_index was not called");
let fut = svc.call(req);
// If a new version of this service has been added to the
// unready set, don't overwrite it.
if !self.pending_contains(&key) {
self.push_pending(key, svc, cancel);
}
fut
}
}
// === impl Cancel ===
/// Creates a cancelation sender and receiver.
///
/// A `tokio::sync::oneshot` is NOT used, as a `Receiver` is not guaranteed to
/// observe results as soon as a `Sender` fires. Using an `AtomicBool` allows
/// the state to be observed as soon as the cancelation is triggered.
fn cancelable() -> CancelPair {
    let shared = Arc::new(Cancel {
        waker: AtomicWaker::new(),
        canceled: AtomicBool::new(false),
    });
    let rx = CancelRx(Arc::clone(&shared));
    (CancelTx(shared), rx)
}
impl CancelTx {
    /// Triggers cancelation: marks the shared flag and wakes the pending task.
    fn cancel(self) {
        // The flag is stored before waking (both `SeqCst`) so the woken task
        // always observes `canceled == true`.
        self.0.canceled.store(true, Ordering::SeqCst);
        self.0.waker.wake();
    }
}
// === Pending ===
impl<K, S, Req> Future for Pending<K, S, Req>
where
    S: Service<Req>,
{
    /// Resolves to the ready service (with its key and preserved cancelation
    /// receiver), or to a [`PendingError`] on cancelation or service failure.
    type Output = Result<(K, S, CancelRx), PendingError<K, S::Error>>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        // Before checking whether the service is ready, check to see whether
        // readiness has been canceled.
        let CancelRx(cancel) = this.cancel.as_mut().expect("polled after complete");
        if cancel.canceled.load(Ordering::SeqCst) {
            let key = this.key.take().expect("polled after complete");
            return Err(PendingError::Canceled(key)).into();
        }
        match this
            .ready
            .as_mut()
            .expect("polled after ready")
            .poll_ready(cx)
        {
            Poll::Pending => {
                // Before returning Pending, register interest in cancelation so
                // that this future is polled again if the state changes.
                let CancelRx(cancel) = this.cancel.as_mut().expect("polled after complete");
                cancel.waker.register(cx.waker());
                // Because both the cancel receiver and cancel sender are held
                // by the `ReadyCache` (i.e., on a single task), then it must
                // not be possible for the cancelation state to change while
                // polling a `Pending` service.
                assert!(
                    !cancel.canceled.load(Ordering::SeqCst),
                    "cancelation cannot be notified while polling a pending service"
                );
                Poll::Pending
            }
            Poll::Ready(Ok(())) => {
                let key = this.key.take().expect("polled after complete");
                let cancel = this.cancel.take().expect("polled after complete");
                Ok((key, this.ready.take().expect("polled after ready"), cancel)).into()
            }
            Poll::Ready(Err(e)) => {
                // (Typo fixed: this message previously read "polled after compete".)
                let key = this.key.take().expect("polled after complete");
                Err(PendingError::Inner(key, e)).into()
            }
        }
    }
}
impl<K, S, Req> fmt::Debug for Pending<K, S, Req>
where
    K: fmt::Debug,
    S: fmt::Debug,
{
    /// Formats the pending entry's key, cancelation handle, and service.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Pending")
            .field("key", &self.key)
            .field("cancel", &self.cancel)
            .field("ready", &self.ready)
            .finish()
    }
}

28
vendor/tower/src/ready_cache/error.rs vendored Normal file
View File

@@ -0,0 +1,28 @@
//! Errors
/// An error indicating that the service with a `K`-typed key failed with an
/// error.
pub struct Failed<K>(pub K, pub crate::BoxError);
// === Failed ===
impl<K: std::fmt::Debug> std::fmt::Debug for Failed<K> {
    // Debug renders both the key and the boxed error.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_tuple("Failed")
            .field(&self.0)
            .field(&self.1)
            .finish()
    }
}
impl<K> std::fmt::Display for Failed<K> {
    // Display defers to the inner error; `K` carries no `Display` bound and
    // is intentionally omitted from the rendered message.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        self.1.fmt(f)
    }
}
impl<K: std::fmt::Debug> std::error::Error for Failed<K> {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        // The boxed error is the underlying cause.
        Some(&*self.1)
    }
}

6
vendor/tower/src/ready_cache/mod.rs vendored Normal file
View File

@@ -0,0 +1,6 @@
//! A cache of services
pub mod cache;
pub mod error;
pub use self::cache::ReadyCache;

73
vendor/tower/src/reconnect/future.rs vendored Normal file
View File

@@ -0,0 +1,73 @@
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
pin_project! {
    /// Future that resolves to the response or failure to connect.
    #[derive(Debug)]
    pub struct ResponseFuture<F, E> {
        // Either the inner service's call future, or a stored connect
        // error that is surfaced when first polled.
        #[pin]
        inner: Inner<F, E>,
    }
}
pin_project! {
    // Internal state: a real future to drive, or a one-shot error slot.
    #[project = InnerProj]
    #[derive(Debug)]
    enum Inner<F, E> {
        Future {
            #[pin]
            fut: F,
        },
        Error {
            // `Option` so the error can be taken exactly once on poll.
            error: Option<E>,
        },
    }
}
impl<F, E> Inner<F, E> {
    /// Wraps a future that should be polled through to its response.
    fn future(fut: F) -> Self {
        Self::Future { fut }
    }
    /// Wraps an error to be yielded on the first poll.
    fn error(error: Option<E>) -> Self {
        Self::Error { error }
    }
}
impl<F, E> ResponseFuture<F, E> {
    /// Builds a future that polls `inner` through to its response.
    pub(crate) fn new(inner: F) -> Self {
        Self {
            inner: Inner::future(inner),
        }
    }

    /// Builds a future that immediately yields `error`.
    pub(crate) fn error(error: E) -> Self {
        Self {
            inner: Inner::error(Some(error)),
        }
    }
}
impl<F, T, E, ME> Future for ResponseFuture<F, ME>
where
    F: Future<Output = Result<T, E>>,
    E: Into<crate::BoxError>,
    ME: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;

    /// Polls the inner call future, or yields the stored connect error.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.project().inner.project() {
            InnerProj::Future { fut } => fut.poll(cx).map_err(Into::into),
            InnerProj::Error { error } => {
                // The error slot is filled exactly once at construction.
                let e = error.take().expect("Polled after ready.");
                Poll::Ready(Err(e.into()))
            }
        }
    }
}

171
vendor/tower/src/reconnect/mod.rs vendored Normal file
View File

@@ -0,0 +1,171 @@
//! Reconnect services when they fail.
//!
//! Reconnect takes some [`MakeService`] and transforms it into a
//! [`Service`]. It then attempts to lazily connect and
//! reconnect on failure. The `Reconnect` service becomes unavailable
//! when the inner `MakeService::poll_ready` returns an error. When the
//! connection future returned from `MakeService::call` fails this will be
//! returned in the next call to `Reconnect::call`. This allows the user to
//! call the service again even if the inner `MakeService` was unable to
//! connect on the last call.
//!
//! [`MakeService`]: crate::make::MakeService
//! [`Service`]: crate::Service
mod future;
pub use future::ResponseFuture;
use crate::make::MakeService;
use std::fmt;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
use tracing::trace;
/// Reconnect to failed services.
pub struct Reconnect<M, Target>
where
    M: Service<Target>,
{
    /// The `MakeService` used to (re)establish the inner service.
    mk_service: M,
    /// Current connection state: idle, connecting, or connected.
    state: State<M::Future, M::Response>,
    /// Target passed to `mk_service` on every (re)connection attempt.
    target: Target,
    /// A connect error captured during `poll_ready`, surfaced by the next `call`.
    error: Option<M::Error>,
}
#[derive(Debug)]
enum State<F, S> {
    /// No connection; `poll_ready` will initiate a new one.
    Idle,
    /// Waiting on the `MakeService` future to yield a service.
    Connecting(F),
    /// A live inner service.
    Connected(S),
}
impl<M, Target> Reconnect<M, Target>
where
    M: Service<Target>,
{
    /// Lazily connect and reconnect to a [`Service`].
    pub const fn new(mk_service: M, target: Target) -> Self {
        Self {
            mk_service,
            target,
            state: State::Idle,
            error: None,
        }
    }

    /// Reconnect to an already connected [`Service`].
    pub const fn with_connection(init_conn: M::Response, mk_service: M, target: Target) -> Self {
        Self {
            mk_service,
            target,
            state: State::Connected(init_conn),
            error: None,
        }
    }
}
impl<M, Target, S, Request> Service<Request> for Reconnect<M, Target>
where
    M: Service<Target, Response = S>,
    S: Service<Request>,
    M::Future: Unpin,
    crate::BoxError: From<M::Error> + From<S::Error>,
    Target: Clone,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<S::Future, M::Error>;

    /// Drives the connection state machine until the inner service is ready.
    ///
    /// A `MakeService` error is propagated directly (via `?`); a failed
    /// *connection future* is instead stashed in `self.error`, readiness is
    /// reported, and the error is returned by the next `call` — allowing the
    /// caller to retry (see the module docs).
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        loop {
            match &mut self.state {
                State::Idle => {
                    trace!("poll_ready; idle");
                    match self.mk_service.poll_ready(cx) {
                        Poll::Ready(r) => r?,
                        Poll::Pending => {
                            trace!("poll_ready; MakeService not ready");
                            return Poll::Pending;
                        }
                    }
                    // Start a new connection attempt.
                    let fut = self.mk_service.make_service(self.target.clone());
                    self.state = State::Connecting(fut);
                    continue;
                }
                State::Connecting(ref mut f) => {
                    trace!("poll_ready; connecting");
                    match Pin::new(f).poll(cx) {
                        Poll::Ready(Ok(service)) => {
                            self.state = State::Connected(service);
                        }
                        Poll::Pending => {
                            trace!("poll_ready; not ready");
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(e)) => {
                            trace!("poll_ready; error");
                            // Store the connect error and fall through to the
                            // `Ok` below: the error is surfaced by `call`.
                            self.state = State::Idle;
                            self.error = Some(e);
                            break;
                        }
                    }
                }
                State::Connected(ref mut inner) => {
                    trace!("poll_ready; connected");
                    match inner.poll_ready(cx) {
                        Poll::Ready(Ok(())) => {
                            trace!("poll_ready; ready");
                            return Poll::Ready(Ok(()));
                        }
                        Poll::Pending => {
                            trace!("poll_ready; not ready");
                            return Poll::Pending;
                        }
                        Poll::Ready(Err(_)) => {
                            trace!("poll_ready; error");
                            // Inner service failed: drop it and reconnect on
                            // the next loop iteration.
                            self.state = State::Idle;
                        }
                    }
                }
            }
        }
        Poll::Ready(Ok(()))
    }

    /// Dispatches a request to the connected service, or yields the connect
    /// error captured by `poll_ready`.
    ///
    /// # Panics
    ///
    /// Panics if the service is neither connected nor holding an error, i.e.
    /// `poll_ready` was not driven to readiness first.
    fn call(&mut self, request: Request) -> Self::Future {
        if let Some(error) = self.error.take() {
            return ResponseFuture::error(error);
        }
        let service = match self.state {
            State::Connected(ref mut service) => service,
            _ => panic!("service not ready; poll_ready must be called first"),
        };
        let fut = service.call(request);
        ResponseFuture::new(fut)
    }
}
impl<M, Target> fmt::Debug for Reconnect<M, Target>
where
    M: Service<Target> + fmt::Debug,
    M::Future: fmt::Debug,
    M::Response: fmt::Debug,
    Target: fmt::Debug,
{
    /// Formats the make-service, state, and target.
    ///
    /// The pending `error` field is omitted, as `M::Error` carries no
    /// `Debug` bound here.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut d = fmt.debug_struct("Reconnect");
        d.field("mk_service", &self.mk_service);
        d.field("state", &self.state);
        d.field("target", &self.target);
        d.finish()
    }
}

279
vendor/tower/src/retry/backoff.rs vendored Normal file
View File

@@ -0,0 +1,279 @@
//! This module contains generic [backoff] utilities to be used with the retry
//! layer.
//!
//! The [`Backoff`] trait is a generic way to represent backoffs that can use
//! any timer type.
//!
//! [`ExponentialBackoffMaker`] implements the maker type for
//! [`ExponentialBackoff`] which implements the [`Backoff`] trait and provides
//! a batteries included exponential backoff and jitter strategy.
//!
//! [backoff]: https://en.wikipedia.org/wiki/Exponential_backoff
use std::fmt::Display;
use std::future::Future;
use std::time::Duration;
use tokio::time;
use crate::util::rng::{HasherRng, Rng};
/// Trait used to construct [`Backoff`] trait implementors.
pub trait MakeBackoff {
    /// The backoff type produced by this maker.
    type Backoff: Backoff;

    /// Constructs a new backoff type.
    ///
    /// Each call starts a fresh backoff session in its initial state.
    fn make_backoff(&mut self) -> Self::Backoff;
}
/// A backoff trait where a single mutable reference represents a single
/// backoff session. Implementors must also implement [`Clone`] which will
/// reset the backoff back to the default state for the next session.
pub trait Backoff {
    /// The future associated with each backoff. This usually will be some sort
    /// of timer.
    type Future: Future<Output = ()>;

    /// Initiate the next backoff in the sequence.
    ///
    /// The returned future completes when the caller should retry.
    fn next_backoff(&mut self) -> Self::Future;
}
/// A maker type for [`ExponentialBackoff`].
#[derive(Debug, Clone)]
pub struct ExponentialBackoffMaker<R = HasherRng> {
    /// The minimum amount of time to wait before resuming an operation.
    min: time::Duration,
    /// The maximum amount of time to wait before resuming an operation.
    max: time::Duration,
    /// The ratio of the base timeout that may be randomly added to a backoff.
    ///
    /// Must be greater than or equal to 0.0.
    jitter: f64,
    /// Source of randomness cloned into each created backoff.
    rng: R,
}
/// A jittered [exponential backoff] strategy.
///
/// The backoff duration will increase exponentially for every subsequent
/// backoff, up to a maximum duration. A small amount of [random jitter] is
/// added to each backoff duration, in order to avoid retry spikes.
///
/// [exponential backoff]: https://en.wikipedia.org/wiki/Exponential_backoff
/// [random jitter]: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
#[derive(Debug, Clone)]
pub struct ExponentialBackoff<R = HasherRng> {
    /// Base (first) backoff duration; doubled on each iteration.
    min: time::Duration,
    /// Upper bound on any computed backoff duration.
    max: time::Duration,
    /// Ratio of the base duration that may be added as random jitter.
    jitter: f64,
    /// Random source used to draw the jitter factor.
    rng: R,
    /// Number of backoffs issued so far in this session.
    iterations: u32,
}
impl<R> ExponentialBackoffMaker<R>
where
    R: Rng,
{
    /// Create a new `ExponentialBackoff`.
    ///
    /// # Error
    ///
    /// Returns a config validation error if:
    /// - `min` > `max`
    /// - `max` == 0
    /// - `jitter` < `0.0`
    /// - `jitter` > `100.0`
    /// - `jitter` is NaN
    pub fn new(
        min: time::Duration,
        max: time::Duration,
        jitter: f64,
        rng: R,
    ) -> Result<Self, InvalidBackoff> {
        // NaN fails both range comparisons below (NaN comparisons are always
        // false), so it is caught by the dedicated `is_nan` check.
        if min > max {
            Err(InvalidBackoff("maximum must not be less than minimum"))
        } else if max == time::Duration::from_millis(0) {
            Err(InvalidBackoff("maximum must be non-zero"))
        } else if jitter < 0.0 {
            Err(InvalidBackoff("jitter must not be negative"))
        } else if jitter > 100.0 {
            Err(InvalidBackoff("jitter must not be greater than 100"))
        } else if jitter.is_nan() {
            Err(InvalidBackoff("jitter must not be NaN"))
        } else {
            Ok(Self {
                min,
                max,
                jitter,
                rng,
            })
        }
    }
}
impl<R> MakeBackoff for ExponentialBackoffMaker<R>
where
    R: Rng + Clone,
{
    type Backoff = ExponentialBackoff<R>;

    /// Starts a fresh backoff session with the configured parameters.
    fn make_backoff(&mut self) -> Self::Backoff {
        let rng = self.rng.clone();
        ExponentialBackoff {
            min: self.min,
            max: self.max,
            jitter: self.jitter,
            rng,
            iterations: 0,
        }
    }
}
impl<R: Rng> ExponentialBackoff<R> {
    /// Computes the un-jittered delay for the current iteration:
    /// `min * 2^iterations`, saturating, and clamped to `max`.
    fn base(&self) -> time::Duration {
        debug_assert!(
            self.min <= self.max,
            "maximum backoff must not be less than minimum backoff"
        );
        debug_assert!(
            self.max > time::Duration::from_millis(0),
            "Maximum backoff must be non-zero"
        );
        self.min
            .checked_mul(2_u32.saturating_pow(self.iterations))
            // on overflow fall back to `max`...
            .unwrap_or(self.max)
            // ...and clamp non-overflowing results to `max` as well.
            .min(self.max)
    }
    /// Returns a random, uniform duration on `[0, base*self.jitter]` no greater
    /// than `self.max`.
    fn jitter(&mut self, base: time::Duration) -> time::Duration {
        if self.jitter == 0.0 {
            // Jitter disabled: never touch the RNG.
            time::Duration::default()
        } else {
            let jitter_factor = self.rng.next_f64();
            debug_assert!(
                jitter_factor > 0.0,
                "rng returns values between 0.0 and 1.0"
            );
            let rand_jitter = jitter_factor * self.jitter;
            let secs = (base.as_secs() as f64) * rand_jitter;
            let nanos = (base.subsec_nanos() as f64) * rand_jitter;
            // `base <= self.max` (see `base()`), so this cannot underflow;
            // capping at `remaining` keeps `base + jitter <= self.max`.
            let remaining = self.max - base;
            time::Duration::new(secs as u64, nanos as u32).min(remaining)
        }
    }
}
impl<R> Backoff for ExponentialBackoff<R>
where
    R: Rng,
{
    type Future = tokio::time::Sleep;

    /// Produces the next sleep and advances the iteration counter.
    fn next_backoff(&mut self) -> Self::Future {
        let base = self.base();
        let delay = base + self.jitter(base);
        self.iterations += 1;
        tokio::time::sleep(delay)
    }
}
impl Default for ExponentialBackoffMaker {
    /// 50ms minimum, effectively unbounded maximum, and 0.99 jitter ratio.
    fn default() -> Self {
        let min = Duration::from_millis(50);
        let max = Duration::from_millis(u64::MAX);
        Self::new(min, max, 0.99, HasherRng::default())
            .expect("Unable to create ExponentialBackoff")
    }
}
/// Backoff validation error.
#[derive(Debug)]
pub struct InvalidBackoff(&'static str);

impl Display for InvalidBackoff {
    /// Renders as `invalid backoff: <reason>`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("invalid backoff: ")?;
        f.write_str(self.0)
    }
}

impl std::error::Error for InvalidBackoff {}
#[cfg(test)]
mod tests {
    use super::*;
    use quickcheck::*;
    quickcheck! {
        // With zero iterations, the first base delay must equal `min`.
        fn backoff_base_first(min_ms: u64, max_ms: u64) -> TestResult {
            let min = time::Duration::from_millis(min_ms);
            let max = time::Duration::from_millis(max_ms);
            let rng = HasherRng::default();
            let mut backoff = match ExponentialBackoffMaker::new(min, max, 0.0, rng) {
                Err(_) => return TestResult::discard(),
                Ok(backoff) => backoff,
            };
            let backoff = backoff.make_backoff();
            let delay = backoff.base();
            TestResult::from_bool(min == delay)
        }
        // After any number of iterations, the base delay stays in [min, max].
        fn backoff_base(min_ms: u64, max_ms: u64, iterations: u32) -> TestResult {
            let min = time::Duration::from_millis(min_ms);
            let max = time::Duration::from_millis(max_ms);
            let rng = HasherRng::default();
            let mut backoff = match ExponentialBackoffMaker::new(min, max, 0.0, rng) {
                Err(_) => return TestResult::discard(),
                Ok(backoff) => backoff,
            };
            let mut backoff = backoff.make_backoff();
            backoff.iterations = iterations;
            let delay = backoff.base();
            TestResult::from_bool(min <= delay && delay <= max)
        }
        // Jitter is zero when disabled or when there is no room to jitter,
        // and strictly positive otherwise.
        fn backoff_jitter(base_ms: u64, max_ms: u64, jitter: f64) -> TestResult {
            let base = time::Duration::from_millis(base_ms);
            let max = time::Duration::from_millis(max_ms);
            let rng = HasherRng::default();
            let mut backoff = match ExponentialBackoffMaker::new(base, max, jitter, rng) {
                Err(_) => return TestResult::discard(),
                Ok(backoff) => backoff,
            };
            let mut backoff = backoff.make_backoff();
            let j = backoff.jitter(base);
            if jitter == 0.0 || base_ms == 0 || max_ms == base_ms {
                TestResult::from_bool(j == time::Duration::default())
            } else {
                TestResult::from_bool(j > time::Duration::default())
            }
        }
    }
    // Non-finite jitter values must be rejected at construction time.
    #[test]
    fn jitter_must_be_finite() {
        let min = time::Duration::from_millis(0);
        let max = time::Duration::from_millis(1);
        let rng = HasherRng::default();
        for n in [f64::INFINITY, f64::NEG_INFINITY, f64::NAN] {
            let result = ExponentialBackoffMaker::new(min, max, n, rng.clone());
            assert!(
                matches!(result, Err(InvalidBackoff(_))),
                "{} should be an invalid jitter",
                n
            );
        }
    }
}

90
vendor/tower/src/retry/budget/mod.rs vendored Normal file
View File

@@ -0,0 +1,90 @@
//! A retry "budget" for allowing only a certain amount of retries over time.
//!
//! # Why budgets and not max retries?
//!
//! The most common way of configuring retries is to specify a maximum
//! number of retry attempts to perform before giving up. This is a familiar idea to anyone
//! who's used a web browser: you try to load a webpage, and if it doesn't load, you try again.
//! If it still doesn't load, you try a third time. Finally you give up.
//!
//! Unfortunately, there are at least two problems with configuring retries this way:
//!
//! **Choosing the maximum number of retry attempts is a guessing game.**
//! You need to pick a number that's high enough to make a difference when things are somewhat failing,
//! but not so high that it generates extra load on the system when it's really failing. In practice,
//! you usually pick a maximum retry attempts number out of a hat (e.g. 3) and hope for the best.
//!
//! **Systems configured this way are vulnerable to retry storms.**
//! A retry storm begins when one service starts to experience a larger than normal failure rate.
//! This causes its clients to retry those failed requests. The extra load from the retries causes the
//! service to slow down further and fail more requests, triggering more retries. If each client is
//! configured to retry up to 3 times, this can quadruple the number of requests being sent! To make
//! matters even worse, if any of the clients' clients are configured with retries, the number of retries
//! compounds multiplicatively and can turn a small number of errors into a self-inflicted denial of service attack.
//!
//! It's generally dangerous to implement retries without some limiting factor. [`Budget`]s are that limit.
//!
//! # Examples
//!
//! ```rust
//! use std::{future, sync::Arc};
//!
//! use tower::retry::{budget::{Budget, TpsBudget}, Policy};
//!
//! type Req = String;
//! type Res = String;
//!
//! #[derive(Clone, Debug)]
//! struct RetryPolicy {
//! budget: Arc<TpsBudget>,
//! }
//!
//! impl<E> Policy<Req, Res, E> for RetryPolicy {
//! type Future = future::Ready<()>;
//!
//! fn retry(&mut self, req: &mut Req, result: &mut Result<Res, E>) -> Option<Self::Future> {
//! match result {
//! Ok(_) => {
//! // Treat all `Response`s as success,
//! // so deposit budget and don't retry...
//! self.budget.deposit();
//! None
//! }
//! Err(_) => {
//! // Treat all errors as failures...
//! // Withdraw the budget, don't retry if we overdrew.
//! let withdrew = self.budget.withdraw();
//! if !withdrew {
//! return None;
//! }
//!
//! // Try again!
//! Some(future::ready(()))
//! }
//! }
//! }
//!
//! fn clone_request(&mut self, req: &Req) -> Option<Req> {
//! Some(req.clone())
//! }
//! }
//! ```
pub mod tps_budget;
pub use tps_budget::TpsBudget;
/// For more info about [`Budget`], please see the [module-level documentation].
///
/// [module-level documentation]: self
pub trait Budget {
    /// Store a "deposit" in the budget, which will be used to permit future
    /// withdrawals.
    fn deposit(&self);

    /// Check whether there is enough "balance" in the budget to issue a new
    /// retry.
    ///
    /// If there is not enough, false is returned. On success, the withdrawn
    /// amount is deducted from the balance.
    fn withdraw(&self) -> bool;
}

View File

@@ -0,0 +1,260 @@
//! Transactions Per Minute (Tps) Budget implementations
use std::{
fmt,
sync::{
atomic::{AtomicIsize, Ordering},
Mutex,
},
time::Duration,
};
use tokio::time::Instant;
use super::Budget;
/// A Transactions Per Minute config for managing retry tokens.
///
/// [`TpsBudget`] uses a token bucket to decide if the request should be retried.
///
/// [`TpsBudget`] works by checking how many retries have been made in a certain period of time.
/// Minimum allowed number of retries are effectively reset on an interval. Allowed number of
/// retries depends on failed request count in recent time frame.
///
/// For more info about [`Budget`], please see the [module-level documentation].
///
/// [module-level documentation]: super
pub struct TpsBudget {
    /// Tracks the currently-open time slot; guarded by a mutex because slot
    /// rotation must happen atomically (see `expire`).
    generation: Mutex<Generation>,
    /// Initial budget allowed for every second.
    reserve: isize,
    /// Slots of the TTL divided evenly.
    slots: Box<[AtomicIsize]>,
    /// The amount of time represented by each slot.
    window: Duration,
    /// The changes for the current slot to be committed
    /// after the slot expires.
    writer: AtomicIsize,
    /// Amount of tokens to deposit for each put().
    deposit_amount: isize,
    /// Amount of tokens to withdraw for each try_get().
    withdraw_amount: isize,
}
/// Tracks which ring-buffer slot is current and when it was opened.
#[derive(Debug)]
struct Generation {
    /// Slot index of the last generation.
    index: usize,
    /// The timestamp since the last generation expired.
    time: Instant,
}
// ===== impl TpsBudget =====
impl TpsBudget {
/// Create a [`TpsBudget`] that allows for a certain percent of the total
/// requests to be retried.
///
/// - The `ttl` is the duration of how long a single `deposit` should be
/// considered. Must be between 1 and 60 seconds.
/// - The `min_per_sec` is the minimum rate of retries allowed to accommodate
/// clients that have just started issuing requests, or clients that do
/// not issue many requests per window.
/// - The `retry_percent` is the percentage of calls to `deposit` that can
/// be retried. This is in addition to any retries allowed for via
/// `min_per_sec`. Must be between 0 and 1000.
///
/// As an example, if `0.1` is used, then for every 10 calls to `deposit`,
/// 1 retry will be allowed. If `2.0` is used, then every `deposit`
/// allows for 2 retries.
pub fn new(ttl: Duration, min_per_sec: u32, retry_percent: f32) -> Self {
// assertions taken from finagle
assert!(ttl >= Duration::from_secs(1));
assert!(ttl <= Duration::from_secs(60));
assert!(retry_percent >= 0.0);
assert!(retry_percent <= 1000.0);
assert!(min_per_sec < i32::MAX as u32);
let (deposit_amount, withdraw_amount) = if retry_percent == 0.0 {
// If there is no percent, then you gain nothing from deposits.
// Withdrawals can only be made against the reserve, over time.
(0, 1)
} else if retry_percent <= 1.0 {
(1, (1.0 / retry_percent) as isize)
} else {
// Support for when retry_percent is between 1.0 and 1000.0,
// meaning for every deposit D, D * retry_percent withdrawals
// can be made.
(1000, (1000.0 / retry_percent) as isize)
};
let reserve = (min_per_sec as isize)
.saturating_mul(ttl.as_secs() as isize) // ttl is between 1 and 60 seconds
.saturating_mul(withdraw_amount);
// AtomicIsize isn't clone, so the slots need to be built in a loop...
let windows = 10u32;
let mut slots = Vec::with_capacity(windows as usize);
for _ in 0..windows {
slots.push(AtomicIsize::new(0));
}
TpsBudget {
generation: Mutex::new(Generation {
index: 0,
time: Instant::now(),
}),
reserve,
slots: slots.into_boxed_slice(),
window: ttl / windows,
writer: AtomicIsize::new(0),
deposit_amount,
withdraw_amount,
}
}
// Roll the sliding window forward if at least one window interval has
// elapsed since the last rollover.
//
// Commits the pending `writer` count into the slot for the window that just
// ended, then zeroes any slots corresponding to windows that elapsed with no
// activity at all. No-op when less than one window interval has passed.
fn expire(&self) {
    let mut gen = self.generation.lock().expect("generation lock");
    let now = Instant::now();
    let diff = now.saturating_duration_since(gen.time);
    if diff < self.window {
        // not expired yet
        return;
    }
    // Commit the pending writes into the current slot.
    let to_commit = self.writer.swap(0, Ordering::SeqCst);
    self.slots[gen.index].store(to_commit, Ordering::SeqCst);
    // Walk forward one slot per fully-elapsed window, clearing each; the
    // ring wraps via the modulus.
    let mut diff = diff;
    let mut idx = (gen.index + 1) % self.slots.len();
    while diff > self.window {
        self.slots[idx].store(0, Ordering::SeqCst);
        diff -= self.window;
        idx = (idx + 1) % self.slots.len();
    }
    gen.index = idx;
    gen.time = now;
}
// Current balance: pending writes + every committed window slot + the
// standing reserve, combined with saturating arithmetic so overflow clamps
// instead of wrapping or panicking.
fn sum(&self) -> isize {
    let pending = self.writer.load(Ordering::SeqCst);
    // An explicit saturating fold is used (rather than `sum()`) to pin
    // down the overflow behavior.
    let mut committed: isize = 0;
    for slot in self.slots.iter() {
        committed = committed.saturating_add(slot.load(Ordering::SeqCst));
    }
    pending
        .saturating_add(committed)
        .saturating_add(self.reserve)
}
// Deposit `amt` into the current window, rolling the window forward first
// so the deposit lands in the correct slot.
fn put(&self, amt: isize) {
    self.expire();
    self.writer.fetch_add(amt, Ordering::SeqCst);
}
// Attempt to withdraw `amt` from the budget.
//
// Returns `true` (debiting the current window) when the balance covers the
// request; otherwise leaves the budget untouched and returns `false`.
fn try_get(&self, amt: isize) -> bool {
    debug_assert!(amt >= 0);

    self.expire();

    if self.sum() < amt {
        return false;
    }
    self.writer.fetch_add(-amt, Ordering::SeqCst);
    true
}
}
impl Budget for TpsBudget {
    // A successful (non-retried) call funds future retries.
    fn deposit(&self) {
        self.put(self.deposit_amount)
    }
    // Spend budget for one retry attempt; `false` denies the retry.
    fn withdraw(&self) -> bool {
        self.try_get(self.withdraw_amount)
    }
}
impl Default for TpsBudget {
    fn default() -> Self {
        // Default: a 10 second window, 10 "free" retries per second, and 20%
        // of calls allowed to be retried on top of that (presumably mirroring
        // finagle's defaults, per the assertions in `new` — see above).
        TpsBudget::new(Duration::from_secs(10), 10, 0.2)
    }
}
impl fmt::Debug for TpsBudget {
    /// Debug-format the budget, exposing the exchange rates and the live
    /// balance (computed via `sum`, without rolling the window).
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Use the actual type name: this previously printed "Budget", which
        // no longer matches the type after the rename to `TpsBudget`.
        f.debug_struct("TpsBudget")
            .field("deposit", &self.deposit_amount)
            .field("withdraw", &self.withdraw_amount)
            .field("balance", &self.sum())
            .finish()
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::retry::budget::Budget;
    use tokio::time;

    #[test]
    fn tps_empty() {
        // No deposits and no reserve: there is nothing to withdraw.
        let budget = TpsBudget::new(Duration::from_secs(1), 0, 1.0);
        assert!(!budget.withdraw());
    }

    #[tokio::test]
    async fn tps_leaky() {
        time::pause();

        // A deposit must age out of the 1s window once enough time passes.
        let budget = TpsBudget::new(Duration::from_secs(1), 0, 1.0);
        budget.deposit();
        time::advance(Duration::from_secs(3)).await;
        assert!(!budget.withdraw());
    }

    #[tokio::test]
    async fn tps_slots() {
        time::pause();

        let budget = TpsBudget::new(Duration::from_secs(1), 0, 0.5);
        budget.deposit();
        budget.deposit();
        time::advance(Duration::from_millis(901)).await;
        // 900ms later, the deposit should still be valid
        assert!(budget.withdraw());

        // blank slate
        time::advance(Duration::from_millis(2001)).await;

        budget.deposit();
        time::advance(Duration::from_millis(301)).await;
        budget.deposit();
        time::advance(Duration::from_millis(801)).await;
        budget.deposit();

        // the first deposit is expired, but the 2nd should still be valid,
        // combining with the 3rd
        assert!(budget.withdraw());
    }

    #[tokio::test]
    async fn tps_reserve() {
        // A reserve of 5/sec over a 1s window funds exactly 5 withdrawals
        // before the budget runs dry.
        let budget = TpsBudget::new(Duration::from_secs(1), 5, 1.0);
        for _ in 0..5 {
            assert!(budget.withdraw());
        }
        assert!(!budget.withdraw());
    }
}

119
vendor/tower/src/retry/future.rs vendored Normal file
View File

@@ -0,0 +1,119 @@
//! Future types
use super::{Policy, Retry};
use pin_project_lite::pin_project;
use std::future::Future;
use std::pin::Pin;
use std::task::{ready, Context, Poll};
use tower_service::Service;
pin_project! {
    /// The [`Future`] returned by a [`Retry`] service.
    #[derive(Debug)]
    pub struct ResponseFuture<P, S, Request>
    where
        P: Policy<Request, S::Response, S::Error>,
        S: Service<Request>,
    {
        // Clone of the original request, consumed on each retry attempt.
        // `None` means the policy declined to clone, so no retry is possible.
        request: Option<Request>,
        // The `Retry` middleware itself, carrying the policy and inner service.
        #[pin]
        retry: Retry<P, S>,
        // Current phase of the call/retry state machine below.
        #[pin]
        state: State<S::Future, P::Future>,
    }
}
pin_project! {
    #[project = StateProj]
    #[derive(Debug)]
    enum State<F, P> {
        // Polling the future from [`Service::call`]
        Called {
            #[pin]
            future: F
        },
        // Polling the future from [`Policy::retry`]
        Waiting {
            #[pin]
            waiting: P
        },
        // Polling [`Service::poll_ready`] after [`Waiting`] was OK.
        Retrying,
    }
}
impl<P, S, Request> ResponseFuture<P, S, Request>
where
    P: Policy<Request, S::Response, S::Error>,
    S: Service<Request>,
{
    // Build a future that is already executing its first attempt (`future`).
    pub(crate) fn new(
        request: Option<Request>,
        retry: Retry<P, S>,
        future: S::Future,
    ) -> ResponseFuture<P, S, Request> {
        ResponseFuture {
            request,
            retry,
            state: State::Called { future },
        }
    }
}
impl<P, S, Request> Future for ResponseFuture<P, S, Request>
where
    P: Policy<Request, S::Response, S::Error>,
    S: Service<Request>,
{
    type Output = Result<S::Response, S::Error>;
    // Drive the state machine: Called -> (Waiting -> Retrying -> Called)*
    // until the policy stops retrying or no cloned request is available.
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        loop {
            match this.state.as_mut().project() {
                StateProj::Called { future } => {
                    let mut result = ready!(future.poll(cx));
                    if let Some(req) = &mut this.request {
                        // Ask the policy whether (and after what delay) to
                        // retry; it may mutate the request and/or the result.
                        match this.retry.policy.retry(req, &mut result) {
                            Some(waiting) => {
                                this.state.set(State::Waiting { waiting });
                            }
                            None => return Poll::Ready(result),
                        }
                    } else {
                        // request wasn't cloned, so no way to retry it
                        return Poll::Ready(result);
                    }
                }
                StateProj::Waiting { waiting } => {
                    // The policy's delay future; once it resolves we may
                    // attempt the call again.
                    ready!(waiting.poll(cx));
                    this.state.set(State::Retrying);
                }
                StateProj::Retrying => {
                    // NOTE: we assume here that
                    //
                    //   this.retry.poll_ready()
                    //
                    // is equivalent to
                    //
                    //   this.retry.service.poll_ready()
                    //
                    // we need to make that assumption to avoid adding an Unpin bound to the Policy
                    // in Ready to make it Unpin so that we can get &mut Ready as needed to call
                    // poll_ready on it.
                    ready!(this.retry.as_mut().project().service.poll_ready(cx))?;
                    // Keep a fresh clone for the *next* potential retry before
                    // consuming this one.
                    let req = this
                        .request
                        .take()
                        .expect("retrying requires cloned request");
                    *this.request = this.retry.policy.clone_request(&req);
                    this.state.set(State::Called {
                        future: this.retry.as_mut().project().service.call(req),
                    });
                }
            }
        }
    }
}

27
vendor/tower/src/retry/layer.rs vendored Normal file
View File

@@ -0,0 +1,27 @@
use super::Retry;
use tower_layer::Layer;
/// Retry requests based on a policy
#[derive(Debug, Clone)]
pub struct RetryLayer<P> {
    // The retry policy, cloned into every `Retry` service this layer builds.
    policy: P,
}
impl<P> RetryLayer<P> {
    /// Creates a new [`RetryLayer`] from a retry policy.
    pub const fn new(policy: P) -> Self {
        RetryLayer { policy }
    }
}
impl<P, S> Layer<S> for RetryLayer<P>
where
    P: Clone,
{
    type Service = Retry<P, S>;

    /// Wrap `service` with retry behavior driven by a clone of this layer's
    /// policy.
    fn layer(&self, service: S) -> Self::Service {
        Retry::new(self.policy.clone(), service)
    }
}

94
vendor/tower/src/retry/mod.rs vendored Normal file
View File

@@ -0,0 +1,94 @@
//! Middleware for retrying "failed" requests.
pub mod backoff;
pub mod budget;
pub mod future;
mod layer;
mod policy;
pub use self::layer::RetryLayer;
pub use self::policy::Policy;
use self::future::ResponseFuture;
use pin_project_lite::pin_project;
use std::task::{Context, Poll};
use tower_service::Service;
pin_project! {
    /// Configure retrying requests of "failed" responses.
    ///
    /// A [`Policy`] classifies what is a "failed" response.
    ///
    /// # Clone
    ///
    /// This middleware requires that the inner `Service` implements [`Clone`],
    /// because the `Service` must be stored in each [`ResponseFuture`] in
    /// order to retry the request in the event of a failure. If the inner
    /// `Service` type does not implement `Clone`, the [`Buffer`] middleware
    /// can be added to make any `Service` cloneable.
    ///
    /// [`Buffer`]: crate::buffer::Buffer
    ///
    /// The `Policy` must also implement `Clone`. This middleware will
    /// clone the policy for each _request session_. This means a new clone
    /// of the policy will be created for each initial request and any subsequent
    /// retries of that request. Therefore, any state stored in the `Policy` instance
    /// is for that request session only. In order to share data across request
    /// sessions, that shared state may be stored in an [`Arc`], so that all clones
    /// of the `Policy` type reference the same instance of the shared state.
    ///
    /// [`Arc`]: std::sync::Arc
    #[derive(Clone, Debug)]
    pub struct Retry<P, S> {
        // The retry policy; cloned per request session.
        policy: P,
        // The wrapped service whose failed responses may be retried.
        service: S,
    }
}
// ===== impl Retry =====
impl<P, S> Retry<P, S> {
    /// Retry the inner service depending on this [`Policy`].
    pub const fn new(policy: P, service: S) -> Self {
        Retry { policy, service }
    }
    /// Get a reference to the inner service
    pub fn get_ref(&self) -> &S {
        &self.service
    }
    /// Get a mutable reference to the inner service
    pub fn get_mut(&mut self) -> &mut S {
        &mut self.service
    }
    /// Consume `self`, returning the inner service
    pub fn into_inner(self) -> S {
        self.service
    }
}
impl<P, S, Request> Service<Request> for Retry<P, S>
where
    P: Policy<Request, S::Response, S::Error> + Clone,
    S: Service<Request> + Clone,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = ResponseFuture<P, S, Request>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // NOTE: the Future::poll impl for ResponseFuture assumes that Retry::poll_ready is
        // equivalent to Ready.service.poll_ready. If this ever changes, that code must be updated
        // as well.
        self.service.poll_ready(cx)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // Clone the request up front (if the policy allows); without a clone
        // the response future has no way to retry.
        let cloned = self.policy.clone_request(&request);
        let future = self.service.call(request);
        // `self.clone()` moves a copy of both the policy (fresh per request
        // session) and the inner service into the response future.
        ResponseFuture::new(cloned, self.clone(), future)
    }
}

94
vendor/tower/src/retry/policy.rs vendored Normal file
View File

@@ -0,0 +1,94 @@
use std::future::Future;
/// A "retry policy" to classify if a request should be retried.
///
/// # Example
///
/// ```
/// use tower::retry::Policy;
/// use std::future;
///
/// type Req = String;
/// type Res = String;
///
/// struct Attempts(usize);
///
/// impl<E> Policy<Req, Res, E> for Attempts {
/// type Future = future::Ready<()>;
///
/// fn retry(&mut self, req: &mut Req, result: &mut Result<Res, E>) -> Option<Self::Future> {
/// match result {
/// Ok(_) => {
/// // Treat all `Response`s as success,
/// // so don't retry...
/// None
/// },
/// Err(_) => {
/// // Treat all errors as failures...
/// // But we limit the number of attempts...
/// if self.0 > 0 {
/// // Try again!
/// self.0 -= 1;
/// Some(future::ready(()))
/// } else {
/// // Used all our attempts, no retry...
/// None
/// }
/// }
/// }
/// }
///
/// fn clone_request(&mut self, req: &Req) -> Option<Req> {
/// Some(req.clone())
/// }
/// }
/// ```
pub trait Policy<Req, Res, E> {
    /// The [`Future`] type returned by [`Policy::retry`].
    type Future: Future<Output = ()>;
    /// Check the policy if a certain request should be retried.
    ///
    /// This method is passed a reference to the original request, and either
    /// the [`Service::Response`] or [`Service::Error`] from the inner service.
    ///
    /// If the request should **not** be retried, return `None`.
    ///
    /// If the request *should* be retried, return `Some` future that will delay
    /// the next retry of the request. This can be used to sleep for a certain
    /// duration, to wait for some external condition to be met before retrying,
    /// or resolve right away, if the request should be retried immediately.
    ///
    /// ## Mutating Requests
    ///
    /// The policy MAY choose to mutate the `req`: if the request is mutated, the
    /// mutated request will be sent to the inner service in the next retry.
    /// This can be helpful for use cases like tracking the retry count in a
    /// header.
    ///
    /// ## Mutating Results
    ///
    /// The policy MAY choose to mutate the result. This enables the retry
    /// policy to convert a failure into a success and vice versa. For example,
    /// if the policy is used to poll while waiting for a state change, the
    /// policy can switch the result to emit a specific error when retries are
    /// exhausted.
    ///
    /// The policy can also record metadata on the request to include
    /// information about the number of retries required or to record that a
    /// failure failed after exhausting all retries.
    ///
    /// [`Service::Response`]: crate::Service::Response
    /// [`Service::Error`]: crate::Service::Error
    fn retry(&mut self, req: &mut Req, result: &mut Result<Res, E>) -> Option<Self::Future>;
    /// Tries to clone a request before being passed to the inner service.
    ///
    /// If the request cannot be cloned, return [`None`]. Moreover, the retry
    /// function will not be called if the [`None`] is returned.
    fn clone_request(&mut self, req: &Req) -> Option<Req>;
}
// Ensure `Policy` is object safe: this compiles only if the trait can be
// used as a `dyn` trait object.
#[cfg(test)]
fn _obj_safe(_: Box<dyn Policy<(), (), (), Future = std::future::Ready<()>>>) {}

View File

@@ -0,0 +1,8 @@
//! Background readiness types
opaque_future! {
    /// Response future from [`SpawnReady`] services.
    ///
    /// Wraps the inner future, converting its error into [`crate::BoxError`].
    ///
    /// [`SpawnReady`]: crate::spawn_ready::SpawnReady
    pub type ResponseFuture<F, E> = futures_util::future::MapErr<F, fn(E) -> crate::BoxError>;
}

18
vendor/tower/src/spawn_ready/layer.rs vendored Normal file
View File

@@ -0,0 +1,18 @@
/// Spawns tasks to drive its inner service to readiness.
#[derive(Clone, Debug, Default)]
pub struct SpawnReadyLayer(());

impl SpawnReadyLayer {
    /// Builds a [`SpawnReadyLayer`].
    pub fn new() -> Self {
        SpawnReadyLayer(())
    }
}

impl<S> tower_layer::Layer<S> for SpawnReadyLayer {
    type Service = super::SpawnReady<S>;

    /// Wrap `service` so its readiness is driven on a background task.
    fn layer(&self, service: S) -> Self::Service {
        super::SpawnReady::new(service)
    }
}

9
vendor/tower/src/spawn_ready/mod.rs vendored Normal file
View File

@@ -0,0 +1,9 @@
//! When an underlying service is not ready, drive it to readiness on a
//! background task.
pub mod future;
mod layer;
mod service;
pub use self::layer::SpawnReadyLayer;
pub use self::service::SpawnReady;

87
vendor/tower/src/spawn_ready/service.rs vendored Normal file
View File

@@ -0,0 +1,87 @@
use super::{future::ResponseFuture, SpawnReadyLayer};
use crate::{util::ServiceExt, BoxError};
use futures_util::future::TryFutureExt;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
use tracing::Instrument;
/// Spawns tasks to drive an inner service to readiness.
///
/// See crate level documentation for more details.
#[derive(Debug)]
pub struct SpawnReady<S> {
    // Either the service itself or the background task driving it to
    // readiness; see `Inner`.
    inner: Inner<S>,
}
#[derive(Debug)]
enum Inner<S> {
    // The service is on hand. The `Option` exists so the service can be
    // moved into a spawned task; it is only `None` transiently inside
    // `poll_ready`.
    Service(Option<S>),
    // A spawned task that resolves with the service once it is ready.
    Future(tokio::task::JoinHandle<Result<S, BoxError>>),
}
impl<S> SpawnReady<S> {
    /// Creates a new [`SpawnReady`] wrapping `service`.
    pub const fn new(service: S) -> Self {
        Self {
            inner: Inner::Service(Some(service)),
        }
    }
    /// Creates a layer that wraps services with [`SpawnReady`].
    pub fn layer() -> SpawnReadyLayer {
        SpawnReadyLayer::default()
    }
}
impl<S> Drop for SpawnReady<S> {
    /// Abort any in-flight background readiness task so it does not outlive
    /// this service.
    fn drop(&mut self) {
        match self.inner {
            Inner::Future(ref mut task) => task.abort(),
            Inner::Service(_) => {}
        }
    }
}
impl<S, Req> Service<Req> for SpawnReady<S>
where
    Req: 'static,
    S: Service<Req> + Send + 'static,
    S::Error: Into<BoxError>,
{
    type Response = S::Response;
    type Error = BoxError;
    type Future = ResponseFuture<S::Future, S::Error>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), BoxError>> {
        loop {
            self.inner = match self.inner {
                Inner::Service(ref mut svc) => {
                    // Poll the service inline first; only spawn when pending.
                    if let Poll::Ready(r) = svc.as_mut().expect("illegal state").poll_ready(cx) {
                        return Poll::Ready(r.map_err(Into::into));
                    }
                    // Not ready: move the service onto a background task that
                    // drives it to readiness, preserving the current tracing
                    // span.
                    let svc = svc.take().expect("illegal state");
                    let rx =
                        tokio::spawn(svc.ready_oneshot().map_err(Into::into).in_current_span());
                    Inner::Future(rx)
                }
                Inner::Future(ref mut fut) => {
                    // First `?` surfaces task join errors, the second surfaces
                    // the service's own readiness error.
                    let svc = ready!(Pin::new(fut).poll(cx))??;
                    Inner::Service(Some(svc))
                }
            }
        }
    }
    fn call(&mut self, request: Req) -> Self::Future {
        match self.inner {
            Inner::Service(Some(ref mut svc)) => {
                ResponseFuture::new(svc.call(request).map_err(Into::into))
            }
            _ => unreachable!("poll_ready must be called"),
        }
    }
}

203
vendor/tower/src/steer/mod.rs vendored Normal file
View File

@@ -0,0 +1,203 @@
//! This module provides functionality to aid managing routing requests between [`Service`]s.
//!
//! # Example
//!
//! [`Steer`] can for example be used to create a router, akin to what you might find in web
//! frameworks.
//!
//! Here, `GET /` will be sent to the `root` service, while all other requests go to `not_found`.
//!
//! ```rust
//! # use std::task::{Context, Poll, ready};
//! # use tower_service::Service;
//! # use tower::steer::Steer;
//! # use tower::service_fn;
//! # use tower::util::BoxService;
//! # use tower::ServiceExt;
//! # use std::convert::Infallible;
//! use http::{Request, Response, StatusCode, Method};
//!
//! # #[tokio::main]
//! # async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Service that responds to `GET /`
//! let root = service_fn(|req: Request<String>| async move {
//! # assert_eq!(req.uri().path(), "/");
//! let res = Response::new("Hello, World!".to_string());
//! Ok::<_, Infallible>(res)
//! });
//! // We have to box the service so its type gets erased and we can put it in a `Vec` with other
//! // services
//! let root = BoxService::new(root);
//!
//! // Service that responds with `404 Not Found` to all requests
//! let not_found = service_fn(|req: Request<String>| async move {
//! let res = Response::builder()
//! .status(StatusCode::NOT_FOUND)
//! .body(String::new())
//! .expect("response is valid");
//! Ok::<_, Infallible>(res)
//! });
//! // Box that as well
//! let not_found = BoxService::new(not_found);
//!
//! let mut svc = Steer::new(
//! // All services we route between
//! vec![root, not_found],
//! // How we pick which service to send the request to
//! |req: &Request<String>, _services: &[_]| {
//! if req.method() == Method::GET && req.uri().path() == "/" {
//! 0 // Index of `root`
//! } else {
//! 1 // Index of `not_found`
//! }
//! },
//! );
//!
//! // This request will get sent to `root`
//! let req = Request::get("/").body(String::new()).unwrap();
//! let res = svc.ready().await?.call(req).await?;
//! assert_eq!(res.into_body(), "Hello, World!");
//!
//! // This request will get sent to `not_found`
//! let req = Request::get("/does/not/exist").body(String::new()).unwrap();
//! let res = svc.ready().await?.call(req).await?;
//! assert_eq!(res.status(), StatusCode::NOT_FOUND);
//! assert_eq!(res.into_body(), "");
//! #
//! # Ok(())
//! # }
//! ```
use std::task::{Context, Poll};
use std::{collections::VecDeque, fmt, marker::PhantomData};
use tower_service::Service;
/// This is how callers of [`Steer`] tell it which `Service` a `Req` corresponds to.
pub trait Picker<S, Req> {
    /// Return an index into the iterator of `Service` passed to [`Steer::new`].
    fn pick(&mut self, r: &Req, services: &[S]) -> usize;
}
// Any `Fn(&Req, &[S]) -> usize` closure or function may be used directly as
// a `Picker`.
impl<S, F, Req> Picker<S, Req> for F
where
    F: Fn(&Req, &[S]) -> usize,
{
    fn pick(&mut self, r: &Req, services: &[S]) -> usize {
        self(r, services)
    }
}
/// [`Steer`] manages a list of [`Service`]s which all handle the same type of request.
///
/// An example use case is a sharded service.
/// It accepts new requests, then:
/// 1. Determines, via the provided [`Picker`], which [`Service`] the request corresponds to.
/// 2. Waits (in [`Service::poll_ready`]) for *all* services to be ready.
/// 3. Calls the correct [`Service`] with the request, and returns a future corresponding to the
///    call.
///
/// Note that [`Steer`] must wait for all services to be ready since it can't know ahead of time
/// which [`Service`] the next message will arrive for, and is unwilling to buffer items
/// indefinitely. This will cause head-of-line blocking unless paired with a [`Service`] that does
/// buffer items indefinitely, and thus always returns [`Poll::Ready`]. For example, wrapping each
/// component service with a [`Buffer`] with a high enough limit (the maximum number of concurrent
/// requests) will prevent head-of-line blocking in [`Steer`].
///
/// [`Buffer`]: crate::buffer::Buffer
pub struct Steer<S, F, Req> {
    // Decides which service index each request is routed to.
    router: F,
    // The routed-to services; index order is significant for the `Picker`.
    services: Vec<S>,
    // Indices of services not yet confirmed ready in this readiness cycle.
    not_ready: VecDeque<usize>,
    // `Req` appears only in the `Picker` bound, not in any field.
    _phantom: PhantomData<Req>,
}
impl<S, F, Req> Steer<S, F, Req> {
/// Make a new [`Steer`] with a list of [`Service`]'s and a [`Picker`].
///
/// Note: the order of the [`Service`]'s is significant for [`Picker::pick`]'s return value.
pub fn new(services: impl IntoIterator<Item = S>, router: F) -> Self {
let services: Vec<_> = services.into_iter().collect();
let not_ready: VecDeque<_> = services.iter().enumerate().map(|(i, _)| i).collect();
Self {
router,
services,
not_ready,
_phantom: PhantomData,
}
}
}
impl<S, Req, F> Service<Req> for Steer<S, F, Req>
where
    S: Service<Req>,
    F: Picker<S, Req>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    /// Ready only once *every* component service is ready.
    ///
    /// This can cause head-of-line blocking unless the underlying services
    /// are always ready (e.g. each wrapped in a large-enough `Buffer`).
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        while let Some(&idx) = self.not_ready.front() {
            if self.services[idx].poll_ready(cx)?.is_pending() {
                return Poll::Pending;
            }
            // This service is ready; stop tracking it for this cycle.
            self.not_ready.pop_front();
        }
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Req) -> Self::Future {
        assert!(
            self.not_ready.is_empty(),
            "Steer must wait for all services to be ready. Did you forget to call poll_ready()?"
        );

        let idx = self.router.pick(&req, &self.services[..]);
        let chosen = &mut self.services[idx];
        // The chosen service consumes its readiness, so it must be polled
        // again before the next call.
        self.not_ready.push_back(idx);
        chosen.call(req)
    }
}
// Manual impl: `#[derive(Clone)]` would also require `Req: Clone`, even
// though `Req` is only a `PhantomData` marker here.
impl<S, F, Req> Clone for Steer<S, F, Req>
where
    S: Clone,
    F: Clone,
{
    fn clone(&self) -> Self {
        Self {
            router: self.router.clone(),
            services: self.services.clone(),
            not_ready: self.not_ready.clone(),
            _phantom: PhantomData,
        }
    }
}
// Manual impl: deriving `Debug` would needlessly require `Req: Debug`.
impl<S, F, Req> fmt::Debug for Steer<S, F, Req>
where
    S: fmt::Debug,
    F: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("Steer")
            .field("router", &self.router)
            .field("services", &self.services)
            .field("not_ready", &self.not_ready)
            .finish()
    }
}

22
vendor/tower/src/timeout/error.rs vendored Normal file
View File

@@ -0,0 +1,22 @@
//! Error types
use std::{error, fmt};
/// The timeout elapsed.
#[derive(Debug, Default)]
pub struct Elapsed(pub(super) ());
impl Elapsed {
    /// Construct a new elapsed error
    pub const fn new() -> Self {
        Elapsed(())
    }
}
impl fmt::Display for Elapsed {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `pad` (rather than `write!`) honors any width/alignment flags the
        // caller specified in the format string.
        f.pad("request timed out")
    }
}
// The error has no source; the default `Error` methods suffice.
impl error::Error for Elapsed {}

53
vendor/tower/src/timeout/future.rs vendored Normal file
View File

@@ -0,0 +1,53 @@
//! Future types
use super::error::Elapsed;
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tokio::time::Sleep;
pin_project! {
    /// [`Timeout`] response future
    ///
    /// Resolves with the inner response, or with an elapsed error if the
    /// deadline fires first.
    ///
    /// [`Timeout`]: crate::timeout::Timeout
    #[derive(Debug)]
    pub struct ResponseFuture<T> {
        // The wrapped response future.
        #[pin]
        response: T,
        // Timer that fires once the timeout elapses.
        #[pin]
        sleep: Sleep,
    }
}
impl<T> ResponseFuture<T> {
    // Pair a response future with an already-armed timer.
    pub(crate) fn new(response: T, sleep: Sleep) -> Self {
        ResponseFuture { response, sleep }
    }
}
impl<F, T, E> Future for ResponseFuture<F>
where
F: Future<Output = Result<T, E>>,
E: Into<crate::BoxError>,
{
type Output = Result<T, crate::BoxError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
// First, try polling the future
match this.response.poll(cx) {
Poll::Ready(v) => return Poll::Ready(v.map_err(Into::into)),
Poll::Pending => {}
}
// Now check the sleep
match this.sleep.poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(_) => Poll::Ready(Err(Elapsed(()).into())),
}
}
}

24
vendor/tower/src/timeout/layer.rs vendored Normal file
View File

@@ -0,0 +1,24 @@
use super::Timeout;
use std::time::Duration;
use tower_layer::Layer;
/// Applies a timeout to requests via the supplied inner service.
#[derive(Debug, Clone)]
pub struct TimeoutLayer {
    // Maximum time a wrapped request may take before being aborted.
    timeout: Duration,
}
impl TimeoutLayer {
    /// Create a timeout from a duration
    pub const fn new(timeout: Duration) -> Self {
        TimeoutLayer { timeout }
    }
}
impl<S> Layer<S> for TimeoutLayer {
    type Service = Timeout<S>;
    fn layer(&self, service: S) -> Self::Service {
        Timeout::new(service, self.timeout)
    }
}

70
vendor/tower/src/timeout/mod.rs vendored Normal file
View File

@@ -0,0 +1,70 @@
//! Middleware that applies a timeout to requests.
//!
//! If the response does not complete within the specified timeout, the response
//! will be aborted.
pub mod error;
pub mod future;
mod layer;
pub use self::layer::TimeoutLayer;
use self::future::ResponseFuture;
use std::task::{Context, Poll};
use std::time::Duration;
use tower_service::Service;
/// Applies a timeout to requests.
#[derive(Debug, Clone)]
pub struct Timeout<T> {
    // The wrapped service.
    inner: T,
    // Maximum time a single call may take before it is aborted.
    timeout: Duration,
}
// ===== impl Timeout =====
impl<T> Timeout<T> {
    /// Creates a new [`Timeout`]
    pub const fn new(inner: T, timeout: Duration) -> Self {
        Timeout { inner, timeout }
    }
    /// Get a reference to the inner service
    pub fn get_ref(&self) -> &T {
        &self.inner
    }
    /// Get a mutable reference to the inner service
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.inner
    }
    /// Consume `self`, returning the inner service
    pub fn into_inner(self) -> T {
        self.inner
    }
}
impl<S, Request> Service<Request> for Timeout<S>
where
    S: Service<Request>,
    S::Error: Into<crate::BoxError>,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<S::Future>;

    /// Forward readiness from the inner service, boxing any error.
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }

    /// Start the inner call and race it against a freshly-armed timer.
    fn call(&mut self, request: Request) -> Self::Future {
        ResponseFuture::new(
            self.inner.call(request),
            tokio::time::sleep(self.timeout),
        )
    }
}

130
vendor/tower/src/util/and_then.rs vendored Normal file
View File

@@ -0,0 +1,130 @@
use futures_core::TryFuture;
use futures_util::{future, TryFutureExt};
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tower_layer::Layer;
use tower_service::Service;
/// Service returned by the [`and_then`] combinator.
///
/// [`and_then`]: crate::util::ServiceExt::and_then
#[derive(Clone)]
pub struct AndThen<S, F> {
    // The wrapped service.
    inner: S,
    // Async function applied to each successful response.
    f: F,
}
// Manual impl: `F` is usually a closure with no useful `Debug`; print its
// type name instead.
impl<S, F> fmt::Debug for AndThen<S, F>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("AndThen")
            .field("inner", &self.inner)
            .field("f", &format_args!("{}", std::any::type_name::<F>()))
            .finish()
    }
}
pin_project_lite::pin_project! {
    /// Response future from [`AndThen`] services.
    ///
    /// [`AndThen`]: crate::util::AndThen
    pub struct AndThenFuture<F1, F2: TryFuture, N> {
        // The inner service's future with its error converted, chained into
        // the future produced by the mapping function `N`.
        #[pin]
        inner: future::AndThen<future::ErrInto<F1, F2::Error>, F2, N>,
    }
}
impl<F1, F2: TryFuture, N> AndThenFuture<F1, F2, N> {
    pub(crate) fn new(inner: future::AndThen<future::ErrInto<F1, F2::Error>, F2, N>) -> Self {
        Self { inner }
    }
}
// The composed future type is unwieldy; elide it in `Debug` output.
impl<F1, F2: TryFuture, N> std::fmt::Debug for AndThenFuture<F1, F2, N> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_tuple("AndThenFuture")
            .field(&format_args!("..."))
            .finish()
    }
}
impl<F1, F2: TryFuture, N> Future for AndThenFuture<F1, F2, N>
where
    future::AndThen<future::ErrInto<F1, F2::Error>, F2, N>: Future,
{
    type Output = <future::AndThen<future::ErrInto<F1, F2::Error>, F2, N> as Future>::Output;
    #[inline]
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Simply forward to the composed inner future.
        self.project().inner.poll(cx)
    }
}
/// A [`Layer`] that produces a [`AndThen`] service.
///
/// [`Layer`]: tower_layer::Layer
#[derive(Clone, Debug)]
pub struct AndThenLayer<F> {
    // Function cloned into each produced `AndThen` service.
    f: F,
}
impl<S, F> AndThen<S, F> {
    /// Creates a new `AndThen` service.
    pub const fn new(inner: S, f: F) -> Self {
        AndThen { f, inner }
    }
    /// Returns a new [`Layer`] that produces [`AndThen`] services.
    ///
    /// This is a convenience function that simply calls [`AndThenLayer::new`].
    ///
    /// [`Layer`]: tower_layer::Layer
    pub fn layer(f: F) -> AndThenLayer<F> {
        AndThenLayer { f }
    }
}
impl<S, F, Request, Fut> Service<Request> for AndThen<S, F>
where
    S: Service<Request>,
    S::Error: Into<Fut::Error>,
    F: FnOnce(S::Response) -> Fut + Clone,
    Fut: TryFuture,
{
    type Response = Fut::Ok;
    type Error = Fut::Error;
    type Future = AndThenFuture<S::Future, Fut, F>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // `f` is cloned per call because the `FnOnce` is consumed by
        // `and_then` for each request.
        AndThenFuture::new(self.inner.call(request).err_into().and_then(self.f.clone()))
    }
}
impl<F> AndThenLayer<F> {
/// Creates a new [`AndThenLayer`] layer.
pub const fn new(f: F) -> Self {
AndThenLayer { f }
}
}
impl<S, F> Layer<S> for AndThenLayer<F>
where
F: Clone,
{
type Service = AndThen<S, F>;
fn layer(&self, inner: S) -> Self::Service {
AndThen {
f: self.f.clone(),
inner,
}
}
}

97
vendor/tower/src/util/boxed/layer.rs vendored Normal file
View File

@@ -0,0 +1,97 @@
use crate::util::BoxService;
use std::{fmt, sync::Arc};
use tower_layer::{layer_fn, Layer};
use tower_service::Service;
/// A boxed [`Layer`] trait object.
///
/// [`BoxLayer`] turns a layer into a trait object, allowing both the [`Layer`] itself
/// and the output [`Service`] to be dynamic, while having consistent types.
///
/// This [`Layer`] produces [`BoxService`] instances erasing the type of the
/// [`Service`] produced by the wrapped [`Layer`].
///
/// # Example
///
/// `BoxLayer` can, for example, be useful to create layers dynamically that otherwise wouldn't have
/// the same types. In this example, we include a [`Timeout`] layer
/// only if an environment variable is set. We can use `BoxLayer`
/// to return a consistent type regardless of runtime configuration:
///
/// ```
/// use std::time::Duration;
/// use tower::{Service, ServiceBuilder, BoxError, util::BoxLayer};
///
/// fn common_layer<S, T>() -> BoxLayer<S, T, S::Response, BoxError>
/// where
/// S: Service<T> + Send + 'static,
/// S::Future: Send + 'static,
/// S::Error: Into<BoxError> + 'static,
/// {
/// let builder = ServiceBuilder::new()
/// .concurrency_limit(100);
///
/// if std::env::var("SET_TIMEOUT").is_ok() {
/// let layer = builder
/// .timeout(Duration::from_secs(30))
/// .into_inner();
///
/// BoxLayer::new(layer)
/// } else {
/// let layer = builder
/// .map_err(Into::into)
/// .into_inner();
///
/// BoxLayer::new(layer)
/// }
/// }
/// ```
///
/// [`Layer`]: tower_layer::Layer
/// [`Service`]: tower_service::Service
/// [`BoxService`]: super::BoxService
/// [`Timeout`]: crate::timeout
pub struct BoxLayer<In, T, U, E> {
    // The type-erased layer; `Arc` makes `BoxLayer` itself cheap to clone.
    boxed: Arc<dyn Layer<In, Service = BoxService<T, U, E>> + Send + Sync + 'static>,
}
impl<In, T, U, E> BoxLayer<In, T, U, E> {
    /// Create a new [`BoxLayer`].
    pub fn new<L>(inner_layer: L) -> Self
    where
        L: Layer<In> + Send + Sync + 'static,
        L::Service: Service<T, Response = U, Error = E> + Send + 'static,
        <L::Service as Service<T>>::Future: Send + 'static,
    {
        // Adapt `inner_layer` so that the service it produces is boxed,
        // erasing its concrete type.
        let layer = layer_fn(move |inner: In| {
            let out = inner_layer.layer(inner);
            BoxService::new(out)
        });
        Self {
            boxed: Arc::new(layer),
        }
    }
}
impl<In, T, U, E> Layer<In> for BoxLayer<In, T, U, E> {
    type Service = BoxService<T, U, E>;
    fn layer(&self, inner: In) -> Self::Service {
        self.boxed.layer(inner)
    }
}
// Manual impl: deriving would add unwanted bounds on the type parameters;
// cloning only bumps the `Arc` refcount.
impl<In, T, U, E> Clone for BoxLayer<In, T, U, E> {
    fn clone(&self) -> Self {
        Self {
            boxed: Arc::clone(&self.boxed),
        }
    }
}
// The erased layer has no useful `Debug`; print just the type name.
impl<In, T, U, E> fmt::Debug for BoxLayer<In, T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        fmt.debug_struct("BoxLayer").finish()
    }
}

View File

@@ -0,0 +1,128 @@
use crate::util::BoxCloneService;
use std::{fmt, sync::Arc};
use tower_layer::{layer_fn, Layer};
use tower_service::Service;
/// A [`Clone`] + [`Send`] boxed [`Layer`].
///
/// [`BoxCloneServiceLayer`] turns a layer into a trait object, allowing both the [`Layer`] itself
/// and the output [`Service`] to be dynamic, while having consistent types.
///
/// This [`Layer`] produces [`BoxCloneService`] instances erasing the type of the
/// [`Service`] produced by the wrapped [`Layer`].
///
/// This is similar to [`BoxLayer`](super::BoxLayer) except the layer and resulting
/// service implements [`Clone`].
///
/// # Example
///
/// `BoxCloneServiceLayer` can, for example, be useful to create layers dynamically that otherwise wouldn't have
/// the same types, when the underlying service must be clone (for example, when building a MakeService)
/// In this example, we include a [`Timeout`] layer only if an environment variable is set. We can use
/// `BoxCloneService` to return a consistent type regardless of runtime configuration:
///
/// ```
/// use std::time::Duration;
/// use tower::{Service, ServiceBuilder, BoxError};
/// use tower::util::{BoxCloneServiceLayer, BoxCloneService};
///
/// #
/// # struct Request;
/// # struct Response;
/// # impl Response {
/// # fn new() -> Self { Self }
/// # }
///
/// fn common_layer<S, T>() -> BoxCloneServiceLayer<S, T, S::Response, BoxError>
/// where
/// S: Service<T> + Clone + Send + 'static,
/// S::Future: Send + 'static,
/// S::Error: Into<BoxError> + 'static,
/// {
/// let builder = ServiceBuilder::new()
/// .concurrency_limit(100);
///
/// if std::env::var("SET_TIMEOUT").is_ok() {
/// let layer = builder
/// .timeout(Duration::from_secs(30))
/// .into_inner();
///
/// BoxCloneServiceLayer::new(layer)
/// } else {
/// let layer = builder
/// .map_err(Into::into)
/// .into_inner();
///
/// BoxCloneServiceLayer::new(layer)
/// }
/// }
///
/// // We can clone the layer (this is true of BoxLayer as well)
/// let boxed_clone_layer = common_layer();
///
/// let cloned_layer = boxed_clone_layer.clone();
///
/// // Using the `BoxCloneServiceLayer` we can create a `BoxCloneService`
/// let service: BoxCloneService<Request, Response, BoxError> = ServiceBuilder::new().layer(boxed_clone_layer)
/// .service_fn(|req: Request| async {
/// Ok::<_, BoxError>(Response::new())
/// });
///
/// # let service = assert_service(service);
///
/// // And we can still clone the service
/// let cloned_service = service.clone();
/// #
/// # fn assert_service<S, R>(svc: S) -> S
/// # where S: Service<R> { svc }
///
/// ```
///
/// [`Layer`]: tower_layer::Layer
/// [`Service`]: tower_service::Service
/// [`BoxService`]: super::BoxService
/// [`Timeout`]: crate::timeout
pub struct BoxCloneServiceLayer<In, T, U, E> {
    // Shared, type-erased layer; the `Arc` makes this layer cheap to clone.
    boxed: Arc<dyn Layer<In, Service = BoxCloneService<T, U, E>> + Send + Sync + 'static>,
}
impl<In, T, U, E> BoxCloneServiceLayer<In, T, U, E> {
    /// Create a new [`BoxCloneServiceLayer`] wrapping `inner_layer`.
    pub fn new<L>(inner_layer: L) -> Self
    where
        L: Layer<In> + Send + Sync + 'static,
        L::Service: Service<T, Response = U, Error = E> + Send + Clone + 'static,
        <L::Service as Service<T>>::Future: Send + 'static,
    {
        // Erase the concrete layer type: every service it produces is boxed
        // into a `BoxCloneService` so the output type stays uniform.
        Self {
            boxed: Arc::new(layer_fn(move |svc: In| {
                BoxCloneService::new(inner_layer.layer(svc))
            })),
        }
    }
}
impl<In, T, U, E> Layer<In> for BoxCloneServiceLayer<In, T, U, E> {
    type Service = BoxCloneService<T, U, E>;

    /// Apply the type-erased layer to `inner`.
    fn layer(&self, inner: In) -> Self::Service {
        (*self.boxed).layer(inner)
    }
}
impl<In, T, U, E> Clone for BoxCloneServiceLayer<In, T, U, E> {
    fn clone(&self) -> Self {
        // Cheap: clones the `Arc` handle, not the underlying layer.
        Self {
            boxed: Arc::clone(&self.boxed),
        }
    }
}
impl<In, T, U, E> fmt::Debug for BoxCloneServiceLayer<In, T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // The inner layer is type-erased; only the wrapper name is printed.
        fmt.debug_struct("BoxCloneServiceLayer").finish()
    }
}

View File

@@ -0,0 +1,129 @@
use std::{fmt, sync::Arc};
use tower_layer::{layer_fn, Layer};
use tower_service::Service;
use crate::util::BoxCloneSyncService;
/// A [`Clone`] + [`Send`] + [`Sync`] boxed [`Layer`].
///
/// [`BoxCloneSyncServiceLayer`] turns a layer into a trait object, allowing both the [`Layer`] itself
/// and the output [`Service`] to be dynamic, while having consistent types.
///
/// This [`Layer`] produces [`BoxCloneSyncService`] instances erasing the type of the
/// [`Service`] produced by the wrapped [`Layer`].
///
/// This is similar to [`BoxCloneServiceLayer`](super::BoxCloneServiceLayer) except the layer and resulting
/// service implements [`Sync`].
///
/// # Example
///
/// `BoxCloneSyncServiceLayer` can, for example, be useful to create layers dynamically that otherwise wouldn't have
/// the same types, when the underlying service must be clone and sync (for example, when building a Hyper connector).
/// In this example, we include a [`Timeout`] layer only if an environment variable is set. We can use
/// `BoxCloneSyncServiceLayer` to return a consistent type regardless of runtime configuration:
///
/// ```
/// use std::time::Duration;
/// use tower::{Service, ServiceBuilder, BoxError};
/// use tower::util::{BoxCloneSyncServiceLayer, BoxCloneSyncService};
///
/// #
/// # struct Request;
/// # struct Response;
/// # impl Response {
/// # fn new() -> Self { Self }
/// # }
///
/// fn common_layer<S, T>() -> BoxCloneSyncServiceLayer<S, T, S::Response, BoxError>
/// where
/// S: Service<T> + Clone + Send + Sync + 'static,
/// S::Future: Send + 'static,
/// S::Error: Into<BoxError> + 'static,
/// {
/// let builder = ServiceBuilder::new()
/// .concurrency_limit(100);
///
/// if std::env::var("SET_TIMEOUT").is_ok() {
/// let layer = builder
/// .timeout(Duration::from_secs(30))
/// .into_inner();
///
/// BoxCloneSyncServiceLayer::new(layer)
/// } else {
/// let layer = builder
/// .map_err(Into::into)
/// .into_inner();
///
/// BoxCloneSyncServiceLayer::new(layer)
/// }
/// }
///
/// // We can clone the layer (this is true of BoxLayer as well)
/// let boxed_clone_sync_layer = common_layer();
///
/// let cloned_sync_layer = boxed_clone_sync_layer.clone();
///
/// // Using the `BoxCloneSyncServiceLayer` we can create a `BoxCloneSyncService`
/// let service: BoxCloneSyncService<Request, Response, BoxError> = ServiceBuilder::new().layer(cloned_sync_layer)
/// .service_fn(|req: Request| async {
/// Ok::<_, BoxError>(Response::new())
/// });
///
/// # let service = assert_service(service);
///
/// // And we can still clone the service
/// let cloned_service = service.clone();
/// #
/// # fn assert_service<S, R>(svc: S) -> S
/// # where S: Service<R> { svc }
///
/// ```
///
/// [`Layer`]: tower_layer::Layer
/// [`Service`]: tower_service::Service
/// [`BoxService`]: super::BoxService
/// [`Timeout`]: crate::timeout
pub struct BoxCloneSyncServiceLayer<In, T, U, E> {
    // Shared, type-erased layer producing `Sync` services; `Arc` makes
    // cloning this layer cheap.
    boxed: Arc<dyn Layer<In, Service = BoxCloneSyncService<T, U, E>> + Send + Sync + 'static>,
}
impl<In, T, U, E> BoxCloneSyncServiceLayer<In, T, U, E> {
    /// Create a new [`BoxCloneSyncServiceLayer`] wrapping `inner_layer`.
    pub fn new<L>(inner_layer: L) -> Self
    where
        L: Layer<In> + Send + Sync + 'static,
        L::Service: Service<T, Response = U, Error = E> + Send + Sync + Clone + 'static,
        <L::Service as Service<T>>::Future: Send + 'static,
    {
        // Erase the concrete layer type: each service it produces is boxed
        // into a `BoxCloneSyncService` so the output type stays uniform.
        Self {
            boxed: Arc::new(layer_fn(move |svc: In| {
                BoxCloneSyncService::new(inner_layer.layer(svc))
            })),
        }
    }
}
impl<In, T, U, E> Layer<In> for BoxCloneSyncServiceLayer<In, T, U, E> {
    type Service = BoxCloneSyncService<T, U, E>;
    fn layer(&self, inner: In) -> Self::Service {
        // Delegate to the erased layer stored behind the `Arc`.
        self.boxed.layer(inner)
    }
}
impl<In, T, U, E> Clone for BoxCloneSyncServiceLayer<In, T, U, E> {
    fn clone(&self) -> Self {
        // Cheap: clones the `Arc` handle, not the layer itself.
        Self {
            boxed: Arc::clone(&self.boxed),
        }
    }
}
impl<In, T, U, E> fmt::Debug for BoxCloneSyncServiceLayer<In, T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // The inner layer is type-erased; only the wrapper name is printed.
        fmt.debug_struct("BoxCloneSyncServiceLayer").finish()
    }
}

11
vendor/tower/src/util/boxed/mod.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
// Boxed `Layer` implementations (plain, clonable, and clonable + `Sync`).
mod layer;
mod layer_clone;
mod layer_clone_sync;
// Boxed `Service` implementations (`Send`-able and single-threaded variants).
mod sync;
mod unsync;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::{
    layer::BoxLayer, layer_clone::BoxCloneServiceLayer, layer_clone_sync::BoxCloneSyncServiceLayer,
    sync::BoxService, unsync::UnsyncBoxService,
};

111
vendor/tower/src/util/boxed/sync.rs vendored Normal file
View File

@@ -0,0 +1,111 @@
use crate::ServiceExt;
use tower_layer::{layer_fn, LayerFn};
use tower_service::Service;
use sync_wrapper::SyncWrapper;
use std::fmt;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// A boxed `Service + Send` trait object.
///
/// [`BoxService`] turns a service into a trait object, allowing the response
/// future type to be dynamic. This type requires both the service and the
/// response future to be [`Send`].
///
/// If you need a boxed [`Service`] that implements [`Clone`] consider using
/// [`BoxCloneService`](crate::util::BoxCloneService).
///
/// Dynamically dispatched [`Service`] objects allow for erasing the underlying
/// [`Service`] type and using the `Service` instances as opaque handles. This can
/// be useful when the service instance cannot be explicitly named for whatever
/// reason.
///
/// # Examples
///
/// ```
/// use std::future::ready;
/// # use tower_service::Service;
/// # use tower::util::{BoxService, service_fn};
/// // Respond to requests using a closure, but closures cannot be named...
/// # pub fn main() {
/// let svc = service_fn(|mut request: String| {
/// request.push_str(" response");
/// ready(Ok(request))
/// });
///
/// let service: BoxService<String, String, ()> = BoxService::new(svc);
/// # drop(service);
/// }
/// ```
///
/// [`Service`]: crate::Service
/// [`Rc`]: std::rc::Rc
pub struct BoxService<T, U, E> {
    // `SyncWrapper` makes `BoxService` implement `Sync` even though the boxed
    // service is only required to be `Send`: it guarantees the inner value is
    // only ever reached through `&mut self` (see `is_sync` test below).
    inner:
        SyncWrapper<Box<dyn Service<T, Response = U, Error = E, Future = BoxFuture<U, E>> + Send>>,
}
/// A boxed `Future + Send` trait object.
///
/// This type alias represents a boxed future that is [`Send`] and can be moved
/// across threads.
type BoxFuture<T, E> = Pin<Box<dyn Future<Output = Result<T, E>> + Send>>;
impl<T, U, E> BoxService<T, U, E> {
    /// Create a new `BoxService` wrapping `inner`, boxing its response future
    /// so the erased `Future` associated type can be named.
    #[allow(missing_docs)]
    pub fn new<S>(inner: S) -> Self
    where
        S: Service<T, Response = U, Error = E> + Send + 'static,
        S::Future: Send + 'static,
    {
        // rust can't infer the type
        let inner: Box<dyn Service<T, Response = U, Error = E, Future = BoxFuture<U, E>> + Send> =
            Box::new(inner.map_future(|f: S::Future| Box::pin(f) as _));
        // Wrap in `SyncWrapper` so `BoxService` is `Sync` (mutable-only access).
        let inner = SyncWrapper::new(inner);
        BoxService { inner }
    }
    /// Returns a [`Layer`] for wrapping a [`Service`] in a [`BoxService`]
    /// middleware.
    ///
    /// [`Layer`]: crate::Layer
    pub fn layer<S>() -> LayerFn<fn(S) -> Self>
    where
        S: Service<T, Response = U, Error = E> + Send + 'static,
        S::Future: Send + 'static,
    {
        layer_fn(Self::new)
    }
}
impl<T, U, E> Service<T> for BoxService<T, U, E> {
    type Response = U;
    type Error = E;
    type Future = BoxFuture<U, E>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
        // `get_mut` is the only way through the `SyncWrapper`; it requires
        // `&mut self`, which is what makes the wrapper sound.
        self.inner.get_mut().poll_ready(cx)
    }
    fn call(&mut self, request: T) -> BoxFuture<U, E> {
        self.inner.get_mut().call(request)
    }
}
impl<T, U, E> fmt::Debug for BoxService<T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Inner service is type-erased; print only the wrapper name.
        fmt.debug_struct("BoxService").finish()
    }
}
// Compile-time check that `SyncWrapper` indeed makes `BoxService: Sync`.
#[test]
fn is_sync() {
    fn assert_sync<T: Sync>() {}
    assert_sync::<BoxService<(), (), ()>>();
}

86
vendor/tower/src/util/boxed/unsync.rs vendored Normal file
View File

@@ -0,0 +1,86 @@
use tower_layer::{layer_fn, LayerFn};
use tower_service::Service;
use std::fmt;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
/// A boxed [`Service`] trait object.
pub struct UnsyncBoxService<T, U, E> {
    // No `Send` bound: this variant may hold `!Send` services and futures,
    // so it must stay on the thread it was created on.
    inner: Box<dyn Service<T, Response = U, Error = E, Future = UnsyncBoxFuture<U, E>>>,
}
/// A boxed [`Future`] trait object.
///
/// This type alias represents a boxed future that is *not* [`Send`] and must
/// remain on the current thread.
type UnsyncBoxFuture<T, E> = Pin<Box<dyn Future<Output = Result<T, E>>>>;
/// Adapter that boxes the inner service's future in `call`, so the erased
/// `Future` associated type can be named as [`UnsyncBoxFuture`].
#[derive(Debug)]
struct UnsyncBoxed<S> {
    inner: S,
}
impl<T, U, E> UnsyncBoxService<T, U, E> {
    /// Box the given service, erasing its concrete type.
    #[allow(missing_docs)]
    pub fn new<S>(inner: S) -> Self
    where
        S: Service<T, Response = U, Error = E> + 'static,
        S::Future: 'static,
    {
        UnsyncBoxService {
            inner: Box::new(UnsyncBoxed { inner }),
        }
    }

    /// Returns a [`Layer`] for wrapping a [`Service`] in an [`UnsyncBoxService`] middleware.
    ///
    /// [`Layer`]: crate::Layer
    pub fn layer<S>() -> LayerFn<fn(S) -> Self>
    where
        S: Service<T, Response = U, Error = E> + 'static,
        S::Future: 'static,
    {
        layer_fn(Self::new)
    }
}
impl<T, U, E> Service<T> for UnsyncBoxService<T, U, E> {
    type Response = U;
    type Error = E;
    type Future = UnsyncBoxFuture<U, E>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
        self.inner.poll_ready(cx)
    }
    fn call(&mut self, request: T) -> UnsyncBoxFuture<U, E> {
        self.inner.call(request)
    }
}
impl<T, U, E> fmt::Debug for UnsyncBoxService<T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Inner service is type-erased; print only the wrapper name.
        fmt.debug_struct("UnsyncBoxService").finish()
    }
}
// Adapter impl: forwards readiness and boxes the response future so the
// `Future` associated type can be named by `UnsyncBoxService`.
impl<S, Request> Service<Request> for UnsyncBoxed<S>
where
    S: Service<Request> + 'static,
    S::Future: 'static,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = Pin<Box<dyn Future<Output = Result<S::Response, S::Error>>>>;
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }
    fn call(&mut self, request: Request) -> Self::Future {
        // Box the future at call time; note there is no `Send` requirement.
        Box::pin(self.inner.call(request))
    }
}

136
vendor/tower/src/util/boxed_clone.rs vendored Normal file
View File

@@ -0,0 +1,136 @@
use super::ServiceExt;
use futures_util::future::BoxFuture;
use std::{
fmt,
task::{Context, Poll},
};
use tower_layer::{layer_fn, LayerFn};
use tower_service::Service;
/// A [`Clone`] + [`Send`] boxed [`Service`].
///
/// [`BoxCloneService`] turns a service into a trait object, allowing the
/// response future type to be dynamic, and allowing the service to be cloned.
///
/// This is similar to [`BoxService`](super::BoxService) except the resulting
/// service implements [`Clone`].
///
/// # Example
///
/// ```
/// use tower::{Service, ServiceBuilder, BoxError, util::BoxCloneService};
/// use std::time::Duration;
/// #
/// # struct Request;
/// # struct Response;
/// # impl Response {
/// # fn new() -> Self { Self }
/// # }
///
/// // This service has a complex type that is hard to name
/// let service = ServiceBuilder::new()
/// .map_request(|req| {
/// println!("received request");
/// req
/// })
/// .map_response(|res| {
/// println!("response produced");
/// res
/// })
/// .load_shed()
/// .concurrency_limit(64)
/// .timeout(Duration::from_secs(10))
/// .service_fn(|req: Request| async {
/// Ok::<_, BoxError>(Response::new())
/// });
/// # let service = assert_service(service);
///
/// // `BoxCloneService` will erase the type so it's nameable
/// let service: BoxCloneService<Request, Response, BoxError> = BoxCloneService::new(service);
/// # let service = assert_service(service);
///
/// // And we can still clone the service
/// let cloned_service = service.clone();
/// #
/// # fn assert_service<S, R>(svc: S) -> S
/// # where S: Service<R> { svc }
/// ```
pub struct BoxCloneService<T, U, E>(
    // Boxed via the private `CloneService` trait, which adds `clone_box` so
    // the trait object itself can be cloned (plain `Clone` is not object safe).
    Box<
        dyn CloneService<T, Response = U, Error = E, Future = BoxFuture<'static, Result<U, E>>>
            + Send,
    >,
);
impl<T, U, E> BoxCloneService<T, U, E> {
    /// Create a new `BoxCloneService`.
    pub fn new<S>(inner: S) -> Self
    where
        S: Service<T, Response = U, Error = E> + Clone + Send + 'static,
        S::Future: Send + 'static,
    {
        // Box each response future so the erased `Future` type is nameable.
        BoxCloneService(Box::new(inner.map_future(|f| Box::pin(f) as _)))
    }

    /// Returns a [`Layer`] for wrapping a [`Service`] in a [`BoxCloneService`]
    /// middleware.
    ///
    /// [`Layer`]: crate::Layer
    pub fn layer<S>() -> LayerFn<fn(S) -> Self>
    where
        S: Service<T, Response = U, Error = E> + Clone + Send + 'static,
        S::Future: Send + 'static,
    {
        layer_fn(Self::new)
    }
}
impl<T, U, E> Service<T> for BoxCloneService<T, U, E> {
    type Response = U;
    type Error = E;
    type Future = BoxFuture<'static, Result<U, E>>;
    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
        self.0.poll_ready(cx)
    }
    #[inline]
    fn call(&mut self, request: T) -> Self::Future {
        self.0.call(request)
    }
}
impl<T, U, E> Clone for BoxCloneService<T, U, E> {
    fn clone(&self) -> Self {
        // Goes through `clone_box` because `Clone` itself is not object safe.
        Self(self.0.clone_box())
    }
}
/// Object-safe clone helper: `Clone` cannot be a supertrait of an object-safe
/// trait, so cloning is exposed through `clone_box` instead.
trait CloneService<R>: Service<R> {
    fn clone_box(
        &self,
    ) -> Box<
        dyn CloneService<R, Response = Self::Response, Error = Self::Error, Future = Self::Future>
            + Send,
    >;
}
// Blanket impl: any `Clone + Send` service can produce a boxed clone of itself.
impl<R, T> CloneService<R> for T
where
    T: Service<R> + Send + Clone + 'static,
{
    fn clone_box(
        &self,
    ) -> Box<dyn CloneService<R, Response = T::Response, Error = T::Error, Future = T::Future> + Send>
    {
        Box::new(self.clone())
    }
}
impl<T, U, E> fmt::Debug for BoxCloneService<T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Inner service is type-erased; print only the wrapper name.
        fmt.debug_struct("BoxCloneService").finish()
    }
}

View File

@@ -0,0 +1,100 @@
use super::ServiceExt;
use futures_util::future::BoxFuture;
use std::{
fmt,
task::{Context, Poll},
};
use tower_layer::{layer_fn, LayerFn};
use tower_service::Service;
/// A [`Clone`] + [`Send`] + [`Sync`] boxed [`Service`].
///
/// [`BoxCloneSyncService`] turns a service into a trait object, allowing the
/// response future type to be dynamic, and allowing the service to be cloned and shared.
///
/// This is similar to [`BoxCloneService`](super::BoxCloneService) except the resulting
/// service implements [`Sync`].
pub struct BoxCloneSyncService<T, U, E>(
    // Like `BoxCloneService`, but the trait object is also `Sync`, so the
    // handle can be shared across threads by reference.
    Box<
        dyn CloneService<T, Response = U, Error = E, Future = BoxFuture<'static, Result<U, E>>>
            + Send
            + Sync,
    >,
);
impl<T, U, E> BoxCloneSyncService<T, U, E> {
    /// Create a new `BoxCloneSyncService`.
    pub fn new<S>(inner: S) -> Self
    where
        S: Service<T, Response = U, Error = E> + Clone + Send + Sync + 'static,
        S::Future: Send + 'static,
    {
        // Box each response future so the erased `Future` type is nameable.
        BoxCloneSyncService(Box::new(inner.map_future(|f| Box::pin(f) as _)))
    }

    /// Returns a [`Layer`] for wrapping a [`Service`] in a [`BoxCloneSyncService`]
    /// middleware.
    ///
    /// [`Layer`]: crate::Layer
    pub fn layer<S>() -> LayerFn<fn(S) -> Self>
    where
        S: Service<T, Response = U, Error = E> + Clone + Send + Sync + 'static,
        S::Future: Send + 'static,
    {
        layer_fn(Self::new)
    }
}
impl<T, U, E> Service<T> for BoxCloneSyncService<T, U, E> {
    type Response = U;
    type Error = E;
    type Future = BoxFuture<'static, Result<U, E>>;
    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), E>> {
        self.0.poll_ready(cx)
    }
    #[inline]
    fn call(&mut self, request: T) -> Self::Future {
        self.0.call(request)
    }
}
impl<T, U, E> Clone for BoxCloneSyncService<T, U, E> {
    fn clone(&self) -> Self {
        // Goes through `clone_box` because `Clone` itself is not object safe.
        Self(self.0.clone_box())
    }
}
/// Object-safe clone helper: `Clone` cannot be a supertrait of an object-safe
/// trait, so cloning is exposed through `clone_box` instead.
trait CloneService<R>: Service<R> {
    fn clone_box(
        &self,
    ) -> Box<
        dyn CloneService<R, Response = Self::Response, Error = Self::Error, Future = Self::Future>
            + Send
            + Sync,
    >;
}
// Blanket impl: any `Clone + Send + Sync` service can box-clone itself.
impl<R, T> CloneService<R> for T
where
    T: Service<R> + Send + Sync + Clone + 'static,
{
    fn clone_box(
        &self,
    ) -> Box<
        dyn CloneService<R, Response = T::Response, Error = T::Error, Future = T::Future>
            + Send
            + Sync,
    > {
        Box::new(self.clone())
    }
}
impl<T, U, E> fmt::Debug for BoxCloneSyncService<T, U, E> {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // Inner service is type-erased; print only the wrapper name.
        fmt.debug_struct("BoxCloneSyncService").finish()
    }
}

141
vendor/tower/src/util/call_all/common.rs vendored Normal file
View File

@@ -0,0 +1,141 @@
use futures_core::Stream;
use pin_project_lite::pin_project;
use std::{
fmt,
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
pin_project! {
    /// The [`Future`] returned by the [`ServiceExt::call_all`] combinator.
    pub(crate) struct CallAll<Svc, S, Q>
    where
        S: Stream,
    {
        // `Option` so the service can be extracted by `into_inner`/`take_service`.
        service: Option<Svc>,
        #[pin]
        stream: S,
        // In-flight response futures (ordered or unordered driver, via `Drive`).
        queue: Q,
        // True once `stream` has finished, or once `poll_ready` has failed.
        eof: bool,
        // A request pulled from `stream` while waiting for the service to be ready.
        curr_req: Option<S::Item>
    }
}
impl<Svc, S, Q> fmt::Debug for CallAll<Svc, S, Q>
where
    Svc: fmt::Debug,
    S: Stream + fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // `queue` and `curr_req` are omitted (no `Debug` bounds on `Q` /
        // `S::Item`); `finish_non_exhaustive` makes that omission explicit
        // in the output instead of implying the struct has only three fields.
        f.debug_struct("CallAll")
            .field("service", &self.service)
            .field("stream", &self.stream)
            .field("eof", &self.eof)
            .finish_non_exhaustive()
    }
}
/// Abstraction over the future-queue driving strategy; implemented below for
/// `FuturesOrdered` (yield in submission order) and `FuturesUnordered`
/// (yield in completion order).
pub(crate) trait Drive<F: Future> {
    fn is_empty(&self) -> bool;
    fn push(&mut self, future: F);
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<F::Output>>;
}
impl<Svc, S, Q> CallAll<Svc, S, Q>
where
    Svc: Service<S::Item>,
    S: Stream,
    Q: Drive<Svc::Future>,
{
    /// Build a new combinator from a service, a request stream, and an
    /// (initially empty) future-queue driver.
    pub(crate) const fn new(service: Svc, stream: S, queue: Q) -> CallAll<Svc, S, Q> {
        CallAll {
            service: Some(service),
            stream,
            queue,
            eof: false,
            curr_req: None,
        }
    }
    /// Extract the wrapped [`Service`].
    ///
    /// Panics if the service was already taken.
    pub(crate) fn into_inner(mut self) -> Svc {
        self.service.take().expect("Service already taken")
    }
    /// Extract the wrapped [`Service`] through a pinned reference.
    ///
    /// Panics if the service was already taken.
    pub(crate) fn take_service(self: Pin<&mut Self>) -> Svc {
        self.project()
            .service
            .take()
            .expect("Service already taken")
    }
    /// Convert into the unordered variant.
    ///
    /// Only valid before any futures are in flight and before the request
    /// stream has ended (asserted below), since in-flight futures would
    /// otherwise be dropped by the conversion.
    pub(crate) fn unordered(mut self) -> super::CallAllUnordered<Svc, S> {
        assert!(self.queue.is_empty() && !self.eof);
        super::CallAllUnordered::new(self.service.take().unwrap(), self.stream)
    }
}
impl<Svc, S, Q> Stream for CallAll<Svc, S, Q>
where
    Svc: Service<S::Item>,
    S: Stream,
    Q: Drive<Svc::Future>,
{
    type Item = Result<Svc::Response, Svc::Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            // First, see if we have any responses to yield
            if let Poll::Ready(r) = this.queue.poll(cx) {
                if let Some(rsp) = r.transpose()? {
                    return Poll::Ready(Some(Ok(rsp)));
                }
            }
            // If there are no more requests coming, check if we're done
            if *this.eof {
                if this.queue.is_empty() {
                    return Poll::Ready(None);
                } else {
                    // Futures are still in flight; `queue.poll` above has
                    // registered the waker, so we'll be woken on completion.
                    return Poll::Pending;
                }
            }
            // If not done, and we don't have a stored request, gather the next request from the
            // stream (if there is one), or return `Pending` if the stream is not ready.
            if this.curr_req.is_none() {
                *this.curr_req = match ready!(this.stream.as_mut().poll_next(cx)) {
                    Some(next_req) => Some(next_req),
                    None => {
                        // Mark that there will be no more requests.
                        *this.eof = true;
                        continue;
                    }
                };
            }
            // Then, see that the service is ready for another request
            let svc = this
                .service
                .as_mut()
                .expect("Using CallAll after extracting inner Service");
            if let Err(e) = ready!(svc.poll_ready(cx)) {
                // Set eof to prevent the service from being called again after a `poll_ready` error
                *this.eof = true;
                return Poll::Ready(Some(Err(e)));
            }
            // Unwrap: The check above always sets `this.curr_req` if none.
            this.queue.push(svc.call(this.curr_req.take().unwrap()));
        }
    }
}

11
vendor/tower/src/util/call_all/mod.rs vendored Normal file
View File

@@ -0,0 +1,11 @@
//! [`Stream<Item = Request>`][stream] + [`Service<Request>`] => [`Stream<Item = Response>`][stream].
//!
//! [`Service<Request>`]: crate::Service
//! [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
mod common;
mod ordered;
mod unordered;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/57411
pub use self::{ordered::CallAll, unordered::CallAllUnordered};

View File

@@ -0,0 +1,177 @@
//! [`Stream<Item = Request>`][stream] + [`Service<Request>`] => [`Stream<Item = Response>`][stream].
//!
//! [`Service<Request>`]: crate::Service
//! [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
use super::common;
use futures_core::Stream;
use futures_util::stream::FuturesOrdered;
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
pin_project! {
/// This is a [`Stream`] of responses resulting from calling the wrapped [`Service`] for each
/// request received on the wrapped [`Stream`].
///
/// ```rust
/// # use std::task::{Poll, Context};
/// # use std::cell::Cell;
/// # use std::error::Error;
/// # use std::rc::Rc;
/// #
/// use std::future::{ready, Ready};
/// use futures::StreamExt;
/// use futures::channel::mpsc;
/// use tower_service::Service;
/// use tower::util::ServiceExt;
///
/// // First, we need to have a Service to process our requests.
/// #[derive(Debug, Eq, PartialEq)]
/// struct FirstLetter;
/// impl Service<&'static str> for FirstLetter {
/// type Response = &'static str;
/// type Error = Box<dyn Error + Send + Sync>;
/// type Future = Ready<Result<Self::Response, Self::Error>>;
///
/// fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
/// Poll::Ready(Ok(()))
/// }
///
/// fn call(&mut self, req: &'static str) -> Self::Future {
/// ready(Ok(&req[..1]))
/// }
/// }
///
/// #[tokio::main]
/// async fn main() {
/// // Next, we need a Stream of requests.
// TODO(eliza): when `tokio-util` has a nice way to convert MPSCs to streams,
// tokio::sync::mpsc again?
/// let (mut reqs, rx) = mpsc::unbounded();
/// // Note that we have to help Rust out here by telling it what error type to use.
/// // Specifically, it has to be From<Service::Error> + From<Stream::Error>.
/// let mut rsps = FirstLetter.call_all(rx);
///
/// // Now, let's send a few requests and then check that we get the corresponding responses.
/// reqs.unbounded_send("one").unwrap();
/// reqs.unbounded_send("two").unwrap();
/// reqs.unbounded_send("three").unwrap();
/// drop(reqs);
///
/// // We then loop over the response `Stream` that we get back from call_all.
/// let mut i = 0usize;
/// while let Some(rsp) = rsps.next().await {
/// // Each response is a Result (we could also have used TryStream::try_next)
/// match (i + 1, rsp.unwrap()) {
/// (1, "o") |
/// (2, "t") |
/// (3, "t") => {}
/// (n, i) => {
/// unreachable!("{}. response was '{}'", n, i);
/// }
/// }
/// i += 1;
/// }
///
/// // And at the end, we can get the Service back when there are no more requests.
/// assert_eq!(rsps.into_inner(), FirstLetter);
/// }
/// ```
///
/// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
    #[derive(Debug)]
    pub struct CallAll<Svc, S>
    where
        Svc: Service<S::Item>,
        S: Stream,
    {
        // Shared implementation driven by `FuturesOrdered`, which yields
        // responses in the same order their requests were received.
        #[pin]
        inner: common::CallAll<Svc, S, FuturesOrdered<Svc::Future>>,
    }
}
impl<Svc, S> CallAll<Svc, S>
where
    Svc: Service<S::Item>,
    S: Stream,
{
    /// Create new [`CallAll`] combinator.
    ///
    /// Each request yielded by `stream` is passed to `svc`, and the resulting responses are
    /// yielded in the same order by the implementation of [`Stream`] for [`CallAll`].
    ///
    /// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
    pub fn new(service: Svc, stream: S) -> CallAll<Svc, S> {
        CallAll {
            inner: common::CallAll::new(service, stream, FuturesOrdered::new()),
        }
    }
    /// Extract the wrapped [`Service`].
    ///
    /// # Panics
    ///
    /// Panics if [`take_service`] was already called.
    ///
    /// [`take_service`]: crate::util::CallAll::take_service
    pub fn into_inner(self) -> Svc {
        self.inner.into_inner()
    }
    /// Extract the wrapped [`Service`].
    ///
    /// This [`CallAll`] can no longer be used after this function has been called.
    ///
    /// # Panics
    ///
    /// Panics if [`take_service`] was already called.
    ///
    /// [`take_service`]: crate::util::CallAll::take_service
    pub fn take_service(self: Pin<&mut Self>) -> Svc {
        self.project().inner.take_service()
    }
    /// Return responses as they are ready, regardless of the initial order.
    ///
    /// This function must be called before the stream is polled.
    ///
    /// # Panics
    ///
    /// Panics if [`poll`] was called.
    ///
    /// [`poll`]: std::future::Future::poll
    pub fn unordered(self) -> super::CallAllUnordered<Svc, S> {
        self.inner.unordered()
    }
}
impl<Svc, S> Stream for CallAll<Svc, S>
where
    Svc: Service<S::Item>,
    S: Stream,
{
    type Item = Result<Svc::Response, Svc::Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.project().inner.poll_next(cx)
    }
}
// Drives the in-flight futures with `FuturesOrdered`: `push_back` preserves
// submission order, so responses are yielded in request order.
impl<F: Future> common::Drive<F> for FuturesOrdered<F> {
    fn is_empty(&self) -> bool {
        FuturesOrdered::is_empty(self)
    }
    fn push(&mut self, future: F) {
        FuturesOrdered::push_back(self, future)
    }
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<F::Output>> {
        Stream::poll_next(Pin::new(self), cx)
    }
}

View File

@@ -0,0 +1,98 @@
//! [`Stream<Item = Request>`][stream] + [`Service<Request>`] => [`Stream<Item = Response>`][stream].
//!
//! [`Service<Request>`]: crate::Service
//! [stream]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
use super::common;
use futures_core::Stream;
use futures_util::stream::FuturesUnordered;
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
pin_project! {
    /// A stream of responses received from the inner service in received order.
    ///
    /// Similar to [`CallAll`] except, instead of yielding responses in request order,
    /// responses are returned as they are available.
    ///
    /// [`CallAll`]: crate::util::CallAll
    #[derive(Debug)]
    pub struct CallAllUnordered<Svc, S>
    where
        Svc: Service<S::Item>,
        S: Stream,
    {
        // Shared implementation driven by `FuturesUnordered`, which yields
        // responses in completion order rather than request order.
        #[pin]
        inner: common::CallAll<Svc, S, FuturesUnordered<Svc::Future>>,
    }
}
impl<Svc, S> CallAllUnordered<Svc, S>
where
    Svc: Service<S::Item>,
    S: Stream,
{
    /// Create new [`CallAllUnordered`] combinator.
    ///
    /// [`Stream`]: https://docs.rs/futures/latest/futures/stream/trait.Stream.html
    pub fn new(service: Svc, stream: S) -> CallAllUnordered<Svc, S> {
        CallAllUnordered {
            inner: common::CallAll::new(service, stream, FuturesUnordered::new()),
        }
    }
    /// Extract the wrapped [`Service`].
    ///
    /// # Panics
    ///
    /// Panics if [`take_service`] was already called.
    ///
    /// [`take_service`]: crate::util::CallAllUnordered::take_service
    pub fn into_inner(self) -> Svc {
        self.inner.into_inner()
    }
    /// Extract the wrapped `Service`.
    ///
    /// This [`CallAllUnordered`] can no longer be used after this function has been called.
    ///
    /// # Panics
    ///
    /// Panics if [`take_service`] was already called.
    ///
    /// [`take_service`]: crate::util::CallAllUnordered::take_service
    pub fn take_service(self: Pin<&mut Self>) -> Svc {
        self.project().inner.take_service()
    }
}
impl<Svc, S> Stream for CallAllUnordered<Svc, S>
where
    Svc: Service<S::Item>,
    S: Stream,
{
    type Item = Result<Svc::Response, Svc::Error>;
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.project().inner.poll_next(cx)
    }
}
// Drives the in-flight futures with `FuturesUnordered`: responses are
// yielded as soon as they complete, regardless of request order.
impl<F: Future> common::Drive<F> for FuturesUnordered<F> {
    fn is_empty(&self) -> bool {
        FuturesUnordered::is_empty(self)
    }
    fn push(&mut self, future: F) {
        FuturesUnordered::push(self, future)
    }
    fn poll(&mut self, cx: &mut Context<'_>) -> Poll<Option<F::Output>> {
        Stream::poll_next(Pin::new(self), cx)
    }
}

103
vendor/tower/src/util/either.rs vendored Normal file
View File

@@ -0,0 +1,103 @@
//! Contains [`Either`] and related types and functions.
//!
//! See [`Either`] documentation for more details.
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_layer::Layer;
use tower_service::Service;
/// Combine two different service types into a single type.
///
/// Both services must be of the same request, response, and error types.
/// [`Either`] is useful for handling conditional branching in service middleware
/// to different inner service types.
#[derive(Clone, Copy, Debug)]
pub enum Either<A, B> {
    /// The first alternative.
    #[allow(missing_docs)]
    Left(A),
    /// The second alternative.
    #[allow(missing_docs)]
    Right(B),
}
impl<A, B, Request> Service<Request> for Either<A, B>
where
    A: Service<Request>,
    B: Service<Request, Response = A::Response, Error = A::Error>,
{
    type Response = A::Response;
    type Error = A::Error;
    type Future = EitherResponseFuture<A::Future, B::Future>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated to whichever side is active.
        match self {
            Either::Left(service) => service.poll_ready(cx),
            Either::Right(service) => service.poll_ready(cx),
        }
    }

    fn call(&mut self, request: Request) -> Self::Future {
        // Wrap the inner future in the matching branch of `Kind` so both
        // arms produce the same concrete future type.
        let kind = match self {
            Either::Left(service) => Kind::Left {
                inner: service.call(request),
            },
            Either::Right(service) => Kind::Right {
                inner: service.call(request),
            },
        };
        EitherResponseFuture { kind }
    }
}
pin_project! {
    /// Response future for [`Either`].
    pub struct EitherResponseFuture<A, B> {
        #[pin]
        kind: Kind<A, B>
    }
}
pin_project! {
    // Internal two-variant future; mirrors `Either` at the future level so
    // the public future type doesn't expose the variants directly.
    #[project = KindProj]
    enum Kind<A, B> {
        Left { #[pin] inner: A },
        Right { #[pin] inner: B },
    }
}
impl<A, B> Future for EitherResponseFuture<A, B>
where
    A: Future,
    B: Future<Output = A::Output>,
{
    type Output = A::Output;
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Poll whichever branch is active; both have the same output type.
        match self.project().kind.project() {
            KindProj::Left { inner } => inner.poll(cx),
            KindProj::Right { inner } => inner.poll(cx),
        }
    }
}
impl<S, A, B> Layer<S> for Either<A, B>
where
    A: Layer<S>,
    B: Layer<S>,
{
    type Service = Either<A::Service, B::Service>;
    fn layer(&self, inner: S) -> Self::Service {
        // Layering preserves the branch: a `Left` layer yields a `Left` service.
        match self {
            Either::Left(layer) => Either::Left(layer.layer(inner)),
            Either::Right(layer) => Either::Right(layer.layer(inner)),
        }
    }
}

217
vendor/tower/src/util/future_service.rs vendored Normal file
View File

@@ -0,0 +1,217 @@
use std::fmt;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
/// Returns a new [`FutureService`] for the given future.
///
/// A [`FutureService`] allows you to treat a future that resolves to a service as a service. This
/// can be useful for services that are created asynchronously.
///
/// # Example
/// ```
/// use tower::{service_fn, Service, ServiceExt};
/// use tower::util::future_service;
/// use std::convert::Infallible;
///
/// # fn main() {
/// # async {
/// // A future which outputs a type implementing `Service`.
/// let future_of_a_service = async {
/// let svc = service_fn(|_req: ()| async { Ok::<_, Infallible>("ok") });
/// Ok::<_, Infallible>(svc)
/// };
///
/// // Wrap the future with a `FutureService`, allowing it to be used
/// // as a service without awaiting the future's completion:
/// let mut svc = future_service(Box::pin(future_of_a_service));
///
/// // Now, when we wait for the service to become ready, it will
/// // drive the future to completion internally.
/// let svc = svc.ready().await.unwrap();
/// let res = svc.call(()).await.unwrap();
/// # };
/// # }
/// ```
///
/// # Regarding the [`Unpin`] bound
///
/// The [`Unpin`] bound on `F` is necessary because the future will be polled in
/// [`Service::poll_ready`] which doesn't have a pinned receiver (it takes `&mut self` and not `self:
/// Pin<&mut Self>`). So we cannot put the future into a `Pin` without requiring `Unpin`.
///
/// This will most likely come up if you're calling `future_service` with an async block. In that
/// case you can use `Box::pin(async { ... })` as shown in the example.
pub fn future_service<F, S, R, E>(future: F) -> FutureService<F, S>
where
F: Future<Output = Result<S, E>> + Unpin,
S: Service<R, Error = E>,
{
FutureService::new(future)
}
/// A type that implements [`Service`] for a [`Future`] that produces a [`Service`].
///
/// See [`future_service`] for more details.
#[derive(Clone)]
pub struct FutureService<F, S> {
    // Either the still-pending future (`State::Future`) or the resolved
    // inner service (`State::Service`); advanced in `poll_ready`.
    state: State<F, S>,
}
impl<F, S> FutureService<F, S> {
/// Returns a new [`FutureService`] for the given future.
///
/// A [`FutureService`] allows you to treat a future that resolves to a service as a service. This
/// can be useful for services that are created asynchronously.
///
/// # Example
/// ```
/// use tower::{service_fn, Service, ServiceExt};
/// use tower::util::FutureService;
/// use std::convert::Infallible;
///
/// # fn main() {
/// # async {
/// // A future which outputs a type implementing `Service`.
/// let future_of_a_service = async {
/// let svc = service_fn(|_req: ()| async { Ok::<_, Infallible>("ok") });
/// Ok::<_, Infallible>(svc)
/// };
///
/// // Wrap the future with a `FutureService`, allowing it to be used
/// // as a service without awaiting the future's completion:
/// let mut svc = FutureService::new(Box::pin(future_of_a_service));
///
/// // Now, when we wait for the service to become ready, it will
/// // drive the future to completion internally.
/// let svc = svc.ready().await.unwrap();
/// let res = svc.call(()).await.unwrap();
/// # };
/// # }
/// ```
///
/// # Regarding the [`Unpin`] bound
///
/// The [`Unpin`] bound on `F` is necessary because the future will be polled in
/// [`Service::poll_ready`] which doesn't have a pinned receiver (it takes `&mut self` and not `self:
/// Pin<&mut Self>`). So we cannot put the future into a `Pin` without requiring `Unpin`.
///
/// This will most likely come up if you're calling `future_service` with an async block. In that
/// case you can use `Box::pin(async { ... })` as shown in the example.
pub const fn new(future: F) -> Self {
Self {
state: State::Future(future),
}
}
}
impl<F, S> fmt::Debug for FutureService<F, S>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegates to `State`'s manual `Debug` impl below, which works even
        // though the future type `F` is typically not `Debug`. The exact
        // output is asserted in this file's tests.
        f.debug_struct("FutureService")
            .field("state", &format_args!("{:?}", self.state))
            .finish()
    }
}

// Drives `FutureService`: starts as the pending future, becomes the resolved
// inner service after the first successful `poll_ready`.
#[derive(Clone)]
enum State<F, S> {
    Future(F),
    Service(S),
}

impl<F, S> fmt::Debug for State<F, S>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // The future itself need not be `Debug`; print its type name
            // instead.
            State::Future(_) => f
                .debug_tuple("State::Future")
                .field(&format_args!("<{}>", std::any::type_name::<F>()))
                .finish(),
            State::Service(svc) => f.debug_tuple("State::Service").field(svc).finish(),
        }
    }
}
impl<F, S, R, E> Service<R> for FutureService<F, S>
where
    F: Future<Output = Result<S, E>> + Unpin,
    S: Service<R, Error = E>,
{
    type Response = S::Response;
    type Error = E;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Drive the inner future first if the service has not been produced
        // yet; once it resolves, store the service and immediately ask it
        // for readiness on the next loop iteration.
        loop {
            match &mut self.state {
                State::Service(svc) => return svc.poll_ready(cx),
                State::Future(fut) => {
                    // `F: Unpin`, so pinning on the stack here is free.
                    let svc = std::task::ready!(Pin::new(fut).poll(cx)?);
                    self.state = State::Service(svc);
                }
            }
        }
    }

    fn call(&mut self, req: R) -> Self::Future {
        // Callers must observe `poll_ready` return `Ready(Ok(_))` first, at
        // which point the state is guaranteed to be `Service`.
        match &mut self.state {
            State::Service(svc) => svc.call(req),
            State::Future(_) => {
                panic!("FutureService::call was called before FutureService::poll_ready")
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::util::{future_service, ServiceExt};
    use crate::Service;
    use std::{
        convert::Infallible,
        future::{ready, Ready},
    };

    // Verifies the manual `Debug` impls: before readiness the state prints
    // the pending future's type name; afterwards it prints the resolved
    // service.
    #[tokio::test]
    async fn pending_service_debug_impl() {
        let mut pending_svc = future_service(ready(Ok(DebugService)));
        assert_eq!(
            format!("{pending_svc:?}"),
            "FutureService { state: State::Future(<core::future::ready::Ready<core::result::Result<tower::util::future_service::tests::DebugService, core::convert::Infallible>>>) }"
        );

        // Awaiting readiness resolves the inner future into the service.
        pending_svc.ready().await.unwrap();

        assert_eq!(
            format!("{pending_svc:?}"),
            "FutureService { state: State::Service(DebugService) }"
        );
    }

    // Minimal always-ready service used only for its `Debug` output.
    #[derive(Debug)]
    struct DebugService;

    impl Service<()> for DebugService {
        type Response = ();
        type Error = Infallible;
        type Future = Ready<Result<Self::Response, Self::Error>>;

        fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            Ok(()).into()
        }

        fn call(&mut self, _req: ()) -> Self::Future {
            ready(Ok(()))
        }
    }
}

98
vendor/tower/src/util/map_err.rs vendored Normal file
View File

@@ -0,0 +1,98 @@
use futures_util::{future, TryFutureExt};
use std::fmt;
use std::task::{Context, Poll};
use tower_layer::Layer;
use tower_service::Service;
/// Service returned by the [`map_err`] combinator.
///
/// [`map_err`]: crate::util::ServiceExt::map_err
#[derive(Clone)]
pub struct MapErr<S, F> {
    // The wrapped service.
    inner: S,
    // Closure applied to the inner service's error.
    f: F,
}

impl<S, F> fmt::Debug for MapErr<S, F>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MapErr")
            .field("inner", &self.inner)
            // Closures are not `Debug`; show the closure's type name instead.
            .field("f", &format_args!("{}", std::any::type_name::<F>()))
            .finish()
    }
}

/// A [`Layer`] that produces [`MapErr`] services.
///
/// [`Layer`]: tower_layer::Layer
#[derive(Clone, Debug)]
pub struct MapErrLayer<F> {
    // Closure cloned into every service this layer produces.
    f: F,
}

// Wraps `futures_util::future::MapErr` in an opaque newtype so the combinator
// type is not exposed in this crate's public API.
opaque_future! {
    /// Response future from [`MapErr`] services.
    ///
    /// [`MapErr`]: crate::util::MapErr
    pub type MapErrFuture<F, N> = future::MapErr<F, N>;
}
impl<S, F> MapErr<S, F> {
    /// Creates a new [`MapErr`] service wrapping `inner`, mapping its errors
    /// with `f`.
    pub const fn new(inner: S, f: F) -> Self {
        Self { inner, f }
    }

    /// Returns a new [`Layer`] that produces [`MapErr`] services.
    ///
    /// This is a convenience function that simply calls [`MapErrLayer::new`].
    ///
    /// [`Layer`]: tower_layer::Layer
    pub fn layer(f: F) -> MapErrLayer<F> {
        MapErrLayer { f }
    }
}
impl<S, F, Request, Error> Service<Request> for MapErr<S, F>
where
    S: Service<Request>,
    // The closure is cloned into each response future, so it must be
    // `Clone`; `FnOnce` suffices since each clone runs at most once.
    F: FnOnce(S::Error) -> Error + Clone,
{
    type Response = S::Response;
    type Error = Error;
    type Future = MapErrFuture<S::Future, F>;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness errors are mapped as well, not just call errors.
        self.inner.poll_ready(cx).map_err(self.f.clone())
    }

    #[inline]
    fn call(&mut self, request: Request) -> Self::Future {
        MapErrFuture::new(self.inner.call(request).map_err(self.f.clone()))
    }
}
impl<F> MapErrLayer<F> {
    /// Creates a new [`MapErrLayer`] holding the error-mapping closure `f`.
    pub const fn new(f: F) -> Self {
        Self { f }
    }
}

impl<S, F> Layer<S> for MapErrLayer<F>
where
    F: Clone,
{
    type Service = MapErr<S, F>;

    fn layer(&self, inner: S) -> Self::Service {
        // Each layered service gets its own clone of the closure.
        let f = self.f.clone();
        MapErr { inner, f }
    }
}

113
vendor/tower/src/util/map_future.rs vendored Normal file
View File

@@ -0,0 +1,113 @@
use std::{
fmt,
future::Future,
task::{Context, Poll},
};
use tower_layer::Layer;
use tower_service::Service;
/// [`Service`] returned by the [`map_future`] combinator.
///
/// [`map_future`]: crate::util::ServiceExt::map_future
#[derive(Clone)]
pub struct MapFuture<S, F> {
    // The wrapped service.
    inner: S,
    // Closure applied to the inner service's response future.
    f: F,
}
impl<S, F> MapFuture<S, F> {
/// Creates a new [`MapFuture`] service.
pub const fn new(inner: S, f: F) -> Self {
Self { inner, f }
}
/// Returns a new [`Layer`] that produces [`MapFuture`] services.
///
/// This is a convenience function that simply calls [`MapFutureLayer::new`].
///
/// [`Layer`]: tower_layer::Layer
pub fn layer(f: F) -> MapFutureLayer<F> {
MapFutureLayer::new(f)
}
/// Get a reference to the inner service
pub fn get_ref(&self) -> &S {
&self.inner
}
/// Get a mutable reference to the inner service
pub fn get_mut(&mut self) -> &mut S {
&mut self.inner
}
/// Consume `self`, returning the inner service
pub fn into_inner(self) -> S {
self.inner
}
}
impl<R, S, F, T, E, Fut> Service<R> for MapFuture<S, F>
where
    S: Service<R>,
    F: FnMut(S::Future) -> Fut,
    // The inner error must convert into the new error type so readiness
    // errors can be propagated unchanged.
    E: From<S::Error>,
    Fut: Future<Output = Result<T, E>>,
{
    type Response = T;
    type Error = E;
    type Future = Fut;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(From::from)
    }

    fn call(&mut self, req: R) -> Self::Future {
        // Hand the inner future to the closure; whatever future it produces
        // is returned directly to the caller.
        (self.f)(self.inner.call(req))
    }
}
impl<S, F> fmt::Debug for MapFuture<S, F>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MapFuture")
            .field("inner", &self.inner)
            // Closures are not `Debug`; show the closure's type name instead.
            .field("f", &format_args!("{}", std::any::type_name::<F>()))
            .finish()
    }
}

/// A [`Layer`] that produces a [`MapFuture`] service.
///
/// [`Layer`]: tower_layer::Layer
#[derive(Clone)]
pub struct MapFutureLayer<F> {
    // Closure cloned into every service this layer produces.
    f: F,
}
impl<F> MapFutureLayer<F> {
/// Creates a new [`MapFutureLayer`] layer.
pub const fn new(f: F) -> Self {
Self { f }
}
}
impl<S, F> Layer<S> for MapFutureLayer<F>
where
F: Clone,
{
type Service = MapFuture<S, F>;
fn layer(&self, inner: S) -> Self::Service {
MapFuture::new(inner, self.f.clone())
}
}
impl<F> fmt::Debug for MapFutureLayer<F> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MapFutureLayer")
.field("f", &format_args!("{}", std::any::type_name::<F>()))
.finish()
}
}

90
vendor/tower/src/util/map_request.rs vendored Normal file
View File

@@ -0,0 +1,90 @@
use std::fmt;
use std::task::{Context, Poll};
use tower_layer::Layer;
use tower_service::Service;
/// Service returned by the [`MapRequest`] combinator.
///
/// [`MapRequest`]: crate::util::ServiceExt::map_request
#[derive(Clone)]
pub struct MapRequest<S, F> {
    // The wrapped service.
    inner: S,
    // Closure applied to each request before it is forwarded.
    f: F,
}

impl<S, F> fmt::Debug for MapRequest<S, F>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MapRequest")
            .field("inner", &self.inner)
            // Closures are not `Debug`; show the closure's type name instead.
            .field("f", &format_args!("{}", std::any::type_name::<F>()))
            .finish()
    }
}
impl<S, F> MapRequest<S, F> {
    /// Creates a new [`MapRequest`] service wrapping `inner`, transforming
    /// requests with `f` before they reach it.
    pub const fn new(inner: S, f: F) -> Self {
        Self { f, inner }
    }

    /// Returns a new [`Layer`] that produces [`MapRequest`] services.
    ///
    /// This is a convenience function that simply calls [`MapRequestLayer::new`].
    ///
    /// [`Layer`]: tower_layer::Layer
    pub fn layer(f: F) -> MapRequestLayer<F> {
        MapRequestLayer { f }
    }
}
impl<S, F, R1, R2> Service<R1> for MapRequest<S, F>
where
    S: Service<R2>,
    F: FnMut(R1) -> R2,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), S::Error>> {
        // Readiness is purely the inner service's; the mapping adds none.
        self.inner.poll_ready(cx)
    }

    #[inline]
    fn call(&mut self, request: R1) -> S::Future {
        // Transform the request, then forward it; the inner service's
        // response future is returned unchanged.
        self.inner.call((self.f)(request))
    }
}
/// A [`Layer`] that produces [`MapRequest`] services.
///
/// [`Layer`]: tower_layer::Layer
#[derive(Clone, Debug)]
pub struct MapRequestLayer<F> {
    f: F,
}

impl<F> MapRequestLayer<F> {
    /// Creates a new [`MapRequestLayer`] holding the request-mapping closure `f`.
    pub const fn new(f: F) -> Self {
        Self { f }
    }
}

impl<S, F> Layer<S> for MapRequestLayer<F>
where
    F: Clone,
{
    type Service = MapRequest<S, F>;

    fn layer(&self, inner: S) -> Self::Service {
        // Each layered service gets its own clone of the closure.
        let f = self.f.clone();
        MapRequest { inner, f }
    }
}

98
vendor/tower/src/util/map_response.rs vendored Normal file
View File

@@ -0,0 +1,98 @@
use futures_util::{future::MapOk, TryFutureExt};
use std::fmt;
use std::task::{Context, Poll};
use tower_layer::Layer;
use tower_service::Service;
/// Service returned by the [`map_response`] combinator.
///
/// [`map_response`]: crate::util::ServiceExt::map_response
#[derive(Clone)]
pub struct MapResponse<S, F> {
    // The wrapped service.
    inner: S,
    // Closure applied to each successful response.
    f: F,
}

impl<S, F> fmt::Debug for MapResponse<S, F>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MapResponse")
            .field("inner", &self.inner)
            // Closures are not `Debug`; show the closure's type name instead.
            .field("f", &format_args!("{}", std::any::type_name::<F>()))
            .finish()
    }
}

/// A [`Layer`] that produces a [`MapResponse`] service.
///
/// [`Layer`]: tower_layer::Layer
#[derive(Debug, Clone)]
pub struct MapResponseLayer<F> {
    // Closure cloned into every service this layer produces.
    f: F,
}

// Wraps `futures_util::future::MapOk` in an opaque newtype so the combinator
// type is not exposed in this crate's public API.
opaque_future! {
    /// Response future from [`MapResponse`] services.
    ///
    /// [`MapResponse`]: crate::util::MapResponse
    pub type MapResponseFuture<F, N> = MapOk<F, N>;
}
impl<S, F> MapResponse<S, F> {
    /// Creates a new [`MapResponse`] service wrapping `inner`, transforming
    /// its successful responses with `f`.
    pub const fn new(inner: S, f: F) -> Self {
        Self { inner, f }
    }

    /// Returns a new [`Layer`] that produces [`MapResponse`] services.
    ///
    /// This is a convenience function that simply calls [`MapResponseLayer::new`].
    ///
    /// [`Layer`]: tower_layer::Layer
    pub fn layer(f: F) -> MapResponseLayer<F> {
        MapResponseLayer { f }
    }
}
impl<S, F, Request, Response> Service<Request> for MapResponse<S, F>
where
    S: Service<Request>,
    // Cloned into each response future; `FnOnce` suffices since each clone
    // runs at most once.
    F: FnOnce(S::Response) -> Response + Clone,
{
    type Response = Response;
    type Error = S::Error;
    type Future = MapResponseFuture<S::Future, F>;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx)
    }

    #[inline]
    fn call(&mut self, request: Request) -> Self::Future {
        // Only `Ok` responses are mapped; errors pass through untouched.
        MapResponseFuture::new(self.inner.call(request).map_ok(self.f.clone()))
    }
}
impl<F> MapResponseLayer<F> {
    /// Creates a new [`MapResponseLayer`] holding the response-mapping
    /// closure `f`.
    pub const fn new(f: F) -> Self {
        Self { f }
    }
}

impl<S, F> Layer<S> for MapResponseLayer<F>
where
    F: Clone,
{
    type Service = MapResponse<S, F>;

    fn layer(&self, inner: S) -> Self::Service {
        // Each layered service gets its own clone of the closure.
        let f = self.f.clone();
        MapResponse { inner, f }
    }
}

99
vendor/tower/src/util/map_result.rs vendored Normal file
View File

@@ -0,0 +1,99 @@
use futures_util::{future::Map, FutureExt};
use std::fmt;
use std::task::{Context, Poll};
use tower_layer::Layer;
use tower_service::Service;
/// Service returned by the [`map_result`] combinator.
///
/// [`map_result`]: crate::util::ServiceExt::map_result
#[derive(Clone)]
pub struct MapResult<S, F> {
    // The wrapped service.
    inner: S,
    // Closure applied to the complete `Result` produced by the inner service.
    f: F,
}

impl<S, F> fmt::Debug for MapResult<S, F>
where
    S: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MapResult")
            .field("inner", &self.inner)
            // Closures are not `Debug`; show the closure's type name instead.
            .field("f", &format_args!("{}", std::any::type_name::<F>()))
            .finish()
    }
}

/// A [`Layer`] that produces a [`MapResult`] service.
///
/// [`Layer`]: tower_layer::Layer
#[derive(Debug, Clone)]
pub struct MapResultLayer<F> {
    // Closure cloned into every service this layer produces.
    f: F,
}

// Wraps `futures_util::future::Map` in an opaque newtype so the combinator
// type is not exposed in this crate's public API.
opaque_future! {
    /// Response future from [`MapResult`] services.
    ///
    /// [`MapResult`]: crate::util::MapResult
    pub type MapResultFuture<F, N> = Map<F, N>;
}
impl<S, F> MapResult<S, F> {
    /// Creates a new [`MapResult`] service wrapping `inner`, transforming its
    /// complete `Result` output with `f`.
    pub const fn new(inner: S, f: F) -> Self {
        Self { inner, f }
    }

    /// Returns a new [`Layer`] that produces [`MapResult`] services.
    ///
    /// This is a convenience function that simply calls [`MapResultLayer::new`].
    ///
    /// [`Layer`]: tower_layer::Layer
    pub fn layer(f: F) -> MapResultLayer<F> {
        MapResultLayer { f }
    }
}
impl<S, F, Request, Response, Error> Service<Request> for MapResult<S, F>
where
    S: Service<Request>,
    // Readiness errors bypass the closure, so they must convert directly
    // into the new error type.
    Error: From<S::Error>,
    F: FnOnce(Result<S::Response, S::Error>) -> Result<Response, Error> + Clone,
{
    type Response = Response;
    type Error = Error;
    type Future = MapResultFuture<S::Future, F>;

    #[inline]
    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }

    #[inline]
    fn call(&mut self, request: Request) -> Self::Future {
        // Unlike `map_response`/`map_err`, the closure sees the whole
        // `Result` and may turn errors into successes or vice versa.
        MapResultFuture::new(self.inner.call(request).map(self.f.clone()))
    }
}
impl<F> MapResultLayer<F> {
    /// Creates a new [`MapResultLayer`] holding the result-mapping closure `f`.
    pub const fn new(f: F) -> Self {
        Self { f }
    }
}

impl<S, F> Layer<S> for MapResultLayer<F>
where
    F: Clone,
{
    type Service = MapResult<S, F>;

    fn layer(&self, inner: S) -> Self::Service {
        // Each layered service gets its own clone of the closure.
        let f = self.f.clone();
        MapResult { inner, f }
    }
}

1073
vendor/tower/src/util/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

104
vendor/tower/src/util/oneshot.rs vendored Normal file
View File

@@ -0,0 +1,104 @@
use pin_project_lite::pin_project;
use std::{
fmt,
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
pin_project! {
    /// A [`Future`] consuming a [`Service`] and request, waiting until the [`Service`]
    /// is ready, and then calling [`Service::call`] with the request, and
    /// waiting for that [`Future`].
    #[derive(Debug)]
    pub struct Oneshot<S: Service<Req>, Req> {
        #[pin]
        state: State<S, Req>,
    }
}

pin_project! {
    // State machine: wait for readiness while holding the request, then hold
    // the in-flight call future, then terminal `Done`.
    #[project = StateProj]
    enum State<S: Service<Req>, Req> {
        NotReady {
            svc: S,
            // `Option` so the request can be moved out at the transition to
            // `Called`.
            req: Option<Req>,
        },
        Called {
            #[pin]
            fut: S::Future,
        },
        Done,
    }
}
impl<S: Service<Req>, Req> State<S, Req> {
const fn not_ready(svc: S, req: Option<Req>) -> Self {
Self::NotReady { svc, req }
}
const fn called(fut: S::Future) -> Self {
Self::Called { fut }
}
}
impl<S, Req> fmt::Debug for State<S, Req>
where
    S: Service<Req> + fmt::Debug,
    Req: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            State::NotReady {
                svc,
                req: Some(req),
            } => f
                .debug_tuple("State::NotReady")
                .field(svc)
                .field(req)
                .finish(),
            // `req` is only `None` transiently inside `Oneshot::poll`,
            // between taking the request and switching to `Called`; no
            // external code runs in that window, so this arm should never be
            // observable.
            State::NotReady { req: None, .. } => unreachable!(),
            // `S::Future` is not required to be `Debug`; print a placeholder.
            State::Called { .. } => f.debug_tuple("State::Called").field(&"S::Future").finish(),
            State::Done => f.debug_tuple("State::Done").finish(),
        }
    }
}
impl<S, Req> Oneshot<S, Req>
where
    S: Service<Req>,
{
    /// Creates a future that waits for `svc` to become ready, dispatches
    /// `req` to it, and resolves with the response.
    #[allow(missing_docs)]
    pub const fn new(svc: S, req: Req) -> Self {
        Self {
            state: State::not_ready(svc, Some(req)),
        }
    }
}
impl<S, Req> Future for Oneshot<S, Req>
where
    S: Service<Req>,
{
    type Output = Result<S::Response, S::Error>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        // Drive the state machine: readiness -> call -> done. Errors from
        // `poll_ready` and from the call future are both propagated via `?`.
        loop {
            match this.state.as_mut().project() {
                StateProj::NotReady { svc, req } => {
                    ready!(svc.poll_ready(cx))?;
                    // The request is present until this single transition.
                    let f = svc.call(req.take().expect("already called"));
                    this.state.set(State::called(f));
                }
                StateProj::Called { fut } => {
                    let res = ready!(fut.poll(cx))?;
                    this.state.set(State::Done);
                    return Poll::Ready(Ok(res));
                }
                // Polling again after completion is a contract violation.
                StateProj::Done => panic!("polled after complete"),
            }
        }
    }
}

21
vendor/tower/src/util/optional/error.rs vendored Normal file
View File

@@ -0,0 +1,21 @@
use std::{error, fmt};
/// Error returned if the inner [`Service`] has not been set.
///
/// [`Service`]: crate::Service
#[derive(Debug)]
pub struct None(());

impl None {
    // Crate-internal constructor; the private unit field keeps the type
    // non-constructible outside this crate.
    pub(crate) fn new() -> None {
        Self(())
    }
}

impl fmt::Display for None {
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        // The message mirrors the type name.
        fmt.write_str("None")
    }
}

impl error::Error for None {}

View File

@@ -0,0 +1,39 @@
use super::error;
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
pin_project! {
    /// Response future returned by [`Optional`].
    ///
    /// [`Optional`]: crate::util::Optional
    #[derive(Debug)]
    pub struct ResponseFuture<T> {
        // `None` when the wrapped service was absent; resolving then yields
        // an `error::None`.
        #[pin]
        inner: Option<T>,
    }
}

impl<T> ResponseFuture<T> {
    // Crate-internal constructor used by `Optional::call`.
    pub(crate) fn new(inner: Option<T>) -> ResponseFuture<T> {
        ResponseFuture { inner }
    }
}
impl<F, T, E> Future for ResponseFuture<F>
where
    F: Future<Output = Result<T, E>>,
    E: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // `as_pin_mut` projects `Pin<&mut Option<F>>` to `Option<Pin<&mut F>>`.
        match self.project().inner.as_pin_mut() {
            Some(inner) => inner.poll(cx).map_err(Into::into),
            // No inner service existed: fail with the dedicated `None` error.
            None => Poll::Ready(Err(error::None::new().into())),
        }
    }
}

59
vendor/tower/src/util/optional/mod.rs vendored Normal file
View File

@@ -0,0 +1,59 @@
//! Contains [`Optional`] and related types and functions.
//!
//! See [`Optional`] documentation for more details.
/// Error types for [`Optional`].
pub mod error;
/// Future types for [`Optional`].
pub mod future;
use self::future::ResponseFuture;
use std::task::{Context, Poll};
use tower_service::Service;
/// Optionally forwards requests to an inner service.
///
/// If the inner service is [`None`], [`optional::None`] is returned as the response.
///
/// [`optional::None`]: crate::util::error::optional::None
#[derive(Debug)]
pub struct Optional<T> {
    // `None` means every call resolves to an `error::None` failure.
    inner: Option<T>,
}
impl<T> Optional<T> {
    /// Create a new [`Optional`] wrapping `inner`, which may be absent.
    pub const fn new<Request>(inner: Option<T>) -> Optional<T>
    where
        T: Service<Request>,
        T::Error: Into<crate::BoxError>,
    {
        Self { inner }
    }
}
impl<T, Request> Service<Request> for Optional<T>
where
    T: Service<Request>,
    T::Error: Into<crate::BoxError>,
{
    type Response = T::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<T::Future>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        match self.inner.as_mut() {
            // Delegate readiness to the inner service, boxing its error.
            Some(inner) => inner.poll_ready(cx).map_err(Into::into),
            // An absent service is always "ready"; the failure is reported
            // from the response future instead.
            None => Poll::Ready(Ok(())),
        }
    }

    fn call(&mut self, request: Request) -> Self::Future {
        ResponseFuture::new(self.inner.as_mut().map(|inner| inner.call(request)))
    }
}

102
vendor/tower/src/util/ready.rs vendored Normal file
View File

@@ -0,0 +1,102 @@
use std::{fmt, marker::PhantomData};
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
/// A [`Future`] that yields the service when it is ready to accept a request.
///
/// [`ReadyOneshot`] values are produced by [`ServiceExt::ready_oneshot`].
///
/// [`ServiceExt::ready_oneshot`]: crate::util::ServiceExt::ready_oneshot
pub struct ReadyOneshot<T, Request> {
    // `Some` until readiness is observed, then taken and yielded to the
    // caller.
    inner: Option<T>,
    // Ties the `Request` type parameter to this future without storing one;
    // `fn() -> Request` is covariant in `Request` and never `!Send`/`!Sync`.
    _p: PhantomData<fn() -> Request>,
}

// Safety: plain safe code — the future only accesses `inner` through
// `&mut self` (never relying on address stability), so it is `Unpin`
// regardless of `T`.
impl<T, Request> Unpin for ReadyOneshot<T, Request> {}
impl<T, Request> ReadyOneshot<T, Request>
where
T: Service<Request>,
{
#[allow(missing_docs)]
pub const fn new(service: T) -> Self {
Self {
inner: Some(service),
_p: PhantomData,
}
}
}
impl<T, Request> Future for ReadyOneshot<T, Request>
where
    T: Service<Request>,
{
    type Output = Result<T, T::Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Poll the stored service for readiness: `ready!` returns early on
        // `Pending`, `?` propagates readiness errors.
        ready!(self
            .inner
            .as_mut()
            .expect("poll after Poll::Ready")
            .poll_ready(cx))?;

        // Ready: hand the service itself to the caller. Polling again after
        // this resolves panics on the `expect` above.
        Poll::Ready(Ok(self.inner.take().expect("poll after Poll::Ready")))
    }
}
impl<T, Request> fmt::Debug for ReadyOneshot<T, Request>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("ReadyOneshot")
            .field("inner", &self.inner)
            .finish()
    }
}

/// A future that yields a mutable reference to the service when it is ready to accept a request.
///
/// [`Ready`] values are produced by [`ServiceExt::ready`].
///
/// [`ServiceExt::ready`]: crate::util::ServiceExt::ready
pub struct Ready<'a, T, Request>(ReadyOneshot<&'a mut T, Request>);

// Safety: This is safe for the same reason that the impl for ReadyOneshot is safe.
impl<T, Request> Unpin for Ready<'_, T, Request> {}
impl<'a, T, Request> Ready<'a, T, Request>
where
T: Service<Request>,
{
#[allow(missing_docs)]
pub fn new(service: &'a mut T) -> Self {
Self(ReadyOneshot::new(service))
}
}
impl<'a, T, Request> Future for Ready<'a, T, Request>
where
    T: Service<Request>,
{
    type Output = Result<&'a mut T, T::Error>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Delegate to the inner `ReadyOneshot`, which here holds `&'a mut T`.
        Pin::new(&mut self.0).poll(cx)
    }
}

impl<T, Request> fmt::Debug for Ready<'_, T, Request>
where
    T: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Ready").field(&self.0).finish()
    }
}

187
vendor/tower/src/util/rng.rs vendored Normal file
View File

@@ -0,0 +1,187 @@
//! [PRNG] utilities for tower middleware.
//!
//! This module provides a generic [`Rng`] trait and a [`HasherRng`] that
//! implements the trait based on [`RandomState`] or any other [`Hasher`].
//!
//! These utilities replace tower's internal usage of `rand` with these smaller,
//! more lightweight methods. Most of the implementations are extracted from
//! their corresponding `rand` implementations.
//!
//! [PRNG]: https://en.wikipedia.org/wiki/Pseudorandom_number_generator
use std::{
collections::hash_map::RandomState,
hash::{BuildHasher, Hasher},
ops::Range,
};
/// A simple [PRNG] trait for use within tower middleware.
///
/// [PRNG]: https://en.wikipedia.org/wiki/Pseudorandom_number_generator
pub trait Rng {
    /// Generate a random [`u64`].
    fn next_u64(&mut self) -> u64;

    /// Generate a random [`f64`] between `[0, 1)`.
    fn next_f64(&mut self) -> f64 {
        // Borrowed from:
        // https://github.com/rust-random/rand/blob/master/src/distr/float.rs#L108
        // Keep the top 53 bits (f64 mantissa precision, incl. the implicit
        // bit) and scale them into [0, 1).
        let float_size = std::mem::size_of::<f64>() as u32 * 8;
        let precision = 52 + 1;
        let scale = 1.0 / ((1u64 << precision) as f64);

        let value = self.next_u64();
        let value = value >> (float_size - precision);

        scale * value as f64
    }

    /// Randomly pick a value within the range.
    ///
    /// # Panic
    ///
    /// - If `range.start >= range.end` this will panic in debug mode.
    fn next_range(&mut self, range: Range<u64>) -> u64 {
        debug_assert!(
            range.start < range.end,
            "The range start must be smaller than the end"
        );
        let start = range.start;
        let end = range.end;

        let range = end - start;

        let n = self.next_u64();

        // Modulo reduction: slightly biased for range widths that do not
        // divide 2^64 evenly, which is acceptable for load-balancing use.
        (n % range) + start
    }
}

impl<R: Rng + ?Sized> Rng for Box<R> {
    // Forward to the boxed generator so `Box<dyn Rng>` is itself an `Rng`.
    fn next_u64(&mut self) -> u64 {
        (**self).next_u64()
    }
}
/// A [`Rng`] implementation that derives random values from a [`Hasher`]:
/// each call to [`Rng::next_u64`] hashes an internal, ever-incrementing
/// counter.
///
/// # Default
///
/// The hasher type defaults to [`RandomState`], which uses libstd's built-in
/// source of per-instance randomness.
#[derive(Clone, Debug)]
pub struct HasherRng<H = RandomState> {
    hasher: H,
    counter: u64,
}

impl HasherRng {
    /// Create a new default [`HasherRng`].
    pub fn new() -> Self {
        Self::default()
    }
}

impl Default for HasherRng {
    fn default() -> Self {
        Self::with_hasher(RandomState::default())
    }
}

impl<H> HasherRng<H> {
    /// Create a new [`HasherRng`] with the provided hasher.
    pub fn with_hasher(hasher: H) -> Self {
        // The counter always starts at zero.
        Self { hasher, counter: 0 }
    }
}
impl<H> Rng for HasherRng<H>
where
    H: BuildHasher,
{
    fn next_u64(&mut self) -> u64 {
        // Hash the current counter with a fresh hasher on every call; the
        // wrapping counter guarantees a distinct input (and thus output
        // stream) for 2^64 consecutive calls.
        let mut hasher = self.hasher.build_hasher();
        hasher.write_u64(self.counter);
        self.counter = self.counter.wrapping_add(1);
        hasher.finish()
    }
}
/// A sampler modified from the Rand implementation for use internally for the balance middleware.
///
/// It's an implementation of Floyd's combination algorithm with amount fixed at 2. This uses no allocated
/// memory and finishes in constant time (only 2 random calls).
///
/// ref: This was borrowed and modified from the following Rand implementation
/// https://github.com/rust-random/rand/blob/b73640705d6714509f8ceccc49e8df996fa19f51/src/seq/index.rs#L375-L411
#[cfg(feature = "balance")]
pub(crate) fn sample_floyd2<R: Rng>(rng: &mut R, length: u64) -> [u64; 2] {
    // At least two candidates are required for two distinct indices to exist.
    debug_assert!(2 <= length);
    // Floyd's algorithm with amount = 2: draw `aidx` from the first
    // `length - 1` slots and `bidx` from all `length`; on collision remap
    // `aidx` to the last slot, which it could not otherwise have hit. The
    // result is always two distinct indices in `[0, length)`.
    let aidx = rng.next_range(0..length - 1);
    let bidx = rng.next_range(0..length);
    let aidx = if aidx == bidx { length - 1 } else { aidx };

    [aidx, bidx]
}
#[cfg(test)]
mod tests {
    use super::*;
    use quickcheck::*;

    quickcheck! {
        // `next_f64` must always land in the half-open unit interval.
        fn next_f64(counter: u64) -> TestResult {
            let mut rng = HasherRng {
                counter,
                ..HasherRng::default()
            };
            let n = rng.next_f64();

            TestResult::from_bool((0.0..1.0).contains(&n))
        }

        // `next_range` must return a value inside the requested range.
        fn next_range(counter: u64, range: Range<u64>) -> TestResult {
            // Empty/inverted ranges are rejected by `next_range` itself.
            if range.start >= range.end{
                return TestResult::discard();
            }

            let mut rng = HasherRng {
                counter,
                ..HasherRng::default()
            };

            let n = rng.next_range(range.clone());

            TestResult::from_bool(n >= range.start && (n < range.end || range.start == range.end))
        }

        // The two sampled indices must be distinct and in-bounds.
        fn sample_floyd2(counter: u64, length: u64) -> TestResult {
            if !(2..=256).contains(&length) {
                return TestResult::discard();
            }

            let mut rng = HasherRng {
                counter,
                ..HasherRng::default()
            };

            let [a, b] = super::sample_floyd2(&mut rng, length);

            if a >= length || b >= length || a == b {
                return TestResult::failed();
            }

            TestResult::passed()
        }
    }

    #[test]
    fn sample_inplace_boundaries() {
        // With exactly two candidates the sampler must return both indices,
        // in either order.
        let mut r = HasherRng::default();

        match super::sample_floyd2(&mut r, 2) {
            [0, 1] | [1, 0] => (),
            array => panic!("unexpected inplace boundaries: {:?}", array),
        }
    }
}

82
vendor/tower/src/util/service_fn.rs vendored Normal file
View File

@@ -0,0 +1,82 @@
use std::fmt;
use std::future::Future;
use std::task::{Context, Poll};
use tower_service::Service;
/// Returns a new [`ServiceFn`] with the given closure.
///
/// This lets you build a [`Service`] from an async function that returns a [`Result`].
///
/// The resulting service reports itself as always ready and invokes the
/// closure once per request.
///
/// # Example
///
/// ```
/// use tower::{service_fn, Service, ServiceExt, BoxError};
/// # struct Request;
/// # impl Request {
/// #     fn new() -> Self { Self }
/// # }
/// # struct Response(&'static str);
/// # impl Response {
/// #     fn new(body: &'static str) -> Self {
/// #         Self(body)
/// #     }
/// #     fn into_body(self) -> &'static str { self.0 }
/// # }
///
/// # #[tokio::main]
/// # async fn main() -> Result<(), BoxError> {
/// async fn handle(request: Request) -> Result<Response, BoxError> {
///     let response = Response::new("Hello, World!");
///     Ok(response)
/// }
///
/// let mut service = service_fn(handle);
///
/// let response = service
///     .ready()
///     .await?
///     .call(Request::new())
///     .await?;
///
/// assert_eq!("Hello, World!", response.into_body());
/// #
/// # Ok(())
/// # }
/// ```
pub fn service_fn<T>(f: T) -> ServiceFn<T> {
    ServiceFn { f }
}
/// A [`Service`] implemented by a closure.
///
/// See [`service_fn`] for more details.
#[derive(Copy, Clone)]
pub struct ServiceFn<T> {
    // The wrapped request handler.
    f: T,
}

impl<T> fmt::Debug for ServiceFn<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ServiceFn")
            // Closures are not `Debug`; show the closure's type name instead.
            .field("f", &format_args!("{}", std::any::type_name::<T>()))
            .finish()
    }
}
impl<T, F, Request, R, E> Service<Request> for ServiceFn<T>
where
    T: FnMut(Request) -> F,
    F: Future<Output = Result<R, E>>,
{
    type Response = R;
    type Error = E;
    type Future = F;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), E>> {
        // A bare closure has no backpressure; it is always ready.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Request) -> Self::Future {
        // Invoke the closure and return its future directly.
        (self.f)(req)
    }
}

Some files were not shown because too many files have changed in this diff Show More