chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

125
vendor/tower/src/hedge/delay.rs vendored Normal file
View File

@@ -0,0 +1,125 @@
use pin_project_lite::pin_project;
use std::time::Duration;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tower_service::Service;
use crate::util::Oneshot;
/// A policy which specifies how long each request should be delayed for.
pub trait Policy<Request> {
    /// Returns the amount of time the given request should be held back
    /// before it is dispatched to the inner service.
    fn delay(&self, req: &Request) -> Duration;
}

/// A middleware which delays sending the request to the underlying service
/// for an amount of time specified by the policy.
#[derive(Debug)]
pub struct Delay<P, S> {
    // Decides how long each request is held back.
    policy: P,
    // The wrapped service the delayed request is eventually sent to.
    service: S,
}

pin_project! {
    /// Future returned by [`Delay`]: sleeps for the policy-chosen duration,
    /// then drives the inner service as a [`Oneshot`].
    #[derive(Debug)]
    pub struct ResponseFuture<Request, S>
    where
        S: Service<Request>,
    {
        // Set to `None` exactly once, when the delay elapses and the
        // service is moved into the `Oneshot` future.
        service: Option<S>,
        #[pin]
        state: State<Request, Oneshot<S, Request>>,
    }
}

pin_project! {
    // Two-phase state machine: first `Delaying` on the sleep timer, then
    // `Called` once the request has been handed to the inner service.
    #[project = StateProj]
    #[derive(Debug)]
    enum State<Request, F> {
        Delaying {
            #[pin]
            delay: tokio::time::Sleep,
            // Taken when the delay fires; `None` afterwards.
            req: Option<Request>,
        },
        Called {
            #[pin]
            fut: F,
        },
    }
}
impl<Request, F> State<Request, F> {
fn delaying(delay: tokio::time::Sleep, req: Option<Request>) -> Self {
Self::Delaying { delay, req }
}
fn called(fut: F) -> Self {
Self::Called { fut }
}
}
impl<P, S> Delay<P, S> {
    /// Creates a new [`Delay`] wrapping `service`, holding each request back
    /// for the duration chosen by `policy`.
    pub const fn new<Request>(policy: P, service: S) -> Self
    where
        P: Policy<Request>,
        S: Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
    {
        Self { policy, service }
    }
}
impl<Request, P, S> Service<Request> for Delay<P, S>
where
    P: Policy<Request>,
    S: Service<Request> + Clone,
    S::Error: Into<crate::BoxError>,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<Request, S>;

    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Deliberately do NOT poll the inner service here: reserving a slot
        // now could hold capacity long before the delayed request is actually
        // made. Readiness is instead handled by the Oneshot in the future.
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, request: Request) -> Self::Future {
        // Ask the policy how long this particular request must wait, then
        // stash a clone of the service for the future to consume later.
        let pause = self.policy.delay(&request);
        let service = self.service.clone();
        ResponseFuture {
            service: Some(service),
            state: State::delaying(tokio::time::sleep(pause), Some(request)),
        }
    }
}
impl<Request, S, T, E> Future for ResponseFuture<Request, S>
where
    E: Into<crate::BoxError>,
    S: Service<Request, Response = T, Error = E>,
{
    type Output = Result<T, crate::BoxError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut this = self.project();
        loop {
            match this.state.as_mut().project() {
                StateProj::Delaying { delay, req } => {
                    // Returns Pending (propagated by `ready!`) until the
                    // sleep timer fires.
                    ready!(delay.poll(cx));
                    // Both were stored as `Some` on construction and are
                    // taken exactly once, here; a second take is a bug.
                    let req = req.take().expect("Missing request in delay");
                    let svc = this.service.take().expect("Missing service in delay");
                    let fut = Oneshot::new(svc, req);
                    // Transition to `Called`; the loop polls it immediately
                    // rather than waiting for another wakeup.
                    this.state.set(State::called(fut));
                }
                StateProj::Called { fut } => {
                    return fut.poll(cx).map_err(Into::into);
                }
            };
        }
    }
}

88
vendor/tower/src/hedge/latency.rs vendored Normal file
View File

@@ -0,0 +1,88 @@
use pin_project_lite::pin_project;
use std::time::Duration;
use std::{
future::Future,
pin::Pin,
task::{ready, Context, Poll},
};
use tokio::time::Instant;
use tower_service::Service;
/// Record is the interface for accepting request latency measurements. When
/// a request completes, record is called with the elapsed duration between
/// when the service was called and when the future completed.
pub trait Record {
    /// Accepts one completed-request latency measurement.
    fn record(&mut self, latency: Duration);
}

/// Latency is a middleware that measures request latency and records it to the
/// provided Record instance.
#[derive(Clone, Debug)]
pub struct Latency<R, S> {
    // Sink that receives one measurement per successfully completed request.
    rec: R,
    // The wrapped service being timed.
    service: S,
}

pin_project! {
    /// Future returned by [`Latency`]: resolves the inner future and then
    /// records the time elapsed since `start`.
    #[derive(Debug)]
    pub struct ResponseFuture<R, F> {
        // Timestamp captured when the inner service was called.
        start: Instant,
        rec: R,
        #[pin]
        inner: F,
    }
}
impl<S, R> Latency<R, S>
where
    R: Record + Clone,
{
    /// Creates a new [`Latency`] middleware that reports the latency of every
    /// request made through `service` to `rec`.
    pub const fn new<Request>(rec: R, service: S) -> Self
    where
        S: Service<Request>,
        S::Error: Into<crate::BoxError>,
    {
        Self { rec, service }
    }
}
impl<S, R, Request> Service<Request> for Latency<R, S>
where
    S: Service<Request>,
    S::Error: Into<crate::BoxError>,
    R: Record + Clone,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<R, S::Future>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Readiness is delegated straight to the inner service.
        self.service.poll_ready(cx).map_err(Into::into)
    }

    fn call(&mut self, request: Request) -> Self::Future {
        // Capture the timestamp immediately before dispatching so the
        // measurement covers the inner call itself.
        let start = Instant::now();
        let inner = self.service.call(request);
        ResponseFuture {
            start,
            rec: self.rec.clone(),
            inner,
        }
    }
}
impl<R, F, T, E> Future for ResponseFuture<R, F>
where
    R: Record,
    F: Future<Output = Result<T, E>>,
    E: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();
        match this.inner.poll(cx) {
            Poll::Pending => Poll::Pending,
            // An errored response is propagated without being recorded.
            Poll::Ready(Err(e)) => Poll::Ready(Err(e.into())),
            Poll::Ready(Ok(rsp)) => {
                // Successful completion: report the elapsed wall-clock time.
                let elapsed = Instant::now().saturating_duration_since(*this.start);
                this.rec.record(elapsed);
                Poll::Ready(Ok(rsp))
            }
        }
    }
}

267
vendor/tower/src/hedge/mod.rs vendored Normal file
View File

@@ -0,0 +1,267 @@
//! Pre-emptively retry requests which have been outstanding for longer
//! than a given latency percentile.
#![warn(missing_debug_implementations, missing_docs, unreachable_pub)]
use crate::filter::AsyncFilter;
use futures_util::future::Either;
use pin_project_lite::pin_project;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use std::{
future,
pin::Pin,
task::{Context, Poll},
};
use tracing::error;
mod delay;
mod latency;
mod rotating_histogram;
mod select;
use delay::Delay;
use latency::Latency;
use rotating_histogram::RotatingHistogram;
use select::Select;
// Shared, rotating latency histogram protected by a mutex.
type Histo = Arc<Mutex<RotatingHistogram>>;
// The full middleware stack assembled by `Hedge::new_with_histo`: a `Select`
// racing the direct (latency-recorded) service against a delayed,
// policy-filtered copy of it.
type Service<S, P> = select::Select<
    SelectPolicy<P>,
    Latency<Histo, S>,
    Delay<DelayPolicy, AsyncFilter<Latency<Histo, S>, PolicyPredicate<P>>>,
>;

/// A middleware that pre-emptively retries requests which have been outstanding
/// for longer than a given latency percentile. If either of the original
/// future or the retry future completes, that value is used.
#[derive(Debug)]
pub struct Hedge<S, P>(Service<S, P>);

pin_project! {
    /// The [`Future`] returned by the [`Hedge`] service.
    ///
    /// [`Future`]: std::future::Future
    #[derive(Debug)]
    pub struct Future<S, Request>
    where
        S: tower_service::Service<Request>,
    {
        #[pin]
        inner: S::Future,
    }
}

/// A policy which describes which requests can be cloned and then whether those
/// requests should be retried.
pub trait Policy<Request> {
    /// Called when the request is first received to determine if the request is retryable.
    fn clone_request(&self, req: &Request) -> Option<Request>;
    /// Called after the hedge timeout to determine if the hedge retry should be issued.
    fn can_retry(&self, req: &Request) -> bool;
}

// NOTE: these are pub only because they appear inside a Future<F>
#[doc(hidden)]
#[derive(Clone, Debug)]
pub struct PolicyPredicate<P>(P);

// Delay policy that derives the hedge timeout from a latency percentile of
// the recorded request histogram.
#[doc(hidden)]
#[derive(Debug)]
pub struct DelayPolicy {
    histo: Histo,
    // Quantile (e.g. 0.95) of the latency histogram used as the hedge delay.
    latency_percentile: f32,
}

// Select policy that defers to the user's `Policy` but suppresses hedging
// until the histogram holds enough data points to be meaningful.
#[doc(hidden)]
#[derive(Debug)]
pub struct SelectPolicy<P> {
    policy: P,
    histo: Histo,
    // Minimum number of recorded latencies before hedging is attempted.
    min_data_points: u64,
}
impl<S, P> Hedge<S, P> {
    /// Create a new hedge middleware.
    pub fn new<Request>(
        service: S,
        policy: P,
        min_data_points: u64,
        latency_percentile: f32,
        period: Duration,
    ) -> Hedge<S, P>
    where
        S: tower_service::Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
        P: Policy<Request> + Clone,
    {
        // Start from an empty histogram that rotates every `period`.
        let histo = Arc::new(Mutex::new(RotatingHistogram::new(period)));
        Self::new_with_histo(service, policy, min_data_points, latency_percentile, histo)
    }

    /// A hedge middleware with a prepopulated latency histogram. This is useful
    /// for integration tests.
    pub fn new_with_mock_latencies<Request>(
        service: S,
        policy: P,
        min_data_points: u64,
        latency_percentile: f32,
        period: Duration,
        latencies_ms: &[u64],
    ) -> Hedge<S, P>
    where
        S: tower_service::Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
        P: Policy<Request> + Clone,
    {
        let histo = Arc::new(Mutex::new(RotatingHistogram::new(period)));
        {
            // Seed the read side directly so the mock latencies are visible
            // to percentile queries without waiting for a rotation.
            let mut locked = histo.lock().unwrap();
            for latency in latencies_ms.iter() {
                locked.read().record(*latency).unwrap();
            }
        }
        Self::new_with_histo(service, policy, min_data_points, latency_percentile, histo)
    }

    // Assembles the hedge stack around a (possibly prepopulated) histogram.
    fn new_with_histo<Request>(
        service: S,
        policy: P,
        min_data_points: u64,
        latency_percentile: f32,
        histo: Histo,
    ) -> Hedge<S, P>
    where
        S: tower_service::Service<Request> + Clone,
        S::Error: Into<crate::BoxError>,
        P: Policy<Request> + Clone,
    {
        // Clone the underlying service and wrap both copies in a middleware that
        // records the latencies in a rotating histogram.
        let recorded_a = Latency::new(histo.clone(), service.clone());
        let recorded_b = Latency::new(histo.clone(), service);
        // Check policy to see if the hedge request should be issued.
        let filtered = AsyncFilter::new(recorded_b, PolicyPredicate(policy.clone()));
        // Delay the second request by a percentile of the recorded request latency
        // histogram.
        let delay_policy = DelayPolicy {
            histo: histo.clone(),
            latency_percentile,
        };
        let delayed = Delay::new(delay_policy, filtered);
        // If the request is retryable, issue two requests -- the second one delayed
        // by a latency percentile. Use the first result to complete.
        let select_policy = SelectPolicy {
            policy,
            histo,
            min_data_points,
        };
        Hedge(Select::new(select_policy, recorded_a, delayed))
    }
}
impl<S, P, Request> tower_service::Service<Request> for Hedge<S, P>
where
    S: tower_service::Service<Request> + Clone,
    S::Error: Into<crate::BoxError>,
    P: Policy<Request> + Clone,
{
    type Response = S::Response;
    type Error = crate::BoxError;
    type Future = Future<Service<S, P>, Request>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Forward readiness to the assembled select/delay/latency stack.
        self.0.poll_ready(cx)
    }

    fn call(&mut self, request: Request) -> Self::Future {
        let inner = self.0.call(request);
        Future { inner }
    }
}
impl<S, Request> std::future::Future for Future<S, Request>
where
    S: tower_service::Service<Request>,
    S::Error: Into<crate::BoxError>,
{
    type Output = Result<S::Response, crate::BoxError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Delegate to the wrapped future, boxing any inner error.
        let this = self.project();
        this.inner.poll(cx).map_err(Into::into)
    }
}
// NOTE: `Duration::as_millis` is not a drop-in replacement here: it
// truncates the sub-millisecond remainder, whereas this helper rounds up.
const NANOS_PER_MILLI: u32 = 1_000_000;
const MILLIS_PER_SEC: u64 = 1_000;

/// Converts `duration` to whole milliseconds, rounding any fractional
/// millisecond *up* and saturating at `u64::MAX`.
fn millis(duration: Duration) -> u64 {
    // Round the sub-second nanoseconds up to the next whole millisecond.
    let subsec_ms = (duration.subsec_nanos() + NANOS_PER_MILLI - 1) / NANOS_PER_MILLI;
    let whole_sec_ms = duration.as_secs().saturating_mul(MILLIS_PER_SEC);
    whole_sec_ms.saturating_add(u64::from(subsec_ms))
}
impl latency::Record for Histo {
    /// Records `latency` (rounded up to whole milliseconds) into the write
    /// side of the shared rotating histogram.
    fn record(&mut self, latency: Duration) {
        let mut locked = self.lock().unwrap();
        // Recording is best-effort: a histogram error is logged, not returned,
        // so a failed measurement never fails the request itself.
        locked.write().record(millis(latency)).unwrap_or_else(|e| {
            error!("Failed to write to hedge histogram: {:?}", e);
        })
    }
}
impl<P, Request> crate::filter::AsyncPredicate<Request> for PolicyPredicate<P>
where
    P: Policy<Request>,
{
    // Either an immediately-ready pass-through or a future that never
    // resolves (see `check` below for why pending, not error).
    type Future = Either<
        future::Ready<Result<Request, crate::BoxError>>,
        future::Pending<Result<Request, crate::BoxError>>,
    >;
    type Request = Request;

    /// Decides whether the hedge (second) request may proceed.
    fn check(&mut self, request: Request) -> Self::Future {
        if self.0.can_retry(&request) {
            Either::Left(future::ready(Ok(request)))
        } else {
            // If the hedge retry should not be issued, we simply want to wait
            // for the result of the original request. Therefore we don't want
            // to return an error here. Instead, we use future::pending to ensure
            // that the original request wins the select.
            Either::Right(future::pending())
        }
    }
}
impl<Request> delay::Policy<Request> for DelayPolicy {
    /// The hedge delay is the configured percentile of the recorded request
    /// latencies; the request itself is not consulted.
    fn delay(&self, _req: &Request) -> Duration {
        let mut locked = self.histo.lock().unwrap();
        let millis = locked
            .read()
            .value_at_quantile(self.latency_percentile.into());
        Duration::from_millis(millis)
    }
}

impl<P, Request> select::Policy<Request> for SelectPolicy<P>
where
    P: Policy<Request>,
{
    /// Clones the request via the user policy, but suppresses the clone (and
    /// thus the hedge) while the histogram is still sparse.
    fn clone_request(&self, req: &Request) -> Option<Request> {
        self.policy.clone_request(req).filter(|_| {
            let mut locked = self.histo.lock().unwrap();
            // Do not attempt a retry if there are insufficiently many data
            // points in the histogram.
            locked.read().len() >= self.min_data_points
        })
    }
}

View File

@@ -0,0 +1,73 @@
use hdrhistogram::Histogram;
use std::time::Duration;
use tokio::time::Instant;
use tracing::trace;
/// This represents a "rotating" histogram which stores two histograms, one which
/// should be read and one which should be written to. Every period, the read
/// histogram is discarded and replaced by the write histogram. The idea here
/// is that the read histogram should always contain a full period (the previous
/// period) of write operations.
#[derive(Debug)]
pub struct RotatingHistogram {
    // Previous period's measurements; queried for percentiles.
    read: Histogram<u64>,
    // Current period's measurements; becomes `read` on the next rotation.
    write: Histogram<u64>,
    // When the histograms were last swapped (or the struct was created).
    last_rotation: Instant,
    // How often the histograms rotate.
    period: Duration,
}
impl RotatingHistogram {
    /// Creates an empty rotating histogram that swaps its buffers every
    /// `period`.
    pub fn new(period: Duration) -> RotatingHistogram {
        RotatingHistogram {
            // Use an auto-resizing histogram to avoid choosing
            // a maximum latency bound for all users.
            read: Histogram::<u64>::new(3).expect("Invalid histogram params"),
            write: Histogram::<u64>::new(3).expect("Invalid histogram params"),
            last_rotation: Instant::now(),
            period,
        }
    }

    /// Returns the read-side histogram, rotating first if a period elapsed.
    pub fn read(&mut self) -> &mut Histogram<u64> {
        self.maybe_rotate();
        &mut self.read
    }

    /// Returns the write-side histogram, rotating first if a period elapsed.
    pub fn write(&mut self) -> &mut Histogram<u64> {
        self.maybe_rotate();
        &mut self.write
    }

    // Catches `last_rotation` up to now: one elapsed period rotates
    // write -> read; two or more clears both sides (all data is stale).
    fn maybe_rotate(&mut self) {
        let delta = Instant::now().saturating_duration_since(self.last_rotation);
        // TODO: replace with delta.duration_div when it becomes stable.
        // NOTE(review): the `as u32` cast truncates if more than u32::MAX
        // periods elapsed — presumably unreachable in practice; confirm.
        let rotations = (nanos(delta) / nanos(self.period)) as u32;
        if rotations >= 2 {
            trace!("Time since last rotation is {:?}. clearing!", delta);
            self.clear();
        } else if rotations == 1 {
            trace!("Time since last rotation is {:?}. rotating!", delta);
            self.rotate();
        }
        // Advance by whole periods only, keeping boundaries aligned.
        self.last_rotation += self.period * rotations;
    }

    // Swaps the buffers and empties the new write side.
    fn rotate(&mut self) {
        std::mem::swap(&mut self.read, &mut self.write);
        trace!("Rotated {:?} points into read", self.read.len());
        self.write.clear();
    }

    // Discards all recorded data from both buffers.
    fn clear(&mut self) {
        self.read.clear();
        self.write.clear();
    }
}
const NANOS_PER_SEC: u64 = 1_000_000_000;

/// Total nanoseconds in `duration`, saturating at `u64::MAX` for durations
/// too large to represent.
fn nanos(duration: Duration) -> u64 {
    let whole_secs = duration.as_secs().saturating_mul(NANOS_PER_SEC);
    whole_secs.saturating_add(u64::from(duration.subsec_nanos()))
}

105
vendor/tower/src/hedge/select.rs vendored Normal file
View File

@@ -0,0 +1,105 @@
use pin_project_lite::pin_project;
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use tower_service::Service;
/// A policy which decides which requests can be cloned and sent to the B
/// service.
pub trait Policy<Request> {
    /// Returns a clone of `req` if a hedge request should be sent to the B
    /// service, or `None` to use only the A service.
    fn clone_request(&self, req: &Request) -> Option<Request>;
}

/// Select is a middleware which attempts to clone the request and sends the
/// original request to the A service and, if the request was able to be cloned,
/// the cloned request to the B service. Both resulting futures will be polled
/// and whichever future completes first will be used as the result.
#[derive(Debug)]
pub struct Select<P, A, B> {
    policy: P,
    a: A,
    b: B,
}

pin_project! {
    /// Future that races `a_fut` against the (optional) hedge `b_fut`.
    #[derive(Debug)]
    pub struct ResponseFuture<AF, BF> {
        #[pin]
        a_fut: AF,
        // `None` when the policy declined to clone the request.
        #[pin]
        b_fut: Option<BF>,
    }
}
impl<P, A, B> Select<P, A, B> {
    /// Creates a new [`Select`] that races service `a` against service `b`
    /// according to `policy`.
    pub const fn new<Request>(policy: P, a: A, b: B) -> Self
    where
        P: Policy<Request>,
        A: Service<Request>,
        A::Error: Into<crate::BoxError>,
        B: Service<Request, Response = A::Response>,
        B::Error: Into<crate::BoxError>,
    {
        Self { policy, a, b }
    }
}
impl<P, A, B, Request> Service<Request> for Select<P, A, B>
where
    P: Policy<Request>,
    A: Service<Request>,
    A::Error: Into<crate::BoxError>,
    B: Service<Request, Response = A::Response>,
    B::Error: Into<crate::BoxError>,
{
    type Response = A::Response;
    type Error = crate::BoxError;
    type Future = ResponseFuture<A::Future, B::Future>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // Ready only when BOTH inner services are ready; an error from
        // either side is surfaced immediately.
        // NOTE(review): both services are polled even when the first is
        // pending, so a ready service may hold its readiness across polls —
        // presumably acceptable for the hedge use case; confirm upstream.
        match (self.a.poll_ready(cx), self.b.poll_ready(cx)) {
            (Poll::Ready(Ok(())), Poll::Ready(Ok(()))) => Poll::Ready(Ok(())),
            (Poll::Ready(Err(e)), _) => Poll::Ready(Err(e.into())),
            (_, Poll::Ready(Err(e))) => Poll::Ready(Err(e.into())),
            _ => Poll::Pending,
        }
    }

    fn call(&mut self, request: Request) -> Self::Future {
        // Ask the policy for a clone; if granted, dispatch the clone to `b`
        // before sending the original request to `a`.
        let b_fut = if let Some(cloned_req) = self.policy.clone_request(&request) {
            Some(self.b.call(cloned_req))
        } else {
            None
        };
        ResponseFuture {
            a_fut: self.a.call(request),
            b_fut,
        }
    }
}
impl<AF, BF, T, AE, BE> Future for ResponseFuture<AF, BF>
where
    AF: Future<Output = Result<T, AE>>,
    AE: Into<crate::BoxError>,
    BF: Future<Output = Result<T, BE>>,
    BE: Into<crate::BoxError>,
{
    type Output = Result<T, crate::BoxError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();

        // The primary future is always polled first and wins ties.
        if let Poll::Ready(result) = this.a_fut.poll(cx) {
            return Poll::Ready(result.map_err(Into::into));
        }

        // Poll the hedge future only if the policy issued one.
        match this.b_fut.as_pin_mut() {
            Some(hedge) => match hedge.poll(cx) {
                Poll::Ready(result) => Poll::Ready(result.map_err(Into::into)),
                Poll::Pending => Poll::Pending,
            },
            None => Poll::Pending,
        }
    }
}