chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

View File

@@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"2035b550f0a2c1ef7c7dd32c17c85669d48698483fc2f2eab41bf7ef3501e7c7","Cargo.lock":"bb8730297052a27abfd9cdb5cecb5a2fdad74288efca3fe654872e96b1802a5e","Cargo.toml":"8dfd7690d74ebffa3464aee3ab1d30f2489a9ebceb26d45cdc87314162ec5b2c","Cargo.toml.orig":"53e8e6f93a941527f84d2513cc447413f542acc864c77f1cddfdfd682870dc3a","README.md":"beb1d5ce57c5311f246ad02e808e709f11bae316c36dbeac75bea24a46ef6d61","src/controller/future_hash_map.rs":"f0be749595a5ed1f9954b29745b439b8f9d824871b4e18a74835fb60204796c7","src/controller/mod.rs":"9802d8f71920d9b164d49117ce5a23ea25b8d2136bf1cbac0abc94385dafab77","src/controller/runner.rs":"f4461d97516693c595eedbd179387cb77833939f15b0ce1ec8f0b2cc963df414","src/events.rs":"b2e0dea5e4c53e6186284c008b859fcdaa0a16b3f9bf89493cb9fa6549350e27","src/finalizer.rs":"03eeacd7d6af0212ffcb9be461c1e4ccd279f840c3a95590ff0a17c194516af4","src/lib.rs":"76e2151a4f58ffa656789c0ba775741dc142a9489ca55a4b42401b9d9df1fc14","src/reflector/dispatcher.rs":"052d7b2f05213ca4d7eb89f321de261cc375e08362171aeeea40478b1dd2a85b","src/reflector/mod.rs":"f256f596890ef1688dae949bea39df64a5c445ac92600c0b853fee1a15949462","src/reflector/object_ref.rs":"4c5b39c5f47c45b1c1b7c505e86d838fada9e2b70cdc16af0fdd5e24628cc0a0","src/reflector/store.rs":"700636b748ee8ee7a237f7eedc2fec66b6c171064fc43be566124b857aa562c6","src/scheduler.rs":"1e3488dc81414988934dfc5a484e0d19d485fdbb6c4de467389aa79261da7cd2","src/utils/backoff_reset_timer.rs":"86454b0a814ab6ce07e8820b0a50fbe9e5485120bff2aed68766d8028c7d45c5","src/utils/delayed_init.rs":"169518cf53516e92040389083e6237f593e47fb1f84ee6d8a2a790b097fdd584","src/utils/event_decode.rs":"ee4a2c585639041c8234170fe5196889c6a2a8e7467da9e805e5977a9215b033","src/utils/event_modify.rs":"8fe1ac80d801fc82a898714e4204fdc651e373711568c4faa4590a0889543a57","src/utils/mod.rs":"60ea367a6f9d1c0ef5efbb67961e6fcdd5450c463f4ac0ce92fef1695083f815","src/utils/predicate.rs":"2f636cb4b4a5fe0603b8ca9bd813aa3b0a4fdc4dcab85fb0db4152a089a941f9","src/utils/r
eflect.rs":"98bbcb83b043f4a1f4b01f9dee61fa407a46efa75c62b17a1ee626fb492aff65","src/utils/stream_backoff.rs":"35181bb93f779a7570210bd86cf66848585595277b16c388e0a03072edc644e3","src/utils/watch_ext.rs":"29c12cb13446bee7d7454163e77131c4488e6da7064628e0f2446efa50f3d3f9","src/wait.rs":"f3e0401093304a5856c1ed8da072dd06019e52c852cd39ae31be3d4936e7639d","src/watcher.rs":"3223ac547d5783a7392f9bf68007fac8198e22543d7cc288886e77547f6a7c39"},"package":"88f34cfab9b4bd8633062e0e85edb81df23cb09f159f2e31c60b069ae826ffdc"}

View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "c9b7b70f7fa0378ea1cd6ac697c1a0c0bb7b7dd3"
},
"path_in_vcs": "kube-runtime"
}

2250
vendor/kube-runtime/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load Diff

183
vendor/kube-runtime/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,183 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.81.0"
name = "kube-runtime"
version = "0.99.0"
authors = [
"clux <sszynrae@gmail.com>",
"Natalie Klestrup Röijezon <nat@nullable.se>",
"kazk <kazk.dev@gmail.com>",
]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Kubernetes controller runtime"
readme = "README.md"
keywords = [
"kubernetes",
"runtime",
"reflector",
"watcher",
"controller",
]
categories = [
"web-programming::http-client",
"caching",
"network-programming",
]
license = "Apache-2.0"
repository = "https://github.com/kube-rs/kube"
resolver = "1"
[package.metadata.docs.rs]
features = [
"k8s-openapi/latest",
"unstable-runtime",
]
rustdoc-args = [
"--cfg",
"docsrs",
]
[lib]
name = "kube_runtime"
path = "src/lib.rs"
[dependencies.ahash]
version = "0.8"
[dependencies.async-broadcast]
version = "0.7.0"
[dependencies.async-stream]
version = "0.3.5"
[dependencies.async-trait]
version = "0.1.64"
[dependencies.backon]
version = "1.3"
[dependencies.educe]
version = "0.6.0"
features = [
"Clone",
"Debug",
"Hash",
"PartialEq",
]
default-features = false
[dependencies.futures]
version = "0.3.17"
features = ["async-await"]
default-features = false
[dependencies.hashbrown]
version = "0.15.0"
[dependencies.hostname]
version = "0.4"
[dependencies.json-patch]
version = "4"
[dependencies.k8s-openapi]
version = "0.24.0"
default-features = false
[dependencies.kube-client]
version = "=0.99.0"
features = [
"jsonpatch",
"client",
]
default-features = false
[dependencies.parking_lot]
version = "0.12.0"
[dependencies.pin-project]
version = "1.0.4"
[dependencies.serde]
version = "1.0.130"
[dependencies.serde_json]
version = "1.0.68"
[dependencies.thiserror]
version = "2.0.3"
[dependencies.tokio]
version = "1.14.0"
features = ["time"]
[dependencies.tokio-util]
version = "0.7.0"
features = ["time"]
[dependencies.tracing]
version = "0.1.36"
[dev-dependencies.k8s-openapi]
version = "0.24.0"
features = ["latest"]
default-features = false
[dev-dependencies.kube]
version = "<2.0.0, >=0.98.0"
features = [
"derive",
"client",
"runtime",
]
[dev-dependencies.rand]
version = "0.9.0"
[dev-dependencies.schemars]
version = "0.8.6"
[dev-dependencies.serde_json]
version = "1.0.68"
[dev-dependencies.serde_yaml]
version = "0.9.19"
[dev-dependencies.tokio]
version = "1.14.0"
features = [
"full",
"test-util",
]
[dev-dependencies.tracing-subscriber]
version = "0.3.17"
[features]
unstable-runtime = [
"unstable-runtime-subscribe",
"unstable-runtime-stream-control",
"unstable-runtime-reconcile-on",
]
unstable-runtime-reconcile-on = []
unstable-runtime-stream-control = []
unstable-runtime-subscribe = []
[lints.rust]
unsafe_code = "forbid"

177
vendor/kube-runtime/README.md vendored Normal file
View File

@@ -0,0 +1,177 @@
# kube-rs
[![Crates.io](https://img.shields.io/crates/v/kube.svg)](https://crates.io/crates/kube)
[![Rust 1.81](https://img.shields.io/badge/MSRV-1.81-dea584.svg)](https://github.com/rust-lang/rust/releases/tag/1.81.0)
[![Tested against Kubernetes v1.28 and above](https://img.shields.io/badge/MK8SV-v1.28-326ce5.svg)](https://kube.rs/kubernetes-version)
[![Best Practices](https://bestpractices.coreinfrastructure.org/projects/5413/badge)](https://bestpractices.coreinfrastructure.org/projects/5413)
[![Discord chat](https://img.shields.io/discord/500028886025895936.svg?logo=discord&style=plastic)](https://discord.gg/tokio)
A [Rust](https://rust-lang.org/) client for [Kubernetes](http://kubernetes.io) in the style of a more generic [client-go](https://github.com/kubernetes/client-go), a runtime abstraction inspired by [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime), and a derive macro for [CRDs](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/) inspired by [kubebuilder](https://book.kubebuilder.io/reference/generating-crd.html). Hosted by [CNCF](https://cncf.io/) as a [Sandbox Project](https://www.cncf.io/sandbox-projects/).
These crates build upon Kubernetes [apimachinery](https://github.com/kubernetes/apimachinery/blob/master/pkg/apis/meta/v1/types.go) + [api concepts](https://kubernetes.io/docs/reference/using-api/api-concepts/) to enable generic abstractions. These abstractions allow Rust reinterpretations of reflectors, controllers, and custom resource interfaces, so that you can write applications easily.
## Installation
Select a version of `kube` along with the generated [k8s-openapi](https://github.com/Arnavion/k8s-openapi) structs at your chosen [Kubernetes version](https://kube.rs/kubernetes-version/):
```toml
[dependencies]
kube = { version = "0.99.0", features = ["runtime", "derive"] }
k8s-openapi = { version = "0.24.0", features = ["latest"] }
```
See [features](https://kube.rs/features/) for a quick overview of default-enabled / opt-in functionality.
## Upgrading
See [kube.rs/upgrading](https://kube.rs/upgrading/).
Noteworthy changes are highlighted in [releases](https://github.com/kube-rs/kube/releases), and archived in the [changelog](https://kube.rs/changelog/).
## Usage
See the **[examples directory](https://github.com/kube-rs/kube/blob/main/examples)** for how to use any of these crates.
- **[kube API Docs](https://docs.rs/kube/)**
- **[kube.rs](https://kube.rs)**
Official examples:
- [version-rs](https://github.com/kube-rs/version-rs): lightweight deployment `reflector` using axum
- [controller-rs](https://github.com/kube-rs/controller-rs): `Controller` of a crd inside actix
For real world projects see [ADOPTERS](https://kube.rs/adopters/).
## Api
The [`Api`](https://docs.rs/kube/latest/kube/struct.Api.html) is what interacts with Kubernetes resources, and is generic over [`Resource`](https://docs.rs/kube/latest/kube/trait.Resource.html):
```rust
use k8s_openapi::api::core::v1::Pod;
let pods: Api<Pod> = Api::default_namespaced(client);
let pod = pods.get("blog").await?;
println!("Got pod: {pod:?}");
let patch = json!({"spec": {
"activeDeadlineSeconds": 5
}});
let pp = PatchParams::apply("kube");
let patched = pods.patch("blog", &pp, &Patch::Apply(patch)).await?;
assert_eq!(patched.spec.active_deadline_seconds, Some(5));
pods.delete("blog", &DeleteParams::default()).await?;
```
See the examples ending in `_api` examples for more detail.
## Custom Resource Definitions
Working with custom resources uses automatic code-generation via [proc_macros in kube-derive](https://docs.rs/kube/latest/kube/derive.CustomResource.html).
You need to add `#[derive(CustomResource, JsonSchema)]` and some `#[kube(attrs..)]` on a __spec__ struct:
```rust
#[derive(CustomResource, Debug, Serialize, Deserialize, Default, Clone, JsonSchema)]
#[kube(group = "kube.rs", version = "v1", kind = "Document", namespaced)]
pub struct DocumentSpec {
title: String,
content: String,
}
```
Then you can use the generated wrapper struct `Document` as a [`kube::Resource`](https://docs.rs/kube/*/kube/trait.Resource.html):
```rust
let docs: Api<Document> = Api::default_namespaced(client);
let d = Document::new("guide", DocumentSpec::default());
println!("doc: {:?}", d);
println!("crd: {:?}", serde_yaml::to_string(&Document::crd()));
```
There are a ton of kubebuilder-like instructions that you can annotate with here. See the [documentation](https://docs.rs/kube/latest/kube/derive.CustomResource.html) or the `crd_` prefixed [examples](https://github.com/kube-rs/kube/blob/main/examples) for more.
**NB:** `#[derive(CustomResource)]` requires the `derive` feature enabled on `kube`.
## Runtime
The `runtime` module exports the `kube_runtime` crate and contains higher level abstractions on top of the `Api` and `Resource` types so that you don't have to do all the `watch`/`resourceVersion`/storage book-keeping yourself.
### Watchers
A streaming interface (similar to informers) that presents [`watcher::Event`](https://docs.rs/kube/latest/kube/runtime/watcher/enum.Event.html)s and does automatic relists under the hood.
```rust
let api = Api::<Pod>::default_namespaced(client);
let stream = watcher(api, Config::default()).default_backoff().applied_objects();
```
This now gives a continual stream of events and you do not need to care about the watch having to restart, or connections dropping.
```rust
while let Some(event) = stream.try_next().await? {
println!("Applied: {}", event.name_any());
}
```
Note the base items from a `watcher` stream are an abstraction above the native `WatchEvent` to allow for store buffering. If you are following along to "see what changed", you can use utilities from [`WatchStreamExt`](https://docs.rs/kube/latest/kube/runtime/trait.WatchStreamExt.html), such as `applied_objects` to get a more conventional stream.
### Reflectors
A `reflector` is a `watcher` with `Store` on `K`. It acts on all the `Event<K>` exposed by `watcher` to ensure that the state in the `Store` is as accurate as possible.
```rust
let nodes: Api<Node> = Api::all(client);
let lp = Config::default().labels("kubernetes.io/arch=amd64");
let (reader, writer) = reflector::store();
let rf = reflector(writer, watcher(nodes, lp));
```
At this point you can listen to the `reflector` as if it was a `watcher`, but you can also query the `reader` at any point.
### Controllers
A `Controller` is a `reflector` along with an arbitrary number of watchers that schedule events internally to send events through a reconciler:
```rust
Controller::new(root_kind_api, Config::default())
.owns(child_kind_api, Config::default())
.run(reconcile, error_policy, context)
.for_each(|res| async move {
match res {
Ok(o) => info!("reconciled {:?}", o),
Err(e) => warn!("reconcile failed: {}", Report::from(e)),
}
})
.await;
```
Here `reconcile` and `error_policy` refer to functions you define. The first will be called when the root or child elements change, and the second when the `reconciler` returns an `Err`.
See the [controller guide](https://kube.rs/controllers/intro/) for how to write these.
## TLS
Uses [rustls](https://github.com/rustls/rustls) with `ring` provider (default) or `aws-lc-rs` provider (optional).
To switch [rustls providers](https://docs.rs/rustls/latest/rustls/crypto/struct.CryptoProvider.html), turn off `default-features` and enable the `aws-lc-rs` feature:
```toml
kube = { version = "0.99.0", default-features = false, features = ["client", "rustls-tls", "aws-lc-rs"] }
```
To switch to `openssl`, turn off `default-features`, and enable the `openssl-tls` feature:
```toml
kube = { version = "0.99.0", default-features = false, features = ["client", "openssl-tls"] }
```
This will pull in `openssl` and `hyper-openssl`. If `default-features` is left enabled, you will pull in two TLS stacks, and the default will remain as `rustls`.
## musl-libc
Kube will work with [distroless](https://github.com/kube-rs/controller-rs/blob/main/Dockerfile), [scratch](https://github.com/constellation-rs/constellation/blob/27dc89d0d0e34896fd37d638692e7dfe60a904fc/Dockerfile), and `alpine` (it's also possible to use alpine as a builder [with some caveats](https://github.com/kube-rs/kube/issues/331#issuecomment-715962188)).
## License
Apache 2.0 licensed. See LICENSE for details.

View File

@@ -0,0 +1,112 @@
use futures::{Future, FutureExt, Stream};
use std::{
collections::HashMap,
hash::Hash,
pin::Pin,
task::{Context, Poll},
};
/// Variant of [`tokio_stream::StreamMap`](https://docs.rs/tokio-stream/0.1.3/tokio_stream/struct.StreamMap.html)
/// that only runs [`Future`]s, and uses a [`HashMap`] as the backing store, giving (amortized) O(1) insertion
/// and membership checks.
///
/// Just like for `StreamMap`'s `S`, `F` must be [`Unpin`], since [`HashMap`] is free to move
/// entries as it pleases (for example: resizing the backing array).
///
/// NOTE: Contrary to `StreamMap`, `FutureHashMap` does *not* try to be fair. The polling order
/// is arbitrary, but generally stable while the future set is (although this should not be relied on!).
#[derive(Debug)]
pub struct FutureHashMap<K, F> {
    // Backing store: key -> in-flight future. A future is removed as soon as
    // it completes (see the `Stream` impl below).
    futures: HashMap<K, F>,
}
impl<K, F> Default for FutureHashMap<K, F> {
    /// Returns an empty map with no futures registered.
    fn default() -> Self {
        let futures = HashMap::default();
        Self { futures }
    }
}
impl<K, F> FutureHashMap<K, F>
where
    K: Hash + Eq,
{
    /// Inserts `future` into the key `key`, returning the old future if there was one
    pub fn insert(&mut self, key: K, future: F) -> Option<F> {
        self.futures.insert(key, future)
    }

    /// Returns `true` if a future is currently registered under `key`.
    pub fn contains_key(&self, key: &K) -> bool {
        self.futures.contains_key(key)
    }

    /// Returns the number of futures currently in the map.
    pub fn len(&self) -> usize {
        self.futures.len()
    }

    /// Returns `true` if no futures are currently in the map.
    ///
    /// Provided as the conventional companion to [`FutureHashMap::len`]
    /// (Clippy's `len_without_is_empty`).
    pub fn is_empty(&self) -> bool {
        self.futures.is_empty()
    }
}
impl<K, F> Stream for FutureHashMap<K, F>
where
    K: Hash + Clone + Eq,
    F: Future + Unpin,
    Self: Unpin,
{
    type Item = F::Output;

    /// Polls every stored future (in arbitrary order), removing and yielding
    /// the first one that completes.
    ///
    /// Terminates with `Poll::Ready(None)` once the map is empty.
    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // Scan all futures; the first `Ready` one wins. The key is cloned so
        // the entry can be removed once the mutable borrow of `futures` ends.
        let key_and_msg =
            self.as_mut()
                .futures
                .iter_mut()
                .find_map(|(key, future)| match future.poll_unpin(cx) {
                    Poll::Ready(msg) => Some((key.clone(), msg)),
                    Poll::Pending => None,
                });
        match key_and_msg {
            Some((key, msg)) => {
                // Drop the finished future and emit its output.
                self.as_mut().futures.remove(&key);
                Poll::Ready(Some(msg))
            }
            // No futures left at all: the stream is exhausted.
            None if self.futures.is_empty() => Poll::Ready(None),
            None => Poll::Pending,
        }
    }
}
#[cfg(test)]
mod tests {
    use std::{future, task::Poll};

    use super::FutureHashMap;
    use futures::{channel::mpsc, poll, StreamExt};

    /// Every inserted (already-ready) future should be yielded exactly once,
    /// after which the stream terminates.
    #[tokio::test]
    async fn fhm_should_forward_all_values_and_shut_down() {
        let mut fhm = FutureHashMap::default();
        let count = 100;
        for i in 0..count {
            fhm.insert(i, future::ready(i));
        }
        let mut values = fhm.collect::<Vec<u16>>().await;
        // Polling order is arbitrary, so sort before comparing.
        values.sort_unstable();
        assert_eq!(values, (0..count).collect::<Vec<u16>>());
    }

    /// The stream must stay `Pending` while any source future is unfinished,
    /// and only report `Ready(None)` once all of them have completed.
    #[tokio::test]
    async fn fhm_should_stay_alive_until_all_sources_finish() {
        let mut fhm = FutureHashMap::default();
        let (tx0, mut rx0) = mpsc::unbounded::<()>();
        let (tx1, mut rx1) = mpsc::unbounded::<()>();
        fhm.insert(0, rx0.next());
        fhm.insert(1, rx1.next());
        assert_eq!(poll!(fhm.next()), Poll::Pending);
        // Closing a channel completes the corresponding `next()` future with `None`.
        drop(tx0);
        assert_eq!(poll!(fhm.next()), Poll::Ready(Some(None)));
        assert_eq!(poll!(fhm.next()), Poll::Pending);
        drop(tx1);
        assert_eq!(poll!(fhm.next()), Poll::Ready(Some(None)));
        assert_eq!(poll!(fhm.next()), Poll::Ready(None));
    }
}

1804
vendor/kube-runtime/src/controller/mod.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,481 @@
use super::future_hash_map::FutureHashMap;
use crate::scheduler::{ScheduleRequest, Scheduler};
use futures::{FutureExt, Stream, StreamExt};
use pin_project::pin_project;
use std::{
convert::Infallible,
future::{self, Future},
hash::Hash,
pin::Pin,
task::{Context, Poll},
};
use thiserror::Error;
/// Errors emitted by [`Runner`]'s `Stream` implementation.
#[derive(Debug, Error)]
pub enum Error<ReadyErr> {
    /// The readiness gate (see `Runner::delay_tasks_until`) resolved with an error.
    #[error("readiness gate failed to become ready")]
    Readiness(#[source] ReadyErr),
}
/// Pulls items from a [`Scheduler`], and runs an action for each item in parallel,
/// while making sure to not process [equal](`Eq`) items multiple times at once.
///
/// If an item is to be emitted from the [`Scheduler`] while an equal item is
/// already being processed then it will be held pending until the current item
/// is finished.
#[pin_project]
pub struct Runner<T, R, F, MkF, Ready = future::Ready<Result<(), Infallible>>> {
    #[pin]
    scheduler: Scheduler<T, R>,
    /// Factory invoked to build the future that processes each message.
    run_msg: MkF,
    /// In-flight futures keyed by message; doubles as the dedup set.
    slots: FutureHashMap<T, F>,
    /// Readiness gate: no task is started before this resolves with `Ok(())`.
    #[pin]
    ready_to_execute_after: futures::future::Fuse<Ready>,
    /// Set once `ready_to_execute_after` has resolved successfully.
    is_ready_to_execute: bool,
    /// Set after a readiness error is emitted; the stream then stays terminated.
    stopped: bool,
    /// Upper bound on concurrently running tasks; `0` means unbounded.
    max_concurrent_executions: u16,
}
impl<T, R, F, MkF> Runner<T, R, F, MkF>
where
    F: Future + Unpin,
    MkF: FnMut(&T) -> F,
{
    /// Creates a new [`Runner`]. `max_concurrent_executions` can be used to
    /// limit the number of items that are run concurrently. It can be set to 0 to
    /// allow for unbounded concurrency.
    pub fn new(scheduler: Scheduler<T, R>, max_concurrent_executions: u16, run_msg: MkF) -> Self {
        Self {
            scheduler,
            run_msg,
            slots: FutureHashMap::default(),
            // The default readiness gate resolves immediately, so tasks may
            // start right away unless `delay_tasks_until` is used.
            ready_to_execute_after: future::ready(Ok(())).fuse(),
            is_ready_to_execute: false,
            stopped: false,
            max_concurrent_executions,
        }
    }

    /// Wait for `ready_to_execute_after` to complete before starting to run any scheduled tasks.
    ///
    /// `scheduler` will still be polled in the meantime.
    pub fn delay_tasks_until<Ready, ReadyErr>(
        self,
        ready_to_execute_after: Ready,
    ) -> Runner<T, R, F, MkF, Ready>
    where
        Ready: Future<Output = Result<(), ReadyErr>>,
    {
        Runner {
            scheduler: self.scheduler,
            run_msg: self.run_msg,
            slots: self.slots,
            ready_to_execute_after: ready_to_execute_after.fuse(),
            // Readiness is reset: tasks stay held until the new gate resolves.
            is_ready_to_execute: false,
            stopped: false,
            max_concurrent_executions: self.max_concurrent_executions,
        }
    }
}
#[allow(clippy::match_wildcard_for_single_variants)]
impl<T, R, F, MkF, Ready, ReadyErr> Stream for Runner<T, R, F, MkF, Ready>
where
    T: Eq + Hash + Clone + Unpin,
    R: Stream<Item = ScheduleRequest<T>>,
    F: Future + Unpin,
    MkF: FnMut(&T) -> F,
    Ready: Future<Output = Result<(), ReadyErr>>,
{
    type Item = Result<F::Output, Error<ReadyErr>>;

    /// Drives the runner: drains finished task slots, advances the readiness
    /// gate, then pulls new messages from the scheduler into free slots.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        // Once a readiness error has been emitted the stream stays terminated.
        if *this.stopped {
            return Poll::Ready(None);
        }
        let slots = this.slots;
        let scheduler = &mut this.scheduler;
        // Give every in-flight task a chance to finish first; a completed
        // task's output is yielded immediately.
        let has_active_slots = match slots.poll_next_unpin(cx) {
            Poll::Ready(Some(result)) => return Poll::Ready(Some(Ok(result))),
            Poll::Ready(None) => false,
            Poll::Pending => true,
        };
        // Advance the readiness gate; on failure, emit the error once and stop.
        match this.ready_to_execute_after.poll(cx) {
            Poll::Ready(Ok(())) => *this.is_ready_to_execute = true,
            Poll::Ready(Err(err)) => {
                *this.stopped = true;
                return Poll::Ready(Some(Err(Error::Readiness(err))));
            }
            Poll::Pending => {}
        }
        loop {
            // If we are at our limit or not ready to start executing, then there's
            // no point in trying to get something from the scheduler, so just put
            // all expired messages emitted from the queue into pending.
            if (*this.max_concurrent_executions > 0
                && slots.len() >= *this.max_concurrent_executions as usize)
                || !*this.is_ready_to_execute
            {
                match scheduler.as_mut().hold().poll_next_unpin(cx) {
                    Poll::Pending | Poll::Ready(None) => break Poll::Pending,
                    // The above future never returns Poll::Ready(Some(_)).
                    _ => unreachable!(),
                }
            }
            // Try to take a new message that isn't already being processed
            // leave the already-processing ones in the queue, so that we can take them once
            // we're free again.
            let next_msg_poll = scheduler
                .as_mut()
                .hold_unless(|msg| !slots.contains_key(msg))
                .poll_next_unpin(cx);
            match next_msg_poll {
                Poll::Ready(Some(msg)) => {
                    let msg_fut = (this.run_msg)(&msg);
                    assert!(
                        slots.insert(msg, msg_fut).is_none(),
                        "Runner tried to replace a running future.. please report this as a kube-rs bug!"
                    );
                    // Request an immediate re-poll so the freshly inserted
                    // future gets polled for the first time.
                    cx.waker().wake_by_ref();
                }
                Poll::Ready(None) => {
                    break if has_active_slots {
                        // We're done listening for new messages, but still have some that
                        // haven't finished quite yet
                        Poll::Pending
                    } else {
                        Poll::Ready(None)
                    };
                }
                Poll::Pending => break Poll::Pending,
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::{Error, Runner};
    use crate::{
        scheduler::{scheduler, ScheduleRequest},
        utils::delayed_init::{self, DelayedInit},
    };
    use futures::{
        channel::{mpsc, oneshot},
        future, poll, stream, Future, SinkExt, StreamExt, TryStreamExt,
    };
    use std::{
        cell::RefCell,
        collections::HashMap,
        pin::Pin,
        sync::{Arc, Mutex},
        task::{Context, Poll},
        time::Duration,
    };
    use tokio::{
        runtime::Handle,
        task::yield_now,
        time::{advance, pause, sleep, timeout, Instant},
    };

    /// Two equal messages must never be processed concurrently: the second
    /// request is held until the first task finishes.
    #[tokio::test]
    async fn runner_should_never_run_two_instances_at_once() {
        pause();
        let rc = RefCell::new(());
        let mut count = 0;
        let (mut sched_tx, sched_rx) = mpsc::unbounded();
        let mut runner = Box::pin(
            // The debounce period needs to zero because a debounce period > 0
            // will lead to the second request to be discarded.
            Runner::new(scheduler(sched_rx), 0, |()| {
                count += 1;
                // Panic if this ref is already held, to simulate some unsafe action..
                let mutex_ref = rc.borrow_mut();
                Box::pin(async move {
                    sleep(Duration::from_secs(1)).await;
                    drop(mutex_ref);
                })
            })
            .for_each(|_| async {}),
        );
        sched_tx
            .send(ScheduleRequest {
                message: (),
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        assert!(poll!(runner.as_mut()).is_pending());
        sched_tx
            .send(ScheduleRequest {
                message: (),
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        future::join(
            async {
                tokio::time::sleep(Duration::from_secs(5)).await;
                drop(sched_tx);
            },
            runner,
        )
        .await;
        // Validate that we actually ran both requests
        assert_eq!(count, 2);
    }

    // Test MUST be single-threaded to be consistent, since it concerns a relatively messy
    // interplay between multiple tasks
    #[tokio::test(flavor = "current_thread")]
    async fn runner_should_wake_when_scheduling_messages() {
        let (mut sched_tx, sched_rx) = mpsc::unbounded();
        let (result_tx, result_rx) = oneshot::channel();
        let mut runner = Runner::new(scheduler(sched_rx), 0, |msg: &u8| std::future::ready(*msg));
        // Start a background task that starts listening /before/ we enqueue the message
        // We can't just use Stream::poll_next(), since that bypasses the waker system
        Handle::current().spawn(async move { result_tx.send(runner.next().await).unwrap() });
        // Ensure that the background task actually gets to initiate properly, and starts polling the runner
        yield_now().await;
        sched_tx
            .send(ScheduleRequest {
                message: 8,
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        // Eventually the background task should finish up and report the message received,
        // a timeout here *should* mean that the background task isn't getting awoken properly
        // when the new message is ready.
        assert_eq!(
            timeout(Duration::from_secs(1), result_rx)
                .await
                .unwrap()
                .unwrap()
                .transpose()
                .unwrap(),
            Some(8)
        );
    }

    /// No task may start before the readiness gate resolves.
    #[tokio::test]
    async fn runner_should_wait_for_readiness() {
        let is_ready = Mutex::new(false);
        let (delayed_init, ready) = DelayedInit::<()>::new();
        let mut runner = Box::pin(
            Runner::new(
                scheduler(
                    stream::iter([ScheduleRequest {
                        message: 1u8,
                        run_at: Instant::now(),
                    }])
                    .chain(stream::pending()),
                ),
                0,
                |msg| {
                    // Would panic if a task ran before the gate was released.
                    assert!(*is_ready.lock().unwrap());
                    std::future::ready(*msg)
                },
            )
            .delay_tasks_until(ready.get()),
        );
        assert!(poll!(runner.next()).is_pending());
        *is_ready.lock().unwrap() = true;
        delayed_init.init(());
        assert_eq!(runner.next().await.transpose().unwrap(), Some(1));
    }

    /// Duplicate messages scheduled while the gate is closed must still be
    /// deduped: each distinct message runs exactly once.
    #[tokio::test]
    async fn runner_should_dedupe_while_waiting_for_readiness() {
        let is_ready = Mutex::new(false);
        let (delayed_init, ready) = DelayedInit::<()>::new();
        let mut runner = Box::pin(
            Runner::new(
                scheduler(
                    stream::iter([
                        ScheduleRequest {
                            message: 'a',
                            run_at: Instant::now(),
                        },
                        ScheduleRequest {
                            message: 'b',
                            run_at: Instant::now(),
                        },
                        ScheduleRequest {
                            message: 'a',
                            run_at: Instant::now(),
                        },
                    ])
                    .chain(stream::pending()),
                ),
                0,
                |msg| {
                    assert!(*is_ready.lock().unwrap());
                    std::future::ready(*msg)
                },
            )
            .delay_tasks_until(ready.get()),
        );
        assert!(poll!(runner.next()).is_pending());
        *is_ready.lock().unwrap() = true;
        delayed_init.init(());
        let mut message_counts = HashMap::new();
        // The input stream never ends, so this is expected to time out after
        // having drained the scheduled messages.
        assert!(timeout(
            Duration::from_secs(1),
            runner.try_for_each(|msg| {
                *message_counts.entry(msg).or_default() += 1;
                async { Ok(()) }
            })
        )
        .await
        .is_err());
        assert_eq!(message_counts, HashMap::from([('a', 1), ('b', 1)]));
    }

    /// A failed readiness gate surfaces as `Error::Readiness`, and no task runs.
    #[tokio::test]
    async fn runner_should_report_readiness_errors() {
        let (delayed_init, ready) = DelayedInit::<()>::new();
        let mut runner = Box::pin(
            Runner::new(
                scheduler(
                    stream::iter([ScheduleRequest {
                        message: (),
                        run_at: Instant::now(),
                    }])
                    .chain(stream::pending()),
                ),
                0,
                |()| {
                    panic!("run_msg should never be invoked if readiness gate fails");
                    // It's "useless", but it helps to direct rustc to the correct types
                    #[allow(unreachable_code)]
                    std::future::ready(())
                },
            )
            .delay_tasks_until(ready.get()),
        );
        assert!(poll!(runner.next()).is_pending());
        // Dropping the initializer fails the gate with `InitDropped`.
        drop(delayed_init);
        assert!(matches!(
            runner.try_collect::<Vec<_>>().await.unwrap_err(),
            Error::Readiness(delayed_init::InitDropped)
        ));
    }

    // A Future that is Ready after the specified duration from its initialization.
    struct DurationalFuture {
        start: Instant,
        ready_after: Duration,
    }

    impl DurationalFuture {
        fn new(expires_in: Duration) -> Self {
            let start = Instant::now();
            DurationalFuture {
                start,
                ready_after: expires_in,
            }
        }
    }

    impl Future for DurationalFuture {
        type Output = ();

        fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
            let now = Instant::now();
            if now.duration_since(self.start) > self.ready_after {
                Poll::Ready(())
            } else {
                // Busy-wake so the (paused) tokio clock drives completion.
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        }
    }

    /// With a concurrency limit of 2, a third request is held until a slot
    /// frees up; the limit is enforced continuously, not just at startup.
    #[tokio::test]
    async fn runner_should_respect_max_concurrent_executions() {
        pause();
        let count = Arc::new(Mutex::new(0));
        let (mut sched_tx, sched_rx) = mpsc::unbounded();
        let mut runner = Box::pin(
            Runner::new(scheduler(sched_rx), 2, |_| {
                let mut num = count.lock().unwrap();
                *num += 1;
                DurationalFuture::new(Duration::from_secs(2))
            })
            .for_each(|_| async {}),
        );
        sched_tx
            .send(ScheduleRequest {
                message: 1,
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        assert!(poll!(runner.as_mut()).is_pending());
        sched_tx
            .send(ScheduleRequest {
                message: 2,
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        assert!(poll!(runner.as_mut()).is_pending());
        sched_tx
            .send(ScheduleRequest {
                message: 3,
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        assert!(poll!(runner.as_mut()).is_pending());
        // Assert that we only ran two out of the three requests
        assert_eq!(*count.lock().unwrap(), 2);
        advance(Duration::from_secs(3)).await;
        assert!(poll!(runner.as_mut()).is_pending());
        // Assert that we run the third request when we have the capacity to
        assert_eq!(*count.lock().unwrap(), 3);
        advance(Duration::from_secs(3)).await;
        assert!(poll!(runner.as_mut()).is_pending());
        // Send the third message again and check it's ran
        sched_tx
            .send(ScheduleRequest {
                message: 3,
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        advance(Duration::from_secs(3)).await;
        assert!(poll!(runner.as_mut()).is_pending());
        assert_eq!(*count.lock().unwrap(), 4);
        let (mut sched_tx, sched_rx) = mpsc::unbounded();
        let mut runner = Box::pin(
            Runner::new(scheduler(sched_rx), 1, |_| {
                DurationalFuture::new(Duration::from_secs(2))
            })
            .for_each(|_| async {}),
        );
        sched_tx
            .send(ScheduleRequest {
                message: 1,
                run_at: Instant::now(),
            })
            .await
            .unwrap();
        assert!(poll!(runner.as_mut()).is_pending());
        // Drop the sender to test that we stop the runner when the requests
        // stream finishes.
        drop(sched_tx);
        assert_eq!(poll!(runner.as_mut()), Poll::Pending);
    }
}

531
vendor/kube-runtime/src/events.rs vendored Normal file
View File

@@ -0,0 +1,531 @@
//! Publishes events for objects for kubernetes >= 1.19
use std::{
collections::HashMap,
hash::{Hash, Hasher},
sync::Arc,
};
use k8s_openapi::{
api::{
core::v1::ObjectReference,
events::v1::{Event as K8sEvent, EventSeries},
},
apimachinery::pkg::apis::meta::v1::{MicroTime, ObjectMeta},
chrono::{Duration, Utc},
};
use kube_client::{
api::{Api, Patch, PatchParams, PostParams},
Client, ResourceExt,
};
use tokio::sync::RwLock;
/// Time-to-live for entries in the event deduplication cache (keyed by
/// [`EventKey`]). NOTE(review): the consuming cache logic is later in this
/// file — confirm entries older than this are treated as fresh events.
const CACHE_TTL: Duration = Duration::minutes(6);
/// Minimal event type for publishing through [`Recorder::publish`].
///
/// All string fields must be human readable.
pub struct Event {
    /// The event severity.
    ///
    /// Shows up in `kubectl describe` as `Type`.
    pub type_: EventType,

    /// The short reason explaining why the `action` was taken.
    ///
    /// This must be at most 128 characters, generally in `PascalCase`. Shows up in `kubectl describe` as `Reason`.
    pub reason: String,

    /// An optional description of the status of the `action`.
    ///
    /// This must be at most 1kB in size. Shows up in `kubectl describe` as `Message`.
    pub note: Option<String>,

    /// The action that was taken (either successfully or unsuccessfully) against main object
    ///
    /// This must be at most 128 characters. It does not currently show up in `kubectl describe`.
    /// A common convention is a short identifier of the action that caused the outcome described in `reason`.
    /// Usually denoted in `PascalCase`.
    pub action: String,

    /// Optional secondary object related to the main object
    ///
    /// Some events are emitted for actions that affect multiple objects.
    /// `secondary` can be populated to capture this detail.
    ///
    /// For example: the event concerns a `Deployment` and it affects the current `ReplicaSet` underneath it.
    /// You would therefore populate `events` using the object reference of the `ReplicaSet`.
    ///
    /// Set `secondary` to `None`, instead, if the event affects only the object whose reference
    /// you passed to [`Recorder::new`].
    ///
    /// # Naming note
    ///
    /// `secondary` is mapped to `related` in
    /// [`Events API`](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#event-v1-events-k8s-io).
    ///
    /// [`Recorder::new`]: crate::events::Recorder::new
    pub secondary: Option<ObjectReference>,
}
/// The event severity or type.
///
/// Surfaces as the `Type` column of `kubectl describe` (see [`Event::type_`]).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub enum EventType {
    /// An event took place - nothing to worry about.
    Normal,
    /// Something is not working as expected - it might be worth having a look.
    Warning,
}
/// [`ObjectReference`] with Hash and Eq implementations
///
/// Needed so references can be used as part of a `HashMap` key
/// (see [`EventKey`]).
///
/// [`ObjectReference`]: k8s_openapi::api::core::v1::ObjectReference
#[derive(Clone, Debug, PartialEq)]
pub struct Reference(ObjectReference);
// Equality delegates to the derived `PartialEq` on the wrapped reference.
impl Eq for Reference {}
impl Hash for Reference {
    /// Hash the identity-defining fields of the wrapped [`ObjectReference`].
    ///
    /// Only `api_version`, `kind`, `name`, `namespace` and `uid` participate;
    /// hashing them as a tuple feeds the hasher the same byte stream as
    /// hashing each field in sequence.
    fn hash<H: Hasher>(&self, state: &mut H) {
        let obj = &self.0;
        (
            &obj.api_version,
            &obj.kind,
            &obj.name,
            &obj.namespace,
            &obj.uid,
        )
            .hash(state);
    }
}
/// Cache key for event deduplication
///
/// Two published events coalesce into a single Event object with an
/// incremented `EventSeries` when all of these fields match
/// (see [`Recorder::publish`]).
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct EventKey {
    pub event_type: EventType,
    pub action: String,
    pub reason: String,
    pub reporting_controller: String,
    pub reporting_instance: Option<String>,
    pub regarding: Reference,
    pub related: Option<Reference>,
}
/// Information about the reporting controller.
///
/// ```
/// use kube::runtime::events::Reporter;
///
/// let reporter = Reporter {
///     controller: "my-awesome-controller".into(),
///     instance: std::env::var("CONTROLLER_POD_NAME").ok(),
/// };
/// ```
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Reporter {
    /// The name of the reporting controller that is publishing the event.
    ///
    /// This is likely your deployment.metadata.name.
    pub controller: String,
    /// The id of the controller publishing the event. Likely your pod name.
    ///
    /// Useful when running more than one replica on your controller and you need to disambiguate
    /// where events came from.
    ///
    /// The name of the controller pod can be retrieved using Kubernetes' API or
    /// it can be injected as an environment variable using
    ///
    /// ```yaml
    /// env:
    ///  - name: CONTROLLER_POD_NAME
    ///    valueFrom:
    ///      fieldRef:
    ///        fieldPath: metadata.name
    /// ```
    ///
    /// in the manifest of your controller.
    ///
    /// Note: the `From<&str>` conversion fills `instance` with the machine
    /// hostname when it is resolvable. When `instance` is `None`,
    /// `reporting_instance` defaults to `reporting_controller` in the
    /// generated `Event`.
    pub instance: Option<String>,
}
// simple conversions for when instance == controller
impl From<String> for Reporter {
    // NOTE(review): unlike the `From<&str>` impl below, this conversion does
    // not attempt a hostname lookup for `instance`. With `instance: None`, the
    // generated event's `reporting_instance` falls back to the controller name
    // (see `Recorder::generate_event`). Confirm this asymmetry is intentional.
    fn from(es: String) -> Self {
        Self {
            controller: es,
            instance: None,
        }
    }
}
impl From<&str> for Reporter {
    /// Build a [`Reporter`] from a controller name, using the machine's
    /// hostname (when it resolves to valid UTF-8) as the reporting instance.
    fn from(es: &str) -> Self {
        let resolved_host = hostname::get()
            .ok()
            .and_then(|raw| raw.into_string().ok());
        Self {
            controller: String::from(es),
            instance: resolved_host,
        }
    }
}
/// A publisher abstraction to emit Kubernetes' events.
///
/// All events emitted by an `Recorder` are attached to the [`ObjectReference`]
/// specified when building the recorder using [`Recorder::new`].
///
/// ```
/// use kube::runtime::events::{Reporter, Recorder, Event, EventType};
/// use k8s_openapi::api::core::v1::ObjectReference;
///
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let client: kube::Client = todo!();
/// let reporter = Reporter {
///     controller: "my-awesome-controller".into(),
///     instance: std::env::var("CONTROLLER_POD_NAME").ok(),
/// };
///
/// let recorder = Recorder::new(client, reporter);
///
/// // references can be made manually using `ObjectMeta` and `ApiResource`/`Resource` info
/// let reference = ObjectReference {
///     // [...]
///     ..Default::default()
/// };
/// // or for k8s-openapi / kube-derive types, use Resource::object_ref:
/// // let reference = myobject.object_ref();
/// recorder
///     .publish(
///         &Event {
///             action: "Scheduling".into(),
///             reason: "Pulling".into(),
///             note: Some("Pulling image `nginx`".into()),
///             type_: EventType::Normal,
///             secondary: None,
///         },
///         &reference,
///     ).await?;
/// # Ok(())
/// # }
/// ```
///
/// Events attached to an object will be shown in the `Events` section of the output of
/// of `kubectl describe` for that object.
///
/// ## RBAC
///
/// Note that usage of the event recorder minimally requires the following RBAC rules:
///
/// ```yaml
/// - apiGroups: ["events.k8s.io"]
///   resources: ["events"]
///   verbs: ["create", "patch"]
/// ```
#[derive(Clone)]
pub struct Recorder {
    // Client used to create/patch `events.k8s.io/v1` Event objects.
    client: Client,
    // Identity of the reporting controller, attached to every published event.
    reporter: Reporter,
    // De-duplication cache: repeats within `CACHE_TTL` are patched with an
    // incremented `EventSeries` instead of creating a new Event object.
    cache: Arc<RwLock<HashMap<EventKey, K8sEvent>>>,
}
impl Recorder {
    /// Create a new recorder that can publish events for one specific object
    ///
    /// This is intended to be created at the start of your controller's reconcile fn.
    ///
    /// Cluster scoped objects will publish events in the "default" namespace.
    #[must_use]
    pub fn new(client: Client, reporter: Reporter) -> Self {
        let cache = Arc::default();
        Self {
            client,
            reporter,
            cache,
        }
    }

    /// Builds unique event key based on reportingController, reportingInstance, regarding, reason
    /// and action
    // (the original doc said "note", but `note` is not part of the key — only
    // the fields below participate in de-duplication)
    fn get_event_key(&self, ev: &Event, regarding: &ObjectReference) -> EventKey {
        EventKey {
            event_type: ev.type_,
            action: ev.action.clone(),
            reason: ev.reason.clone(),
            reporting_controller: self.reporter.controller.clone(),
            reporting_instance: self.reporter.instance.clone(),
            regarding: Reference(regarding.clone()),
            related: ev.secondary.clone().map(Reference),
        }
    }

    // See https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.22/#event-v1-events-k8s-io
    // for more detail on the fields
    // and what's expected: https://kubernetes.io/docs/reference/using-api/deprecation-guide/#event-v125
    fn generate_event(&self, ev: &Event, reference: &ObjectReference) -> K8sEvent {
        let now = Utc::now();
        K8sEvent {
            action: Some(ev.action.clone()),
            reason: Some(ev.reason.clone()),
            deprecated_count: None,
            deprecated_first_timestamp: None,
            deprecated_last_timestamp: None,
            deprecated_source: None,
            event_time: Some(MicroTime(now)),
            regarding: Some(reference.clone()),
            note: ev.note.clone(),
            metadata: ObjectMeta {
                namespace: reference.namespace.clone(),
                // Event names must be unique within the namespace; suffix the
                // object (or controller) name with a hex timestamp — nanoseconds
                // when representable, seconds otherwise.
                name: Some(format!(
                    "{}.{:x}",
                    reference.name.as_ref().unwrap_or(&self.reporter.controller),
                    now.timestamp_nanos_opt().unwrap_or_else(|| now.timestamp())
                )),
                ..Default::default()
            },
            reporting_controller: Some(self.reporter.controller.clone()),
            // Fall back to the controller name when no instance was configured.
            reporting_instance: Some(
                self.reporter
                    .instance
                    .clone()
                    .unwrap_or_else(|| self.reporter.controller.clone()),
            ),
            series: None,
            type_: match ev.type_ {
                EventType::Normal => Some("Normal".into()),
                EventType::Warning => Some("Warning".into()),
            },
            related: ev.secondary.clone(),
        }
    }

    /// Publish a new Kubernetes' event.
    ///
    /// # Access control
    ///
    /// The event object is created in the same namespace of the [`ObjectReference`].
    /// Make sure that your controller has `create` permissions in the required namespaces
    /// for the `event` resource in the API group `events.k8s.io`.
    ///
    /// # Errors
    ///
    /// Returns an [`Error`](`kube_client::Error`) if the event is rejected by Kubernetes.
    pub async fn publish(&self, ev: &Event, reference: &ObjectReference) -> Result<(), kube_client::Error> {
        let now = Utc::now();
        // gc cache entries whose last activity is older than CACHE_TTL
        self.cache.write().await.retain(|_, v| {
            if let Some(series) = v.series.as_ref() {
                series.last_observed_time.0 + CACHE_TTL > now
            } else if let Some(event_time) = v.event_time.as_ref() {
                event_time.0 + CACHE_TTL > now
            } else {
                true
            }
        });

        let key = self.get_event_key(ev, reference);
        let event = match self.cache.read().await.get(&key) {
            Some(e) => {
                // Repeat occurrence: bump the series counter (a first repeat
                // starts the series at 2) and refresh the observation time.
                let count = e.series.as_ref().map_or(2, |s| s.count + 1);
                let mut event = e.clone();
                event.series = Some(EventSeries {
                    count,
                    last_observed_time: MicroTime(now),
                });
                event
            }
            None => self.generate_event(ev, reference),
        };

        // Cluster-scoped objects carry no namespace; publish in "default".
        // `as_deref` avoids allocating a temporary String just for the fallback.
        let events = Api::namespaced(
            self.client.clone(),
            reference.namespace.as_deref().unwrap_or("default"),
        );
        if event.series.is_some() {
            // Existing event: merge-patch the updated series onto it.
            events
                .patch(&event.name_any(), &PatchParams::default(), &Patch::Merge(&event))
                .await?;
        } else {
            events.create(&PostParams::default(), &event).await?;
        }
        {
            let mut cache = self.cache.write().await;
            cache.insert(key, event);
        }
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use super::{Event, EventKey, EventType, Recorder, Reference, Reporter};
    use k8s_openapi::{
        api::{
            core::v1::{ComponentStatus, Service},
            events::v1::Event as K8sEvent,
        },
        apimachinery::pkg::apis::meta::v1::MicroTime,
        chrono::{Duration, Utc},
    };
    use kube::{Api, Client, Resource};
    // Publishing the same event twice should patch the first Event with an
    // `EventSeries` instead of creating a second object.
    #[tokio::test]
    #[ignore = "needs cluster (creates an event for the default kubernetes service)"]
    async fn event_recorder_attaches_events() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let svcs: Api<Service> = Api::namespaced(client.clone(), "default");
        let s = svcs.get("kubernetes").await?; // always a kubernetes service in default
        let recorder = Recorder::new(client.clone(), "kube".into());
        recorder
            .publish(
                &Event {
                    type_: EventType::Normal,
                    reason: "VeryCoolService".into(),
                    note: Some("Sending kubernetes to detention".into()),
                    action: "Test event - plz ignore".into(),
                    secondary: None,
                },
                &s.object_ref(&()),
            )
            .await?;
        // First publish creates a plain Event with the note intact.
        let events: Api<K8sEvent> = Api::namespaced(client, "default");
        let event_list = events.list(&Default::default()).await?;
        let found_event = event_list
            .into_iter()
            .find(|e| std::matches!(e.reason.as_deref(), Some("VeryCoolService")))
            .unwrap();
        assert_eq!(found_event.note.unwrap(), "Sending kubernetes to detention");
        recorder
            .publish(
                &Event {
                    type_: EventType::Normal,
                    reason: "VeryCoolService".into(),
                    note: Some("Sending kubernetes to detention twice".into()),
                    action: "Test event - plz ignore".into(),
                    secondary: None,
                },
                &s.object_ref(&()),
            )
            .await?;
        // Second publish with the same key: the Event now carries a series.
        let event_list = events.list(&Default::default()).await?;
        let found_event = event_list
            .into_iter()
            .find(|e| std::matches!(e.reason.as_deref(), Some("VeryCoolService")))
            .unwrap();
        assert!(found_event.series.is_some());
        Ok(())
    }
    // Cluster-scoped objects (no namespace) should publish into "default".
    #[tokio::test]
    #[ignore = "needs cluster (creates an event for the default kubernetes service)"]
    async fn event_recorder_attaches_events_without_namespace() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let component_status_api: Api<ComponentStatus> = Api::all(client.clone());
        let s = component_status_api.get("scheduler").await?;
        let recorder = Recorder::new(client.clone(), "kube".into());
        recorder
            .publish(
                &Event {
                    type_: EventType::Normal,
                    reason: "VeryCoolServiceNoNamespace".into(),
                    note: Some("Sending kubernetes to detention without namespace".into()),
                    action: "Test event - plz ignore".into(),
                    secondary: None,
                },
                &s.object_ref(&()),
            )
            .await?;
        // The event must land in the "default" namespace despite the
        // cluster-scoped regarding object.
        let events: Api<K8sEvent> = Api::namespaced(client, "default");
        let event_list = events.list(&Default::default()).await?;
        let found_event = event_list
            .into_iter()
            .find(|e| std::matches!(e.reason.as_deref(), Some("VeryCoolServiceNoNamespace")))
            .unwrap();
        assert_eq!(
            found_event.note.unwrap(),
            "Sending kubernetes to detention without namespace"
        );
        recorder
            .publish(
                &Event {
                    type_: EventType::Normal,
                    reason: "VeryCoolServiceNoNamespace".into(),
                    note: Some("Sending kubernetes to detention without namespace twice".into()),
                    action: "Test event - plz ignore".into(),
                    secondary: None,
                },
                &s.object_ref(&()),
            )
            .await?;
        // Repeat publish coalesces into a series, same as the namespaced case.
        let event_list = events.list(&Default::default()).await?;
        let found_event = event_list
            .into_iter()
            .find(|e| std::matches!(e.reason.as_deref(), Some("VeryCoolServiceNoNamespace")))
            .unwrap();
        assert!(found_event.series.is_some());
        Ok(())
    }
    // Entries older than CACHE_TTL must be evicted, so a repeat after expiry
    // creates a fresh Event (no series) rather than patching the stale one.
    #[tokio::test]
    #[ignore = "needs cluster (creates an event for the default kubernetes service)"]
    async fn event_recorder_cache_retain() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let svcs: Api<Service> = Api::namespaced(client.clone(), "default");
        let s = svcs.get("kubernetes").await?; // always a kubernetes service in default
        let reference = s.object_ref(&());
        let reporter: Reporter = "kube".into();
        let ev = Event {
            type_: EventType::Normal,
            reason: "TestCacheTtl".into(),
            note: Some("Sending kubernetes to detention".into()),
            action: "Test event - plz ignore".into(),
            secondary: None,
        };
        // Hand-built key matching what `get_event_key` would produce for `ev`.
        let key = EventKey {
            event_type: ev.type_,
            action: ev.action.clone(),
            reason: ev.reason.clone(),
            reporting_controller: reporter.controller.clone(),
            regarding: Reference(reference.clone()),
            reporting_instance: None,
            related: None,
        };
        let reporter = Reporter {
            controller: "kube".into(),
            instance: None,
        };
        let recorder = Recorder::new(client.clone(), reporter);
        recorder.publish(&ev, &s.object_ref(&())).await?;
        // Backdate the cached entry past CACHE_TTL to force eviction on the
        // next publish.
        let now = Utc::now();
        let past = now - Duration::minutes(10);
        recorder.cache.write().await.entry(key).and_modify(|e| {
            e.event_time = Some(MicroTime(past));
        });
        recorder.publish(&ev, &s.object_ref(&())).await?;
        let events: Api<K8sEvent> = Api::namespaced(client, "default");
        let event_list = events.list(&Default::default()).await?;
        let found_event = event_list
            .into_iter()
            .find(|e| std::matches!(e.reason.as_deref(), Some("TestCacheTtl")))
            .unwrap();
        assert_eq!(found_event.note.unwrap(), "Sending kubernetes to detention");
        // A fresh Event (no series) proves the stale cache entry was evicted.
        assert!(found_event.series.is_none());
        Ok(())
    }
}

233
vendor/kube-runtime/src/finalizer.rs vendored Normal file
View File

@@ -0,0 +1,233 @@
//! Finalizer helper for [`Controller`](crate::Controller) reconcilers
use crate::controller::Action;
use futures::{TryFuture, TryFutureExt};
use json_patch::{jsonptr::PointerBuf, AddOperation, PatchOperation, RemoveOperation, TestOperation};
use kube_client::{
api::{Patch, PatchParams},
Api, Resource, ResourceExt,
};
use serde::{de::DeserializeOwned, Serialize};
use std::{error::Error as StdError, fmt::Debug, str::FromStr, sync::Arc};
use thiserror::Error;
/// Errors produced by [`finalizer`].
#[derive(Debug, Error)]
pub enum Error<ReconcileErr>
where
    ReconcileErr: StdError + 'static,
{
    /// The [`Event::Apply`] reconciliation returned an error.
    #[error("failed to apply object: {0}")]
    ApplyFailed(#[source] ReconcileErr),
    /// The [`Event::Cleanup`] reconciliation returned an error; the finalizer
    /// is kept so cleanup will be retried.
    #[error("failed to clean up object: {0}")]
    CleanupFailed(#[source] ReconcileErr),
    /// The JSON patch adding the finalizer was rejected (for example due to a
    /// concurrent modification of `metadata.finalizers`).
    #[error("failed to add finalizer: {0}")]
    AddFinalizer(#[source] kube_client::Error),
    /// The JSON patch removing the finalizer was rejected (for example due to
    /// a concurrent modification of `metadata.finalizers`).
    #[error("failed to remove finalizer: {0}")]
    RemoveFinalizer(#[source] kube_client::Error),
    /// The object has no `metadata.name`, so it cannot be patched.
    #[error("object has no name")]
    UnnamedObject,
    /// The finalizer path could not be encoded as a JSON pointer.
    #[error("invalid finalizer")]
    InvalidFinalizer,
}
// Snapshot of an object's finalizer-related state, used by `finalizer` to
// select the reconciliation branch.
struct FinalizerState {
    // Position of our finalizer within `metadata.finalizers`, if present.
    finalizer_index: Option<usize>,
    // Whether the object has a `deletion_timestamp`, i.e. is being deleted.
    is_deleting: bool,
}
impl FinalizerState {
    /// Compute the finalizer state for `obj`: where (if anywhere)
    /// `finalizer_name` sits in `metadata.finalizers`, and whether the object
    /// is currently being deleted.
    fn for_object<K: Resource>(obj: &K, finalizer_name: &str) -> Self {
        Self {
            // `Iterator::position` expresses the original
            // enumerate/find/map chain directly.
            finalizer_index: obj.finalizers().iter().position(|fin| fin == finalizer_name),
            is_deleting: obj.meta().deletion_timestamp.is_some(),
        }
    }
}
/// Reconcile an object in a way that requires cleanup before an object can be deleted.
///
/// It does this by managing a [`ObjectMeta::finalizers`] entry,
/// which prevents the object from being deleted before the cleanup is done.
///
/// In typical usage, if you use `finalizer` then it should be the only top-level "action"
/// in your [`applier`](crate::applier)/[`Controller`](crate::Controller)'s `reconcile` function.
///
/// # Expected Flow
///
/// 1. User creates object
/// 2. Reconciler sees object
/// 3. `finalizer` adds `finalizer_name` to [`ObjectMeta::finalizers`]
/// 4. Reconciler sees updated object
/// 5. `finalizer` runs [`Event::Apply`]
/// 6. User updates object
/// 7. Reconciler sees updated object
/// 8. `finalizer` runs [`Event::Apply`]
/// 9. User deletes object
/// 10. Reconciler sees deleting object
/// 11. `finalizer` runs [`Event::Cleanup`]
/// 12. `finalizer` removes `finalizer_name` from [`ObjectMeta::finalizers`]
/// 13. Kubernetes sees that all [`ObjectMeta::finalizers`] are gone and finally deletes the object
///
/// # Guarantees
///
/// If [`Event::Apply`] is ever started then [`Event::Cleanup`] must succeed before the Kubernetes object deletion completes.
///
/// # Assumptions
///
/// `finalizer_name` must be unique among the controllers interacting with the object
///
/// [`Event::Apply`] and [`Event::Cleanup`] must both be idempotent, and tolerate being executed several times (even if previously cancelled).
///
/// [`Event::Cleanup`] must tolerate [`Event::Apply`] never having ran at all, or never having succeeded. Keep in mind that
/// even infallible `.await`s are cancellation points.
///
/// # Caveats
///
/// Object deletes will get stuck while the controller is not running, or if `cleanup` fails for some reason.
///
/// `reconcile` should take the object that the [`Event`] contains, rather than trying to reuse `obj`, since it may have been updated.
///
/// # Errors
///
/// [`Event::Apply`] and [`Event::Cleanup`] are both fallible, their errors are passed through as [`Error::ApplyFailed`]
/// and [`Error::CleanupFailed`], respectively.
///
/// In addition, adding and removing the finalizer itself may fail. In particular, this may be because of
/// network errors, lacking permissions, or because another `finalizer` was updated in the meantime on the same object.
///
/// [`ObjectMeta::finalizers`]: kube_client::api::ObjectMeta#structfield.finalizers
pub async fn finalizer<K, ReconcileFut>(
    api: &Api<K>,
    finalizer_name: &str,
    obj: Arc<K>,
    reconcile: impl FnOnce(Event<K>) -> ReconcileFut,
) -> Result<Action, Error<ReconcileFut::Error>>
where
    K: Resource + Clone + DeserializeOwned + Serialize + Debug,
    ReconcileFut: TryFuture<Ok = Action>,
    ReconcileFut::Error: StdError + 'static,
{
    // Branch on (finalizer present?, deleting?) — four mutually exclusive cases.
    match FinalizerState::for_object(&*obj, finalizer_name) {
        // Finalizer installed, object live: run the normal Apply reconciliation.
        FinalizerState {
            finalizer_index: Some(_),
            is_deleting: false,
        } => reconcile(Event::Apply(obj))
            .into_future()
            .await
            .map_err(Error::ApplyFailed),
        // Finalizer installed, object deleting: run Cleanup, then remove our
        // finalizer so Kubernetes can finish the deletion.
        FinalizerState {
            finalizer_index: Some(finalizer_i),
            is_deleting: true,
        } => {
            // Cleanup reconciliation must succeed before it's safe to remove the finalizer
            let name = obj.meta().name.clone().ok_or(Error::UnnamedObject)?;
            let action = reconcile(Event::Cleanup(obj))
                .into_future()
                .await
                // Short-circuit, so that we keep the finalizer if cleanup fails
                .map_err(Error::CleanupFailed)?;
            // Cleanup was successful, remove the finalizer so that deletion can continue
            let finalizer_path = format!("/metadata/finalizers/{finalizer_i}");
            api.patch::<K>(
                &name,
                &PatchParams::default(),
                &Patch::Json(json_patch::Patch(vec![
                    // All finalizers run concurrently and we use an integer index
                    // `Test` ensures that we fail instead of deleting someone else's finalizer
                    // (in which case a new `Cleanup` event will be sent)
                    PatchOperation::Test(TestOperation {
                        path: PointerBuf::from_str(finalizer_path.as_str())
                            .map_err(|_err| Error::InvalidFinalizer)?,
                        value: finalizer_name.into(),
                    }),
                    PatchOperation::Remove(RemoveOperation {
                        path: PointerBuf::from_str(finalizer_path.as_str())
                            .map_err(|_err| Error::InvalidFinalizer)?,
                    }),
                ])),
            )
            .await
            .map_err(Error::RemoveFinalizer)?;
            Ok(action)
        }
        // No finalizer yet, object live: install the finalizer first; the
        // resulting watch event will trigger the Apply reconciliation.
        FinalizerState {
            finalizer_index: None,
            is_deleting: false,
        } => {
            // Finalizer must be added before it's safe to run an `Apply` reconciliation
            let patch = json_patch::Patch(if obj.finalizers().is_empty() {
                vec![
                    // `Test` for null guards against the list having been
                    // created concurrently by another actor.
                    PatchOperation::Test(TestOperation {
                        path: PointerBuf::from_str("/metadata/finalizers")
                            .map_err(|_err| Error::InvalidFinalizer)?,
                        value: serde_json::Value::Null,
                    }),
                    PatchOperation::Add(AddOperation {
                        path: PointerBuf::from_str("/metadata/finalizers")
                            .map_err(|_err| Error::InvalidFinalizer)?,
                        value: vec![finalizer_name].into(),
                    }),
                ]
            } else {
                vec![
                    // Kubernetes doesn't automatically deduplicate finalizers (see
                    // https://github.com/kube-rs/kube/issues/964#issuecomment-1197311254),
                    // so we need to fail and retry if anyone else has added the finalizer in the meantime
                    PatchOperation::Test(TestOperation {
                        path: PointerBuf::from_str("/metadata/finalizers")
                            .map_err(|_err| Error::InvalidFinalizer)?,
                        value: obj.finalizers().into(),
                    }),
                    PatchOperation::Add(AddOperation {
                        path: PointerBuf::from_str("/metadata/finalizers/-")
                            .map_err(|_err| Error::InvalidFinalizer)?,
                        value: finalizer_name.into(),
                    }),
                ]
            });
            api.patch::<K>(
                obj.meta().name.as_deref().ok_or(Error::UnnamedObject)?,
                &PatchParams::default(),
                &Patch::Json(patch),
            )
            .await
            .map_err(Error::AddFinalizer)?;
            // No point applying here, since the patch will cause a new reconciliation
            Ok(Action::await_change())
        }
        // No finalizer and the object is deleting: nothing left for us to do.
        FinalizerState {
            finalizer_index: None,
            is_deleting: true,
        } => {
            // Our work here is done
            Ok(Action::await_change())
        }
    }
}
/// A representation of an action that should be taken by a reconciler.
///
/// Passed to the `reconcile` closure of [`finalizer`]; the wrapped [`Arc`]
/// carries the object to reconcile.
pub enum Event<K> {
    /// The reconciler should ensure that the actual state matches the state desired in the object.
    ///
    /// This must be idempotent, since it may be recalled if, for example (this list is non-exhaustive):
    ///
    /// - The controller is restarted
    /// - The object is updated
    /// - The reconciliation fails
    /// - The grinch attacks
    Apply(Arc<K>),
    /// The object is being deleted, and the reconciler should remove all resources that it owns.
    ///
    /// This must be idempotent, since it may be recalled if, for example (this list is non-exhaustive):
    ///
    /// - The controller is restarted while the deletion is in progress
    /// - The reconciliation fails
    /// - Another finalizer was removed in the meantime
    /// - The grinch's heart grows a size or two
    Cleanup(Arc<K>),
}

41
vendor/kube-runtime/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,41 @@
//! Common components for building Kubernetes operators
//!
//! This crate contains the core building blocks to allow users to build
//! controllers/operators/watchers that need to synchronize/reconcile kubernetes
//! state.
//!
//! Newcomers are recommended to start with the [`Controller`] builder, which gives an
//! opinionated starting point that should be appropriate for simple operators, but all
//! components are designed to be usable á la carte if your operator doesn't quite fit that mold.
#![deny(clippy::all)]
#![deny(clippy::pedantic)]
// Triggered by many derive macros (kube-derive, educe)
#![allow(clippy::default_trait_access)]
#![allow(clippy::type_repetition_in_bounds)]
// Triggered by educe derives on enums
#![allow(clippy::used_underscore_binding)]
// Triggered by Tokio macros
#![allow(clippy::semicolon_if_nothing_returned)]
// Triggered by nightly clippy on idiomatic code
#![allow(clippy::let_underscore_untyped)]
pub mod controller;
pub mod events;
pub mod finalizer;
pub mod reflector;
pub mod scheduler;
pub mod utils;
pub mod wait;
pub mod watcher;
// Convenience re-exports of the most commonly used entry points.
pub use controller::{applier, Config, Controller};
pub use finalizer::finalizer;
pub use reflector::reflector;
pub use scheduler::scheduler;
pub use utils::WatchStreamExt;
pub use watcher::{metadata_watcher, watcher};
pub use utils::{predicates, Predicate};
pub use wait::conditions;

View File

@@ -0,0 +1,428 @@
use core::{
pin::Pin,
task::{Context, Poll},
};
use std::{fmt::Debug, sync::Arc};
use educe::Educe;
use futures::Stream;
use pin_project::pin_project;
use std::task::ready;
use crate::reflector::{ObjectRef, Store};
use async_broadcast::{InactiveReceiver, Receiver, Sender};
use super::Lookup;
#[derive(Educe)]
#[educe(Debug(bound("K: Debug, K::DynamicType: Debug")), Clone)]
// A helper type that holds a broadcast transmitter and a broadcast receiver,
// used to fan-out events from a root stream to multiple listeners.
pub(crate) struct Dispatcher<K>
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Eq + std::hash::Hash + Clone,
{
    // Sender half used to broadcast object refs to all subscribers.
    dispatch_tx: Sender<ObjectRef<K>>,
    // An inactive reader that prevents the channel from closing until the
    // writer is dropped.
    _dispatch_rx: InactiveReceiver<ObjectRef<K>>,
}
impl<K> Dispatcher<K>
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Eq + std::hash::Hash + Clone,
{
    /// Construct a dispatcher backed by a broadcast channel of capacity
    /// `buf_size`.
    ///
    /// Messages are buffered until every active reader has received a copy;
    /// once the buffer is full, senders apply backpressure by waiting for
    /// space. An inactive receiver is retained so the channel stays open even
    /// while no subscriber exists.
    //
    // N.B messages are eagerly broadcasted, meaning no active receivers are
    // required for a message to be broadcasted.
    pub(crate) fn new(buf_size: usize) -> Dispatcher<K> {
        let (mut tx, rx) = async_broadcast::broadcast(buf_size);
        // Broadcast eagerly: don't wait for any receiver to become active;
        // events are simply buffered when nobody is listening.
        tx.set_await_active(false);
        Self {
            dispatch_tx: tx,
            _dispatch_rx: rx.deactivate(),
        }
    }

    /// Broadcast `obj_ref` to all subscribers, waiting until the channel has
    /// capacity for the event.
    pub(crate) async fn broadcast(&mut self, obj_ref: ObjectRef<K>) {
        // Send errors (e.g. a closed channel) are intentionally ignored.
        let _ = self.dispatch_tx.broadcast_direct(obj_ref).await;
    }

    /// Create a [`ReflectHandle`] subscribed to this dispatcher.
    ///
    /// N.B: the new receiver is fast-forwarded to the _latest_ event; it will
    /// not observe events still waiting to be acked by existing listeners.
    pub(crate) fn subscribe(&self, reader: Store<K>) -> ReflectHandle<K> {
        ReflectHandle::new(reader, self.dispatch_tx.new_receiver())
    }
}
/// A handle to a shared stream reader
///
/// [`ReflectHandle`]s are created by calling [`subscribe()`] on a [`Writer`],
/// or by calling `clone()` on an already existing [`ReflectHandle`]. Each
/// shared stream reader should be polled independently and driven to readiness
/// to avoid deadlocks. When the [`Writer`]'s buffer is filled, backpressure
/// will be applied on the root stream side.
///
/// When the root stream is dropped, or it ends, all [`ReflectHandle`]s
/// subscribed to the stream will also terminate after all events yielded by
/// the root stream have been observed. This means [`ReflectHandle`] streams
/// can still be polled after the root stream has been dropped.
///
/// [`Writer`]: crate::reflector::Writer
#[pin_project]
pub struct ReflectHandle<K>
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Eq + std::hash::Hash + Clone,
{
    // Broadcast receiver carrying object refs from the dispatcher.
    #[pin]
    rx: Receiver<ObjectRef<K>>,
    // Store reader used to resolve received refs into full objects.
    reader: Store<K>,
}
impl<K> Clone for ReflectHandle<K>
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Eq + std::hash::Hash + Clone,
{
    /// Cloning yields an independent subscriber over the same store;
    /// each clone must be polled separately.
    fn clone(&self) -> Self {
        Self::new(self.reader.clone(), self.rx.clone())
    }
}
impl<K> ReflectHandle<K>
where
    K: Lookup + Clone,
    K::DynamicType: Eq + std::hash::Hash + Clone,
{
    /// Wrap a store reader and a broadcast receiver into a handle.
    pub(super) fn new(reader: Store<K>, rx: Receiver<ObjectRef<K>>) -> ReflectHandle<K> {
        Self { reader, rx }
    }

    /// Return a clone of the underlying store reader.
    #[must_use]
    pub fn reader(&self) -> Store<K> {
        self.reader.clone()
    }
}
impl<K> Stream for ReflectHandle<K>
where
    K: Lookup + Clone,
    K::DynamicType: Eq + std::hash::Hash + Clone + Default,
{
    type Item = Arc<K>;

    /// Yield the next object observed by this subscriber.
    ///
    /// Refs received from the broadcast channel are resolved against the
    /// store; refs no longer present in the store (e.g. the object was
    /// deleted before this handle caught up) are skipped.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            match ready!(this.rx.as_mut().poll_next(cx)) {
                Some(obj_ref) => {
                    if let Some(obj) = this.reader.get(&obj_ref) {
                        return Poll::Ready(Some(obj));
                    }
                    // Store miss: the previous code returned `Poll::Pending`
                    // here even though the receiver had just returned `Ready`,
                    // so no waker was registered and the task could stall.
                    // Looping re-polls the receiver, which either yields the
                    // next event or registers a waker before returning Pending.
                }
                None => return Poll::Ready(None),
            }
        }
    }
}
#[cfg(feature = "unstable-runtime-subscribe")]
#[cfg(test)]
pub(crate) mod test {
use crate::{
watcher::{Error, Event},
WatchStreamExt,
};
use std::{pin::pin, sync::Arc, task::Poll};
use crate::reflector;
use futures::{poll, stream, StreamExt};
use k8s_openapi::api::core::v1::Pod;
fn testpod(name: &str) -> Pod {
let mut pod = Pod::default();
pod.metadata.name = Some(name.to_string());
pod
}
#[tokio::test]
async fn events_are_passed_through() {
let foo = testpod("foo");
let bar = testpod("bar");
let st = stream::iter([
Ok(Event::Apply(foo.clone())),
Err(Error::NoResourceVersion),
Ok(Event::Init),
Ok(Event::InitApply(foo)),
Ok(Event::InitApply(bar)),
Ok(Event::InitDone),
]);
let (reader, writer) = reflector::store_shared(10);
let mut reflect = pin!(st.reflect_shared(writer));
// Prior to any polls, we should have an empty store.
assert_eq!(reader.len(), 0);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::Apply(_))))
));
// Make progress and assert all events are seen
assert_eq!(reader.len(), 1);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Err(Error::NoResourceVersion)))
));
assert_eq!(reader.len(), 1);
let restarted = poll!(reflect.next());
assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::Init)))));
assert_eq!(reader.len(), 1);
let restarted = poll!(reflect.next());
assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::InitApply(_))))));
assert_eq!(reader.len(), 1);
let restarted = poll!(reflect.next());
assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::InitApply(_))))));
assert_eq!(reader.len(), 1);
let restarted = poll!(reflect.next());
assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::InitDone)))));
assert_eq!(reader.len(), 2);
assert!(matches!(poll!(reflect.next()), Poll::Ready(None)));
assert_eq!(reader.len(), 2);
}
#[tokio::test]
async fn readers_yield_touched_objects() {
// Readers should yield touched objects they receive from Stream events.
//
// NOTE: a Delete(_) event will be ignored if the item does not exist in
// the cache. Same with a Restarted(vec![delete_item])
let foo = testpod("foo");
let bar = testpod("bar");
let st = stream::iter([
Ok(Event::Delete(foo.clone())),
Ok(Event::Apply(foo.clone())),
Err(Error::NoResourceVersion),
Ok(Event::Init),
Ok(Event::InitApply(foo.clone())),
Ok(Event::InitApply(bar.clone())),
Ok(Event::InitDone),
]);
let foo = Arc::new(foo);
let _bar = Arc::new(bar);
let (_, writer) = reflector::store_shared(10);
let mut subscriber = pin!(writer.subscribe().unwrap());
let mut reflect = pin!(st.reflect_shared(writer));
// Deleted events should be skipped by subscriber.
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::Delete(_))))
));
assert_eq!(poll!(subscriber.next()), Poll::Pending);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::Apply(_))))
));
assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone())));
// Errors are not propagated to subscribers.
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Err(Error::NoResourceVersion)))
));
assert!(matches!(poll!(subscriber.next()), Poll::Pending));
// Restart event will yield all objects in the list
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::Init)))
));
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::InitApply(_))))
));
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::InitApply(_))))
));
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::InitDone)))
));
// these don't come back in order atm:
assert!(matches!(poll!(subscriber.next()), Poll::Ready(Some(_))));
assert!(matches!(poll!(subscriber.next()), Poll::Ready(Some(_))));
// When main channel is closed, it is propagated to subscribers
assert!(matches!(poll!(reflect.next()), Poll::Ready(None)));
assert_eq!(poll!(subscriber.next()), Poll::Ready(None));
}
#[tokio::test]
async fn readers_yield_when_tx_drops() {
// Once the main stream is dropped, readers should continue to make
// progress and read values that have been sent on the channel.
let foo = testpod("foo");
let bar = testpod("bar");
let st = stream::iter([
Ok(Event::Apply(foo.clone())),
Ok(Event::Init),
Ok(Event::InitApply(foo.clone())),
Ok(Event::InitApply(bar.clone())),
Ok(Event::InitDone),
]);
let foo = Arc::new(foo);
let _bar = Arc::new(bar);
let (_, writer) = reflector::store_shared(10);
let mut subscriber = pin!(writer.subscribe().unwrap());
let mut reflect = Box::pin(st.reflect_shared(writer));
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::Apply(_))))
));
assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone())));
// Restart event will yield all objects in the list. Broadcast values
// without polling and then drop.
//
// First, subscribers should be pending.
assert_eq!(poll!(subscriber.next()), Poll::Pending);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::Init)))
));
assert_eq!(poll!(subscriber.next()), Poll::Pending);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::InitApply(_))))
));
assert_eq!(poll!(subscriber.next()), Poll::Pending);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::InitApply(_))))
));
assert_eq!(poll!(subscriber.next()), Poll::Pending);
assert!(matches!(
poll!(reflect.next()),
Poll::Ready(Some(Ok(Event::InitDone)))
));
drop(reflect);
// we will get foo and bar here, but we dont have a guaranteed ordering on page events
assert!(matches!(poll!(subscriber.next()), Poll::Ready(Some(_))));
assert!(matches!(poll!(subscriber.next()), Poll::Ready(Some(_))));
assert_eq!(poll!(subscriber.next()), Poll::Ready(None));
}
#[tokio::test]
async fn reflect_applies_backpressure() {
    // When the channel is full, we should observe backpressure applied.
    //
    // This will be manifested by receiving Poll::Pending on the reflector
    // stream while the reader stream is not polled. Once we unblock the
    // buffer, the reflector will make progress.
    let foo = testpod("foo");
    let bar = testpod("bar");
    let st = stream::iter([
        //TODO: include a ready event here to avoid dealing with Init?
        Ok(Event::Apply(foo.clone())),
        Ok(Event::Apply(bar.clone())),
        Ok(Event::Apply(foo.clone())),
    ]);
    let foo = Arc::new(foo);
    let bar = Arc::new(bar);
    // Buffer of size 1: a single unread broadcast value fills the channel.
    let (_, writer) = reflector::store_shared(1);
    let mut subscriber = pin!(writer.subscribe().unwrap());
    let mut subscriber_slow = pin!(writer.subscribe().unwrap());
    let mut reflect = pin!(st.reflect_shared(writer));
    // Nothing has been broadcast yet; both subscribers are pending.
    assert_eq!(poll!(subscriber.next()), Poll::Pending);
    assert_eq!(poll!(subscriber_slow.next()), Poll::Pending);
    // Poll first subscriber, but not the second.
    //
    // The buffer can hold one object value, so even if we have a slow subscriber,
    // we will still get an event from the root.
    assert!(matches!(
        poll!(reflect.next()),
        Poll::Ready(Some(Ok(Event::Apply(_))))
    ));
    assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone())));
    // One subscriber is not reading, so we need to apply backpressure until
    // channel has capacity.
    //
    // At this point, the buffer is full. Polling again will trigger the
    // backpressure logic.
    assert!(matches!(poll!(reflect.next()), Poll::Pending));
    // Our "fast" subscriber will also have nothing else to poll until the
    // slower subscriber advances its pointer in the buffer.
    assert_eq!(poll!(subscriber.next()), Poll::Pending);
    // Advance slow reader
    assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone())));
    // We now have room for only one more item. In total, the previous event
    // had two. We repeat the same pattern.
    assert!(matches!(
        poll!(reflect.next()),
        Poll::Ready(Some(Ok(Event::Apply(_))))
    ));
    assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(bar.clone())));
    assert!(matches!(poll!(reflect.next()), Poll::Pending));
    assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(bar.clone())));
    assert!(matches!(
        poll!(reflect.next()),
        Poll::Ready(Some(Ok(Event::Apply(_))))
    ));
    // Poll again to drain the queue.
    assert!(matches!(poll!(reflect.next()), Poll::Ready(None)));
    assert_eq!(poll!(subscriber.next()), Poll::Ready(Some(foo.clone())));
    assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(Some(foo.clone())));
    // Source stream is exhausted, so both subscriber channels close.
    assert_eq!(poll!(subscriber.next()), Poll::Ready(None));
    assert_eq!(poll!(subscriber_slow.next()), Poll::Ready(None));
}
// TODO (matei): tests around cloning subscribers once a watch stream has already
// been established. This will depend on the interfaces & impl so are left
// out for now.
}

295
vendor/kube-runtime/src/reflector/mod.rs vendored Normal file
View File

@@ -0,0 +1,295 @@
//! Caches objects in memory
mod dispatcher;
mod object_ref;
pub mod store;
pub use self::{
dispatcher::ReflectHandle,
object_ref::{Extra as ObjectRefExtra, Lookup, ObjectRef},
};
use crate::watcher;
use async_stream::stream;
use futures::{Stream, StreamExt};
use std::hash::Hash;
#[cfg(feature = "unstable-runtime-subscribe")] pub use store::store_shared;
pub use store::{store, Store};
/// Cache objects from a [`watcher()`] stream into a local [`Store`]
///
/// Observes the raw `Stream` of [`watcher::Event`] objects, and modifies the cache.
/// It passes the raw [`watcher()`] stream through unmodified.
///
/// ## Usage
/// Create a [`Store`] through e.g. [`store::store()`]. The `writer` part is not-clonable,
/// and must be moved into the reflector. The `reader` part is the [`Store`] interface
/// that you can send to other parts of your program as state.
///
/// The cache contains the last-seen state of objects,
/// which may lag slightly behind the actual state.
///
/// ## Example
///
/// Infinite watch of [`Node`](k8s_openapi::api::core::v1::Node) resources with a certain label.
///
/// The `reader` part being passed around to a webserver is omitted.
/// For examples see [version-rs](https://github.com/kube-rs/version-rs) for integration with [axum](https://github.com/tokio-rs/axum),
/// or [controller-rs](https://github.com/kube-rs/controller-rs) for the similar controller integration with [actix-web](https://actix.rs/).
///
/// ```no_run
/// use std::future::ready;
/// use k8s_openapi::api::core::v1::Node;
/// use kube::runtime::{reflector, watcher, WatchStreamExt, watcher::Config};
/// use futures::StreamExt;
/// # use kube::api::Api;
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let client: kube::Client = todo!();
///
/// let nodes: Api<Node> = Api::all(client);
/// let node_filter = Config::default().labels("kubernetes.io/arch=amd64");
/// let (reader, writer) = reflector::store();
///
/// // Create the infinite reflector stream
/// let rf = reflector(writer, watcher(nodes, node_filter));
///
/// // !!! pass reader to your webserver/manager as state !!!
///
/// // Poll the stream (needed to keep the store up-to-date)
/// let infinite_watch = rf.applied_objects().for_each(|o| { ready(()) });
/// infinite_watch.await;
/// # Ok(())
/// # }
/// ```
///
///
/// ## Memory Usage
///
/// A reflector often constitutes one of the biggest components of a controller's memory use.
/// Given a ~2000 pods cluster, a reflector saving everything (including injected sidecars, managed fields)
/// can quickly consume a couple of hundred megabytes or more, depending on how much of this you are storing.
///
/// While generally acceptable, there are techniques you can leverage to reduce the memory usage
/// depending on your use case.
///
/// 1. Reflect a [`PartialObjectMeta<K>`](kube_client::core::PartialObjectMeta) stream rather than a stream of `K`
///
/// You can send in a [`metadata_watcher()`](crate::watcher::metadata_watcher()) for a type rather than a [`watcher()`],
/// and this can drop your memory usage by more than a factor of two,
/// depending on the size of `K`. 60% reduction seen for `Pod`. Usage is otherwise identical.
///
/// 2. Use `modify` the raw [`watcher::Event`] object stream to clear unneeded properties
///
/// For instance, managed fields typically constitutes around half the size of `ObjectMeta` and can often be dropped:
///
/// ```no_run
/// # use futures::TryStreamExt;
/// # use kube::{ResourceExt, Api, runtime::watcher};
/// # let api: Api<k8s_openapi::api::core::v1::Node> = todo!();
/// let stream = watcher(api, Default::default()).map_ok(|ev| {
/// ev.modify(|pod| {
/// pod.managed_fields_mut().clear();
/// pod.annotations_mut().clear();
/// pod.status = None;
/// })
/// });
/// ```
/// The `stream` can then be passed to `reflector` causing smaller objects to be written to its store.
/// Note that you **cannot drop everything**; you minimally need the spec properties your app relies on.
/// Additionally, only `labels`, `annotations` and `managed_fields` are safe to drop from `ObjectMeta`.
///
/// For more information check out: <https://kube.rs/controllers/optimization/> for graphs and techniques.
///
/// ## Stream sharing
///
/// `reflector()` as an interface may optionally create a stream that can be
/// shared with other components to help with resource usage.
///
/// To share a stream, the `Writer<K>` consumed by `reflector()` must be
/// created through an interface that allows a store to be subscribed on, such
/// as [`store_shared()`]. When the store supports being subscribed on, it will
/// broadcast an event to all active listeners after caching any object
/// contained in the event.
///
/// Creating subscribers requires an
/// [`unstable`](https://github.com/kube-rs/kube/blob/main/kube-runtime/Cargo.toml#L17-L21)
/// feature
pub fn reflector<K, W>(mut writer: store::Writer<K>, stream: W) -> impl Stream<Item = W::Item>
where
    K: Lookup + Clone,
    K::DynamicType: Eq + Hash + Clone,
    W: Stream<Item = watcher::Result<watcher::Event<K>>>,
{
    // Pin the upstream watcher so we can poll it from the generator below.
    let mut events = Box::pin(stream);
    stream! {
        while let Some(item) = events.next().await {
            match item {
                // Errors pass straight through without touching the store.
                Err(err) => yield Err(err),
                Ok(event) => {
                    // Update the cache first, then notify any subscribers,
                    // and only afterwards forward the event downstream.
                    writer.apply_watcher_event(&event);
                    writer.dispatch_event(&event).await;
                    yield Ok(event);
                }
            }
        }
    }
}
#[cfg(test)]
mod tests {
use super::{reflector, store, ObjectRef};
use crate::watcher;
use futures::{stream, StreamExt, TryStreamExt};
use k8s_openapi::{api::core::v1::ConfigMap, apimachinery::pkg::apis::meta::v1::ObjectMeta};
use rand::{
distr::{Bernoulli, Uniform},
Rng,
};
use std::collections::{BTreeMap, HashMap};
#[tokio::test]
async fn reflector_applied_should_add_object() {
    // A single `Apply` event flowing through the reflector must insert the
    // object into the backing store.
    let writer = store::Writer::default();
    let reader = writer.as_reader();
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("a".to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    let events = stream::iter([Ok(watcher::Event::Apply(cm.clone()))]);
    reflector(writer, events).map(|_| ()).collect::<()>().await;
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm)).as_deref(), Some(&cm));
}
#[tokio::test]
async fn reflector_applied_should_update_object() {
    // Two `Apply` events for the same key: the store must end up holding the
    // second (updated) version of the object.
    let writer = store::Writer::default();
    let reader = writer.as_reader();
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("a".to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    let mut data = BTreeMap::new();
    data.insert("data".to_string(), "present!".to_string());
    let updated_cm = ConfigMap {
        data: Some(data),
        ..cm.clone()
    };
    let events = stream::iter([
        Ok(watcher::Event::Apply(cm.clone())),
        Ok(watcher::Event::Apply(updated_cm.clone())),
    ]);
    reflector(writer, events).map(|_| ()).collect::<()>().await;
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm)).as_deref(), Some(&updated_cm));
}
#[tokio::test]
async fn reflector_deleted_should_remove_object() {
    // An `Apply` followed by a `Delete` for the same key must leave the
    // store without the object.
    let writer = store::Writer::default();
    let reader = writer.as_reader();
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("a".to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    let events = stream::iter([
        Ok(watcher::Event::Apply(cm.clone())),
        Ok(watcher::Event::Delete(cm.clone())),
    ]);
    reflector(writer, events).map(|_| ()).collect::<()>().await;
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm)), None);
}
#[tokio::test]
async fn reflector_restarted_should_clear_objects() {
    // A relist (`Init` .. `InitDone`) replaces the whole store: only objects
    // seen between `Init` and `InitDone` survive.
    let writer = store::Writer::default();
    let reader = writer.as_reader();
    let make_cm = |name: &str| ConfigMap {
        metadata: ObjectMeta {
            name: Some(name.to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    let cm_a = make_cm("a");
    let cm_b = make_cm("b");
    let events = stream::iter([
        Ok(watcher::Event::Apply(cm_a.clone())),
        Ok(watcher::Event::Init),
        Ok(watcher::Event::InitApply(cm_b.clone())),
        Ok(watcher::Event::InitDone),
    ]);
    reflector(writer, events).map(|_| ()).collect::<()>().await;
    // `cm_a` was applied before the relist, so it must be gone.
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm_a)), None);
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm_b)).as_deref(), Some(&cm_b));
}
#[tokio::test]
async fn reflector_store_should_not_contain_duplicates() {
    // Property-style test: feed 100k random Apply/Delete events over a key
    // space of 100 names, then verify the store never holds two entries
    // with the same name.
    let mut rng = rand::rng();
    let item_dist = Uniform::new(0_u8, 100).unwrap();
    let deleted_dist = Bernoulli::new(0.40).unwrap();
    let store_w = store::Writer::default();
    let store = store_w.as_reader();
    reflector(
        store_w,
        stream::iter((0_u32..100_000).map(|gen| {
            let item = rng.sample(item_dist);
            let deleted = rng.sample(deleted_dist);
            let obj = ConfigMap {
                metadata: ObjectMeta {
                    name: Some(item.to_string()),
                    // Unique resource_version per generated event.
                    resource_version: Some(gen.to_string()),
                    ..ObjectMeta::default()
                },
                ..ConfigMap::default()
            };
            // ~40% of events delete the object; the rest upsert it.
            Ok(if deleted {
                watcher::Event::Delete(obj)
            } else {
                watcher::Event::Apply(obj)
            })
        })),
    )
    .map_ok(|_| ())
    .try_collect::<()>()
    .await
    .unwrap();
    // Every object name in the final state must be unique.
    let mut seen_objects = HashMap::new();
    for obj in store.state() {
        assert_eq!(seen_objects.get(obj.metadata.name.as_ref().unwrap()), None);
        seen_objects.insert(obj.metadata.name.clone().unwrap(), obj);
    }
}
}

View File

@@ -0,0 +1,369 @@
use educe::Educe;
use k8s_openapi::{api::core::v1::ObjectReference, apimachinery::pkg::apis::meta::v1::OwnerReference};
#[cfg(doc)] use kube_client::core::ObjectMeta;
use kube_client::{
api::{DynamicObject, Resource},
core::api_version_from_group_version,
};
use std::{
borrow::Cow,
fmt::{Debug, Display},
hash::Hash,
};
/// Minimal lookup behaviour needed by a [reflector store](super::Store).
///
/// This trait is blanket-implemented for all [`Resource`] objects.
pub trait Lookup {
    /// Type information for types that do not know their resource information at compile time.
    /// This is equivalent to [`Resource::DynamicType`].
    type DynamicType;

    /// The [kind](Resource::kind) for this object.
    fn kind(dyntype: &Self::DynamicType) -> Cow<'_, str>;

    /// The [group](Resource::group) for this object.
    fn group(dyntype: &Self::DynamicType) -> Cow<'_, str>;

    /// The [version](Resource::version) for this object.
    fn version(dyntype: &Self::DynamicType) -> Cow<'_, str>;

    /// The [apiVersion](Resource::api_version) for this object.
    fn api_version(dyntype: &Self::DynamicType) -> Cow<'_, str> {
        // Default implementation: derive the apiVersion from group + version.
        api_version_from_group_version(Self::group(dyntype), Self::version(dyntype))
    }

    /// The [plural](Resource::plural) for this object.
    fn plural(dyntype: &Self::DynamicType) -> Cow<'_, str>;

    /// The [name](ObjectMeta#structfield.name) of the object.
    fn name(&self) -> Option<Cow<'_, str>>;

    /// The [namespace](ObjectMeta#structfield.namespace) of the object.
    fn namespace(&self) -> Option<Cow<'_, str>>;

    /// The [resource version](ObjectMeta#structfield.resource_version) of the object.
    fn resource_version(&self) -> Option<Cow<'_, str>>;

    /// The [UID](ObjectMeta#structfield.uid) of the object.
    fn uid(&self) -> Option<Cow<'_, str>>;

    /// Constructs an [`ObjectRef`] for this object.
    ///
    /// # Panics
    /// Panics if the object has no `.metadata.name`.
    fn to_object_ref(&self, dyntype: Self::DynamicType) -> ObjectRef<Self> {
        ObjectRef {
            dyntype,
            name: self.name().expect(".metadata.name missing").into_owned(),
            namespace: self.namespace().map(Cow::into_owned),
            extra: Extra {
                resource_version: self.resource_version().map(Cow::into_owned),
                uid: self.uid().map(Cow::into_owned),
            },
        }
    }
}
// Blanket implementation: every `Resource` can be used with reflector stores.
// Type-level info delegates to the `Resource` impl; instance-level info is
// read from the object's `ObjectMeta`, borrowing rather than cloning.
impl<K: Resource> Lookup for K {
    type DynamicType = K::DynamicType;

    fn kind(dyntype: &Self::DynamicType) -> Cow<'_, str> {
        K::kind(dyntype)
    }

    fn version(dyntype: &Self::DynamicType) -> Cow<'_, str> {
        K::version(dyntype)
    }

    fn group(dyntype: &Self::DynamicType) -> Cow<'_, str> {
        K::group(dyntype)
    }

    fn plural(dyntype: &Self::DynamicType) -> Cow<'_, str> {
        K::plural(dyntype)
    }

    fn name(&self) -> Option<Cow<'_, str>> {
        self.meta().name.as_deref().map(Cow::Borrowed)
    }

    fn namespace(&self) -> Option<Cow<'_, str>> {
        self.meta().namespace.as_deref().map(Cow::Borrowed)
    }

    fn resource_version(&self) -> Option<Cow<'_, str>> {
        self.meta().resource_version.as_deref().map(Cow::Borrowed)
    }

    fn uid(&self) -> Option<Cow<'_, str>> {
        self.meta().uid.as_deref().map(Cow::Borrowed)
    }
}
#[derive(Educe)]
#[educe(
    Debug(bound("K::DynamicType: Debug")),
    PartialEq(bound("K::DynamicType: PartialEq")),
    Hash(bound("K::DynamicType: Hash")),
    Clone(bound("K::DynamicType: Clone"))
)]
/// A typed and namespaced (if relevant) reference to a Kubernetes object
///
/// `K` may be either the object type or `DynamicObject`, in which case the
/// type is stored at runtime. Erased `ObjectRef`s pointing to different types
/// are still considered different.
///
/// ```
/// use kube_runtime::reflector::ObjectRef;
/// use k8s_openapi::api::core::v1::{ConfigMap, Secret};
/// assert_ne!(
///     ObjectRef::<ConfigMap>::new("a").erase(),
///     ObjectRef::<Secret>::new("a").erase(),
/// );
/// ```
#[non_exhaustive]
pub struct ObjectRef<K: Lookup + ?Sized> {
    /// The dynamic type information of the referenced object
    /// (equivalent to [`Lookup::DynamicType`])
    pub dyntype: K::DynamicType,
    /// The name of the object
    pub name: String,
    /// The namespace of the object
    ///
    /// May only be `None` if the kind is cluster-scoped (not located in a namespace).
    /// Note that it *is* acceptable for an `ObjectRef` to a cluster-scoped resource to
    /// have a namespace. These are, however, not considered equal:
    ///
    /// ```
    /// # use kube_runtime::reflector::ObjectRef;
    /// # use k8s_openapi::api::core::v1::ConfigMap;
    /// assert_ne!(ObjectRef::<ConfigMap>::new("foo"), ObjectRef::new("foo").within("bar"));
    /// ```
    pub namespace: Option<String>,
    /// Extra information about the object being referred to
    ///
    /// This is *not* considered when comparing objects, but may be used when converting to and from other representations,
    /// such as [`OwnerReference`] or [`ObjectReference`].
    #[educe(Hash(ignore), PartialEq(ignore))]
    pub extra: Extra,
}

// `extra` is excluded from `PartialEq` (see the field attribute above), so
// `Eq` holds whenever the dynamic type is itself `Eq`.
impl<K: Lookup + ?Sized> Eq for ObjectRef<K> where K::DynamicType: Eq {}
/// Non-vital information about an object being referred to
///
/// See [`ObjectRef::extra`]. This data is ignored when comparing or hashing
/// an [`ObjectRef`].
#[derive(Default, Debug, Clone)]
#[non_exhaustive]
pub struct Extra {
    /// The version of the resource at the time of reference
    pub resource_version: Option<String>,
    /// The uid of the object
    pub uid: Option<String>,
}
impl<K: Lookup> ObjectRef<K>
where
    K::DynamicType: Default,
{
    /// Creates a reference to an object with the given name, using the
    /// default dynamic type and no namespace (add one via [`ObjectRef::within`]).
    #[must_use]
    pub fn new(name: &str) -> Self {
        Self::new_with(name, Default::default())
    }

    /// Creates a reference pointing at `obj`, using the default dynamic type.
    ///
    /// Panics if `obj` has no `.metadata.name` (see [`Lookup::to_object_ref`]).
    #[must_use]
    pub fn from_obj(obj: &K) -> Self {
        obj.to_object_ref(Default::default())
    }
}
// Convenience conversion: `ObjectRef::from(&obj)` is equivalent to
// `ObjectRef::from_obj(&obj)` (and panics under the same conditions).
impl<K: Lookup> From<&K> for ObjectRef<K>
where
    K::DynamicType: Default,
{
    fn from(obj: &K) -> Self {
        Self::from_obj(obj)
    }
}
impl<K: Lookup> ObjectRef<K> {
    /// Creates a reference with an explicit dynamic type and no namespace.
    #[must_use]
    pub fn new_with(name: &str, dyntype: K::DynamicType) -> Self {
        Self {
            dyntype,
            name: name.into(),
            namespace: None,
            extra: Extra::default(),
        }
    }

    /// Scopes the reference to `namespace` (builder-style).
    #[must_use]
    pub fn within(mut self, namespace: &str) -> Self {
        self.namespace = Some(namespace.to_string());
        self
    }

    /// Creates `ObjectRef` from the resource and dynamic type.
    ///
    /// Panics if `obj` has no `.metadata.name` (see [`Lookup::to_object_ref`]).
    #[must_use]
    pub fn from_obj_with(obj: &K, dyntype: K::DynamicType) -> Self
    where
        K: Lookup,
    {
        obj.to_object_ref(dyntype)
    }

    /// Create an `ObjectRef` from an `OwnerReference`
    ///
    /// Returns `None` if the types do not match.
    #[must_use]
    pub fn from_owner_ref(
        namespace: Option<&str>,
        owner: &OwnerReference,
        dyntype: K::DynamicType,
    ) -> Option<Self> {
        // Owner references identify their target by apiVersion + kind;
        // both must match `K` for the conversion to be meaningful.
        if owner.api_version == K::api_version(&dyntype) && owner.kind == K::kind(&dyntype) {
            Some(Self {
                dyntype,
                name: owner.name.clone(),
                // `OwnerReference` carries no namespace; the caller supplies it.
                namespace: namespace.map(String::from),
                extra: Extra {
                    resource_version: None,
                    uid: Some(owner.uid.clone()),
                },
            })
        } else {
            None
        }
    }

    /// Convert into a reference to `K2`
    ///
    /// Note that no checking is done on whether this conversion makes sense. For example, every `Service`
    /// has a corresponding `Endpoints`, but it wouldn't make sense to convert a `Pod` into a `Deployment`.
    #[must_use]
    pub fn into_kind_unchecked<K2: Lookup>(self, dt2: K2::DynamicType) -> ObjectRef<K2> {
        ObjectRef {
            dyntype: dt2,
            name: self.name,
            namespace: self.namespace,
            extra: self.extra,
        }
    }

    /// Erases the static type, converting this into a reference to a
    /// [`DynamicObject`] whose type info is carried at runtime in an
    /// `ApiResource`.
    pub fn erase(self) -> ObjectRef<DynamicObject> {
        ObjectRef {
            dyntype: kube_client::api::ApiResource {
                group: K::group(&self.dyntype).to_string(),
                version: K::version(&self.dyntype).to_string(),
                api_version: K::api_version(&self.dyntype).to_string(),
                kind: K::kind(&self.dyntype).to_string(),
                plural: K::plural(&self.dyntype).to_string(),
            },
            name: self.name,
            namespace: self.namespace,
            extra: self.extra,
        }
    }
}
// Converts a reflector `ObjectRef` into the corev1 `ObjectReference` wire
// type. `extra` data (resource_version, uid) is carried over; `field_path`
// has no equivalent and is left unset.
impl<K: Lookup> From<ObjectRef<K>> for ObjectReference {
    fn from(val: ObjectRef<K>) -> Self {
        ObjectReference {
            api_version: Some(K::api_version(&val.dyntype).into_owned()),
            kind: Some(K::kind(&val.dyntype).into_owned()),
            field_path: None,
            name: Some(val.name),
            namespace: val.namespace,
            resource_version: val.extra.resource_version,
            uid: val.extra.uid,
        }
    }
}
// Renders as `{kind}.{version}.{group}/{name}` with an optional trailing
// `.{namespace}` when the reference is namespaced.
impl<K: Lookup> Display for ObjectRef<K> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let kind = K::kind(&self.dyntype);
        let version = K::version(&self.dyntype);
        let group = K::group(&self.dyntype);
        write!(f, "{kind}.{version}.{group}/{}", self.name)?;
        match &self.namespace {
            Some(namespace) => write!(f, ".{namespace}"),
            None => Ok(()),
        }
    }
}
#[cfg(test)]
mod tests {
use std::{
collections::hash_map::DefaultHasher,
hash::{Hash, Hasher},
};
use super::{Extra, ObjectRef};
use k8s_openapi::api::{
apps::v1::Deployment,
core::v1::{Node, Pod},
};
#[test]
fn display_should_follow_expected_format() {
    // Format is `{kind}.{version}.{group}/{name}[.{namespace}]`; for
    // core-group resources the group segment is empty.
    let pod_ref = ObjectRef::<Pod>::new("my-pod").within("my-namespace");
    assert_eq!(pod_ref.to_string(), "Pod.v1./my-pod.my-namespace");

    let deploy_ref = ObjectRef::<Deployment>::new("my-deploy").within("my-namespace");
    assert_eq!(deploy_ref.to_string(), "Deployment.v1.apps/my-deploy.my-namespace");

    // Cluster-scoped refs simply omit the namespace suffix.
    let node_ref = ObjectRef::<Node>::new("my-node");
    assert_eq!(node_ref.to_string(), "Node.v1./my-node");
}
#[test]
fn display_should_be_transparent_to_representation() {
    // Erasing the static type must not change the rendered reference.
    let pod_ref = ObjectRef::<Pod>::new("my-pod").within("my-namespace");
    assert_eq!(pod_ref.to_string(), pod_ref.clone().erase().to_string());

    let deploy_ref = ObjectRef::<Deployment>::new("my-deploy").within("my-namespace");
    assert_eq!(deploy_ref.to_string(), deploy_ref.clone().erase().to_string());

    let node_ref = ObjectRef::<Node>::new("my-node");
    assert_eq!(node_ref.to_string(), node_ref.clone().erase().to_string());
}
#[test]
fn comparison_should_ignore_extra() {
    // `ObjectRef` marks `extra` with `#[educe(Hash(ignore), PartialEq(ignore))]`,
    // so refs differing only in `extra` must compare and hash equal.
    let minimal = ObjectRef::<Pod>::new("my-pod").within("my-namespace");
    let with_extra = ObjectRef {
        extra: Extra {
            resource_version: Some("123".to_string()),
            uid: Some("638ffacd-f666-4402-ba10-7848c66ef576".to_string()),
        },
        ..minimal.clone()
    };

    // Eq and PartialEq should be unaffected by the contents of `extra`
    assert_eq!(minimal, with_extra);

    // Hash should be unaffected by the contents of `extra`
    let hash_value = |value: &ObjectRef<Pod>| {
        let mut hasher = DefaultHasher::new();
        value.hash(&mut hasher);
        hasher.finish()
    };
    assert_eq!(hash_value(&minimal), hash_value(&with_extra));
}
}

View File

@@ -0,0 +1,411 @@
use super::{dispatcher::Dispatcher, Lookup, ObjectRef};
#[cfg(feature = "unstable-runtime-subscribe")]
use crate::reflector::ReflectHandle;
use crate::{
utils::delayed_init::{self, DelayedInit},
watcher,
};
use ahash::AHashMap;
use educe::Educe;
use parking_lot::RwLock;
use std::{fmt::Debug, hash::Hash, sync::Arc};
use thiserror::Error;
type Cache<K> = Arc<RwLock<AHashMap<ObjectRef<K>, Arc<K>>>>;
/// A writable Store handle
///
/// This is exclusive since it's not safe to share a single `Store` between multiple reflectors.
/// In particular, `Restarted` events will clobber the state of other connected reflectors.
#[derive(Debug)]
pub struct Writer<K: 'static + Lookup + Clone>
where
    K::DynamicType: Eq + Hash + Clone,
{
    /// The shared cache observed by readers (`Store` handles).
    store: Cache<K>,
    /// Staging area for objects received between `Init` and `InitDone`;
    /// swapped into `store` when `InitDone` arrives.
    buffer: AHashMap<ObjectRef<K>, Arc<K>>,
    /// Dynamic type info used to build `ObjectRef` keys for stored objects.
    dyntype: K::DynamicType,
    /// One-shot initializer that marks the store ready; consumed on the
    /// first `InitDone`.
    ready_tx: Option<delayed_init::Initializer<()>>,
    /// Receiver side of the readiness signal, shared with each reader.
    ready_rx: Arc<DelayedInit<()>>,
    /// Present only for shared writers (`new_shared`); broadcasts cached
    /// objects to subscribers.
    dispatcher: Option<Dispatcher<K>>,
}
impl<K: 'static + Lookup + Clone> Writer<K>
where
    K::DynamicType: Eq + Hash + Clone,
{
    /// Creates a new Writer with the specified dynamic type.
    ///
    /// If the dynamic type is default-able (for example when writer is used with
    /// `k8s_openapi` types) you can use `Default` instead.
    pub fn new(dyntype: K::DynamicType) -> Self {
        let (ready_tx, ready_rx) = DelayedInit::new();
        Writer {
            store: Default::default(),
            buffer: Default::default(),
            dyntype,
            ready_tx: Some(ready_tx),
            ready_rx: Arc::new(ready_rx),
            // No dispatcher: `subscribe()` will return `None` for this writer.
            dispatcher: None,
        }
    }

    /// Creates a new Writer with the specified dynamic type and buffer size.
    ///
    /// When the Writer is created through `new_shared`, it will be able to
    /// be subscribed. Stored objects will be propagated to all subscribers. The
    /// buffer size is used for the underlying channel. An object is cleared
    /// from the buffer only when all subscribers have seen it.
    ///
    /// If the dynamic type is default-able (for example when writer is used with
    /// `k8s_openapi` types) you can use `Default` instead.
    #[cfg(feature = "unstable-runtime-subscribe")]
    pub fn new_shared(buf_size: usize, dyntype: K::DynamicType) -> Self {
        let (ready_tx, ready_rx) = DelayedInit::new();
        Writer {
            store: Default::default(),
            buffer: Default::default(),
            dyntype,
            ready_tx: Some(ready_tx),
            ready_rx: Arc::new(ready_rx),
            dispatcher: Some(Dispatcher::new(buf_size)),
        }
    }

    /// Return a read handle to the store
    ///
    /// Multiple read handles may be obtained, by either calling `as_reader` multiple times,
    /// or by calling `Store::clone()` afterwards.
    #[must_use]
    pub fn as_reader(&self) -> Store<K> {
        Store {
            store: self.store.clone(),
            ready_rx: self.ready_rx.clone(),
        }
    }

    /// Return a handle to a subscriber
    ///
    /// Multiple subscribe handles may be obtained, by either calling
    /// `subscribe` multiple times, or by calling `clone()`
    ///
    /// This function returns a `Some` when the [`Writer`] is constructed through
    /// [`Writer::new_shared`] or [`store_shared`], and a `None` otherwise.
    #[cfg(feature = "unstable-runtime-subscribe")]
    pub fn subscribe(&self) -> Option<ReflectHandle<K>> {
        self.dispatcher
            .as_ref()
            .map(|dispatcher| dispatcher.subscribe(self.as_reader()))
    }

    /// Applies a single watcher event to the store
    pub fn apply_watcher_event(&mut self, event: &watcher::Event<K>) {
        match event {
            watcher::Event::Apply(obj) => {
                // Upsert under the object's (type, namespace, name) key.
                let key = obj.to_object_ref(self.dyntype.clone());
                let obj = Arc::new(obj.clone());
                self.store.write().insert(key, obj);
            }
            watcher::Event::Delete(obj) => {
                let key = obj.to_object_ref(self.dyntype.clone());
                self.store.write().remove(&key);
            }
            watcher::Event::Init => {
                // A relist has begun: stage its pages in a fresh buffer so
                // readers keep seeing the previous state until InitDone.
                self.buffer = AHashMap::new();
            }
            watcher::Event::InitApply(obj) => {
                let key = obj.to_object_ref(self.dyntype.clone());
                let obj = Arc::new(obj.clone());
                self.buffer.insert(key, obj);
            }
            watcher::Event::InitDone => {
                let mut store = self.store.write();
                // Swap the buffer into the store
                std::mem::swap(&mut *store, &mut self.buffer);
                // Clear the buffer
                // This is preferred over self.buffer.clear(), as clear() will keep the allocated memory for reuse.
                // This way, the old buffer is dropped.
                self.buffer = AHashMap::new();
                // Mark as ready after the Restart, "releasing" any calls to Store::wait_until_ready()
                if let Some(ready_tx) = self.ready_tx.take() {
                    ready_tx.init(())
                }
            }
        }
    }

    /// Broadcast an event to any downstream listeners subscribed on the store
    ///
    /// Awaits the underlying broadcast, so slow subscribers apply
    /// backpressure to the caller (see the reflector backpressure tests).
    pub(crate) async fn dispatch_event(&mut self, event: &watcher::Event<K>) {
        if let Some(ref mut dispatcher) = self.dispatcher {
            match event {
                watcher::Event::Apply(obj) => {
                    let obj_ref = obj.to_object_ref(self.dyntype.clone());
                    // TODO (matei): should this take a timeout to log when backpressure has
                    // been applied for too long, e.g. 10s
                    dispatcher.broadcast(obj_ref).await;
                }
                watcher::Event::InitDone => {
                    // Collect keys first so the read lock is not held across
                    // the `await` points in the loop below.
                    let obj_refs: Vec<_> = {
                        let store = self.store.read();
                        store.keys().cloned().collect()
                    };
                    for obj_ref in obj_refs {
                        dispatcher.broadcast(obj_ref).await;
                    }
                }
                // Init, InitApply and Delete events are not broadcast.
                _ => {}
            }
        }
    }
}
// A `Writer` for a default-able dynamic type (e.g. static `k8s_openapi`
// types) can itself be constructed via `Default`.
impl<K> Default for Writer<K>
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Default + Eq + Hash + Clone,
{
    fn default() -> Self {
        // Delegate to `new` with the default dynamic type.
        Self::new(Default::default())
    }
}
/// A readable cache of Kubernetes objects of kind `K`
///
/// Cloning will produce a new reference to the same backing store.
///
/// Cannot be constructed directly since one writer handle is required,
/// use `Writer::as_reader()` instead.
#[derive(Educe)]
#[educe(Debug(bound("K: Debug, K::DynamicType: Debug")), Clone)]
pub struct Store<K: 'static + Lookup>
where
    K::DynamicType: Hash + Eq,
{
    /// Shared map of cached objects, kept up to date by the paired `Writer`.
    store: Cache<K>,
    /// Readiness signal; initialized by the `Writer` on the first `InitDone`.
    ready_rx: Arc<DelayedInit<()>>,
}
/// Error returned by [`Store::wait_until_ready`] when the corresponding
/// [`Writer`] was dropped before the store was marked ready.
#[derive(Debug, Error)]
#[error("writer was dropped before store became ready")]
pub struct WriterDropped(delayed_init::InitDropped);
impl<K: 'static + Clone + Lookup> Store<K>
where
    K::DynamicType: Eq + Hash + Clone,
{
    /// Wait for the store to be populated by Kubernetes.
    ///
    /// Note that polling this will _not_ await the source of the stream that populates the [`Writer`].
    /// The [`reflector`](crate::reflector()) stream must be awaited separately.
    ///
    /// # Errors
    /// Returns an error if the [`Writer`] was dropped before any value was written.
    pub async fn wait_until_ready(&self) -> Result<(), WriterDropped> {
        self.ready_rx.get().await.map_err(WriterDropped)
    }

    /// Retrieve a `clone()` of the entry referred to by `key`, if it is in the cache.
    ///
    /// `key.namespace` is ignored for cluster-scoped resources.
    ///
    /// Note that this is a cache and may be stale. Deleted objects may still exist in the cache
    /// despite having been deleted in the cluster, and new objects may not yet exist in the cache.
    /// If any of these are a problem for you then you should abort your reconciler and retry later.
    /// If you use `kube_rt::controller` then you can do this by returning an error and specifying a
    /// reasonable `error_policy`.
    #[must_use]
    pub fn get(&self, key: &ObjectRef<K>) -> Option<Arc<K>> {
        let store = self.store.read();
        store
            .get(key)
            // Try to erase the namespace and try again, in case the object is cluster-scoped
            .or_else(|| {
                store.get(&{
                    let mut cluster_key = key.clone();
                    cluster_key.namespace = None;
                    cluster_key
                })
            })
            // Clone to let go of the entry lock ASAP
            .cloned()
    }

    /// Return a full snapshot of the current values
    #[must_use]
    pub fn state(&self) -> Vec<Arc<K>> {
        let s = self.store.read();
        s.values().cloned().collect()
    }

    /// Retrieve a `clone()` of the entry found by the given predicate
    ///
    /// Iteration order over the cache is unspecified, so if more than one
    /// entry matches, which one is returned is arbitrary.
    #[must_use]
    pub fn find<P>(&self, predicate: P) -> Option<Arc<K>>
    where
        P: Fn(&K) -> bool,
    {
        // `.values()` iterates the cached objects directly; equivalent to the
        // former `.iter().map(|(_, k)| k)` but idiomatic.
        self.store
            .read()
            .values()
            .find(|k| predicate(k.as_ref()))
            .cloned()
    }

    /// Return the number of elements in the store
    #[must_use]
    pub fn len(&self) -> usize {
        self.store.read().len()
    }

    /// Return whether the store is empty
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.store.read().is_empty()
    }
}
/// Create a (Reader, Writer) for a `Store<K>` for a typed resource `K`
///
/// The `Writer` should be passed to a [`reflector`](crate::reflector()),
/// and the [`Store`] is a read-only handle.
#[must_use]
pub fn store<K>() -> (Store<K>, Writer<K>)
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Eq + Hash + Clone + Default,
{
    let writer = Writer::<K>::default();
    let reader = writer.as_reader();
    (reader, writer)
}
/// Create a (Reader, Writer) for a `Store<K>` for a typed resource `K`
///
/// The resulting `Writer` can be subscribed on in order to fan out events from
/// a watcher. The `Writer` should be passed to a [`reflector`](crate::reflector()),
/// and the [`Store`] is a read-only handle.
///
/// A buffer size is used for the underlying message channel. When the buffer is
/// full, backpressure will be applied by waiting for capacity.
#[must_use]
#[allow(clippy::module_name_repetitions)]
#[cfg(feature = "unstable-runtime-subscribe")]
pub fn store_shared<K>(buf_size: usize) -> (Store<K>, Writer<K>)
where
    K: Lookup + Clone + 'static,
    K::DynamicType: Eq + Hash + Clone + Default,
{
    let writer = Writer::<K>::new_shared(buf_size, Default::default());
    let reader = writer.as_reader();
    (reader, writer)
}
#[cfg(test)]
mod tests {
use super::{store, Writer};
use crate::{reflector::ObjectRef, watcher};
use k8s_openapi::api::core::v1::ConfigMap;
use kube_client::api::ObjectMeta;
#[test]
fn should_allow_getting_namespaced_object_by_namespaced_ref() {
    // A namespaced object must be retrievable through a ref carrying the
    // same namespace.
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("obj".to_string()),
            namespace: Some("ns".to_string()),
            ..Default::default()
        },
        ..Default::default()
    };
    let mut writer = Writer::default();
    writer.apply_watcher_event(&watcher::Event::Apply(cm.clone()));
    let reader = writer.as_reader();
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm)).as_deref(), Some(&cm));
}
#[test]
fn should_not_allow_getting_namespaced_object_by_clusterscoped_ref() {
    // `Store::get` falls back to erasing the *ref's* namespace, never the
    // stored key's: a namespace-less ref must not match a namespaced object.
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("obj".to_string()),
            namespace: Some("ns".to_string()),
            ..ObjectMeta::default()
        },
        ..ConfigMap::default()
    };
    let mut cluster_cm = cm.clone();
    cluster_cm.metadata.namespace = None;
    let mut store_w = Writer::default();
    store_w.apply_watcher_event(&watcher::Event::Apply(cm));
    let store = store_w.as_reader();
    assert_eq!(store.get(&ObjectRef::from_obj(&cluster_cm)), None);
}
#[test]
fn should_allow_getting_clusterscoped_object_by_clusterscoped_ref() {
    // A cluster-scoped object (no namespace) is found by a namespace-less ref.
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("obj".to_string()),
            namespace: None,
            ..Default::default()
        },
        ..Default::default()
    };
    let (reader, mut writer) = store();
    writer.apply_watcher_event(&watcher::Event::Apply(cm.clone()));
    assert_eq!(reader.get(&ObjectRef::from_obj(&cm)).as_deref(), Some(&cm));
}
#[test]
fn should_allow_getting_clusterscoped_object_by_namespaced_ref() {
    // `Store::get` retries the lookup with the ref's namespace erased, so a
    // namespaced ref still finds a cluster-scoped object of the same name.
    let cm = ConfigMap {
        metadata: ObjectMeta {
            name: Some("obj".to_string()),
            namespace: None,
            ..ObjectMeta::default()
        },
        ..ConfigMap::default()
    };
    #[allow(clippy::redundant_clone)] // false positive
    let mut nsed_cm = cm.clone();
    nsed_cm.metadata.namespace = Some("ns".to_string());
    let mut store_w = Writer::default();
    store_w.apply_watcher_event(&watcher::Event::Apply(cm.clone()));
    let store = store_w.as_reader();
    assert_eq!(store.get(&ObjectRef::from_obj(&nsed_cm)).as_deref(), Some(&cm));
}
#[test]
fn find_element_in_store() {
let cm = ConfigMap {
metadata: ObjectMeta {
name: Some("obj".to_string()),
namespace: None,
..ObjectMeta::default()
},
..ConfigMap::default()
};
let mut target_cm = cm.clone();
let (reader, mut writer) = store::<ConfigMap>();
assert!(reader.is_empty());
writer.apply_watcher_event(&watcher::Event::Apply(cm));
assert_eq!(reader.len(), 1);
assert!(reader.find(|k| k.metadata.generation == Some(1234)).is_none());
target_cm.metadata.name = Some("obj1".to_string());
target_cm.metadata.generation = Some(1234);
writer.apply_watcher_event(&watcher::Event::Apply(target_cm.clone()));
assert!(!reader.is_empty());
assert_eq!(reader.len(), 2);
let found = reader.find(|k| k.metadata.generation == Some(1234));
assert_eq!(found.as_deref(), Some(&target_cm));
}
}

587
vendor/kube-runtime/src/scheduler.rs vendored Normal file
View File

@@ -0,0 +1,587 @@
//! Delays and deduplicates [`Stream`](futures::stream::Stream) items
use futures::{stream::Fuse, Stream, StreamExt};
use hashbrown::{hash_map::RawEntryMut, HashMap};
use pin_project::pin_project;
use std::{
collections::HashSet,
hash::Hash,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use tokio::time::Instant;
use tokio_util::time::delay_queue::{self, DelayQueue};
/// A request to re-emit `message` at a given `Instant` (`run_at`).
#[derive(Debug)]
pub struct ScheduleRequest<T> {
    /// The payload to emit once the deadline is reached.
    pub message: T,
    /// The earliest instant at which `message` should be emitted.
    pub run_at: Instant,
}
/// Internal metadata for a scheduled message.
struct ScheduledEntry {
    /// When the message is due (the requested `run_at` plus the debounce period).
    run_at: Instant,
    /// Handle into the [`DelayQueue`], used to reschedule the entry in place.
    queue_key: delay_queue::Key,
}
#[pin_project(project = SchedulerProj)]
pub struct Scheduler<T, R> {
    /// Queue of already-scheduled messages.
    ///
    /// To ensure that the metadata is kept up-to-date, use `schedule_message` and
    /// `poll_pop_queue_message` rather than manipulating this directly.
    ///
    /// NOTE: `scheduled` should be considered to hold the "canonical" representation of the message.
    /// Always pull the message out of `scheduled` once it has been retrieved from `queue`.
    queue: DelayQueue<T>,
    /// Metadata for all currently scheduled messages. Used to detect duplicate messages.
    ///
    /// `scheduled` is considered to hold the "canonical" representation of the message.
    scheduled: HashMap<T, ScheduledEntry>,
    /// Messages that are scheduled to have happened, but have been held using `hold_unless`.
    pending: HashSet<T>,
    /// Incoming queue of scheduling requests.
    #[pin]
    requests: Fuse<R>,
    /// Debounce time to allow for deduplication of requests. It is added to the request's
    /// initial expiration time. If another request with the same message arrives before
    /// the request expires, it is added to the new request's expiration time. This allows
    /// for a request to be emitted, if the scheduler is "uninterrupted" for the configured
    /// debounce period. Its primary purpose is to deduplicate requests that expire instantly.
    debounce: Duration,
}
impl<T, R: Stream> Scheduler<T, R> {
    /// Build a scheduler around the `requests` stream with the given debounce period.
    fn new(requests: R, debounce: Duration) -> Self {
        Self {
            requests: requests.fuse(),
            queue: DelayQueue::new(),
            scheduled: HashMap::new(),
            pending: HashSet::new(),
            debounce,
        }
    }
}
impl<T: Hash + Eq + Clone, R> SchedulerProj<'_, T, R> {
    /// Attempt to schedule a message into the queue.
    ///
    /// If the message is already in the queue then the earlier `request.run_at` takes precedence.
    fn schedule_message(&mut self, request: ScheduleRequest<T>) {
        if self.pending.contains(&request.message) {
            // Message is already pending, so we can't even expedite it
            return;
        }
        // The effective deadline includes the debounce period; saturate to a
        // far-future instant if the addition overflows.
        let next_time = request
            .run_at
            .checked_add(*self.debounce)
            .unwrap_or_else(far_future);
        match self.scheduled.raw_entry_mut().from_key(&request.message) {
            // If new request is supposed to be earlier than the current entry's scheduled
            // time (for eg: the new request is user triggered and the current entry is the
            // reconciler's usual retry), then give priority to the new request.
            RawEntryMut::Occupied(mut old_entry) if old_entry.get().run_at >= request.run_at => {
                // Old entry will run after the new request, so replace it..
                let entry = old_entry.get_mut();
                // Reuse the existing DelayQueue slot rather than insert+remove.
                self.queue.reset_at(&entry.queue_key, next_time);
                entry.run_at = next_time;
                // Also replace the stored key: `Eq` may consider distinct payloads
                // equal (see `SingletonMessage` in the tests), and the latest wins.
                old_entry.insert_key(request.message);
            }
            RawEntryMut::Occupied(_old_entry) => {
                // Old entry will run before the new request, so ignore the new request..
            }
            RawEntryMut::Vacant(entry) => {
                // No old entry, we're free to go!
                let message = request.message.clone();
                entry.insert(request.message, ScheduledEntry {
                    run_at: next_time,
                    queue_key: self.queue.insert_at(message, next_time),
                });
            }
        }
    }

    /// Attempt to retrieve a message from the queue.
    fn poll_pop_queue_message(
        &mut self,
        cx: &mut Context<'_>,
        can_take_message: impl Fn(&T) -> bool,
    ) -> Poll<T> {
        // Prefer previously held-back messages, if the filter now accepts one.
        if let Some(msg) = self.pending.iter().find(|msg| can_take_message(*msg)).cloned() {
            return Poll::Ready(self.pending.take(&msg).unwrap());
        }
        loop {
            match self.queue.poll_expired(cx) {
                Poll::Ready(Some(msg)) => {
                    let msg = msg.into_inner();
                    // `scheduled` holds the canonical copy of the message; remove it
                    // so the metadata map stays in sync with the queue.
                    let (msg, _) = self.scheduled.remove_entry(&msg).expect(
                        "Expired message was popped from the Scheduler queue, but was not in the metadata map",
                    );
                    if can_take_message(&msg) {
                        break Poll::Ready(msg);
                    }
                    // Rejected by the filter: park it until a later poll accepts it.
                    self.pending.insert(msg);
                }
                Poll::Ready(None) | Poll::Pending => break Poll::Pending,
            }
        }
    }

    /// Attempt to retrieve a message from queue and mark it as pending.
    pub fn pop_queue_message_into_pending(&mut self, cx: &mut Context<'_>) {
        while let Poll::Ready(Some(msg)) = self.queue.poll_expired(cx) {
            let msg = msg.into_inner();
            self.scheduled.remove_entry(&msg).expect(
                "Expired message was popped from the Scheduler queue, but was not in the metadata map",
            );
            self.pending.insert(msg);
        }
    }
}
/// See [`Scheduler::hold`]
pub struct Hold<'a, T, R> {
    /// The underlying scheduler, pinned so its projected fields can be polled.
    scheduler: Pin<&'a mut Scheduler<T, R>>,
}
impl<T, R> Stream for Hold<'_, T, R>
where
    T: Eq + Hash + Clone,
    R: Stream<Item = ScheduleRequest<T>>,
{
    type Item = T;

    /// Drains every ready scheduling request, then parks all expired messages
    /// as "pending" instead of emitting them. Only terminates when `requests` does.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        let mut sched = this.scheduler.as_mut().project();

        loop {
            match sched.requests.as_mut().poll_next(cx) {
                Poll::Pending => break,
                Poll::Ready(None) => return Poll::Ready(None),
                Poll::Ready(Some(req)) => sched.schedule_message(req),
            }
        }

        sched.pop_queue_message_into_pending(cx);
        Poll::Pending
    }
}
/// See [`Scheduler::hold_unless`]
pub struct HoldUnless<'a, T, R, C> {
    /// The underlying scheduler, pinned so its projected fields can be polled.
    scheduler: Pin<&'a mut Scheduler<T, R>>,
    /// Predicate deciding whether a message may be emitted (true) or held (false).
    can_take_message: C,
}
impl<T, R, C> Stream for HoldUnless<'_, T, R, C>
where
    T: Eq + Hash + Clone,
    R: Stream<Item = ScheduleRequest<T>>,
    C: Fn(&T) -> bool + Unpin,
{
    type Item = T;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        let can_take_message = &this.can_take_message;
        let mut sched = this.scheduler.as_mut().project();

        // First ingest every scheduling request that is already available.
        loop {
            match sched.requests.as_mut().poll_next(cx) {
                Poll::Pending => break,
                Poll::Ready(None) => return Poll::Ready(None),
                Poll::Ready(Some(req)) => sched.schedule_message(req),
            }
        }

        // Then try to emit an expired message that the predicate accepts.
        sched.poll_pop_queue_message(cx, can_take_message).map(Some)
    }
}
impl<T, R> Scheduler<T, R>
where
    T: Eq + Hash + Clone,
    R: Stream<Item = ScheduleRequest<T>>,
{
    /// A filtered view of the [`Scheduler`]: messages for which `can_take_message`
    /// returns `false` stay "pending", and are delivered as soon as the predicate
    /// accepts them.
    ///
    /// The returned [`HoldUnless`] is intended to be short-lived: constructing it
    /// allocates nothing, and no messages are lost even if a fresh one is built on
    /// every call to [`poll_next`](Self::poll_next). That is often desirable, since
    /// it avoids long-lived borrows inside `can_take_message`'s closure.
    ///
    /// NOTE: `can_take_message` is fairly performance-sensitive: it generally runs
    /// for each pending message on each [`poll_next`](Self::poll_next).
    pub fn hold_unless<C: Fn(&T) -> bool>(self: Pin<&mut Self>, can_take_message: C) -> HoldUnless<T, R, C> {
        HoldUnless {
            can_take_message,
            scheduler: self,
        }
    }

    /// A restricted view of the [`Scheduler`] that never emits: every expired item
    /// is held back. Equivalent to `self.hold_unless(|_| false)`, for consumers that
    /// are temporarily unable to process the messages the [`Scheduler`] emits.
    #[must_use]
    pub fn hold(self: Pin<&mut Self>) -> Hold<T, R> {
        Hold { scheduler: self }
    }

    /// Checks whether `msg` is currently a pending message (held by `hold_unless`)
    #[cfg(test)]
    pub fn contains_pending(&self, msg: &T) -> bool {
        self.pending.contains(msg)
    }
}
impl<T, R> Stream for Scheduler<T, R>
where
    T: Eq + Hash + Clone,
    R: Stream<Item = ScheduleRequest<T>>,
{
    type Item = T;

    /// Polling the scheduler directly is equivalent to a `hold_unless` view that
    /// accepts every message.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut take_all = self.hold_unless(|_| true);
        Pin::new(&mut take_all).poll_next(cx)
    }
}
/// Stream transformer that delays and deduplicates items.
///
/// If the same item is scheduled several times before being emitted, only one copy
/// is emitted, at the earliest of the requested `Instant`s.
///
/// Items that the consumer is not ready for are kept "pending"; a request to schedule
/// an already-pending item is discarded, since the item will be emitted as soon as
/// the consumer accepts it.
///
/// The [`Scheduler`] terminates as soon as `requests` does.
pub fn scheduler<T: Eq + Hash + Clone, S: Stream<Item = ScheduleRequest<T>>>(requests: S) -> Scheduler<T, S> {
    Scheduler::new(requests, Duration::ZERO)
}
/// Stream transformer that delays and deduplicates [`Stream`] items.
///
/// The `debounce` period lets the scheduler deduplicate requests that ask to be
/// emitted instantly: an item is only emitted once the configured period passes
/// without another request for the same item arriving.
///
/// For more info, see [`scheduler()`].
#[allow(clippy::module_name_repetitions)]
pub fn debounced_scheduler<T: Eq + Hash + Clone, S: Stream<Item = ScheduleRequest<T>>>(
    requests: S,
    debounce: Duration,
) -> Scheduler<T, S> {
    Scheduler::new(requests, debounce)
}
// internal fallback for overflows in schedule times
pub(crate) fn far_future() -> Instant {
    // Mirrors a private tokio helper; switch to it if upstream ever makes it pub:
    // https://github.com/tokio-rs/tokio/blob/6fcd9c02176bf3cd570bc7de88edaa3b95ea480a/tokio/src/time/instant.rs#L57-L63
    const ROUGHLY_THIRTY_YEARS: Duration = Duration::from_secs(86400 * 365 * 30);
    Instant::now() + ROUGHLY_THIRTY_YEARS
}
// These tests drive the scheduler under tokio's paused clock (`pause`/`advance`),
// so all timing is deterministic and runs instantly.
#[cfg(test)]
mod tests {
    use crate::utils::KubeRuntimeStreamExt;

    use super::{debounced_scheduler, scheduler, ScheduleRequest};
    use educe::Educe;
    use futures::{channel::mpsc, future, poll, stream, FutureExt, SinkExt, StreamExt};
    use std::{pin::pin, task::Poll};
    use tokio::time::{advance, pause, sleep, Duration, Instant};

    /// Panics unless `poll` is `Ready`, returning the inner value.
    fn unwrap_poll<T>(poll: Poll<T>) -> T {
        if let Poll::Ready(x) = poll {
            x
        } else {
            panic!("Tried to unwrap a pending poll!")
        }
    }

    /// Message type that is always considered equal to itself
    /// (the payload is ignored by `PartialEq`/`Hash`, so requests dedupe on identity).
    #[derive(Educe, Eq, Clone, Debug)]
    #[educe(PartialEq, Hash)]
    struct SingletonMessage(#[educe(PartialEq(ignore), Hash(ignore))] u8);

    #[tokio::test]
    async fn scheduler_should_hold_and_release_items() {
        pause();
        let mut scheduler = Box::pin(scheduler(
            stream::iter(vec![ScheduleRequest {
                message: 1_u8,
                run_at: Instant::now(),
            }])
            .on_complete(sleep(Duration::from_secs(4))),
        ));
        assert!(!scheduler.contains_pending(&1));
        // A rejecting predicate parks the expired item as pending...
        assert!(poll!(scheduler.as_mut().hold_unless(|_| false).next()).is_pending());
        assert!(scheduler.contains_pending(&1));
        // ...and an accepting predicate releases it.
        assert_eq!(
            unwrap_poll(poll!(scheduler.as_mut().hold_unless(|_| true).next())).unwrap(),
            1_u8
        );
        assert!(!scheduler.contains_pending(&1));
        assert!(scheduler.as_mut().hold_unless(|_| true).next().await.is_none());
    }

    #[tokio::test]
    async fn scheduler_should_not_reschedule_pending_items() {
        pause();
        let (mut tx, rx) = mpsc::unbounded::<ScheduleRequest<u8>>();
        let mut scheduler = Box::pin(scheduler(rx));
        tx.send(ScheduleRequest {
            message: 1,
            run_at: Instant::now(),
        })
        .await
        .unwrap();
        assert!(poll!(scheduler.as_mut().hold_unless(|_| false).next()).is_pending());
        // A second request for an already-pending message must be discarded.
        tx.send(ScheduleRequest {
            message: 1,
            run_at: Instant::now(),
        })
        .await
        .unwrap();
        future::join(
            async {
                sleep(Duration::from_secs(2)).await;
                drop(tx);
            },
            async {
                // Only one copy of the message is emitted before termination.
                assert_eq!(scheduler.next().await.unwrap(), 1);
                assert!(scheduler.next().await.is_none())
            },
        )
        .await;
    }

    #[tokio::test]
    async fn scheduler_pending_message_should_not_block_head_of_line() {
        let mut scheduler = Box::pin(scheduler(
            stream::iter(vec![
                ScheduleRequest {
                    message: 1,
                    run_at: Instant::now(),
                },
                ScheduleRequest {
                    message: 2,
                    run_at: Instant::now(),
                },
            ])
            .on_complete(sleep(Duration::from_secs(2))),
        ));
        // Message 1 is rejected (parked), but message 2 still comes through.
        assert_eq!(
            scheduler.as_mut().hold_unless(|x| *x != 1).next().await.unwrap(),
            2
        );
    }

    #[tokio::test]
    async fn scheduler_should_emit_items_as_requested() {
        pause();
        let mut scheduler = pin!(scheduler(
            stream::iter(vec![
                ScheduleRequest {
                    message: 1_u8,
                    run_at: Instant::now() + Duration::from_secs(1),
                },
                ScheduleRequest {
                    message: 2,
                    run_at: Instant::now() + Duration::from_secs(3),
                },
            ])
            .on_complete(sleep(Duration::from_secs(5))),
        ));
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(2)).await;
        assert_eq!(scheduler.next().now_or_never().unwrap().unwrap(), 1);
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(2)).await;
        assert_eq!(scheduler.next().now_or_never().unwrap().unwrap(), 2);
        // Stream has terminated
        assert!(scheduler.next().await.is_none());
    }

    #[tokio::test]
    async fn scheduler_dedupe_should_keep_earlier_item() {
        pause();
        let mut scheduler = pin!(scheduler(
            stream::iter(vec![
                ScheduleRequest {
                    message: (),
                    run_at: Instant::now() + Duration::from_secs(1),
                },
                ScheduleRequest {
                    message: (),
                    run_at: Instant::now() + Duration::from_secs(3),
                },
            ])
            .on_complete(sleep(Duration::from_secs(5))),
        ));
        assert!(poll!(scheduler.next()).is_pending());
        // The item fires at the earlier deadline (1s), not the later one (3s).
        advance(Duration::from_secs(2)).await;
        scheduler.next().now_or_never().unwrap().unwrap();
        // Stream has terminated
        assert!(scheduler.next().await.is_none());
    }

    #[tokio::test]
    async fn scheduler_dedupe_should_replace_later_item() {
        pause();
        let mut scheduler = pin!(scheduler(
            stream::iter(vec![
                ScheduleRequest {
                    message: (),
                    run_at: Instant::now() + Duration::from_secs(3),
                },
                ScheduleRequest {
                    message: (),
                    run_at: Instant::now() + Duration::from_secs(1),
                },
            ])
            .on_complete(sleep(Duration::from_secs(5))),
        ));
        assert!(poll!(scheduler.next()).is_pending());
        // The later request (1s) replaces the earlier-submitted 3s deadline.
        advance(Duration::from_secs(2)).await;
        scheduler.next().now_or_never().unwrap().unwrap();
        // Stream has terminated
        assert!(scheduler.next().await.is_none());
    }

    #[tokio::test]
    async fn scheduler_dedupe_should_allow_rescheduling_emitted_item() {
        pause();
        let (mut schedule_tx, schedule_rx) = mpsc::unbounded();
        let mut scheduler = scheduler(schedule_rx);
        schedule_tx
            .send(ScheduleRequest {
                message: (),
                run_at: Instant::now() + Duration::from_secs(1),
            })
            .await
            .unwrap();
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(2)).await;
        scheduler.next().now_or_never().unwrap().unwrap();
        assert!(poll!(scheduler.next()).is_pending());
        // Once emitted, the same message may be scheduled (and emitted) again.
        schedule_tx
            .send(ScheduleRequest {
                message: (),
                run_at: Instant::now() + Duration::from_secs(1),
            })
            .await
            .unwrap();
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(2)).await;
        scheduler.next().now_or_never().unwrap().unwrap();
        assert!(poll!(scheduler.next()).is_pending());
    }

    #[tokio::test]
    async fn scheduler_should_overwrite_message_with_soonest_version() {
        pause();
        let now = Instant::now();
        let scheduler = scheduler(
            stream::iter([
                ScheduleRequest {
                    message: SingletonMessage(1),
                    run_at: now + Duration::from_secs(2),
                },
                ScheduleRequest {
                    message: SingletonMessage(2),
                    run_at: now + Duration::from_secs(1),
                },
            ])
            .on_complete(sleep(Duration::from_secs(5))),
        );
        // The sooner request wins, and its payload replaces the stored key.
        assert_eq!(scheduler.map(|msg| msg.0).collect::<Vec<_>>().await, vec![2]);
    }

    #[tokio::test]
    async fn scheduler_should_not_overwrite_message_with_later_version() {
        pause();
        let now = Instant::now();
        let scheduler = scheduler(
            stream::iter([
                ScheduleRequest {
                    message: SingletonMessage(1),
                    run_at: now + Duration::from_secs(1),
                },
                ScheduleRequest {
                    message: SingletonMessage(2),
                    run_at: now + Duration::from_secs(2),
                },
            ])
            .on_complete(sleep(Duration::from_secs(5))),
        );
        // The later request is ignored; the original payload is kept.
        assert_eq!(scheduler.map(|msg| msg.0).collect::<Vec<_>>().await, vec![1]);
    }

    #[tokio::test]
    async fn scheduler_should_add_debounce_to_a_request() {
        pause();
        let now = Instant::now();
        let (mut sched_tx, sched_rx) = mpsc::unbounded::<ScheduleRequest<SingletonMessage>>();
        let mut scheduler = debounced_scheduler(sched_rx, Duration::from_secs(2));
        sched_tx
            .send(ScheduleRequest {
                message: SingletonMessage(1),
                run_at: now,
            })
            .await
            .unwrap();
        // Not emitted at `run_at`: the 2s debounce is added on top.
        advance(Duration::from_secs(1)).await;
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(3)).await;
        assert_eq!(scheduler.next().now_or_never().unwrap().unwrap().0, 1);
    }

    #[tokio::test]
    async fn scheduler_should_dedup_message_within_debounce_period() {
        pause();
        let mut now = Instant::now();
        let (mut sched_tx, sched_rx) = mpsc::unbounded::<ScheduleRequest<SingletonMessage>>();
        let mut scheduler = debounced_scheduler(sched_rx, Duration::from_secs(3));
        sched_tx
            .send(ScheduleRequest {
                message: SingletonMessage(1),
                run_at: now,
            })
            .await
            .unwrap();
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(1)).await;
        now = Instant::now();
        // A second request within the debounce window restarts the timer and
        // replaces the payload.
        sched_tx
            .send(ScheduleRequest {
                message: SingletonMessage(2),
                run_at: now,
            })
            .await
            .unwrap();
        // Check if the initial request was indeed duplicated.
        advance(Duration::from_millis(2500)).await;
        assert!(poll!(scheduler.next()).is_pending());
        advance(Duration::from_secs(3)).await;
        assert_eq!(scheduler.next().now_or_never().unwrap().unwrap().0, 2);
        assert!(poll!(scheduler.next()).is_pending());
    }
}

View File

@@ -0,0 +1,82 @@
use std::time::{Duration, Instant};
/// A backoff policy: an iterator of delays that can be restarted from its first value.
pub trait Backoff: Iterator<Item = Duration> + Send + Sync + Unpin {
    /// Resets the internal state to the initial value.
    fn reset(&mut self);
}
impl<B: Backoff + ?Sized> Backoff for Box<B> {
    /// Forwards the reset through the box to the wrapped backoff.
    fn reset(&mut self) {
        (**self).reset()
    }
}
/// A [`Backoff`] wrapper that resets after a fixed duration has elapsed.
pub struct ResetTimerBackoff<B: Backoff> {
    /// The wrapped backoff policy.
    backoff: B,
    /// When `next()` was last called, if ever.
    last_backoff: Option<Instant>,
    /// How long `next()` may go uncalled before the wrapped policy is reset.
    reset_duration: Duration,
}
impl<B: Backoff> ResetTimerBackoff<B> {
    /// Wrap `backoff` so it restarts whenever more than `reset_duration` passes
    /// between successive `next()` calls.
    pub fn new(backoff: B, reset_duration: Duration) -> Self {
        Self {
            backoff,
            reset_duration,
            last_backoff: None,
        }
    }
}
impl<B: Backoff> Iterator for ResetTimerBackoff<B> {
    type Item = Duration;

    // NOTE: the elapsed time is measured against the *previous* `next()` call
    // (`last_backoff` is refreshed below on every call), not against the start
    // of the whole backoff sequence.
    fn next(&mut self) -> Option<Duration> {
        if let Some(last_backoff) = self.last_backoff {
            if tokio::time::Instant::now().into_std() > last_backoff + self.reset_duration {
                tracing::debug!(
                    ?last_backoff,
                    reset_duration = ?self.reset_duration,
                    "Resetting backoff, since reset duration has expired"
                );
                self.backoff.reset();
            }
        }
        // tokio's clock is used (rather than `Instant::now()` directly) so that
        // paused-clock tests can drive the timer deterministically.
        self.last_backoff = Some(tokio::time::Instant::now().into_std());
        self.backoff.next()
    }
}
impl<B: Backoff> Backoff for ResetTimerBackoff<B> {
    // Manual resets are forwarded to the wrapped backoff; the elapsed-time
    // tracking (`last_backoff`) is left untouched.
    fn reset(&mut self) {
        self.backoff.reset();
    }
}
#[cfg(test)]
mod tests {
    use tokio::time::advance;

    use super::ResetTimerBackoff;
    use crate::utils::stream_backoff::tests::LinearBackoff;
    use std::time::Duration;

    #[tokio::test]
    async fn should_reset_when_timer_expires() {
        tokio::time::pause();
        let mut backoff = ResetTimerBackoff::new(
            LinearBackoff::new(Duration::from_secs(2)),
            Duration::from_secs(60),
        );
        // 40s gaps stay below the 60s reset threshold, so the backoff keeps growing.
        assert_eq!(backoff.next(), Some(Duration::from_secs(2)));
        advance(Duration::from_secs(40)).await;
        assert_eq!(backoff.next(), Some(Duration::from_secs(4)));
        advance(Duration::from_secs(40)).await;
        assert_eq!(backoff.next(), Some(Duration::from_secs(6)));
        // An 80s gap exceeds the threshold: the backoff restarts from 2s.
        advance(Duration::from_secs(80)).await;
        assert_eq!(backoff.next(), Some(Duration::from_secs(2)));
        advance(Duration::from_secs(80)).await;
        assert_eq!(backoff.next(), Some(Duration::from_secs(2)));
    }
}

View File

@@ -0,0 +1,171 @@
use std::{fmt::Debug, sync::Mutex, task::Poll};
use futures::{channel, Future, FutureExt};
use thiserror::Error;
use tracing::trace;
/// The sending counterpart to a [`DelayedInit`]
pub struct Initializer<T>(channel::oneshot::Sender<T>);

impl<T> Initializer<T> {
    /// Sends `value` to the linked [`DelayedInit`].
    pub fn init(self, value: T) {
        // oneshot::Sender::send fails if no recipients remain, this is not really a relevant
        // case to signal for our use case
        let _ = self.0.send(value);
    }
}

impl<T> Debug for Initializer<T> {
    // Manual impl keeps `T` free of a `Debug` bound and hides channel internals.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("delayed_init::Initializer")
            .finish_non_exhaustive()
    }
}
/// A value that must be initialized by an external writer
///
/// Can be considered equivalent to a [`channel::oneshot`] channel, except for that
/// the value produced is retained for subsequent calls to [`Self::get`].
#[derive(Debug)]
pub struct DelayedInit<T> {
    // Holds the oneshot receiver until a value (or sender-drop) arrives, then
    // caches the outcome for all later readers.
    state: Mutex<ReceiverState<T>>,
}

#[derive(Debug)]
enum ReceiverState<T> {
    /// No value yet; still waiting on the paired [`Initializer`].
    Waiting(channel::oneshot::Receiver<T>),
    /// Memoized outcome: either the value, or the fact that the sender was dropped.
    Ready(Result<T, InitDropped>),
}
impl<T> DelayedInit<T> {
    /// Returns an empty `DelayedInit` that has no value, along with a linked [`Initializer`]
    #[must_use]
    pub fn new() -> (Initializer<T>, Self) {
        let (tx, rx) = channel::oneshot::channel();
        let receiver = DelayedInit {
            state: Mutex::new(ReceiverState::Waiting(rx)),
        };
        (Initializer(tx), receiver)
    }
}
impl<T: Clone + Send + Sync> DelayedInit<T> {
    /// Wait until a value is available, then return a clone of it.
    ///
    /// Once a value has been produced, every further call to `get` is guaranteed
    /// to resolve with (a clone of) that same value.
    ///
    /// # Errors
    ///
    /// Fails if the associated [`Initializer`] has been dropped before calling [`Initializer::init`].
    pub async fn get(&self) -> Result<T, InitDropped> {
        Get(self).await
    }
}
// Using a manually implemented future because we don't want to hold the lock across poll calls
// since that would mean that an unpolled writer would stall all other tasks from being able to poll it
struct Get<'a, T>(&'a DelayedInit<T>);

impl<T> Future for Get<'_, T>
where
    T: Clone,
{
    type Output = Result<T, InitDropped>;

    /// Polls the shared state, memoizing the channel's outcome on first receipt so
    /// that every subsequent waiter (and later `get` call) sees the same result.
    #[tracing::instrument(name = "DelayedInit::get", level = "trace", skip(self, cx))]
    fn poll(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        // The lock is only held within this single poll call, never across awaits.
        let mut state = self.0.state.lock().unwrap();
        // Fixed garbled trace messages ("got lock lock", duplicated wording below).
        trace!("acquired state lock");
        match &mut *state {
            ReceiverState::Waiting(rx) => {
                trace!("channel still active, polling");
                if let Poll::Ready(value) = rx.poll_unpin(cx).map_err(|_| InitDropped) {
                    trace!("got value on slow path, memoizing");
                    *state = ReceiverState::Ready(value.clone());
                    Poll::Ready(value)
                } else {
                    trace!("channel is still pending");
                    Poll::Pending
                }
            }
            ReceiverState::Ready(v) => {
                trace!("value was already initialized, reusing memoized value");
                Poll::Ready(v.clone())
            }
        }
    }
}
/// Error returned by [`DelayedInit::get`] when the [`Initializer`] was dropped
/// without ever calling [`Initializer::init`].
#[derive(Debug, Error, Clone, Copy, PartialEq, Eq)]
#[error("initializer was dropped before value was initialized")]
pub struct InitDropped;
#[cfg(test)]
mod tests {
    use std::{pin::pin, task::Poll};

    use super::DelayedInit;
    use futures::poll;
    use tracing::Level;
    use tracing_subscriber::util::SubscriberInitExt;

    /// Route TRACE-level logs to the test writer for the duration of a test.
    fn setup_tracing() -> tracing::dispatcher::DefaultGuard {
        tracing_subscriber::fmt()
            .with_max_level(Level::TRACE)
            .with_test_writer()
            .finish()
            .set_default()
    }

    #[tokio::test]
    async fn must_allow_single_reader() {
        let _tracing = setup_tracing();
        let (tx, rx) = DelayedInit::<u8>::new();
        let mut get1 = pin!(rx.get());
        assert_eq!(poll!(get1.as_mut()), Poll::Pending);
        tx.init(1);
        assert_eq!(poll!(get1), Poll::Ready(Ok(1)));
    }

    #[tokio::test]
    async fn must_allow_concurrent_readers_while_waiting() {
        let _tracing = setup_tracing();
        let (tx, rx) = DelayedInit::<u8>::new();
        let mut get1 = pin!(rx.get());
        let mut get2 = pin!(rx.get());
        let mut get3 = pin!(rx.get());
        assert_eq!(poll!(get1.as_mut()), Poll::Pending);
        assert_eq!(poll!(get2.as_mut()), Poll::Pending);
        assert_eq!(poll!(get3.as_mut()), Poll::Pending);
        tx.init(1);
        // All waiters see the same memoized value.
        assert_eq!(poll!(get1), Poll::Ready(Ok(1)));
        assert_eq!(poll!(get2), Poll::Ready(Ok(1)));
        assert_eq!(poll!(get3), Poll::Ready(Ok(1)));
    }

    #[tokio::test]
    async fn must_allow_reading_after_init() {
        let _tracing = setup_tracing();
        let (tx, rx) = DelayedInit::<u8>::new();
        let mut get1 = pin!(rx.get());
        assert_eq!(poll!(get1.as_mut()), Poll::Pending);
        tx.init(1);
        assert_eq!(poll!(get1), Poll::Ready(Ok(1)));
        // Calls made after the value landed resolve immediately.
        assert_eq!(rx.get().await, Ok(1));
        assert_eq!(rx.get().await, Ok(1));
    }

    #[tokio::test]
    async fn must_allow_concurrent_readers_in_any_order() {
        let _tracing = setup_tracing();
        let (tx, rx) = DelayedInit::<u8>::new();
        let mut get1 = pin!(rx.get());
        let mut get2 = pin!(rx.get());
        let mut get3 = pin!(rx.get());
        assert_eq!(poll!(get1.as_mut()), Poll::Pending);
        assert_eq!(poll!(get2.as_mut()), Poll::Pending);
        assert_eq!(poll!(get3.as_mut()), Poll::Pending);
        tx.init(1);
        // Completion order does not matter; each future resolves independently.
        assert_eq!(poll!(get3), Poll::Ready(Ok(1)));
        assert_eq!(poll!(get2), Poll::Ready(Ok(1)));
        assert_eq!(poll!(get1), Poll::Ready(Ok(1)));
    }
}

View File

@@ -0,0 +1,86 @@
use crate::watcher::{Error, Event};
use core::{
pin::Pin,
task::{ready, Context, Poll},
};
use futures::{Stream, TryStream};
use pin_project::pin_project;
#[pin_project]
/// Stream returned by the [`applied_objects`](super::WatchStreamExt::applied_objects) and [`touched_objects`](super::WatchStreamExt::touched_objects) method.
#[must_use = "streams do nothing unless polled"]
pub struct EventDecode<St> {
    /// The underlying stream of watcher [`Event`]s.
    #[pin]
    stream: St,
    /// Whether objects inside `Delete` events are emitted (true) or dropped (false).
    emit_deleted: bool,
}

impl<St: TryStream<Ok = Event<K>>, K> EventDecode<St> {
    /// Wrap `stream`, optionally emitting the objects contained in `Delete` events.
    pub(super) fn new(stream: St, emit_deleted: bool) -> Self {
        Self { stream, emit_deleted }
    }
}
impl<St, K> Stream for EventDecode<St>
where
    St: Stream<Item = Result<Event<K>, Error>>,
{
    type Item = Result<K, Error>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        loop {
            let next = ready!(this.stream.as_mut().poll_next(cx));
            let decoded = match next {
                Some(Ok(Event::Apply(obj) | Event::InitApply(obj))) => Some(Ok(obj)),
                Some(Ok(Event::Delete(obj))) if *this.emit_deleted => Some(Ok(obj)),
                // Deletions are swallowed unless explicitly requested.
                Some(Ok(Event::Delete(_))) => continue,
                // Marker events carry no object; skip them.
                Some(Ok(Event::Init | Event::InitDone)) => continue,
                Some(Err(err)) => Some(Err(err)),
                None => None,
            };
            return Poll::Ready(decoded);
        }
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use std::{pin::pin, task::Poll};

    use super::{Error, Event, EventDecode};
    use futures::{poll, stream, StreamExt};

    // With `emit_deleted = false`: Apply/InitApply objects and errors pass
    // through, Delete/Init/InitDone are skipped.
    #[tokio::test]
    async fn watches_applies_uses_correct_stream() {
        let data = stream::iter([
            Ok(Event::Apply(0)),
            Ok(Event::Apply(1)),
            Ok(Event::Delete(0)),
            Ok(Event::Apply(2)),
            Ok(Event::InitApply(1)),
            Ok(Event::InitApply(2)),
            Err(Error::NoResourceVersion),
            Ok(Event::Apply(2)),
        ]);
        let mut rx = pin!(EventDecode::new(data, false));
        assert!(matches!(poll!(rx.next()), Poll::Ready(Some(Ok(0)))));
        assert!(matches!(poll!(rx.next()), Poll::Ready(Some(Ok(1)))));
        // NB: no Deleted events here
        assert!(matches!(poll!(rx.next()), Poll::Ready(Some(Ok(2)))));
        // Restart comes through, currently in reverse order
        // (normally on restart they just come in alphabetical order by name)
        // this is fine though, alphabetical event order has no functional meaning in watchers
        assert!(matches!(poll!(rx.next()), Poll::Ready(Some(Ok(1)))));
        assert!(matches!(poll!(rx.next()), Poll::Ready(Some(Ok(2)))));
        // Error passed through
        assert!(matches!(
            poll!(rx.next()),
            Poll::Ready(Some(Err(Error::NoResourceVersion)))
        ));
        assert!(matches!(poll!(rx.next()), Poll::Ready(Some(Ok(2)))));
        assert!(matches!(poll!(rx.next()), Poll::Ready(None)));
    }
}

View File

@@ -0,0 +1,83 @@
use core::{
pin::Pin,
task::{Context, Poll},
};
use futures::{Stream, TryStream};
use pin_project::pin_project;
use crate::watcher::{Error, Event};
#[pin_project]
/// Stream returned by the [`modify`](super::WatchStreamExt::modify) method.
/// Modifies the [`Event`] item returned by the inner stream by calling
/// [`modify`](Event::modify()) on it.
pub struct EventModify<St, F> {
    /// The underlying stream of watcher [`Event`]s.
    #[pin]
    stream: St,
    /// Mutation applied to the object inside each successful event.
    f: F,
}

impl<St, F, K> EventModify<St, F>
where
    St: TryStream<Ok = Event<K>>,
    F: FnMut(&mut K),
{
    /// Wrap `stream`, applying `f` to each event's inner object.
    pub(super) fn new(stream: St, f: F) -> EventModify<St, F> {
        Self { stream, f }
    }
}
impl<St, F, K> Stream for EventModify<St, F>
where
    St: Stream<Item = Result<Event<K>, Error>>,
    F: FnMut(&mut K),
{
    type Item = Result<Event<K>, Error>;

    /// Forwards the inner poll, mapping every successful event through
    /// [`Event::modify`]; errors and stream termination pass through untouched.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        let next = this.stream.as_mut().poll_next(cx);
        next.map_ok(|event| event.modify(this.f))
    }
}
#[cfg(test)]
pub(crate) mod test {
    use std::{pin::pin, task::Poll};

    use super::{Error, Event, EventModify};
    use futures::{poll, stream, StreamExt};

    // The closure mutates the object inside Ok events; errors pass through untouched.
    #[tokio::test]
    async fn eventmodify_modifies_innner_value_of_event() {
        let st = stream::iter([
            Ok(Event::Apply(0)),
            Err(Error::NoResourceVersion),
            Ok(Event::InitApply(10)),
        ]);
        let mut ev_modify = pin!(EventModify::new(st, |x| {
            *x += 1;
        }));
        assert!(matches!(
            poll!(ev_modify.next()),
            Poll::Ready(Some(Ok(Event::Apply(1))))
        ));
        assert!(matches!(
            poll!(ev_modify.next()),
            Poll::Ready(Some(Err(Error::NoResourceVersion)))
        ));
        let restarted = poll!(ev_modify.next());
        assert!(matches!(
            restarted,
            Poll::Ready(Some(Ok(Event::InitApply(x)))) if x == 11
        ));
        assert!(matches!(poll!(ev_modify.next()), Poll::Ready(None)));
    }
}

251
vendor/kube-runtime/src/utils/mod.rs vendored Normal file
View File

@@ -0,0 +1,251 @@
//! Helpers for manipulating built-in streams
mod backoff_reset_timer;
pub(crate) mod delayed_init;
mod event_decode;
mod event_modify;
mod predicate;
mod reflect;
mod stream_backoff;
mod watch_ext;
pub use backoff_reset_timer::{Backoff, ResetTimerBackoff};
pub use event_decode::EventDecode;
pub use event_modify::EventModify;
pub use predicate::{predicates, Predicate, PredicateFilter};
pub use reflect::Reflect;
pub use stream_backoff::StreamBackoff;
pub use watch_ext::WatchStreamExt;
/// Deprecated type alias for `EventDecode`
// Fix: the note read "renamed to by `EventDecode`" — grammatical error in the
// user-facing deprecation message shown by rustc.
#[deprecated(
    since = "0.96.0",
    note = "renamed to `EventDecode`. This alias will be removed in 0.100.0."
)]
pub use EventDecode as EventFlatten;
use futures::{
stream::{self, Peekable},
Future, FutureExt, Stream, StreamExt, TryStream, TryStreamExt,
};
use pin_project::pin_project;
use std::{
fmt::Debug,
pin::{pin, Pin},
sync::{Arc, Mutex},
task::Poll,
};
use stream::IntoStream;
use tokio::{runtime::Handle, task::JoinHandle};
/// Allows splitting a `Stream` into several streams that each emit a disjoint subset of the input stream's items,
/// like a streaming variant of pattern matching.
///
/// NOTE: The cases MUST be reunited into the same final stream (using `futures::stream::select` or similar),
/// since cases for rejected items will *not* register wakeup correctly, and may otherwise lose items and/or deadlock.
///
/// NOTE: The whole set of cases will deadlock if there is ever an item that no live case wants to consume.
#[pin_project]
pub(crate) struct SplitCase<S: Stream, Case> {
    // Future-unaware `Mutex` is OK because it's only taken inside single poll()s.
    // `Peekable` lets each case inspect the next item before deciding to consume it.
    inner: Arc<Mutex<Peekable<S>>>,
    /// Tests whether an item from the stream should be consumed
    ///
    /// NOTE: This MUST be total over all `SplitCase`s, otherwise the input stream
    /// will get stuck deadlocked because no candidate tries to consume the item.
    should_consume_item: fn(&S::Item) -> bool,
    /// Narrows the type of the consumed type, using the same precondition as `should_consume_item`.
    ///
    /// NOTE: This MUST return `Some` if `should_consume_item` returns `true`, since we can't put
    /// an item back into the input stream once consumed.
    try_extract_item_case: fn(S::Item) -> Option<Case>,
}
impl<S, Case> Stream for SplitCase<S, Case>
where
    S: Stream + Unpin,
    S::Item: Debug,
{
    type Item = Case;

    #[allow(clippy::mut_mutex_lock)]
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = self.project();
        // this code triggers false positive in Clippy
        // https://github.com/rust-lang/rust-clippy/issues/9415
        // TODO: remove #[allow] once fix reaches nightly.
        let inner = this.inner.lock().unwrap();
        let mut inner = Pin::new(inner);
        // Peek first: we must not consume an item destined for a sibling case.
        let inner_peek = pin!(inner.as_mut().peek());
        match inner_peek.poll(cx) {
            Poll::Ready(Some(x_ref)) => {
                if (this.should_consume_item)(x_ref) {
                    // Peek returned Ready(Some), so poll_next on the same stream
                    // must yield that item immediately; anything else is a bug.
                    let item = inner.as_mut().poll_next(cx);
                    match item {
                        Poll::Ready(Some(x)) => Poll::Ready(Some((this.try_extract_item_case)(x).expect(
                            "`try_extract_item_case` returned `None` despite `should_consume_item` returning `true`",
                        ))),
                        res => panic!(
                            "Peekable::poll_next() returned {res:?} when Peekable::peek() returned Ready(Some(_))"
                        ),
                    }
                } else {
                    // Handled by another SplitCase instead
                    Poll::Pending
                }
            }
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}
/// Splits a `TryStream` into separate `Ok` and `Error` streams.
///
/// Note: This will deadlock if one branch outlives the other
#[allow(clippy::type_complexity, clippy::arc_with_non_send_sync)]
fn trystream_split_result<S>(
    stream: S,
) -> (
    SplitCase<IntoStream<S>, S::Ok>,
    SplitCase<IntoStream<S>, S::Error>,
)
where
    S: TryStream + Unpin,
    S::Ok: Debug,
    S::Error: Debug,
{
    // Both cases poll the same peekable stream behind a shared mutex
    let shared = Arc::new(Mutex::new(stream.into_stream().peekable()));
    let ok_case = SplitCase {
        inner: Arc::clone(&shared),
        should_consume_item: Result::is_ok,
        try_extract_item_case: Result::ok,
    };
    let err_case = SplitCase {
        inner: shared,
        should_consume_item: Result::is_err,
        try_extract_item_case: Result::err,
    };
    (ok_case, err_case)
}
/// Forwards Ok elements via a stream built from `make_via_stream`, while passing errors through unmodified
pub(crate) fn trystream_try_via<S1, S2>(
    input_stream: S1,
    make_via_stream: impl FnOnce(SplitCase<IntoStream<S1>, S1::Ok>) -> S2,
) -> impl Stream<Item = Result<S2::Ok, S1::Error>>
where
    S1: TryStream + Unpin,
    S2: TryStream<Error = S1::Error>,
    S1::Ok: Debug,
    S1::Error: Debug,
{
    // Split the input, route the Ok half through the user-supplied stream,
    // then merge it back together with the (re-wrapped) error half.
    let (ok_cases, err_cases) = trystream_split_result(input_stream);
    let via_stream = make_via_stream(ok_cases);
    stream::select(via_stream.into_stream(), err_cases.map(Err))
}
/// A [`JoinHandle`] that cancels the [`Future`] when dropped, rather than detaching it
pub struct CancelableJoinHandle<T> {
    /// Underlying tokio handle; aborted in this type's `Drop` impl
    inner: JoinHandle<T>,
}
impl<T> CancelableJoinHandle<T>
where
    T: Send + 'static,
{
    /// Spawn `future` onto `runtime`, returning a handle that cancels it on drop
    pub fn spawn(future: impl Future<Output = T> + Send + 'static, runtime: &Handle) -> Self {
        Self {
            inner: runtime.spawn(future),
        }
    }
}
impl<T> Drop for CancelableJoinHandle<T> {
    fn drop(&mut self) {
        // Cancel the task rather than letting it run detached
        self.inner.abort()
    }
}
impl<T> Future for CancelableJoinHandle<T> {
    type Output = T;

    fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
        self.inner.poll_unpin(cx).map(
            // JoinError => underlying future was either aborted (which should only happen when the handle is dropped), or
            // panicked (which should be propagated)
            Result::unwrap,
        )
    }
}
/// Stream combinator pairing a stream with a future that is run once the stream ends
#[pin_project]
pub(crate) struct OnComplete<S, F> {
    /// Inner stream, fused so it is not polled again after yielding `None`
    #[pin]
    stream: stream::Fuse<S>,
    /// Future polled to completion once `stream` has finished
    #[pin]
    on_complete: F,
}
impl<S: Stream, F: Future<Output = ()>> Stream for OnComplete<S, F> {
    type Item = S::Item;

    /// Forward items from the inner stream; once it finishes, drive `on_complete`
    /// to completion before reporting the end of the stream.
    fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.project();
        match this.stream.poll_next(cx) {
            // Stream finished: `Pending` stays pending, `Ready(())` ends the stream
            Poll::Ready(None) => this.on_complete.poll(cx).map(|()| None),
            other => other,
        }
    }
}
/// Internal stream extension helpers for kube-runtime
pub(crate) trait KubeRuntimeStreamExt: Stream + Sized {
    /// Runs the [`Future`] `on_complete` once the [`Stream`] finishes (by returning [`None`]).
    fn on_complete<F: Future<Output = ()>>(self, on_complete: F) -> OnComplete<Self, F> {
        OnComplete {
            // fuse so the inner stream is never polled past its end
            stream: self.fuse(),
            on_complete,
        }
    }
}
impl<S: Stream> KubeRuntimeStreamExt for S {}
#[cfg(test)]
mod tests {
    use std::convert::Infallible;

    use futures::stream::{self, StreamExt};

    use super::trystream_try_via;

    // Type-level test does not need to be executed; it only has to compile,
    // proving that `trystream_try_via` accepts borrowing closures/streams.
    #[allow(dead_code)]
    fn trystream_try_via_should_be_able_to_borrow() {
        struct WeirdComplexObject {}
        impl Drop for WeirdComplexObject {
            fn drop(&mut self) {}
        }

        let mut x = WeirdComplexObject {};
        let y = WeirdComplexObject {};
        drop(trystream_try_via(
            Box::pin(stream::once(async {
                let _ = &mut x;
                Result::<_, Infallible>::Ok(())
            })),
            |s| {
                s.map(|()| {
                    let _ = &y;
                    Ok(())
                })
            },
        ));
    }
}

View File

@@ -0,0 +1,254 @@
use crate::{reflector::ObjectRef, watcher::Error};
use core::{
pin::Pin,
task::{ready, Context, Poll},
};
use futures::Stream;
use kube_client::Resource;
use pin_project::pin_project;
use std::{
collections::{hash_map::DefaultHasher, HashMap},
hash::{Hash, Hasher},
};
/// Hash any `Hash` value with the std `DefaultHasher`
fn hash<T: Hash + ?Sized>(t: &T) -> u64 {
    let mut state = DefaultHasher::default();
    Hash::hash(t, &mut state);
    Hasher::finish(&state)
}
/// A predicate is a property hasher for Kubernetes objects, used for stream filtering
pub trait Predicate<K> {
    /// Hash the property of interest, returning `None` when the object lacks that property
    fn hash_property(&self, obj: &K) -> Option<u64>;

    /// Returns a `Predicate` that falls back to an alternate property if the first does not exist
    ///
    /// # Usage
    ///
    /// ```
    /// # use k8s_openapi::api::core::v1::Pod;
    /// use kube::runtime::{predicates, Predicate};
    /// # fn blah<K>(a: impl Predicate<K>) {}
    /// let pred = predicates::generation.fallback(predicates::resource_version);
    /// blah::<Pod>(pred);
    /// ```
    fn fallback<F: Predicate<K>>(self, f: F) -> Fallback<Self, F>
    where
        Self: Sized,
    {
        Fallback(self, f)
    }

    /// Returns a `Predicate` that combines all available hashes
    ///
    /// # Usage
    ///
    /// ```
    /// # use k8s_openapi::api::core::v1::Pod;
    /// use kube::runtime::{predicates, Predicate};
    /// # fn blah<K>(a: impl Predicate<K>) {}
    /// let pred = predicates::labels.combine(predicates::annotations);
    /// blah::<Pod>(pred);
    /// ```
    fn combine<F: Predicate<K>>(self, f: F) -> Combine<Self, F>
    where
        Self: Sized,
    {
        Combine(self, f)
    }
}
// Any plain function/closure of the right shape is itself a predicate
impl<K, F: Fn(&K) -> Option<u64>> Predicate<K> for F {
    fn hash_property(&self, obj: &K) -> Option<u64> {
        (self)(obj)
    }
}
/// See [`Predicate::fallback`]
///
/// Tries predicate `.0` first, and only consults `.1` when `.0` yields no hash.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Fallback<A, B>(pub(super) A, pub(super) B);
impl<A, B, K> Predicate<K> for Fallback<A, B>
where
    A: Predicate<K>,
    B: Predicate<K>,
{
    /// Use the primary predicate's hash, consulting the secondary only when it is absent
    fn hash_property(&self, obj: &K) -> Option<u64> {
        match self.0.hash_property(obj) {
            primary @ Some(_) => primary,
            None => self.1.hash_property(obj),
        }
    }
}
/// See [`Predicate::combine`]
///
/// Hashes the results of both predicates `.0` and `.1` together.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct Combine<A, B>(pub(super) A, pub(super) B);
impl<A, B, K> Predicate<K> for Combine<A, B>
where
    A: Predicate<K>,
    B: Predicate<K>,
{
    /// Combine both property hashes into one, propagating "no property at all"
    fn hash_property(&self, obj: &K) -> Option<u64> {
        let left = self.0.hash_property(obj);
        let right = self.1.hash_property(obj);
        if left.is_none() && right.is_none() {
            // pass on both missing properties so people can chain .fallback
            None
        } else {
            // but any other combination of properties are hashed together
            Some(hash(&(left, right)))
        }
    }
}
#[allow(clippy::pedantic)]
#[pin_project]
/// Stream returned by the [`predicate_filter`](super::WatchStreamExt::predicate_filter) method.
#[must_use = "streams do nothing unless polled"]
pub struct PredicateFilter<St, K: Resource, P: Predicate<K>> {
    /// Underlying object stream
    #[pin]
    stream: St,
    /// Property extractor deciding which objects count as "changed"
    predicate: P,
    /// Last emitted property hash per object, used to suppress repeats
    cache: HashMap<ObjectRef<K>, u64>,
}
impl<St, K, P> PredicateFilter<St, K, P>
where
    St: Stream<Item = Result<K, Error>>,
    K: Resource,
    P: Predicate<K>,
{
    /// Wrap `stream` so that objects whose `predicate` hash is unchanged are filtered out
    pub(super) fn new(stream: St, predicate: P) -> Self {
        Self {
            cache: HashMap::new(),
            stream,
            predicate,
        }
    }
}
impl<St, K, P> Stream for PredicateFilter<St, K, P>
where
    St: Stream<Item = Result<K, Error>>,
    K: Resource,
    K::DynamicType: Default + Eq + Hash,
    P: Predicate<K>,
{
    type Item = Result<K, Error>;

    /// Emit objects whose predicate hash differs from the cached value for the
    /// same [`ObjectRef`]; skip objects whose hash is unchanged. Errors pass
    /// through unmodified, and objects without an evaluable property are always
    /// emitted.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        use std::collections::hash_map::Entry;
        let mut me = self.project();
        Poll::Ready(loop {
            break match ready!(me.stream.as_mut().poll_next(cx)) {
                Some(Ok(obj)) => {
                    if let Some(val) = me.predicate.hash_property(&obj) {
                        let key = ObjectRef::from_obj(&obj);
                        // Single cache access via the entry API; the previous
                        // get / get_mut / insert sequence hashed the key up to
                        // three times per object.
                        let changed = match me.cache.entry(key) {
                            Entry::Occupied(mut entry) => {
                                let changed = *entry.get() != val;
                                entry.insert(val);
                                changed
                            }
                            Entry::Vacant(entry) => {
                                entry.insert(val);
                                // first sighting of this object always counts as changed
                                true
                            }
                        };
                        if changed {
                            Some(Ok(obj))
                        } else {
                            // unchanged: keep polling for the next item
                            continue;
                        }
                    } else {
                        // if we can't evaluate predicate, always emit K
                        Some(Ok(obj))
                    }
                }
                Some(Err(err)) => Some(Err(err)),
                None => return Poll::Ready(None),
            };
        })
    }
}
/// Predicate functions for [`WatchStreamExt::predicate_filter`](crate::WatchStreamExt::predicate_filter)
///
/// These functions just return a hash of commonly compared values,
/// to help decide whether to pass a watch event along or not.
///
/// Note: [`generation`] and [`resource_version`] return `None` when the field is unset,
/// whereas [`labels`], [`annotations`] and [`finalizers`] always hash (even empty collections).
///
/// Functional rewrite of the [controller-runtime/predicate module](https://github.com/kubernetes-sigs/controller-runtime/blob/main/pkg/predicate/predicate.go).
pub mod predicates {
    use super::hash;
    use kube_client::{Resource, ResourceExt};

    /// Hash the generation of a Resource K
    pub fn generation<K: Resource>(obj: &K) -> Option<u64> {
        obj.meta().generation.map(|g| hash(&g))
    }

    /// Hash the resource version of a Resource K
    pub fn resource_version<K: Resource>(obj: &K) -> Option<u64> {
        obj.meta().resource_version.as_ref().map(hash)
    }

    /// Hash the labels of a Resource K
    pub fn labels<K: Resource>(obj: &K) -> Option<u64> {
        Some(hash(obj.labels()))
    }

    /// Hash the annotations of a Resource K
    pub fn annotations<K: Resource>(obj: &K) -> Option<u64> {
        Some(hash(obj.annotations()))
    }

    /// Hash the finalizers of a Resource K
    pub fn finalizers<K: Resource>(obj: &K) -> Option<u64> {
        Some(hash(obj.finalizers()))
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use std::{pin::pin, task::Poll};

    use super::{predicates, Error, PredicateFilter};
    use futures::{poll, stream, FutureExt, StreamExt};
    use kube_client::Resource;
    use serde_json::json;

    #[tokio::test]
    async fn predicate_filtering_hides_equal_predicate_values() {
        use k8s_openapi::api::core::v1::Pod;
        // Pod fixture whose only varying property is `metadata.generation`
        let mkobj = |gen: i32| {
            let p: Pod = serde_json::from_value(json!({
                "apiVersion": "v1",
                "kind": "Pod",
                "metadata": {
                    "name": "blog",
                    "generation": Some(gen),
                },
                "spec": {
                    "containers": [{
                        "name": "blog",
                        "image": "clux/blog:0.1.0"
                    }],
                }
            }))
            .unwrap();
            p
        };
        let data = stream::iter([
            Ok(mkobj(1)),
            Err(Error::NoResourceVersion),
            Ok(mkobj(1)),
            Ok(mkobj(2)),
        ]);
        let mut rx = pin!(PredicateFilter::new(data, predicates::generation));

        // mkobj(1) passed through
        let first = rx.next().now_or_never().unwrap().unwrap().unwrap();
        assert_eq!(first.meta().generation, Some(1));

        // Error passed through
        assert!(matches!(
            poll!(rx.next()),
            Poll::Ready(Some(Err(Error::NoResourceVersion)))
        ));

        // (no repeat mkobj(1) - same generation)
        // mkobj(2) next
        let second = rx.next().now_or_never().unwrap().unwrap().unwrap();
        assert_eq!(second.meta().generation, Some(2));
        assert!(matches!(poll!(rx.next()), Poll::Ready(None)));
    }
}

121
vendor/kube-runtime/src/utils/reflect.rs vendored Normal file
View File

@@ -0,0 +1,121 @@
use core::{
pin::Pin,
task::{Context, Poll},
};
use futures::{Stream, TryStream};
use pin_project::pin_project;
use crate::{
reflector::store::Writer,
watcher::{Error, Event},
};
use kube_client::Resource;
/// Stream returned by the [`reflect`](super::WatchStreamExt::reflect) method
#[pin_project]
pub struct Reflect<St, K>
where
    K: Resource + Clone + 'static,
    K::DynamicType: Eq + std::hash::Hash + Clone,
{
    /// Underlying watcher event stream
    #[pin]
    stream: St,
    /// Store writer every successful event is applied to before being passed along
    writer: Writer<K>,
}
impl<St, K> Reflect<St, K>
where
St: TryStream<Ok = Event<K>>,
K: Resource + Clone,
K::DynamicType: Eq + std::hash::Hash + Clone,
{
pub(super) fn new(stream: St, writer: Writer<K>) -> Reflect<St, K> {
Self { stream, writer }
}
}
impl<St, K> Stream for Reflect<St, K>
where
    K: Resource + Clone,
    K::DynamicType: Eq + std::hash::Hash + Clone,
    St: Stream<Item = Result<Event<K>, Error>>,
{
    type Item = Result<Event<K>, Error>;

    /// Delegate to the inner stream, mirroring each successful event into the
    /// store writer before yielding it unchanged.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let me = self.project();
        match me.stream.poll_next(cx) {
            Poll::Ready(Some(Ok(event))) => {
                me.writer.apply_watcher_event(&event);
                Poll::Ready(Some(Ok(event)))
            }
            // errors, end-of-stream and pending all pass through untouched
            other => other,
        }
    }
}
#[cfg(test)]
pub(crate) mod test {
    use std::{pin::pin, task::Poll};

    use super::{Error, Event, Reflect};
    use crate::reflector;
    use futures::{poll, stream, StreamExt};
    use k8s_openapi::api::core::v1::Pod;

    /// Minimal named pod fixture
    fn testpod(name: &str) -> Pod {
        let mut pod = Pod::default();
        pod.metadata.name = Some(name.to_string());
        pod
    }

    #[tokio::test]
    async fn reflect_passes_events_through() {
        let foo = testpod("foo");
        let bar = testpod("bar");
        let st = stream::iter([
            Ok(Event::Apply(foo.clone())),
            Err(Error::NoResourceVersion),
            Ok(Event::Init),
            Ok(Event::InitApply(foo)),
            Ok(Event::InitApply(bar)),
            Ok(Event::InitDone),
        ]);
        let (reader, writer) = reflector::store();
        let mut reflect = pin!(Reflect::new(st, writer));
        // store starts empty
        assert_eq!(reader.len(), 0);

        // Apply is passed through and immediately visible in the store
        assert!(matches!(
            poll!(reflect.next()),
            Poll::Ready(Some(Ok(Event::Apply(_))))
        ));
        assert_eq!(reader.len(), 1);

        // errors pass through without touching the store
        assert!(matches!(
            poll!(reflect.next()),
            Poll::Ready(Some(Err(Error::NoResourceVersion)))
        ));
        assert_eq!(reader.len(), 1);

        assert!(matches!(
            poll!(reflect.next()),
            Poll::Ready(Some(Ok(Event::Init)))
        ));
        assert_eq!(reader.len(), 1);

        // InitApply events are buffered: the store length stays at 1 here...
        let restarted = poll!(reflect.next());
        assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::InitApply(_))))));
        assert_eq!(reader.len(), 1);

        let restarted = poll!(reflect.next());
        assert!(matches!(restarted, Poll::Ready(Some(Ok(Event::InitApply(_))))));
        assert_eq!(reader.len(), 1);

        // ...and the buffered objects only become visible at InitDone
        assert!(matches!(
            poll!(reflect.next()),
            Poll::Ready(Some(Ok(Event::InitDone)))
        ));
        assert_eq!(reader.len(), 2);

        assert!(matches!(poll!(reflect.next()), Poll::Ready(None)));
        assert_eq!(reader.len(), 2);
    }
}

View File

@@ -0,0 +1,241 @@
use std::{future::Future, pin::Pin, task::Poll};
use futures::{Stream, TryStream};
use pin_project::pin_project;
use tokio::time::{sleep, Instant, Sleep};
use crate::utils::Backoff;
/// Applies a [`Backoff`] policy to a [`Stream`]
///
/// After any [`Err`] is emitted, the stream is paused for [`Backoff::next_backoff`]. The
/// [`Backoff`] is [`reset`](`Backoff::reset`) on any [`Ok`] value.
///
/// If [`Backoff::next_backoff`] returns [`None`] then the backing stream is given up on, and closed.
#[pin_project]
pub struct StreamBackoff<S, B> {
    /// Inner fallible stream
    #[pin]
    stream: S,
    /// Policy deciding how long to pause after each error
    backoff: B,
    /// Current sleep/awake/given-up state machine
    #[pin]
    state: State,
}
#[pin_project(project = StreamBackoffStateProj)]
// It's expected to have relatively few but long-lived `StreamBackoff`s in a project, so we would rather have
// cheaper sleeps than a smaller `StreamBackoff`.
#[allow(clippy::large_enum_variant)]
enum State {
    /// Waiting out the backoff delay before polling the inner stream again
    BackingOff(#[pin] Sleep),
    /// The backoff iterator returned `None`; the stream is permanently closed
    GivenUp,
    /// Polling the inner stream normally
    Awake,
}
impl<S: TryStream, B: Backoff> StreamBackoff<S, B> {
    /// Wrap `stream` so that `backoff` pauses it after every error
    pub fn new(stream: S, backoff: B) -> Self {
        Self {
            state: State::Awake,
            stream,
            backoff,
        }
    }
}
impl<S: TryStream, B: Backoff> Stream for StreamBackoff<S, B> {
    type Item = Result<S::Ok, S::Error>;

    fn poll_next(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Option<Self::Item>> {
        let mut this = self.project();
        // First resolve the current state before touching the inner stream
        match this.state.as_mut().project() {
            StreamBackoffStateProj::BackingOff(mut backoff_sleep) => match backoff_sleep.as_mut().poll(cx) {
                Poll::Ready(()) => {
                    tracing::debug!(deadline = ?backoff_sleep.deadline(), "Backoff complete, waking up");
                    this.state.set(State::Awake)
                }
                Poll::Pending => {
                    let deadline = backoff_sleep.deadline();
                    tracing::trace!(
                        ?deadline,
                        remaining_duration = ?deadline.saturating_duration_since(Instant::now()),
                        "Still waiting for backoff sleep to complete"
                    );
                    return Poll::Pending;
                }
            },
            StreamBackoffStateProj::GivenUp => {
                tracing::debug!("Backoff has given up, stream is closed");
                return Poll::Ready(None);
            }
            StreamBackoffStateProj::Awake => {}
        }
        let next_item = this.stream.try_poll_next(cx);
        // Inspect (but do not alter) the item to decide the next state:
        // errors start a backoff sleep (or give up), anything else resets the policy.
        match &next_item {
            Poll::Ready(Some(Err(_))) => {
                if let Some(backoff_duration) = this.backoff.next() {
                    let backoff_sleep = sleep(backoff_duration);
                    tracing::debug!(
                        deadline = ?backoff_sleep.deadline(),
                        duration = ?backoff_duration,
                        "Error received, backing off"
                    );
                    this.state.set(State::BackingOff(backoff_sleep));
                } else {
                    tracing::debug!("Error received, giving up");
                    this.state.set(State::GivenUp);
                }
            }
            Poll::Ready(_) => {
                tracing::trace!("Non-error received, resetting backoff");
                this.backoff.reset();
            }
            Poll::Pending => {}
        }
        next_item
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use std::{pin::pin, task::Poll, time::Duration};

    use crate::utils::Backoff;

    use super::StreamBackoff;
    use backon::BackoffBuilder;
    use futures::{channel::mpsc, poll, stream, StreamExt};

    /// Fixed-delay backoff with a bounded retry budget, resettable for tests
    pub struct ConstantBackoff {
        inner: backon::ConstantBackoff,
        // delay/max_times are remembered so `reset` can rebuild the policy
        delay: Duration,
        max_times: usize,
    }

    impl ConstantBackoff {
        pub fn new(delay: Duration, max_times: usize) -> Self {
            Self {
                inner: backon::ConstantBuilder::default()
                    .with_delay(delay)
                    .with_max_times(max_times)
                    .build(),
                delay,
                max_times,
            }
        }
    }

    impl Iterator for ConstantBackoff {
        type Item = Duration;

        fn next(&mut self) -> Option<Duration> {
            self.inner.next()
        }
    }

    impl Backoff for ConstantBackoff {
        fn reset(&mut self) {
            // Rebuild the inner policy so the retry budget starts over
            self.inner = backon::ConstantBuilder::default()
                .with_delay(self.delay)
                .with_max_times(self.max_times)
                .build();
        }
    }

    #[tokio::test]
    async fn stream_should_back_off() {
        tokio::time::pause();
        let tick = Duration::from_secs(1);
        let rx = stream::iter([Ok(0), Ok(1), Err(2), Ok(3), Ok(4)]);
        let mut rx = pin!(StreamBackoff::new(rx, ConstantBackoff::new(tick, 10)));
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(0))));
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(1))));
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Err(2))));
        // immediately after the error the stream is backing off
        assert_eq!(poll!(rx.next()), Poll::Pending);
        tokio::time::advance(tick * 2).await;
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(3))));
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(4))));
        assert_eq!(poll!(rx.next()), Poll::Ready(None));
    }

    #[tokio::test]
    async fn backoff_time_should_update() {
        tokio::time::pause();
        let (tx, rx) = mpsc::unbounded();
        let mut rx = pin!(StreamBackoff::new(rx, LinearBackoff::new(Duration::from_secs(2))));
        tx.unbounded_send(Ok(0)).unwrap();
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(0))));
        tx.unbounded_send(Ok(1)).unwrap();
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(1))));
        tx.unbounded_send(Err(2)).unwrap();
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Err(2))));
        assert_eq!(poll!(rx.next()), Poll::Pending);
        tokio::time::advance(Duration::from_secs(3)).await;
        // backoff has elapsed, but the channel has nothing queued yet
        assert_eq!(poll!(rx.next()), Poll::Pending);
        tx.unbounded_send(Err(3)).unwrap();
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Err(3))));
        tx.unbounded_send(Ok(4)).unwrap();
        // the second consecutive error produces a longer (linear) backoff,
        // so 3s is not enough and a further 2s is required
        assert_eq!(poll!(rx.next()), Poll::Pending);
        tokio::time::advance(Duration::from_secs(3)).await;
        assert_eq!(poll!(rx.next()), Poll::Pending);
        tokio::time::advance(Duration::from_secs(2)).await;
        assert_eq!(poll!(rx.next()), Poll::Ready(Some(Ok(4))));
        assert_eq!(poll!(rx.next()), Poll::Pending);
        drop(tx);
        assert_eq!(poll!(rx.next()), Poll::Ready(None));
    }

    #[tokio::test]
    async fn backoff_should_close_when_requested() {
        assert_eq!(
            StreamBackoff::new(stream::iter([Ok(0), Ok(1), Err(2), Ok(3)]), StoppedBackoff {})
                .collect::<Vec<_>>()
                .await,
            vec![Ok(0), Ok(1), Err(2)]
        );
    }

    /// Backoff policy that gives up immediately on the first error
    struct StoppedBackoff;

    impl Backoff for StoppedBackoff {
        fn reset(&mut self) {}
    }

    impl Iterator for StoppedBackoff {
        type Item = Duration;

        fn next(&mut self) -> Option<Duration> {
            None
        }
    }

    /// Dynamic backoff policy that is still deterministic and testable
    pub struct LinearBackoff {
        interval: Duration,
        current_duration: Duration,
    }

    impl LinearBackoff {
        pub fn new(interval: Duration) -> Self {
            Self {
                interval,
                current_duration: Duration::ZERO,
            }
        }
    }

    impl Iterator for LinearBackoff {
        type Item = Duration;

        fn next(&mut self) -> Option<Duration> {
            // each consecutive error waits one `interval` longer than the last
            self.current_duration += self.interval;
            Some(self.current_duration)
        }
    }

    impl Backoff for LinearBackoff {
        fn reset(&mut self) {
            self.current_duration = Duration::ZERO
        }
    }
}

View File

@@ -0,0 +1,308 @@
use crate::{
utils::{
event_decode::EventDecode,
event_modify::EventModify,
predicate::{Predicate, PredicateFilter},
stream_backoff::StreamBackoff,
},
watcher,
};
use kube_client::Resource;
use crate::{
reflector::store::Writer,
utils::{Backoff, Reflect},
};
use crate::watcher::DefaultBackoff;
use futures::{Stream, TryStream};
/// Extension trait for streams returned by [`watcher`](watcher()) or [`reflector`](crate::reflector::reflector)
pub trait WatchStreamExt: Stream {
    /// Apply the [`DefaultBackoff`] watcher [`Backoff`] policy
    ///
    /// This is recommended for controllers that want to play nicely with the apiserver.
    fn default_backoff(self) -> StreamBackoff<Self, DefaultBackoff>
    where
        Self: TryStream + Sized,
    {
        StreamBackoff::new(self, DefaultBackoff::default())
    }

    /// Apply a specific [`Backoff`] policy to a [`Stream`] using [`StreamBackoff`]
    fn backoff<B>(self, b: B) -> StreamBackoff<Self, B>
    where
        B: Backoff,
        Self: TryStream + Sized,
    {
        StreamBackoff::new(self, b)
    }

    /// Decode a [`watcher()`] stream into a stream of applied objects
    ///
    /// All Added/Modified events are passed through, and critical errors bubble up.
    fn applied_objects<K>(self) -> EventDecode<Self>
    where
        Self: Stream<Item = Result<watcher::Event<K>, watcher::Error>> + Sized,
    {
        EventDecode::new(self, false)
    }

    /// Decode a [`watcher()`] stream into a stream of touched objects
    ///
    /// All Added/Modified/Deleted events are passed through, and critical errors bubble up.
    fn touched_objects<K>(self) -> EventDecode<Self>
    where
        Self: Stream<Item = Result<watcher::Event<K>, watcher::Error>> + Sized,
    {
        EventDecode::new(self, true)
    }

    /// Modify elements of a [`watcher()`] stream.
    ///
    /// Calls [`watcher::Event::modify()`] on every element.
    /// Stream shorthand for `stream.map_ok(|event| { event.modify(f) })`.
    ///
    /// ```no_run
    /// # use std::pin::pin;
    /// # use futures::{Stream, StreamExt, TryStreamExt};
    /// # use kube::{Api, Client, ResourceExt};
    /// # use kube_runtime::{watcher, WatchStreamExt};
    /// # use k8s_openapi::api::apps::v1::Deployment;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let deploys: Api<Deployment> = Api::all(client);
    /// let mut truncated_deploy_stream = pin!(watcher(deploys, watcher::Config::default())
    ///     .modify(|deploy| {
    ///         deploy.managed_fields_mut().clear();
    ///         deploy.status = None;
    ///     })
    ///     .applied_objects());
    ///
    /// while let Some(d) = truncated_deploy_stream.try_next().await? {
    ///     println!("Truncated Deployment: '{:?}'", serde_json::to_string(&d)?);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    fn modify<F, K>(self, f: F) -> EventModify<Self, F>
    where
        Self: Stream<Item = Result<watcher::Event<K>, watcher::Error>> + Sized,
        F: FnMut(&mut K),
    {
        EventModify::new(self, f)
    }

    /// Filter a stream based on [`predicates`](crate::predicates).
    ///
    /// This will filter out repeat calls where the predicate returns the same result.
    /// Common use case for this is to avoid repeat events for status updates
    /// by filtering on [`predicates::generation`](crate::predicates::generation).
    ///
    /// ## Usage
    /// ```no_run
    /// # use std::pin::pin;
    /// # use futures::{Stream, StreamExt, TryStreamExt};
    /// use kube::{Api, Client, ResourceExt};
    /// use kube_runtime::{watcher, WatchStreamExt, predicates};
    /// use k8s_openapi::api::apps::v1::Deployment;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let deploys: Api<Deployment> = Api::default_namespaced(client);
    /// let mut changed_deploys = pin!(watcher(deploys, watcher::Config::default())
    ///     .applied_objects()
    ///     .predicate_filter(predicates::generation));
    ///
    /// while let Some(d) = changed_deploys.try_next().await? {
    ///     println!("saw Deployment '{}' with hitherto unseen generation", d.name_any());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    fn predicate_filter<K, P>(self, predicate: P) -> PredicateFilter<Self, K, P>
    where
        Self: Stream<Item = Result<K, watcher::Error>> + Sized,
        K: Resource + 'static,
        P: Predicate<K> + 'static,
    {
        PredicateFilter::new(self, predicate)
    }

    /// Reflect a [`watcher()`] stream into a [`Store`] through a [`Writer`]
    ///
    /// Returns the stream unmodified, but passes every [`watcher::Event`] through a [`Writer`].
    /// This populates a [`Store`] as the stream is polled.
    ///
    /// ## Usage
    /// ```no_run
    /// # use futures::{Stream, StreamExt, TryStreamExt};
    /// # use std::time::Duration;
    /// # use tracing::{info, warn};
    /// use kube::{Api, Client, ResourceExt};
    /// use kube_runtime::{watcher, WatchStreamExt, reflector};
    /// use k8s_openapi::api::apps::v1::Deployment;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    ///
    /// let deploys: Api<Deployment> = Api::default_namespaced(client);
    /// let (reader, writer) = reflector::store::<Deployment>();
    ///
    /// tokio::spawn(async move {
    ///     // start polling the store once the reader is ready
    ///     reader.wait_until_ready().await.unwrap();
    ///     loop {
    ///         let names = reader.state().iter().map(|d| d.name_any()).collect::<Vec<_>>();
    ///         info!("Current {} deploys: {:?}", names.len(), names);
    ///         tokio::time::sleep(Duration::from_secs(10)).await;
    ///     }
    /// });
    ///
    /// // configure the watcher stream and populate the store while polling
    /// watcher(deploys, watcher::Config::default())
    ///     .reflect(writer)
    ///     .applied_objects()
    ///     .for_each(|res| async move {
    ///         match res {
    ///             Ok(o) => info!("saw {}", o.name_any()),
    ///             Err(e) => warn!("watcher error: {}", e),
    ///         }
    ///     })
    ///     .await;
    ///
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// [`Store`]: crate::reflector::Store
    fn reflect<K>(self, writer: Writer<K>) -> Reflect<Self, K>
    where
        Self: Stream<Item = watcher::Result<watcher::Event<K>>> + Sized,
        K: Resource + Clone + 'static,
        K::DynamicType: Eq + std::hash::Hash + Clone,
    {
        Reflect::new(self, writer)
    }

    /// Reflect a shared [`watcher()`] stream into a [`Store`] through a [`Writer`]
    ///
    /// Returns the stream unmodified, but passes every [`watcher::Event`]
    /// through a [`Writer`]. This populates a [`Store`] as the stream is
    /// polled. When the [`watcher::Event`] is not an error or a
    /// [`watcher::Event::Deleted`] then its inner object will also be
    /// propagated to subscribers.
    ///
    /// Subscribers can be created by calling [`subscribe()`] on a [`Writer`].
    /// This will return a [`ReflectHandle`] stream that should be polled
    /// independently. When the root stream is dropped, or it ends, all [`ReflectHandle`]s
    /// subscribed to the stream will also terminate after all events yielded by
    /// the root stream have been observed. This means [`ReflectHandle`] streams
    /// can still be polled after the root stream has been dropped.
    ///
    /// **NB**: This adapter requires an
    /// [`unstable`](https://github.com/kube-rs/kube/blob/main/kube-runtime/Cargo.toml#L17-L21)
    /// feature
    ///
    /// ## Warning
    ///
    /// If the root [`Stream`] is not polled, [`ReflectHandle`] streams will
    /// never receive any events. This will cause the streams to deadlock since
    /// the root stream will apply backpressure when downstream readers are not
    /// consuming events.
    ///
    ///
    /// [`Store`]: crate::reflector::Store
    /// [`subscribe()`]: crate::reflector::store::Writer::subscribe()
    /// [`Stream`]: futures::stream::Stream
    /// [`ReflectHandle`]: crate::reflector::dispatcher::ReflectHandle
    /// ## Usage
    /// ```no_run
    /// # use futures::StreamExt;
    /// # use std::time::Duration;
    /// # use tracing::{info, warn};
    /// use kube::{Api, ResourceExt};
    /// use kube_runtime::{watcher, WatchStreamExt, reflector};
    /// use k8s_openapi::api::apps::v1::Deployment;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    ///
    /// let deploys: Api<Deployment> = Api::default_namespaced(client);
    /// let subscriber_buf_sz = 100;
    /// let (reader, writer) = reflector::store_shared::<Deployment>(subscriber_buf_sz);
    /// let subscriber = writer.subscribe().unwrap();
    ///
    /// tokio::spawn(async move {
    ///     // start polling the store once the reader is ready
    ///     reader.wait_until_ready().await.unwrap();
    ///     loop {
    ///         let names = reader.state().iter().map(|d| d.name_any()).collect::<Vec<_>>();
    ///         info!("Current {} deploys: {:?}", names.len(), names);
    ///         tokio::time::sleep(Duration::from_secs(10)).await;
    ///     }
    /// });
    ///
    /// tokio::spawn(async move {
    ///     // subscriber can be used to receive applied_objects
    ///     subscriber.for_each(|obj| async move {
    ///         info!("saw in subscriber {}", &obj.name_any())
    ///     }).await;
    /// });
    ///
    /// // configure the watcher stream and populate the store while polling
    /// watcher(deploys, watcher::Config::default())
    ///     .reflect_shared(writer)
    ///     .applied_objects()
    ///     .for_each(|res| async move {
    ///         match res {
    ///             Ok(o) => info!("saw in root stream {}", o.name_any()),
    ///             Err(e) => warn!("watcher error in root stream: {}", e),
    ///         }
    ///     })
    ///     .await;
    ///
    /// # Ok(())
    /// # }
    /// ```
    #[cfg(feature = "unstable-runtime-subscribe")]
    fn reflect_shared<K>(self, writer: Writer<K>) -> impl Stream<Item = Self::Item>
    where
        Self: Stream<Item = watcher::Result<watcher::Event<K>>> + Sized,
        K: Resource + Clone + 'static,
        K::DynamicType: Eq + std::hash::Hash + Clone,
    {
        crate::reflector(writer, self)
    }
}
impl<St: ?Sized> WatchStreamExt for St where St: Stream {}
// Compile tests
#[cfg(test)]
pub(crate) mod tests {
    use super::watcher;
    use crate::{predicates, WatchStreamExt as _};
    use futures::prelude::*;
    use k8s_openapi::api::core::v1::Pod;
    use kube_client::{Api, Resource};

    /// Conjure a value of an arbitrary type; only usable in never-executed compile checks
    fn compile_type<T>() -> T {
        unimplemented!("not called - compile test only")
    }

    /// Assert (at compile time) that `x` is a sendable watcher result stream
    pub fn assert_stream<T, K>(x: T) -> T
    where
        T: Stream<Item = watcher::Result<K>> + Send,
        K: Resource + Clone + Send + 'static,
    {
        x
    }

    // not #[test] because this is only a compile check verification
    #[allow(dead_code, unused_must_use)]
    fn test_watcher_stream_type_drift() {
        let pred_watch = watcher(compile_type::<Api<Pod>>(), Default::default())
            .touched_objects()
            .predicate_filter(predicates::generation)
            .boxed();
        assert_stream(pred_watch);
    }
}

1066
vendor/kube-runtime/src/wait.rs vendored Normal file

File diff suppressed because it is too large Load Diff

967
vendor/kube-runtime/src/watcher.rs vendored Normal file
View File

@@ -0,0 +1,967 @@
//! Watches a Kubernetes Resource for changes, with error recovery
//!
//! See [`watcher`] for the primary entry point.
use crate::utils::{Backoff, ResetTimerBackoff};
use async_trait::async_trait;
use backon::BackoffBuilder;
use educe::Educe;
use futures::{stream::BoxStream, Stream, StreamExt};
use kube_client::{
api::{ListParams, Resource, ResourceExt, VersionMatch, WatchEvent, WatchParams},
core::{metadata::PartialObjectMeta, ObjectList, Selector},
error::ErrorResponse,
Api, Error as ClientErr,
};
use serde::de::DeserializeOwned;
use std::{clone::Clone, collections::VecDeque, fmt::Debug, future, time::Duration};
use thiserror::Error;
use tracing::{debug, error, warn};
/// Failure modes of a [`watcher`] stream
#[derive(Debug, Error)]
pub enum Error {
    /// The initial object list could not be performed
    #[error("failed to perform initial object list: {0}")]
    InitialListFailed(#[source] kube_client::Error),
    /// The watch call could not be started
    #[error("failed to start watching object: {0}")]
    WatchStartFailed(#[source] kube_client::Error),
    /// The apiserver returned an error response during the watch
    #[error("error returned by apiserver during watch: {0}")]
    WatchError(#[source] ErrorResponse),
    /// The established watch stream failed
    #[error("watch stream failed: {0}")]
    WatchFailed(#[source] kube_client::Error),
    /// A watch result carried no `metadata.resourceVersion` (the resource may not support watch)
    #[error("no metadata.resourceVersion in watch result (does resource support watch?)")]
    NoResourceVersion,
}
pub type Result<T, E = Error> = std::result::Result<T, E>;
#[derive(Debug, Clone)]
/// Watch events returned from the [`watcher`]
pub enum Event<K> {
    /// An object was added or modified
    Apply(K),
    /// An object was deleted
    ///
    /// NOTE: This should not be used for managing persistent state elsewhere, since
    /// events may be lost if the watcher is unavailable. Use Finalizers instead.
    Delete(K),
    /// The watch stream was restarted.
    ///
    /// A series of `InitApply` events are expected to follow until all matching objects
    /// have been listed. This event can be used to prepare a buffer for `InitApply` events.
    Init,
    /// Received an object during `Init`.
    ///
    /// Objects returned here are either from the initial stream using the `StreamingList` strategy,
    /// or from pages using the `ListWatch` strategy.
    ///
    /// These events can be passed up if having a complete set of objects is not a concern.
    /// If you need to wait for a complete set, please buffer these events until an `InitDone`.
    InitApply(K),
    /// The initialisation is complete.
    ///
    /// This can be used as a signal to replace buffered store contents atomically.
    /// No more `InitApply` events will happen until the next `Init` event.
    ///
    /// Any objects that were previously [`Apply`](Event::Apply)ed but are not listed in any of
    /// the `InitApply` events should be assumed to have been [`Delete`](Event::Delete)d.
    InitDone,
}
impl<K> Event<K> {
    /// Flattens out all objects that were added or modified in the event.
    ///
    /// `Delete` events are ignored; the marker events `Init` and `InitDone`
    /// carry no object and therefore yield an empty iterator.
    #[deprecated(
        since = "0.92.0",
        note = "unnecessary to flatten a single object. This fn will be removed in 0.96.0."
    )]
    pub fn into_iter_applied(self) -> impl Iterator<Item = K> {
        let applied = match self {
            Self::Apply(obj) | Self::InitApply(obj) => Some(obj),
            Self::Delete(_) | Self::Init | Self::InitDone => None,
        };
        applied.into_iter()
    }

    /// Flattens out all objects that were added, modified, or deleted in the event.
    ///
    /// Note that `Delete` events may be missed when restarting the stream. Use finalizers
    /// or owner references instead if you care about cleaning up external resources after
    /// deleted objects.
    #[deprecated(
        since = "0.92.0",
        note = "unnecessary to flatten a single object. This fn will be removed in 0.96.0."
    )]
    pub fn into_iter_touched(self) -> impl Iterator<Item = K> {
        let touched = match self {
            Self::Apply(obj) | Self::Delete(obj) | Self::InitApply(obj) => Some(obj),
            Self::Init | Self::InitDone => None,
        };
        touched.into_iter()
    }

    /// Map each object in an event through a mutator fn
    ///
    /// This allows for memory optimizations in watch streams.
    /// If you are chaining a watch stream into a reflector as an in memory state store,
    /// you can control the space used by each object by dropping fields.
    ///
    /// ```no_run
    /// use k8s_openapi::api::core::v1::Pod;
    /// use kube::ResourceExt;
    /// # use kube::runtime::watcher::Event;
    /// # let event: Event<Pod> = todo!();
    /// event.modify(|pod| {
    ///     pod.managed_fields_mut().clear();
    ///     pod.annotations_mut().clear();
    ///     pod.status = None;
    /// });
    /// ```
    #[must_use]
    pub fn modify(mut self, mut f: impl FnMut(&mut K)) -> Self {
        // Only the object-carrying variants are mutated; Init/InitDone are markers.
        if let Self::Apply(obj) | Self::Delete(obj) | Self::InitApply(obj) = &mut self {
            f(obj);
        }
        self
    }
}
#[derive(Educe, Default)]
#[educe(Debug)]
/// The internal finite state machine driving the [`watcher`]
enum State<K> {
    /// The Watcher is empty, and the next [`poll`](Stream::poll_next) will start the initial LIST to get all existing objects
    #[default]
    Empty,
    /// The Watcher is in the process of paginating through the initial LIST
    InitPage {
        // Opaque continuation token from the previous page (`metadata.continue`);
        // `None` means either the first page or the final page has been fetched.
        continue_token: Option<String>,
        // Objects from the current page, drained one `InitApply` event at a time.
        objects: VecDeque<K>,
        // `metadata.resourceVersion` of the most recent page; becomes the watch
        // start point once all pages are drained.
        last_bookmark: Option<String>,
    },
    /// Kubernetes 1.27 Streaming Lists
    /// The initial watch is in progress
    InitialWatch {
        #[educe(Debug(ignore))]
        stream: BoxStream<'static, kube_client::Result<WatchEvent<K>>>,
    },
    /// The initial LIST was successful, so we should move on to starting the actual watch.
    InitListed { resource_version: String },
    /// The watch is in progress, from this point we just return events from the server.
    ///
    /// If the connection is disrupted then we propagate the error but try to restart the watch stream by
    /// returning to the `InitListed` state.
    /// If we fall out of the K8s watch window then we propagate the error and fall back doing a re-list
    /// with `Empty`.
    Watching {
        // Last observed resourceVersion; used to resume the watch after disruptions.
        resource_version: String,
        #[educe(Debug(ignore))]
        stream: BoxStream<'static, kube_client::Result<WatchEvent<K>>>,
    },
}
/// Used to control whether the watcher receives the full object, or only the
/// metadata
///
/// Abstracts the two list/watch call pairs on `Api` so the watcher state machine
/// is generic over object representation (see `FullObject` and `MetaOnly`).
#[async_trait]
trait ApiMode {
    /// The object representation yielded by list/watch calls.
    type Value: Clone;
    /// Perform a LIST call with the given parameters.
    async fn list(&self, lp: &ListParams) -> kube_client::Result<ObjectList<Self::Value>>;
    /// Start a WATCH call from the given resourceVersion, returning a boxed event stream.
    async fn watch(
        &self,
        wp: &WatchParams,
        version: &str,
    ) -> kube_client::Result<BoxStream<'static, kube_client::Result<WatchEvent<Self::Value>>>>;
}
/// A wrapper around the `Api` of a `Resource` type that when used by the
/// watcher will return the entire (full) object
struct FullObject<'a, K> {
    // Borrowed client handle; the watcher owns the `Api` and lends it per step.
    api: &'a Api<K>,
}
/// Configurable list semantics for `watcher` relists
#[derive(Clone, Default, Debug, PartialEq)]
pub enum ListSemantic {
    /// List calls perform a full quorum read for most recent results
    ///
    /// Prefer this if you have strong consistency requirements. Note that this
    /// is more taxing for the apiserver and can be less scalable for the cluster.
    ///
    /// If you are observing large resource sets (such as congested `Controller` cases),
    /// you typically have a delay between the list call completing, and all the events
    /// getting processed. In such cases, it is probably worth picking `Any` over `MostRecent`,
    /// as your events are not guaranteed to be up-to-date by the time you get to them anyway.
    #[default]
    MostRecent,
    /// List calls returns cached results from apiserver
    ///
    /// This is faster and much less taxing on the apiserver, but can result
    /// in much older results than have previously been observed for `Restarted` events,
    /// particularly in HA configurations, due to partitions or stale caches.
    ///
    /// This option makes the most sense for controller usage where events have
    /// some delay between being seen by the runtime, and it being sent to the reconciler.
    Any,
}
/// Configurable watcher listwatch semantics
#[derive(Clone, Default, Debug, PartialEq)]
pub enum InitialListStrategy {
    /// List first, then watch from given resource version
    ///
    /// This is the old and default way of watching. The watcher will do a paginated list call first before watching.
    /// When using this mode, you can configure the `page_size` on the watcher.
    #[default]
    ListWatch,
    /// Kubernetes 1.27 Streaming Lists
    ///
    /// See [upstream documentation on streaming lists](https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists),
    /// and the [KEP](https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details).
    StreamingList,
}
/// Accumulates all options that can be used on the watcher invocation.
#[derive(Clone, Debug, PartialEq)]
pub struct Config {
    /// A selector to restrict the list of returned objects by their labels.
    ///
    /// Defaults to everything if `None`.
    pub label_selector: Option<String>,
    /// A selector to restrict the list of returned objects by their fields.
    ///
    /// Defaults to everything if `None`.
    pub field_selector: Option<String>,
    /// Timeout (in seconds) for the list/watch call.
    ///
    /// This limits the duration of the call, regardless of any activity or inactivity.
    /// If unset for a watch call, we will use 290s.
    /// We limit this to 295s due to [inherent watch limitations](https://github.com/kubernetes/kubernetes/issues/6513).
    pub timeout: Option<u32>,
    /// Semantics for list calls.
    ///
    /// Configures re-list for performance vs. consistency.
    ///
    /// NB: This option only has an effect for [`InitialListStrategy::ListWatch`].
    pub list_semantic: ListSemantic,
    /// Control how the watcher fetches the initial list of objects.
    ///
    /// - `ListWatch`: The watcher will fetch the initial list of objects using a list call.
    /// - `StreamingList`: The watcher will fetch the initial list of objects using a watch call.
    ///
    /// `StreamingList` is more efficient than `ListWatch`, but it requires the server to support
    /// streaming list bookmarks (opt-in feature gate in Kubernetes 1.27).
    ///
    /// See [upstream documentation on streaming lists](https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists),
    /// and the [KEP](https://github.com/kubernetes/enhancements/tree/master/keps/sig-api-machinery/3157-watch-list#design-details).
    pub initial_list_strategy: InitialListStrategy,
    /// Maximum number of objects retrieved per list operation resyncs.
    ///
    /// This can reduce the memory consumption during resyncs, at the cost of requiring more
    /// API roundtrips to complete.
    ///
    /// Defaults to 500. Note that `None` represents unbounded.
    ///
    /// NB: This option only has an effect for [`InitialListStrategy::ListWatch`].
    pub page_size: Option<u32>,
    /// Enables watch events with type "BOOKMARK".
    ///
    /// Requests watch bookmarks from the apiserver when enabled for improved watch precision and reduced list calls.
    /// This is default enabled and should generally not be turned off.
    pub bookmarks: bool,
}
impl Default for Config {
    /// Sensible defaults: no selectors, no explicit timeout, bookmarks on,
    /// paginated list-then-watch initialisation.
    fn default() -> Self {
        Self {
            label_selector: None,
            field_selector: None,
            timeout: None,
            list_semantic: ListSemantic::default(),
            initial_list_strategy: InitialListStrategy::ListWatch,
            // Default page size matches client-go's pager:
            // https://github.com/kubernetes/client-go/blob/aed71fa5cf054e1c196d67b2e21f66fd967b8ab1/tools/pager/pager.go#L31
            page_size: Some(500),
            bookmarks: true,
        }
    }
}
/// Builder interface to Config
///
/// Usage:
/// ```
/// use kube::runtime::watcher::Config;
/// let wc = Config::default()
///     .timeout(60)
///     .labels("kubernetes.io/lifecycle=spot");
/// ```
impl Config {
    /// Configure the timeout for list/watch calls
    ///
    /// This limits the duration of the call, regardless of any activity or inactivity.
    /// Defaults to 290s
    #[must_use]
    pub fn timeout(self, timeout_secs: u32) -> Self {
        Self {
            timeout: Some(timeout_secs),
            ..self
        }
    }

    /// Configure the selector to restrict the list of returned objects by their fields.
    ///
    /// Defaults to everything.
    /// Supports `=`, `==`, `!=`, and can be comma separated: `key1=value1,key2=value2`.
    /// The server only supports a limited number of field queries per type.
    #[must_use]
    pub fn fields(self, field_selector: &str) -> Self {
        Self {
            field_selector: Some(field_selector.to_string()),
            ..self
        }
    }

    /// Configure the selector to restrict the list of returned objects by their labels.
    ///
    /// Defaults to everything.
    /// Supports `=`, `==`, `!=`, and can be comma separated: `key1=value1,key2=value2`.
    #[must_use]
    pub fn labels(self, label_selector: &str) -> Self {
        Self {
            label_selector: Some(label_selector.to_string()),
            ..self
        }
    }

    /// Configure typed label selectors
    ///
    /// Configure typed selectors from [`Selector`](kube_client::core::Selector) and [`Expression`](kube_client::core::Expression) lists.
    ///
    /// ```
    /// use kube_runtime::watcher::Config;
    /// use kube_client::core::{Expression, Selector, ParseExpressionError};
    /// use k8s_openapi::apimachinery::pkg::apis::meta::v1::LabelSelector;
    /// let selector: Selector = Expression::In("env".into(), ["development".into(), "sandbox".into()].into()).into();
    /// let cfg = Config::default().labels_from(&selector);
    /// let cfg = Config::default().labels_from(&Expression::Exists("foo".into()).into());
    /// let selector: Selector = LabelSelector::default().try_into()?;
    /// let cfg = Config::default().labels_from(&selector);
    /// # Ok::<(), ParseExpressionError>(())
    ///```
    #[must_use]
    pub fn labels_from(self, selector: &Selector) -> Self {
        Self {
            label_selector: Some(selector.to_string()),
            ..self
        }
    }

    /// Sets list semantic to configure re-list performance and consistency
    ///
    /// NB: This option only has an effect for [`InitialListStrategy::ListWatch`].
    #[must_use]
    pub fn list_semantic(self, semantic: ListSemantic) -> Self {
        Self {
            list_semantic: semantic,
            ..self
        }
    }

    /// Sets list semantic to `Any` to improve list performance
    ///
    /// NB: This option only has an effect for [`InitialListStrategy::ListWatch`].
    #[must_use]
    pub fn any_semantic(self) -> Self {
        self.list_semantic(ListSemantic::Any)
    }

    /// Disables watch bookmarks to simplify watch handling
    ///
    /// This is not recommended to use with production watchers as it can cause desyncs.
    /// See [#219](https://github.com/kube-rs/kube/issues/219) for details.
    #[must_use]
    pub fn disable_bookmarks(self) -> Self {
        Self {
            bookmarks: false,
            ..self
        }
    }

    /// Limits the number of objects retrieved in each list operation during resync.
    ///
    /// This can reduce the memory consumption during resyncs, at the cost of requiring more
    /// API roundtrips to complete.
    ///
    /// NB: This option only has an effect for [`InitialListStrategy::ListWatch`].
    #[must_use]
    pub fn page_size(self, page_size: u32) -> Self {
        Self {
            page_size: Some(page_size),
            ..self
        }
    }

    /// Kubernetes 1.27 Streaming Lists
    /// Sets the initial list strategy to `StreamingList` to make use of watch bookmarks
    #[must_use]
    pub fn streaming_lists(self) -> Self {
        Self {
            initial_list_strategy: InitialListStrategy::StreamingList,
            ..self
        }
    }

    /// Converts generic `watcher::Config` structure to the instance of `ListParams` used for list requests.
    fn to_list_params(&self) -> ListParams {
        let mut lp = ListParams {
            label_selector: self.label_selector.clone(),
            field_selector: self.field_selector.clone(),
            timeout: self.timeout,
            version_match: None,
            resource_version: None,
            // The watcher handles pagination internally.
            limit: self.page_size,
            continue_token: None,
        };
        // `Any` semantics permit a cached read: request RV "0" with NotOlderThan.
        if self.list_semantic == ListSemantic::Any {
            lp.resource_version = Some("0".into());
            lp.version_match = Some(VersionMatch::NotOlderThan);
        }
        lp
    }

    /// Converts generic `watcher::Config` structure to the instance of `WatchParams` used for watch requests.
    fn to_watch_params(&self) -> WatchParams {
        // Initial events are only requested for the streaming-list strategy.
        let send_initial_events = self.initial_list_strategy == InitialListStrategy::StreamingList;
        WatchParams {
            label_selector: self.label_selector.clone(),
            field_selector: self.field_selector.clone(),
            timeout: self.timeout,
            bookmarks: self.bookmarks,
            send_initial_events,
        }
    }
}
#[async_trait]
impl<K> ApiMode for FullObject<'_, K>
where
    K: Clone + Debug + DeserializeOwned + Send + 'static,
{
    // Full-object mode yields the complete resource on every event.
    type Value = K;

    async fn list(&self, lp: &ListParams) -> kube_client::Result<ObjectList<Self::Value>> {
        self.api.list(lp).await
    }

    async fn watch(
        &self,
        wp: &WatchParams,
        version: &str,
    ) -> kube_client::Result<BoxStream<'static, kube_client::Result<WatchEvent<Self::Value>>>> {
        let stream = self.api.watch(wp, version).await?;
        Ok(stream.boxed())
    }
}
/// A wrapper around the `Api` of a `Resource` type that when used by the
/// watcher will return only the metadata associated with an object
struct MetaOnly<'a, K> {
    // Borrowed client handle; the watcher owns the `Api` and lends it per step.
    api: &'a Api<K>,
}
#[async_trait]
impl<K> ApiMode for MetaOnly<'_, K>
where
    K: Clone + Debug + DeserializeOwned + Send + 'static,
{
    // Metadata-only mode yields `PartialObjectMeta`, saving memory and bandwidth.
    type Value = PartialObjectMeta<K>;

    async fn list(&self, lp: &ListParams) -> kube_client::Result<ObjectList<Self::Value>> {
        self.api.list_metadata(lp).await
    }

    async fn watch(
        &self,
        wp: &WatchParams,
        version: &str,
    ) -> kube_client::Result<BoxStream<'static, kube_client::Result<WatchEvent<Self::Value>>>> {
        let stream = self.api.watch_metadata(wp, version).await?;
        Ok(stream.boxed())
    }
}
/// Progresses the watcher a single step, returning (event, state)
///
/// This function should be trampolined: if event == `None`
/// then the function should be called again until it returns a Some.
///
/// Each arm below handles one [`State`] variant, returning the next state plus
/// (optionally) an event/error to surface to the consumer. Errors generally
/// return to a state from which recovery is possible on the next call.
#[allow(clippy::too_many_lines)] // for now
async fn step_trampolined<A>(
    api: &A,
    wc: &Config,
    state: State<A::Value>,
) -> (Option<Result<Event<A::Value>>>, State<A::Value>)
where
    A: ApiMode,
    A::Value: Resource + 'static,
{
    match state {
        // Fresh start: either announce Init and begin paginated listing,
        // or open a streaming-list watch from resourceVersion "0".
        State::Empty => match wc.initial_list_strategy {
            InitialListStrategy::ListWatch => (Some(Ok(Event::Init)), State::InitPage {
                continue_token: None,
                objects: VecDeque::default(),
                last_bookmark: None,
            }),
            InitialListStrategy::StreamingList => match api.watch(&wc.to_watch_params(), "0").await {
                Ok(stream) => (None, State::InitialWatch { stream }),
                Err(err) => {
                    // 403s are warned (likely RBAC misconfiguration); other errors are debug-level.
                    if std::matches!(err, ClientErr::Api(ErrorResponse { code: 403, .. })) {
                        warn!("watch initlist error with 403: {err:?}");
                    } else {
                        debug!("watch initlist error: {err:?}");
                    }
                    (Some(Err(Error::WatchStartFailed(err))), State::default())
                }
            },
        },
        State::InitPage {
            continue_token,
            mut objects,
            last_bookmark,
        } => {
            // Drain the buffered page one object per step before fetching more.
            if let Some(next) = objects.pop_front() {
                return (Some(Ok(Event::InitApply(next))), State::InitPage {
                    continue_token,
                    objects,
                    last_bookmark,
                });
            }
            // check if we need to perform more pages
            if continue_token.is_none() {
                if let Some(resource_version) = last_bookmark {
                    // we have drained the last page - move on to next stage
                    return (Some(Ok(Event::InitDone)), State::InitListed { resource_version });
                }
            }
            let mut lp = wc.to_list_params();
            lp.continue_token = continue_token;
            match api.list(&lp).await {
                Ok(list) => {
                    // Empty strings are treated the same as absent values.
                    let last_bookmark = list.metadata.resource_version.filter(|s| !s.is_empty());
                    let continue_token = list.metadata.continue_.filter(|s| !s.is_empty());
                    if last_bookmark.is_none() && continue_token.is_none() {
                        return (Some(Err(Error::NoResourceVersion)), State::Empty);
                    }
                    // Buffer page here, causing us to return to this enum branch (State::InitPage)
                    // until the objects buffer has drained
                    (None, State::InitPage {
                        continue_token,
                        objects: list.items.into_iter().collect(),
                        last_bookmark,
                    })
                }
                Err(err) => {
                    if std::matches!(err, ClientErr::Api(ErrorResponse { code: 403, .. })) {
                        warn!("watch list error with 403: {err:?}");
                    } else {
                        debug!("watch list error: {err:?}");
                    }
                    (Some(Err(Error::InitialListFailed(err))), State::Empty)
                }
            }
        }
        // Streaming-list initialisation: forward Added/Modified as InitApply until
        // the server sends the "initial events end" bookmark.
        State::InitialWatch { mut stream } => {
            match stream.next().await {
                Some(Ok(WatchEvent::Added(obj) | WatchEvent::Modified(obj))) => {
                    (Some(Ok(Event::InitApply(obj))), State::InitialWatch { stream })
                }
                Some(Ok(WatchEvent::Deleted(_obj))) => {
                    // Kubernetes claims these events are impossible
                    // https://kubernetes.io/docs/reference/using-api/api-concepts/#streaming-lists
                    error!("got deleted event during initial watch. this is a bug");
                    (None, State::InitialWatch { stream })
                }
                Some(Ok(WatchEvent::Bookmark(bm))) => {
                    // The annotation below marks the end of the initial event stream
                    // per the streaming-lists KEP; subsequent events are live updates.
                    let marks_initial_end = bm.metadata.annotations.contains_key("k8s.io/initial-events-end");
                    if marks_initial_end {
                        (Some(Ok(Event::InitDone)), State::Watching {
                            resource_version: bm.metadata.resource_version,
                            stream,
                        })
                    } else {
                        (None, State::InitialWatch { stream })
                    }
                }
                Some(Ok(WatchEvent::Error(err))) => {
                    // HTTP GONE, means we have desynced and need to start over and re-list :(
                    let new_state = if err.code == 410 {
                        State::default()
                    } else {
                        State::InitialWatch { stream }
                    };
                    if err.code == 403 {
                        warn!("watcher watchevent error 403: {err:?}");
                    } else {
                        debug!("error watchevent error: {err:?}");
                    }
                    (Some(Err(Error::WatchError(err))), new_state)
                }
                Some(Err(err)) => {
                    if std::matches!(err, ClientErr::Api(ErrorResponse { code: 403, .. })) {
                        warn!("watcher error 403: {err:?}");
                    } else {
                        debug!("watcher error: {err:?}");
                    }
                    (Some(Err(Error::WatchFailed(err))), State::InitialWatch { stream })
                }
                // Stream ended before initialisation completed: start over.
                None => (None, State::default()),
            }
        }
        // List completed; open the long-running watch from the final bookmark.
        State::InitListed { resource_version } => {
            match api.watch(&wc.to_watch_params(), &resource_version).await {
                Ok(stream) => (None, State::Watching {
                    resource_version,
                    stream,
                }),
                Err(err) => {
                    if std::matches!(err, ClientErr::Api(ErrorResponse { code: 403, .. })) {
                        warn!("watch initlist error with 403: {err:?}");
                    } else {
                        debug!("watch initlist error: {err:?}");
                    }
                    // Retry the watch from the same resourceVersion on the next step.
                    (Some(Err(Error::WatchStartFailed(err))), State::InitListed {
                        resource_version,
                    })
                }
            }
        }
        // Steady state: forward events, tracking the latest resourceVersion so
        // the watch can be resumed after disruptions.
        State::Watching {
            resource_version,
            mut stream,
        } => match stream.next().await {
            Some(Ok(WatchEvent::Added(obj) | WatchEvent::Modified(obj))) => {
                let resource_version = obj.resource_version().unwrap_or_default();
                if resource_version.is_empty() {
                    (Some(Err(Error::NoResourceVersion)), State::default())
                } else {
                    (Some(Ok(Event::Apply(obj))), State::Watching {
                        resource_version,
                        stream,
                    })
                }
            }
            Some(Ok(WatchEvent::Deleted(obj))) => {
                let resource_version = obj.resource_version().unwrap_or_default();
                if resource_version.is_empty() {
                    (Some(Err(Error::NoResourceVersion)), State::default())
                } else {
                    (Some(Ok(Event::Delete(obj))), State::Watching {
                        resource_version,
                        stream,
                    })
                }
            }
            // Bookmarks only advance the resourceVersion; no event is surfaced.
            Some(Ok(WatchEvent::Bookmark(bm))) => (None, State::Watching {
                resource_version: bm.metadata.resource_version,
                stream,
            }),
            Some(Ok(WatchEvent::Error(err))) => {
                // HTTP GONE, means we have desynced and need to start over and re-list :(
                let new_state = if err.code == 410 {
                    State::default()
                } else {
                    State::Watching {
                        resource_version,
                        stream,
                    }
                };
                if err.code == 403 {
                    warn!("watcher watchevent error 403: {err:?}");
                } else {
                    debug!("error watchevent error: {err:?}");
                }
                (Some(Err(Error::WatchError(err))), new_state)
            }
            Some(Err(err)) => {
                if std::matches!(err, ClientErr::Api(ErrorResponse { code: 403, .. })) {
                    warn!("watcher error 403: {err:?}");
                } else {
                    debug!("watcher error: {err:?}");
                }
                (Some(Err(Error::WatchFailed(err))), State::Watching {
                    resource_version,
                    stream,
                })
            }
            // Stream ended gracefully: restart the watch from the last seen resourceVersion.
            None => (None, State::InitListed { resource_version }),
        },
    }
}
/// Trampoline helper for `step_trampolined`
///
/// Repeatedly advances the state machine until a step produces an event to
/// surface, then returns that event together with the successor state.
async fn step<A>(
    api: &A,
    config: &Config,
    mut state: State<A::Value>,
) -> (Result<Event<A::Value>>, State<A::Value>)
where
    A: ApiMode,
    A::Value: Resource + 'static,
{
    loop {
        let (maybe_event, next_state) = step_trampolined(api, config, state).await;
        if let Some(event) = maybe_event {
            return (event, next_state);
        }
        state = next_state;
    }
}
/// Watches a Kubernetes Resource for changes continuously
///
/// Compared to [`Api::watch`], this automatically tries to recover the stream upon errors.
///
/// Errors from the underlying watch are propagated, after which the stream will go into recovery mode on the next poll.
/// You can apply your own backoff by not polling the stream for a duration after errors.
/// Keep in mind that some [`TryStream`](futures::TryStream) combinators (such as
/// [`try_for_each`](futures::TryStreamExt::try_for_each) and [`try_concat`](futures::TryStreamExt::try_concat))
/// will terminate eagerly as soon as they receive an [`Err`].
///
/// The events are intended to provide a safe input interface for a state store like a [`reflector`].
/// Direct users may want to use [`WatchStreamExt`] for higher-level constructs.
///
/// ```no_run
/// use kube::{
///   api::{Api, ResourceExt}, Client,
///   runtime::{watcher, WatchStreamExt}
/// };
/// use k8s_openapi::api::core::v1::Pod;
/// use futures::TryStreamExt;
/// #[tokio::main]
/// async fn main() -> Result<(), watcher::Error> {
///     let client = Client::try_default().await.unwrap();
///     let pods: Api<Pod> = Api::namespaced(client, "apps");
///
///     watcher(pods, watcher::Config::default()).applied_objects()
///         .try_for_each(|p| async move {
///             println!("Applied: {}", p.name_any());
///             Ok(())
///         })
///         .await?;
///     Ok(())
/// }
/// ```
/// [`WatchStreamExt`]: super::WatchStreamExt
/// [`reflector`]: super::reflector::reflector
/// [`Api::watch`]: kube_client::Api::watch
///
/// # Recovery
///
/// The stream will attempt to be recovered on the next poll after an [`Err`] is returned.
/// This will normally happen immediately, but you can use [`StreamBackoff`](crate::utils::StreamBackoff)
/// to introduce an artificial delay. [`default_backoff`] returns a suitable default set of parameters.
///
/// If the watch connection is interrupted, then `watcher` will attempt to restart the watch using the last
/// [resource version](https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes)
/// that we have seen on the stream. If this is successful then the stream is simply resumed from where it left off.
/// If this fails because the resource version is no longer valid then we start over with a new stream, starting with
/// an [`Event::Init`]. The internals mechanics of recovery should be considered an implementation detail.
pub fn watcher<K: Resource + Clone + DeserializeOwned + Debug + Send + 'static>(
    api: Api<K>,
    watcher_config: Config,
) -> impl Stream<Item = Result<Event<K>>> + Send {
    // Thread (api, config, state) through `unfold`, emitting one event per step.
    let seed = (api, watcher_config, State::default());
    futures::stream::unfold(seed, |(api, config, state)| async move {
        let mode = FullObject { api: &api };
        let (event, next_state) = step(&mode, &config, state).await;
        Some((event, (api, config, next_state)))
    })
}
/// Watches a Kubernetes Resource for changes continuously and receives only the
/// metadata
///
/// Compared to [`Api::watch_metadata`], this automatically tries to recover the stream upon errors.
///
/// Errors from the underlying watch are propagated, after which the stream will go into recovery mode on the next poll.
/// You can apply your own backoff by not polling the stream for a duration after errors.
/// Keep in mind that some [`TryStream`](futures::TryStream) combinators (such as
/// [`try_for_each`](futures::TryStreamExt::try_for_each) and [`try_concat`](futures::TryStreamExt::try_concat))
/// will terminate eagerly as soon as they receive an [`Err`].
///
/// The events are intended to provide a safe input interface for a state store like a [`reflector`].
/// Direct users may want to use [`WatchStreamExt`] for higher-level constructs.
///
/// ```no_run
/// use kube::{
///   api::{Api, ResourceExt}, Client,
///   runtime::{watcher, metadata_watcher, WatchStreamExt}
/// };
/// use k8s_openapi::api::core::v1::Pod;
/// use futures::TryStreamExt;
/// #[tokio::main]
/// async fn main() -> Result<(), watcher::Error> {
///     let client = Client::try_default().await.unwrap();
///     let pods: Api<Pod> = Api::namespaced(client, "apps");
///
///     metadata_watcher(pods, watcher::Config::default()).applied_objects()
///         .try_for_each(|p| async move {
///             println!("Applied: {}", p.name_any());
///             Ok(())
///         })
///         .await?;
///     Ok(())
/// }
/// ```
/// [`WatchStreamExt`]: super::WatchStreamExt
/// [`reflector`]: super::reflector::reflector
/// [`Api::watch`]: kube_client::Api::watch
///
/// # Recovery
///
/// The stream will attempt to be recovered on the next poll after an [`Err`] is returned.
/// This will normally happen immediately, but you can use [`StreamBackoff`](crate::utils::StreamBackoff)
/// to introduce an artificial delay. [`default_backoff`] returns a suitable default set of parameters.
///
/// If the watch connection is interrupted, then `watcher` will attempt to restart the watch using the last
/// [resource version](https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes)
/// that we have seen on the stream. If this is successful then the stream is simply resumed from where it left off.
/// If this fails because the resource version is no longer valid then we start over with a new stream, starting with
/// an [`Event::Init`]. The internals mechanics of recovery should be considered an implementation detail.
#[allow(clippy::module_name_repetitions)]
pub fn metadata_watcher<K: Resource + Clone + DeserializeOwned + Debug + Send + 'static>(
    api: Api<K>,
    watcher_config: Config,
) -> impl Stream<Item = Result<Event<PartialObjectMeta<K>>>> + Send {
    // Identical driver to `watcher`, but in metadata-only mode via `MetaOnly`.
    let seed = (api, watcher_config, State::default());
    futures::stream::unfold(seed, |(api, config, state)| async move {
        let mode = MetaOnly { api: &api };
        let (event, next_state) = step(&mode, &config, state).await;
        Some((event, (api, config, next_state)))
    })
}
/// Watch a single named object for updates
///
/// Emits `None` if the object is deleted (or not found), and `Some` if an object is updated (or created/found).
///
/// Often invoked indirectly via [`await_condition`](crate::wait::await_condition()).
///
/// ## Scope Warning
///
/// When using this with an `Api::all` on namespaced resources there is a chance of duplicated names.
/// To avoid getting confusing / wrong answers for this, use `Api::namespaced` bound to a specific namespace
/// when watching for transitions to namespaced objects.
pub fn watch_object<K: Resource + Clone + DeserializeOwned + Debug + Send + 'static>(
    api: Api<K>,
    name: &str,
) -> impl Stream<Item = Result<Option<K>>> + Send {
    // filtering by object name in given scope, so there's at most one matching object
    // footgun: Api::all may generate events from namespaced objects with the same name in different namespaces
    let fields = format!("metadata.name={name}");
    let config = Config::default().fields(&fields);
    // The `obj_seen` flag tracks whether the object appeared during each
    // Init / InitApply / InitDone sequence. If a sequence completes without
    // seeing the object, it is treated as deleted and `None` is emitted at
    // InitDone — covering both "already gone at startup" and "Delete event
    // lost", since K8s events aren't guaranteed delivery.
    watcher(api, config)
        .scan(false, |obj_seen, event| {
            match &event {
                Ok(Event::Init) => *obj_seen = false,
                Ok(Event::InitApply(_)) => *obj_seen = true,
                _ => {}
            }
            future::ready(Some((*obj_seen, event)))
        })
        .filter_map(|(obj_seen, event)| async move {
            match event {
                // Pass up `Some` for Found / Updated
                Ok(Event::Apply(obj) | Event::InitApply(obj)) => Some(Ok(Some(obj))),
                // Pass up `None` for Deleted
                Ok(Event::Delete(_)) => Some(Ok(None)),
                // Pass up `None` if the object wasn't seen in the initial list
                Ok(Event::InitDone) if !obj_seen => Some(Ok(None)),
                // Ignore marker events
                Ok(Event::Init | Event::InitDone) => None,
                // Bubble up errors
                Err(err) => Some(Err(err)),
            }
        })
}
/// Exponential backoff strategy backed by the `backon` crate.
///
/// Wraps a [`backon::ExponentialBackoff`] iterator alongside the builder that
/// produced it, so the sequence can be restarted via `Backoff::reset`.
pub struct ExponentialBackoff {
    // Current delay iterator; consumed by the `Iterator` impl below.
    inner: backon::ExponentialBackoff,
    // Retained builder used to rebuild `inner` on reset.
    builder: backon::ExponentialBuilder,
}
impl ExponentialBackoff {
    /// Construct a backoff strategy with the given delay bounds.
    ///
    /// Delays start at `min_delay` and grow by `factor` up to `max_delay`,
    /// with no cap on the number of retries. When `enable_jitter` is set,
    /// jitter is requested from the underlying builder.
    fn new(min_delay: Duration, max_delay: Duration, factor: f32, enable_jitter: bool) -> Self {
        let mut builder = backon::ExponentialBuilder::default()
            .with_min_delay(min_delay)
            .with_max_delay(max_delay)
            .with_factor(factor)
            .without_max_times();
        if enable_jitter {
            // BUG FIX: `with_jitter` consumes the builder and returns a new one;
            // the previous code discarded its return value, so jitter was never
            // actually applied. Reassign to keep the jitter-enabled builder.
            builder = builder.with_jitter();
        }
        Self {
            inner: builder.build(),
            builder,
        }
    }
}
impl Backoff for ExponentialBackoff {
    /// Restart the delay sequence from the beginning by rebuilding the iterator.
    fn reset(&mut self) {
        self.inner = self.builder.build();
    }
}
impl Iterator for ExponentialBackoff {
    type Item = Duration;

    /// Yield the next delay from the underlying backon iterator.
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.next()
    }
}
impl From<backon::ExponentialBuilder> for ExponentialBackoff {
    /// Adopt a pre-configured `backon` builder, keeping it for later resets.
    fn from(builder: backon::ExponentialBuilder) -> Self {
        Self {
            inner: builder.build(),
            builder,
        }
    }
}
/// Default watcher backoff inspired by Kubernetes' client-go.
///
/// The parameters currently optimize for being kind to struggling apiservers.
/// The exact parameters are taken from
/// [client-go's reflector source](https://github.com/kubernetes/client-go/blob/980663e185ab6fc79163b1c2565034f6d58368db/tools/cache/reflector.go#L177-L181)
/// and should not be considered stable.
///
/// This struct implements [`Backoff`] and is the default strategy used
/// when calling `WatchStreamExt::default_backoff`. If you need to create
/// this manually then [`DefaultBackoff::default`] can be used.
pub struct DefaultBackoff(Strategy);
// Exponential backoff wrapped in a timer that resets the sequence after a
// period without errors (see `Default for DefaultBackoff` for the parameters).
type Strategy = ResetTimerBackoff<ExponentialBackoff>;
impl Default for DefaultBackoff {
    /// Construct the client-go-inspired default: 800ms initial delay, 30s cap,
    /// factor 2 with jitter, and a reset after 120s without retries.
    fn default() -> Self {
        let exponential =
            ExponentialBackoff::new(Duration::from_millis(800), Duration::from_secs(30), 2.0, true);
        Self(ResetTimerBackoff::new(exponential, Duration::from_secs(120)))
    }
}
impl Iterator for DefaultBackoff {
    type Item = Duration;

    /// Delegate to the wrapped reset-timer strategy.
    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }
}
impl Backoff for DefaultBackoff {
    /// Delegate reset to the wrapped reset-timer strategy.
    fn reset(&mut self) {
        self.0.reset();
    }
}

Binary file not shown.

File diff suppressed because one or more lines are too long