chore: checkpoint before Python removal

This commit is contained in:
2026-03-26 22:33:59 +00:00
parent 683cec9307
commit e568ddf82a
29972 changed files with 11269302 additions and 2 deletions

View File

@@ -0,0 +1 @@
{"files":{".cargo_vcs_info.json":"7c582ed49063fd0422ebee8eb9a987fc213b38738e8c8d92ff97f3f30874c5bd","Cargo.lock":"4b9851c68030b334a9d7a7c3430d2f537bef9b6979dc035f4c934b834d5e4fdf","Cargo.toml":"67312454e8b75b9d14a639351cfafa545df002079bb754a32cac7d1779af013b","Cargo.toml.orig":"515b86bdb525d747d2d1532f855f8bbd434eec77c2bc0680c26d636f4b6b2e9f","README.md":"5ad7386257c598ba2bfb874eefe5e53c918c94343f828238d2c58b95903e8bfe","src/api/core_methods.rs":"202aee3c30021e9b4fd4dc2ff26aca5dc3ae4664bd02ab696d018fdc96e28f8e","src/api/entry.rs":"fb0ae325195d5a7d017017abe087173e44c2e268e9fb7f595473ce06f449e6e3","src/api/mod.rs":"721974dd77ffae5f19416fb0e325c296927ea850d1c653198ab54125a5c05fa0","src/api/portforward.rs":"337df577b76315860ebf7b7e718f1c592c78d1c06d25dfcee3947700f0cd32cf","src/api/remote_command.rs":"75dae889737c8973c9c7e0e84e231be650a9bfdd3c64bdcd78531e2f19bde205","src/api/subresource.rs":"63f57f51975c099d0b673448a49df03ec540065bd82b652a38336cfc8e1ac843","src/api/util/csr.rs":"7c220fe59b2397f2bd10bcc2ab098072b15737228baedb8abcfa65c8e435aa3d","src/api/util/mod.rs":"4f2e7440021d34938b43c46a2264bf4c874ae804e9da8a4756fe123afd560685","src/client/auth/mod.rs":"edcf47bb6fced54b740e49ed4dfa7e704eeb0e4e93761c215dc2a41294b17668","src/client/auth/oauth.rs":"88a58816c6d8265e93ad83511f20855f96da220e2d8465c5b4a046f49e29a4d1","src/client/auth/oidc.rs":"c4f64b5d6b968e6d5279cfd95df036927dab7ad5232b5ef48d4848c16c2db78a","src/client/body.rs":"7eed80cde1180efb0a9f12e6ad898ad4aa0335cf65d9cb930852ace9a02b82a3","src/client/builder.rs":"427f277cec7dd8dd54adb029aa01e52cdeb66ff5579c152776afa2b538509938","src/client/client_ext.rs":"a119497466efb09663ccb09e9bd4c741a4e16fef423e1854e79d5cff2f021e53","src/client/config_ext.rs":"99f267eeb9e2474fa98232905c4b59dc0de2414fe37729e57b89c3ab674e91b4","src/client/kubelet_debug.rs":"12efa47782b9c9d7dc291e67b42b2b3a657900d3c9417eae8ccc9a4eb4275506","src/client/middleware/base_uri.rs":"ee9719423f888ca7134ad7a9b8a56c402be8b726a8bae1cddca2c00bbe901d73","src/clien
t/middleware/extra_headers.rs":"956711fcf36dd0ee5dc0be9dc740f463760051370cab3226d75a1cd42804b78f","src/client/middleware/mod.rs":"bc3660c35488e408c71dc8d41d0a86849c03016741a2642f7f3159f34ef0a87b","src/client/mod.rs":"37739e73bb22310c8a52cdbc852c1b05bd957c684a3da89695367fa1014b5299","src/client/tls.rs":"262c1d178fee8b79dc5343aa7238528bdc0a15567e0d8089a74bc0b59b5d45ea","src/client/upgrade.rs":"336838241b84da20119120fac141ffb74385f6c72b8c6d54e39890afb788c508","src/config/file_config.rs":"3833f8ea1b6ff65e1b1aac7db4e01e28cc396f2bc880221c0490cbeca1097036","src/config/file_loader.rs":"680f343c0b06a98742066ec5a5ae6ec63e2a805a96c3e3f0a58bb96177c75769","src/config/incluster_config.rs":"f94d2eba9fb7e464912607ad7dfda9991fa39959e99d3631719950f5de3997a6","src/config/mod.rs":"2251611c63c01dbea696977c45a67d4a609566d388f95006bf1139b759e567cb","src/config/test_data/kubeconfig_utf16be.yaml":"67cb526eb44d0bd7d0c6948fe64f493cd99785e4ff85808216313f1598001bea","src/config/test_data/kubeconfig_utf16le.yaml":"a46c47944695c2269114117bd9b1df6a47bc2c13eaf8aba027304a302d7e5bb2","src/config/test_data/kubeconfig_utf8.yaml":"b23e3fe59c0d0279b5b3db369fa7c1d1aca4db82bcc31da552077ea4363548bf","src/discovery/apigroup.rs":"21a79c75068bd480f4237f94b5ff0d76c7e2b1e078edabdcd0c8f98151e109e4","src/discovery/mod.rs":"fe1ccec5f194e80d1167ab6690dc8241e09c7f7212be27ab4859404d7696b61a","src/discovery/oneshot.rs":"6d4a9e406040fc01b1331209475f6ba80d80da00fb283381cf9fdb6d3cc65861","src/discovery/parse.rs":"e25551bf5901d63bf3c8f8a733ed03db759db6c7fee9b764967e0f942ca18af0","src/error.rs":"33c4b5d56a627f9d016cd8add185bbdaebeef3765b15ac9c16be9cd0bae27629","src/lib.rs":"b83708796014840dafc40f5404c0b7b39c9d56558e5e3e9f730428bbdf703a33"},"package":"7fc2ed952042df20d15ac2fe9614d0ec14b6118eab89633985d4b36e688dccf1"}

View File

@@ -0,0 +1,6 @@
{
"git": {
"sha1": "c9b7b70f7fa0378ea1cd6ac697c1a0c0bb7b7dd3"
},
"path_in_vcs": "kube-client"
}

2602
vendor/kube-client/Cargo.lock generated vendored Normal file

File diff suppressed because it is too large Load Diff

361
vendor/kube-client/Cargo.toml vendored Normal file
View File

@@ -0,0 +1,361 @@
# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
#
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
# to registry (e.g., crates.io) dependencies.
#
# If you are reading this file be aware that the original Cargo.toml
# will likely look very different (and much more reasonable).
# See Cargo.toml.orig for the original contents.
[package]
edition = "2021"
rust-version = "1.81.0"
name = "kube-client"
version = "0.99.0"
authors = [
"clux <sszynrae@gmail.com>",
"Natalie Klestrup Röijezon <nat@nullable.se>",
"kazk <kazk.dev@gmail.com>",
]
build = false
autolib = false
autobins = false
autoexamples = false
autotests = false
autobenches = false
description = "Kubernetes client"
readme = "README.md"
keywords = [
"kubernetes",
"client",
]
categories = [
"web-programming::http-client",
"network-programming",
"api-bindings",
]
license = "Apache-2.0"
repository = "https://github.com/kube-rs/kube"
resolver = "1"
[package.metadata.docs.rs]
features = [
"client",
"rustls-tls",
"openssl-tls",
"ws",
"oauth",
"oidc",
"jsonpatch",
"admission",
"k8s-openapi/latest",
"socks5",
"unstable-client",
"http-proxy",
]
rustdoc-args = [
"--cfg",
"docsrs",
]
[lib]
name = "kube_client"
path = "src/lib.rs"
[dependencies.base64]
version = "0.22.1"
optional = true
[dependencies.bytes]
version = "1.1.0"
optional = true
[dependencies.chrono]
version = "0.4.34"
optional = true
default-features = false
[dependencies.either]
version = "1.6.1"
optional = true
[dependencies.form_urlencoded]
version = "1.2.0"
optional = true
[dependencies.futures]
version = "0.3.17"
features = ["std"]
optional = true
default-features = false
[dependencies.home]
version = "0.5.4"
optional = true
[dependencies.http]
version = "1.1.0"
[dependencies.http-body]
version = "1.0.1"
optional = true
[dependencies.http-body-util]
version = "0.1.2"
optional = true
[dependencies.hyper]
version = "1.2.0"
features = [
"client",
"http1",
]
optional = true
[dependencies.hyper-http-proxy]
version = "1"
optional = true
default-features = false
[dependencies.hyper-openssl]
version = "0.10.2"
features = ["client-legacy"]
optional = true
[dependencies.hyper-rustls]
version = "0.27.1"
features = [
"http1",
"logging",
"native-tokio",
"tls12",
]
optional = true
default-features = false
[dependencies.hyper-socks2]
version = "0.9.0"
optional = true
default-features = false
[dependencies.hyper-timeout]
version = "0.5.1"
optional = true
[dependencies.hyper-util]
version = "0.1.9"
features = [
"client",
"client-legacy",
"http1",
"tokio",
]
optional = true
[dependencies.jsonpath-rust]
version = "0.7.3"
optional = true
[dependencies.k8s-openapi]
version = "0.24.0"
features = []
default-features = false
[dependencies.kube-core]
version = "=0.99.0"
[dependencies.openssl]
version = "0.10.36"
optional = true
[dependencies.pem]
version = "3.0.1"
optional = true
[dependencies.rustls]
version = "0.23.16"
optional = true
default-features = false
[dependencies.secrecy]
version = "0.10.2"
[dependencies.serde]
version = "1.0.130"
features = ["derive"]
[dependencies.serde_json]
version = "1.0.68"
[dependencies.serde_yaml]
version = "0.9.19"
optional = true
[dependencies.tame-oauth]
version = "0.10.0"
features = ["gcp"]
optional = true
[dependencies.thiserror]
version = "2.0.3"
[dependencies.tokio]
version = "1.14.0"
features = [
"time",
"signal",
"sync",
]
optional = true
[dependencies.tokio-tungstenite]
version = "0.26.1"
optional = true
[dependencies.tokio-util]
version = "0.7.0"
features = [
"io",
"codec",
]
optional = true
[dependencies.tower]
version = "0.5.1"
features = [
"buffer",
"filter",
"util",
]
optional = true
[dependencies.tower-http]
version = "0.6.1"
features = [
"auth",
"map-response-body",
"trace",
]
optional = true
[dependencies.tracing]
version = "0.1.36"
features = ["log"]
optional = true
[dev-dependencies.futures]
version = "0.3.17"
features = ["async-await"]
default-features = false
[dev-dependencies.hyper]
version = "1.2.0"
features = ["server"]
[dev-dependencies.k8s-openapi]
version = "0.24.0"
features = ["latest"]
default-features = false
[dev-dependencies.kube]
version = "<2.0.0, >=0.98.0"
features = [
"derive",
"client",
"ws",
]
[dev-dependencies.schemars]
version = "0.8.6"
[dev-dependencies.tempfile]
version = "3.1.0"
[dev-dependencies.tokio]
version = "1.14.0"
features = ["full"]
[dev-dependencies.tokio-test]
version = "0.4.0"
[dev-dependencies.tower-test]
version = "0.4.0"
[features]
__non_core = [
"tracing",
"serde_yaml",
"base64",
]
admission = ["kube-core/admission"]
aws-lc-rs = ["hyper-rustls?/aws-lc-rs"]
client = [
"config",
"__non_core",
"hyper",
"hyper-util",
"http-body",
"http-body-util",
"tower",
"tower-http",
"hyper-timeout",
"chrono",
"jsonpath-rust",
"bytes",
"futures",
"tokio",
"tokio-util",
"either",
]
config = [
"__non_core",
"pem",
"home",
]
default = [
"client",
"ring",
]
gzip = [
"client",
"tower-http/decompression-gzip",
]
http-proxy = ["hyper-http-proxy"]
jsonpatch = ["kube-core/jsonpatch"]
kubelet-debug = [
"ws",
"kube-core/kubelet-debug",
]
oauth = [
"client",
"tame-oauth",
]
oidc = [
"client",
"form_urlencoded",
]
openssl-tls = [
"openssl",
"hyper-openssl",
]
ring = ["hyper-rustls?/ring"]
rustls-tls = [
"rustls",
"hyper-rustls",
"hyper-http-proxy?/rustls-tls-native-roots",
]
socks5 = ["hyper-socks2"]
unstable-client = []
webpki-roots = ["hyper-rustls/webpki-roots"]
ws = [
"client",
"tokio-tungstenite",
"kube-core/ws",
"tokio/macros",
]
[lints.rust]
missing_docs = "deny"
unsafe_code = "forbid"

18
vendor/kube-client/README.md vendored Normal file
View File

@@ -0,0 +1,18 @@
# kube-client
[![Client Capabilities](https://img.shields.io/badge/Kubernetes%20client-Silver-blue.svg?style=plastic&colorB=C0C0C0&colorA=306CE8)](https://github.com/kubernetes/design-proposals-archive/blob/main/api-machinery/csi-new-client-library-procedure.md#client-capabilities)
[![Client Support Level; Stable](https://img.shields.io/badge/kubernetes%20client-stable-green.svg?style=plastic&colorA=306CE8)](https://kube.rs/stability)
The rust counterpart to [kubernetes/client-go](https://github.com/kubernetes/client-go).
Contains the IO layer plus the core Api layer, as well as config parsing.
## Usage
This crate, and all its features, are re-exported from the facade-crate `kube`.
## Docs
See the **[kube-client API Docs](https://docs.rs/kube-client/)**
## Development
Help very welcome! To help out on this crate check out these labels:
- https://github.com/kube-rs/kube/labels/client
- https://github.com/kube-rs/kube/labels/api
- https://github.com/kube-rs/kube/labels/config

View File

@@ -0,0 +1,599 @@
use either::Either;
use futures::Stream;
use serde::{de::DeserializeOwned, Serialize};
use std::fmt::Debug;
use crate::{api::Api, Error, Result};
use kube_core::{
metadata::PartialObjectMeta, object::ObjectList, params::*, response::Status, ErrorResponse, WatchEvent,
};
/// PUSH/PUT/POST/GET abstractions
/// PUSH/PUT/POST/GET abstractions
impl<K> Api<K>
where
    K: Clone + DeserializeOwned + Debug,
{
    /// Get a named resource
    ///
    /// ```no_run
    /// # use kube::Api;
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let p: Pod = pods.get("blog").await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// This function assumes that the object is expected to always exist, and returns [`Error`] if it does not.
    /// Consider using [`Api::get_opt`] if you need to handle missing objects.
    pub async fn get(&self, name: &str) -> Result<K> {
        self.get_with(name, &GetParams::default()).await
    }

    /// Get only the metadata for a named resource as [`PartialObjectMeta`]
    ///
    /// ```no_run
    /// use kube::{Api, core::PartialObjectMeta};
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let p: PartialObjectMeta<Pod> = pods.get_metadata("blog").await?;
    /// # Ok(())
    /// # }
    /// ```
    /// Note that the type may be converted to `ObjectMeta` through the usual
    /// conversion traits.
    ///
    /// # Errors
    ///
    /// This function assumes that the object is expected to always exist, and returns [`Error`] if it does not.
    /// Consider using [`Api::get_metadata_opt`] if you need to handle missing objects.
    pub async fn get_metadata(&self, name: &str) -> Result<PartialObjectMeta<K>> {
        self.get_metadata_with(name, &GetParams::default()).await
    }

    /// [Get](`Api::get`) a named resource with an explicit resourceVersion
    ///
    /// This function allows the caller to pass in a [`GetParams`](`super::GetParams`) type containing
    /// a `resourceVersion` to a [Get](`Api::get`) call.
    /// For example
    ///
    /// ```no_run
    /// # use kube::{Api, api::GetParams};
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let p: Pod = pods.get_with("blog", &GetParams::any()).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Errors
    ///
    /// This function assumes that the object is expected to always exist, and returns [`Error`] if it does not.
    /// Consider using [`Api::get_opt`] if you need to handle missing objects.
    pub async fn get_with(&self, name: &str, gp: &GetParams) -> Result<K> {
        let mut req = self.request.get(name, gp).map_err(Error::BuildRequest)?;
        // Tag the request with the originating method name via request extensions;
        // presumably consumed by middleware/tracing layers — TODO confirm.
        req.extensions_mut().insert("get");
        self.client.request::<K>(req).await
    }

    /// [Get](`Api::get_metadata`) the metadata of an object using an explicit `resourceVersion`
    ///
    /// This function allows the caller to pass in a [`GetParams`](`super::GetParams`) type containing
    /// a `resourceVersion` to a [Get](`Api::get_metadata`) call.
    /// For example
    ///
    ///
    /// ```no_run
    /// use kube::{Api, api::GetParams, core::PartialObjectMeta};
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let p: PartialObjectMeta<Pod> = pods.get_metadata_with("blog", &GetParams::any()).await?;
    /// # Ok(())
    /// # }
    /// ```
    /// Note that the type may be converted to `ObjectMeta` through the usual
    /// conversion traits.
    ///
    /// # Errors
    ///
    /// This function assumes that the object is expected to always exist, and returns [`Error`] if it does not.
    /// Consider using [`Api::get_metadata_opt`] if you need to handle missing objects.
    pub async fn get_metadata_with(&self, name: &str, gp: &GetParams) -> Result<PartialObjectMeta<K>> {
        let mut req = self.request.get_metadata(name, gp).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("get_metadata");
        self.client.request::<PartialObjectMeta<K>>(req).await
    }

    /// [Get](`Api::get`) a named resource if it exists, returns [`None`] if it doesn't exist
    ///
    /// ```no_run
    /// # use kube::Api;
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// if let Some(pod) = pods.get_opt("blog").await? {
    ///     // Pod was found
    /// } else {
    ///     // Pod was not found
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get_opt(&self, name: &str) -> Result<Option<K>> {
        match self.get(name).await {
            Ok(obj) => Ok(Some(obj)),
            // A "NotFound" API error is the expected signal for a missing object;
            // it is mapped to Ok(None) rather than surfaced as an error.
            Err(Error::Api(ErrorResponse { reason, .. })) if &reason == "NotFound" => Ok(None),
            Err(err) => Err(err),
        }
    }

    /// [Get Metadata](`Api::get_metadata`) for a named resource if it exists, returns [`None`] if it doesn't exist
    ///
    /// ```no_run
    /// # use kube::Api;
    /// use k8s_openapi::api::core::v1::Pod;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// if let Some(pod) = pods.get_metadata_opt("blog").await? {
    ///     // Pod was found
    /// } else {
    ///     // Pod was not found
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Note that [`PartialObjectMeta`] embeds the raw `ObjectMeta`.
    pub async fn get_metadata_opt(&self, name: &str) -> Result<Option<PartialObjectMeta<K>>> {
        self.get_metadata_opt_with(name, &GetParams::default()).await
    }

    /// [Get Metadata](`Api::get_metadata`) of an object if it exists, using an explicit `resourceVersion`.
    /// Returns [`None`] if it doesn't exist.
    ///
    /// ```no_run
    /// # use kube::Api;
    /// use k8s_openapi::api::core::v1::Pod;
    /// use kube_core::params::GetParams;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// if let Some(pod) = pods.get_metadata_opt_with("blog", &GetParams::any()).await? {
    ///     // Pod was found
    /// } else {
    ///     // Pod was not found
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Note that [`PartialObjectMeta`] embeds the raw `ObjectMeta`.
    pub async fn get_metadata_opt_with(
        &self,
        name: &str,
        gp: &GetParams,
    ) -> Result<Option<PartialObjectMeta<K>>> {
        match self.get_metadata_with(name, gp).await {
            Ok(meta) => Ok(Some(meta)),
            // Missing objects surface as a "NotFound" API error; translate to Ok(None).
            Err(Error::Api(ErrorResponse { reason, .. })) if &reason == "NotFound" => Ok(None),
            Err(err) => Err(err),
        }
    }

    /// Get a list of resources
    ///
    /// You use this to get everything, or a subset matching fields/labels, say:
    ///
    /// ```no_run
    /// use kube::api::{Api, ListParams, ResourceExt};
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let lp = ListParams::default().labels("app=blog"); // for this app only
    /// for p in pods.list(&lp).await? {
    ///     println!("Found Pod: {}", p.name_any());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn list(&self, lp: &ListParams) -> Result<ObjectList<K>> {
        let mut req = self.request.list(lp).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("list");
        self.client.request::<ObjectList<K>>(req).await
    }

    /// Get a list of resources that contains only their metadata as [`PartialObjectMeta`]
    ///
    /// Similar to [list](`Api::list`), you use this to get everything, or a
    /// subset matching fields/labels. For example
    ///
    /// ```no_run
    /// use kube::api::{Api, ListParams, ResourceExt};
    /// use kube::core::{ObjectMeta, ObjectList, PartialObjectMeta};
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let lp = ListParams::default().labels("app=blog"); // for this app only
    /// let list: ObjectList<PartialObjectMeta<Pod>> = pods.list_metadata(&lp).await?;
    /// for p in list {
    ///     println!("Found Pod: {}", p.name_any());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn list_metadata(&self, lp: &ListParams) -> Result<ObjectList<PartialObjectMeta<K>>> {
        let mut req = self.request.list_metadata(lp).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("list_metadata");
        self.client.request::<ObjectList<PartialObjectMeta<K>>>(req).await
    }

    /// Create a resource
    ///
    /// This function requires a type that Serializes to `K`, which can be:
    /// 1. Raw string YAML
    ///     - easy to port from existing files
    ///     - error prone (run-time errors on typos due to failed serialize attempts)
    ///     - very error prone (can write invalid YAML)
    /// 2. An instance of the struct itself
    ///     - easy to instantiate for CRDs (you define the struct)
    ///     - dense to instantiate for [`k8s_openapi`] types (due to many optionals)
    ///     - compile-time safety
    ///     - but still possible to write invalid native types (validation at apiserver)
    /// 3. [`serde_json::json!`] macro instantiated [`serde_json::Value`]
    ///     - Tradeoff between the two
    ///     - Easy partially filling of native [`k8s_openapi`] types (most fields optional)
    ///     - Partial safety against runtime errors (at least you must write valid JSON)
    ///
    /// Note that this method cannot write to the status object (when it exists) of a resource.
    /// To set status objects please see [`Api::replace_status`] or [`Api::patch_status`].
    pub async fn create(&self, pp: &PostParams, data: &K) -> Result<K>
    where
        K: Serialize,
    {
        // Serialize the object up-front so serde errors are reported before any I/O happens.
        let bytes = serde_json::to_vec(&data).map_err(Error::SerdeError)?;
        let mut req = self.request.create(pp, bytes).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("create");
        self.client.request::<K>(req).await
    }

    /// Delete a named resource
    ///
    /// When you get a `K` via `Left`, your delete has started.
    /// When you get a `Status` via `Right`, this should be a 2XX style
    /// confirmation that the object is gone.
    ///
    /// 4XX and 5XX status types are returned as an [`Err(kube_client::Error::Api)`](crate::Error::Api).
    ///
    /// ```no_run
    /// use kube::api::{Api, DeleteParams};
    /// use k8s_openapi::apiextensions_apiserver::pkg::apis::apiextensions::v1 as apiexts;
    /// use apiexts::CustomResourceDefinition;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let crds: Api<CustomResourceDefinition> = Api::all(client);
    /// crds.delete("foos.clux.dev", &DeleteParams::default()).await?
    ///     .map_left(|o| println!("Deleting CRD: {:?}", o.status))
    ///     .map_right(|s| println!("Deleted CRD: {:?}", s));
    /// # Ok(())
    /// # }
    /// ```
    pub async fn delete(&self, name: &str, dp: &DeleteParams) -> Result<Either<K, Status>> {
        let mut req = self.request.delete(name, dp).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("delete");
        self.client.request_status::<K>(req).await
    }

    /// Delete a collection of resources
    ///
    /// When you get an `ObjectList<K>` via `Left`, your delete has started.
    /// When you get a `Status` via `Right`, this should be a 2XX style
    /// confirmation that the object is gone.
    ///
    /// 4XX and 5XX status types are returned as an [`Err(kube_client::Error::Api)`](crate::Error::Api).
    ///
    /// ```no_run
    /// use kube::api::{Api, DeleteParams, ListParams, ResourceExt};
    /// use k8s_openapi::api::core::v1::Pod;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    ///
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// match pods.delete_collection(&DeleteParams::default(), &ListParams::default()).await? {
    ///     either::Left(list) => {
    ///         let names: Vec<_> = list.iter().map(ResourceExt::name_any).collect();
    ///         println!("Deleting collection of pods: {:?}", names);
    ///     },
    ///     either::Right(status) => {
    ///         println!("Deleted collection of pods: status={:?}", status);
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn delete_collection(
        &self,
        dp: &DeleteParams,
        lp: &ListParams,
    ) -> Result<Either<ObjectList<K>, Status>> {
        let mut req = self
            .request
            .delete_collection(dp, lp)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("delete_collection");
        self.client.request_status::<ObjectList<K>>(req).await
    }

    /// Patch a subset of a resource's properties
    ///
    /// Takes a [`Patch`] along with [`PatchParams`] for the call.
    ///
    /// ```no_run
    /// use kube::api::{Api, PatchParams, Patch, Resource};
    /// use k8s_openapi::api::core::v1::Pod;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    ///
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let patch = serde_json::json!({
    ///     "apiVersion": "v1",
    ///     "kind": "Pod",
    ///     "metadata": {
    ///         "name": "blog"
    ///     },
    ///     "spec": {
    ///         "activeDeadlineSeconds": 5
    ///     }
    /// });
    /// let params = PatchParams::apply("myapp");
    /// let patch = Patch::Apply(&patch);
    /// let o_patched = pods.patch("blog", &params, &patch).await?;
    /// # Ok(())
    /// # }
    /// ```
    /// [`Patch`]: super::Patch
    /// [`PatchParams`]: super::PatchParams
    ///
    /// Note that this method cannot write to the status object (when it exists) of a resource.
    /// To set status objects please see [`Api::replace_status`] or [`Api::patch_status`].
    pub async fn patch<P: Serialize + Debug>(
        &self,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<K> {
        let mut req = self.request.patch(name, pp, patch).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("patch");
        self.client.request::<K>(req).await
    }

    /// Patch a metadata subset of a resource's properties from [`PartialObjectMeta`]
    ///
    /// Takes a [`Patch`] along with [`PatchParams`] for the call.
    /// Patches can be constructed raw using `serde_json::json!` or from `ObjectMeta` via [`PartialObjectMetaExt`].
    ///
    /// ```no_run
    /// use kube::api::{Api, PatchParams, Patch, Resource};
    /// use kube::core::{PartialObjectMetaExt, ObjectMeta};
    /// use k8s_openapi::api::core::v1::Pod;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let metadata = ObjectMeta {
    ///     labels: Some([("key".to_string(), "value".to_string())].into()),
    ///     ..Default::default()
    /// }.into_request_partial::<Pod>();
    ///
    /// let params = PatchParams::apply("myapp");
    /// let o_patched = pods.patch_metadata("blog", &params, &Patch::Apply(&metadata)).await?;
    /// println!("Patched {}", o_patched.metadata.name.unwrap());
    /// # Ok(())
    /// # }
    /// ```
    /// [`Patch`]: super::Patch
    /// [`PatchParams`]: super::PatchParams
    /// [`PartialObjectMetaExt`]: crate::core::PartialObjectMetaExt
    ///
    /// ### Warnings
    ///
    /// The `TypeMeta` (apiVersion + kind) of a patch request (required for apply patches)
    /// must match the underlying type that is being patched (e.g. "v1" + "Pod").
    /// The returned `TypeMeta` will always be {"meta.k8s.io/v1", "PartialObjectMetadata"}.
    /// These constraints are encoded into [`PartialObjectMetaExt`].
    ///
    /// This method can write to non-metadata fields such as spec if included in the patch.
    pub async fn patch_metadata<P: Serialize + Debug>(
        &self,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<PartialObjectMeta<K>> {
        let mut req = self
            .request
            .patch_metadata(name, pp, patch)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("patch_metadata");
        self.client.request::<PartialObjectMeta<K>>(req).await
    }

    /// Replace a resource entirely with a new one
    ///
    /// This is used just like [`Api::create`], but with one additional instruction:
    /// You must set `metadata.resourceVersion` in the provided data because k8s
    /// will not accept an update unless you actually knew what the last version was.
    ///
    /// Thus, to use this function, you need to do a `get` then a `replace` with its result.
    ///
    /// ```no_run
    /// use kube::api::{Api, PostParams, ResourceExt};
    /// use k8s_openapi::api::batch::v1::Job;
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let jobs: Api<Job> = Api::namespaced(client, "apps");
    /// let j = jobs.get("baz").await?;
    /// let j_new: Job = serde_json::from_value(serde_json::json!({
    ///     "apiVersion": "batch/v1",
    ///     "kind": "Job",
    ///     "metadata": {
    ///         "name": "baz",
    ///         "resourceVersion": j.resource_version(),
    ///     },
    ///     "spec": {
    ///         "template": {
    ///             "metadata": {
    ///                 "name": "empty-job-pod"
    ///             },
    ///             "spec": {
    ///                 "containers": [{
    ///                     "name": "empty",
    ///                     "image": "alpine:latest"
    ///                 }],
    ///                 "restartPolicy": "Never",
    ///             }
    ///         }
    ///     }
    /// }))?;
    /// jobs.replace("baz", &PostParams::default(), &j_new).await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// Consider mutating the result of `api.get` rather than recreating it.
    ///
    /// Note that this method cannot write to the status object (when it exists) of a resource.
    /// To set status objects please see [`Api::replace_status`] or [`Api::patch_status`].
    pub async fn replace(&self, name: &str, pp: &PostParams, data: &K) -> Result<K>
    where
        K: Serialize,
    {
        // Serialize before building the request so serde errors short-circuit without I/O.
        let bytes = serde_json::to_vec(&data).map_err(Error::SerdeError)?;
        let mut req = self
            .request
            .replace(name, pp, bytes)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("replace");
        self.client.request::<K>(req).await
    }

    /// Watch a list of resources
    ///
    /// This returns a future that awaits the initial response,
    /// then you can stream the remaining buffered `WatchEvent` objects.
    ///
    /// Note that a `watch` call can terminate for many reasons (even before the specified
    /// [`WatchParams::timeout`] is triggered), and will have to be re-issued
    /// with the last seen resource version when or if it closes.
    ///
    /// Consider using a managed [`watcher`] to deal with automatic re-watches and error cases.
    ///
    /// ```no_run
    /// use kube::api::{Api, WatchParams, ResourceExt, WatchEvent};
    /// use k8s_openapi::api::batch::v1::Job;
    /// use futures::{StreamExt, TryStreamExt};
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let jobs: Api<Job> = Api::namespaced(client, "apps");
    /// let lp = WatchParams::default()
    ///     .fields("metadata.name=my_job")
    ///     .timeout(20); // upper bound of how long we watch for
    /// let mut stream = jobs.watch(&lp, "0").await?.boxed();
    /// while let Some(status) = stream.try_next().await? {
    ///     match status {
    ///         WatchEvent::Added(s) => println!("Added {}", s.name_any()),
    ///         WatchEvent::Modified(s) => println!("Modified: {}", s.name_any()),
    ///         WatchEvent::Deleted(s) => println!("Deleted {}", s.name_any()),
    ///         WatchEvent::Bookmark(s) => {},
    ///         WatchEvent::Error(s) => println!("{}", s),
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    /// [`WatchParams::timeout`]: super::WatchParams::timeout
    /// [`watcher`]: https://docs.rs/kube_runtime/*/kube_runtime/watcher/fn.watcher.html
    pub async fn watch(
        &self,
        wp: &WatchParams,
        version: &str,
    ) -> Result<impl Stream<Item = Result<WatchEvent<K>>>> {
        let mut req = self.request.watch(wp, version).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("watch");
        self.client.request_events::<K>(req).await
    }

    /// Watch a list of metadata for the given resources
    ///
    /// This returns a future that awaits the initial response,
    /// then you can stream the remaining buffered `WatchEvent` objects.
    ///
    /// Note that a `watch_metadata` call can terminate for many reasons (even
    /// before the specified [`WatchParams::timeout`] is triggered), and will
    /// have to be re-issued with the last seen resource version when or if it
    /// closes.
    ///
    /// Consider using a managed [`metadata_watcher`] to deal with automatic re-watches and error cases.
    ///
    /// ```no_run
    /// use kube::api::{Api, WatchParams, ResourceExt, WatchEvent};
    /// use k8s_openapi::api::batch::v1::Job;
    /// use futures::{StreamExt, TryStreamExt};
    ///
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let jobs: Api<Job> = Api::namespaced(client, "apps");
    ///
    /// let lp = WatchParams::default()
    ///     .fields("metadata.name=my_job")
    ///     .timeout(20); // upper bound of how long we watch for
    /// let mut stream = jobs.watch_metadata(&lp, "0").await?.boxed();
    /// while let Some(status) = stream.try_next().await? {
    ///     match status {
    ///         WatchEvent::Added(s) => println!("Added {}", s.metadata.name.unwrap()),
    ///         WatchEvent::Modified(s) => println!("Modified: {}", s.metadata.name.unwrap()),
    ///         WatchEvent::Deleted(s) => println!("Deleted {}", s.metadata.name.unwrap()),
    ///         WatchEvent::Bookmark(s) => {},
    ///         WatchEvent::Error(s) => println!("{}", s),
    ///     }
    /// }
    /// # Ok(())
    /// # }
    /// ```
    /// [`WatchParams::timeout`]: super::WatchParams::timeout
    /// [`metadata_watcher`]: https://docs.rs/kube_runtime/*/kube_runtime/watcher/fn.metadata_watcher.html
    pub async fn watch_metadata(
        &self,
        wp: &WatchParams,
        version: &str,
    ) -> Result<impl Stream<Item = Result<WatchEvent<PartialObjectMeta<K>>>>> {
        let mut req = self
            .request
            .watch_metadata(wp, version)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("watch_metadata");
        self.client.request_events::<PartialObjectMeta<K>>(req).await
    }
}

589
vendor/kube-client/src/api/entry.rs vendored Normal file
View File

@@ -0,0 +1,589 @@
//! API helpers for get-or-create and get-and-modify patterns
//!
//! [`Api::entry`] is the primary entry point for this API.
// Import used in docs
#[allow(unused_imports)] use std::collections::HashMap;
use std::fmt::Debug;
use crate::{Api, Error, Result};
use kube_core::{params::PostParams, Resource};
use serde::{de::DeserializeOwned, Serialize};
impl<K: Resource + Clone + DeserializeOwned + Debug> Api<K> {
    /// Gets a given object's "slot" on the Kubernetes API, designed for "get-or-create" and "get-and-modify" patterns
    ///
    /// This is similar to [`HashMap::entry`], but the [`Entry`] must be [`OccupiedEntry::commit`]ed for changes to be persisted.
    ///
    /// # Usage
    ///
    /// ```rust,no_run
    /// # use std::collections::BTreeMap;
    /// # use k8s_openapi::api::core::v1::ConfigMap;
    /// # async fn wrapper() -> Result <(), Box<dyn std::error::Error>> {
    /// let kube = kube::Client::try_default().await?;
    /// let cms = kube::Api::<ConfigMap>::namespaced(kube, "default");
    /// cms
    ///     // Try to get `entry-example` if it exists
    ///     .entry("entry-example").await?
    ///     // Modify object if it already exists
    ///     .and_modify(|cm| {
    ///         cm.data
    ///             .get_or_insert_with(BTreeMap::default)
    ///             .insert("already-exists".to_string(), "true".to_string());
    ///     })
    ///     // Provide a default object if it does not exist
    ///     .or_insert(|| ConfigMap::default())
    ///     // Modify the object unconditionally now that we have provided a default value
    ///     .and_modify(|cm| {
    ///         cm.data
    ///             .get_or_insert_with(BTreeMap::default)
    ///             .insert("modified".to_string(), "true".to_string());
    ///     })
    ///     // Save changes
    ///     .commit(&kube::api::PostParams::default()).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn entry<'a>(&'a self, name: &'a str) -> Result<Entry<'a, K>> {
        // A `get_opt` miss maps to a vacant entry; a hit seeds an occupied
        // (and so-far clean) entry holding the fetched object.
        let found = self.get_opt(name).await?;
        let entry = if let Some(object) = found {
            Entry::Occupied(OccupiedEntry {
                api: self,
                dirtiness: Dirtiness::Clean,
                name,
                object,
            })
        } else {
            Entry::Vacant(VacantEntry { api: self, name })
        };
        Ok(entry)
    }
}
/// A view into a single object, with enough context to create or update it
///
/// See [`Api::entry`] for more information.
#[derive(Debug)]
pub enum Entry<'a, K> {
    /// An object that either exists on the server, or has been created locally (and is awaiting synchronization)
    Occupied(OccupiedEntry<'a, K>),
    /// An object that does not exist
    Vacant(VacantEntry<'a, K>),
}
impl<'a, K> Entry<'a, K> {
    /// Borrow the object, if it exists (on the API, or queued for creation using [`Entry::or_insert`])
    pub fn get(&self) -> Option<&K> {
        if let Entry::Occupied(occupied) = self {
            Some(occupied.get())
        } else {
            None
        }
    }

    /// Borrow the object mutably, if it exists (on the API, or queued for creation using [`Entry::or_insert`])
    ///
    /// [`OccupiedEntry::commit`] must be called afterwards for any changes to be persisted.
    pub fn get_mut(&mut self) -> Option<&mut K> {
        if let Entry::Occupied(occupied) = self {
            Some(occupied.get_mut())
        } else {
            None
        }
    }

    /// Let `f` modify the object, if it exists (on the API, or queued for creation using [`Entry::or_insert`])
    ///
    /// [`OccupiedEntry::commit`] must be called afterwards for any changes to be persisted.
    pub fn and_modify(self, f: impl FnOnce(&mut K)) -> Self {
        match self {
            Entry::Occupied(occupied) => Entry::Occupied(occupied.and_modify(f)),
            // Vacant entries pass through untouched
            vacant => vacant,
        }
    }

    /// Create a new object if it does not already exist
    ///
    /// [`OccupiedEntry::commit`] must be called afterwards for the change to be persisted.
    pub fn or_insert(self, default: impl FnOnce() -> K) -> OccupiedEntry<'a, K>
    where
        K: Resource,
    {
        match self {
            Entry::Occupied(occupied) => occupied,
            Entry::Vacant(vacant) => vacant.insert(default()),
        }
    }
}
/// A view into a single object that exists
///
/// The object may exist because it existed at the time of call to [`Api::entry`],
/// or because it was created by [`Entry::or_insert`].
#[derive(Debug)]
pub struct OccupiedEntry<'a, K> {
    /// The `Api` used to persist changes on `commit` (via `create`/`replace`)
    api: &'a Api<K>,
    /// Tracks whether `object` has local changes relative to the API server
    dirtiness: Dirtiness,
    /// The name passed to [`Api::entry`]; validated against `object`'s metadata before commit
    name: &'a str,
    /// The locally held copy of the object
    object: K,
}
/// Synchronization state of an [`OccupiedEntry`]'s local object relative to the API server
#[derive(Debug)]
enum Dirtiness {
    /// The object has not been modified (locally) since the last API operation
    Clean,
    /// The object exists in the API, but has been modified locally
    Dirty,
    /// The object does not yet exist in the API, but was created locally
    New,
}
impl<K> OccupiedEntry<'_, K> {
    /// Borrow the object
    pub fn get(&self) -> &K {
        &self.object
    }

    /// Borrow the object mutably
    ///
    /// [`OccupiedEntry::commit`] must be called afterwards for any changes to be persisted.
    pub fn get_mut(&mut self) -> &mut K {
        // Handing out a mutable borrow makes a clean object dirty;
        // objects that are already Dirty or New keep their state.
        if matches!(self.dirtiness, Dirtiness::Clean) {
            self.dirtiness = Dirtiness::Dirty;
        }
        &mut self.object
    }

    /// Let `f` modify the object
    ///
    /// [`OccupiedEntry::commit`] must be called afterwards for any changes to be persisted.
    pub fn and_modify(mut self, f: impl FnOnce(&mut K)) -> Self {
        f(self.get_mut());
        self
    }

    /// Take ownership over the object
    pub fn into_object(self) -> K {
        self.object
    }

    /// Save the object to the Kubernetes API, if any changes have been made
    ///
    /// The [`OccupiedEntry`] is updated with the new object (including changes made by the API server, such as
    /// `.metadata.resource_version`).
    ///
    /// # Errors
    ///
    /// This function can fail due to transient errors, or due to write conflicts (for example: if another client
    /// created the object between the calls to [`Api::entry`] and `OccupiedEntry::commit`, or because another
    /// client modified the object in the meantime).
    ///
    /// Any retries should be coarse-grained enough to also include the call to [`Api::entry`], so that the latest
    /// state can be fetched.
    #[tracing::instrument(skip(self))]
    pub async fn commit(&mut self, pp: &PostParams) -> Result<(), CommitError>
    where
        K: Resource + DeserializeOwned + Serialize + Clone + Debug,
    {
        self.prepare_for_commit()?;
        // New objects are `create`d, dirty ones `replace`d; clean ones need no round-trip.
        let saved = match self.dirtiness {
            Dirtiness::New => Some(
                self.api
                    .create(pp, &self.object)
                    .await
                    .map_err(CommitError::Save)?,
            ),
            Dirtiness::Dirty => Some(
                self.api
                    .replace(self.name, pp, &self.object)
                    .await
                    .map_err(CommitError::Save)?,
            ),
            Dirtiness::Clean => None,
        };
        if let Some(object) = saved {
            self.object = object;
        }
        // A dry-run does not persist anything, so local changes remain pending.
        if !pp.dry_run {
            self.dirtiness = Dirtiness::Clean;
        }
        Ok(())
    }

    /// Validate that [`Self::object`] is valid, and refers to the same object as the original [`Api::entry`] call
    ///
    /// Defaults `ObjectMeta::name` and `ObjectMeta::namespace` if unset.
    fn prepare_for_commit(&mut self) -> Result<(), CommitValidationError>
    where
        K: Resource,
    {
        // Deliberately bypassing `Self::get_mut`: defaulting name/namespace here
        // must not flag the object as dirty.
        let meta = self.object.meta_mut();
        match &mut meta.name {
            slot @ None => *slot = Some(self.name.to_string()),
            Some(name) if name != self.name => {
                return Err(CommitValidationError::NameMismatch {
                    object_name: name.clone(),
                    expected: self.name.to_string(),
                });
            }
            Some(_) => {}
        }
        match &mut meta.namespace {
            slot @ None => slot.clone_from(&self.api.namespace),
            Some(ns) if Some(ns.as_str()) != self.api.namespace.as_deref() => {
                return Err(CommitValidationError::NamespaceMismatch {
                    object_namespace: Some(ns.clone()),
                    expected: self.api.namespace.clone(),
                });
            }
            Some(_) => {}
        }
        if let Some(generate_name) = &meta.generate_name {
            // `generate_name` conflicts with addressing the object by a fixed name
            return Err(CommitValidationError::GenerateName {
                object_generate_name: generate_name.clone(),
            });
        }
        Ok(())
    }
}
/// Commit errors
///
/// Returned by [`OccupiedEntry::commit`].
#[derive(Debug, thiserror::Error)]
pub enum CommitError {
    /// Pre-commit validation failed
    #[error("failed to validate object for saving")]
    Validate(#[from] CommitValidationError),
    /// Failed to submit the new object to the Kubernetes API
    #[error("failed to save object")]
    Save(#[source] Error),
}
/// Pre-commit validation errors
///
/// Produced while defaulting/validating the object's metadata before a commit.
#[derive(Debug, thiserror::Error)]
pub enum CommitValidationError {
    /// `ObjectMeta::name` does not match the name passed to [`Api::entry`]
    #[error(".metadata.name does not match the name passed to Api::entry (got: {object_name:?}, expected: {expected:?})")]
    NameMismatch {
        /// The name of the object (`ObjectMeta::name`)
        object_name: String,
        /// The name passed to [`Api::entry`]
        expected: String,
    },
    /// `ObjectMeta::namespace` does not match the namespace of the [`Api`]
    #[error(".metadata.namespace does not match the namespace of the Api (got: {object_namespace:?}, expected: {expected:?})")]
    NamespaceMismatch {
        /// The name of the object (`ObjectMeta::namespace`)
        object_namespace: Option<String>,
        /// The namespace of the [`Api`]
        expected: Option<String>,
    },
    /// `ObjectMeta::generate_name` must not be set
    #[error(".metadata.generate_name must not be set (got: {object_generate_name:?})")]
    GenerateName {
        /// The set name generation template of the object (`ObjectMeta::generate_name`)
        object_generate_name: String,
    },
}
/// A view of an object that does not yet exist
///
/// Created by [`Api::entry`], as a variant of [`Entry`]
#[derive(Debug)]
pub struct VacantEntry<'a, K> {
    /// The `Api` the object would be created in
    api: &'a Api<K>,
    /// The name the object would be created under
    name: &'a str,
}
impl<'a, K> VacantEntry<'a, K> {
/// Create a new object
///
/// [`OccupiedEntry::commit`] must be called afterwards for the change to be persisted.
#[tracing::instrument(skip(self, object))]
pub fn insert(self, object: K) -> OccupiedEntry<'a, K>
where
K: Resource,
{
OccupiedEntry {
api: self.api,
dirtiness: Dirtiness::New,
name: self.name,
object,
}
}
}
// Integration tests: these require a live cluster and mutate ConfigMaps,
// so they are `#[ignore]`d by default (run with `cargo test -- --ignored`).
#[cfg(test)]
mod tests {
    use std::collections::BTreeMap;

    use k8s_openapi::api::core::v1::ConfigMap;
    use kube_core::{
        params::{DeleteParams, PostParams},
        ErrorResponse, ObjectMeta,
    };

    use crate::{
        api::entry::{CommitError, Entry},
        Api, Client, Error,
    };

    // Verifies the vacant-entry path: or_insert + commit creates the object,
    // a second commit replaces it, and a racing create fails with AlreadyExists.
    #[tokio::test]
    #[ignore = "needs cluster (gets and writes cms)"]
    async fn entry_create_missing_object() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let api = Api::<ConfigMap>::default_namespaced(client);

        let object_name = "entry-missing-cm";
        // Ensure a clean slate in case a previous run left the object behind
        if api.get_opt(object_name).await?.is_some() {
            api.delete(object_name, &DeleteParams::default()).await?;
        }

        let entry = api.entry(object_name).await?;
        let entry2 = api.entry(object_name).await?;
        assert_eq!(entry.get(), None);
        assert_eq!(entry2.get(), None);

        // Create object cleanly
        let mut entry = entry.or_insert(|| ConfigMap {
            data: Some([("key".to_string(), "value".to_string())].into()),
            ..ConfigMap::default()
        });
        entry.commit(&PostParams::default()).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value")
        );
        let fetched_obj = api.get(object_name).await?;
        assert_eq!(
            fetched_obj
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value")
        );

        // Update object
        entry
            .get_mut()
            .data
            .get_or_insert_with(BTreeMap::default)
            .insert("key".to_string(), "value2".to_string());
        entry.commit(&PostParams::default()).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );
        let fetched_obj = api.get(object_name).await?;
        assert_eq!(
            fetched_obj
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );

        // Object was already created in parallel, fail with a conflict error
        let mut entry2 = entry2.or_insert(|| ConfigMap {
            data: Some([("key".to_string(), "value3".to_string())].into()),
            ..ConfigMap::default()
        });
        assert!(
            matches!(dbg!(entry2.commit(&PostParams::default()).await), Err(CommitError::Save(Error::Api(ErrorResponse { reason, .. }))) if reason == "AlreadyExists")
        );

        // Cleanup
        api.delete(object_name, &DeleteParams::default()).await?;
        Ok(())
    }

    // Verifies the occupied-entry path: commit replaces the existing object,
    // and a stale entry's commit fails with a Conflict from the apiserver.
    #[tokio::test]
    #[ignore = "needs cluster (gets and writes cms)"]
    async fn entry_update_existing_object() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let api = Api::<ConfigMap>::default_namespaced(client);

        let object_name = "entry-existing-cm";
        // Ensure a clean slate in case a previous run left the object behind
        if api.get_opt(object_name).await?.is_some() {
            api.delete(object_name, &DeleteParams::default()).await?;
        }
        api.create(&PostParams::default(), &ConfigMap {
            metadata: ObjectMeta {
                namespace: api.namespace.clone(),
                name: Some(object_name.to_string()),
                ..ObjectMeta::default()
            },
            data: Some([("key".to_string(), "value".to_string())].into()),
            ..ConfigMap::default()
        })
        .await?;

        let mut entry = match api.entry(object_name).await? {
            Entry::Occupied(entry) => entry,
            entry => panic!("entry for existing object must be occupied: {entry:?}"),
        };
        let mut entry2 = match api.entry(object_name).await? {
            Entry::Occupied(entry) => entry,
            entry => panic!("entry for existing object must be occupied: {entry:?}"),
        };

        // Entry is up-to-date, modify cleanly
        entry
            .get_mut()
            .data
            .get_or_insert_with(BTreeMap::default)
            .insert("key".to_string(), "value2".to_string());
        entry.commit(&PostParams::default()).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );
        let fetched_obj = api.get(object_name).await?;
        assert_eq!(
            fetched_obj
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );

        // Object was already updated in parallel, fail with a conflict error
        entry2
            .get_mut()
            .data
            .get_or_insert_with(BTreeMap::default)
            .insert("key".to_string(), "value3".to_string());
        assert!(
            matches!(entry2.commit(&PostParams::default()).await, Err(CommitError::Save(Error::Api(ErrorResponse { reason, .. }))) if reason == "Conflict")
        );

        // Cleanup
        api.delete(object_name, &DeleteParams::default()).await?;
        Ok(())
    }

    // Verifies dry-run semantics: a dry-run commit leaves the entry dirty
    // and persists nothing, while a subsequent real commit persists the changes.
    #[tokio::test]
    #[ignore = "needs cluster (gets and writes cms)"]
    async fn entry_create_dry_run() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let api = Api::<ConfigMap>::default_namespaced(client);

        let object_name = "entry-cm-dry";
        // Ensure a clean slate in case a previous run left the object behind
        if api.get_opt(object_name).await?.is_some() {
            api.delete(object_name, &DeleteParams::default()).await?;
        }

        let pp_dry = PostParams {
            dry_run: true,
            ..Default::default()
        };

        let entry = api.entry(object_name).await?;
        assert_eq!(entry.get(), None);

        // Create object dry-run
        let mut entry = entry.or_insert(|| ConfigMap {
            data: Some([("key".to_string(), "value".to_string())].into()),
            ..ConfigMap::default()
        });
        entry.commit(&pp_dry).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value")
        );
        // Dry-run must not have created anything server-side
        let fetched_obj = api.get_opt(object_name).await?;
        assert_eq!(fetched_obj, None);

        // Commit object creation properly
        entry.commit(&PostParams::default()).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value")
        );
        let fetched_obj = api.get(object_name).await?;
        assert_eq!(
            fetched_obj
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value")
        );

        // Update object dry-run
        entry
            .get_mut()
            .data
            .get_or_insert_with(BTreeMap::default)
            .insert("key".to_string(), "value2".to_string());
        entry.commit(&pp_dry).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );
        // Server still holds the pre-dry-run value
        let fetched_obj = api.get(object_name).await?;
        assert_eq!(
            fetched_obj
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value")
        );

        // Commit object update properly
        entry.commit(&PostParams::default()).await?;
        assert_eq!(
            entry
                .get()
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );
        let fetched_obj = api.get(object_name).await?;
        assert_eq!(
            fetched_obj
                .data
                .as_ref()
                .and_then(|data| data.get("key"))
                .map(String::as_str),
            Some("value2")
        );

        // Cleanup
        api.delete(object_name, &DeleteParams::default()).await?;
        Ok(())
    }
}

266
vendor/kube-client/src/api/mod.rs vendored Normal file
View File

@@ -0,0 +1,266 @@
//! API helpers for structured interaction with the Kubernetes API
mod core_methods;
#[cfg(feature = "ws")] mod remote_command;
use std::fmt::Debug;
#[cfg(feature = "ws")] pub use remote_command::{AttachedProcess, TerminalSize};
#[cfg(feature = "ws")] mod portforward;
#[cfg(feature = "ws")] pub use portforward::Portforwarder;
mod subresource;
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub use subresource::{Attach, AttachParams, Ephemeral, Execute, Portforward};
pub use subresource::{Evict, EvictParams, Log, LogParams, ScaleSpec, ScaleStatus};
mod util;
pub mod entry;
// Re-exports from kube-core
#[cfg(feature = "admission")]
#[cfg_attr(docsrs, doc(cfg(feature = "admission")))]
pub use kube_core::admission;
pub(crate) use kube_core::params;
pub use kube_core::{
dynamic::{ApiResource, DynamicObject},
gvk::{GroupVersionKind, GroupVersionResource},
metadata::{ListMeta, ObjectMeta, PartialObjectMeta, PartialObjectMetaExt, TypeMeta},
object::{NotUsed, Object, ObjectList},
request::Request,
watch::WatchEvent,
Resource, ResourceExt,
};
use kube_core::{DynamicResourceScope, NamespaceResourceScope};
pub use params::{
DeleteParams, GetParams, ListParams, Patch, PatchParams, PostParams, Preconditions, PropagationPolicy,
ValidationDirective, VersionMatch, WatchParams,
};
use crate::Client;
/// The generic Api abstraction
///
/// This abstracts over a [`Request`] and a type `K` so that
/// we get automatic serialization/deserialization on the api calls
/// implemented by the dynamic [`Resource`].
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[derive(Clone)]
pub struct Api<K> {
    /// The request builder object with its resource dependent url
    pub(crate) request: Request,
    /// The client to use (from this library)
    pub(crate) client: Client,
    /// The namespace this Api is scoped to (`None` for cluster-level / all-namespaces access)
    namespace: Option<String>,
    /// Note: Using `iter::Empty` over `PhantomData`, because we never actually keep any
    /// `K` objects, so `Empty` better models our constraints (in particular, `Empty<K>`
    /// is `Send`, even if `K` may not be).
    pub(crate) _phantom: std::iter::Empty<K>,
}
/// Api constructors for Resource implementors with custom DynamicTypes
///
/// This generally means resources created via [`DynamicObject`](crate::api::DynamicObject).
impl<K: Resource> Api<K> {
    /// Cluster level resources, or resources viewed across all namespaces
    ///
    /// This function accepts `K::DynamicType` so it can be used with dynamic resources.
    ///
    /// # Warning
    ///
    /// This variant **can only `list` and `watch` namespaced resources** and is commonly used with a `watcher`.
    /// If you need to create/patch/replace/get on a namespaced resource, you need a separate `Api::namespaced`.
    pub fn all_with(client: Client, dyntype: &K::DynamicType) -> Self {
        Self {
            request: Request::new(K::url_path(dyntype, None)),
            client,
            namespace: None,
            _phantom: std::iter::empty(),
        }
    }

    /// Namespaced resource within a given namespace
    ///
    /// This function accepts `K::DynamicType` so it can be used with dynamic resources.
    pub fn namespaced_with(client: Client, ns: &str, dyntype: &K::DynamicType) -> Self
    where
        K: Resource<Scope = DynamicResourceScope>,
    {
        // TODO: inspect dyntype scope to verify somehow?
        Self {
            request: Request::new(K::url_path(dyntype, Some(ns))),
            client,
            namespace: Some(ns.to_string()),
            _phantom: std::iter::empty(),
        }
    }

    /// Namespaced resource within the default namespace
    ///
    /// This function accepts `K::DynamicType` so it can be used with dynamic resources.
    ///
    /// The namespace is either configured on `context` in the kubeconfig
    /// or falls back to `default` when running locally, and it's using the service account's
    /// namespace when deployed in-cluster.
    pub fn default_namespaced_with(client: Client, dyntype: &K::DynamicType) -> Self
    where
        K: Resource<Scope = DynamicResourceScope>,
    {
        let ns = client.default_namespace().to_string();
        Self::namespaced_with(client, &ns, dyntype)
    }

    /// Consume self and return the [`Client`]
    pub fn into_client(self) -> Client {
        self.into()
    }

    /// Return a reference to the current resource url path
    pub fn resource_url(&self) -> &str {
        self.request.url_path.as_str()
    }
}
/// Api constructors for Resource implementors with Default DynamicTypes
///
/// This generally means structs implementing `k8s_openapi::Resource`.
impl<K: Resource> Api<K>
where
    <K as Resource>::DynamicType: Default,
{
    /// Cluster level resources, or resources viewed across all namespaces
    ///
    /// Namespace scoped resource allowing querying across all namespaces:
    ///
    /// ```no_run
    /// # use kube::{Api, Client};
    /// # let client: Client = todo!();
    /// use k8s_openapi::api::core::v1::Pod;
    /// let api: Api<Pod> = Api::all(client);
    /// ```
    ///
    /// Cluster scoped resources also use this entrypoint:
    ///
    /// ```no_run
    /// # use kube::{Api, Client};
    /// # let client: Client = todo!();
    /// use k8s_openapi::api::core::v1::Node;
    /// let api: Api<Node> = Api::all(client);
    /// ```
    ///
    /// # Warning
    ///
    /// This variant **can only `list` and `watch` namespaced resources** and is commonly used with a `watcher`.
    /// If you need to create/patch/replace/get on a namespaced resource, you need a separate `Api::namespaced`.
    pub fn all(client: Client) -> Self {
        // Delegate to the dynamic constructor using the default type information
        Self::all_with(client, &K::DynamicType::default())
    }

    /// Namespaced resource within a given namespace
    ///
    /// ```no_run
    /// # use kube::{Api, Client};
    /// # let client: Client = todo!();
    /// use k8s_openapi::api::core::v1::Pod;
    /// let api: Api<Pod> = Api::namespaced(client, "default");
    /// ```
    ///
    /// This will ONLY work on namespaced resources as set by `Scope`:
    ///
    /// ```compile_fail
    /// # use kube::{Api, Client};
    /// # let client: Client = todo!();
    /// use k8s_openapi::api::core::v1::Node;
    /// let api: Api<Node> = Api::namespaced(client, "default"); // resource not namespaced!
    /// ```
    ///
    /// For dynamic type information, use [`Api::namespaced_with`] variants.
    pub fn namespaced(client: Client, ns: &str) -> Self
    where
        K: Resource<Scope = NamespaceResourceScope>,
    {
        let url = K::url_path(&K::DynamicType::default(), Some(ns));
        Self {
            request: Request::new(url),
            client,
            namespace: Some(ns.to_string()),
            _phantom: std::iter::empty(),
        }
    }

    /// Namespaced resource within the default namespace
    ///
    /// The namespace is either configured on `context` in the kubeconfig
    /// or falls back to `default` when running locally, and it's using the service account's
    /// namespace when deployed in-cluster.
    ///
    /// ```no_run
    /// # use kube::{Api, Client};
    /// # let client: Client = todo!();
    /// use k8s_openapi::api::core::v1::Pod;
    /// let api: Api<Pod> = Api::default_namespaced(client);
    /// ```
    ///
    /// This will ONLY work on namespaced resources as set by `Scope`:
    ///
    /// ```compile_fail
    /// # use kube::{Api, Client};
    /// # let client: Client = todo!();
    /// use k8s_openapi::api::core::v1::Node;
    /// let api: Api<Node> = Api::default_namespaced(client); // resource not namespaced!
    /// ```
    pub fn default_namespaced(client: Client) -> Self
    where
        K: Resource<Scope = NamespaceResourceScope>,
    {
        // Resolve the configured namespace from the client, then delegate
        let ns = client.default_namespace().to_string();
        Self::namespaced(client, &ns)
    }
}
impl<K> From<Api<K>> for Client {
fn from(api: Api<K>) -> Self {
api.client
}
}
impl<K> Debug for Api<K> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Intentionally destructuring, to cause compile errors when new fields are added
        let Self {
            request,
            client: _,
            namespace,
            _phantom,
        } = self;
        let mut output = f.debug_struct("Api");
        output.field("request", request);
        // The client itself is not useful (or cheap) to render
        output.field("client", &"...");
        output.field("namespace", namespace);
        output.finish()
    }
}
/// Sanity test on scope restrictions
#[cfg(test)]
mod test {
    use crate::{client::Body, Api, Client};
    use k8s_openapi::api::core::v1 as corev1;

    use http::{Request, Response};
    use tower_test::mock;

    // Compile-time check that cluster- and namespace-scoped resources
    // can be constructed through the matching entrypoints.
    #[tokio::test]
    async fn scopes_should_allow_correct_interface() {
        let (service, _handle) = mock::pair::<Request<Body>, Response<Body>>();
        let client = Client::new(service, "default");

        // Cluster-scoped resources use `all`
        let _: Api<corev1::Node> = Api::all(client.clone());
        let _: Api<corev1::PersistentVolume> = Api::all(client.clone());
        // Namespace-scoped resources use `namespaced`/`default_namespaced`
        let _: Api<corev1::Pod> = Api::default_namespaced(client.clone());
        let _: Api<corev1::ConfigMap> = Api::namespaced(client, "default");
    }
}

View File

@@ -0,0 +1,355 @@
use std::{collections::HashMap, future::Future};
use bytes::{Buf, Bytes};
use futures::{
channel::{mpsc, oneshot},
future, FutureExt, SinkExt, StreamExt,
};
use thiserror::Error;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, DuplexStream};
use tokio_tungstenite::{tungstenite as ws, WebSocketStream};
use tokio_util::io::ReaderStream;
/// Errors from Portforwarder.
#[derive(Debug, Error)]
pub enum Error {
    /// Received invalid channel in WebSocket message.
    #[error("received invalid channel {0}")]
    InvalidChannel(usize),

    /// Received initial frame with invalid size. The initial frame must be 3 bytes, including the channel prefix.
    #[error("received initial frame with invalid size")]
    InvalidInitialFrameSize,

    /// Received initial frame with invalid port mapping.
    /// The port included in the initial frame did not match the port number associated with the channel.
    #[error("invalid port mapping in initial frame, got {actual}, expected {expected}")]
    InvalidPortMapping {
        /// The port number carried by the initial frame
        actual: u16,
        /// The port number associated with the channel
        expected: u16,
    },

    /// Failed to forward bytes from Pod.
    #[error("failed to forward bytes from Pod: {0}")]
    ForwardFromPod(#[source] futures::channel::mpsc::SendError),

    /// Failed to forward bytes to Pod.
    #[error("failed to forward bytes to Pod: {0}")]
    ForwardToPod(#[source] futures::channel::mpsc::SendError),

    /// Failed to write bytes from Pod.
    #[error("failed to write bytes from Pod: {0}")]
    WriteBytesFromPod(#[source] std::io::Error),

    /// Failed to read bytes to send to Pod.
    #[error("failed to read bytes to send to Pod: {0}")]
    ReadBytesToSend(#[source] std::io::Error),

    /// Received an error message from pod that is not a valid UTF-8.
    #[error("received invalid error message from Pod: {0}")]
    InvalidErrorMessage(#[source] std::string::FromUtf8Error),

    /// Failed to forward an error message from pod.
    #[error("failed to forward an error message {0:?}")]
    ForwardErrorMessage(String),

    /// Failed to send a WebSocket message to the server.
    #[error("failed to send a WebSocket message: {0}")]
    SendWebSocketMessage(#[source] ws::Error),

    /// Failed to receive a WebSocket message from the server.
    #[error("failed to receive a WebSocket message: {0}")]
    ReceiveWebSocketMessage(#[source] ws::Error),

    /// Failed to complete the background task driving the port-forward.
    #[error("failed to complete the background task: {0}")]
    Spawn(#[source] tokio::task::JoinError),

    /// Failed to shutdown a pod writer channel.
    #[error("failed to shutdown write to Pod channel: {0}")]
    Shutdown(#[source] std::io::Error),
}
// Receiving/sending ends of the per-port, single-shot error notification channel
type ErrorReceiver = oneshot::Receiver<String>;
type ErrorSender = oneshot::Sender<String>;

// Internal message used by the futures to communicate with each other.
enum Message {
    /// Bytes received from the pod on the given channel
    FromPod(u8, Bytes),
    /// Bytes read locally that should be sent to the pod on the given channel
    ToPod(u8, Bytes),
    /// The WebSocket stream from the pod was closed
    FromPodClose,
    /// The local reader for the given channel reached EOF
    ToPodClose(u8),
}
/// Manages port-forwarded streams.
///
/// Provides `AsyncRead + AsyncWrite` for each port and **does not** bind to local ports. Error
/// channel for each port is only written by the server when there's an exception and
/// the port cannot be used (didn't initialize or can't be used anymore).
pub struct Portforwarder {
    /// Caller-facing halves of the per-port duplex pipes, keyed by target port
    ports: HashMap<u16, DuplexStream>,
    /// Per-port receivers for server-reported error messages
    errors: HashMap<u16, ErrorReceiver>,
    /// Handle to the background task driving the WebSocket message loop
    task: tokio::task::JoinHandle<Result<(), Error>>,
}
impl Portforwarder {
pub(crate) fn new<S>(stream: WebSocketStream<S>, port_nums: &[u16]) -> Self
where
S: AsyncRead + AsyncWrite + Unpin + Sized + Send + 'static,
{
let mut ports = HashMap::with_capacity(port_nums.len());
let mut error_rxs = HashMap::with_capacity(port_nums.len());
let mut error_txs = Vec::with_capacity(port_nums.len());
let mut task_ios = Vec::with_capacity(port_nums.len());
for port in port_nums.iter() {
let (a, b) = tokio::io::duplex(1024 * 1024);
ports.insert(*port, a);
task_ios.push(b);
let (tx, rx) = oneshot::channel();
error_rxs.insert(*port, rx);
error_txs.push(Some(tx));
}
let task = tokio::spawn(start_message_loop(
stream,
port_nums.to_vec(),
task_ios,
error_txs,
));
Portforwarder {
ports,
errors: error_rxs,
task,
}
}
/// Take a port stream by the port on the target resource.
///
/// A value is returned at most once per port.
#[inline]
pub fn take_stream(&mut self, port: u16) -> Option<impl AsyncRead + AsyncWrite + Unpin> {
self.ports.remove(&port)
}
/// Take a future that resolves with any error message or when the error sender is dropped.
/// When the future resolves, the port should be considered no longer usable.
///
/// A value is returned at most once per port.
#[inline]
pub fn take_error(&mut self, port: u16) -> Option<impl Future<Output = Option<String>>> {
self.errors.remove(&port).map(|recv| recv.map(|res| res.ok()))
}
/// Abort the background task, causing port forwards to fail.
#[inline]
pub fn abort(&self) {
self.task.abort();
}
/// Waits for port forwarding task to complete.
pub async fn join(self) -> Result<(), Error> {
let Self {
mut ports,
mut errors,
task,
} = self;
// Start by terminating any streams that have not yet been taken
// since they would otherwise keep the connection open indefinitely
ports.clear();
errors.clear();
task.await.unwrap_or_else(|e| Err(Error::Spawn(e)))
}
}
/// Wire up the per-port reader loops, the WebSocket receive loop, and the
/// central forwarder loop, and run them all to completion.
async fn start_message_loop<S>(
    stream: WebSocketStream<S>,
    ports: Vec<u16>,
    duplexes: Vec<DuplexStream>,
    error_senders: Vec<Option<ErrorSender>>,
) -> Result<(), Error>
where
    S: AsyncRead + AsyncWrite + Unpin + Sized + Send + 'static,
{
    // Channel to communicate with the main loop
    let (sender, receiver) = mpsc::channel::<Message>(1);

    // Loops to run concurrently.
    // We can spawn tasks to run `to_pod_loop` in parallel and flatten the errors, but the other 2 loops
    // are over a single WebSocket connection and cannot process each port in parallel.
    let mut loops = Vec::with_capacity(ports.len() + 2);
    let mut writers = Vec::new();
    for (i, duplex) in duplexes.into_iter().enumerate() {
        let (read_half, write_half) = tokio::io::split(duplex);
        writers.push(write_half);
        // Each port uses 2 channels. Duplex data channel and error.
        let data_channel = (i as u8) * 2;
        loops.push(to_pod_loop(data_channel, read_half, sender.clone()).boxed());
    }

    let (ws_sink, ws_stream) = stream.split();
    loops.push(from_pod_loop(ws_stream, sender).boxed());
    loops.push(forwarder_loop(&ports, receiver, ws_sink, writers, error_senders).boxed());

    future::try_join_all(loops).await.map(|_| ())
}
async fn to_pod_loop(
ch: u8,
reader: tokio::io::ReadHalf<DuplexStream>,
mut sender: mpsc::Sender<Message>,
) -> Result<(), Error> {
let mut read_stream = ReaderStream::new(reader);
while let Some(bytes) = read_stream
.next()
.await
.transpose()
.map_err(Error::ReadBytesToSend)?
{
if !bytes.is_empty() {
sender
.send(Message::ToPod(ch, bytes))
.await
.map_err(Error::ForwardToPod)?;
}
}
sender
.send(Message::ToPodClose(ch))
.await
.map_err(Error::ForwardToPod)?;
Ok(())
}
async fn from_pod_loop<S>(
mut ws_stream: futures::stream::SplitStream<WebSocketStream<S>>,
mut sender: mpsc::Sender<Message>,
) -> Result<(), Error>
where
S: AsyncRead + AsyncWrite + Unpin + Sized + Send + 'static,
{
while let Some(msg) = ws_stream
.next()
.await
.transpose()
.map_err(Error::ReceiveWebSocketMessage)?
{
match msg {
ws::Message::Binary(mut bytes) if bytes.len() > 1 => {
let ch = bytes.split_to(1)[0];
sender
.send(Message::FromPod(ch, bytes))
.await
.map_err(Error::ForwardFromPod)?;
}
message if message.is_close() => {
sender
.send(Message::FromPodClose)
.await
.map_err(Error::ForwardFromPod)?;
break;
}
// REVIEW should we error on unexpected websocket message?
_ => {}
}
}
Ok(())
}
// Start a loop to handle messages received from other futures.
// On `Message::ToPod(ch, bytes)`, a WebSocket message is sent with the channel prefix.
// On `Message::FromPod(ch, bytes)` with an even `ch`, `bytes` are written to the port's sink.
// On `Message::FromPod(ch, bytes)` with an odd `ch`, an error message is sent to the error channel of the port.
async fn forwarder_loop<S>(
    ports: &[u16],
    mut receiver: mpsc::Receiver<Message>,
    mut ws_sink: futures::stream::SplitSink<WebSocketStream<S>, ws::Message>,
    mut writers: Vec<tokio::io::WriteHalf<DuplexStream>>,
    mut error_senders: Vec<Option<ErrorSender>>,
) -> Result<(), Error>
where
    S: AsyncRead + AsyncWrite + Unpin + Sized + Send + 'static,
{
    // Per-channel bookkeeping. There are two channels per forwarded port:
    // even channel 2n carries data, odd channel 2n+1 carries errors.
    #[derive(Default, Clone)]
    struct ChannelState {
        // Keep track if the channel has received the initialization frame.
        initialized: bool,
        // Keep track if the channel has shutdown.
        shutdown: bool,
    }
    let mut chan_state = vec![ChannelState::default(); 2 * ports.len()];
    // Number of data channels shut down via `ToPodClose`.
    let mut closed_ports = 0;
    // Set once the WebSocket close frame has been sent, so it is sent only once.
    let mut socket_shutdown = false;
    while let Some(msg) = receiver.next().await {
        match msg {
            Message::FromPod(ch, mut bytes) => {
                let ch = ch as usize;
                let channel = chan_state.get_mut(ch).ok_or_else(|| Error::InvalidChannel(ch))?;
                let port_index = ch / 2;
                // Initialization
                if !channel.initialized {
                    // The initial message must be 3 bytes including the channel prefix.
                    // (The prefix byte was already stripped in `from_pod_loop`, leaving
                    // the 2-byte little-endian port number here.)
                    if bytes.len() != 2 {
                        return Err(Error::InvalidInitialFrameSize);
                    }
                    let port = bytes.get_u16_le();
                    if port != ports[port_index] {
                        return Err(Error::InvalidPortMapping {
                            actual: port,
                            expected: ports[port_index],
                        });
                    }
                    channel.initialized = true;
                    continue;
                }
                // Odd channels are for errors for (n - 1)/2 th port
                if ch % 2 != 0 {
                    // A port sends at most one error message because it's considered unusable after this.
                    if let Some(sender) = error_senders[port_index].take() {
                        let s = String::from_utf8(bytes.into_iter().collect())
                            .map_err(Error::InvalidErrorMessage)?;
                        sender.send(s).map_err(Error::ForwardErrorMessage)?;
                    }
                } else if !channel.shutdown {
                    // Data channel: relay the payload to the local reader half.
                    writers[port_index]
                        .write_all(&bytes)
                        .await
                        .map_err(Error::WriteBytesFromPod)?;
                }
            }
            Message::ToPod(ch, bytes) => {
                // Re-attach the channel prefix byte and ship the frame to the pod.
                let mut bin = Vec::with_capacity(bytes.len() + 1);
                bin.push(ch);
                bin.extend(bytes);
                ws_sink
                    .send(ws::Message::binary(bin))
                    .await
                    .map_err(Error::SendWebSocketMessage)?;
            }
            Message::ToPodClose(ch) => {
                let ch = ch as usize;
                let channel = chan_state.get_mut(ch).ok_or_else(|| Error::InvalidChannel(ch))?;
                let port_index = ch / 2;
                // Shut down the local writer at most once per channel.
                if !channel.shutdown {
                    writers[port_index].shutdown().await.map_err(Error::Shutdown)?;
                    channel.shutdown = true;
                    closed_ports += 1;
                }
            }
            Message::FromPodClose => {
                // Server closed the connection: shut down every local writer.
                // NOTE(review): `chan_state`/`closed_ports` are not updated here;
                // this assumes no further `FromPod` data arrives after a close
                // frame — confirm against the server protocol.
                for writer in &mut writers {
                    writer.shutdown().await.map_err(Error::Shutdown)?;
                }
            }
        }
        // Once every data channel has closed, close the WebSocket (exactly once).
        if closed_ports == ports.len() && !socket_shutdown {
            ws_sink
                .send(ws::Message::Close(None))
                .await
                .map_err(Error::SendWebSocketMessage)?;
            socket_shutdown = true;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,411 @@
use std::future::Future;
use k8s_openapi::apimachinery::pkg::apis::meta::v1::Status;
use futures::{
channel::{mpsc, oneshot},
FutureExt, SinkExt, StreamExt,
};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tokio::{
io::{AsyncRead, AsyncWrite, AsyncWriteExt, DuplexStream},
select,
};
use tokio_tungstenite::tungstenite as ws;
use crate::client::Connection;
use super::AttachParams;
type StatusReceiver = oneshot::Receiver<Status>;
type StatusSender = oneshot::Sender<Status>;
type TerminalSizeReceiver = mpsc::Receiver<TerminalSize>;
type TerminalSizeSender = mpsc::Sender<TerminalSize>;
/// TerminalSize defines the size of a terminal
#[derive(Debug, Serialize, Deserialize)]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
// Serialized with PascalCase keys ("Width"/"Height"); the serialized bytes are
// sent on the resize channel (see `RESIZE_CHANNEL` below).
#[serde(rename_all = "PascalCase")]
pub struct TerminalSize {
    /// width of the terminal
    pub width: u16,
    /// height of the terminal
    pub height: u16,
}
/// Errors from attaching to a pod.
#[derive(Debug, Error)]
pub enum Error {
    /// Failed to read from stdin
    #[error("failed to read from stdin: {0}")]
    ReadStdin(#[source] std::io::Error),
    /// Failed to send stdin data to the pod
    #[error("failed to send a stdin data: {0}")]
    SendStdin(#[source] ws::Error),
    /// Failed to write to stdout
    #[error("failed to write to stdout: {0}")]
    WriteStdout(#[source] std::io::Error),
    /// Failed to write to stderr
    #[error("failed to write to stderr: {0}")]
    WriteStderr(#[source] std::io::Error),
    /// Failed to receive a WebSocket message from the server.
    #[error("failed to receive a WebSocket message: {0}")]
    ReceiveWebSocketMessage(#[source] ws::Error),
    /// Failed to complete the background task
    #[error("failed to complete the background task: {0}")]
    Spawn(#[source] tokio::task::JoinError),
    /// Failed to send close message.
    #[error("failed to send a WebSocket close message: {0}")]
    SendClose(#[source] ws::Error),
    /// Failed to deserialize status object
    #[error("failed to deserialize status object: {0}")]
    DeserializeStatus(#[source] serde_json::Error),
    /// Failed to send status object
    #[error("failed to send status object")]
    SendStatus,
    /// Failed to serialize a `TerminalSize` object
    #[error("failed to serialize TerminalSize object: {0}")]
    SerializeTerminalSize(#[source] serde_json::Error),
    /// Failed to send a terminal size message
    #[error("failed to send terminal size message")]
    SendTerminalSize(#[source] ws::Error),
    /// Failed to set the terminal size; `tty` must be `true` to resize the terminal
    #[error("failed to set terminal size, tty need to be true to resize the terminal")]
    TtyNeedToBeTrue,
}
const MAX_BUF_SIZE: usize = 1024;
/// Represents an attached process in a container for [`attach`] and [`exec`].
///
/// Provides access to `stdin`, `stdout`, and `stderr` if attached.
///
/// Use [`AttachedProcess::join`] to wait for the process to terminate.
///
/// [`attach`]: crate::Api::attach
/// [`exec`]: crate::Api::exec
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub struct AttachedProcess {
    // Which streams were requested in `AttachParams`; these gate the accessors.
    has_stdin: bool,
    has_stdout: bool,
    has_stderr: bool,
    // Caller-facing halves of the in-memory pipes; `take`n by the accessors.
    stdin_writer: Option<DuplexStream>,
    stdout_reader: Option<DuplexStream>,
    stderr_reader: Option<DuplexStream>,
    // Receives the final `Status` object, if the server sends one.
    status_rx: Option<StatusReceiver>,
    // Present only when `tty` was requested; used to send resize events.
    terminal_resize_tx: Option<TerminalSizeSender>,
    // Background task driving the WebSocket message loop.
    task: tokio::task::JoinHandle<Result<(), Error>>,
}
impl AttachedProcess {
pub(crate) fn new(connection: Connection, ap: &AttachParams) -> Self {
// To simplify the implementation, always create a pipe for stdin.
// The caller does not have access to it unless they had requested.
let (stdin_writer, stdin_reader) = tokio::io::duplex(ap.max_stdin_buf_size.unwrap_or(MAX_BUF_SIZE));
let (stdout_writer, stdout_reader) = if ap.stdout {
let (w, r) = tokio::io::duplex(ap.max_stdout_buf_size.unwrap_or(MAX_BUF_SIZE));
(Some(w), Some(r))
} else {
(None, None)
};
let (stderr_writer, stderr_reader) = if ap.stderr {
let (w, r) = tokio::io::duplex(ap.max_stderr_buf_size.unwrap_or(MAX_BUF_SIZE));
(Some(w), Some(r))
} else {
(None, None)
};
let (status_tx, status_rx) = oneshot::channel();
let (terminal_resize_tx, terminal_resize_rx) = if ap.tty {
let (w, r) = mpsc::channel(10);
(Some(w), Some(r))
} else {
(None, None)
};
let task = tokio::spawn(start_message_loop(
connection,
stdin_reader,
stdout_writer,
stderr_writer,
status_tx,
terminal_resize_rx,
));
AttachedProcess {
has_stdin: ap.stdin,
has_stdout: ap.stdout,
has_stderr: ap.stderr,
task,
stdin_writer: Some(stdin_writer),
stdout_reader,
stderr_reader,
terminal_resize_tx,
status_rx: Some(status_rx),
}
}
/// Async writer to stdin.
/// ```no_run
/// # use kube_client::api::AttachedProcess;
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let attached: AttachedProcess = todo!();
/// let mut stdin_writer = attached.stdin().unwrap();
/// stdin_writer.write(b"foo\n").await?;
/// # Ok(())
/// # }
/// ```
/// Only available if [`AttachParams`](super::AttachParams) had `stdin`.
pub fn stdin(&mut self) -> Option<impl AsyncWrite + Unpin> {
if !self.has_stdin {
return None;
}
self.stdin_writer.take()
}
/// Async reader for stdout outputs.
/// ```no_run
/// # use kube_client::api::AttachedProcess;
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let attached: AttachedProcess = todo!();
/// let mut stdout_reader = attached.stdout().unwrap();
/// let mut buf = [0u8; 4];
/// stdout_reader.read_exact(&mut buf).await?;
/// # Ok(())
/// # }
/// ```
/// Only available if [`AttachParams`](super::AttachParams) had `stdout`.
pub fn stdout(&mut self) -> Option<impl AsyncRead + Unpin> {
if !self.has_stdout {
return None;
}
self.stdout_reader.take()
}
/// Async reader for stderr outputs.
/// ```no_run
/// # use kube_client::api::AttachedProcess;
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let attached: AttachedProcess = todo!();
/// let mut stderr_reader = attached.stderr().unwrap();
/// let mut buf = [0u8; 4];
/// stderr_reader.read_exact(&mut buf).await?;
/// # Ok(())
/// # }
/// ```
/// Only available if [`AttachParams`](super::AttachParams) had `stderr`.
pub fn stderr(&mut self) -> Option<impl AsyncRead + Unpin> {
if !self.has_stderr {
return None;
}
self.stderr_reader.take()
}
/// Abort the background task, causing remote command to fail.
#[inline]
pub fn abort(&self) {
self.task.abort();
}
/// Waits for the remote command task to complete.
pub async fn join(self) -> Result<(), Error> {
self.task.await.unwrap_or_else(|e| Err(Error::Spawn(e)))
}
/// Take a future that resolves with any status object or when the sender is dropped.
///
/// Returns `None` if called more than once.
pub fn take_status(&mut self) -> Option<impl Future<Output = Option<Status>>> {
self.status_rx.take().map(|recv| recv.map(|res| res.ok()))
}
/// Async writer to change the terminal size
/// ```no_run
/// # use kube_client::api::{AttachedProcess, TerminalSize};
/// # use tokio::io::{AsyncReadExt, AsyncWriteExt};
/// # use futures::SinkExt;
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let attached: AttachedProcess = todo!();
/// let mut terminal_size_writer = attached.terminal_size().unwrap();
/// terminal_size_writer.send(TerminalSize{
/// height: 100,
/// width: 200,
/// }).await?;
/// # Ok(())
/// # }
/// ```
/// Only available if [`AttachParams`](super::AttachParams) had `tty`.
pub fn terminal_size(&mut self) -> Option<TerminalSizeSender> {
self.terminal_resize_tx.take()
}
}
// These values come from here: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/remotecommand/constants.go#L57
const STDIN_CHANNEL: u8 = 0;
const STDOUT_CHANNEL: u8 = 1;
const STDERR_CHANNEL: u8 = 2;
// status channel receives `Status` object on exit.
const STATUS_CHANNEL: u8 = 3;
// The resize channel is used to send a TerminalSize object to change the size of the terminal.
const RESIZE_CHANNEL: u8 = 4;
/// Used to signal that a channel has reached EOF. Only works on V5 of the protocol.
const CLOSE_CHANNEL: u8 = 255;
// Core protocol loop: shuttles data between the local stdin/stdout/stderr pipes
// and the channel-multiplexed WebSocket connection to the apiserver.
async fn start_message_loop(
    connection: Connection,
    stdin: impl AsyncRead + Unpin,
    mut stdout: Option<impl AsyncWrite + Unpin>,
    mut stderr: Option<impl AsyncWrite + Unpin>,
    status_tx: StatusSender,
    mut terminal_size_rx: Option<TerminalSizeReceiver>,
) -> Result<(), Error> {
    // Whether the negotiated protocol version allows signalling per-stream EOF
    // (the CLOSE_CHANNEL frame) instead of closing the whole socket.
    let supports_stream_close = connection.supports_stream_close();
    let stream = connection.into_stream();
    let mut stdin_stream = tokio_util::io::ReaderStream::new(stdin);
    let (mut server_send, raw_server_recv) = stream.split();
    // Work with filtered messages to reduce noise.
    let mut server_recv = raw_server_recv.filter_map(filter_message).boxed();
    let mut have_terminal_size_rx = terminal_size_rx.is_some();
    // True until we reach EOF for stdin.
    let mut stdin_is_open = true;
    loop {
        // Rebuilt each iteration; resolves only when a resize receiver exists.
        let terminal_size_next = async {
            match terminal_size_rx.as_mut() {
                Some(tmp) => Some(tmp.next().await),
                None => None,
            }
        };
        select! {
            server_message = server_recv.next() => {
                match server_message {
                    Some(Ok(Message::Stdout(bin))) => {
                        // `bin` still carries the channel-prefix byte; skip it.
                        if let Some(stdout) = stdout.as_mut() {
                            stdout.write_all(&bin[1..]).await.map_err(Error::WriteStdout)?;
                        }
                    },
                    Some(Ok(Message::Stderr(bin))) => {
                        if let Some(stderr) = stderr.as_mut() {
                            stderr.write_all(&bin[1..]).await.map_err(Error::WriteStderr)?;
                        }
                    },
                    Some(Ok(Message::Status(bin))) => {
                        // A Status object means the remote command finished.
                        let status = serde_json::from_slice::<Status>(&bin[1..]).map_err(Error::DeserializeStatus)?;
                        status_tx.send(status).map_err(|_| Error::SendStatus)?;
                        break
                    },
                    Some(Err(err)) => {
                        return Err(Error::ReceiveWebSocketMessage(err));
                    },
                    None => {
                        // Connection closed properly
                        break
                    },
                }
            },
            stdin_message = stdin_stream.next(), if stdin_is_open => {
                match stdin_message {
                    Some(Ok(bytes)) => {
                        // Prefix the payload with the stdin channel byte.
                        if !bytes.is_empty() {
                            let mut vec = Vec::with_capacity(bytes.len() + 1);
                            vec.push(STDIN_CHANNEL);
                            vec.extend_from_slice(&bytes[..]);
                            server_send
                                .send(ws::Message::binary(vec))
                                .await
                                .map_err(Error::SendStdin)?;
                        }
                    },
                    Some(Err(err)) => {
                        return Err(Error::ReadStdin(err));
                    }
                    None => {
                        // Stdin closed (writer half dropped).
                        // Let the server know we reached the end of stdin.
                        if supports_stream_close {
                            // Signal stdin has reached EOF.
                            // See: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/httpstream/wsstream/conn.go#L346
                            let vec = vec![CLOSE_CHANNEL, STDIN_CHANNEL];
                            server_send
                                .send(ws::Message::binary(vec))
                                .await
                                .map_err(Error::SendStdin)?;
                        } else {
                            // Best we can do is trigger the whole websocket to close.
                            // We may miss out on any remaining stdout data that has not
                            // been sent yet.
                            server_send.close().await.map_err(Error::SendClose)?;
                        }
                        // Do not check stdin_stream for data in future loops.
                        stdin_is_open = false;
                    }
                }
            },
            Some(terminal_size_message) = terminal_size_next, if have_terminal_size_rx => {
                match terminal_size_message {
                    Some(new_size) => {
                        // Resize events are JSON-encoded and sent on the resize channel.
                        let new_size = serde_json::to_vec(&new_size).map_err(Error::SerializeTerminalSize)?;
                        let mut vec = Vec::with_capacity(new_size.len() + 1);
                        vec.push(RESIZE_CHANNEL);
                        vec.extend_from_slice(&new_size[..]);
                        server_send.send(ws::Message::Binary(vec.into())).await.map_err(Error::SendTerminalSize)?;
                    },
                    None => {
                        // Resize sender dropped; disable this select arm.
                        have_terminal_size_rx = false;
                    }
                }
            },
        }
    }
    Ok(())
}
/// Channeled messages from the server.
///
/// Each payload still carries the leading channel-prefix byte; consumers
/// strip it when writing (`&bin[1..]`).
enum Message {
    /// To Stdout channel (1)
    Stdout(Vec<u8>),
    /// To stderr channel (2)
    Stderr(Vec<u8>),
    /// To error/status channel (3)
    Status(Vec<u8>),
}
// Filter to reduce all the possible WebSocket messages into the few we expect to receive.
async fn filter_message(wsm: Result<ws::Message, ws::Error>) -> Option<Result<Message, ws::Error>> {
    // Surface fatal errors as-is. `WebSocketStream` already turns
    // `ConnectionClosed`/`AlreadyClosed` into `None`, so anything reaching
    // here is unrecoverable.
    let message = match wsm {
        Err(err) => return Some(Err(err)),
        Ok(message) => message,
    };
    // The protocol only sends binary frames; a frame must be longer than the
    // single channel-prefix byte to carry a payload (a 1-byte frame is sent
    // on connection).
    match message {
        ws::Message::Binary(bin) if bin.len() > 1 => match bin[0] {
            STDOUT_CHANNEL => Some(Ok(Message::Stdout(bin.into()))),
            STDERR_CHANNEL => Some(Ok(Message::Stderr(bin.into()))),
            STATUS_CHANNEL => Some(Ok(Message::Status(bin.into()))),
            // Nothing is expected back on the stdin or resize channels.
            _ => None,
        },
        // Any other message type (including close frames) is ignored;
        // the stream terminates with `None` on its own.
        _ => None,
    }
}

View File

@@ -0,0 +1,612 @@
use futures::AsyncBufRead;
use serde::{de::DeserializeOwned, Serialize};
use std::fmt::Debug;
use crate::{
api::{Api, Patch, PatchParams, PostParams},
Error, Result,
};
use kube_core::response::Status;
pub use kube_core::subresource::{EvictParams, LogParams};
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub use kube_core::subresource::AttachParams;
pub use k8s_openapi::api::autoscaling::v1::{Scale, ScaleSpec, ScaleStatus};
#[cfg(feature = "ws")] use crate::api::portforward::Portforwarder;
#[cfg(feature = "ws")] use crate::api::remote_command::AttachedProcess;
/// Methods for [scale subresource](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#scale-subresource).
impl<K> Api<K>
where
    K: Clone + DeserializeOwned,
{
    /// Fetch the scale subresource
    pub async fn get_scale(&self, name: &str) -> Result<Scale> {
        let mut request = self
            .request
            .get_subresource("scale", name)
            .map_err(Error::BuildRequest)?;
        // Record the operation name in the request extensions.
        request.extensions_mut().insert("get_scale");
        self.client.request(request).await
    }

    /// Update the scale subresource
    pub async fn patch_scale<P: serde::Serialize + Debug>(
        &self,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<Scale> {
        let mut request = self
            .request
            .patch_subresource("scale", name, pp, patch)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("patch_scale");
        self.client.request(request).await
    }

    /// Replace the scale subresource
    pub async fn replace_scale(&self, name: &str, pp: &PostParams, data: Vec<u8>) -> Result<Scale> {
        let mut request = self
            .request
            .replace_subresource("scale", name, pp, data)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("replace_scale");
        self.client.request(request).await
    }
}
/// Arbitrary subresources
impl<K> Api<K>
where
    K: Clone + DeserializeOwned + Debug,
{
    /// Display one or many sub-resources.
    pub async fn get_subresource(&self, subresource_name: &str, name: &str) -> Result<K> {
        let mut request = self
            .request
            .get_subresource(subresource_name, name)
            .map_err(Error::BuildRequest)?;
        // Record the operation name in the request extensions.
        request.extensions_mut().insert("get_subresource");
        self.client.request(request).await
    }

    /// Create an instance of the subresource
    pub async fn create_subresource<T>(
        &self,
        subresource_name: &str,
        name: &str,
        pp: &PostParams,
        data: Vec<u8>,
    ) -> Result<T>
    where
        T: DeserializeOwned,
    {
        let mut request = self
            .request
            .create_subresource(subresource_name, name, pp, data)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("create_subresource");
        self.client.request(request).await
    }

    /// Patch an instance of the subresource
    pub async fn patch_subresource<P: serde::Serialize + Debug>(
        &self,
        subresource_name: &str,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<K> {
        let mut request = self
            .request
            .patch_subresource(subresource_name, name, pp, patch)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("patch_subresource");
        self.client.request(request).await
    }

    /// Replace an instance of the subresource
    pub async fn replace_subresource(
        &self,
        subresource_name: &str,
        name: &str,
        pp: &PostParams,
        data: Vec<u8>,
    ) -> Result<K> {
        let mut request = self
            .request
            .replace_subresource(subresource_name, name, pp, data)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("replace_subresource");
        self.client.request(request).await
    }
}
// ----------------------------------------------------------------------------
// Ephemeral containers
// ----------------------------------------------------------------------------
/// Marker trait for objects that support the ephemeral containers sub resource.
///
/// See [`Api::get_ephemeral_containers`] et al.
pub trait Ephemeral {}
// Implemented for Pod, which exposes the `ephemeralcontainers` subresource.
impl Ephemeral for k8s_openapi::api::core::v1::Pod {}
impl<K> Api<K>
where
    K: Clone + DeserializeOwned + Ephemeral,
{
    /// Replace the ephemeral containers sub resource entirely.
    ///
    /// This functions in the same way as [`Api::replace`] except only `.spec.ephemeralContainers` is replaced, everything else is ignored.
    ///
    /// Note that ephemeral containers may **not** be changed or removed once attached to a pod.
    ///
    ///
    /// You may want to patch the underlying resource to gain access to the main container process,
    /// see the [documentation](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) for `sharedProcessNamespace`.
    ///
    /// See the Kubernetes [documentation](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/#what-is-an-ephemeral-container) for more details.
    ///
    /// [`Api::patch_ephemeral_containers`] may be more ergonomic, as you will avoid having to first fetch the
    /// existing subresources with an appropriate merge strategy, see the examples for more details.
    ///
    /// Example of using `replace_ephemeral_containers`:
    ///
    /// ```no_run
    /// use k8s_openapi::api::core::v1::Pod;
    /// use kube::{Api, api::PostParams};
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client = kube::Client::try_default().await?;
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let pp = PostParams::default();
    ///
    /// // Get pod object with ephemeral containers.
    /// let mut mypod = pods.get_ephemeral_containers("mypod").await?;
    ///
    /// // If there were existing ephemeral containers, we would have to append
    /// // new containers to the list before calling replace_ephemeral_containers.
    /// assert_eq!(mypod.spec.as_mut().unwrap().ephemeral_containers, None);
    ///
    /// // Add an ephemeral container to the pod object.
    /// mypod.spec.as_mut().unwrap().ephemeral_containers = Some(serde_json::from_value(serde_json::json!([
    ///     {
    ///         "name": "myephemeralcontainer",
    ///         "image": "busybox:1.34.1",
    ///         "command": ["sh", "-c", "sleep 20"],
    ///     },
    /// ]))?);
    ///
    /// pods.replace_ephemeral_containers("mypod", &pp, &mypod).await?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub async fn replace_ephemeral_containers(&self, name: &str, pp: &PostParams, data: &K) -> Result<K>
    where
        K: Serialize,
    {
        let mut req = self
            .request
            .replace_subresource(
                "ephemeralcontainers",
                name,
                pp,
                serde_json::to_vec(data).map_err(Error::SerdeError)?,
            )
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("replace_ephemeralcontainers");
        self.client.request::<K>(req).await
    }

    /// Patch the ephemeral containers sub resource
    ///
    /// Any partial object containing the ephemeral containers
    /// sub resource is valid as long as the complete structure
    /// for the object is present, as shown below.
    ///
    /// You may want to patch the underlying resource to gain access to the main container process,
    /// see the [docs](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) for `sharedProcessNamespace`.
    ///
    /// Ephemeral containers may **not** be changed or removed once attached to a pod.
    /// Therefore if the chosen merge strategy overwrites the existing ephemeral containers,
    /// you will have to fetch the existing ephemeral containers first,
    /// in order to append your new ephemeral containers to the existing list before patching. See some examples and
    /// discussion related to merge strategies in Kubernetes
    /// [here](https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/#use-a-json-merge-patch-to-update-a-deployment). The example below uses a strategic merge patch, which does not require
    /// fetching the existing list first.
    ///
    /// See the `Kubernetes` [documentation](https://kubernetes.io/docs/concepts/workloads/pods/ephemeral-containers/)
    /// for more information about ephemeral containers.
    ///
    ///
    /// Example of using `patch_ephemeral_containers`:
    ///
    /// ```no_run
    /// use kube::api::{Api, PatchParams, Patch};
    /// use k8s_openapi::api::core::v1::Pod;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client = kube::Client::try_default().await?;
    /// let pods: Api<Pod> = Api::namespaced(client, "apps");
    /// let pp = PatchParams::default(); // strategic merge patch
    ///
    /// // Note that the strategic merge patch will concatenate the
    /// // lists of ephemeral containers so we avoid having to fetch the
    /// // current list and append to it manually.
    /// let patch = serde_json::json!({
    ///     "spec": {
    ///         "ephemeralContainers": [
    ///             {
    ///                 "name": "myephemeralcontainer",
    ///                 "image": "busybox:1.34.1",
    ///                 "command": ["sh", "-c", "sleep 20"],
    ///             },
    ///         ]
    ///     }});
    ///
    /// pods.patch_ephemeral_containers("mypod", &pp, &Patch::Strategic(patch)).await?;
    ///
    /// # Ok(())
    /// # }
    /// ```
    pub async fn patch_ephemeral_containers<P: serde::Serialize>(
        &self,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<K> {
        let mut req = self
            .request
            .patch_subresource("ephemeralcontainers", name, pp, patch)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("patch_ephemeralcontainers");
        self.client.request::<K>(req).await
    }

    /// Get the named resource with the ephemeral containers subresource.
    ///
    /// This returns the whole K, with metadata and spec.
    pub async fn get_ephemeral_containers(&self, name: &str) -> Result<K> {
        let mut req = self
            .request
            .get_subresource("ephemeralcontainers", name)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("get_ephemeralcontainers");
        self.client.request::<K>(req).await
    }
}
// ----------------------------------------------------------------------------
// TODO: Replace examples with owned custom resources. Bad practice to write to owned objects
// These examples work, but the job controller will totally overwrite what we do.
/// Methods for [status subresource](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#status-subresource).
impl<K> Api<K>
where
    K: DeserializeOwned,
{
    /// Get the named resource with a status subresource
    ///
    /// This actually returns the whole K, with metadata, and spec.
    pub async fn get_status(&self, name: &str) -> Result<K> {
        let mut request = self
            .request
            .get_subresource("status", name)
            .map_err(Error::BuildRequest)?;
        // Record the operation name in the request extensions.
        request.extensions_mut().insert("get_status");
        self.client.request(request).await
    }

    /// Patch fields on the status object
    ///
    /// NB: Requires that the resource has a status subresource.
    ///
    /// ```no_run
    /// use kube::api::{Api, PatchParams, Patch};
    /// use k8s_openapi::api::batch::v1::Job;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client = kube::Client::try_default().await?;
    /// let jobs: Api<Job> = Api::namespaced(client, "apps");
    /// let mut j = jobs.get("baz").await?;
    /// let pp = PatchParams::default(); // json merge patch
    /// let data = serde_json::json!({
    ///     "status": {
    ///         "succeeded": 2
    ///     }
    /// });
    /// let o = jobs.patch_status("baz", &pp, &Patch::Merge(data)).await?;
    /// assert_eq!(o.status.unwrap().succeeded, Some(2));
    /// # Ok(())
    /// # }
    /// ```
    pub async fn patch_status<P: serde::Serialize + Debug>(
        &self,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<K> {
        let mut request = self
            .request
            .patch_subresource("status", name, pp, patch)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("patch_status");
        self.client.request(request).await
    }

    /// Replace every field on the status object
    ///
    /// This works similarly to the [`Api::replace`] method, but `.spec` is ignored.
    /// You can leave out the `.spec` entirely from the serialized output.
    ///
    /// ```no_run
    /// use kube::api::{Api, PostParams};
    /// use k8s_openapi::api::batch::v1::{Job, JobStatus};
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client = kube::Client::try_default().await?;
    /// let jobs: Api<Job> = Api::namespaced(client, "apps");
    /// let mut o = jobs.get_status("baz").await?; // retrieve partial object
    /// o.status = Some(JobStatus::default()); // update the job part
    /// let pp = PostParams::default();
    /// let o = jobs.replace_status("baz", &pp, serde_json::to_vec(&o)?).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn replace_status(&self, name: &str, pp: &PostParams, data: Vec<u8>) -> Result<K> {
        let mut request = self
            .request
            .replace_subresource("status", name, pp, data)
            .map_err(Error::BuildRequest)?;
        request.extensions_mut().insert("replace_status");
        self.client.request(request).await
    }
}
// ----------------------------------------------------------------------------
// Log subresource
// ----------------------------------------------------------------------------
#[test]
fn log_path() {
    use crate::api::{Request, Resource};
    use k8s_openapi::api::core::v1 as corev1;
    // A container filter should show up as a query parameter on the log URL.
    let params = LogParams {
        container: Some("blah".into()),
        ..Default::default()
    };
    let req = Request::new(corev1::Pod::url_path(&(), Some("ns")))
        .logs("foo", &params)
        .unwrap();
    assert_eq!(req.uri(), "/api/v1/namespaces/ns/pods/foo/log?&container=blah");
}
/// Marker trait for objects that have logs
///
/// See [`Api::logs`] and [`Api::log_stream`] for usage.
pub trait Log {}
// Implemented for Pod, which exposes the `log` subresource.
impl Log for k8s_openapi::api::core::v1::Pod {}
impl<K> Api<K>
where
    K: DeserializeOwned + Log,
{
    /// Fetch logs as a string
    pub async fn logs(&self, name: &str, lp: &LogParams) -> Result<String> {
        let mut req = self.request.logs(name, lp).map_err(Error::BuildRequest)?;
        // Record the operation name in the request extensions.
        req.extensions_mut().insert("logs");
        self.client.request_text(req).await
    }

    /// Stream the logs via [`AsyncBufRead`].
    ///
    /// Log stream can be processed using [`AsyncReadExt`](futures::AsyncReadExt)
    /// and [`AsyncBufReadExt`](futures::AsyncBufReadExt).
    ///
    /// # Example
    ///
    /// ```no_run
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # use k8s_openapi::api::core::v1::Pod;
    /// # use kube::{api::{Api, LogParams}, Client};
    /// # let client: Client = todo!();
    /// use futures::{AsyncBufReadExt, TryStreamExt};
    ///
    /// let pods: Api<Pod> = Api::default_namespaced(client);
    /// let mut logs = pods
    ///     .log_stream("my-pod", &LogParams::default()).await?
    ///     .lines();
    ///
    /// while let Some(line) = logs.try_next().await? {
    ///     println!("{}", line);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn log_stream(&self, name: &str, lp: &LogParams) -> Result<impl AsyncBufRead> {
        let mut req = self.request.logs(name, lp).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("log_stream");
        self.client.request_stream(req).await
    }
}
// ----------------------------------------------------------------------------
// Eviction subresource
// ----------------------------------------------------------------------------
#[test]
fn evict_path() {
    use crate::api::{Request, Resource};
    use k8s_openapi::api::core::v1 as corev1;
    // Default eviction params should produce the bare eviction URL.
    let req = Request::new(corev1::Pod::url_path(&(), Some("ns")))
        .evict("foo", &EvictParams::default())
        .unwrap();
    assert_eq!(req.uri(), "/api/v1/namespaces/ns/pods/foo/eviction?");
}
/// Marker trait for objects that can be evicted
///
/// See [`Api::evict`] for usage
pub trait Evict {}
// Implemented for Pod, which exposes the `eviction` subresource.
impl Evict for k8s_openapi::api::core::v1::Pod {}
impl<K> Api<K>
where
K: DeserializeOwned + Evict,
{
/// Create an eviction
pub async fn evict(&self, name: &str, ep: &EvictParams) -> Result<Status> {
let mut req = self.request.evict(name, ep).map_err(Error::BuildRequest)?;
req.extensions_mut().insert("evict");
self.client.request::<Status>(req).await
}
}
// ----------------------------------------------------------------------------
// Attach subresource
// ----------------------------------------------------------------------------
#[cfg(feature = "ws")]
#[test]
fn attach_path() {
use crate::api::{Request, Resource};
use k8s_openapi::api::core::v1 as corev1;
let ap = AttachParams {
container: Some("blah".into()),
..AttachParams::default()
};
let url = corev1::Pod::url_path(&(), Some("ns"));
let req = Request::new(url).attach("foo", &ap).unwrap();
assert_eq!(
req.uri(),
"/api/v1/namespaces/ns/pods/foo/attach?&stdout=true&stderr=true&container=blah"
);
}
/// Marker trait for objects that support attach
///
/// See [`Api::attach`] for usage
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub trait Attach {}
// Implemented for Pod, which exposes the `attach` subresource.
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl Attach for k8s_openapi::api::core::v1::Pod {}
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl<K> Api<K>
where
K: Clone + DeserializeOwned + Attach,
{
/// Attach to pod
pub async fn attach(&self, name: &str, ap: &AttachParams) -> Result<AttachedProcess> {
let mut req = self.request.attach(name, ap).map_err(Error::BuildRequest)?;
req.extensions_mut().insert("attach");
let stream = self.client.connect(req).await?;
Ok(AttachedProcess::new(stream, ap))
}
}
// ----------------------------------------------------------------------------
// Exec subresource
// ----------------------------------------------------------------------------
#[cfg(feature = "ws")]
#[test]
fn exec_path() {
use crate::api::{Request, Resource};
use k8s_openapi::api::core::v1 as corev1;
let ap = AttachParams {
container: Some("blah".into()),
..AttachParams::default()
};
let url = corev1::Pod::url_path(&(), Some("ns"));
let req = Request::new(url)
.exec("foo", vec!["echo", "foo", "bar"], &ap)
.unwrap();
assert_eq!(
req.uri(),
"/api/v1/namespaces/ns/pods/foo/exec?&stdout=true&stderr=true&container=blah&command=echo&command=foo&command=bar"
);
}
/// Marker trait for objects that support exec
///
/// See [`Api::exec`] for usage.
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub trait Execute {}
// Implemented for Pod, which exposes the `exec` subresource.
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl Execute for k8s_openapi::api::core::v1::Pod {}
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl<K> Api<K>
where
    K: Clone + DeserializeOwned + Execute,
{
    /// Execute a command in a pod, yielding the attached process streams.
    pub async fn exec<I, T>(&self, name: &str, command: I, ap: &AttachParams) -> Result<AttachedProcess>
    where
        I: IntoIterator<Item = T> + Debug,
        T: Into<String>,
    {
        let request = {
            let mut r = self
                .request
                .exec(name, command, ap)
                .map_err(Error::BuildRequest)?;
            // Mark the request with its subresource verb (consumed elsewhere in the client).
            r.extensions_mut().insert("exec");
            r
        };
        let connection = self.client.connect(request).await?;
        Ok(AttachedProcess::new(connection, ap))
    }
}
// ----------------------------------------------------------------------------
// Portforward subresource
// ----------------------------------------------------------------------------
#[cfg(feature = "ws")]
#[test]
fn portforward_path() {
    use crate::api::{Request, Resource};
    use k8s_openapi::api::core::v1 as corev1;

    let base = corev1::Pod::url_path(&(), Some("ns"));
    let request = Request::new(base).portforward("foo", &[80, 1234]).unwrap();
    // Ports are comma-joined and percent-encoded (`%2C` is `,`).
    let expected = "/api/v1/namespaces/ns/pods/foo/portforward?&ports=80%2C1234";
    assert_eq!(request.uri(), expected);
}
/// Marker trait for objects that support the `portforward` subresource.
///
/// Implemented here for [`Pod`](k8s_openapi::api::core::v1::Pod) only.
/// See [`Api::portforward`] for usage.
#[cfg(feature = "ws")]
// Consistency fix: the sibling `Attach`/`Execute` marker traits carry this
// docsrs attribute so the `ws` feature badge renders on docs.rs; this one was missing it.
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub trait Portforward {}

#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl Portforward for k8s_openapi::api::core::v1::Pod {}
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl<K> Api<K>
where
    K: Clone + DeserializeOwned + Portforward,
{
    /// Forward ports of a pod, returning a [`Portforwarder`] over the upgraded connection.
    pub async fn portforward(&self, name: &str, ports: &[u16]) -> Result<Portforwarder> {
        let mut req = self
            .request
            .portforward(name, ports)
            .map_err(Error::BuildRequest)?;
        // Consistency fix: every sibling subresource method (`attach`, `exec`, `restart`,
        // `cordon`, ...) tags the request with its verb via an extension; this one didn't.
        req.extensions_mut().insert("portforward");
        let connection = self.client.connect(req).await?;
        Ok(Portforwarder::new(connection.into_stream(), ports))
    }
}

26
vendor/kube-client/src/api/util/csr.rs vendored Normal file
View File

@@ -0,0 +1,26 @@
use crate::{api::Api, Error, Result};
use k8s_openapi::api::certificates::v1::CertificateSigningRequest;
use kube_core::params::{Patch, PatchParams};
impl Api<CertificateSigningRequest> {
    /// Get the CertificateSigningRequest. May differ from get(name)
    pub async fn get_approval(&self, name: &str) -> Result<CertificateSigningRequest> {
        self.get_subresource("approval", name).await
    }

    /// Partially update approval of the specified CertificateSigningRequest.
    pub async fn patch_approval<P: serde::Serialize>(
        &self,
        name: &str,
        pp: &PatchParams,
        patch: &Patch<P>,
    ) -> Result<CertificateSigningRequest> {
        let request = {
            let mut r = self
                .request
                .patch_subresource("approval", name, pp, patch)
                .map_err(Error::BuildRequest)?;
            // Tag the request with its subresource name, like the other subresource methods.
            r.extensions_mut().insert("approval");
            r
        };
        self.client.request::<CertificateSigningRequest>(request).await
    }
}

173
vendor/kube-client/src/api/util/mod.rs vendored Normal file
View File

@@ -0,0 +1,173 @@
use crate::{
api::{Api, Resource},
Error, Result,
};
use k8s_openapi::api::{
authentication::v1::TokenRequest,
core::v1::{Node, ServiceAccount},
};
use kube_core::{params::PostParams, util::Restart};
use serde::de::DeserializeOwned;
mod csr;
impl<K> Api<K>
where
K: Restart + Resource + DeserializeOwned,
{
/// Trigger a restart of a Resource.
pub async fn restart(&self, name: &str) -> Result<K> {
let mut req = self.request.restart(name).map_err(Error::BuildRequest)?;
req.extensions_mut().insert("restart");
self.client.request::<K>(req).await
}
}
impl Api<Node> {
    /// Cordon a Node.
    pub async fn cordon(&self, name: &str) -> Result<Node> {
        let mut req = self.request.cordon(name).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("cordon");
        self.client.request::<Node>(req).await
    }

    /// Uncordon a Node.
    pub async fn uncordon(&self, name: &str) -> Result<Node> {
        let mut req = self.request.uncordon(name).map_err(Error::BuildRequest)?;
        // NOTE(review): this previously inserted "cordon" — a copy-paste from `cordon`
        // above. Every sibling method labels the request with its own verb, so use
        // "uncordon" here; confirm no consumer matches on the old "cordon" string.
        req.extensions_mut().insert("uncordon");
        self.client.request::<Node>(req).await
    }
}
impl Api<ServiceAccount> {
/// Create a TokenRequest of a ServiceAccount
pub async fn create_token_request(
&self,
name: &str,
pp: &PostParams,
token_request: &TokenRequest,
) -> Result<TokenRequest> {
let bytes = serde_json::to_vec(token_request).map_err(Error::SerdeError)?;
let mut req = self
.request
.create_subresource("token", name, pp, bytes)
.map_err(Error::BuildRequest)?;
req.extensions_mut().insert("create_token_request");
self.client.request::<TokenRequest>(req).await
}
}
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube-client --lib -- --ignored`
#[cfg(test)]
#[cfg(feature = "client")]
mod test {
    use crate::{
        api::{Api, DeleteParams, ListParams, PostParams},
        Client,
    };
    use k8s_openapi::api::{
        authentication::v1::{TokenRequest, TokenRequestSpec, TokenReview, TokenReviewSpec},
        core::v1::{Node, ServiceAccount},
    };
    use serde_json::json;

    #[tokio::test]
    #[ignore = "needs kubeconfig"]
    async fn node_cordon_and_uncordon_works() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let node_name = "fakenode";
        // Minimal Node object; it never has to actually run workloads.
        let fake_node = serde_json::from_value(json!({
            "apiVersion": "v1",
            "kind": "Node",
            "metadata": {
                "name": node_name,
            },
        }))?;
        let nodes: Api<Node> = Api::all(client.clone());
        nodes.create(&PostParams::default(), &fake_node).await?;
        // Count schedulable nodes before/after to observe cordon toggling the
        // node's schedulability as seen through this field selector.
        let schedulables = ListParams::default().fields("spec.unschedulable==false");
        let nodes_init = nodes.list(&schedulables).await?;
        let num_nodes_before_cordon = nodes_init.items.len();
        nodes.cordon(node_name).await?;
        let nodes_after_cordon = nodes.list(&schedulables).await?;
        assert_eq!(nodes_after_cordon.items.len(), num_nodes_before_cordon - 1);
        nodes.uncordon(node_name).await?;
        let nodes_after_uncordon = nodes.list(&schedulables).await?;
        assert_eq!(nodes_after_uncordon.items.len(), num_nodes_before_cordon);
        // Clean up the fake node.
        nodes.delete(node_name, &DeleteParams::default()).await?;
        Ok(())
    }

    #[tokio::test]
    #[ignore = "requires a cluster"]
    async fn create_token_request() -> Result<(), Box<dyn std::error::Error>> {
        let client = Client::try_default().await?;
        let serviceaccount_name = "fakesa";
        let serviceaccount_namespace = "default";
        let audiences = vec!["api".to_string()];
        let serviceaccounts: Api<ServiceAccount> = Api::namespaced(client.clone(), serviceaccount_namespace);
        let tokenreviews: Api<TokenReview> = Api::all(client);
        // Create ServiceAccount
        let fake_sa = serde_json::from_value(json!({
            "apiVersion": "v1",
            "kind": "ServiceAccount",
            "metadata": {
                "name": serviceaccount_name,
            },
        }))?;
        serviceaccounts.create(&PostParams::default(), &fake_sa).await?;
        // Create TokenRequest
        let tokenrequest = serviceaccounts
            .create_token_request(serviceaccount_name, &PostParams::default(), &TokenRequest {
                metadata: Default::default(),
                spec: TokenRequestSpec {
                    audiences: audiences.clone(),
                    bound_object_ref: None,
                    expiration_seconds: None,
                },
                status: None,
            })
            .await?;
        let token = tokenrequest.status.unwrap().token;
        assert!(!token.is_empty());
        // Check created token is valid with TokenReview
        let tokenreview = tokenreviews
            .create(&PostParams::default(), &TokenReview {
                metadata: Default::default(),
                spec: TokenReviewSpec {
                    audiences: Some(audiences.clone()),
                    token: Some(token),
                },
                status: None,
            })
            .await?;
        // The API server must report the token as authenticated for the SA identity.
        let tokenreviewstatus = tokenreview.status.unwrap();
        assert_eq!(tokenreviewstatus.audiences, Some(audiences));
        assert_eq!(tokenreviewstatus.authenticated, Some(true));
        assert_eq!(tokenreviewstatus.error, None);
        assert_eq!(
            tokenreviewstatus.user.unwrap().username,
            Some(format!(
                "system:serviceaccount:{serviceaccount_namespace}:{serviceaccount_name}"
            ))
        );
        // Cleanup ServiceAccount
        serviceaccounts
            .delete(serviceaccount_name, &DeleteParams::default())
            .await?;
        Ok(())
    }
}

View File

@@ -0,0 +1,710 @@
use std::{
path::{Path, PathBuf},
process::Command,
sync::Arc,
};
use chrono::{DateTime, Duration, Utc};
use futures::future::BoxFuture;
use http::{
header::{InvalidHeaderValue, AUTHORIZATION},
HeaderValue, Request,
};
use jsonpath_rust::JsonPath;
use secrecy::{ExposeSecret, SecretString};
use serde::{Deserialize, Serialize};
use thiserror::Error;
use tokio::sync::{Mutex, RwLock};
use tower::{filter::AsyncPredicate, BoxError};
use crate::config::{AuthInfo, AuthProviderConfig, ExecAuthCluster, ExecConfig, ExecInteractiveMode};
#[cfg(feature = "oauth")] mod oauth;
#[cfg(feature = "oauth")] pub use oauth::Error as OAuthError;
#[cfg(feature = "oidc")] mod oidc;
#[cfg(feature = "oidc")] pub use oidc::errors as oidc_errors;
#[cfg(target_os = "windows")] use std::os::windows::process::CommandExt;
#[derive(Error, Debug)]
/// Client auth errors
///
/// Covers failures while loading, refreshing, or encoding credentials
/// from the kubeconfig (basic auth, bearer tokens, exec plugins, oauth/oidc).
pub enum Error {
    /// Invalid basic auth
    #[error("invalid basic auth: {0}")]
    InvalidBasicAuth(#[source] InvalidHeaderValue),

    /// Invalid bearer token
    #[error("invalid bearer token: {0}")]
    InvalidBearerToken(#[source] InvalidHeaderValue),

    /// Tried to refresh a token and got a non-refreshable token response
    #[error("tried to refresh a token and got a non-refreshable token response")]
    UnrefreshableTokenResponse,

    /// Exec plugin response did not contain a status
    #[error("exec-plugin response did not contain a status")]
    ExecPluginFailed,

    /// Malformed token expiration date
    #[error("malformed token expiration date: {0}")]
    MalformedTokenExpirationDate(#[source] chrono::ParseError),

    /// Failed to start auth exec
    #[error("unable to run auth exec: {0}")]
    AuthExecStart(#[source] std::io::Error),

    /// Failed to run auth exec command
    #[error("auth exec command '{cmd}' failed with status {status}: {out:?}")]
    AuthExecRun {
        /// The failed command
        cmd: String,
        /// The exit status or exit code of the failed command
        status: std::process::ExitStatus,
        /// Stdout/Stderr of the failed command
        out: std::process::Output,
    },

    /// Failed to parse auth exec output
    #[error("failed to parse auth exec output: {0}")]
    AuthExecParse(#[source] serde_json::Error),

    /// Failed to serialize input for the auth exec plugin
    #[error("failed to serialize input: {0}")]
    AuthExecSerialize(#[source] serde_json::Error),

    /// Failed to exec auth
    #[error("failed exec auth: {0}")]
    AuthExec(String),

    /// Failed to read token file
    #[error("failed to read token file '{1:?}': {0}")]
    ReadTokenFile(#[source] std::io::Error, PathBuf),

    /// Failed to parse token-key
    #[error("failed to parse token-key")]
    ParseTokenKey(#[source] serde_json::Error),

    /// command was missing from exec config
    #[error("command must be specified to use exec authentication plugin")]
    MissingCommand,

    /// OAuth error
    #[cfg(feature = "oauth")]
    #[cfg_attr(docsrs, doc(cfg(feature = "oauth")))]
    #[error("failed OAuth: {0}")]
    OAuth(#[source] OAuthError),

    /// OIDC error
    #[cfg(feature = "oidc")]
    #[cfg_attr(docsrs, doc(cfg(feature = "oidc")))]
    #[error("failed OIDC: {0}")]
    Oidc(#[source] oidc_errors::Error),

    /// cluster spec missing while `provideClusterInfo` is true
    #[error("Cluster spec must be populated when `provideClusterInfo` is true")]
    ExecMissingClusterInfo,

    /// No valid native root CA certificates found
    #[error("No valid native root CA certificates found")]
    NoValidNativeRootCA(#[source] std::io::Error),
}
#[derive(Debug, Clone)]
#[allow(clippy::large_enum_variant)]
/// Resolved authentication method, derived from the kubeconfig `AuthInfo`.
pub(crate) enum Auth {
    /// No client authentication.
    None,
    /// HTTP basic auth: username and password.
    Basic(String, SecretString),
    /// Static bearer token.
    Bearer(SecretString),
    /// Token refreshed on expiry (exec plugin, token file, gcp oauth, or oidc).
    RefreshableToken(RefreshableToken),
    /// Client certificate data, key, and optional expiry.
    Certificate(String, SecretString, Option<DateTime<Utc>>),
}
// Token file reference. Reloads at least once per minute.
#[derive(Debug)]
pub struct TokenFile {
    // Path the token is (re)read from
    path: PathBuf,
    // Most recently read token contents
    token: SecretString,
    // When the cached token should be re-read (now + 60s on each load)
    expires_at: DateTime<Utc>,
}
impl TokenFile {
    /// Load the token file at `path` and cache its contents.
    fn new<P: AsRef<Path>>(path: P) -> Result<TokenFile, Error> {
        let contents = std::fs::read_to_string(&path)
            .map_err(|source| Error::ReadTokenFile(source, path.as_ref().to_owned()))?;
        Ok(Self {
            path: path.as_ref().to_owned(),
            token: SecretString::from(contents),
            // Try to reload at least once a minute
            expires_at: Utc::now() + SIXTY_SEC,
        })
    }

    /// Whether the cached token is within `TEN_SEC` of its reload deadline.
    fn is_expiring(&self) -> bool {
        self.expires_at < Utc::now() + TEN_SEC
    }

    /// Get the cached token. Returns `None` if it's expiring.
    fn cached_token(&self) -> Option<&str> {
        if self.is_expiring() {
            None
        } else {
            Some(self.token.expose_secret())
        }
    }

    /// Get a token, re-reading the file first if the cached one is expiring.
    fn token(&mut self) -> &str {
        if self.is_expiring() {
            // > If reload from file fails, the last-read token should be used to avoid breaking
            // > clients that make token files available on process start and then remove them to
            // > limit credential exposure.
            // > https://github.com/kubernetes/kubernetes/issues/68164
            if let Ok(fresh) = std::fs::read_to_string(&self.path) {
                self.token = SecretString::from(fresh);
            }
            self.expires_at = Utc::now() + SIXTY_SEC;
        }
        self.token.expose_secret()
    }
}
// Questionable decisions by chrono: https://github.com/chronotope/chrono/issues/1491
// `Duration::try_seconds` returns `Option`, so unwrap it in a way that is
// usable in the `const` initializers below.
macro_rules! const_unwrap {
    ($e:expr) => {
        match $e {
            Some(v) => v,
            None => panic!(),
        }
    };
}

/// Common constant for checking if an auth token is close to expiring
pub const TEN_SEC: chrono::TimeDelta = const_unwrap!(Duration::try_seconds(10));
/// Common duration for time between reloads
const SIXTY_SEC: chrono::TimeDelta = const_unwrap!(Duration::try_seconds(60));
// See https://github.com/kubernetes/kubernetes/tree/master/staging/src/k8s.io/client-go/plugin/pkg/client/auth
// for the list of auth-plugins supported by client-go.
// We currently support the following:
// - exec
// - token-file refreshed at least once per minute
// - gcp: command based token source (exec)
// - gcp: application credential based token source (requires `oauth` feature)
//
// Note that the visibility must be `pub` for `impl Layer for AuthLayer`, but this is not exported from the crate.
// It's not accessible from outside and not shown on docs.
#[derive(Debug, Clone)]
pub enum RefreshableToken {
    // (current token, its expiry, the AuthInfo used to re-run the exec plugin on expiry)
    Exec(Arc<Mutex<(SecretString, DateTime<Utc>, AuthInfo)>>),
    // Token file re-read at least once a minute (see `TokenFile`)
    File(Arc<RwLock<TokenFile>>),
    #[cfg(feature = "oauth")]
    GcpOauth(Arc<Mutex<oauth::Gcp>>),
    #[cfg(feature = "oidc")]
    Oidc(Arc<Mutex<oidc::Oidc>>),
}
// For use with `AsyncFilterLayer` to add `Authorization` header with a refreshed token.
impl<B> AsyncPredicate<Request<B>> for RefreshableToken
where
    B: http_body::Body + Send + 'static,
{
    type Future = BoxFuture<'static, Result<Request<B>, BoxError>>;
    type Request = Request<B>;

    /// Refresh the token if needed and inject it as the `Authorization` header.
    fn check(&mut self, mut request: Self::Request) -> Self::Future {
        // Clone the handle so the returned future is 'static (does not borrow `self`).
        let refreshable = self.clone();
        Box::pin(async move {
            refreshable.to_header().await.map_err(Into::into).map(|value| {
                request.headers_mut().insert(AUTHORIZATION, value);
                request
            })
        })
    }
}
impl RefreshableToken {
    /// Produce a `Bearer` Authorization header value, refreshing the underlying
    /// token first if it is (close to) expired.
    async fn to_header(&self) -> Result<HeaderValue, Error> {
        match self {
            RefreshableToken::Exec(data) => {
                let mut locked_data = data.lock().await;
                // Add some wiggle room onto the current timestamp so we don't get any race
                // conditions where the token expires while we are refreshing
                if Utc::now() + SIXTY_SEC >= locked_data.1 {
                    // TODO Improve refreshing exec to avoid `Auth::try_from`
                    match Auth::try_from(&locked_data.2)? {
                        // Re-running the plugin may yield a non-refreshable credential;
                        // that cannot replace the refreshable one we hold.
                        Auth::None | Auth::Basic(_, _) | Auth::Bearer(_) | Auth::Certificate(_, _, _) => {
                            return Err(Error::UnrefreshableTokenResponse);
                        }
                        Auth::RefreshableToken(RefreshableToken::Exec(d)) => {
                            // The Arc was just created by `try_from`, so we are its only owner.
                            let (new_token, new_expire, new_info) = Arc::try_unwrap(d)
                                .expect("Unable to unwrap Arc, this is likely a programming error")
                                .into_inner();
                            locked_data.0 = new_token;
                            locked_data.1 = new_expire;
                            locked_data.2 = new_info;
                        }
                        // Unreachable because the token source does not change
                        Auth::RefreshableToken(RefreshableToken::File(_)) => unreachable!(),
                        #[cfg(feature = "oauth")]
                        Auth::RefreshableToken(RefreshableToken::GcpOauth(_)) => unreachable!(),
                        #[cfg(feature = "oidc")]
                        Auth::RefreshableToken(RefreshableToken::Oidc(_)) => unreachable!(),
                    }
                }
                bearer_header(locked_data.0.expose_secret())
            }
            RefreshableToken::File(token_file) => {
                // Fast path: a shared read lock is enough while the cached token is fresh.
                let guard = token_file.read().await;
                if let Some(header) = guard.cached_token().map(bearer_header) {
                    return header;
                }
                // Drop the read guard before a write lock attempt to prevent deadlock.
                drop(guard);
                // Note that `token()` only reloads if the cached token is expiring.
                // A separate method to conditionally reload minimizes the need for an exclusive access.
                bearer_header(token_file.write().await.token())
            }
            #[cfg(feature = "oauth")]
            RefreshableToken::GcpOauth(data) => {
                let gcp_oauth = data.lock().await;
                let token = (*gcp_oauth).token().await.map_err(Error::OAuth)?;
                bearer_header(&token.access_token)
            }
            #[cfg(feature = "oidc")]
            RefreshableToken::Oidc(oidc) => {
                let token = oidc.lock().await.id_token().await.map_err(Error::Oidc)?;
                bearer_header(&token)
            }
        }
    }
}
fn bearer_header(token: &str) -> Result<HeaderValue, Error> {
let mut value = HeaderValue::try_from(format!("Bearer {token}")).map_err(Error::InvalidBearerToken)?;
value.set_sensitive(true);
Ok(value)
}
impl TryFrom<&AuthInfo> for Auth {
    type Error = Error;

    /// Loads the authentication header from the credentials available in the kubeconfig. This supports
    /// exec plugins as well as specified in
    /// https://kubernetes.io/docs/reference/access-authn-authz/authentication/#client-go-credential-plugins
    fn try_from(auth_info: &AuthInfo) -> Result<Self, Self::Error> {
        // Highest precedence: legacy auth-provider blocks (oidc/gcp).
        if let Some(provider) = &auth_info.auth_provider {
            match token_from_provider(provider)? {
                #[cfg(feature = "oidc")]
                ProviderToken::Oidc(oidc) => {
                    return Ok(Self::RefreshableToken(RefreshableToken::Oidc(Arc::new(
                        Mutex::new(oidc),
                    ))));
                }
                #[cfg(not(feature = "oidc"))]
                ProviderToken::Oidc(token) => {
                    return Ok(Self::Bearer(SecretString::from(token)));
                }
                ProviderToken::GcpCommand(token, Some(expiry)) => {
                    // Cache the fresh token + expiry back into the provider config, so the
                    // refresh path (`RefreshableToken::Exec`) can re-evaluate it later.
                    let mut info = auth_info.clone();
                    let mut provider = provider.clone();
                    provider.config.insert("access-token".into(), token.clone());
                    provider.config.insert("expiry".into(), expiry.to_rfc3339());
                    info.auth_provider = Some(provider);
                    return Ok(Self::RefreshableToken(RefreshableToken::Exec(Arc::new(
                        Mutex::new((SecretString::from(token), expiry, info)),
                    ))));
                }
                ProviderToken::GcpCommand(token, None) => {
                    // No expiry -> cannot refresh; treat as a static bearer token.
                    return Ok(Self::Bearer(SecretString::from(token)));
                }
                #[cfg(feature = "oauth")]
                ProviderToken::GcpOauth(gcp) => {
                    return Ok(Self::RefreshableToken(RefreshableToken::GcpOauth(Arc::new(
                        Mutex::new(gcp),
                    ))));
                }
            }
        }
        // HTTP basic auth.
        if let (Some(u), Some(p)) = (&auth_info.username, &auth_info.password) {
            return Ok(Self::Basic(u.to_owned(), p.to_owned()));
        }
        // Inline token. Has precedence over `token_file`.
        if let Some(token) = &auth_info.token {
            return Ok(Self::Bearer(token.clone()));
        }
        // Token file reference. Must be reloaded at least once a minute.
        if let Some(file) = &auth_info.token_file {
            return Ok(Self::RefreshableToken(RefreshableToken::File(Arc::new(
                RwLock::new(TokenFile::new(file)?),
            ))));
        }
        // Exec credential plugin: run it once now and wrap the result.
        if let Some(exec) = &auth_info.exec {
            let creds = auth_exec(exec)?;
            let status = creds.status.ok_or(Error::ExecPluginFailed)?;
            let expiration = status
                .expiration_timestamp
                .map(|ts| ts.parse())
                .transpose()
                .map_err(Error::MalformedTokenExpirationDate)?;
            // Client certificates in the plugin output take precedence over tokens.
            if let (Some(client_certificate_data), Some(client_key_data)) =
                (status.client_certificate_data, status.client_key_data)
            {
                return Ok(Self::Certificate(
                    client_certificate_data,
                    client_key_data.into(),
                    expiration,
                ));
            }
            // A token with an expiry is refreshable (plugin re-run on expiry);
            // without an expiry it is a static bearer token.
            match (status.token.map(SecretString::from), expiration) {
                (Some(token), Some(expire)) => Ok(Self::RefreshableToken(RefreshableToken::Exec(Arc::new(
                    Mutex::new((token, expire, auth_info.clone())),
                )))),
                (Some(token), None) => Ok(Self::Bearer(token)),
                _ => Ok(Self::None),
            }
        } else {
            Ok(Self::None)
        }
    }
}
// We need to differentiate providers because the keys/formats to store token expiration differs.
enum ProviderToken {
    // Fully configured OIDC flow (with the `oidc` feature)
    #[cfg(feature = "oidc")]
    Oidc(oidc::Oidc),
    // Raw `id-token` from the provider config (no refresh possible)
    #[cfg(not(feature = "oidc"))]
    Oidc(String),
    // "access-token", "expiry" (RFC3339)
    GcpCommand(String, Option<DateTime<Utc>>),
    #[cfg(feature = "oauth")]
    GcpOauth(oauth::Gcp),
    // "access-token", "expires-on" (timestamp)
    // Azure(String, Option<DateTime<Utc>>),
}
fn token_from_provider(provider: &AuthProviderConfig) -> Result<ProviderToken, Error> {
match provider.name.as_ref() {
"oidc" => token_from_oidc_provider(provider),
"gcp" => token_from_gcp_provider(provider),
"azure" => Err(Error::AuthExec(
"The azure auth plugin is not supported; use https://github.com/Azure/kubelogin instead".into(),
)),
_ => Err(Error::AuthExec(format!(
"Authentication with provider {:} not supported",
provider.name
))),
}
}
#[cfg(feature = "oidc")]
fn token_from_oidc_provider(provider: &AuthProviderConfig) -> Result<ProviderToken, Error> {
oidc::Oidc::from_config(&provider.config)
.map_err(Error::Oidc)
.map(ProviderToken::Oidc)
}
#[cfg(not(feature = "oidc"))]
fn token_from_oidc_provider(provider: &AuthProviderConfig) -> Result<ProviderToken, Error> {
match provider.config.get("id-token") {
Some(id_token) => Ok(ProviderToken::Oidc(id_token.clone())),
None => Err(Error::AuthExec(
"No id-token for oidc Authentication provider".into(),
)),
}
}
/// Resolve a token for the legacy `gcp` auth provider from its config map.
fn token_from_gcp_provider(provider: &AuthProviderConfig) -> Result<ProviderToken, Error> {
    // A static `id-token` short-circuits everything else.
    if let Some(id_token) = provider.config.get("id-token") {
        return Ok(ProviderToken::GcpCommand(id_token.clone(), None));
    }
    // Return cached access token if it's still valid
    if let Some(access_token) = provider.config.get("access-token") {
        if let Some(expiry) = provider.config.get("expiry") {
            let expiry_date = expiry
                .parse::<DateTime<Utc>>()
                .map_err(Error::MalformedTokenExpirationDate)?;
            // Only reuse it if it has at least a minute of validity left.
            if Utc::now() + SIXTY_SEC < expiry_date {
                return Ok(ProviderToken::GcpCommand(access_token.clone(), Some(expiry_date)));
            }
        }
    }
    // Command-based token source
    if let Some(cmd) = provider.config.get("cmd-path") {
        let params = provider.config.get("cmd-args").cloned().unwrap_or_default();

        // NB: This property does currently not exist upstream in client-go
        // See https://github.com/kube-rs/kube/issues/1060
        let drop_env = provider.config.get("cmd-drop-env").cloned().unwrap_or_default();

        // TODO splitting args by space is not safe
        let mut command = Command::new(cmd);
        // Do not pass the following env vars to the command
        for env in drop_env.trim().split(' ') {
            command.env_remove(env);
        }
        let output = command
            .args(params.trim().split(' '))
            .output()
            .map_err(|e| Error::AuthExec(format!("Executing {cmd:} failed: {e:?}")))?;
        if !output.status.success() {
            return Err(Error::AuthExecRun {
                cmd: format!("{cmd} {params}"),
                status: output.status,
                out: output,
            });
        }
        // `token-key` selects a JSON field from stdout; otherwise stdout *is* the token.
        if let Some(field) = provider.config.get("token-key") {
            let json_output: serde_json::Value =
                serde_json::from_slice(&output.stdout).map_err(Error::ParseTokenKey)?;
            let token = extract_value(&json_output, "token-key", field)?;
            if let Some(field) = provider.config.get("expiry-key") {
                let expiry = extract_value(&json_output, "expiry-key", field)?;
                let expiry = expiry
                    .parse::<DateTime<Utc>>()
                    .map_err(Error::MalformedTokenExpirationDate)?;
                return Ok(ProviderToken::GcpCommand(token, Some(expiry)));
            } else {
                return Ok(ProviderToken::GcpCommand(token, None));
            }
        } else {
            let token = std::str::from_utf8(&output.stdout)
                .map_err(|e| Error::AuthExec(format!("Result is not a string {e:?} ")))?
                .to_owned();
            return Ok(ProviderToken::GcpCommand(token, None));
        }
    }
    // Google Application Credentials-based token source
    #[cfg(feature = "oauth")]
    {
        Ok(ProviderToken::GcpOauth(
            oauth::Gcp::default_credentials_with_scopes(provider.config.get("scopes"))
                .map_err(Error::OAuth)?,
        ))
    }
    #[cfg(not(feature = "oauth"))]
    {
        Err(Error::AuthExec(
            "Enable oauth feature to use Google Application Credentials-based token source".into(),
        ))
    }
}
/// Pull a string out of `json` at JsonPath `path`; `context` names the config
/// key (e.g. "token-key") for error messages.
fn extract_value(json: &serde_json::Value, context: &str, path: &str) -> Result<String, Error> {
    // Kubeconfig paths come wrapped client-go style, e.g. `{.credential.access_token}`;
    // strip braces/quotes before parsing as a JsonPath.
    let parsed_path = path
        .trim_matches(|c| c == '"' || c == '{' || c == '}')
        .parse::<JsonPath>()
        .map_err(|err| {
            Error::AuthExec(format!(
                "Failed to parse {context:?} as a JsonPath: {path}\n
                Error: {err}"
            ))
        })?;
    // Only the first match (if any) is used.
    let res = parsed_path.find_slice(json);
    let Some(res) = res.into_iter().next() else {
        return Err(Error::AuthExec(format!(
            "Target {context:?} value {path:?} not found"
        )));
    };
    let jval = res.to_data();
    let val = jval.as_str().ok_or(Error::AuthExec(format!(
        "Target {context:?} value {path:?} is not a string"
    )))?;
    Ok(val.to_string())
}
/// ExecCredentials is used by exec-based plugins to communicate credentials to
/// HTTP transports.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecCredential {
    /// Object kind; set to "ExecCredential" when serialized by `auth_exec`.
    pub kind: Option<String>,
    #[serde(rename = "apiVersion")]
    pub api_version: Option<String>,
    /// Request-side info handed to the plugin (via `KUBERNETES_EXEC_INFO`).
    pub spec: Option<ExecCredentialSpec>,
    /// Credentials returned by the plugin; absent on the request side.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<ExecCredentialStatus>,
}
/// ExecCredentialSpec holds request and runtime specific information provided
/// by transport.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecCredentialSpec {
    // Whether the plugin may prompt on stdin (mirrors `interactive_mode`)
    #[serde(skip_serializing_if = "Option::is_none")]
    interactive: Option<bool>,
    // Cluster info, populated only when `provideClusterInfo` is set
    #[serde(skip_serializing_if = "Option::is_none")]
    cluster: Option<ExecAuthCluster>,
}
/// ExecCredentialStatus holds credentials for the transport to use.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ExecCredentialStatus {
    /// Expiry of the returned credential; parsed into a `DateTime<Utc>` by the caller.
    #[serde(rename = "expirationTimestamp")]
    pub expiration_timestamp: Option<String>,
    /// Bearer token, if the plugin returned one.
    pub token: Option<String>,
    /// PEM client certificate; takes precedence over `token` when paired with a key.
    #[serde(rename = "clientCertificateData")]
    pub client_certificate_data: Option<String>,
    #[serde(rename = "clientKeyData")]
    pub client_key_data: Option<String>,
}
/// Run the exec credential plugin configured in the kubeconfig and parse
/// its JSON `ExecCredential` output from stdout.
fn auth_exec(auth: &ExecConfig) -> Result<ExecCredential, Error> {
    let mut cmd = match &auth.command {
        Some(cmd) => Command::new(cmd),
        None => return Err(Error::MissingCommand),
    };
    if let Some(args) = &auth.args {
        cmd.args(args);
    }
    if let Some(env) = &auth.env {
        // Env entries are `{name, value}` maps; malformed entries are ignored.
        let envs = env
            .iter()
            .flat_map(|env| match (env.get("name"), env.get("value")) {
                (Some(name), Some(value)) => Some((name, value)),
                _ => None,
            });
        cmd.envs(envs);
    }
    // Interactive plugins (anything but `Never`) get our stdin so they can prompt.
    let interactive = auth.interactive_mode != Some(ExecInteractiveMode::Never);
    if interactive {
        cmd.stdin(std::process::Stdio::inherit());
    } else {
        cmd.stdin(std::process::Stdio::piped());
    }
    let mut exec_credential_spec = ExecCredentialSpec {
        interactive: Some(interactive),
        cluster: None,
    };
    if auth.provide_cluster_info {
        exec_credential_spec.cluster = Some(auth.cluster.clone().ok_or(Error::ExecMissingClusterInfo)?);
    }
    // Provide exec info to child process
    let exec_info = serde_json::to_string(&ExecCredential {
        api_version: auth.api_version.clone(),
        kind: "ExecCredential".to_string().into(),
        spec: Some(exec_credential_spec),
        status: None,
    })
    .map_err(Error::AuthExecSerialize)?;
    cmd.env("KUBERNETES_EXEC_INFO", exec_info);
    if let Some(envs) = &auth.drop_env {
        for env in envs {
            cmd.env_remove(env);
        }
    }
    #[cfg(target_os = "windows")]
    {
        // Avoid flashing a console window for the spawned plugin on Windows.
        const CREATE_NO_WINDOW: u32 = 0x08000000;
        cmd.creation_flags(CREATE_NO_WINDOW);
    }
    let out = cmd.output().map_err(Error::AuthExecStart)?;
    if !out.status.success() {
        return Err(Error::AuthExecRun {
            cmd: format!("{cmd:?}"),
            status: out.status,
            out,
        });
    }
    let creds = serde_json::from_slice(&out.stdout).map_err(Error::AuthExecParse)?;
    Ok(creds)
}
#[cfg(test)]
mod test {
    use crate::config::Kubeconfig;

    use super::*;

    #[tokio::test]
    #[ignore = "fails on windows mysteriously"]
    async fn exec_auth_command() -> Result<(), Error> {
        // Expiry comfortably in the future so the token counts as fresh.
        let expiry = (Utc::now() + SIXTY_SEC).to_rfc3339();
        // `echo` stands in for the gcp token command: it just prints the JSON in cmd-args.
        let test_file = format!(
            r#"
        apiVersion: v1
        clusters:
        - cluster:
            certificate-authority-data: XXXXXXX
            server: https://36.XXX.XXX.XX
          name: generic-name
        contexts:
        - context:
            cluster: generic-name
            user: generic-name
          name: generic-name
        current-context: generic-name
        kind: Config
        preferences: {{}}
        users:
        - name: generic-name
          user:
            auth-provider:
              config:
                cmd-args: '{{"something": "else", "credential": {{"access_token": "my_token", "token_expiry": "{expiry}"}}}}'
                cmd-path: echo
                expiry-key: '{{.credential.token_expiry}}'
                token-key: '{{.credential.access_token}}'
              name: gcp
        "#
        );

        let config: Kubeconfig = serde_yaml::from_str(&test_file).unwrap();
        let auth_info = config.auth_infos[0].auth_info.as_ref().unwrap();
        match Auth::try_from(auth_info).unwrap() {
            Auth::RefreshableToken(RefreshableToken::Exec(refreshable)) => {
                let (token, _expire, info) = Arc::try_unwrap(refreshable).unwrap().into_inner();
                assert_eq!(token.expose_secret(), &"my_token".to_owned());
                // The fresh token must also have been cached back into the provider config.
                let config = info.auth_provider.unwrap().config;
                assert_eq!(config.get("access-token"), Some(&"my_token".to_owned()));
            }
            _ => unreachable!(),
        }
        Ok(())
    }

    #[test]
    fn token_file() {
        let file = tempfile::NamedTempFile::new().unwrap();
        std::fs::write(file.path(), "token1").unwrap();
        let mut token_file = TokenFile::new(file.path()).unwrap();
        assert_eq!(token_file.cached_token().unwrap(), "token1");
        assert!(!token_file.is_expiring());
        assert_eq!(token_file.token(), "token1");

        // Doesn't reload unless expiring
        std::fs::write(file.path(), "token2").unwrap();
        assert_eq!(token_file.token(), "token1");

        // Force expiry: the cache must miss and the next `token()` must pick up new contents.
        token_file.expires_at = Utc::now();
        assert!(token_file.is_expiring());
        assert_eq!(token_file.cached_token(), None);
        assert_eq!(token_file.token(), "token2");
        assert!(!token_file.is_expiring());
        assert_eq!(token_file.cached_token().unwrap(), "token2");
    }
}

View File

@@ -0,0 +1,171 @@
use http_body_util::BodyExt;
use hyper_util::rt::TokioExecutor;
use tame_oauth::{
gcp::{TokenOrRequest, TokenProvider, TokenProviderWrapper},
Token,
};
use thiserror::Error;
use crate::client::Body;
#[derive(Error, Debug)]
/// Possible errors when requesting token with OAuth
/// (Google default-credentials flow via `tame-oauth`).
pub enum Error {
    /// Default provider appears to be configured, but was invalid
    #[error("default provider is configured but invalid: {0}")]
    InvalidDefaultProviderConfig(#[source] tame_oauth::Error),

    /// No provider was found
    #[error("no provider was found")]
    NoDefaultProvider,

    /// Failed to load OAuth credentials file
    #[error("failed to load OAuth credentials file: {0}")]
    LoadCredentials(#[source] std::io::Error),

    /// Failed to parse OAuth credentials file
    #[error("failed to parse OAuth credentials file: {0}")]
    ParseCredentials(#[source] serde_json::Error),

    /// Credentials file had invalid key format
    #[error("credentials file had invalid key format: {0}")]
    InvalidKeyFormat(#[source] tame_oauth::Error),

    /// Credentials file had invalid RSA key
    #[error("credentials file had invalid RSA key: {0}")]
    InvalidRsaKey(#[source] tame_oauth::Error),

    /// Failed to request token
    #[error("failed to request token: {0}")]
    RequestToken(#[source] hyper_util::client::legacy::Error),

    /// Failed to retrieve new credential
    #[error("failed to retrieve new credential {0:?}")]
    RetrieveCredentials(#[source] tame_oauth::Error),

    /// Failed to parse token
    #[error("failed to parse token: {0}")]
    ParseToken(#[source] serde_json::Error),

    /// Failed to concatenate the buffers from response body
    #[error("failed to concatenate the buffers from response body: {0}")]
    ConcatBuffers(#[source] hyper::Error),

    /// Failed to build a request
    #[error("failed to build request: {0}")]
    BuildRequest(#[source] http::Error),

    /// No valid native root CA certificates found
    #[error("No valid native root CA certificates found")]
    NoValidNativeRootCA(#[source] std::io::Error),

    /// OAuth failed with unknown reason
    #[error("unknown OAuth error: {0}")]
    Unknown(String),

    /// Failed to create OpenSSL HTTPS connector
    #[cfg(feature = "openssl-tls")]
    #[cfg_attr(docsrs, doc(cfg(feature = "openssl-tls")))]
    #[error("failed to create OpenSSL HTTPS connector: {0}")]
    CreateOpensslHttpsConnector(#[source] openssl::error::ErrorStack),
}
/// Google Cloud token source following the "default credentials" flow.
pub struct Gcp {
    // Credential source resolved by `default_credentials_with_scopes`
    provider: TokenProviderWrapper,
    // OAuth scopes requested for each token
    scopes: Vec<String>,
}
// Manual `Debug` that renders the provider as a placeholder, so no
// credential material can leak into debug output/logs.
impl std::fmt::Debug for Gcp {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Gcp")
            .field("provider", &"{}".to_owned())
            .field("scopes", &self.scopes)
            .finish()
    }
}
impl Gcp {
    // Initialize `TokenProvider` following the "Google Default Credentials" flow.
    // `tame-oauth` supports the same default credentials flow as the Go oauth2:
    // - `GOOGLE_APPLICATION_CREDENTIALS` environmment variable
    // - gcloud's application default credentials
    // - local metadata server if running on GCP
    pub(crate) fn default_credentials_with_scopes(scopes: Option<&String>) -> Result<Self, Error> {
        const DEFAULT_SCOPES: &str =
            "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/userinfo.email";
        let provider = TokenProviderWrapper::get_default_provider()
            .map_err(Error::InvalidDefaultProviderConfig)?
            .ok_or(Error::NoDefaultProvider)?;
        // Scopes come as a single comma-separated string; fall back to defaults if unset.
        let scopes = scopes
            .map(String::to_owned)
            .unwrap_or_else(|| DEFAULT_SCOPES.to_owned())
            .split(',')
            .map(str::to_owned)
            .collect::<Vec<_>>();
        Ok(Self { provider, scopes })
    }

    /// Fetch an access token, performing the HTTP token-exchange request when
    /// the provider does not hand back a cached token directly.
    pub async fn token(&self) -> Result<Token, Error> {
        match self.provider.get_token(&self.scopes) {
            // Provider asks us to perform an HTTP request and parse the response.
            Ok(TokenOrRequest::Request {
                request, scope_hash, ..
            }) => {
                #[cfg(not(any(feature = "rustls-tls", feature = "openssl-tls")))]
                compile_error!(
                    "At least one of rustls-tls or openssl-tls feature must be enabled to use oauth feature"
                );
                // Current TLS feature precedence when more than one are set:
                // 1. rustls-tls
                // 2. openssl-tls
                #[cfg(all(feature = "rustls-tls", not(feature = "webpki-roots")))]
                let https = hyper_rustls::HttpsConnectorBuilder::new()
                    .with_native_roots()
                    .map_err(Error::NoValidNativeRootCA)?
                    .https_only()
                    .enable_http1()
                    .build();
                #[cfg(all(feature = "rustls-tls", feature = "webpki-roots"))]
                let https = hyper_rustls::HttpsConnectorBuilder::new()
                    .with_webpki_roots()
                    .https_only()
                    .enable_http1()
                    .build();
                #[cfg(all(not(feature = "rustls-tls"), feature = "openssl-tls"))]
                let https =
                    hyper_openssl::HttpsConnector::new().map_err(Error::CreateOpensslHttpsConnector)?;
                let client = hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build(https);
                let res = client
                    .request(request.map(Body::from))
                    .await
                    .map_err(Error::RequestToken)?;
                // Convert response body to `Vec<u8>` for parsing.
                let (parts, body) = res.into_parts();
                let bytes = body.collect().await.map_err(Error::ConcatBuffers)?.to_bytes();
                let response = http::Response::from_parts(parts, bytes.to_vec());
                match self.provider.parse_token_response(scope_hash, response) {
                    Ok(token) => Ok(token),
                    Err(err) => Err(match err {
                        tame_oauth::Error::Auth(_) | tame_oauth::Error::HttpStatus(_) => {
                            Error::RetrieveCredentials(err)
                        }
                        tame_oauth::Error::Json(e) => Error::ParseToken(e),
                        err => Error::Unknown(err.to_string()),
                    }),
                }
            }
            // Provider already holds a valid token.
            Ok(TokenOrRequest::Token(token)) => Ok(token),
            Err(err) => match err {
                tame_oauth::Error::Http(e) => Err(Error::BuildRequest(e)),
                tame_oauth::Error::InvalidRsaKey(_) => Err(Error::InvalidRsaKey(err)),
                tame_oauth::Error::InvalidKeyFormat => Err(Error::InvalidKeyFormat(err)),
                e => Err(Error::Unknown(e.to_string())),
            },
        }
    }
}

View File

@@ -0,0 +1,564 @@
use std::collections::HashMap;
use super::TEN_SEC;
use chrono::{TimeZone, Utc};
use form_urlencoded::Serializer;
use http::{
header::{HeaderValue, AUTHORIZATION, CONTENT_TYPE},
Method, Request, Uri, Version,
};
use http_body_util::BodyExt;
use hyper_util::{
client::legacy::{connect::HttpConnector, Client},
rt::TokioExecutor,
};
use secrecy::{ExposeSecret, SecretString};
use serde::{Deserialize, Deserializer};
use serde_json::Number;
/// Possible errors when handling OIDC authentication.
pub mod errors {
    use super::Oidc;
    use http::{uri::InvalidUri, StatusCode};
    use thiserror::Error;

    /// Possible errors when extracting expiration time from an ID token.
    #[derive(Error, Debug)]
    pub enum IdTokenError {
        /// Failed to extract payload from the ID token.
        #[error("not a valid JWT token")]
        InvalidFormat,
        /// ID token payload is not properly encoded in base64.
        #[error("failed to decode base64: {0}")]
        InvalidBase64(
            #[source]
            #[from]
            base64::DecodeError,
        ),
        /// ID token payload is not valid JSON object containing expiration timestamp.
        #[error("failed to unmarshal JSON: {0}")]
        InvalidJson(
            #[source]
            #[from]
            serde_json::Error,
        ),
        /// Expiration timestamp extracted from the ID token payload is not valid.
        #[error("invalid expiration timestamp")]
        InvalidExpirationTimestamp,
    }

    /// Possible error when initializing the ID token refreshing.
    // NOTE: `Clone` is required because the stored init result is replayed on
    // every refresh attempt (see `Oidc::id_token`).
    #[derive(Error, Debug, Clone)]
    pub enum RefreshInitError {
        /// Missing field in the configuration.
        #[error("missing field {0}")]
        MissingField(&'static str),
        /// Failed to create an HTTPS client.
        #[cfg(feature = "openssl-tls")]
        #[cfg_attr(docsrs, doc(cfg(feature = "openssl-tls")))]
        #[error("failed to create OpenSSL HTTPS connector: {0}")]
        CreateOpensslHttpsConnector(
            #[source]
            #[from]
            openssl::error::ErrorStack,
        ),
        /// No valid native root CA certificates found
        #[error("No valid native root CA certificates found")]
        NoValidNativeRootCA,
    }

    /// Possible errors when using the refresh token.
    #[derive(Error, Debug)]
    pub enum RefreshError {
        /// Failed to parse the provided issuer URL.
        #[error("invalid URI: {0}")]
        InvalidURI(
            #[source]
            #[from]
            InvalidUri,
        ),
        /// [`hyper::Error`] occurred during refreshing.
        #[error("hyper error: {0}")]
        HyperError(
            #[source]
            #[from]
            hyper::Error,
        ),
        /// [`hyper_util::client::legacy::Error`] occurred during refreshing.
        #[error("hyper-util error: {0}")]
        HyperUtilError(
            #[source]
            #[from]
            hyper_util::client::legacy::Error,
        ),
        /// Failed to parse the metadata received from the provider.
        #[error("invalid metadata received from the provider: {0}")]
        InvalidMetadata(#[source] serde_json::Error),
        /// Received an invalid status code from the provider.
        #[error("request failed with status code: {0}")]
        RequestFailed(StatusCode),
        /// [`http::Error`] occurred during refreshing.
        #[error("http error: {0}")]
        HttpError(
            #[source]
            #[from]
            http::Error,
        ),
        /// Failed to authorize with the provider.
        #[error("failed to authorize with the provider using any of known authorization styles")]
        AuthorizationFailure,
        /// Failed to parse the token response from the provider.
        #[error("invalid token response received from the provider: {0}")]
        InvalidTokenResponse(#[source] serde_json::Error),
        /// Token response from the provider did not contain an ID token.
        #[error("no ID token received from the provider")]
        NoIdTokenReceived,
    }

    /// Possible errors when dealing with OIDC.
    #[derive(Error, Debug)]
    pub enum Error {
        /// Config did not contain the ID token.
        #[error("missing field {}", Oidc::CONFIG_ID_TOKEN)]
        IdTokenMissing,
        /// Failed to retrieve expiration timestamp from the ID token.
        #[error("invalid ID token: {0}")]
        IdToken(
            #[source]
            #[from]
            IdTokenError,
        ),
        /// Failed to initialize ID token refreshing.
        #[error("ID token expired and refreshing is not possible: {0}")]
        RefreshInit(
            #[source]
            #[from]
            RefreshInitError,
        ),
        /// Failed to refresh the ID token.
        #[error("ID token expired and refreshing failed: {0}")]
        Refresh(
            #[source]
            #[from]
            RefreshError,
        ),
    }
}
use base64::Engine as _;
// JWT segments are URL-safe base64; this engine is deliberately lenient about
// padding and trailing bits so tokens from different issuers all decode.
const JWT_BASE64_ENGINE: base64::engine::GeneralPurpose = base64::engine::GeneralPurpose::new(
    &base64::alphabet::URL_SAFE,
    base64::engine::GeneralPurposeConfig::new()
        .with_decode_allow_trailing_bits(true)
        .with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
);
use base64::engine::general_purpose::STANDARD as STANDARD_BASE64_ENGINE;

/// OIDC auth state: the current ID token plus, when the config allows it, the
/// machinery to refresh that token once it expires.
#[derive(Debug)]
pub struct Oidc {
    // Current (possibly expired) ID token; `SecretString` keeps it redacted in Debug output.
    id_token: SecretString,
    // The refresher, or the reason it could not be built — kept so that error
    // can be reported if a refresh is ever actually needed (see `id_token`).
    refresher: Result<Refresher, errors::RefreshInitError>,
}
impl Oidc {
/// Config key for the ID token.
const CONFIG_ID_TOKEN: &'static str = "id-token";
/// Check whether the stored ID token can still be used.
fn token_valid(&self) -> Result<bool, errors::IdTokenError> {
let part = self
.id_token
.expose_secret()
.split('.')
.nth(1)
.ok_or(errors::IdTokenError::InvalidFormat)?;
let payload = JWT_BASE64_ENGINE.decode(part)?;
let expiry = serde_json::from_slice::<Claims>(&payload)?.expiry;
let timestamp = Utc
.timestamp_opt(expiry, 0)
.earliest()
.ok_or(errors::IdTokenError::InvalidExpirationTimestamp)?;
let valid = Utc::now() + TEN_SEC < timestamp;
Ok(valid)
}
/// Retrieve the ID token. If the stored ID token is or will soon be expired, try refreshing it first.
pub async fn id_token(&mut self) -> Result<String, errors::Error> {
if self.token_valid()? {
return Ok(self.id_token.expose_secret().to_string());
}
let id_token = self.refresher.as_mut().map_err(|e| e.clone())?.id_token().await?;
self.id_token = id_token.clone().into();
Ok(id_token)
}
/// Create an instance of this struct from the auth provider config.
pub fn from_config(config: &HashMap<String, String>) -> Result<Self, errors::Error> {
let id_token = config
.get(Self::CONFIG_ID_TOKEN)
.ok_or(errors::Error::IdTokenMissing)?
.clone()
.into();
let refresher = Refresher::from_config(config);
Ok(Self { id_token, refresher })
}
}
/// Claims extracted from the ID token. Only expiration time here is important.
#[derive(Deserialize)]
struct Claims {
    // Unix timestamp (seconds) after which the token is no longer valid.
    #[serde(rename = "exp", deserialize_with = "deserialize_expiry")]
    expiry: i64,
}
/// Deserialize expiration time from a JSON number.
fn deserialize_expiry<'de, D: Deserializer<'de>>(deserializer: D) -> core::result::Result<i64, D::Error> {
let json_number = Number::deserialize(deserializer)?;
json_number
.as_i64()
.or_else(|| Some(json_number.as_f64()? as i64))
.ok_or(serde::de::Error::custom("cannot be casted to i64"))
}
/// Metadata retrieved from the provider. Only token endpoint here is important.
#[derive(Deserialize)]
struct Metadata {
    // URL to POST token (refresh) requests to.
    token_endpoint: String,
}

/// Authorization styles used by different providers.
/// Some providers require the authorization info in the header, some in the request body.
/// Some providers reject requests when authorization info is passed in both.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AuthStyle {
    /// Client id/secret sent as an HTTP Basic `Authorization` header.
    Header,
    /// Client id/secret sent as form parameters in the request body.
    Params,
}

impl AuthStyle {
    /// All known authorization styles.
    const ALL: [Self; 2] = [Self::Header, Self::Params];
}

/// Token response from the provider. Only refresh token and id token here are important.
#[derive(Deserialize)]
struct TokenResponse {
    // Present when the provider rotates the refresh token.
    refresh_token: Option<String>,
    id_token: Option<String>,
}
// Guard rails: these feature combinations cannot work, so fail the build early.
#[cfg(all(feature = "rustls-tls", not(any(feature = "ring", feature = "aws-lc-rs"))))]
compile_error!("At least one of ring or aws-lc-rs feature must be enabled to use rustls-tls feature");
#[cfg(not(any(feature = "rustls-tls", feature = "openssl-tls")))]
compile_error!(
    "At least one of rustls-tls or openssl-tls feature must be enabled to use refresh-oidc feature"
);

// Current TLS feature precedence when more than one are set:
// 1. rustls-tls
// 2. openssl-tls
#[cfg(feature = "rustls-tls")]
type HttpsConnector = hyper_rustls::HttpsConnector<HttpConnector>;
#[cfg(all(not(feature = "rustls-tls"), feature = "openssl-tls"))]
type HttpsConnector = hyper_openssl::HttpsConnector<HttpConnector>;

/// Struct for refreshing the ID token with the refresh token.
#[derive(Debug)]
struct Refresher {
    /// Issuer url taken from the auth provider config (`idp-issuer-url`).
    issuer: String,
    /// Token endpoint exposed by the provider.
    /// Retrieved from the provider metadata with the first refresh request.
    token_endpoint: Option<String>,
    /// Refresh token used in the refresh requests.
    /// Updated when a new refresh token is returned by the provider.
    refresh_token: SecretString,
    /// OAuth client id used to authenticate against the provider.
    client_id: SecretString,
    /// OAuth client secret used to authenticate against the provider.
    client_secret: SecretString,
    /// HTTPS client used for metadata discovery and token requests.
    https_client: Client<HttpsConnector, String>,
    /// Authorization style used by the provider.
    /// Determined with the first refresh request by trying all known styles.
    auth_style: Option<AuthStyle>,
}
impl Refresher {
    /// Config key for the client ID.
    const CONFIG_CLIENT_ID: &'static str = "client-id";
    /// Config key for the client secret.
    const CONFIG_CLIENT_SECRET: &'static str = "client-secret";
    /// Config key for the issuer url.
    const CONFIG_ISSUER_URL: &'static str = "idp-issuer-url";
    /// Config key for the refresh token.
    const CONFIG_REFRESH_TOKEN: &'static str = "refresh-token";

    /// Create a new instance of this struct from the provider config.
    ///
    /// Fails when any required field is missing or when the HTTPS connector
    /// cannot be constructed.
    fn from_config(config: &HashMap<String, String>) -> Result<Self, errors::RefreshInitError> {
        let get_field = |name: &'static str| {
            config
                .get(name)
                .cloned()
                .ok_or(errors::RefreshInitError::MissingField(name))
        };
        let issuer = get_field(Self::CONFIG_ISSUER_URL)?;
        let refresh_token = get_field(Self::CONFIG_REFRESH_TOKEN)?.into();
        let client_id = get_field(Self::CONFIG_CLIENT_ID)?.into();
        let client_secret = get_field(Self::CONFIG_CLIENT_SECRET)?.into();
        #[cfg(all(feature = "rustls-tls", feature = "aws-lc-rs"))]
        {
            if rustls::crypto::CryptoProvider::get_default().is_none() {
                // the only error here is if it's been initialized in between: we can ignore it
                // since our semantic is only to set the default value if it does not exist.
                let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
            }
        }
        // Connector selection mirrors the `HttpsConnector` alias precedence above.
        #[cfg(all(feature = "rustls-tls", not(feature = "webpki-roots")))]
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_native_roots()
            .map_err(|_| errors::RefreshInitError::NoValidNativeRootCA)?
            .https_only()
            .enable_http1()
            .build();
        #[cfg(all(feature = "rustls-tls", feature = "webpki-roots"))]
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_webpki_roots()
            .https_only()
            .enable_http1()
            .build();
        #[cfg(all(not(feature = "rustls-tls"), feature = "openssl-tls"))]
        let https = hyper_openssl::HttpsConnector::new()?;
        let https_client = hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build(https);
        Ok(Self {
            issuer,
            token_endpoint: None,
            refresh_token,
            client_id,
            client_secret,
            https_client,
            auth_style: None,
        })
    }

    /// If the token endpoint is not yet cached in this struct, extract it from the provider metadata and store in the cache.
    /// Provider metadata is retrieved from a well-known path.
    async fn token_endpoint(&mut self) -> Result<String, errors::RefreshError> {
        if let Some(endpoint) = self.token_endpoint.clone() {
            return Ok(endpoint);
        }
        // Standard OIDC discovery document location, relative to the issuer.
        let discovery = format!("{}/.well-known/openid-configuration", self.issuer).parse::<Uri>()?;
        let response = self.https_client.get(discovery).await?;
        if response.status().is_success() {
            let body = response.into_body().collect().await?.to_bytes();
            let metadata = serde_json::from_slice::<Metadata>(body.as_ref())
                .map_err(errors::RefreshError::InvalidMetadata)?;
            self.token_endpoint.replace(metadata.token_endpoint.clone());
            Ok(metadata.token_endpoint)
        } else {
            Err(errors::RefreshError::RequestFailed(response.status()))
        }
    }

    /// Prepare a token request to the provider.
    ///
    /// The body is a form-urlencoded `refresh_token` grant; the client
    /// credentials go in either the `Authorization` header or the body,
    /// depending on `auth_style`.
    fn token_request(
        &self,
        endpoint: &str,
        auth_style: AuthStyle,
    ) -> Result<Request<String>, errors::RefreshError> {
        let mut builder = Request::builder()
            .uri(endpoint)
            .method(Method::POST)
            .header(
                CONTENT_TYPE,
                HeaderValue::from_static("application/x-www-form-urlencoded"),
            )
            .version(Version::HTTP_11);
        let mut params = vec![
            ("grant_type", "refresh_token"),
            ("refresh_token", self.refresh_token.expose_secret()),
        ];
        match auth_style {
            AuthStyle::Header => {
                // HTTP Basic auth: base64("client_id:client_secret").
                builder = builder.header(
                    AUTHORIZATION,
                    format!(
                        "Basic {}",
                        STANDARD_BASE64_ENGINE.encode(format!(
                            "{}:{}",
                            self.client_id.expose_secret(),
                            self.client_secret.expose_secret()
                        ))
                    ),
                );
            }
            AuthStyle::Params => {
                params.extend([
                    ("client_id", self.client_id.expose_secret()),
                    ("client_secret", self.client_secret.expose_secret()),
                ]);
            }
        };
        let body = Serializer::new(String::new()).extend_pairs(params).finish();
        builder.body(body).map_err(Into::into)
    }

    /// Fetch a new ID token from the provider.
    ///
    /// On the first refresh the provider's authorization style is unknown, so
    /// every known style is tried in turn; the first that succeeds is cached
    /// for subsequent refreshes.
    async fn id_token(&mut self) -> Result<String, errors::RefreshError> {
        let token_endpoint = self.token_endpoint().await?;
        let response = match self.auth_style {
            Some(style) => {
                let request = self.token_request(&token_endpoint, style)?;
                self.https_client.request(request).await?
            }
            None => {
                let mut ok_response = None;
                for style in AuthStyle::ALL {
                    let request = self.token_request(&token_endpoint, style)?;
                    let response = self.https_client.request(request).await?;
                    if response.status().is_success() {
                        ok_response.replace(response);
                        self.auth_style.replace(style);
                        break;
                    }
                }
                ok_response.ok_or(errors::RefreshError::AuthorizationFailure)?
            }
        };
        if !response.status().is_success() {
            return Err(errors::RefreshError::RequestFailed(response.status()));
        }
        let body = response.into_body().collect().await?.to_bytes();
        let token_response = serde_json::from_slice::<TokenResponse>(body.as_ref())
            .map_err(errors::RefreshError::InvalidTokenResponse)?;
        // Providers may rotate the refresh token; keep the newest one.
        if let Some(token) = token_response.refresh_token {
            self.refresh_token = token.into();
        }
        token_response
            .id_token
            .ok_or(errors::RefreshError::NoIdTokenReceived)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Exercises `Oidc::token_valid` against well-formed, expired, and malformed JWTs.
    #[test]
    fn token_valid() {
        let mut oidc = Oidc {
            id_token: String::new().into(),
            // Refresher deliberately unavailable: these cases only exercise validation.
            refresher: Err(errors::RefreshInitError::MissingField(
                Refresher::CONFIG_REFRESH_TOKEN,
            )),
        };
        // Proper JWT expiring at 2123-06-28T15:18:12.629Z
        let token_valid = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9\
        .eyJpc3MiOiJPbmxpbmUgSldUIEJ1aWxkZXIiLCJpYXQiOjE2ODc5NjU0NTIsImV4cCI6NDg0MzYzOTA5MiwiYXVkIjoid3d3LmV4YW1wbGUuY29tIiwic3ViIjoianJvY2tldEBleGFtcGxlLmNvbSIsIkVtYWlsIjoiYmVlQGV4YW1wbGUuY29tIn0\
        .GKTkPMywcNQv0n01iBfv_A6VuCCCcAe72RhP0OrZsQM";
        // Proper JWT expired at 2023-06-28T15:19:53.421Z
        let token_expired = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9\
        .eyJpc3MiOiJPbmxpbmUgSldUIEJ1aWxkZXIiLCJpYXQiOjE2ODc5NjU0NTIsImV4cCI6MTY4Nzk2NTU5MywiYXVkIjoid3d3LmV4YW1wbGUuY29tIiwic3ViIjoianJvY2tldEBleGFtcGxlLmNvbSIsIkVtYWlsIjoiYmVlQGV4YW1wbGUuY29tIn0\
        .zTDnfI_zXIa6yPKY_ZE8r6GoLK7Syj-URcTU5_ryv1M";
        oidc.id_token = token_valid.to_string().into();
        assert!(oidc.token_valid().expect("proper token failed validation"));
        oidc.id_token = token_expired.to_string().into();
        assert!(!oidc.token_valid().expect("proper token failed validation"));
        // Header only, no payload segment.
        let malformed_token = token_expired.split_once('.').unwrap().0.to_string();
        oidc.id_token = malformed_token.into();
        oidc.token_valid().expect_err("malformed token passed validation");
        // `?` is not in the URL-safe base64 alphabet.
        let invalid_base64_token = token_valid
            .split_once('.')
            .map(|(prefix, suffix)| format!("{}.?{}", prefix, suffix))
            .unwrap();
        oidc.id_token = invalid_base64_token.into();
        oidc.token_valid()
            .expect_err("token with invalid base64 encoding passed validation");
        // Valid JSON payload but no `exp` claim.
        let invalid_claims = [("sub", "jrocket@example.com"), ("aud", "www.example.com")]
            .into_iter()
            .collect::<HashMap<_, _>>();
        let invalid_claims_token = format!(
            "{}.{}.{}",
            token_valid.split_once('.').unwrap().0,
            JWT_BASE64_ENGINE.encode(serde_json::to_string(&invalid_claims).unwrap()),
            token_valid.rsplit_once('.').unwrap().1,
        );
        oidc.id_token = invalid_claims_token.into();
        oidc.token_valid()
            .expect_err("token without expiration timestamp passed validation");
    }

    // With only `id-token` configured, the Oidc is built but its refresher is an Err.
    #[cfg(any(feature = "openssl-tls", feature = "rustls-tls"))]
    #[test]
    fn from_minimal_config() {
        let minimal_config = [(Oidc::CONFIG_ID_TOKEN.into(), "some_id_token".into())]
            .into_iter()
            .collect();
        let oidc = Oidc::from_config(&minimal_config)
            .expect("failed to create oidc from minimal config (only id-token)");
        assert_eq!(oidc.id_token.expose_secret(), "some_id_token");
        assert!(oidc.refresher.is_err());
    }

    // With all refresh fields configured, the refresher is built with empty caches.
    #[cfg(any(feature = "openssl-tls", feature = "rustls-tls"))]
    #[test]
    fn from_full_config() {
        let full_config = [
            (Oidc::CONFIG_ID_TOKEN.into(), "some_id_token".into()),
            (Refresher::CONFIG_ISSUER_URL.into(), "some_issuer".into()),
            (
                Refresher::CONFIG_REFRESH_TOKEN.into(),
                "some_refresh_token".into(),
            ),
            (Refresher::CONFIG_CLIENT_ID.into(), "some_client_id".into()),
            (
                Refresher::CONFIG_CLIENT_SECRET.into(),
                "some_client_secret".into(),
            ),
        ]
        .into_iter()
        .collect();
        let oidc = Oidc::from_config(&full_config).expect("failed to create oidc from full config");
        assert_eq!(oidc.id_token.expose_secret(), "some_id_token");
        let refresher = oidc
            .refresher
            .as_ref()
            .expect("failed to create oidc refresher from full config");
        assert_eq!(refresher.issuer, "some_issuer");
        assert_eq!(refresher.token_endpoint, None);
        assert_eq!(refresher.refresh_token.expose_secret(), "some_refresh_token");
        assert_eq!(refresher.client_id.expose_secret(), "some_client_id");
        assert_eq!(refresher.client_secret.expose_secret(), "some_client_secret");
        assert_eq!(refresher.auth_style, None);
    }
}

103
vendor/kube-client/src/client/body.rs vendored Normal file
View File

@@ -0,0 +1,103 @@
use std::{
error::Error as StdError,
fmt,
pin::{pin, Pin},
task::{Context, Poll},
};
use bytes::Bytes;
use http_body::{Body as HttpBody, Frame, SizeHint};
use http_body_util::{combinators::UnsyncBoxBody, BodyExt};
/// A request body.
pub struct Body {
    // Backing representation: a single buffer or a wrapped streaming body.
    kind: Kind,
}
impl fmt::Debug for Body {
    /// Show only which variant backs the body; the payload itself is not printed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let kind = match self.kind {
            Kind::Once(_) => "Once",
            Kind::Wrap(_) => "Wrap",
        };
        f.debug_struct("Body").field("kind", &kind).finish()
    }
}
/// Backing representation of a [`Body`].
enum Kind {
    /// At most one data frame; `None` once emitted (or for an empty body).
    Once(Option<Bytes>),
    /// An arbitrary boxed body with its error type erased.
    Wrap(UnsyncBoxBody<Bytes, Box<dyn StdError + Send + Sync>>),
}
impl Body {
    // Internal constructor; public entry points are `empty`, `From`, and `wrap_body`.
    fn new(kind: Kind) -> Self {
        Body { kind }
    }

    /// Create an empty body
    pub fn empty() -> Self {
        Self::new(Kind::Once(None))
    }

    // Create a body from an existing body
    pub(crate) fn wrap_body<B>(body: B) -> Self
    where
        B: HttpBody<Data = Bytes> + Send + 'static,
        B::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        // Erase the inner body's error type and box it.
        Body::new(Kind::Wrap(body.map_err(Into::into).boxed_unsync()))
    }

    /// Collect all the data frames and trailers of this request body and return the data frame
    pub async fn collect_bytes(self) -> Result<Bytes, crate::Error> {
        Ok(self.collect().await?.to_bytes())
    }
}
impl From<Bytes> for Body {
    /// Build a single-frame body from raw bytes; empty input becomes an empty body.
    fn from(bytes: Bytes) -> Self {
        if bytes.is_empty() {
            return Self::empty();
        }
        Self::new(Kind::Once(Some(bytes)))
    }
}
impl From<Vec<u8>> for Body {
    /// Delegate to the `Bytes` conversion above.
    fn from(vec: Vec<u8>) -> Self {
        Bytes::from(vec).into()
    }
}
impl HttpBody for Body {
    type Data = Bytes;
    type Error = crate::Error;

    fn poll_frame(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Frame<Self::Data>, Self::Error>>> {
        match &mut self.kind {
            // Single-shot: `take()` yields the buffer once, then end-of-stream (`None`).
            Kind::Once(val) => Poll::Ready(val.take().map(|bytes| Ok(Frame::data(bytes)))),
            // Delegate to the wrapped body, adapting its boxed error into `crate::Error`.
            Kind::Wrap(body) => pin!(body).poll_frame(cx).map_err(crate::Error::Service),
        }
    }

    fn size_hint(&self) -> SizeHint {
        match &self.kind {
            Kind::Once(Some(bytes)) => SizeHint::with_exact(bytes.len() as u64),
            Kind::Once(None) => SizeHint::with_exact(0),
            Kind::Wrap(body) => body.size_hint(),
        }
    }

    fn is_end_stream(&self) -> bool {
        match &self.kind {
            Kind::Once(Some(bytes)) => bytes.is_empty(),
            Kind::Once(None) => true,
            Kind::Wrap(body) => body.is_end_stream(),
        }
    }
}

341
vendor/kube-client/src/client/builder.rs vendored Normal file
View File

@@ -0,0 +1,341 @@
use bytes::Bytes;
use chrono::{DateTime, Utc};
use http::{header::HeaderMap, Request, Response};
use hyper::{
body::Incoming,
rt::{Read, Write},
};
use hyper_timeout::TimeoutConnector;
use hyper_util::{
client::legacy::connect::{Connection, HttpConnector},
rt::TokioExecutor,
};
use std::time::Duration;
use tower::{util::BoxService, BoxError, Layer, Service, ServiceBuilder};
use tower_http::{
classify::ServerErrorsFailureClass, map_response_body::MapResponseBodyLayer, trace::TraceLayer,
};
use tracing::Span;
use super::body::Body;
use crate::{client::ConfigExt, Client, Config, Error, Result};
/// HTTP body of a dynamic backing type.
///
/// The suggested implementation type is [`crate::client::Body`].
pub type DynBody = dyn http_body::Body<Data = Bytes, Error = BoxError> + Send + Unpin;

/// Builder for [`Client`] instances with customized [tower](`Service`) middleware.
pub struct ClientBuilder<Svc> {
    // Tower service stack the final client will drive.
    service: Svc,
    // Default namespace, forwarded to `Client::new`.
    default_ns: String,
    // Optional expiry after which the client's identity is stale
    // (populated from the exec identity expiration in `make_generic_builder`).
    valid_until: Option<DateTime<Utc>>,
}
impl<Svc> ClientBuilder<Svc> {
    /// Construct a [`ClientBuilder`] from scratch with a fully custom [`Service`] stack.
    ///
    /// This method is only intended for advanced use cases, most users will want to use [`ClientBuilder::try_from`] instead,
    /// which provides a default stack as a starting point.
    pub fn new(service: Svc, default_namespace: impl Into<String>) -> Self
    where
        Svc: Service<Request<Body>>,
    {
        ClientBuilder {
            service,
            default_ns: default_namespace.into(),
            valid_until: None,
        }
    }

    /// Add a [`Layer`] to the current [`Service`] stack.
    pub fn with_layer<L: Layer<Svc>>(self, layer: &L) -> ClientBuilder<L::Service> {
        ClientBuilder {
            service: layer.layer(self.service),
            default_ns: self.default_ns,
            valid_until: self.valid_until,
        }
    }

    /// Sets an expiration timestamp for the client.
    pub fn with_valid_until(self, valid_until: Option<DateTime<Utc>>) -> Self {
        Self { valid_until, ..self }
    }

    /// Build a [`Client`] instance with the current [`Service`] stack.
    pub fn build<B>(self) -> Client
    where
        Svc: Service<Request<Body>, Response = Response<B>> + Send + 'static,
        Svc::Future: Send + 'static,
        Svc::Error: Into<BoxError>,
        B: http_body::Body<Data = bytes::Bytes> + Send + 'static,
        B::Error: Into<BoxError>,
    {
        let Self {
            service,
            default_ns,
            valid_until,
        } = self;
        Client::new(service, default_ns).with_valid_until(valid_until)
    }
}
/// Default dynamic service type produced by [`ClientBuilder::try_from`]:
/// a boxed tower service from `Request<Body>` to a boxed dynamic response body.
pub type GenericService = BoxService<Request<Body>, Response<Box<DynBody>>, BoxError>;

impl TryFrom<Config> for ClientBuilder<GenericService> {
    type Error = Error;

    /// Builds a default [`ClientBuilder`] stack from a given configuration
    fn try_from(config: Config) -> Result<Self> {
        let mut connector = HttpConnector::new();
        // Do not reject https URIs; TLS is layered on top of this connector later.
        connector.enforce_http(false);
        #[cfg(all(feature = "aws-lc-rs", feature = "rustls-tls"))]
        {
            if rustls::crypto::CryptoProvider::get_default().is_none() {
                // the only error here is if it's been initialized in between: we can ignore it
                // since our semantic is only to set the default value if it does not exist.
                let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
            }
        }
        // Wrap the connector for the configured proxy protocol (if any);
        // disabled/unsupported protocols surface as errors rather than being ignored.
        match config.proxy_url.as_ref() {
            Some(proxy_url) if proxy_url.scheme_str() == Some("socks5") => {
                #[cfg(feature = "socks5")]
                {
                    let connector = hyper_socks2::SocksConnector {
                        proxy_addr: proxy_url.clone(),
                        auth: None,
                        connector,
                    };
                    make_generic_builder(connector, config)
                }
                #[cfg(not(feature = "socks5"))]
                Err(Error::ProxyProtocolDisabled {
                    proxy_url: proxy_url.clone(),
                    protocol_feature: "kube/socks5",
                })
            }
            Some(proxy_url) if proxy_url.scheme_str() == Some("http") => {
                #[cfg(feature = "http-proxy")]
                {
                    let proxy =
                        hyper_http_proxy::Proxy::new(hyper_http_proxy::Intercept::All, proxy_url.clone());
                    let connector = hyper_http_proxy::ProxyConnector::from_proxy_unsecured(connector, proxy);
                    make_generic_builder(connector, config)
                }
                #[cfg(not(feature = "http-proxy"))]
                Err(Error::ProxyProtocolDisabled {
                    proxy_url: proxy_url.clone(),
                    protocol_feature: "kube/http-proxy",
                })
            }
            Some(proxy_url) => Err(Error::ProxyProtocolUnsupported {
                proxy_url: proxy_url.clone(),
            }),
            None => make_generic_builder(connector, config),
        }
    }
}
/// Helper function for implementation of [`TryFrom<Config>`] for [`ClientBuilder`].
/// Ignores [`Config::proxy_url`], which at this point is already handled.
fn make_generic_builder<H>(connector: H, config: Config) -> Result<ClientBuilder<GenericService>, Error>
where
    H: 'static + Clone + Send + Sync + Service<http::Uri>,
    H::Response: 'static + Connection + Read + Write + Send + Unpin,
    H::Future: 'static + Send,
    H::Error: 'static + Send + Sync + std::error::Error,
{
    let default_ns = config.default_namespace.clone();
    let auth_layer = config.auth_layer()?;
    // Hyper client: TLS stack (per feature precedence) + connect/read/write timeouts.
    let client: hyper_util::client::legacy::Client<_, Body> = {
        // Current TLS feature precedence when more than one are set:
        // 1. rustls-tls
        // 2. openssl-tls
        // Create a custom client to use something else.
        // If TLS features are not enabled, http connector will be used.
        #[cfg(feature = "rustls-tls")]
        let connector = config.rustls_https_connector_with_connector(connector)?;
        #[cfg(all(not(feature = "rustls-tls"), feature = "openssl-tls"))]
        let connector = config.openssl_https_connector_with_connector(connector)?;
        #[cfg(all(not(feature = "rustls-tls"), not(feature = "openssl-tls")))]
        if config.cluster_url.scheme() == Some(&http::uri::Scheme::HTTPS) {
            // no tls stack situation only works with http scheme
            return Err(Error::TlsRequired);
        }
        let mut connector = TimeoutConnector::new(connector);
        // Set the timeouts for the client
        connector.set_connect_timeout(config.connect_timeout);
        connector.set_read_timeout(config.read_timeout);
        connector.set_write_timeout(config.write_timeout);
        hyper_util::client::legacy::Builder::new(TokioExecutor::new()).build(connector)
    };
    let stack = ServiceBuilder::new().layer(config.base_uri_layer()).into_inner();
    // Optional gzip response decompression (disabled via `disable_compression`).
    #[cfg(feature = "gzip")]
    let stack = ServiceBuilder::new()
        .layer(stack)
        .layer(
            tower_http::decompression::DecompressionLayer::new()
                .no_br()
                .no_deflate()
                .no_zstd()
                .gzip(!config.disable_compression),
        )
        .into_inner();
    let service = ServiceBuilder::new()
        .layer(stack)
        .option_layer(auth_layer)
        .layer(config.extra_headers_layer()?)
        .layer(
            // Attribute names follow [Semantic Conventions].
            // [Semantic Conventions]: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md
            TraceLayer::new_for_http()
                .make_span_with(|req: &Request<Body>| {
                    tracing::debug_span!(
                        "HTTP",
                        http.method = %req.method(),
                        http.url = %req.uri(),
                        http.status_code = tracing::field::Empty,
                        otel.name = req.extensions().get::<&'static str>().unwrap_or(&"HTTP"),
                        otel.kind = "client",
                        otel.status_code = tracing::field::Empty,
                    )
                })
                .on_request(|_req: &Request<Body>, _span: &Span| {
                    tracing::debug!("requesting");
                })
                .on_response(|res: &Response<Incoming>, _latency: Duration, span: &Span| {
                    let status = res.status();
                    span.record("http.status_code", status.as_u16());
                    if status.is_client_error() || status.is_server_error() {
                        span.record("otel.status_code", "ERROR");
                    }
                })
                // Explicitly disable `on_body_chunk`. The default does nothing.
                .on_body_chunk(())
                .on_eos(|_: Option<&HeaderMap>, _duration: Duration, _span: &Span| {
                    tracing::debug!("stream closed");
                })
                .on_failure(|ec: ServerErrorsFailureClass, _latency: Duration, span: &Span| {
                    // Called when
                    // - Calling the inner service errored
                    // - Polling `Body` errored
                    // - the response was classified as failure (5xx)
                    // - End of stream was classified as failure
                    span.record("otel.status_code", "ERROR");
                    match ec {
                        ServerErrorsFailureClass::StatusCode(status) => {
                            span.record("http.status_code", status.as_u16());
                            tracing::error!("failed with status {}", status)
                        }
                        ServerErrorsFailureClass::Error(err) => {
                            tracing::error!("failed with error {}", err)
                        }
                    }
                }),
        )
        .map_err(BoxError::from)
        .service(client);
    // The exec-provided identity's expiration (if any) bounds the client's validity.
    let (_, expiration) = config.exec_identity_pem();
    let client = ClientBuilder::new(
        BoxService::new(
            MapResponseBodyLayer::new(|body| {
                Box::new(http_body_util::BodyExt::map_err(body, BoxError::from)) as Box<DynBody>
            })
            .layer(service),
        ),
        default_ns,
    )
    .with_valid_until(expiration);
    Ok(client)
}
#[cfg(test)]
mod tests {
    #[cfg(feature = "gzip")] use super::*;

    // Verifies that `disable_compression` stops the Accept-Encoding: gzip
    // request header from being sent, using a local echo server.
    #[cfg(feature = "gzip")]
    #[tokio::test]
    async fn test_no_accept_encoding_header_sent_when_compression_disabled(
    ) -> Result<(), Box<dyn std::error::Error>> {
        use http::Uri;
        use std::net::SocketAddr;
        use tokio::net::{TcpListener, TcpStream};
        // setup a server that echoes back any encoding header value
        let addr: SocketAddr = ([127, 0, 0, 1], 0).into();
        let listener = TcpListener::bind(addr).await?;
        let local_addr = listener.local_addr()?;
        let uri: Uri = format!("http://{}", local_addr).parse()?;
        tokio::spawn(async move {
            use http_body_util::Full;
            use hyper::{server::conn::http1, service::service_fn};
            use hyper_util::rt::{TokioIo, TokioTimer};
            use std::convert::Infallible;
            loop {
                let (tcp, _) = listener.accept().await.unwrap();
                let io: TokioIo<TcpStream> = TokioIo::new(tcp);
                tokio::spawn(async move {
                    let _ = http1::Builder::new()
                        .timer(TokioTimer::new())
                        .serve_connection(
                            io,
                            service_fn(|req| async move {
                                // Echo the Accept-Encoding header back as the body (empty if absent).
                                let response = req
                                    .headers()
                                    .get(http::header::ACCEPT_ENCODING)
                                    .map(|b| Bytes::copy_from_slice(b.as_bytes()))
                                    .unwrap_or_default();
                                Ok::<_, Infallible>(Response::new(Full::new(response)))
                            }),
                        )
                        .await
                        .unwrap();
                });
            }
        });
        // confirm gzip echoed back with default config
        let config = Config { ..Config::new(uri) };
        let client = make_generic_builder(HttpConnector::new(), config.clone())?.build();
        let response = client.request_text(http::Request::default()).await?;
        assert_eq!(&response, "gzip");
        // now disable and check empty string echoed back
        let config = Config {
            disable_compression: true,
            ..config
        };
        let client = make_generic_builder(HttpConnector::new(), config)?.build();
        let response = client.request_text(http::Request::default()).await?;
        assert_eq!(&response, "");
        Ok(())
    }
}

View File

@@ -0,0 +1,545 @@
use crate::{Client, Error, Result};
use k8s_openapi::{
api::core::v1::{LocalObjectReference, Namespace as k8sNs, ObjectReference},
apimachinery::pkg::apis::meta::v1::OwnerReference,
};
use kube_core::{
object::ObjectList,
params::{GetParams, ListParams},
request::Request,
ApiResource, ClusterResourceScope, DynamicResourceScope, NamespaceResourceScope, Resource,
};
use serde::{de::DeserializeOwned, Serialize};
use std::fmt::Debug;
/// A marker trait to indicate cluster-wide operations are available
trait ClusterScope {}
/// A marker trait to indicate namespace-scoped operations are available
trait NamespaceScope {}

// k8s_openapi scopes get implementations for free
impl ClusterScope for ClusterResourceScope {}
impl NamespaceScope for NamespaceResourceScope {}
// our DynamicResourceScope can masquerade as either
impl NamespaceScope for DynamicResourceScope {}
impl ClusterScope for DynamicResourceScope {}

/// How to get the url for a collection
///
/// Pick one of `kube::client::Cluster` or `kube::client::Namespace`.
pub trait CollectionUrl<K> {
    /// API url path for the collection of `K` objects.
    fn url_path(&self) -> String;
}

/// How to get the url for an object
///
/// Pick one of `kube::client::Cluster` or `kube::client::Namespace`.
pub trait ObjectUrl<K> {
    /// API url path for a single `K` object.
    fn url_path(&self) -> String;
}

/// Marker type for cluster level queries
#[derive(Debug, Clone)]
pub struct Cluster;

/// Namespace newtype for namespace level queries
///
/// You can create this directly, or convert `From` a `String` / `&str`, or `TryFrom` an `k8s_openapi::api::core::v1::Namespace`
#[derive(Debug, Clone)]
pub struct Namespace(String);

/// Referenced object name resolution
pub trait ObjectRef<K>: ObjectUrl<K> {
    /// Name of the referenced object, when one is recorded.
    fn name(&self) -> Option<&str>;
}

/// Reference resolver for a specified namespace
pub trait NamespacedRef<K> {
    /// Resolve reference in the provided namespace
    fn within(&self, namespace: impl Into<Option<String>>) -> impl ObjectRef<K>;
}
impl<K> ObjectUrl<K> for ObjectReference
where
    K: Resource,
{
    fn url_path(&self) -> String {
        // Derive the api resource from the reference's own GVK; the reference's
        // namespace field (if set) scopes the resulting path.
        url_path(
            &ApiResource::from_gvk(&self.clone().into()),
            self.namespace.clone(),
        )
    }
}
impl<K> ObjectRef<K> for ObjectReference
where
    K: Resource,
{
    fn name(&self) -> Option<&str> {
        // `name` is optional on ObjectReference; borrow it as &str when present.
        self.name.as_deref()
    }
}
impl<K> NamespacedRef<K> for ObjectReference
where
    K: Resource,
    K::Scope: NamespaceScope,
{
    /// Produce a copy of this reference re-scoped to `namespace`.
    fn within(&self, namespace: impl Into<Option<String>>) -> impl ObjectRef<K> {
        let mut scoped = self.clone();
        scoped.namespace = namespace.into();
        scoped
    }
}
impl<K> ObjectUrl<K> for OwnerReference
where
    K: Resource,
    K::Scope: ClusterScope,
{
    fn url_path(&self) -> String {
        // OwnerReference carries no namespace, so only cluster-scoped
        // (or dynamic) resources can build a url from it directly.
        url_path(&ApiResource::from_gvk(&self.clone().into()), None)
    }
}
impl<K> ObjectRef<K> for OwnerReference
where
    K: Resource,
    K::Scope: ClusterScope,
{
    fn name(&self) -> Option<&str> {
        // `name` is mandatory on OwnerReference; wrap it for the Option-based trait.
        self.name.as_str().into()
    }
}
impl<K> NamespacedRef<K> for OwnerReference
where
    K: Resource,
    K::Scope: NamespaceScope,
{
    /// Convert this owner reference into a namespaced [`ObjectReference`].
    ///
    /// All mandatory OwnerReference fields are copied over; everything else
    /// on the ObjectReference stays at its default.
    fn within(&self, namespace: impl Into<Option<String>>) -> impl ObjectRef<K> {
        ObjectReference {
            api_version: Some(self.api_version.clone()),
            kind: Some(self.kind.clone()),
            name: Some(self.name.clone()),
            uid: Some(self.uid.clone()),
            namespace: namespace.into(),
            ..Default::default()
        }
    }
}
impl<K> NamespacedRef<K> for LocalObjectReference
where
    K: Resource,
    K::DynamicType: Default,
    K::Scope: NamespaceScope,
{
    fn within(&self, namespace: impl Into<Option<String>>) -> impl ObjectRef<K> {
        // A LocalObjectReference only carries a name; api_version and kind are
        // recovered from the statically known target type `K`.
        let dt = Default::default();
        ObjectReference {
            api_version: K::api_version(&dt).to_string().into(),
            namespace: namespace.into(),
            name: Some(self.name.clone()),
            kind: K::kind(&dt).to_string().into(),
            ..Default::default()
        }
    }
}
/// Scopes for `unstable-client` [`Client#impl-Client`] extension methods
pub mod scope {
    pub use super::{Cluster, Namespace, NamespacedRef};
}
// All objects can be listed cluster-wide
impl<K> CollectionUrl<K> for Cluster
where
    K: Resource,
    K::DynamicType: Default,
{
    fn url_path(&self) -> String {
        K::url_path(&K::DynamicType::default(), None)
    }
}
// Only cluster-scoped objects can be named globally
impl<K> ObjectUrl<K> for Cluster
where
    K: Resource,
    K::DynamicType: Default,
    K::Scope: ClusterScope,
{
    fn url_path(&self) -> String {
        K::url_path(&K::DynamicType::default(), None)
    }
}
// Only namespaced objects can be accessed via namespace
impl<K> CollectionUrl<K> for Namespace
where
    K: Resource,
    K::DynamicType: Default,
    K::Scope: NamespaceScope,
{
    fn url_path(&self) -> String {
        // `self.0` is the namespace name this newtype wraps
        K::url_path(&K::DynamicType::default(), Some(&self.0))
    }
}
impl<K> ObjectUrl<K> for Namespace
where
    K: Resource,
    K::DynamicType: Default,
    K::Scope: NamespaceScope,
{
    fn url_path(&self) -> String {
        K::url_path(&K::DynamicType::default(), Some(&self.0))
    }
}
// can be created from a complete native object
impl TryFrom<&k8sNs> for Namespace {
    type Error = NamespaceError;
    /// Extract the namespace name from a core/v1 `Namespace` object.
    ///
    /// Fails with [`NamespaceError::MissingName`] when the object has no
    /// `metadata.name`.
    fn try_from(ns: &k8sNs) -> Result<Namespace, Self::Error> {
        ns.meta()
            .name
            .as_deref()
            .map(|name| Namespace(name.to_owned()))
            .ok_or(NamespaceError::MissingName)
    }
}
// and from literals + owned strings
impl From<&str> for Namespace {
    fn from(ns: &str) -> Namespace {
        Namespace(ns.to_string())
    }
}
impl From<String> for Namespace {
    fn from(ns: String) -> Namespace {
        Namespace(ns)
    }
}
#[derive(thiserror::Error, Debug)]
/// Failures to infer a namespace
pub enum NamespaceError {
    /// The source object had no `metadata.name` to use as a namespace
    #[error("Missing Namespace Name")]
    MissingName,
}
/// Generic client extensions for the `unstable-client` feature
///
/// These methods allow users to query across a wide-array of resources without needing
/// to explicitly create an [`Api`](crate::Api) for each one of them.
///
/// ## Usage
/// 1. Create a [`Client`]
/// 2. Specify the [`scope`] you are querying at via [`Cluster`] or [`Namespace`] as args
/// 3. Specify the resource type you are using for serialization (e.g. a top level k8s-openapi type)
///
/// ## Example
///
/// ```no_run
/// # use k8s_openapi::api::core::v1::{Pod, Service};
/// # use kube::client::scope::{Namespace, Cluster};
/// # use kube::prelude::*;
/// # use kube::api::ListParams;
/// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
/// # let client: kube::Client = todo!();
/// let lp = ListParams::default();
/// // List at Cluster level for Pod resource:
/// for pod in client.list::<Pod>(&lp, &Cluster).await? {
/// println!("Found pod {} in {}", pod.name_any(), pod.namespace().unwrap());
/// }
/// // Namespaced Get for Service resource:
/// let svc = client.get::<Service>("kubernetes", &Namespace::from("default")).await?;
/// assert_eq!(svc.name_unchecked(), "kubernetes");
/// # Ok(())
/// # }
/// ```
impl Client {
    /// Get a single instance of a `Resource` implementing type `K` at the specified scope.
    ///
    /// ```no_run
    /// # use k8s_openapi::api::rbac::v1::ClusterRole;
    /// # use k8s_openapi::api::core::v1::Service;
    /// # use kube::client::scope::{Namespace, Cluster};
    /// # use kube::prelude::*;
    /// # use kube::api::GetParams;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let cr = client.get::<ClusterRole>("cluster-admin", &Cluster).await?;
    /// assert_eq!(cr.name_unchecked(), "cluster-admin");
    /// let svc = client.get::<Service>("kubernetes", &Namespace::from("default")).await?;
    /// assert_eq!(svc.name_unchecked(), "kubernetes");
    /// # Ok(())
    /// # }
    /// ```
    pub async fn get<K>(&self, name: &str, scope: &impl ObjectUrl<K>) -> Result<K>
    where
        K: Resource + Serialize + DeserializeOwned + Clone + Debug,
        <K as Resource>::DynamicType: Default,
    {
        // Build a GET request against the scope-resolved url for K
        let mut req = Request::new(scope.url_path())
            .get(name, &GetParams::default())
            .map_err(Error::BuildRequest)?;
        // NOTE(review): the &'static str extension appears to tag the request
        // with its verb for downstream instrumentation — confirm in request handling
        req.extensions_mut().insert("get");
        self.request::<K>(req).await
    }
    /// Fetch a single instance of a `Resource` from a provided object reference.
    ///
    /// ```no_run
    /// # use k8s_openapi::api::rbac::v1::ClusterRole;
    /// # use k8s_openapi::apimachinery::pkg::apis::meta::v1::OwnerReference;
    /// # use k8s_openapi::api::core::v1::{ObjectReference, LocalObjectReference};
    /// # use k8s_openapi::api::core::v1::{Node, Pod, Service, Secret};
    /// # use kube::client::scope::NamespacedRef;
    /// # use kube::api::GetParams;
    /// # use kube::prelude::*;
    /// # use kube::api::DynamicObject;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// // cluster scoped
    /// let cr: ClusterRole = todo!();
    /// let cr: ClusterRole = client.fetch(&cr.object_ref(&())).await?;
    /// assert_eq!(cr.name_unchecked(), "cluster-admin");
    /// // namespace scoped
    /// let svc: Service = todo!();
    /// let svc: Service = client.fetch(&svc.object_ref(&())).await?;
    /// assert_eq!(svc.name_unchecked(), "kubernetes");
    /// // Fetch an owner of the resource
    /// let pod: Pod = todo!();
    /// let owner = pod
    ///     .owner_references()
    ///     .to_vec()
    ///     .into_iter()
    ///     .find(|r| r.kind == Node::kind(&()))
    ///     .ok_or("Not Found")?;
    /// let node: Node = client.fetch(&owner).await?;
    /// // Namespace scoped objects require namespace
    /// let pod: Pod = client.fetch(&owner.within("ns".to_string())).await?;
    /// // Fetch dynamic object to resolve type later
    /// let dynamic: DynamicObject = client.fetch(&owner.within("ns".to_string())).await?;
    /// // Fetch using local object reference
    /// let secret_ref = pod
    ///     .spec
    ///     .unwrap_or_default()
    ///     .image_pull_secrets
    ///     .unwrap_or_default()
    ///     .get(0)
    ///     .unwrap_or(&LocalObjectReference{name: "pull_secret".into()});
    /// let secret: Secret = client.fetch(&secret_ref.within(pod.namespace())).await?;
    /// # Ok(())
    /// # }
    /// ```
    pub async fn fetch<K>(&self, reference: &impl ObjectRef<K>) -> Result<K>
    where
        K: Resource + Serialize + DeserializeOwned + Clone + Debug,
    {
        // A reference without a name cannot be resolved to a single object
        let mut req = Request::new(reference.url_path())
            .get(
                reference
                    .name()
                    .ok_or(Error::RefResolve("Reference is empty".to_string()))?,
                &GetParams::default(),
            )
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("get");
        self.request::<K>(req).await
    }
    /// List instances of a `Resource` implementing type `K` at the specified scope.
    ///
    /// ```no_run
    /// # use k8s_openapi::api::core::v1::Pod;
    /// # use k8s_openapi::api::core::v1::Service;
    /// # use kube::client::scope::{Namespace, Cluster};
    /// # use kube::prelude::*;
    /// # use kube::api::ListParams;
    /// # async fn wrapper() -> Result<(), Box<dyn std::error::Error>> {
    /// # let client: kube::Client = todo!();
    /// let lp = ListParams::default();
    /// for pod in client.list::<Pod>(&lp, &Cluster).await? {
    ///     println!("Found pod {} in {}", pod.name_any(), pod.namespace().unwrap());
    /// }
    /// for svc in client.list::<Service>(&lp, &Namespace::from("default")).await? {
    ///     println!("Found service {}", svc.name_any());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub async fn list<K>(&self, lp: &ListParams, scope: &impl CollectionUrl<K>) -> Result<ObjectList<K>>
    where
        K: Resource + Serialize + DeserializeOwned + Clone + Debug,
        <K as Resource>::DynamicType: Default,
    {
        let mut req = Request::new(scope.url_path())
            .list(lp)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("list");
        self.request::<ObjectList<K>>(req).await
    }
}
// Resource url_path resolver
//
// Builds `/api/<version>/...` for the core group and `/apis/<group>/<version>/...`
// otherwise, inserting a `namespaces/<ns>/` segment when a namespace is given.
fn url_path(r: &ApiResource, namespace: Option<String>) -> String {
    let prefix = if r.group.is_empty() { "api" } else { "apis" };
    let ns_segment = match namespace {
        Some(ns) => format!("namespaces/{ns}/"),
        None => String::new(),
    };
    format!("/{prefix}/{}/{ns_segment}{}", r.api_version, r.plural)
}
#[cfg(test)]
mod test {
    use crate::{
        client::{
            client_ext::NamespacedRef as _,
            scope::{Cluster, Namespace},
        },
        Client,
    };
    use super::ListParams;
    use k8s_openapi::api::core::v1::LocalObjectReference;
    use kube_core::{DynamicObject, Resource as _, ResourceExt as _};
    // Integration test: exercises list/get at cluster and namespace scope.
    // Requires a reachable cluster, hence #[ignore].
    #[tokio::test]
    #[ignore = "needs cluster (will list/get namespaces, pods, jobs, svcs, clusterroles)"]
    async fn client_ext_list_get_pods_svcs() -> Result<(), Box<dyn std::error::Error>> {
        use k8s_openapi::api::{
            batch::v1::Job,
            core::v1::{Namespace as k8sNs, Pod, Service},
            rbac::v1::ClusterRole,
        };
        let client = Client::try_default().await?;
        let lp = ListParams::default();
        // cluster-scoped list
        for ns in client.list::<k8sNs>(&lp, &Cluster).await? {
            // namespaced list
            for p in client.list::<Pod>(&lp, &Namespace::try_from(&ns)?).await? {
                println!("Found pod {} in {}", p.name_any(), ns.name_any());
            }
        }
        // across-namespace list
        for j in client.list::<Job>(&lp, &Cluster).await? {
            println!("Found job {} in {}", j.name_any(), j.namespace().unwrap());
        }
        // namespaced get
        let default: Namespace = "default".into();
        let svc = client.get::<Service>("kubernetes", &default).await?;
        assert_eq!(svc.name_unchecked(), "kubernetes");
        // global get
        let ca = client.get::<ClusterRole>("cluster-admin", &Cluster).await?;
        assert_eq!(ca.name_unchecked(), "cluster-admin");
        Ok(())
    }
    // Integration test: exercises fetch() via ObjectReference, OwnerReference
    // and LocalObjectReference, typed and untyped (DynamicObject).
    #[tokio::test]
    #[ignore = "needs cluster (will get svcs, clusterroles, pods, nodes)"]
    async fn client_ext_fetch_ref_pods_svcs() -> Result<(), Box<dyn std::error::Error>> {
        use k8s_openapi::api::{
            core::v1::{Node, ObjectReference, Pod, Service},
            rbac::v1::ClusterRole,
        };
        let client = Client::try_default().await?;
        // namespaced fetch
        let svc: Service = client
            .fetch(&ObjectReference {
                kind: Some(Service::kind(&()).into()),
                api_version: Some(Service::api_version(&()).into()),
                name: Some("kubernetes".into()),
                namespace: Some("default".into()),
                ..Default::default()
            })
            .await?;
        assert_eq!(svc.name_unchecked(), "kubernetes");
        // global fetch
        let ca: ClusterRole = client
            .fetch(&ObjectReference {
                kind: Some(ClusterRole::kind(&()).into()),
                api_version: Some(ClusterRole::api_version(&()).into()),
                name: Some("cluster-admin".into()),
                ..Default::default()
            })
            .await?;
        assert_eq!(ca.name_unchecked(), "cluster-admin");
        // namespaced fetch untyped
        let svc: DynamicObject = client.fetch(&svc.object_ref(&())).await?;
        assert_eq!(svc.name_unchecked(), "kubernetes");
        // global fetch untyped
        let ca: DynamicObject = client.fetch(&ca.object_ref(&())).await?;
        assert_eq!(ca.name_unchecked(), "cluster-admin");
        // Fetch using local object reference
        let svc: Service = client
            .fetch(
                &LocalObjectReference {
                    name: svc.name_any().into(),
                }
                .within(svc.namespace()),
            )
            .await?;
        assert_eq!(svc.name_unchecked(), "kubernetes");
        let kube_system: Namespace = "kube-system".into();
        for pod in client
            .list::<Pod>(
                &ListParams::default().labels("component=kube-apiserver"),
                &kube_system,
            )
            .await?
        {
            let owner = pod
                .owner_references()
                .iter()
                .find(|r| r.kind == Node::kind(&()))
                .ok_or("Not found")?;
            let _: Node = client.fetch(owner).await?;
        }
        Ok(())
    }
    // Integration test: fetching with a mismatched type must fail with a
    // deserialization error, both directly and via DynamicObject::try_parse.
    #[tokio::test]
    #[ignore = "needs cluster (will get svcs, clusterroles, pods, nodes)"]
    async fn fetch_fails() -> Result<(), Box<dyn std::error::Error>> {
        use crate::error::Error;
        use k8s_openapi::api::core::v1::{ObjectReference, Pod, Service};
        let client = Client::try_default().await?;
        // namespaced fetch
        let svc: Service = client
            .fetch(&ObjectReference {
                kind: Some(Service::kind(&()).into()),
                api_version: Some(Service::api_version(&()).into()),
                name: Some("kubernetes".into()),
                namespace: Some("default".into()),
                ..Default::default()
            })
            .await?;
        let err = client.fetch::<Pod>(&svc.object_ref(&())).await.unwrap_err();
        assert!(matches!(err, Error::SerdeError(_)));
        assert_eq!(err.to_string(), "Error deserializing response: invalid value: string \"Service\", expected Pod at line 1 column 17".to_string());
        let obj: DynamicObject = client.fetch(&svc.object_ref(&())).await?;
        let err = obj.try_parse::<Pod>().unwrap_err();
        assert_eq!(err.to_string(), "failed to parse this DynamicObject into a Resource: invalid value: string \"Service\", expected Pod".to_string());
        Ok(())
    }
}

View File

@@ -0,0 +1,313 @@
use std::sync::Arc;
use chrono::{DateTime, Utc};
use http::{header::HeaderName, HeaderValue};
#[cfg(feature = "openssl-tls")] use hyper::rt::{Read, Write};
use hyper_util::client::legacy::connect::HttpConnector;
use secrecy::ExposeSecret;
use tower::{filter::AsyncFilterLayer, util::Either};
#[cfg(any(feature = "rustls-tls", feature = "openssl-tls"))] use super::tls;
use super::{
auth::Auth,
middleware::{AddAuthorizationLayer, AuthLayer, BaseUriLayer, ExtraHeadersLayer},
};
use crate::{Config, Error, Result};
/// Extensions to [`Config`](crate::Config) for custom [`Client`](crate::Client).
///
/// See [`Client::new`](crate::Client::new) for an example.
///
/// This trait is sealed and cannot be implemented.
pub trait ConfigExt: private::Sealed {
    /// Layer to set the base URI of requests to the configured server.
    fn base_uri_layer(&self) -> BaseUriLayer;
    /// Optional layer to set up `Authorization` header depending on the config.
    fn auth_layer(&self) -> Result<Option<AuthLayer>>;
    /// Layer to add non-authn HTTP headers depending on the config.
    fn extra_headers_layer(&self) -> Result<ExtraHeadersLayer>;
    /// Create [`hyper_rustls::HttpsConnector`] based on config.
    ///
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use kube::{client::{Body, ConfigExt}, Config};
    /// # use hyper_util::rt::TokioExecutor;
    /// let config = Config::infer().await?;
    /// let https = config.rustls_https_connector()?;
    /// let hyper_client: hyper_util::client::legacy::Client<_, Body> = hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build(https);
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(docsrs, doc(cfg(feature = "rustls-tls")))]
    #[cfg(feature = "rustls-tls")]
    fn rustls_https_connector(&self) -> Result<hyper_rustls::HttpsConnector<HttpConnector>>;
    /// Create [`hyper_rustls::HttpsConnector`] based on config and `connector`.
    ///
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use kube::{client::{Body, ConfigExt}, Config};
    /// # use hyper_util::{client::legacy::connect::HttpConnector, rt::TokioExecutor};
    /// let config = Config::infer().await?;
    /// let mut connector = HttpConnector::new();
    /// connector.enforce_http(false);
    /// let https = config.rustls_https_connector_with_connector(connector)?;
    /// let hyper_client: hyper_util::client::legacy::Client<_, Body> = hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build(https);
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(docsrs, doc(cfg(feature = "rustls-tls")))]
    #[cfg(feature = "rustls-tls")]
    fn rustls_https_connector_with_connector<H>(
        &self,
        connector: H,
    ) -> Result<hyper_rustls::HttpsConnector<H>>;
    /// Create [`rustls::ClientConfig`] based on config.
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use hyper_util::client::legacy::connect::HttpConnector;
    /// # use kube::{client::ConfigExt, Config};
    /// let config = Config::infer().await?;
    /// let https = {
    ///     let rustls_config = std::sync::Arc::new(config.rustls_client_config()?);
    ///     let mut http = HttpConnector::new();
    ///     http.enforce_http(false);
    ///     hyper_rustls::HttpsConnector::from((http, rustls_config))
    /// };
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(docsrs, doc(cfg(feature = "rustls-tls")))]
    #[cfg(feature = "rustls-tls")]
    fn rustls_client_config(&self) -> Result<rustls::ClientConfig>;
    /// Create [`hyper_openssl::HttpsConnector`] based on config.
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use kube::{client::ConfigExt, Config};
    /// let config = Config::infer().await?;
    /// let https = config.openssl_https_connector()?;
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(docsrs, doc(cfg(feature = "openssl-tls")))]
    #[cfg(feature = "openssl-tls")]
    fn openssl_https_connector(&self)
        -> Result<hyper_openssl::client::legacy::HttpsConnector<HttpConnector>>;
    /// Create [`hyper_openssl::HttpsConnector`] based on config and `connector`.
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use hyper_util::client::legacy::connect::HttpConnector;
    /// # use kube::{client::ConfigExt, Config};
    /// let mut http = HttpConnector::new();
    /// http.enforce_http(false);
    /// let config = Config::infer().await?;
    /// let https = config.openssl_https_connector_with_connector(http)?;
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(docsrs, doc(cfg(feature = "openssl-tls")))]
    #[cfg(feature = "openssl-tls")]
    fn openssl_https_connector_with_connector<H>(
        &self,
        connector: H,
    ) -> Result<hyper_openssl::client::legacy::HttpsConnector<H>>
    where
        H: tower::Service<http::Uri> + Send,
        H::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        H::Future: Send + 'static,
        H::Response: Read + Write + hyper_util::client::legacy::connect::Connection + Unpin;
    /// Create [`openssl::ssl::SslConnectorBuilder`] based on config.
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use hyper_util::client::legacy::connect::HttpConnector;
    /// # use kube::{client::ConfigExt, Client, Config};
    /// let config = Config::infer().await?;
    /// let https = {
    ///     let mut http = HttpConnector::new();
    ///     http.enforce_http(false);
    ///     let ssl = config.openssl_ssl_connector_builder()?;
    ///     hyper_openssl::client::legacy::HttpsConnector::with_connector(http, ssl)?
    /// };
    /// # Ok(())
    /// # }
    /// ```
    #[cfg_attr(docsrs, doc(cfg(feature = "openssl-tls")))]
    #[cfg(feature = "openssl-tls")]
    fn openssl_ssl_connector_builder(&self) -> Result<openssl::ssl::SslConnectorBuilder>;
}
// Sealed-trait pattern: `ConfigExt` can only ever be implemented for `Config`.
mod private {
    pub trait Sealed {}
    impl Sealed for super::Config {}
}
impl ConfigExt for Config {
    fn base_uri_layer(&self) -> BaseUriLayer {
        BaseUriLayer::new(self.cluster_url.clone())
    }
    fn auth_layer(&self) -> Result<Option<AuthLayer>> {
        Ok(match Auth::try_from(&self.auth_info).map_err(Error::Auth)? {
            // No auth configured: no layer at all
            Auth::None => None,
            Auth::Basic(user, pass) => Some(AuthLayer(Either::Left(
                AddAuthorizationLayer::basic(&user, pass.expose_secret()).as_sensitive(true),
            ))),
            Auth::Bearer(token) => Some(AuthLayer(Either::Left(
                AddAuthorizationLayer::bearer(token.expose_secret()).as_sensitive(true),
            ))),
            // Token from an exec plugin that must be refreshed periodically
            Auth::RefreshableToken(refreshable) => {
                Some(AuthLayer(Either::Right(AsyncFilterLayer::new(refreshable))))
            }
            // Client certs are handled at the TLS layer (see exec_identity_pem),
            // not via an Authorization header
            Auth::Certificate(_client_certificate_data, _client_key_data, _) => None,
        })
    }
    fn extra_headers_layer(&self) -> Result<ExtraHeadersLayer> {
        let mut headers = self.headers.clone();
        // Impersonation headers from the kubeconfig auth info, if present
        if let Some(impersonate_user) = &self.auth_info.impersonate {
            headers.push((
                HeaderName::from_static("impersonate-user"),
                HeaderValue::from_str(impersonate_user)
                    .map_err(http::Error::from)
                    .map_err(Error::HttpError)?,
            ));
        }
        if let Some(impersonate_groups) = &self.auth_info.impersonate_groups {
            // One impersonate-group header per group
            for group in impersonate_groups {
                headers.push((
                    HeaderName::from_static("impersonate-group"),
                    HeaderValue::from_str(group)
                        .map_err(http::Error::from)
                        .map_err(Error::HttpError)?,
                ));
            }
        }
        Ok(ExtraHeadersLayer {
            headers: Arc::new(headers),
        })
    }
    #[cfg(feature = "rustls-tls")]
    fn rustls_client_config(&self) -> Result<rustls::ClientConfig> {
        // Prefer an exec-plugin-provided client identity over the static one
        let identity = self.exec_identity_pem().0.or_else(|| self.identity_pem());
        tls::rustls_tls::rustls_client_config(
            identity.as_deref(),
            self.root_cert.as_deref(),
            self.accept_invalid_certs,
        )
        .map_err(Error::RustlsTls)
    }
    #[cfg(feature = "rustls-tls")]
    fn rustls_https_connector(&self) -> Result<hyper_rustls::HttpsConnector<HttpConnector>> {
        let mut connector = HttpConnector::new();
        // Allow http:// URIs through so TLS can be layered on top
        connector.enforce_http(false);
        self.rustls_https_connector_with_connector(connector)
    }
    #[cfg(feature = "rustls-tls")]
    fn rustls_https_connector_with_connector<H>(
        &self,
        connector: H,
    ) -> Result<hyper_rustls::HttpsConnector<H>> {
        use hyper_rustls::FixedServerNameResolver;
        use crate::client::tls::rustls_tls;
        let rustls_config = self.rustls_client_config()?;
        let mut builder = hyper_rustls::HttpsConnectorBuilder::new()
            .with_tls_config(rustls_config)
            .https_or_http();
        // Honor an explicit tls-server-name override (SNI) from the config
        if let Some(tsn) = self.tls_server_name.as_ref() {
            builder = builder.with_server_name_resolver(FixedServerNameResolver::new(
                tsn.clone()
                    .try_into()
                    .map_err(rustls_tls::Error::InvalidServerName)
                    .map_err(Error::RustlsTls)?,
            ));
        }
        Ok(builder.enable_http1().wrap_connector(connector))
    }
    #[cfg(feature = "openssl-tls")]
    fn openssl_ssl_connector_builder(&self) -> Result<openssl::ssl::SslConnectorBuilder> {
        // Prefer an exec-plugin-provided client identity over the static one
        let identity = self.exec_identity_pem().0.or_else(|| self.identity_pem());
        // TODO: pass self.tls_server_name for openssl
        tls::openssl_tls::ssl_connector_builder(identity.as_ref(), self.root_cert.as_ref())
            .map_err(|e| Error::OpensslTls(tls::openssl_tls::Error::CreateSslConnector(e)))
    }
    #[cfg(feature = "openssl-tls")]
    fn openssl_https_connector(
        &self,
    ) -> Result<hyper_openssl::client::legacy::HttpsConnector<HttpConnector>> {
        let mut connector = HttpConnector::new();
        connector.enforce_http(false);
        self.openssl_https_connector_with_connector(connector)
    }
    #[cfg(feature = "openssl-tls")]
    fn openssl_https_connector_with_connector<H>(
        &self,
        connector: H,
    ) -> Result<hyper_openssl::client::legacy::HttpsConnector<H>>
    where
        H: tower::Service<http::Uri> + Send,
        H::Error: Into<Box<dyn std::error::Error + Send + Sync>>,
        H::Future: Send + 'static,
        H::Response: Read + Write + hyper_util::client::legacy::connect::Connection + Unpin,
    {
        let mut https = hyper_openssl::client::legacy::HttpsConnector::with_connector(
            connector,
            self.openssl_ssl_connector_builder()?,
        )
        .map_err(|e| Error::OpensslTls(tls::openssl_tls::Error::CreateHttpsConnector(e)))?;
        if self.accept_invalid_certs {
            // Disable certificate verification when explicitly requested
            https.set_callback(|ssl, _uri| {
                ssl.set_verify(openssl::ssl::SslVerifyMode::NONE);
                Ok(())
            });
        }
        Ok(https)
    }
}
impl Config {
    // This is necessary to retrieve an identity when an exec plugin
    // returns a client certificate and key instead of a token.
    // This has be to be checked on TLS configuration vs tokens
    // which can be added in as an AuthLayer.
    //
    // Returns the concatenated PEM (key, newline, certificate, newline) and
    // the optional expiry reported by the exec plugin; (None, None) when the
    // configured auth is not certificate-based.
    pub(crate) fn exec_identity_pem(&self) -> (Option<Vec<u8>>, Option<DateTime<Utc>>) {
        if let Ok(Auth::Certificate(cert_data, key_data, expiration)) = Auth::try_from(&self.auth_info) {
            let mut pem = key_data.expose_secret().as_bytes().to_vec();
            pem.push(b'\n');
            pem.extend_from_slice(cert_data.as_bytes());
            pem.push(b'\n');
            (Some(pem), expiration)
        } else {
            (None, None)
        }
    }
}

View File

@@ -0,0 +1,91 @@
use crate::{
api::{AttachParams, AttachedProcess, LogParams, Portforwarder},
client::AsyncBufRead,
Client, Error, Result,
};
use kube_core::{kubelet_debug::KubeletDebugParams, Request};
use std::fmt::Debug;
/// Methods to access debug endpoints directly on `kubelet`
///
/// These provide analogous methods to the `Pod` api methods for [`Exec`](crate::api::Exec), [`Attach`](crate::api::Attach), and [`Portforward`](crate::api::Portforward).
/// Service account must have `nodes/proxy` access, and
/// the debug handlers must be enabled either via `--enable-debugging-handlers` or in the [kubelet config](https://kubernetes.io/docs/reference/config-api/kubelet-config.v1beta1/#kubelet-config-k8s-io-v1beta1-KubeletConfiguration).
/// See the [kubelet source](https://github.com/kubernetes/kubernetes/blob/b3926d137cd2964cd3a04088ded30845910547b1/pkg/kubelet/server/server.go#L454), and [kubelet reference](https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/) for more info.
///
/// ## Warning
/// These methods require direct, and **insecure access** to `kubelet` and is only available under the `kubelet_debug` feature.
/// End-to-end usage is explored in the [pod_log_kubelet_debug](https://github.com/kube-rs/kube/blob/main/examples/pod_log_kubelet_debug.rs) example.
#[cfg(feature = "kubelet-debug")]
impl Client {
    /// Attach to pod directly from the node
    ///
    /// ## Warning
    /// This method uses the insecure `kubelet_debug` interface. See [`Api::attach`](crate::Api::attach) for the normal interface.
    pub async fn kubelet_node_attach(
        &self,
        kubelet_params: &KubeletDebugParams<'_>,
        container: &str,
        ap: &AttachParams,
    ) -> Result<AttachedProcess> {
        let mut req =
            Request::kubelet_node_attach(kubelet_params, container, ap).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("kubelet_node_attach");
        // Upgrade to a websocket-like stream and hand it to AttachedProcess
        let stream = self.connect(req).await?;
        Ok(AttachedProcess::new(stream, ap))
    }
    /// Execute a command in a pod directly from the node
    ///
    /// ## Warning
    /// This method uses the insecure `kubelet_debug` interface. See [`Api::exec`](crate::Api::exec) for the normal interface.
    pub async fn kubelet_node_exec<I, T>(
        &self,
        kubelet_params: &KubeletDebugParams<'_>,
        container: &str,
        command: I,
        ap: &AttachParams,
    ) -> Result<AttachedProcess>
    where
        I: IntoIterator<Item = T> + Debug,
        T: Into<String>,
    {
        let mut req = Request::kubelet_node_exec(kubelet_params, container, command, ap)
            .map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("kubelet_node_exec");
        let stream = self.connect(req).await?;
        Ok(AttachedProcess::new(stream, ap))
    }
    /// Forward ports of a pod directly from the node
    ///
    /// ## Warning
    /// This method uses the insecure `kubelet_debug` interface. See [`Api::portforward`](crate::Api::portforward) for the normal interface.
    pub async fn kubelet_node_portforward(
        &self,
        kubelet_params: &KubeletDebugParams<'_>,
        ports: &[u16],
    ) -> Result<Portforwarder> {
        let mut req =
            Request::kubelet_node_portforward(kubelet_params, ports).map_err(Error::BuildRequest)?;
        req.extensions_mut().insert("kubelet_node_portforward");
        let connection = self.connect(req).await?;
        Ok(Portforwarder::new(connection.into_stream(), ports))
    }
    /// Stream logs directly from node
    ///
    /// ## Warning
    /// This method uses the insecure `kubelet_debug` interface. See [`Api::log_stream`](crate::Api::log_stream) for the normal interface.
    pub async fn kubelet_node_logs(
        &self,
        kubelet_params: &KubeletDebugParams<'_>,
        container: &str,
        lp: &LogParams,
    ) -> Result<impl AsyncBufRead> {
        let mut req =
            Request::kubelet_node_logs(kubelet_params, container, lp).map_err(Error::BuildRequest)?;
        // NOTE(review): tag is "kubelet_node_log" (singular) while the method
        // is `kubelet_node_logs` — looks intentional but worth confirming
        // against whatever consumes these request extensions.
        req.extensions_mut().insert("kubelet_node_log");
        self.request_stream(req).await
    }
}

View File

@@ -0,0 +1,109 @@
//! Set base URI of requests.
use http::{uri, Request};
use tower::{Layer, Service};
/// Layer that applies [`BaseUri`] which makes all requests relative to the URI.
///
/// Path in the base URI is preserved.
#[derive(Debug, Clone)]
pub struct BaseUriLayer {
    // The cluster URL every request is joined onto
    base_uri: http::Uri,
}
impl BaseUriLayer {
    /// Set base URI of requests.
    pub fn new(base_uri: http::Uri) -> Self {
        Self { base_uri }
    }
}
impl<S> Layer<S> for BaseUriLayer {
    type Service = BaseUri<S>;
    fn layer(&self, inner: S) -> Self::Service {
        BaseUri {
            base_uri: self.base_uri.clone(),
            inner,
        }
    }
}
/// Middleware that sets base URI so that all requests are relative to it.
#[derive(Debug, Clone)]
pub struct BaseUri<S> {
    base_uri: http::Uri,
    inner: S,
}
impl<S, ReqBody> Service<Request<ReqBody>> for BaseUri<S>
where
    S: Service<Request<ReqBody>>,
{
    type Error = S::Error;
    type Future = S::Future;
    type Response = S::Response;
    fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> std::task::Poll<Result<(), Self::Error>> {
        // Pass-through readiness of the wrapped service
        self.inner.poll_ready(cx)
    }
    fn call(&mut self, req: Request<ReqBody>) -> Self::Future {
        // Rewrite the request uri: keep only its path+query, joined onto base
        let (mut parts, body) = req.into_parts();
        let req_pandq = parts.uri.path_and_query();
        parts.uri = set_base_uri(&self.base_uri, req_pandq);
        self.inner.call(Request::from_parts(parts, body))
    }
}
// Join base URI and Path+Query, preserving any path in the base.
fn set_base_uri(base_uri: &http::Uri, req_pandq: Option<&uri::PathAndQuery>) -> http::Uri {
    let mut builder = uri::Builder::new();
    if let Some(scheme) = base_uri.scheme() {
        builder = builder.scheme(scheme.as_str());
    }
    if let Some(authority) = base_uri.authority() {
        builder = builder.authority(authority.as_str());
    }
    builder = match (base_uri.path_and_query(), req_pandq) {
        (Some(base_pandq), Some(req)) => {
            // Remove any trailing slashes from the base path and join;
            // `PathAndQuery` always starts with a slash.
            let trimmed = base_pandq.path().trim_end_matches('/');
            builder.path_and_query(format!("{trimmed}{req}"))
        }
        (Some(base_pandq), None) => builder.path_and_query(base_pandq.as_str()),
        (None, Some(req)) => builder.path_and_query(req.as_str()),
        (None, None) => builder,
    };
    // Joining a valid Uri and valid PathAndQuery should result in a valid Uri.
    builder.build().expect("Valid Uri")
}
#[cfg(test)]
mod tests {
#[test]
fn normal_host() {
let base_uri = http::Uri::from_static("https://192.168.1.65:8443");
let apipath = http::Uri::from_static("/api/v1/nodes?hi=yes");
let pandq = apipath.path_and_query();
assert_eq!(
super::set_base_uri(&base_uri, pandq),
"https://192.168.1.65:8443/api/v1/nodes?hi=yes"
);
}
#[test]
fn rancher_host() {
// in rancher, kubernetes server names are not hostnames, but a host with a path:
let base_uri = http::Uri::from_static("https://example.com/foo/bar");
let api_path = http::Uri::from_static("/api/v1/nodes?hi=yes");
let pandq = api_path.path_and_query();
assert_eq!(
super::set_base_uri(&base_uri, pandq),
"https://example.com/foo/bar/api/v1/nodes?hi=yes"
);
}
}

View File

@@ -0,0 +1,46 @@
use std::sync::Arc;
use http::{header::HeaderName, request::Request, HeaderValue};
use tower::{Layer, Service};
#[derive(Clone)]
/// Layer that adds a static set of extra headers to each request
pub struct ExtraHeadersLayer {
    // Shared, immutable header list; Arc keeps layer clones cheap
    pub(crate) headers: Arc<Vec<(HeaderName, HeaderValue)>>,
}
impl<S> Layer<S> for ExtraHeadersLayer {
    type Service = ExtraHeaders<S>;
    fn layer(&self, inner: S) -> Self::Service {
        ExtraHeaders {
            inner,
            headers: self.headers.clone(),
        }
    }
}
#[derive(Clone)]
/// Service that adds a static set of extra headers to each request
pub struct ExtraHeaders<S> {
    // Wrapped service that performs the actual request.
    inner: S,
    // Headers appended to every outgoing request (shared with the layer).
    headers: Arc<Vec<(HeaderName, HeaderValue)>>,
}
impl<S, ReqBody> Service<Request<ReqBody>> for ExtraHeaders<S>
where
    S: Service<Request<ReqBody>>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> std::task::Poll<Result<(), Self::Error>> {
        // Readiness is determined entirely by the wrapped service.
        self.inner.poll_ready(cx)
    }

    fn call(&mut self, mut request: Request<ReqBody>) -> Self::Future {
        // Add the configured headers to the request before delegating.
        let extra = self.headers.iter().cloned();
        request.headers_mut().extend(extra);
        self.inner.call(request)
    }
}

View File

@@ -0,0 +1,98 @@
//! Middleware types returned from `ConfigExt` methods.
use tower::{filter::AsyncFilterLayer, util::Either, Layer};
pub(crate) use tower_http::auth::AddAuthorizationLayer;
mod base_uri;
mod extra_headers;
pub use base_uri::{BaseUri, BaseUriLayer};
pub use extra_headers::{ExtraHeaders, ExtraHeadersLayer};
use super::auth::RefreshableToken;
/// Layer to set up `Authorization` header depending on the config.
///
/// Holds either a static [`AddAuthorizationLayer`] or an
/// [`AsyncFilterLayer`] around a [`RefreshableToken`].
pub struct AuthLayer(pub(crate) Either<AddAuthorizationLayer, AsyncFilterLayer<RefreshableToken>>);
impl<S> Layer<S> for AuthLayer {
    // The service type mirrors whichever of the two layer variants is held.
    type Service = Either<
        <AddAuthorizationLayer as Layer<S>>::Service,
        <AsyncFilterLayer<RefreshableToken> as Layer<S>>::Service,
    >;
    fn layer(&self, inner: S) -> Self::Service {
        // Delegate wrapping to the inner `Either` of layers.
        self.0.layer(inner)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::{matches, pin::pin, sync::Arc};
    use chrono::{Duration, Utc};
    use http::{header::AUTHORIZATION, HeaderValue, Request, Response};
    use secrecy::SecretString;
    use tokio::sync::Mutex;
    use tokio_test::assert_ready_ok;
    use tower::filter::AsyncFilterLayer;
    use tower_test::{mock, mock::Handle};
    use crate::{
        client::{AuthError, Body},
        config::AuthInfo,
    };
    // A request passing through the auth filter must carry
    // `Authorization: Bearer <token>` when the token is a valid header value.
    #[tokio::test(flavor = "current_thread")]
    async fn valid_token() {
        const TOKEN: &str = "test";
        let auth = test_token(TOKEN.into());
        let (mut service, handle): (_, Handle<Request<Body>, Response<Body>>) =
            mock::spawn_layer(AsyncFilterLayer::new(auth));
        // Server half of the mock: inspect the decorated request, then reply.
        let spawned = tokio::spawn(async move {
            // Receive the requests and respond
            let mut handle = pin!(handle);
            let (request, send) = handle.next_request().await.expect("service not called");
            assert_eq!(
                request.headers().get(AUTHORIZATION).unwrap(),
                HeaderValue::try_from(format!("Bearer {TOKEN}")).unwrap()
            );
            send.send_response(Response::builder().body(Body::empty()).unwrap());
        });
        assert_ready_ok!(service.poll_ready());
        service
            .call(Request::builder().uri("/").body(Body::empty()).unwrap())
            .await
            .unwrap();
        spawned.await.unwrap();
    }
    // A token that cannot be encoded as a header value ("\n") must surface as
    // `AuthError::InvalidBearerToken` instead of being sent.
    #[tokio::test(flavor = "current_thread")]
    async fn invalid_token() {
        const TOKEN: &str = "\n";
        let auth = test_token(TOKEN.into());
        let (mut service, _handle) =
            mock::spawn_layer::<Request<Body>, Response<Body>, _>(AsyncFilterLayer::new(auth));
        let err = service
            .call(Request::builder().uri("/").body(Body::empty()).unwrap())
            .await
            .unwrap_err();
        assert!(err.is::<AuthError>());
        assert!(matches!(
            *err.downcast::<AuthError>().unwrap(),
            AuthError::InvalidBearerToken(_)
        ));
    }
    // Helper: build a `RefreshableToken` that stays valid for one hour.
    fn test_token(token: String) -> RefreshableToken {
        let expiry = Utc::now() + Duration::try_seconds(60 * 60).unwrap();
        let secret_token = SecretString::from(token);
        let info = AuthInfo {
            token: Some(secret_token.clone()),
            ..Default::default()
        };
        RefreshableToken::Exec(Arc::new(Mutex::new((secret_token, expiry, info))))
    }
}

572
vendor/kube-client/src/client/mod.rs vendored Normal file
View File

@@ -0,0 +1,572 @@
//! API client for interacting with the Kubernetes API
//!
//! The [`Client`] uses standard kube error handling.
//!
//! This client can be used on its own or in conjunction with the [`Api`][crate::api::Api]
//! type for more structured interaction with the kubernetes API.
//!
//! The [`Client`] can also be used with [`Discovery`](crate::Discovery) to dynamically
//! retrieve the resources served by the kubernetes API.
use chrono::{DateTime, Utc};
use either::{Either, Left, Right};
use futures::{future::BoxFuture, AsyncBufRead, StreamExt, TryStream, TryStreamExt};
use http::{self, Request, Response};
use http_body_util::BodyExt;
#[cfg(feature = "ws")] use hyper_util::rt::TokioIo;
use k8s_openapi::apimachinery::pkg::apis::meta::v1 as k8s_meta_v1;
pub use kube_core::response::Status;
use serde::de::DeserializeOwned;
use serde_json::{self, Value};
#[cfg(feature = "ws")]
use tokio_tungstenite::{tungstenite as ws, WebSocketStream};
use tokio_util::{
codec::{FramedRead, LinesCodec, LinesCodecError},
io::StreamReader,
};
use tower::{buffer::Buffer, util::BoxService, BoxError, Layer, Service, ServiceExt};
use tower_http::map_response_body::MapResponseBodyLayer;
pub use self::body::Body;
use crate::{api::WatchEvent, error::ErrorResponse, Config, Error, Result};
mod auth;
mod body;
mod builder;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-client")))]
#[cfg(feature = "unstable-client")]
mod client_ext;
#[cfg_attr(docsrs, doc(cfg(feature = "unstable-client")))]
#[cfg(feature = "unstable-client")]
pub use client_ext::scope;
mod config_ext;
pub use auth::Error as AuthError;
pub use config_ext::ConfigExt;
pub mod middleware;
#[cfg(any(feature = "rustls-tls", feature = "openssl-tls"))] mod tls;
#[cfg(feature = "openssl-tls")]
pub use tls::openssl_tls::Error as OpensslTlsError;
#[cfg(feature = "rustls-tls")] pub use tls::rustls_tls::Error as RustlsTlsError;
#[cfg(feature = "ws")] mod upgrade;
#[cfg(feature = "oauth")]
#[cfg_attr(docsrs, doc(cfg(feature = "oauth")))]
pub use auth::OAuthError;
#[cfg(feature = "oidc")]
#[cfg_attr(docsrs, doc(cfg(feature = "oidc")))]
pub use auth::oidc_errors;
#[cfg(feature = "ws")] pub use upgrade::UpgradeConnectionError;
#[cfg(feature = "kubelet-debug")]
#[cfg_attr(docsrs, doc(cfg(feature = "kubelet-debug")))]
mod kubelet_debug;
pub use builder::{ClientBuilder, DynBody};
/// Client for connecting with a Kubernetes cluster.
///
/// The easiest way to instantiate the client is either by
/// inferring the configuration from the environment using
/// [`Client::try_default`] or with an existing [`Config`]
/// using [`Client::try_from`].
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
#[derive(Clone)]
pub struct Client {
    // - `Buffer` for cheap clone
    // - `BoxFuture` for dynamic response future type
    inner: Buffer<Request<Body>, BoxFuture<'static, Result<Response<Body>, BoxError>>>,
    // Namespace used by namespaced API calls when none is specified.
    default_ns: String,
    // Optional expiry timestamp; see `Client::valid_until`.
    valid_until: Option<DateTime<Utc>>,
}
/// Represents a WebSocket connection.
/// Value returned by [`Client::connect`].
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
pub struct Connection {
    // Upgraded HTTP connection wrapped as a WebSocket stream.
    stream: WebSocketStream<TokioIo<hyper::upgrade::Upgraded>>,
    // Subprotocol negotiated during the upgrade handshake.
    protocol: upgrade::StreamProtocol,
}
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
impl Connection {
    /// Return true if the stream supports graceful close signaling.
    pub fn supports_stream_close(&self) -> bool {
        self.protocol.supports_stream_close()
    }

    /// Consume the connection and return the raw WebSocketStream.
    pub fn into_stream(self) -> WebSocketStream<TokioIo<hyper::upgrade::Upgraded>> {
        let Self { stream, .. } = self;
        stream
    }
}
/// Constructors and low-level api interfaces.
///
/// Most users only need [`Client::try_default`] or [`Client::new`] from this block.
///
/// The many various lower level interfaces here are for more advanced use-cases with specific requirements.
impl Client {
    /// Create a [`Client`] using a custom `Service` stack.
    ///
    /// [`ConfigExt`](crate::client::ConfigExt) provides extensions for
    /// building a custom stack.
    ///
    /// To create with the default stack with a [`Config`], use
    /// [`Client::try_from`].
    ///
    /// To create with the default stack with an inferred [`Config`], use
    /// [`Client::try_default`].
    ///
    /// # Example
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// use kube::{client::ConfigExt, Client, Config};
    /// use tower::{BoxError, ServiceBuilder};
    /// use hyper_util::rt::TokioExecutor;
    ///
    /// let config = Config::infer().await?;
    /// let service = ServiceBuilder::new()
    ///     .layer(config.base_uri_layer())
    ///     .option_layer(config.auth_layer()?)
    ///     .map_err(BoxError::from)
    ///     .service(hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build_http());
    /// let client = Client::new(service, config.default_namespace);
    /// # Ok(())
    /// # }
    /// ```
    pub fn new<S, B, T>(service: S, default_namespace: T) -> Self
    where
        S: Service<Request<Body>, Response = Response<B>> + Send + 'static,
        S::Future: Send + 'static,
        S::Error: Into<BoxError>,
        B: http_body::Body<Data = bytes::Bytes> + Send + 'static,
        B::Error: Into<BoxError>,
        T: Into<String>,
    {
        // Transform response body to `crate::client::Body` and use type erased error to avoid type parameters.
        let service = MapResponseBodyLayer::new(Body::wrap_body)
            .layer(service)
            .map_err(|e| e.into());
        Self {
            // `Buffer` bound of 1024 queued requests; it also makes `Client` cheap to clone.
            inner: Buffer::new(BoxService::new(service), 1024),
            default_ns: default_namespace.into(),
            // No expiry by default; opt in via `with_valid_until`.
            valid_until: None,
        }
    }
    /// Sets an expiration timestamp to the client, which has to be checked by the user using [`Client::valid_until`] function.
    pub fn with_valid_until(self, valid_until: Option<DateTime<Utc>>) -> Self {
        Client { valid_until, ..self }
    }
    /// Get the expiration timestamp of the client, if it has been set.
    pub fn valid_until(&self) -> &Option<DateTime<Utc>> {
        &self.valid_until
    }
    /// Create and initialize a [`Client`] using the inferred configuration.
    ///
    /// Will use [`Config::infer`] which attempts to load the local kubeconfig first,
    /// and then if that fails, trying the in-cluster environment variables.
    ///
    /// Will fail if neither configuration could be loaded.
    ///
    /// ```rust
    /// # async fn doc() -> Result<(), Box<dyn std::error::Error>> {
    /// # use kube::Client;
    /// let client = Client::try_default().await?;
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// If you already have a [`Config`] then use [`Client::try_from`](Self::try_from)
    /// instead.
    pub async fn try_default() -> Result<Self> {
        Self::try_from(Config::infer().await.map_err(Error::InferConfig)?)
    }
    /// Get the default namespace for the client
    ///
    /// The namespace is either configured on `context` in the kubeconfig,
    /// falls back to `default` when running locally,
    /// or uses the service account's namespace when deployed in-cluster.
    pub fn default_namespace(&self) -> &str {
        &self.default_ns
    }
    /// Perform a raw HTTP request against the API and return the raw response back.
    /// This method can be used to get raw access to the API which may be used to, for example,
    /// create a proxy server or application-level gateway between localhost and the API server.
    pub async fn send(&self, request: Request<Body>) -> Result<Response<Body>> {
        let mut svc = self.inner.clone();
        let res = svc
            .ready()
            .await
            .map_err(Error::Service)?
            .call(request)
            .await
            .map_err(|err| {
                // `Buffer` boxes middleware errors; try the known concrete types first.
                // Error decorating request
                err.downcast::<Error>()
                    .map(|e| *e)
                    // Error requesting
                    .or_else(|err| err.downcast::<hyper::Error>().map(|err| Error::HyperError(*err)))
                    // Error from another middleware
                    .unwrap_or_else(Error::Service)
            })?;
        Ok(res)
    }
    /// Make WebSocket connection.
    #[cfg(feature = "ws")]
    #[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
    pub async fn connect(&self, request: Request<Vec<u8>>) -> Result<Connection> {
        use http::header::HeaderValue;
        let (mut parts, body) = request.into_parts();
        // Standard WebSocket (RFC 6455) upgrade headers.
        parts
            .headers
            .insert(http::header::CONNECTION, HeaderValue::from_static("Upgrade"));
        parts
            .headers
            .insert(http::header::UPGRADE, HeaderValue::from_static("websocket"));
        parts.headers.insert(
            http::header::SEC_WEBSOCKET_VERSION,
            HeaderValue::from_static("13"),
        );
        // Random nonce; the server must echo a derived accept key (verified below).
        let key = tokio_tungstenite::tungstenite::handshake::client::generate_key();
        parts.headers.insert(
            http::header::SEC_WEBSOCKET_KEY,
            key.parse().expect("valid header value"),
        );
        // Advertise the channel subprotocols we support (v5 preferred, then v4).
        upgrade::StreamProtocol::add_to_headers(&mut parts.headers)?;
        let res = self.send(Request::from_parts(parts, Body::from(body))).await?;
        let protocol = upgrade::verify_response(&res, &key).map_err(Error::UpgradeConnection)?;
        match hyper::upgrade::on(res).await {
            Ok(upgraded) => Ok(Connection {
                stream: WebSocketStream::from_raw_socket(
                    TokioIo::new(upgraded),
                    ws::protocol::Role::Client,
                    None,
                )
                .await,
                protocol,
            }),
            Err(e) => Err(Error::UpgradeConnection(
                UpgradeConnectionError::GetPendingUpgrade(e),
            )),
        }
    }
    /// Perform a raw HTTP request against the API and deserialize the response
    /// as JSON to some known type.
    pub async fn request<T>(&self, request: Request<Vec<u8>>) -> Result<T>
    where
        T: DeserializeOwned,
    {
        let text = self.request_text(request).await?;
        serde_json::from_str(&text).map_err(|e| {
            // Log the offending payload to aid debugging deserialization failures.
            tracing::warn!("{}, {:?}", text, e);
            Error::SerdeError(e)
        })
    }
    /// Perform a raw HTTP request against the API and get back the response
    /// as a string
    pub async fn request_text(&self, request: Request<Vec<u8>>) -> Result<String> {
        let res = self.send(request.map(Body::from)).await?;
        // Convert HTTP 4xx/5xx into `Error::Api` before reading the body as text.
        let res = handle_api_errors(res).await?;
        let body_bytes = res.into_body().collect().await?.to_bytes();
        let text = String::from_utf8(body_bytes.to_vec()).map_err(Error::FromUtf8)?;
        Ok(text)
    }
    /// Perform a raw HTTP request against the API and stream the response body.
    ///
    /// The response can be processed using [`AsyncReadExt`](futures::AsyncReadExt)
    /// and [`AsyncBufReadExt`](futures::AsyncBufReadExt).
    pub async fn request_stream(&self, request: Request<Vec<u8>>) -> Result<impl AsyncBufRead> {
        let res = self.send(request.map(Body::from)).await?;
        let res = handle_api_errors(res).await?;
        // Map the error, since we want to convert this into an `AsyncBufReader` using
        // `into_async_read` which specifies `std::io::Error` as the stream's error type.
        let body = res.into_body().into_data_stream().map_err(std::io::Error::other);
        Ok(body.into_async_read())
    }
    /// Perform a raw HTTP request against the API and get back either an object
    /// deserialized as JSON or a [`Status`] Object.
    pub async fn request_status<T>(&self, request: Request<Vec<u8>>) -> Result<Either<T, Status>>
    where
        T: DeserializeOwned,
    {
        let text = self.request_text(request).await?;
        // It needs to be JSON:
        let v: Value = serde_json::from_str(&text).map_err(Error::SerdeError)?;
        // The apiserver can answer with a `Status` object instead of the
        // requested kind (e.g. on deletes); disambiguate on the `kind` field.
        if v["kind"] == "Status" {
            tracing::trace!("Status from {}", text);
            Ok(Right(serde_json::from_str::<Status>(&text).map_err(|e| {
                tracing::warn!("{}, {:?}", text, e);
                Error::SerdeError(e)
            })?))
        } else {
            Ok(Left(serde_json::from_str::<T>(&text).map_err(|e| {
                tracing::warn!("{}, {:?}", text, e);
                Error::SerdeError(e)
            })?))
        }
    }
    /// Perform a raw request and get back a stream of [`WatchEvent`] objects
    pub async fn request_events<T>(
        &self,
        request: Request<Vec<u8>>,
    ) -> Result<impl TryStream<Item = Result<WatchEvent<T>>>>
    where
        T: Clone + DeserializeOwned,
    {
        let res = self.send(request.map(Body::from)).await?;
        // trace!("Streaming from {} -> {}", res.url(), res.status().as_str());
        tracing::trace!("headers: {:?}", res.headers());
        // Watch responses are newline-delimited JSON; split the byte stream into lines.
        let frames = FramedRead::new(
            StreamReader::new(res.into_body().into_data_stream().map_err(|e| {
                // Unexpected EOF from chunked decoder.
                // Tends to happen when watching for 300+s. This will be ignored.
                if e.to_string().contains("unexpected EOF during chunk") {
                    return std::io::Error::new(std::io::ErrorKind::UnexpectedEof, e);
                }
                std::io::Error::other(e)
            })),
            LinesCodec::new(),
        );
        Ok(frames.filter_map(|res| async {
            match res {
                Ok(line) => match serde_json::from_str::<WatchEvent<T>>(&line) {
                    Ok(event) => Some(Ok(event)),
                    Err(e) => {
                        // Ignore EOF error that can happen for incomplete line from `decode_eof`.
                        if e.is_eof() {
                            return None;
                        }
                        // Got general error response
                        if let Ok(e_resp) = serde_json::from_str::<ErrorResponse>(&line) {
                            return Some(Err(Error::Api(e_resp)));
                        }
                        // Parsing error
                        Some(Err(Error::SerdeError(e)))
                    }
                },
                Err(LinesCodecError::Io(e)) => match e.kind() {
                    // Client timeout
                    std::io::ErrorKind::TimedOut => {
                        tracing::warn!("timeout in poll: {}", e); // our client timeout
                        None
                    }
                    // Unexpected EOF from chunked decoder.
                    // Tends to happen after 300+s of watching.
                    std::io::ErrorKind::UnexpectedEof => {
                        tracing::warn!("eof in poll: {}", e);
                        None
                    }
                    _ => Some(Err(Error::ReadEvents(e))),
                },
                // Reached the maximum line length without finding a newline.
                // This should never happen because we're using the default `usize::MAX`.
                Err(LinesCodecError::MaxLineLengthExceeded) => {
                    Some(Err(Error::LinesCodecMaxLineLengthExceeded))
                }
            }
        }))
    }
}
/// Low level discovery methods using `k8s_openapi` types.
///
/// Consider using the [`discovery`](crate::discovery) module for
/// easier-to-use variants of this functionality.
/// The following methods might be deprecated to avoid confusion between similarly named types within `discovery`.
impl Client {
/// Returns apiserver version.
pub async fn apiserver_version(&self) -> Result<k8s_openapi::apimachinery::pkg::version::Info> {
self.request(
Request::builder()
.uri("/version")
.body(vec![])
.map_err(Error::HttpError)?,
)
.await
}
/// Lists api groups that apiserver serves.
pub async fn list_api_groups(&self) -> Result<k8s_meta_v1::APIGroupList> {
self.request(
Request::builder()
.uri("/apis")
.body(vec![])
.map_err(Error::HttpError)?,
)
.await
}
/// Lists resources served in given API group.
///
/// ### Example usage:
/// ```rust
/// # async fn scope(client: kube::Client) -> Result<(), Box<dyn std::error::Error>> {
/// let apigroups = client.list_api_groups().await?;
/// for g in apigroups.groups {
/// let ver = g
/// .preferred_version
/// .as_ref()
/// .or_else(|| g.versions.first())
/// .expect("preferred or versions exists");
/// let apis = client.list_api_group_resources(&ver.group_version).await?;
/// dbg!(apis);
/// }
/// # Ok(())
/// # }
/// ```
pub async fn list_api_group_resources(&self, apiversion: &str) -> Result<k8s_meta_v1::APIResourceList> {
let url = format!("/apis/{apiversion}");
self.request(
Request::builder()
.uri(url)
.body(vec![])
.map_err(Error::HttpError)?,
)
.await
}
/// Lists versions of `core` a.k.a. `""` legacy API group.
pub async fn list_core_api_versions(&self) -> Result<k8s_meta_v1::APIVersions> {
self.request(
Request::builder()
.uri("/api")
.body(vec![])
.map_err(Error::HttpError)?,
)
.await
}
/// Lists resources served in particular `core` group version.
pub async fn list_core_api_resources(&self, version: &str) -> Result<k8s_meta_v1::APIResourceList> {
let url = format!("/api/{version}");
self.request(
Request::builder()
.uri(url)
.body(vec![])
.map_err(Error::HttpError)?,
)
.await
}
}
/// Kubernetes returned error handling
///
/// Either kube returned an explicit ApiError struct,
/// or it someohow returned something we couldn't parse as one.
///
/// In either case, present an ApiError upstream.
/// The latter is probably a bug if encountered.
async fn handle_api_errors(res: Response<Body>) -> Result<Response<Body>> {
let status = res.status();
if status.is_client_error() || status.is_server_error() {
// trace!("Status = {:?} for {}", status, res.url());
let body_bytes = res.into_body().collect().await?.to_bytes();
let text = String::from_utf8(body_bytes.to_vec()).map_err(Error::FromUtf8)?;
// Print better debug when things do fail
// trace!("Parsing error: {}", text);
if let Ok(errdata) = serde_json::from_str::<ErrorResponse>(&text) {
tracing::debug!("Unsuccessful: {errdata:?}");
Err(Error::Api(errdata))
} else {
tracing::warn!("Unsuccessful data error parse: {}", text);
let error_response = ErrorResponse {
status: status.to_string(),
code: status.as_u16(),
message: format!("{text:?}"),
reason: "Failed to parse error data".into(),
};
tracing::debug!("Unsuccessful: {error_response:?} (reconstruct)");
Err(Error::Api(error_response))
}
} else {
Ok(res)
}
}
impl TryFrom<Config> for Client {
    type Error = Error;

    /// Builds a default [`Client`] from a [`Config`].
    ///
    /// See [`ClientBuilder`] or [`Client::new`] if more customization is required
    fn try_from(config: Config) -> Result<Self> {
        let builder = ClientBuilder::try_from(config)?;
        Ok(builder.build())
    }
}
#[cfg(test)]
mod tests {
    use std::pin::pin;
    use crate::{client::Body, Api, Client};
    use http::{Request, Response};
    use k8s_openapi::api::core::v1::Pod;
    use tower_test::mock;
    // The namespace passed to `Client::new` is exposed via `default_namespace`.
    #[tokio::test]
    async fn test_default_ns() {
        let (mock_service, _) = mock::pair::<Request<Body>, Response<Body>>();
        let client = Client::new(mock_service, "test-namespace");
        assert_eq!(client.default_namespace(), "test-namespace");
    }
    // End-to-end through `Api::get` with a mocked service: the client must issue
    // a GET to the expected pod URL and deserialize the mocked JSON response.
    #[tokio::test]
    async fn test_mock() {
        let (mock_service, handle) = mock::pair::<Request<Body>, Response<Body>>();
        let spawned = tokio::spawn(async move {
            // Receive a request for pod and respond with some data
            let mut handle = pin!(handle);
            let (request, send) = handle.next_request().await.expect("service not called");
            assert_eq!(request.method(), http::Method::GET);
            assert_eq!(request.uri().to_string(), "/api/v1/namespaces/default/pods/test");
            let pod: Pod = serde_json::from_value(serde_json::json!({
                "apiVersion": "v1",
                "kind": "Pod",
                "metadata": {
                    "name": "test",
                    "annotations": { "kube-rs": "test" },
                },
                "spec": {
                    "containers": [{ "name": "test", "image": "test-image" }],
                }
            }))
            .unwrap();
            send.send_response(
                Response::builder()
                    .body(Body::from(serde_json::to_vec(&pod).unwrap()))
                    .unwrap(),
            );
        });
        let pods: Api<Pod> = Api::default_namespaced(Client::new(mock_service, "default"));
        let pod = pods.get("test").await.unwrap();
        assert_eq!(pod.metadata.annotations.unwrap().get("kube-rs").unwrap(), "test");
        spawned.await.unwrap();
    }
}

281
vendor/kube-client/src/client/tls.rs vendored Normal file
View File

@@ -0,0 +1,281 @@
#[cfg(feature = "rustls-tls")]
pub mod rustls_tls {
    use hyper_rustls::ConfigBuilderExt;
    use rustls::{
        self,
        client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier},
        pki_types::{CertificateDer, InvalidDnsNameError, PrivateKeyDer, ServerName},
        ClientConfig, DigitallySignedStruct,
    };
    use thiserror::Error;
    /// Errors from Rustls
    #[derive(Debug, Error)]
    pub enum Error {
        /// Identity PEM is invalid
        #[error("identity PEM is invalid: {0}")]
        InvalidIdentityPem(#[source] rustls::pki_types::pem::Error),
        /// Identity PEM is missing a private key: the key must be PKCS8 or RSA/PKCS1
        #[error("identity PEM is missing a private key: the key must be PKCS8 or RSA/PKCS1")]
        MissingPrivateKey,
        /// Identity PEM is missing certificate
        #[error("identity PEM is missing certificate")]
        MissingCertificate,
        /// Invalid private key
        #[error("invalid private key: {0}")]
        InvalidPrivateKey(#[source] rustls::Error),
        /// Unknown private key format
        #[error("unknown private key format")]
        UnknownPrivateKeyFormat,
        // Using type-erased error to avoid depending on webpki
        /// Failed to add a root certificate
        #[error("failed to add a root certificate: {0}")]
        AddRootCertificate(#[source] Box<dyn std::error::Error + Send + Sync>),
        /// No valid native root CA certificates found
        #[error("no valid native root CA certificates found")]
        NoValidNativeRootCA(#[source] std::io::Error),
        /// Invalid server name
        #[error("invalid server name: {0}")]
        InvalidServerName(#[source] InvalidDnsNameError),
    }
    /// Create `rustls::ClientConfig`.
    ///
    /// - `identity_pem`: optional PEM bundle containing the client certificate chain
    ///   and its private key (for mTLS client auth).
    /// - `root_certs`: optional DER-encoded root certificates; when absent, falls back
    ///   to webpki or native roots depending on the `webpki-roots` feature.
    /// - `accept_invalid`: when true, installs a verifier that accepts ANY server cert.
    pub fn rustls_client_config(
        identity_pem: Option<&[u8]>,
        root_certs: Option<&[Vec<u8>]>,
        accept_invalid: bool,
    ) -> Result<ClientConfig, Error> {
        let config_builder = if let Some(certs) = root_certs {
            // Explicit roots from the kube config take precedence.
            ClientConfig::builder().with_root_certificates(root_store(certs)?)
        } else {
            #[cfg(feature = "webpki-roots")]
            {
                // Use WebPKI roots.
                ClientConfig::builder().with_webpki_roots()
            }
            #[cfg(not(feature = "webpki-roots"))]
            {
                // Use native roots. This will panic on Android and iOS.
                ClientConfig::builder()
                    .with_native_roots()
                    .map_err(Error::NoValidNativeRootCA)?
            }
        };
        // Attach client auth only when an identity PEM was supplied.
        let mut client_config = if let Some((chain, pkey)) = identity_pem.map(client_auth).transpose()? {
            config_builder
                .with_client_auth_cert(chain, pkey)
                .map_err(Error::InvalidPrivateKey)?
        } else {
            config_builder.with_no_client_auth()
        };
        if accept_invalid {
            // Disables all server certificate verification — insecure by design.
            client_config
                .dangerous()
                .set_certificate_verifier(std::sync::Arc::new(NoCertificateVerification {}));
        }
        Ok(client_config)
    }
    // Build a root store from DER-encoded certificates.
    fn root_store(root_certs: &[Vec<u8>]) -> Result<rustls::RootCertStore, Error> {
        let mut root_store = rustls::RootCertStore::empty();
        for der in root_certs {
            root_store
                .add(CertificateDer::from(der.to_owned()))
                .map_err(|e| Error::AddRootCertificate(Box::new(e)))?;
        }
        Ok(root_store)
    }
    // Parse a PEM bundle into (certificate chain, private key) for client auth.
    // Preference order when multiple keys are present: PKCS8, then PKCS1, then SEC1.
    fn client_auth(data: &[u8]) -> Result<(Vec<CertificateDer<'static>>, PrivateKeyDer<'static>), Error> {
        use rustls::pki_types::pem::{self, SectionKind};
        let mut cert_chain = Vec::new();
        let mut pkcs8_key = None;
        let mut pkcs1_key = None;
        let mut sec1_key = None;
        let mut reader = std::io::Cursor::new(data);
        while let Some((kind, der)) = pem::from_buf(&mut reader).map_err(Error::InvalidIdentityPem)? {
            match kind {
                SectionKind::Certificate => cert_chain.push(der.into()),
                SectionKind::PrivateKey => pkcs8_key = Some(PrivateKeyDer::Pkcs8(der.into())),
                SectionKind::RsaPrivateKey => pkcs1_key = Some(PrivateKeyDer::Pkcs1(der.into())),
                SectionKind::EcPrivateKey => sec1_key = Some(PrivateKeyDer::Sec1(der.into())),
                // Any other PEM section (e.g. CRLs) is rejected outright.
                _ => return Err(Error::UnknownPrivateKeyFormat),
            }
        }
        let private_key = pkcs8_key
            .or(pkcs1_key)
            .or(sec1_key)
            .ok_or(Error::MissingPrivateKey)?;
        if cert_chain.is_empty() {
            return Err(Error::MissingCertificate);
        }
        Ok((cert_chain, private_key))
    }
    // Verifier that unconditionally accepts any server certificate.
    // Installed only when `accept_invalid` is requested.
    #[derive(Debug)]
    struct NoCertificateVerification {}
    impl ServerCertVerifier for NoCertificateVerification {
        fn verify_server_cert(
            &self,
            _end_entity: &CertificateDer,
            _intermediates: &[CertificateDer],
            _server_name: &ServerName,
            _ocsp_response: &[u8],
            _now: rustls::pki_types::UnixTime,
        ) -> Result<ServerCertVerified, rustls::Error> {
            tracing::warn!("Server cert bypassed");
            Ok(ServerCertVerified::assertion())
        }
        fn verify_tls13_signature(
            &self,
            _message: &[u8],
            _cert: &CertificateDer,
            _dss: &DigitallySignedStruct,
        ) -> Result<HandshakeSignatureValid, rustls::Error> {
            Ok(HandshakeSignatureValid::assertion())
        }
        fn verify_tls12_signature(
            &self,
            _message: &[u8],
            _cert: &CertificateDer,
            _dss: &DigitallySignedStruct,
        ) -> Result<HandshakeSignatureValid, rustls::Error> {
            Ok(HandshakeSignatureValid::assertion())
        }
        fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
            use rustls::SignatureScheme;
            vec![
                SignatureScheme::RSA_PKCS1_SHA1,
                SignatureScheme::ECDSA_SHA1_Legacy,
                SignatureScheme::RSA_PKCS1_SHA256,
                SignatureScheme::ECDSA_NISTP256_SHA256,
                SignatureScheme::RSA_PKCS1_SHA384,
                SignatureScheme::ECDSA_NISTP384_SHA384,
                SignatureScheme::RSA_PKCS1_SHA512,
                SignatureScheme::ECDSA_NISTP521_SHA512,
                SignatureScheme::RSA_PSS_SHA256,
                SignatureScheme::RSA_PSS_SHA384,
                SignatureScheme::RSA_PSS_SHA512,
                SignatureScheme::ED25519,
                SignatureScheme::ED448,
            ]
        }
    }
}
#[cfg(feature = "openssl-tls")]
pub mod openssl_tls {
    use openssl::{
        pkey::PKey,
        ssl::{SslConnector, SslConnectorBuilder, SslMethod},
        x509::X509,
    };
    use thiserror::Error;

    /// Errors from OpenSSL TLS
    #[derive(Debug, Error)]
    pub enum Error {
        /// Failed to create OpenSSL HTTPS connector
        #[error("failed to create OpenSSL HTTPS connector: {0}")]
        CreateHttpsConnector(#[source] openssl::error::ErrorStack),
        /// Failed to create OpenSSL SSL connector
        #[error("failed to create OpenSSL SSL connector: {0}")]
        CreateSslConnector(#[source] SslConnectorError),
    }

    /// Errors from creating a `SslConnectorBuilder`
    #[derive(Debug, Error)]
    pub enum SslConnectorError {
        /// Failed to build SslConnectorBuilder
        #[error("failed to build SslConnectorBuilder: {0}")]
        CreateBuilder(#[source] openssl::error::ErrorStack),
        /// Failed to deserialize PEM-encoded chain of certificates
        #[error("failed to deserialize PEM-encoded chain of certificates: {0}")]
        DeserializeCertificateChain(#[source] openssl::error::ErrorStack),
        /// Failed to deserialize PEM-encoded private key
        #[error("failed to deserialize PEM-encoded private key: {0}")]
        DeserializePrivateKey(#[source] openssl::error::ErrorStack),
        /// Failed to set private key
        #[error("failed to set private key: {0}")]
        SetPrivateKey(#[source] openssl::error::ErrorStack),
        /// Failed to get a leaf certificate, the certificate chain is empty
        #[error("failed to get a leaf certificate, the certificate chain is empty")]
        GetLeafCertificate,
        /// Failed to set the leaf certificate
        #[error("failed to set the leaf certificate: {0}")]
        SetLeafCertificate(#[source] openssl::error::ErrorStack),
        /// Failed to append a certificate to the chain
        #[error("failed to append a certificate to the chain: {0}")]
        AppendCertificate(#[source] openssl::error::ErrorStack),
        /// Failed to deserialize DER-encoded root certificate
        #[error("failed to deserialize DER-encoded root certificate: {0}")]
        DeserializeRootCertificate(#[source] openssl::error::ErrorStack),
        /// Failed to add a root certificate
        #[error("failed to add a root certificate: {0}")]
        AddRootCertificate(#[source] openssl::error::ErrorStack),
    }

    /// Create `openssl::ssl::SslConnectorBuilder` required for `hyper_openssl::HttpsConnector`.
    ///
    /// - `identity_pem`: optional PEM bundle holding the client certificate chain
    ///   (leaf first) and its private key.
    /// - `root_certs`: optional DER-encoded roots added to the verification store.
    pub fn ssl_connector_builder(
        identity_pem: Option<&Vec<u8>>,
        root_certs: Option<&Vec<Vec<u8>>>,
    ) -> Result<SslConnectorBuilder, SslConnectorError> {
        let mut builder =
            SslConnector::builder(SslMethod::tls()).map_err(SslConnectorError::CreateBuilder)?;

        // Client identity: the first PEM certificate is the leaf, the rest
        // are appended as the intermediate chain; the key comes from the same PEM.
        if let Some(pem) = identity_pem {
            let mut certs = X509::stack_from_pem(pem)
                .map_err(SslConnectorError::DeserializeCertificateChain)?;
            if certs.is_empty() {
                return Err(SslConnectorError::GetLeafCertificate);
            }
            let leaf = certs.remove(0);
            builder
                .set_certificate(&leaf)
                .map_err(SslConnectorError::SetLeafCertificate)?;
            for intermediate in certs {
                builder
                    .add_extra_chain_cert(intermediate)
                    .map_err(SslConnectorError::AppendCertificate)?;
            }
            let key = PKey::private_key_from_pem(pem).map_err(SslConnectorError::DeserializePrivateKey)?;
            builder
                .set_private_key(&key)
                .map_err(SslConnectorError::SetPrivateKey)?;
        }

        // Additional DER-encoded roots for server certificate verification.
        if let Some(ders) = root_certs {
            for der in ders {
                let root = X509::from_der(der).map_err(SslConnectorError::DeserializeRootCertificate)?;
                builder
                    .cert_store_mut()
                    .add_cert(root)
                    .map_err(SslConnectorError::AddRootCertificate)?;
            }
        }
        Ok(builder)
    }
}

159
vendor/kube-client/src/client/upgrade.rs vendored Normal file
View File

@@ -0,0 +1,159 @@
use http::{self, HeaderValue, Response, StatusCode};
use thiserror::Error;
use tokio_tungstenite::tungstenite as ws;
use crate::{client::Body, Error, Result};
#[derive(Debug)]
pub enum StreamProtocol {
    /// Binary subprotocol v4. See `Client::connect`.
    V4,
    /// Binary subprotocol v5. See `Client::connect`.
    /// v5 supports CLOSE signals.
    /// <https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/util/remotecommand/constants.go#L52C26-L52C43>
    V5,
}
impl StreamProtocol {
pub fn as_str(&self) -> &'static str {
match self {
Self::V4 => "v4.channel.k8s.io",
Self::V5 => "v5.channel.k8s.io",
}
}
fn as_bytes(&self) -> &'static [u8] {
self.as_str().as_bytes()
}
pub fn supports_stream_close(&self) -> bool {
match self {
Self::V4 => false,
Self::V5 => true,
}
}
/// Add HTTP header SEC_WEBSOCKET_PROTOCOL with a list of supported protocol.
pub fn add_to_headers(headers: &mut http::HeaderMap) -> Result<()> {
// Protocols we support in our preferred order.
let supported_protocols = [
// v5 supports CLOSE signals.
Self::V5.as_str(),
// Use the binary subprotocol v4, to get JSON `Status` object in `error` channel (3).
// There's no official documentation about this protocol, but it's described in
// [`k8s.io/apiserver/pkg/util/wsstream/conn.go`](https://git.io/JLQED).
// There's a comment about v4 and `Status` object in
// [`kublet/cri/streaming/remotecommand/httpstream.go`](https://git.io/JLQEh).
Self::V4.as_str(),
];
let header_value_string = supported_protocols.join(", ");
// Note: Multiple headers does not work. Only a single CSV works.
headers.insert(
http::header::SEC_WEBSOCKET_PROTOCOL,
HeaderValue::from_str(&header_value_string).map_err(|e| Error::HttpError(e.into()))?,
);
Ok(())
}
/// Return the subprotocol of an HTTP response.
fn get_from_response<B>(res: &Response<B>) -> Option<Self> {
let headers = res.headers();
match headers
.get(http::header::SEC_WEBSOCKET_PROTOCOL)
.map(|h| h.as_bytes())
{
Some(protocol) => {
if protocol == Self::V4.as_bytes() {
Some(Self::V4)
} else if protocol == Self::V5.as_bytes() {
Some(Self::V5)
} else {
None
}
}
_ => None,
}
}
}
/// Possible errors from upgrading to a WebSocket connection
///
/// Produced while validating the server's `101 Switching Protocols` handshake response.
#[cfg(feature = "ws")]
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
#[derive(Debug, Error)]
pub enum UpgradeConnectionError {
    /// The server did not respond with [`SWITCHING_PROTOCOLS`] status when upgrading the
    /// connection.
    ///
    /// [`SWITCHING_PROTOCOLS`]: http::status::StatusCode::SWITCHING_PROTOCOLS
    #[error("failed to switch protocol: {0}")]
    ProtocolSwitch(http::status::StatusCode),
    /// `Upgrade` header was not set to `websocket` (case insensitive)
    #[error("upgrade header was not set to websocket")]
    MissingUpgradeWebSocketHeader,
    /// `Connection` header was not set to `Upgrade` (case insensitive)
    #[error("connection header was not set to Upgrade")]
    MissingConnectionUpgradeHeader,
    /// `Sec-WebSocket-Accept` key mismatched.
    #[error("Sec-WebSocket-Accept key mismatched")]
    SecWebSocketAcceptKeyMismatch,
    /// `Sec-WebSocket-Protocol` mismatched.
    #[error("Sec-WebSocket-Protocol mismatched")]
    SecWebSocketProtocolMismatch,
    /// Failed to get pending HTTP upgrade.
    #[error("failed to get pending HTTP upgrade: {0}")]
    GetPendingUpgrade(#[source] hyper::Error),
}
// Verify upgrade response according to RFC6455.
// Based on `tungstenite` and added subprotocol verification.
pub fn verify_response(res: &Response<Body>, key: &str) -> Result<StreamProtocol, UpgradeConnectionError> {
    if res.status() != StatusCode::SWITCHING_PROTOCOLS {
        return Err(UpgradeConnectionError::ProtocolSwitch(res.status()));
    }

    let headers = res.headers();
    // A header "matches" when it is present, valid UTF-8, and case-insensitively equals `want`.
    let header_matches = |name: http::header::HeaderName, want: &str| -> bool {
        headers
            .get(name)
            .and_then(|h| h.to_str().ok())
            .map(|h| h.eq_ignore_ascii_case(want))
            .unwrap_or(false)
    };

    if !header_matches(http::header::UPGRADE, "websocket") {
        return Err(UpgradeConnectionError::MissingUpgradeWebSocketHeader);
    }
    if !header_matches(http::header::CONNECTION, "Upgrade") {
        return Err(UpgradeConnectionError::MissingConnectionUpgradeHeader);
    }

    // The server must echo back our nonce, hashed per the RFC6455 handshake.
    let accept_key = ws::handshake::derive_accept_key(key.as_ref());
    let accepted = headers
        .get(http::header::SEC_WEBSOCKET_ACCEPT)
        .map(|h| h == &accept_key)
        .unwrap_or(false);
    if !accepted {
        return Err(UpgradeConnectionError::SecWebSocketAcceptKeyMismatch);
    }

    // Make sure that the server returned an expected subprotocol.
    StreamProtocol::get_from_response(res).ok_or(UpgradeConnectionError::SecWebSocketProtocolMismatch)
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,139 @@
use super::{
file_config::{AuthInfo, Cluster, Context, Kubeconfig},
KubeconfigError,
};
/// KubeConfigOptions stores options used when loading a kubeconfig file.
///
/// Unset fields fall back to the kubeconfig's own selections (see `ConfigLoader::load`).
#[derive(Default, Clone)]
pub struct KubeConfigOptions {
    /// The named context to load (defaults to the kubeconfig's `current-context`)
    pub context: Option<String>,
    /// The cluster to load (defaults to the cluster referenced by the chosen context)
    pub cluster: Option<String>,
    /// The user to load (defaults to the user referenced by the chosen context)
    pub user: Option<String>,
}
/// ConfigLoader loads current context, cluster, and authentication information
/// from a kubeconfig file.
#[derive(Clone, Debug)]
pub struct ConfigLoader {
    /// The resolved context (after applying `KubeConfigOptions` overrides)
    pub current_context: Context,
    /// The cluster referenced by the resolved context (or an explicit override)
    pub cluster: Cluster,
    /// Authentication information for the resolved user
    pub user: AuthInfo,
}
impl ConfigLoader {
    /// Returns a config loader based on the cluster information from the kubeconfig file.
    pub async fn new_from_options(options: &KubeConfigOptions) -> Result<Self, KubeconfigError> {
        let config = Kubeconfig::read()?;
        let loader = Self::load(
            config,
            options.context.as_ref(),
            options.cluster.as_ref(),
            options.user.as_ref(),
        )
        .await?;
        Ok(loader)
    }

    /// Returns a config loader for an already-parsed [`Kubeconfig`].
    pub async fn new_from_kubeconfig(
        config: Kubeconfig,
        options: &KubeConfigOptions,
    ) -> Result<Self, KubeconfigError> {
        let loader = Self::load(
            config,
            options.context.as_ref(),
            options.cluster.as_ref(),
            options.user.as_ref(),
        )
        .await?;
        Ok(loader)
    }

    /// Resolve the context/cluster/user triple from `config`.
    ///
    /// Explicit `context`/`cluster`/`user` arguments take precedence over the
    /// kubeconfig's `current-context` and the context's own cluster/user references.
    pub async fn load(
        config: Kubeconfig,
        context: Option<&String>,
        cluster: Option<&String>,
        user: Option<&String>,
    ) -> Result<Self, KubeconfigError> {
        let context_name = if let Some(name) = context {
            name
        } else if let Some(name) = &config.current_context {
            name
        } else {
            return Err(KubeconfigError::CurrentContextNotSet);
        };
        let current_context = config
            .contexts
            .iter()
            .find(|named_context| &named_context.name == context_name)
            .and_then(|named_context| named_context.context.clone())
            .ok_or_else(|| KubeconfigError::LoadContext(context_name.clone()))?;
        let cluster_name = cluster.unwrap_or(&current_context.cluster);
        let cluster = config
            .clusters
            .iter()
            .find(|named_cluster| &named_cluster.name == cluster_name)
            .and_then(|named_cluster| named_cluster.cluster.clone())
            .ok_or_else(|| KubeconfigError::LoadClusterOfContext(cluster_name.clone()))?;
        let user_name = user.or_else(|| current_context.user.as_ref());
        // client-go doesn't fail on empty/missing user, so we don't either
        // see https://github.com/kube-rs/kube/issues/1594
        let mut auth_info = if let Some(user) = user_name {
            config
                .auth_infos
                .iter()
                .find(|named_user| &named_user.name == user)
                .and_then(|named_user| named_user.auth_info.clone())
                .unwrap_or_default()
        } else {
            AuthInfo::default()
        };
        // Exec credential plugins may request cluster details (provideClusterInfo).
        if let Some(exec_config) = &mut auth_info.exec {
            if exec_config.provide_cluster_info {
                exec_config.cluster = Some((&cluster).try_into()?);
            }
        }
        Ok(ConfigLoader {
            current_context,
            cluster,
            user: auth_info,
        })
    }

    /// Load and parse the cluster's certificate authority bundle, if one is configured.
    pub fn ca_bundle(&self) -> Result<Option<Vec<Vec<u8>>>, KubeconfigError> {
        if let Some(bundle) = self.cluster.load_certificate_authority()? {
            Ok(Some(
                super::certs(&bundle).map_err(KubeconfigError::ParseCertificates)?,
            ))
        } else {
            Ok(None)
        }
    }

    /// Proxy URL from the kubeconfig, falling back to the `HTTPS_PROXY`/`https_proxy` env vars.
    pub fn proxy_url(&self) -> Result<Option<http::Uri>, KubeconfigError> {
        // Treat empty strings the same as unset.
        let nonempty = |o: Option<String>| o.filter(|s| !s.is_empty());
        if let Some(proxy) = nonempty(self.cluster.proxy_url.clone())
            .or_else(|| nonempty(std::env::var("HTTPS_PROXY").ok()))
            .or_else(|| nonempty(std::env::var("https_proxy").ok()))
        {
            Ok(Some(
                proxy
                    .parse::<http::Uri>()
                    .map_err(KubeconfigError::ParseProxyUrl)?,
            ))
        } else {
            Ok(None)
        }
    }
}

View File

@@ -0,0 +1,152 @@
use std::env;
use thiserror::Error;
// Environment variables exposing the in-cluster API server host and port
const SERVICE_HOSTENV: &str = "KUBERNETES_SERVICE_HOST";
const SERVICE_PORTENV: &str = "KUBERNETES_SERVICE_PORT";
// Mounted credential files
const SERVICE_TOKENFILE: &str = "/var/run/secrets/kubernetes.io/serviceaccount/token";
const SERVICE_CERTFILE: &str = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt";
const SERVICE_DEFAULT_NS: &str = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
/// Errors from loading in-cluster config
#[derive(Error, Debug)]
pub enum Error {
    /// Failed to read the default namespace for the service account
    #[error("failed to read the default namespace: {0}")]
    ReadDefaultNamespace(#[source] std::io::Error),
    /// Failed to read the in-cluster environment variables
    /// (both `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` must be set)
    #[error("failed to read an incluster environment variable: {0}")]
    ReadEnvironmentVariable(#[source] env::VarError),
    /// Failed to read a certificate bundle
    #[error("failed to read a certificate bundle: {0}")]
    ReadCertificateBundle(#[source] std::io::Error),
    /// Failed to parse cluster port value
    #[error("failed to parse cluster port: {0}")]
    ParseClusterPort(#[source] std::num::ParseIntError),
    /// Failed to parse cluster url
    #[error("failed to parse cluster url: {0}")]
    ParseClusterUrl(#[source] http::uri::InvalidUri),
    /// Failed to parse PEM-encoded certificates
    #[error("failed to parse PEM-encoded certificates: {0}")]
    ParseCertificates(#[source] pem::PemError),
}
/// Returns the URI of the Kubernetes API server using the in-cluster DNS name
/// `kubernetes.default.svc`.
///
/// Unlike [`try_kube_from_env`], this requires no environment variables.
pub(super) fn kube_dns() -> http::Uri {
    // `from_static` only accepts a valid literal; this one is fixed and known-good.
    http::Uri::from_static("https://kubernetes.default.svc/")
}
/// Returns the URI of the Kubernetes API server by reading the
/// `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` environment
/// variables.
pub(super) fn try_kube_from_env() -> Result<http::Uri, Error> {
// client-go requires that both environment variables are set.
let host = env::var(SERVICE_HOSTENV).map_err(Error::ReadEnvironmentVariable)?;
let port = env::var(SERVICE_PORTENV)
.map_err(Error::ReadEnvironmentVariable)?
.parse::<u16>()
.map_err(Error::ParseClusterPort)?;
try_uri(&host, port)
}
/// Build an `https` URI from a host (name or IP) and port.
///
/// The port is omitted when it is 443, and IPv6 addresses are bracketed.
fn try_uri(host: &str, port: u16) -> Result<http::Uri, Error> {
    const HTTPS: &str = "https";
    let uri = if let Ok(ip) = host.parse::<std::net::IpAddr>() {
        if port == 443 {
            // Default port: authority is just the (possibly bracketed) address.
            match ip {
                std::net::IpAddr::V6(v6) => format!("{HTTPS}://[{v6}]"),
                std::net::IpAddr::V4(v4) => format!("{HTTPS}://{v4}"),
            }
        } else {
            // SocketAddr's Display brackets IPv6 addresses for us.
            format!("{HTTPS}://{}", std::net::SocketAddr::new(ip, port))
        }
    } else if port == 443 {
        format!("{HTTPS}://{host}")
    } else {
        format!("{HTTPS}://{host}:{port}")
    };
    uri.parse().map_err(Error::ParseClusterUrl)
}
/// Returns the path to the mounted service account token file.
pub fn token_file() -> String {
    String::from(SERVICE_TOKENFILE)
}
/// Returns certification from specified path in cluster.
pub fn load_cert() -> Result<Vec<Vec<u8>>, Error> {
let certs = std::fs::read(SERVICE_CERTFILE).map_err(Error::ReadCertificateBundle)?;
super::certs(&certs).map_err(Error::ParseCertificates)
}
/// Returns the default namespace from specified path in cluster.
///
/// Reads the mounted serviceaccount namespace file verbatim.
pub fn load_default_ns() -> Result<String, Error> {
    std::fs::read_to_string(SERVICE_DEFAULT_NS).map_err(Error::ReadDefaultNamespace)
}
// Hostname with a non-default port keeps the port in the authority.
#[test]
fn test_kube_name() {
    assert_eq!(
        try_uri("fake.io", 8080).unwrap().to_string(),
        "https://fake.io:8080/"
    );
}
// Hostname on 443 drops the port and matches the DNS fallback URI.
#[test]
fn test_kube_name_default_port() {
    assert_eq!(try_uri("kubernetes.default.svc", 443).unwrap(), kube_dns())
}
// IPv4 with a non-default port.
#[test]
fn test_kube_ipv4() {
    assert_eq!(
        try_uri("10.11.12.13", 6443).unwrap().to_string(),
        "https://10.11.12.13:6443/"
    );
}
// IPv4 on 443 drops the port.
#[test]
fn test_kube_ipv4_default_port() {
    assert_eq!(
        try_uri("10.11.12.13", 443).unwrap().to_string(),
        "https://10.11.12.13/"
    );
}
// IPv6 is bracketed and compressed (IpAddr Display form).
#[test]
fn test_kube_ipv6() {
    assert_eq!(
        try_uri("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 6443)
            .unwrap()
            .to_string(),
        "https://[2001:db8:85a3::8a2e:370:7334]:6443/"
    );
}
// IPv6 on 443 is bracketed with no port.
#[test]
fn test_kube_ipv6_default_port() {
    assert_eq!(
        try_uri("2001:0db8:85a3:0000:0000:8a2e:0370:7334", 443)
            .unwrap()
            .to_string(),
        "https://[2001:db8:85a3::8a2e:370:7334]/"
    );
}

434
vendor/kube-client/src/config/mod.rs vendored Normal file
View File

@@ -0,0 +1,434 @@
//! Kubernetes configuration objects.
//!
//! Reads locally from `$KUBECONFIG` or `~/.kube/config`,
//! and in-cluster from the [pod environment](https://kubernetes.io/docs/tasks/run-application/access-api-from-pod/#accessing-the-api-from-within-a-pod).
//!
//! # Usage
//! The [`Config`] has several constructors plus logic to infer environment.
//!
//! Unless you have issues, prefer using [`Config::infer`], and pass it to a [`Client`][crate::Client].
use std::{path::PathBuf, time::Duration};
use http::{HeaderName, HeaderValue};
use thiserror::Error;
mod file_config;
mod file_loader;
mod incluster_config;
use file_loader::ConfigLoader;
pub use file_loader::KubeConfigOptions;
pub use incluster_config::Error as InClusterError;
/// Failed to infer config
///
/// Carries both underlying failures: the in-cluster attempt and the kubeconfig attempt.
#[derive(Error, Debug)]
#[error("failed to infer config: in-cluster: ({in_cluster}), kubeconfig: ({kubeconfig})")]
pub struct InferConfigError {
    in_cluster: InClusterError,
    // We can only pick one source, but the kubeconfig failure is more likely to be a user error
    #[source]
    kubeconfig: KubeconfigError,
}
/// Possible errors when loading kubeconfig
///
/// Produced while reading, merging, and resolving kubeconfig files.
#[derive(Error, Debug)]
pub enum KubeconfigError {
    /// Failed to determine current context
    #[error("failed to determine current context")]
    CurrentContextNotSet,
    /// Kubeconfigs with mismatching kind cannot be merged
    #[error("kubeconfigs with mismatching kind cannot be merged")]
    KindMismatch,
    /// Kubeconfigs with mismatching api version cannot be merged
    #[error("kubeconfigs with mismatching api version cannot be merged")]
    ApiVersionMismatch,
    /// Failed to load current context
    #[error("failed to load current context: {0}")]
    LoadContext(String),
    /// Failed to load the cluster of context
    #[error("failed to load the cluster of context: {0}")]
    LoadClusterOfContext(String),
    /// Failed to find the path of kubeconfig
    #[error("failed to find the path of kubeconfig")]
    FindPath,
    /// Failed to read kubeconfig
    #[error("failed to read kubeconfig from '{1:?}': {0}")]
    ReadConfig(#[source] std::io::Error, PathBuf),
    /// Failed to parse kubeconfig YAML
    #[error("failed to parse kubeconfig YAML: {0}")]
    Parse(#[source] serde_yaml::Error),
    /// The structure of the parsed kubeconfig is invalid
    #[error("the structure of the parsed kubeconfig is invalid: {0}")]
    InvalidStructure(#[source] serde_yaml::Error),
    /// Cluster url is missing on selected cluster
    #[error("cluster url is missing on selected cluster")]
    MissingClusterUrl,
    /// Failed to parse cluster url
    #[error("failed to parse cluster url: {0}")]
    ParseClusterUrl(#[source] http::uri::InvalidUri),
    /// Failed to parse proxy url
    #[error("failed to parse proxy url: {0}")]
    ParseProxyUrl(#[source] http::uri::InvalidUri),
    /// Failed to load certificate authority
    #[error("failed to load certificate authority")]
    LoadCertificateAuthority(#[source] LoadDataError),
    /// Failed to load client certificate
    #[error("failed to load client certificate")]
    LoadClientCertificate(#[source] LoadDataError),
    /// Failed to load client key
    #[error("failed to load client key")]
    LoadClientKey(#[source] LoadDataError),
    /// Failed to parse PEM-encoded certificates
    #[error("failed to parse PEM-encoded certificates: {0}")]
    ParseCertificates(#[source] pem::PemError),
}
/// Errors from loading data from a base64 string or a file
///
/// Kubeconfig entries may embed credentials inline (base64) or reference a file path.
#[derive(Debug, Error)]
pub enum LoadDataError {
    /// Failed to decode base64 data
    #[error("failed to decode base64 data: {0}")]
    DecodeBase64(#[source] base64::DecodeError),
    /// Failed to read file
    #[error("failed to read file '{1:?}': {0}")]
    ReadFile(#[source] std::io::Error, PathBuf),
    /// No base64 data or file path was provided
    #[error("no base64 data or file")]
    NoBase64DataOrFile,
}
/// Configuration object for accessing a Kubernetes cluster
///
/// The configurable parameters for connecting like cluster URL, default namespace, root certificates, and timeouts.
/// Normally created implicitly through [`Config::infer`] or [`Client::try_default`](crate::Client::try_default).
///
/// # Usage
/// Construct a [`Config`] instance by using one of the many constructors.
///
/// Prefer [`Config::infer`] unless you have particular issues, and avoid manually managing
/// the data in this struct unless you have particular needs. It exists to be consumed by the [`Client`][crate::Client].
///
/// If you are looking to parse the kubeconfig found in a user's home directory see [`Kubeconfig`].
#[cfg_attr(docsrs, doc(cfg(feature = "config")))]
#[derive(Debug, Clone)]
pub struct Config {
    /// The configured cluster url
    pub cluster_url: http::Uri,
    /// The configured default namespace
    pub default_namespace: String,
    /// The configured root certificate
    pub root_cert: Option<Vec<Vec<u8>>>,
    /// Set the timeout for connecting to the Kubernetes API.
    ///
    /// A value of `None` means no timeout
    pub connect_timeout: Option<std::time::Duration>,
    /// Set the timeout for the Kubernetes API response.
    ///
    /// A value of `None` means no timeout
    pub read_timeout: Option<std::time::Duration>,
    /// Set the timeout for the Kubernetes API request.
    ///
    /// A value of `None` means no timeout
    pub write_timeout: Option<std::time::Duration>,
    /// Whether to accept invalid certificates
    pub accept_invalid_certs: bool,
    /// Stores information to tell the cluster who you are.
    pub auth_info: AuthInfo,
    /// Whether to disable compression (would only have an effect when the `gzip` feature is enabled)
    pub disable_compression: bool,
    /// Optional proxy URL. Proxy support requires the `socks5` feature.
    pub proxy_url: Option<http::Uri>,
    /// If set, apiserver certificate will be validated to contain this string
    ///
    /// If not set, the `cluster_url` is used instead
    pub tls_server_name: Option<String>,
    /// Headers to pass with every request.
    pub headers: Vec<(HeaderName, HeaderValue)>,
}
impl Config {
    /// Construct a new config where only the `cluster_url` is set by the user,
    /// and everything else receives a default value.
    ///
    /// Most likely you want to use [`Config::infer`] to infer the config from
    /// the environment.
    pub fn new(cluster_url: http::Uri) -> Self {
        Self {
            cluster_url,
            default_namespace: String::from("default"),
            root_cert: None,
            connect_timeout: Some(DEFAULT_CONNECT_TIMEOUT),
            read_timeout: Some(DEFAULT_READ_TIMEOUT),
            write_timeout: Some(DEFAULT_WRITE_TIMEOUT),
            accept_invalid_certs: false,
            auth_info: AuthInfo::default(),
            disable_compression: false,
            proxy_url: None,
            tls_server_name: None,
            headers: Vec::new(),
        }
    }
    /// Infer a Kubernetes client configuration.
    ///
    /// First, a user's kubeconfig is loaded from `KUBECONFIG` or
    /// `~/.kube/config`. If that fails, an in-cluster config is loaded via
    /// [`Config::incluster`]. If inference from both sources fails, then an
    /// error is returned.
    ///
    /// [`Config::apply_debug_overrides`] is used to augment the loaded
    /// configuration based on the environment.
    pub async fn infer() -> Result<Self, InferConfigError> {
        // Kubeconfig takes precedence; in-cluster is only attempted after it fails.
        let mut config = match Self::from_kubeconfig(&KubeConfigOptions::default()).await {
            Err(kubeconfig_err) => {
                tracing::trace!(
                    error = &kubeconfig_err as &dyn std::error::Error,
                    "no local config found, falling back to local in-cluster config"
                );
                // Report both failures if in-cluster loading also fails.
                Self::incluster().map_err(|in_cluster| InferConfigError {
                    in_cluster,
                    kubeconfig: kubeconfig_err,
                })?
            }
            Ok(success) => success,
        };
        config.apply_debug_overrides();
        Ok(config)
    }
    /// Load an in-cluster Kubernetes client configuration using
    /// [`Config::incluster_env`].
    pub fn incluster() -> Result<Self, InClusterError> {
        Self::incluster_env()
    }
    /// Load an in-cluster config using the `KUBERNETES_SERVICE_HOST` and
    /// `KUBERNETES_SERVICE_PORT` environment variables.
    ///
    /// A service account's token must be available in
    /// `/var/run/secrets/kubernetes.io/serviceaccount/`.
    ///
    /// This method matches the behavior of the official Kubernetes client
    /// libraries and is the default for both TLS stacks.
    pub fn incluster_env() -> Result<Self, InClusterError> {
        let uri = incluster_config::try_kube_from_env()?;
        Self::incluster_with_uri(uri)
    }
    /// Load an in-cluster config using the API server at
    /// `https://kubernetes.default.svc`.
    ///
    /// A service account's token must be available in
    /// `/var/run/secrets/kubernetes.io/serviceaccount/`.
    ///
    /// This behavior does not match that of the official Kubernetes clients,
    /// but can be used as a consistent entrypoint in many clusters.
    /// See <https://github.com/kube-rs/kube/issues/1003> for more info.
    pub fn incluster_dns() -> Result<Self, InClusterError> {
        Self::incluster_with_uri(incluster_config::kube_dns())
    }
    // Shared tail of the incluster constructors: mounts namespace/CA/token from the pod.
    fn incluster_with_uri(cluster_url: http::uri::Uri) -> Result<Self, InClusterError> {
        let default_namespace = incluster_config::load_default_ns()?;
        let root_cert = incluster_config::load_cert()?;
        Ok(Self {
            cluster_url,
            default_namespace,
            root_cert: Some(root_cert),
            connect_timeout: Some(DEFAULT_CONNECT_TIMEOUT),
            read_timeout: Some(DEFAULT_READ_TIMEOUT),
            write_timeout: Some(DEFAULT_WRITE_TIMEOUT),
            accept_invalid_certs: false,
            auth_info: AuthInfo {
                token_file: Some(incluster_config::token_file()),
                ..Default::default()
            },
            disable_compression: false,
            proxy_url: None,
            tls_server_name: None,
            headers: Vec::new(),
        })
    }
    /// Create configuration from the default local config file
    ///
    /// This will respect the `$KUBECONFIG` evar, but otherwise default to `~/.kube/config`.
    /// You can also customize what context/cluster/user you want to use here,
    /// but it will default to the current-context.
    pub async fn from_kubeconfig(options: &KubeConfigOptions) -> Result<Self, KubeconfigError> {
        let loader = ConfigLoader::new_from_options(options).await?;
        Self::new_from_loader(loader).await
    }
    /// Create configuration from a [`Kubeconfig`] struct
    ///
    /// This bypasses kube's normal config parsing to obtain custom functionality.
    pub async fn from_custom_kubeconfig(
        kubeconfig: Kubeconfig,
        options: &KubeConfigOptions,
    ) -> Result<Self, KubeconfigError> {
        let loader = ConfigLoader::new_from_kubeconfig(kubeconfig, options).await?;
        Self::new_from_loader(loader).await
    }
    // Convert a resolved ConfigLoader (context + cluster + user) into a Config.
    async fn new_from_loader(loader: ConfigLoader) -> Result<Self, KubeconfigError> {
        let cluster_url = loader
            .cluster
            .server
            .clone()
            .ok_or(KubeconfigError::MissingClusterUrl)?
            .parse::<http::Uri>()
            .map_err(KubeconfigError::ParseClusterUrl)?;
        let default_namespace = loader
            .current_context
            .namespace
            .clone()
            .unwrap_or_else(|| String::from("default"));
        let accept_invalid_certs = loader.cluster.insecure_skip_tls_verify.unwrap_or(false);
        let disable_compression = loader.cluster.disable_compression.unwrap_or(false);
        let mut root_cert = None;
        if let Some(ca_bundle) = loader.ca_bundle()? {
            root_cert = Some(ca_bundle);
        }
        Ok(Self {
            cluster_url,
            default_namespace,
            root_cert,
            connect_timeout: Some(DEFAULT_CONNECT_TIMEOUT),
            read_timeout: Some(DEFAULT_READ_TIMEOUT),
            write_timeout: Some(DEFAULT_WRITE_TIMEOUT),
            accept_invalid_certs,
            disable_compression,
            proxy_url: loader.proxy_url()?,
            auth_info: loader.user,
            tls_server_name: loader.cluster.tls_server_name,
            headers: Vec::new(),
        })
    }
    /// Override configuration based on environment variables
    ///
    /// This is only intended for use as a debugging aid, and the specific variables and their behaviour
    /// should **not** be considered stable across releases.
    ///
    /// Currently, the following overrides are supported:
    ///
    /// - `KUBE_RS_DEBUG_IMPERSONATE_USER`: A Kubernetes user to impersonate, for example: `system:serviceaccount:default:foo` will impersonate the `ServiceAccount` `foo` in the `Namespace` `default`
    /// - `KUBE_RS_DEBUG_IMPERSONATE_GROUP`: A Kubernetes group to impersonate, multiple groups may be specified by separating them with commas
    /// - `KUBE_RS_DEBUG_OVERRIDE_URL`: A Kubernetes cluster URL to use rather than the one specified in the config, useful for proxying traffic through `kubectl proxy`
    pub fn apply_debug_overrides(&mut self) {
        // Log these overrides loudly, to emphasize that this is only a debugging aid, and should not be relied upon in production
        if let Ok(impersonate_user) = std::env::var("KUBE_RS_DEBUG_IMPERSONATE_USER") {
            tracing::warn!(?impersonate_user, "impersonating user");
            self.auth_info.impersonate = Some(impersonate_user);
        }
        if let Ok(impersonate_groups) = std::env::var("KUBE_RS_DEBUG_IMPERSONATE_GROUP") {
            let impersonate_groups = impersonate_groups.split(',').map(str::to_string).collect();
            tracing::warn!(?impersonate_groups, "impersonating groups");
            self.auth_info.impersonate_groups = Some(impersonate_groups);
        }
        if let Ok(url) = std::env::var("KUBE_RS_DEBUG_OVERRIDE_URL") {
            tracing::warn!(?url, "overriding cluster URL");
            match url.parse() {
                Ok(uri) => {
                    self.cluster_url = uri;
                }
                Err(err) => {
                    // An unparseable override is ignored rather than failing the whole config.
                    tracing::warn!(
                        ?url,
                        error = &err as &dyn std::error::Error,
                        "failed to parse override cluster URL, ignoring"
                    );
                }
            }
        }
    }
    /// Client certificate and private key in PEM.
    pub(crate) fn identity_pem(&self) -> Option<Vec<u8>> {
        self.auth_info.identity_pem().ok()
    }
}
/// Parse a PEM bundle and return the DER contents of every CERTIFICATE entry.
///
/// Non-certificate PEM blocks (e.g. keys) are silently skipped.
fn certs(data: &[u8]) -> Result<Vec<Vec<u8>>, pem::PemError> {
    let parsed = pem::parse_many(data)?;
    let ders = parsed
        .into_iter()
        .filter(|p| p.tag() == "CERTIFICATE")
        .map(pem::Pem::into_contents)
        .collect::<Vec<_>>();
    Ok(ders)
}
// Default client timeouts; rationale discussed in
// https://github.com/kube-rs/kube/issues/146#issuecomment-590924397
// (read/write sit just under 5 minutes — presumably to accommodate long-lived
// requests such as watches; see the issue thread for details)
const DEFAULT_CONNECT_TIMEOUT: Duration = Duration::from_secs(30);
const DEFAULT_READ_TIMEOUT: Duration = Duration::from_secs(295);
const DEFAULT_WRITE_TIMEOUT: Duration = Duration::from_secs(295);
// Expose raw config structs
pub use file_config::{
AuthInfo, AuthProviderConfig, Cluster, Context, ExecAuthCluster, ExecConfig, ExecInteractiveMode,
Kubeconfig, NamedAuthInfo, NamedCluster, NamedContext, NamedExtension, Preferences,
};
#[cfg(test)]
mod tests {
    // Ensures kubeconfig loading compiles and works with only the `config` feature.
    #[cfg(not(feature = "client"))] // want to ensure this works without client features
    #[tokio::test]
    async fn config_loading_on_small_feature_set() {
        use super::Config;
        let cfgraw = r#"
        apiVersion: v1
        clusters:
        - cluster:
            certificate-authority-data: aGVsbG8K
            server: https://0.0.0.0:6443
          name: k3d-test
        contexts:
        - context:
            cluster: k3d-test
            user: admin@k3d-test
          name: k3d-test
        current-context: k3d-test
        kind: Config
        preferences: {}
        users:
        - name: admin@k3d-test
          user:
            client-certificate-data: aGVsbG8K
            client-key-data: aGVsbG8K
        "#;
        let file = tempfile::NamedTempFile::new().expect("create config tempfile");
        std::fs::write(file.path(), cfgraw).unwrap();
        // Point `Config::infer` at the tempfile via the env var it respects.
        std::env::set_var("KUBECONFIG", file.path());
        let kubeconfig = Config::infer().await.unwrap();
        assert_eq!(kubeconfig.cluster_url, "https://0.0.0.0:6443/");
    }
}

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: aGVsbG8K
server: https://0.0.0.0:6443
name: k3d-promstack
contexts:
- context:
cluster: k3d-promstack
user: admin@k3d-promstack
name: k3d-promstack
users:
- name: admin@k3d-k3s-default
user:
client-certificate-data: aGVsbG8K
client-key-data: aGVsbG8K
current-context: k3d-promstack
kind: Config
preferences: {}

View File

@@ -0,0 +1,397 @@
use super::parse::{self, GroupVersionData};
use crate::{error::DiscoveryError, Client, Error, Result};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIGroup, APIVersions};
pub use kube_core::discovery::{ApiCapabilities, ApiResource};
use kube_core::{
gvk::{GroupVersion, GroupVersionKind, ParseGroupVersionError},
Version,
};
use std::{cmp::Reverse, collections::HashMap, iter::Iterator};
/// Describes one API groups collected resources and capabilities.
///
/// Each `ApiGroup` contains all data pinned to each version.
/// In particular, one data set within the `ApiGroup` for `"apiregistration.k8s.io"`
/// is the subset pinned to `"v1"`; commonly referred to as `"apiregistration.k8s.io/v1"`.
///
/// If you know the version of the discovered group, you can fetch it directly:
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let client = Client::try_default().await?;
///     let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
///      for (apiresource, caps) in apigroup.versioned_resources("v1") {
///          println!("Found ApiResource {}", apiresource.kind);
///      }
///     Ok(())
/// }
/// ```
///
/// But if you do not know this information, you can use [`ApiGroup::preferred_version_or_latest`].
///
/// Whichever way you choose the end result is something describing a resource and its abilities:
/// - `Vec<(ApiResource, ApiCapabilities)>` :: for all resources in a versioned ApiGroup
/// - `(ApiResource, ApiCapabilities)` :: for a single kind under a versioned ApiGroup
///
/// These two types: [`ApiResource`], and [`ApiCapabilities`]
/// should contain the information needed to construct an [`Api`](crate::Api) and start querying the kubernetes API.
/// You will likely need to use [`DynamicObject`] as the generic type for Api to do this,
/// as well as the [`ApiResource`] for the `DynamicType` for the [`Resource`] trait.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let client = Client::try_default().await?;
///     let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
///     let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
///     let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
///     for service in api.list(&Default::default()).await? {
///         println!("Found APIService: {}", service.name_any());
///     }
///     Ok(())
/// }
/// ```
///
/// This type represents an abstraction over the native [`APIGroup`] to provide easier access to underlying group resources.
///
/// ### Common Pitfall
/// Version preference and recommendations shown herein is a **group concept**, not a resource-wide concept.
/// A common mistake is to have different stored versions for resources within a group, and then receive confusing results from this module.
/// Resources in a shared group should share versions - and transition together - to minimize confusion.
/// See <https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups-and-versioning> for more info.
///
/// [`ApiResource`]: crate::discovery::ApiResource
/// [`ApiCapabilities`]: crate::discovery::ApiCapabilities
/// [`DynamicObject`]: crate::api::DynamicObject
/// [`Resource`]: crate::Resource
/// [`ApiGroup::preferred_version_or_latest`]: crate::discovery::ApiGroup::preferred_version_or_latest
/// [`ApiGroup::versioned_resources`]: crate::discovery::ApiGroup::versioned_resources
/// [`ApiGroup::recommended_resources`]: crate::discovery::ApiGroup::recommended_resources
/// [`ApiGroup::recommended_kind`]: crate::discovery::ApiGroup::recommended_kind
pub struct ApiGroup {
    /// Name of the group e.g. apiregistration.k8s.io
    name: String,
    /// List of resource information, capabilities at particular versions
    data: Vec<GroupVersionData>,
    /// Preferred version if exported by the `APIGroup`
    preferred: Option<String>,
}
/// Internal queriers to convert from an APIGroup (or APIVersions for core) to our ApiGroup
///
/// These queriers ignore groups with empty versions.
/// This ensures that `ApiGroup::preferred_version_or_latest` always has an answer.
/// On construction, they also sort the internal vec of GroupVersionData according to `Version`.
impl ApiGroup {
    /// Fetch the resource lists for every version served by `g` and assemble an `ApiGroup`.
    pub(crate) async fn query_apis(client: &Client, g: APIGroup) -> Result<Self> {
        tracing::debug!(name = g.name.as_str(), "Listing group versions");
        let key = g.name;
        if g.versions.is_empty() {
            return Err(Error::Discovery(DiscoveryError::EmptyApiGroup(key)));
        }
        let mut data = vec![];
        for vers in &g.versions {
            let resources = client.list_api_group_resources(&vers.group_version).await?;
            data.push(GroupVersionData::new(vers.version.clone(), resources)?);
        }
        let mut group = ApiGroup {
            name: key,
            data,
            preferred: g.preferred_version.map(|v| v.version),
        };
        group.sort_versions();
        Ok(group)
    }

    /// Assemble the core ("legacy") group from the apiserver's `APIVersions`.
    pub(crate) async fn query_core(client: &Client, coreapis: APIVersions) -> Result<Self> {
        let key = ApiGroup::CORE_GROUP.to_string();
        if coreapis.versions.is_empty() {
            return Err(Error::Discovery(DiscoveryError::EmptyApiGroup(key)));
        }
        let mut data = vec![];
        for v in coreapis.versions {
            let resources = client.list_core_api_resources(&v).await?;
            data.push(GroupVersionData::new(v, resources)?);
        }
        // Reuse `key` (only moved on the early-return above) rather than
        // allocating CORE_GROUP a second time.
        let mut group = ApiGroup {
            name: key,
            data,
            preferred: Some("v1".to_string()),
        };
        group.sort_versions();
        Ok(group)
    }

    /// Sort stored versions by kubernetes version priority, most preferred first.
    fn sort_versions(&mut self) {
        self.data
            .sort_by_cached_key(|gvd| Reverse(Version::parse(gvd.version.as_str()).priority()))
    }

    // shortcut method to give cheapest return for a single GVK
    pub(crate) async fn query_gvk(
        client: &Client,
        gvk: &GroupVersionKind,
    ) -> Result<(ApiResource, ApiCapabilities)> {
        let apiver = gvk.api_version();
        let list = if gvk.group.is_empty() {
            client.list_core_api_resources(&apiver).await?
        } else {
            client.list_api_group_resources(&apiver).await?
        };
        for res in &list.resources {
            // Skip subresource entries (names containing '/'), match on kind only.
            if res.kind == gvk.kind && !res.name.contains('/') {
                let ar = parse::parse_apiresource(res, &list.group_version).map_err(
                    |ParseGroupVersionError(s)| Error::Discovery(DiscoveryError::InvalidGroupVersion(s)),
                )?;
                let caps = parse::parse_apicapabilities(&list, &res.name)?;
                return Ok((ar, caps));
            }
        }
        Err(Error::Discovery(DiscoveryError::MissingKind(format!("{gvk:?}"))))
    }

    // shortcut method to give cheapest return for a pinned group
    pub(crate) async fn query_gv(client: &Client, gv: &GroupVersion) -> Result<Self> {
        let apiver = gv.api_version();
        let list = if gv.group.is_empty() {
            client.list_core_api_resources(&apiver).await?
        } else {
            client.list_api_group_resources(&apiver).await?
        };
        let data = GroupVersionData::new(gv.version.clone(), list)?;
        let group = ApiGroup {
            name: gv.group.clone(),
            data: vec![data],
            preferred: Some(gv.version.clone()), // you preferred what you asked for
        };
        Ok(group)
    }
}
/// Public ApiGroup interface
impl ApiGroup {
    /// Core group name
    pub const CORE_GROUP: &'static str = "";
    /// Returns the name of this group.
    pub fn name(&self) -> &str {
        &self.name
    }
    /// Returns served versions (e.g. `["v1", "v2beta1"]`) of this group.
    ///
    /// This [`Iterator`] is never empty, and returns elements in descending order of [`Version`](kube_core::Version):
    /// - Stable versions (with the last being the first)
    /// - Beta versions (with the last being the first)
    /// - Alpha versions (with the last being the first)
    /// - Other versions, alphabetically
    pub fn versions(&self) -> impl Iterator<Item = &str> {
        // `data` is kept sorted by `sort_versions`, so this iterates in priority order.
        // (redundant `.as_slice()` removed; `Vec::iter` borrows directly)
        self.data.iter().map(|gvd| gvd.version.as_str())
    }
    /// Returns preferred version for working with given group.
    ///
    /// Please note the [ApiGroup Common Pitfall](ApiGroup#common-pitfall).
    pub fn preferred_version(&self) -> Option<&str> {
        self.preferred.as_deref()
    }
    /// Returns the preferred version or latest version for working with given group.
    ///
    /// If the server does not recommend a version, we pick the "most stable and most recent" version
    /// in accordance with [kubernetes version priority](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#version-priority)
    /// via the descending sort order from [`Version`](kube_core::Version).
    ///
    /// Please note the [ApiGroup Common Pitfall](ApiGroup#common-pitfall).
    pub fn preferred_version_or_latest(&self) -> &str {
        // NB: self.versions is non-empty by construction in ApiGroup
        self.preferred
            .as_deref()
            .unwrap_or_else(|| self.versions().next().unwrap())
    }
    /// Returns the resources in the group at an arbitrary version string.
    ///
    /// If the group does not support this version, the returned vector is empty.
    ///
    /// If you are looking for the api recommended list of resources, or just on particular kind
    /// consider [`ApiGroup::recommended_resources`] or [`ApiGroup::recommended_kind`] instead.
    pub fn versioned_resources(&self, ver: &str) -> Vec<(ApiResource, ApiCapabilities)> {
        self.data
            .iter()
            .find(|gvd| gvd.version == ver)
            .map(|gvd| gvd.resources.clone())
            .unwrap_or_default()
    }
    /// Returns the recommended (preferred or latest) versioned resources in the group
    ///
    /// ```no_run
    /// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
    /// #[tokio::main]
    /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     let client = Client::try_default().await?;
    ///     let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
    ///     for (ar, caps) in apigroup.recommended_resources() {
    ///         if !caps.supports_operation(verbs::LIST) {
    ///             continue;
    ///         }
    ///         let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
    ///         for inst in api.list(&Default::default()).await? {
    ///             println!("Found {}: {}", ar.kind, inst.name_any());
    ///         }
    ///     }
    ///     Ok(())
    /// }
    /// ```
    ///
    /// This is equivalent to taking the [`ApiGroup::versioned_resources`] at the [`ApiGroup::preferred_version_or_latest`].
    ///
    /// Please note the [ApiGroup Common Pitfall](ApiGroup#common-pitfall).
    pub fn recommended_resources(&self) -> Vec<(ApiResource, ApiCapabilities)> {
        let ver = self.preferred_version_or_latest();
        self.versioned_resources(ver)
    }
    /// Returns all resources in the group at their most stable respective version
    ///
    /// ```no_run
    /// use kube::{Client, api::{Api, DynamicObject}, discovery::{self, verbs}, ResourceExt};
    /// #[tokio::main]
    /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     let client = Client::try_default().await?;
    ///     let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
    ///     for (ar, caps) in apigroup.resources_by_stability() {
    ///         if !caps.supports_operation(verbs::LIST) {
    ///             continue;
    ///         }
    ///         let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
    ///         for inst in api.list(&Default::default()).await? {
    ///             println!("Found {}: {}", ar.kind, inst.name_any());
    ///         }
    ///     }
    ///     Ok(())
    /// }
    /// ```
    /// See an example in [examples/kubectl.rs](https://github.com/kube-rs/kube/blob/main/examples/kubectl.rs)
    pub fn resources_by_stability(&self) -> Vec<(ApiResource, ApiCapabilities)> {
        // bucket every (resource, caps) pair by kind, across all served versions
        let mut lookup = HashMap::new();
        self.data.iter().for_each(|gvd| {
            gvd.resources.iter().for_each(|resource| {
                lookup
                    .entry(resource.0.kind.clone())
                    .or_insert_with(Vec::new)
                    .push(resource);
            })
        });
        // for each kind, keep only the entry with the highest version priority
        lookup
            .into_values()
            .map(|mut v| {
                v.sort_by_cached_key(|(ar, _)| Reverse(Version::parse(ar.version.as_str()).priority()));
                v[0].to_owned()
            })
            .collect()
    }
    /// Returns the recommended version of the `kind` in the recommended resources (if found)
    ///
    /// ```no_run
    /// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
    /// #[tokio::main]
    /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     let client = Client::try_default().await?;
    ///     let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
    ///     let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
    ///     let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
    ///     for service in api.list(&Default::default()).await? {
    ///         println!("Found APIService: {}", service.name_any());
    ///     }
    ///     Ok(())
    /// }
    /// ```
    ///
    /// This is equivalent to filtering the [`ApiGroup::versioned_resources`] at [`ApiGroup::preferred_version_or_latest`] against a chosen `kind`.
    pub fn recommended_kind(&self, kind: &str) -> Option<(ApiResource, ApiCapabilities)> {
        let ver = self.preferred_version_or_latest();
        // idiomatic `find` over a manual loop with early return
        self.versioned_resources(ver)
            .into_iter()
            .find(|(ar, _)| ar.kind == kind)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use kube_core::discovery::Scope;

    /// Build a minimal `ApiResource` in the `kube.rs` test group.
    fn test_resource(version: &str, kind: &str, plural: &str) -> ApiResource {
        ApiResource {
            group: String::from("kube.rs"),
            version: version.to_string(),
            kind: kind.to_string(),
            api_version: format!("kube.rs/{version}"),
            plural: plural.to_string(),
        }
    }

    #[test]
    fn test_resources_by_stability() {
        let ac = ApiCapabilities {
            scope: Scope::Namespaced,
            subresources: vec![],
            operations: vec![],
        };
        // TestCr is served at v1 and v2alpha1; TestLowVersionCr only at v1alpha1
        let group = ApiGroup {
            name: "kube.rs".to_string(),
            data: vec![
                GroupVersionData {
                    version: "v1alpha1".to_string(),
                    resources: vec![(
                        test_resource("v1alpha1", "TestLowVersionCr", "testlowversioncrs"),
                        ac.clone(),
                    )],
                },
                GroupVersionData {
                    version: "v1".to_string(),
                    resources: vec![(test_resource("v1", "TestCr", "testcrs"), ac.clone())],
                },
                GroupVersionData {
                    version: "v2alpha1".to_string(),
                    resources: vec![(test_resource("v2alpha1", "TestCr", "testcrs"), ac)],
                },
            ],
            preferred: Some(String::from("v1")),
        };
        let resources = group.resources_by_stability();
        // the stable v1 must win over v2alpha1 for TestCr
        assert!(
            resources
                .iter()
                .any(|(ar, _)| ar.kind == "TestCr" && ar.version == "v1"),
            "wrong stable version"
        );
        // a kind served only at an alpha version must still be present
        assert!(
            resources
                .iter()
                .any(|(ar, _)| ar.kind == "TestLowVersionCr" && ar.version == "v1alpha1"),
            "lost low version resource"
        );
    }
}

170
vendor/kube-client/src/discovery/mod.rs vendored Normal file
View File

@@ -0,0 +1,170 @@
//! High-level utilities for runtime API discovery.
use crate::{Client, Result};
pub use kube_core::discovery::{verbs, ApiCapabilities, ApiResource, Scope};
use kube_core::gvk::GroupVersionKind;
use std::collections::HashMap;
mod apigroup;
pub mod oneshot;
pub use apigroup::ApiGroup;
mod parse;
// re-export one-shots
pub use oneshot::{group, pinned_group, pinned_kind};
/// How the Discovery client decides what api groups to scan
enum DiscoveryMode {
    /// Only allow explicitly listed apigroups
    Allow(Vec<String>),
    /// Allow all apigroups except the ones listed
    Block(Vec<String>),
}
impl DiscoveryMode {
    /// Whether the given group name should be queried under this mode.
    ///
    /// Takes `&str` rather than `&String` (clippy `ptr_arg`); existing callers
    /// passing `&String` continue to work via deref coercion.
    fn is_queryable(&self, group: &str) -> bool {
        match self {
            Self::Allow(allowed) => allowed.iter().any(|g| g == group),
            Self::Block(blocked) => !blocked.iter().any(|g| g == group),
        }
    }
}
/// A caching client for running API discovery against the Kubernetes API.
///
/// This simplifies the required querying and type matching, and stores the responses
/// for each discovered api group and exposes helpers to access them.
///
/// The discovery process varies in complexity depending on:
/// - how much you know about the kind(s) and group(s) you are interested in
/// - how many groups you are interested in
///
/// Discovery can be performed on:
/// - all api groups (default)
/// - a subset of api groups (by setting Discovery::filter)
///
/// To make use of discovered apis, extract one or more [`ApiGroup`]s from it,
/// or resolve a precise one using [`Discovery::resolve_gvk`](crate::discovery::Discovery::resolve_gvk).
///
/// If caching of results is __not required__, then a simpler [`oneshot`](crate::discovery::oneshot) discovery system can be used.
///
/// [`ApiGroup`]: crate::discovery::ApiGroup
#[cfg_attr(docsrs, doc(cfg(feature = "client")))]
pub struct Discovery {
    // Client used to issue all discovery requests
    client: Client,
    // Cache of discovered groups, keyed by group name ("" is the core group)
    groups: HashMap<String, ApiGroup>,
    // Allow/block filtering applied while `run` populates the cache
    mode: DiscoveryMode,
}
/// Caching discovery interface
///
/// Builds an internal map of its cache
impl Discovery {
    /// Construct a caching api discovery client
    #[must_use]
    pub fn new(client: Client) -> Self {
        let groups = HashMap::new();
        // default: block nothing, i.e. scan every group
        let mode = DiscoveryMode::Block(vec![]);
        Self { client, groups, mode }
    }
    /// Configure the discovery client to only look for the listed apigroups
    #[must_use]
    pub fn filter(mut self, allow: &[&str]) -> Self {
        self.mode = DiscoveryMode::Allow(allow.iter().map(ToString::to_string).collect());
        self
    }
    /// Configure the discovery client to look for all apigroups except the listed ones
    #[must_use]
    pub fn exclude(mut self, deny: &[&str]) -> Self {
        self.mode = DiscoveryMode::Block(deny.iter().map(ToString::to_string).collect());
        self
    }
    /// Runs or re-runs the configured discovery algorithm and updates/populates the cache
    ///
    /// The cache is cleared when this is started. By default, every api group found is checked,
    /// causing `N+2` queries to the api server (where `N` is number of api groups).
    ///
    /// ```no_run
    /// use kube::{Client, api::{Api, DynamicObject}, discovery::{Discovery, verbs, Scope}, ResourceExt};
    /// #[tokio::main]
    /// async fn main() -> Result<(), Box<dyn std::error::Error>> {
    ///     let client = Client::try_default().await?;
    ///     let discovery = Discovery::new(client.clone()).run().await?;
    ///     for group in discovery.groups() {
    ///         for (ar, caps) in group.recommended_resources() {
    ///             if !caps.supports_operation(verbs::LIST) {
    ///                 continue;
    ///             }
    ///             let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
    ///             // can now api.list() to emulate kubectl get all --all
    ///             for obj in api.list(&Default::default()).await? {
    ///                 println!("{} {}: {}", ar.api_version, ar.kind, obj.name_any());
    ///             }
    ///         }
    ///     }
    ///     Ok(())
    /// }
    /// ```
    /// See a bigger example in [examples/dynamic.api](https://github.com/kube-rs/kube/blob/main/examples/dynamic_api.rs)
    pub async fn run(mut self) -> Result<Self> {
        self.groups.clear();
        let api_groups = self.client.list_api_groups().await?;
        // query regular groups + crds under /apis
        for g in api_groups.groups {
            let key = g.name.clone();
            if self.mode.is_queryable(&key) {
                let apigroup = ApiGroup::query_apis(&self.client, g).await?;
                self.groups.insert(key, apigroup);
            }
        }
        // query core versions under /api
        let corekey = ApiGroup::CORE_GROUP.to_string();
        if self.mode.is_queryable(&corekey) {
            let coreapis = self.client.list_core_api_versions().await?;
            let apigroup = ApiGroup::query_core(&self.client, coreapis).await?;
            self.groups.insert(corekey, apigroup);
        }
        Ok(self)
    }
}
/// Interface to the Discovery cache
impl Discovery {
    /// Returns iterator over all served groups
    pub fn groups(&self) -> impl Iterator<Item = &ApiGroup> {
        self.groups.values()
    }
    /// Returns a sorted vector of all served groups
    ///
    /// This vector is in kubectl's normal alphabetical group order
    pub fn groups_alphabetical(&self) -> Vec<&ApiGroup> {
        // collect to maintain kubectl order of groups
        let mut sorted: Vec<&ApiGroup> = self.groups().collect();
        sorted.sort_by_key(|g| g.name());
        sorted
    }
    /// Returns the [`ApiGroup`] for a given group if served
    pub fn get(&self, group: &str) -> Option<&ApiGroup> {
        self.groups.get(group)
    }
    /// Check if a group is served by the apiserver
    pub fn has_group(&self, group: &str) -> bool {
        self.groups.contains_key(group)
    }
    /// Finds an [`ApiResource`] and its [`ApiCapabilities`] after discovery by matching a GVK
    ///
    /// This is for quick extraction after having done a complete discovery.
    /// If you are only interested in a single kind, consider [`oneshot::pinned_kind`](crate::discovery::pinned_kind).
    pub fn resolve_gvk(&self, gvk: &GroupVersionKind) -> Option<(ApiResource, ApiCapabilities)> {
        let group = self.get(&gvk.group)?;
        group
            .versioned_resources(&gvk.version)
            .into_iter()
            .find(|(ar, _)| ar.kind == gvk.kind)
    }
}

View File

@@ -0,0 +1,106 @@
//! single use discovery utils
//!
//! These helpers provides a simpler discovery interface, but do not offer any built-in caching.
//!
//! This can provide specific information for 3 cases:
//! - single kind in a particular group at a pinned version via [`oneshot::pinned_kind`]
//! - all kinds in a group at pinned version: "apiregistration.k8s.io/v1" via [`oneshot::pinned_group`]
//! - all kinds/version combinations in a group: "apiregistration.k8s.io" via [`oneshot::group`]
//!
//! [`oneshot::group`]: crate::discovery::group
//! [`oneshot::pinned_group`]: crate::discovery::pinned_group
//! [`oneshot::pinned_kind`]: crate::discovery::pinned_kind
use super::ApiGroup;
use crate::{error::DiscoveryError, Client, Error, Result};
use kube_core::{
discovery::{ApiCapabilities, ApiResource},
gvk::{GroupVersion, GroupVersionKind},
};
/// Discovers all APIs available under a certain group at all versions
///
/// This is recommended if you work with one group, but do not want to pin the version
/// of the apigroup. You can instead work with a recommended version (preferred or latest).
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let client = Client::try_default().await?;
///     let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
///     let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
///     let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
///     for service in api.list(&Default::default()).await? {
///         println!("Found APIService: {}", service.name_any());
///     }
///     Ok(())
/// }
/// ```
pub async fn group(client: &Client, apigroup: &str) -> Result<ApiGroup> {
    // the core group is served under /api rather than /apis
    if apigroup == ApiGroup::CORE_GROUP {
        let coreapis = client.list_core_api_versions().await?;
        return ApiGroup::query_core(client, coreapis).await;
    }
    let api_groups = client.list_api_groups().await?;
    if let Some(g) = api_groups.groups.into_iter().find(|g| g.name == apigroup) {
        return ApiGroup::query_apis(client, g).await;
    }
    Err(Error::Discovery(DiscoveryError::MissingApiGroup(
        apigroup.to_string(),
    )))
}
/// Discovers all APIs available under a certain group at a pinned version
///
/// This is a cheaper variant of [`oneshot::group`](crate::discovery::oneshot::group) when you know what version you want.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let client = Client::try_default().await?;
///     let gv = "apiregistration.k8s.io/v1".parse()?;
///     let apigroup = discovery::pinned_group(&client, &gv).await?;
///     let (ar, caps) = apigroup.recommended_kind("APIService").unwrap();
///     let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
///     for service in api.list(&Default::default()).await? {
///         println!("Found APIService: {}", service.name_any());
///     }
///     Ok(())
/// }
/// ```
///
/// While this example only uses a single kind, this type of discovery works best when you need more
/// than a single `kind`.
/// If you only need a single `kind`, [`oneshot::pinned_kind`](crate::discovery::pinned_kind) is the best solution.
pub async fn pinned_group(client: &Client, gv: &GroupVersion) -> Result<ApiGroup> {
    // Thin wrapper: queries only the pinned group/version (a single API request).
    ApiGroup::query_gv(client, gv).await
}
/// Single discovery for a single GVK
///
/// This is an optimized function that avoids the unnecessary listing of api groups.
/// It merely requests the api group resources for the specified apigroup, and then resolves the kind.
///
/// ```no_run
/// use kube::{Client, api::{Api, DynamicObject, GroupVersionKind}, discovery, ResourceExt};
/// #[tokio::main]
/// async fn main() -> Result<(), Box<dyn std::error::Error>> {
///     let client = Client::try_default().await?;
///     let gvk = GroupVersionKind::gvk("apiregistration.k8s.io", "v1", "APIService");
///     let (ar, caps) = discovery::pinned_kind(&client, &gvk).await?;
///     let api: Api<DynamicObject> = Api::all_with(client.clone(), &ar);
///     for service in api.list(&Default::default()).await? {
///         println!("Found APIService: {}", service.name_any());
///     }
///     Ok(())
/// }
/// ```
pub async fn pinned_kind(client: &Client, gvk: &GroupVersionKind) -> Result<(ApiResource, ApiCapabilities)> {
    // Thin wrapper: resolves the kind from a single group-resources request.
    ApiGroup::query_gvk(client, gvk).await
}

View File

@@ -0,0 +1,88 @@
//! Abstractions on top of k8s_openapi::apimachinery::pkg::apis::meta::v1
use crate::{error::DiscoveryError, Error, Result};
use k8s_openapi::apimachinery::pkg::apis::meta::v1::{APIResource, APIResourceList};
use kube_core::{
discovery::{ApiCapabilities, ApiResource, Scope},
gvk::{GroupVersion, ParseGroupVersionError},
};
/// Creates an `ApiResource` from a `meta::v1::APIResource` instance + its groupversion.
///
/// Returns a `ParseGroupVersionError` if the passed group_version cannot be parsed
/// (the caller maps this into a `DiscoveryError`).
pub(crate) fn parse_apiresource(
    ar: &APIResource,
    group_version: &str,
) -> Result<ApiResource, ParseGroupVersionError> {
    let gv: GroupVersion = group_version.parse()?;
    // NB: not safe to use this with subresources (they don't have api_versions)
    Ok(ApiResource {
        // group/version may be omitted on the entry; fall back to the list's groupversion
        group: ar.group.clone().unwrap_or_else(|| gv.group.clone()),
        version: ar.version.clone().unwrap_or_else(|| gv.version.clone()),
        api_version: gv.api_version(),
        // `kind` is already a String; clone it instead of round-tripping through Display
        kind: ar.kind.clone(),
        plural: ar.name.clone(),
    })
}
/// Creates `ApiCapabilities` from a `meta::v1::APIResourceList` instance + a name from the list.
///
/// Returns a `DiscoveryError` if the list does not contain resource with passed `name`.
pub(crate) fn parse_apicapabilities(list: &APIResourceList, name: &str) -> Result<ApiCapabilities> {
    let ar = list
        .resources
        .iter()
        .find(|r| r.name == name)
        .ok_or_else(|| Error::Discovery(DiscoveryError::MissingResource(name.into())))?;
    // discovery reports scope via the `namespaced` boolean
    let scope = if ar.namespaced {
        Scope::Namespaced
    } else {
        Scope::Cluster
    };
    // subresources appear as sibling entries named "<name>/<subresource>"
    let subresource_name_prefix = format!("{name}/");
    let mut subresources = vec![];
    for res in &list.resources {
        if let Some(subresource_name) = res.name.strip_prefix(&subresource_name_prefix) {
            let mut api_resource =
                parse_apiresource(res, &list.group_version).map_err(|ParseGroupVersionError(s)| {
                    Error::Discovery(DiscoveryError::InvalidGroupVersion(s))
                })?;
            // expose the bare subresource name (e.g. "status"), not "pods/status"
            api_resource.plural = subresource_name.to_string();
            let caps = parse_apicapabilities(list, &res.name)?; // NB: recursion
            subresources.push((api_resource, caps));
        }
    }
    Ok(ApiCapabilities {
        scope,
        subresources,
        operations: ar.verbs.clone(),
    })
}
/// Internal resource information and capabilities for a particular ApiGroup at a particular version
pub(crate) struct GroupVersionData {
    /// Pinned api version (e.g. "v1", "v1beta1")
    pub(crate) version: String,
    /// Pair of dynamic resource info along with what it supports.
    /// Subresources (names containing '/') are excluded; see `GroupVersionData::new`.
    pub(crate) resources: Vec<(ApiResource, ApiCapabilities)>,
}
impl GroupVersionData {
/// Given an APIResourceList, extract all information for a given version
pub(crate) fn new(version: String, list: APIResourceList) -> Result<Self> {
let mut resources = vec![];
for res in &list.resources {
// skip subresources
if res.name.contains('/') {
continue;
}
// NB: these two should be infallible from discovery when k8s api is well-behaved, but..
let ar = parse_apiresource(res, &list.group_version).map_err(|ParseGroupVersionError(s)| {
Error::Discovery(DiscoveryError::InvalidGroupVersion(s))
})?;
let caps = parse_apicapabilities(&list, &res.name)?;
resources.push((ar, caps));
}
Ok(GroupVersionData { version, resources })
}
}

135
vendor/kube-client/src/error.rs vendored Normal file
View File

@@ -0,0 +1,135 @@
//! Error handling and error types
use http::Uri;
use thiserror::Error;
pub use kube_core::ErrorResponse;
/// Possible errors from the [`Client`](crate::Client)
#[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
#[derive(Error, Debug)]
pub enum Error {
    /// ApiError for when things fail
    ///
    /// This can be parsed into as an error handling fallback.
    /// It's also used in `WatchEvent` from watch calls.
    ///
    /// It's quite common to get a `410 Gone` when the `resourceVersion` is too old.
    #[error("ApiError: {0} ({0:?})")]
    Api(#[source] ErrorResponse),
    /// Hyper error, raised by the underlying HTTP implementation
    #[cfg(feature = "client")]
    #[error("HyperError: {0}")]
    HyperError(#[source] hyper::Error),
    /// Service error, raised by the tower middleware stack
    #[cfg(feature = "client")]
    #[error("ServiceError: {0}")]
    Service(#[source] tower::BoxError),
    /// Returned when the configured proxy uses an unsupported protocol.
    #[error("configured proxy {proxy_url:?} uses an unsupported protocol")]
    ProxyProtocolUnsupported {
        /// The URL of the proxy.
        proxy_url: Uri,
    },
    /// Returned when the configured proxy uses a protocol that requires a Cargo feature that is currently disabled
    #[error("configured proxy {proxy_url:?} requires the disabled feature {protocol_feature:?}")]
    ProxyProtocolDisabled {
        /// The URL of the proxy.
        proxy_url: Uri,
        /// The Cargo feature that the proxy protocol requires.
        protocol_feature: &'static str,
    },
    /// UTF-8 Error when decoding response bytes as text
    #[error("UTF-8 Error: {0}")]
    FromUtf8(#[source] std::string::FromUtf8Error),
    /// Returned when failed to find a newline character within max length.
    /// Only returned by `Client::request_events` and this should never happen as
    /// the max is `usize::MAX`.
    #[error("Error finding newline character")]
    LinesCodecMaxLineLengthExceeded,
    /// Returned on `std::io::Error` when reading event stream.
    #[error("Error reading events stream: {0}")]
    ReadEvents(#[source] std::io::Error),
    /// Http based error from the `http` crate
    #[error("HttpError: {0}")]
    HttpError(#[source] http::Error),
    /// Common error case when requesting parsing into own structs
    #[error("Error deserializing response: {0}")]
    SerdeError(#[source] serde_json::Error),
    /// Failed to build request
    #[error("Failed to build request: {0}")]
    BuildRequest(#[source] kube_core::request::Error),
    /// Failed to infer config
    #[error("Failed to infer configuration: {0}")]
    InferConfig(#[source] crate::config::InferConfigError),
    /// Discovery errors
    #[error("Error from discovery: {0}")]
    Discovery(#[source] DiscoveryError),
    /// Errors from OpenSSL TLS
    #[cfg(feature = "openssl-tls")]
    #[cfg_attr(docsrs, doc(cfg(feature = "openssl-tls")))]
    #[error("openssl tls error: {0}")]
    OpensslTls(#[source] crate::client::OpensslTlsError),
    /// Errors from Rustls TLS
    #[cfg(feature = "rustls-tls")]
    #[cfg_attr(docsrs, doc(cfg(feature = "rustls-tls")))]
    #[error("rustls tls error: {0}")]
    RustlsTls(#[source] crate::client::RustlsTlsError),
    /// Missing TLS stacks when TLS is required
    #[error("TLS required but no TLS stack selected")]
    TlsRequired,
    /// Failed to upgrade to a WebSocket connection
    #[cfg(feature = "ws")]
    #[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
    #[error("failed to upgrade to a WebSocket connection: {0}")]
    UpgradeConnection(#[source] crate::client::UpgradeConnectionError),
    /// Errors related to client auth
    #[cfg(feature = "client")]
    #[cfg_attr(docsrs, doc(cfg(feature = "client")))]
    #[error("auth error: {0}")]
    Auth(#[source] crate::client::AuthError),
    /// Error resolving resource reference
    #[cfg(feature = "unstable-client")]
    #[cfg_attr(docsrs, doc(cfg(feature = "unstable-client")))]
    #[error("Reference resolve error: {0}")]
    RefResolve(String),
}
#[derive(Error, Debug)]
/// Possible errors when using API [discovery](crate::discovery)
pub enum DiscoveryError {
    /// Invalid GroupVersion string (could not be parsed)
    #[error("Invalid GroupVersion: {0}")]
    InvalidGroupVersion(String),
    /// Requested Kind was not found in the queried group
    #[error("Missing Kind: {0}")]
    MissingKind(String),
    /// Requested ApiGroup is not served by the apiserver
    #[error("Missing Api Group: {0}")]
    MissingApiGroup(String),
    /// Named resource was not present in the APIResourceList
    #[error("Missing Resource: {0}")]
    MissingResource(String),
    /// ApiGroup served with no versions
    #[error("Empty Api Group: {0}")]
    EmptyApiGroup(String),
}

929
vendor/kube-client/src/lib.rs vendored Normal file
View File

@@ -0,0 +1,929 @@
//! Crate for interacting with the Kubernetes API
//!
//! This crate includes the tools for manipulating Kubernetes resources as
//! well as keeping track of those resources as they change over time
//!
//! # Example
//!
//! The following example will create a [`Pod`](k8s_openapi::api::core::v1::Pod)
//! and then watch for it to become available using a manual [`Api::watch`] call.
//!
//! ```rust,no_run
//! use futures::{StreamExt, TryStreamExt};
//! use kube_client::api::{Api, ResourceExt, ListParams, PatchParams, Patch};
//! use kube_client::Client;
//! use k8s_openapi::api::core::v1::Pod;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//! // Read the environment to find config for kube client.
//! // Note that this tries an in-cluster configuration first,
//! // then falls back on a kubeconfig file.
//! let client = Client::try_default().await?;
//!
//! // Interact with pods in the configured namespace with the typed interface from k8s-openapi
//! let pods: Api<Pod> = Api::default_namespaced(client);
//!
//! // Create a Pod (cheating here with json, but it has to validate against the type):
//! let patch: Pod = serde_json::from_value(serde_json::json!({
//! "apiVersion": "v1",
//! "kind": "Pod",
//! "metadata": {
//! "name": "my-pod"
//! },
//! "spec": {
//! "containers": [
//! {
//! "name": "my-container",
//! "image": "myregistry.azurecr.io/hello-world:v1",
//! },
//! ],
//! }
//! }))?;
//!
//! // Apply the Pod via server-side apply
//! let params = PatchParams::apply("myapp");
//! let result = pods.patch("my-pod", &params, &Patch::Apply(&patch)).await?;
//!
//! // List pods in the configured namespace
//! for p in pods.list(&ListParams::default()).await? {
//! println!("found pod {}", p.name_any());
//! }
//!
//! Ok(())
//! }
//! ```
//!
//! For more details, see:
//!
//! - [`Client`](crate::client) for the extensible Kubernetes client
//! - [`Config`](crate::config) for the Kubernetes config abstraction
//! - [`Api`](crate::Api) for the generic api methods available on Kubernetes resources
//! - [k8s-openapi](https://docs.rs/k8s-openapi) for how to create typed kubernetes objects directly
#![cfg_attr(docsrs, feature(doc_cfg))]
// Nightly clippy (0.1.64) considers Drop a side effect, see https://github.com/rust-lang/rust-clippy/issues/9608
#![allow(clippy::unnecessary_lazy_evaluations)]
// Gate items behind the "client" feature and advertise that requirement on docs.rs.
macro_rules! cfg_client {
    ($($item:item)*) => {
        $(
            #[cfg_attr(docsrs, doc(cfg(feature = "client")))]
            #[cfg(feature = "client")]
            $item
        )*
    }
}
// Gate items behind the "config" feature and advertise that requirement on docs.rs.
macro_rules! cfg_config {
    ($($item:item)*) => {
        $(
            #[cfg_attr(docsrs, doc(cfg(feature = "config")))]
            #[cfg(feature = "config")]
            $item
        )*
    }
}
// Gate error items behind either the "config" or "client" feature (they are shared by both).
macro_rules! cfg_error {
    ($($item:item)*) => {
        $(
            #[cfg_attr(docsrs, doc(cfg(any(feature = "config", feature = "client"))))]
            #[cfg(any(feature = "config", feature = "client"))]
            $item
        )*
    }
}
cfg_client! {
pub mod api;
pub mod discovery;
pub mod client;
#[doc(inline)]
pub use api::Api;
#[doc(inline)]
pub use client::Client;
#[doc(inline)]
pub use discovery::Discovery;
}
cfg_config! {
pub mod config;
#[doc(inline)]
pub use config::Config;
}
cfg_error! {
    pub mod error;
    #[doc(inline)] pub use error::Error;
    /// Convenient alias for `Result<T, Error>`
    pub type Result<T, E = Error> = std::result::Result<T, E>;
}
pub use crate::core::{CustomResourceExt, Resource, ResourceExt};
/// Re-exports from kube_core
pub use kube_core as core;
// Tests that require a cluster and the complete feature set
// Can be run with `cargo test -p kube-client --lib features=rustls-tls,ws -- --ignored`
#[cfg(all(feature = "client", feature = "config"))]
#[cfg(test)]
#[allow(unused_imports)] // varying test imports depending on feature
mod test {
use crate::{
api::{AttachParams, AttachedProcess},
client::ConfigExt,
Api, Client, Config, ResourceExt,
};
use futures::{AsyncBufRead, AsyncBufReadExt, StreamExt, TryStreamExt};
use hyper::Uri;
use k8s_openapi::api::core::v1::{EphemeralContainer, Pod, PodSpec};
use kube_core::{
params::{DeleteParams, Patch, PatchParams, PostParams, WatchParams},
response::StatusSummary,
};
use serde_json::json;
use tower::ServiceBuilder;
// hard disabled test atm due to k3d rustls issues: https://github.com/kube-rs/kube/issues?q=is%3Aopen+is%3Aissue+label%3Arustls
#[allow(dead_code)]
// #[tokio::test]
#[ignore = "needs cluster (lists pods)"]
#[cfg(feature = "rustls-tls")]
async fn custom_client_rustls_configuration() -> Result<(), Box<dyn std::error::Error>> {
    use hyper_util::rt::TokioExecutor;
    // build a client by hand: inferred config + rustls connector + base-uri middleware
    let config = Config::infer().await?;
    let https = config.rustls_https_connector()?;
    let service = ServiceBuilder::new()
        .layer(config.base_uri_layer())
        .service(hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build(https));
    let client = Client::new(service, config.default_namespace);
    // smoke test: a simple list call proves the custom stack works end-to-end
    let pods: Api<Pod> = Api::default_namespaced(client);
    pods.list(&Default::default()).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists pods)"]
#[cfg(feature = "openssl-tls")]
async fn custom_client_openssl_tls_configuration() -> Result<(), Box<dyn std::error::Error>> {
    use hyper_util::rt::TokioExecutor;
    // build a client by hand: inferred config + openssl connector + base-uri middleware
    let config = Config::infer().await?;
    let connector = config.openssl_https_connector()?;
    let hyper_client = hyper_util::client::legacy::Client::builder(TokioExecutor::new()).build(connector);
    let service = ServiceBuilder::new()
        .layer(config.base_uri_layer())
        .service(hyper_client);
    let client = Client::new(service, config.default_namespace);
    // smoke test: a simple list call proves the custom stack works end-to-end
    let pods: Api<Pod> = Api::default_namespaced(client);
    pods.list(&Default::default()).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (lists api resources)"]
#[cfg(feature = "client")]
async fn group_discovery_oneshot() -> Result<(), Box<dyn std::error::Error>> {
    use crate::{core::DynamicObject, discovery};
    let client = Client::try_default().await?;
    // discover the apiregistration group and list its APIService objects dynamically
    let apigroup = discovery::group(&client, "apiregistration.k8s.io").await?;
    let (ar, _caps) = apigroup.recommended_kind("APIService").unwrap();
    let api = Api::<DynamicObject>::all_with(client.clone(), &ar);
    api.list(&Default::default()).await?;
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create and edit a pod)"]
async fn pod_can_use_core_apis() -> Result<(), Box<dyn std::error::Error>> {
    use kube::api::{DeleteParams, ListParams, Patch, PatchParams, PostParams, WatchEvent};
    let client = Client::try_default().await?;
    let pods: Api<Pod> = Api::default_namespaced(client);
    // create busybox pod that's alive for at most 30s
    let p: Pod = serde_json::from_value(json!({
        "apiVersion": "v1",
        "kind": "Pod",
        "metadata": {
            "name": "busybox-kube1",
            "labels": { "app": "kube-rs-test" },
        },
        "spec": {
            "terminationGracePeriodSeconds": 1,
            "restartPolicy": "Never",
            "containers": [{
                "name": "busybox",
                "image": "busybox:1.34.1",
                "command": ["sh", "-c", "sleep 30"],
            }],
        }
    }))?;
    let pp = PostParams::default();
    match pods.create(&pp, &p).await {
        Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
        Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
        Err(e) => return Err(e.into()), // any other case if a failure
    }
    // Manual watch-api for it to become ready
    // NB: don't do this; using conditions (see pod_api example) is easier and less error prone
    let wp = WatchParams::default()
        .fields(&format!("metadata.name={}", "busybox-kube1"))
        .timeout(15);
    let mut stream = pods.watch(&wp, "0").await?.boxed();
    while let Some(ev) = stream.try_next().await? {
        // can debug format watch event
        let _ = format!("we: {ev:?}");
        match ev {
            WatchEvent::Modified(o) => {
                let s = o.status.as_ref().expect("status exists on pod");
                let phase = s.phase.clone().unwrap_or_default();
                if phase == "Running" {
                    break;
                }
            }
            WatchEvent::Error(e) => panic!("watch error: {e}"),
            _ => {}
        }
    }
    // Verify we can get it
    let mut pod = pods.get("busybox-kube1").await?;
    // fix: assert on the fetched `pod` rather than the locally-built `p`
    // (asserting on `p` was a tautology that never verified the GET response)
    assert_eq!(pod.spec.as_ref().unwrap().containers[0].name, "busybox");
    // verify replace with explicit resource version
    // NB: don't do this; use server side apply
    {
        assert!(pod.resource_version().is_some());
        pod.spec.as_mut().unwrap().active_deadline_seconds = Some(5);
        let pp = PostParams::default();
        let patched_pod = pods.replace("busybox-kube1", &pp, &pod).await?;
        assert_eq!(patched_pod.spec.unwrap().active_deadline_seconds, Some(5));
    }
    // Delete it
    let dp = DeleteParams::default();
    pods.delete("busybox-kube1", &dp).await?.map_left(|pdel| {
        assert_eq!(pdel.name_unchecked(), "busybox-kube1");
    });
    Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create and attach to a pod)"]
#[cfg(feature = "ws")]
async fn pod_can_exec_and_write_to_stdin() -> Result<(), Box<dyn std::error::Error>> {
use crate::api::{DeleteParams, ListParams, Patch, PatchParams, WatchEvent};
use tokio::io::AsyncWriteExt;
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 30s
let p: Pod = serde_json::from_value(json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube2",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 30"],
}],
}
}))?;
match pods.create(&Default::default(), &p).await {
Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
Err(e) => return Err(e.into()), // any other case if a failure
}
// Manual watch-api for it to become ready
// NB: don't do this; using conditions (see pod_api example) is easier and less error prone
let wp = WatchParams::default()
.fields(&format!("metadata.name={}", "busybox-kube2"))
.timeout(15);
let mut stream = pods.watch(&wp, "0").await?.boxed();
while let Some(ev) = stream.try_next().await? {
match ev {
WatchEvent::Modified(o) => {
let s = o.status.as_ref().expect("status exists on pod");
let phase = s.phase.clone().unwrap_or_default();
if phase == "Running" {
break;
}
}
WatchEvent::Error(e) => panic!("watch error: {e}"),
_ => {}
}
}
// Verify exec works and we can get the output
{
let mut attached = pods
.exec(
"busybox-kube2",
vec!["sh", "-c", "for i in $(seq 1 3); do echo $i; done"],
&AttachParams::default().stderr(false),
)
.await?;
let stdout = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
let out = stdout
.filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) })
.collect::<Vec<_>>()
.await
.join("");
attached.join().await.unwrap();
assert_eq!(out.lines().count(), 3);
assert_eq!(out, "1\n2\n3\n");
}
// Verify we read from stdout after stdin is closed.
{
let name = "busybox-kube2";
let command = vec!["sh", "-c", "sleep 2; echo test string 2"];
let ap = AttachParams::default().stdin(true).stderr(false);
// Make a connection so we can determine if the K8s cluster supports stream closing.
let mut req = pods.request.exec(name, command.clone(), &ap)?;
req.extensions_mut().insert("exec");
let stream = pods.client.connect(req).await?;
// This only works is the cluster supports protocol version v5.channel.k8s.io
// Skip for older protocols.
if stream.supports_stream_close() {
let mut attached = pods.exec(name, command, &ap).await?;
let mut stdin_writer = attached.stdin().unwrap();
let mut stdout_stream = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
stdin_writer.write_all(b"this will be ignored\n").await?;
_ = stdin_writer.shutdown().await;
let next_stdout = stdout_stream.next();
let stdout = String::from_utf8(next_stdout.await.unwrap().unwrap().to_vec()).unwrap();
assert_eq!(stdout, "test string 2\n");
// AttachedProcess resolves with status object.
let status = attached.take_status().unwrap();
if let Some(status) = status.await {
assert_eq!(status.status, Some("Success".to_owned()));
assert_eq!(status.reason, None);
}
}
}
// Verify we can write to Stdin
{
let mut attached = pods
.exec(
"busybox-kube2",
vec!["sh"],
&AttachParams::default().stdin(true).stderr(false),
)
.await?;
let mut stdin_writer = attached.stdin().unwrap();
let mut stdout_stream = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
let next_stdout = stdout_stream.next();
stdin_writer.write_all(b"echo test string 1\n").await?;
let stdout = String::from_utf8(next_stdout.await.unwrap().unwrap().to_vec()).unwrap();
println!("{stdout}");
assert_eq!(stdout, "test string 1\n");
// AttachedProcess resolves with status object.
// Send `exit 1` to get a failure status.
stdin_writer.write_all(b"exit 1\n").await?;
let status = attached.take_status().unwrap();
if let Some(status) = status.await {
println!("{status:?}");
assert_eq!(status.status, Some("Failure".to_owned()));
assert_eq!(status.reason, Some("NonZeroExitCode".to_owned()));
}
}
// Delete it
let dp = DeleteParams::default();
pods.delete("busybox-kube2", &dp).await?.map_left(|pdel| {
assert_eq!(pdel.name_unchecked(), "busybox-kube2");
});
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create and tail logs from a pod)"]
async fn can_get_pod_logs_and_evict() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, EvictParams, ListParams, Patch, PatchParams, WatchEvent},
core::subresource::LogParams,
};
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 30s
let p: Pod = serde_json::from_value(json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube3",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "for i in $(seq 1 5); do echo kube $i; sleep 0.1; done"],
}],
}
}))?;
match pods.create(&Default::default(), &p).await {
Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
Err(e) => return Err(e.into()), // any other case if a failure
}
// Manual watch-api for it to become ready
// NB: don't do this; using conditions (see pod_api example) is easier and less error prone
let wp = WatchParams::default()
.fields(&format!("metadata.name={}", "busybox-kube3"))
.timeout(15);
let mut stream = pods.watch(&wp, "0").await?.boxed();
while let Some(ev) = stream.try_next().await? {
match ev {
WatchEvent::Modified(o) => {
let s = o.status.as_ref().expect("status exists on pod");
let phase = s.phase.clone().unwrap_or_default();
if phase == "Running" {
break;
}
}
WatchEvent::Error(e) => panic!("watch error: {e}"),
_ => {}
}
}
// Get current list of logs
let lp = LogParams {
follow: true,
..LogParams::default()
};
let mut logs_stream = pods.log_stream("busybox-kube3", &lp).await?.lines();
// wait for container to finish
tokio::time::sleep(std::time::Duration::from_secs(2)).await;
let all_logs = pods.logs("busybox-kube3", &Default::default()).await?;
assert_eq!(all_logs, "kube 1\nkube 2\nkube 3\nkube 4\nkube 5\n");
// individual logs may or may not buffer
let mut output = vec![];
while let Some(line) = logs_stream.try_next().await? {
output.push(line);
}
assert_eq!(output, vec!["kube 1", "kube 2", "kube 3", "kube 4", "kube 5"]);
// evict the pod
let ep = EvictParams::default();
let eres = pods.evict("busybox-kube3", &ep).await?;
assert_eq!(eres.code, 201); // created
assert!(eres.is_success());
Ok(())
}
#[tokio::test]
#[ignore = "requires a cluster"]
async fn can_operate_on_pod_metadata() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, EvictParams, ListParams, Patch, PatchParams, WatchEvent},
core::subresource::LogParams,
};
use kube_core::{ObjectList, ObjectMeta, PartialObjectMeta, PartialObjectMetaExt};
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 30s
let p: Pod = serde_json::from_value(json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube-meta",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 30s"],
}],
}
}))?;
match pods.create(&Default::default(), &p).await {
Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
Err(e) => return Err(e.into()), // any other case if a failure
}
// Test we can get a pod as a PartialObjectMeta and convert to
// ObjectMeta
let pod_metadata = pods.get_metadata("busybox-kube-meta").await?;
assert_eq!("busybox-kube-meta", pod_metadata.name_any());
assert_eq!(
Some((&"app".to_string(), &"kube-rs-test".to_string())),
pod_metadata.labels().get_key_value("app")
);
// Test we can get a list of PartialObjectMeta for pods
let p_list = pods.list_metadata(&ListParams::default()).await?;
// Find only pod we are concerned with in this test and fail eagerly if
// name doesn't exist
let pod_metadata = p_list
.items
.into_iter()
.find(|p| p.name_any() == "busybox-kube-meta")
.unwrap();
assert_eq!(
pod_metadata.labels().get("app"),
Some(&"kube-rs-test".to_string())
);
// Attempt to patch pod metadata
let patch = ObjectMeta {
annotations: Some([("test".to_string(), "123".to_string())].into()),
..Default::default()
}
.into_request_partial::<Pod>();
let patchparams = PatchParams::default();
let p_patched = pods
.patch_metadata("busybox-kube-meta", &patchparams, &Patch::Merge(&patch))
.await?;
assert_eq!(p_patched.annotations().get("test"), Some(&"123".to_string()));
assert_eq!(p_patched.types.as_ref().unwrap().kind, "PartialObjectMetadata");
assert_eq!(p_patched.types.as_ref().unwrap().api_version, "meta.k8s.io/v1");
// Clean-up
let dp = DeleteParams::default();
pods.delete("busybox-kube-meta", &dp).await?.map_left(|pdel| {
assert_eq!(pdel.name_any(), "busybox-kube-meta");
});
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster (will create a CertificateSigningRequest)"]
async fn csr_can_be_approved() -> Result<(), Box<dyn std::error::Error>> {
use crate::api::PostParams;
use k8s_openapi::api::certificates::v1::{
CertificateSigningRequest, CertificateSigningRequestCondition, CertificateSigningRequestStatus,
};
let csr_name = "fake";
let dummy_csr: CertificateSigningRequest = serde_json::from_value(json!({
"apiVersion": "certificates.k8s.io/v1",
"kind": "CertificateSigningRequest",
"metadata": { "name": csr_name },
"spec": {
"request": "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1ZqQ0NBVDRDQVFBd0VURVBNQTBHQTFVRUF3d0dZVzVuWld4aE1JSUJJakFOQmdrcWhraUc5dzBCQVFFRgpBQU9DQVE4QU1JSUJDZ0tDQVFFQTByczhJTHRHdTYxakx2dHhWTTJSVlRWMDNHWlJTWWw0dWluVWo4RElaWjBOCnR2MUZtRVFSd3VoaUZsOFEzcWl0Qm0wMUFSMkNJVXBGd2ZzSjZ4MXF3ckJzVkhZbGlBNVhwRVpZM3ExcGswSDQKM3Z3aGJlK1o2MVNrVHF5SVBYUUwrTWM5T1Nsbm0xb0R2N0NtSkZNMUlMRVI3QTVGZnZKOEdFRjJ6dHBoaUlFMwpub1dtdHNZb3JuT2wzc2lHQ2ZGZzR4Zmd4eW8ybmlneFNVekl1bXNnVm9PM2ttT0x1RVF6cXpkakJ3TFJXbWlECklmMXBMWnoyalVnald4UkhCM1gyWnVVV1d1T09PZnpXM01LaE8ybHEvZi9DdS8wYk83c0x0MCt3U2ZMSU91TFcKcW90blZtRmxMMytqTy82WDNDKzBERHk5aUtwbXJjVDBnWGZLemE1dHJRSURBUUFCb0FBd0RRWUpLb1pJaHZjTgpBUUVMQlFBRGdnRUJBR05WdmVIOGR4ZzNvK21VeVRkbmFjVmQ1N24zSkExdnZEU1JWREkyQTZ1eXN3ZFp1L1BVCkkwZXpZWFV0RVNnSk1IRmQycVVNMjNuNVJsSXJ3R0xuUXFISUh5VStWWHhsdnZsRnpNOVpEWllSTmU3QlJvYXgKQVlEdUI5STZXT3FYbkFvczFqRmxNUG5NbFpqdU5kSGxpT1BjTU1oNndLaTZzZFhpVStHYTJ2RUVLY01jSVUyRgpvU2djUWdMYTk0aEpacGk3ZnNMdm1OQUxoT045UHdNMGM1dVJVejV4T0dGMUtCbWRSeEgvbUNOS2JKYjFRQm1HCkkwYitEUEdaTktXTU0xMzhIQXdoV0tkNjVoVHdYOWl4V3ZHMkh4TG1WQzg0L1BHT0tWQW9FNkpsYWFHdTlQVmkKdjlOSjVaZlZrcXdCd0hKbzZXdk9xVlA3SVFjZmg3d0drWm89Ci0tLS0tRU5EIENFUlRJRklDQVRFIFJFUVVFU1QtLS0tLQo=",
"signerName": "kubernetes.io/kube-apiserver-client",
"expirationSeconds": 86400,
"usages": ["client auth"]
}
}))?;
let client = Client::try_default().await?;
let csr: Api<CertificateSigningRequest> = Api::all(client.clone());
assert!(csr.create(&PostParams::default(), &dummy_csr).await.is_ok());
// Patch the approval and approve the CSR
let approval_type = "ApprovedFake";
let csr_status: CertificateSigningRequestStatus = CertificateSigningRequestStatus {
certificate: None,
conditions: Some(vec![CertificateSigningRequestCondition {
type_: approval_type.to_string(),
last_update_time: None,
last_transition_time: None,
message: Some(format!("{} {}", approval_type, "by kube-rs client")),
reason: Some("kube-rsClient".to_string()),
status: "True".to_string(),
}]),
};
let csr_status_patch = Patch::Merge(serde_json::json!({ "status": csr_status }));
let _ = csr
.patch_approval(csr_name, &Default::default(), &csr_status_patch)
.await?;
let csr_after_approval = csr.get_approval(csr_name).await?;
assert_eq!(
csr_after_approval
.status
.as_ref()
.unwrap()
.conditions
.as_ref()
.unwrap()[0]
.type_,
approval_type.to_string()
);
csr.delete(csr_name, &DeleteParams::default()).await?;
Ok(())
}
#[tokio::test]
#[ignore = "needs cluster for ephemeral containers operations"]
async fn can_operate_on_ephemeral_containers() -> Result<(), Box<dyn std::error::Error>> {
let client = Client::try_default().await?;
// Ephemeral containers were stabilized in Kubernetes v1.25.
// This test therefore exits early if the current cluster version is older than v1.25.
let api_version = client.apiserver_version().await?;
if api_version.major.parse::<i32>()? < 1 || api_version.minor.parse::<i32>()? < 25 {
return Ok(());
}
let pod: Pod = serde_json::from_value(serde_json::json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "ephemeral-container-test",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 2"],
}],
}
}))?;
let pod_name = pod.name_any();
let pods = Api::<Pod>::default_namespaced(client);
// If cleanup failed and a pod already exists, we attempt to remove it
// before proceeding. This is important as ephemeral containers can't
// be removed from a Pod's spec. Therefore this test must start with a fresh
// Pod every time.
let _ = pods
.delete(&pod.name_any(), &DeleteParams::default())
.await
.map(|v| v.map_left(|pdel| assert_eq!(pdel.name_any(), pod.name_any())));
// Ephemeral containes can only be applied to a running pod, so one must
// be created before any operations are tested.
match pods.create(&Default::default(), &pod).await {
Ok(o) => assert_eq!(pod.name_unchecked(), o.name_unchecked()),
Err(e) => return Err(e.into()), // any other case if a failure
}
let current_ephemeral_containers = pods
.get_ephemeral_containers(&pod.name_any())
.await?
.spec
.unwrap()
.ephemeral_containers;
// We expect no ephemeral containers initially, get_ephemeral_containers should
// reflect that.
assert_eq!(current_ephemeral_containers, None);
let mut busybox_eph: EphemeralContainer = serde_json::from_value(json!(
{
"name": "myephemeralcontainer1",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 2"],
}
))?;
// Attempt to replace ephemeral containers.
let patch: Pod = serde_json::from_value(json!({
"metadata": { "name": pod_name },
"spec":{ "ephemeralContainers": [ busybox_eph ] }
}))?;
let current_containers = pods
.replace_ephemeral_containers(&pod_name, &PostParams::default(), &patch)
.await?
.spec
.unwrap()
.ephemeral_containers
.expect("could find ephemeral container");
// Note that we can't compare the whole ephemeral containers object, as some fields
// are set by the cluster. We therefore compare the fields specified in the patch.
assert_eq!(current_containers.len(), 1);
assert_eq!(current_containers[0].name, busybox_eph.name);
assert_eq!(current_containers[0].image, busybox_eph.image);
assert_eq!(current_containers[0].command, busybox_eph.command);
// Attempt to patch ephemeral containers.
// The new ephemeral container will have different values from the
// first to ensure we can test for its presence.
busybox_eph = serde_json::from_value(json!(
{
"name": "myephemeralcontainer2",
"image": "busybox:1.35.0",
"command": ["sh", "-c", "sleep 1"],
}
))?;
let patch: Pod =
serde_json::from_value(json!({ "spec": { "ephemeralContainers": [ busybox_eph ] }}))?;
let current_containers = pods
.patch_ephemeral_containers(&pod_name, &PatchParams::default(), &Patch::Strategic(patch))
.await?
.spec
.unwrap()
.ephemeral_containers
.expect("could find ephemeral container");
// There should only be 2 ephemeral containers at this point,
// one from each patch
assert_eq!(current_containers.len(), 2);
let new_container = current_containers
.iter()
.find(|c| c.name == busybox_eph.name)
.expect("could find myephemeralcontainer2");
// Note that we can't compare the whole ephemeral container object, as some fields
// get set in the cluster. We therefore compare the fields specified in the patch.
assert_eq!(new_container.image, busybox_eph.image);
assert_eq!(new_container.command, busybox_eph.command);
// Attempt to get ephemeral containers.
let expected_containers = current_containers;
let current_containers = pods
.get_ephemeral_containers(&pod.name_any())
.await?
.spec
.unwrap()
.ephemeral_containers
.unwrap();
assert_eq!(current_containers, expected_containers);
pods.delete(&pod.name_any(), &DeleteParams::default())
.await?
.map_left(|pdel| {
assert_eq!(pdel.name_any(), pod.name_any());
});
Ok(())
}
#[tokio::test]
#[ignore = "needs kubelet debug methods"]
#[cfg(feature = "kubelet-debug")]
async fn pod_can_exec_and_write_to_stdin_from_node_proxy() -> Result<(), Box<dyn std::error::Error>> {
use crate::{
api::{DeleteParams, ListParams, Patch, PatchParams, WatchEvent},
core::kubelet_debug::KubeletDebugParams,
};
let client = Client::try_default().await?;
let pods: Api<Pod> = Api::default_namespaced(client);
// create busybox pod that's alive for at most 30s
let p: Pod = serde_json::from_value(json!({
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": "busybox-kube2",
"labels": { "app": "kube-rs-test" },
},
"spec": {
"terminationGracePeriodSeconds": 1,
"restartPolicy": "Never",
"containers": [{
"name": "busybox",
"image": "busybox:1.34.1",
"command": ["sh", "-c", "sleep 30"],
}],
}
}))?;
match pods.create(&Default::default(), &p).await {
Ok(o) => assert_eq!(p.name_unchecked(), o.name_unchecked()),
Err(crate::Error::Api(ae)) => assert_eq!(ae.code, 409), // if we failed to clean-up
Err(e) => return Err(e.into()), // any other case if a failure
}
// Manual watch-api for it to become ready
// NB: don't do this; using conditions (see pod_api example) is easier and less error prone
let wp = WatchParams::default()
.fields(&format!("metadata.name={}", "busybox-kube2"))
.timeout(15);
let mut stream = pods.watch(&wp, "0").await?.boxed();
while let Some(ev) = stream.try_next().await? {
match ev {
WatchEvent::Modified(o) => {
let s = o.status.as_ref().expect("status exists on pod");
let phase = s.phase.clone().unwrap_or_default();
if phase == "Running" {
break;
}
}
WatchEvent::Error(e) => panic!("watch error: {e}"),
_ => {}
}
}
let mut config = Config::infer().await?;
config.accept_invalid_certs = true;
config.cluster_url = "https://localhost:10250".to_string().parse::<Uri>().unwrap();
let kubelet_client: Client = config.try_into()?;
// Verify exec works and we can get the output
{
let mut attached = kubelet_client
.kubelet_node_exec(
&KubeletDebugParams {
name: "busybox-kube2",
namespace: "default",
..Default::default()
},
"busybox",
vec!["sh", "-c", "for i in $(seq 1 3); do echo $i; done"],
&AttachParams::default().stderr(false),
)
.await?;
let stdout = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
let out = stdout
.filter_map(|r| async { r.ok().and_then(|v| String::from_utf8(v.to_vec()).ok()) })
.collect::<Vec<_>>()
.await
.join("");
attached.join().await.unwrap();
assert_eq!(out.lines().count(), 3);
assert_eq!(out, "1\n2\n3\n");
}
// Verify we can write to Stdin
{
use tokio::io::AsyncWriteExt;
let mut attached = kubelet_client
.kubelet_node_exec(
&KubeletDebugParams {
name: "busybox-kube2",
namespace: "default",
..Default::default()
},
"busybox",
vec!["sh"],
&AttachParams::default().stdin(true).stderr(false),
)
.await?;
let mut stdin_writer = attached.stdin().unwrap();
let mut stdout_stream = tokio_util::io::ReaderStream::new(attached.stdout().unwrap());
let next_stdout = stdout_stream.next();
stdin_writer.write_all(b"echo test string 1\n").await?;
let stdout = String::from_utf8(next_stdout.await.unwrap().unwrap().to_vec()).unwrap();
println!("{stdout}");
assert_eq!(stdout, "test string 1\n");
// AttachedProcess resolves with status object.
// Send `exit 1` to get a failure status.
stdin_writer.write_all(b"exit 1\n").await?;
let status = attached.take_status().unwrap();
if let Some(status) = status.await {
println!("{status:?}");
assert_eq!(status.status, Some("Failure".to_owned()));
assert_eq!(status.reason, Some("NonZeroExitCode".to_owned()));
}
}
// Delete it
let dp = DeleteParams::default();
pods.delete("busybox-kube2", &dp).await?.map_left(|pdel| {
assert_eq!(pdel.name_unchecked(), "busybox-kube2");
});
Ok(())
}
}

Binary file not shown.

File diff suppressed because one or more lines are too long