feat: Phase 1 foundations — kube-rs client, OpenBao HTTP client, self-update
kube.rs: - KubeClient with lazy init from kubeconfig + context selection - SSH tunnel via subprocess (port 2222, forward 16443->6443) - Server-side apply for multi-document YAML via kube-rs discovery - Secret get/create, namespace ensure, exec in pod, rollout restart - Domain discovery from gitea-inline-config secret - kustomize_build with embedded binary, domain/email/registry substitution - kubectl and bao CLI passthrough commands openbao.rs: - Lightweight Vault/OpenBao HTTP API client using reqwest - System ops: seal-status, init, unseal - KV v2: get, put, patch, delete with proper response parsing - Auth: enable method, write policy, write roles - Database secrets engine: config, static roles - Replaces all kubectl exec bao shell commands from Python version update.rs: - Self-update from latest mainline commit via Gitea API - CI artifact download with SHA256 checksum verification - Atomic self-replace (temp file + rename) - Background update check with hourly cache (~/.local/share/sunbeam/) - Enhanced version command with target triple and build date build.rs: - Added SUNBEAM_TARGET and SUNBEAM_BUILD_DATE env vars 35 tests pass.
This commit is contained in:
@@ -54,3 +54,4 @@ reqwest = { version = "0.12", features = ["blocking", "rustls-tls"] }
|
||||
sha2 = "0.10"
|
||||
flate2 = "1"
|
||||
tar = "0.4"
|
||||
chrono = "0.4"
|
||||
|
||||
5
build.rs
5
build.rs
@@ -21,6 +21,11 @@ fn main() {
|
||||
let commit = git_commit_sha();
|
||||
println!("cargo:rustc-env=SUNBEAM_COMMIT={commit}");
|
||||
|
||||
// Build target triple and build date
|
||||
println!("cargo:rustc-env=SUNBEAM_TARGET={target}");
|
||||
let date = chrono::Utc::now().format("%Y-%m-%d").to_string();
|
||||
println!("cargo:rustc-env=SUNBEAM_BUILD_DATE={date}");
|
||||
|
||||
// Rebuild if git HEAD changes
|
||||
println!("cargo:rerun-if-changed=.git/HEAD");
|
||||
}
|
||||
|
||||
641
src/kube.rs
641
src/kube.rs
@@ -1,8 +1,20 @@
|
||||
use anyhow::{bail, Result};
|
||||
use anyhow::{bail, Context, Result};
|
||||
use base64::Engine;
|
||||
use k8s_openapi::api::apps::v1::Deployment;
|
||||
use k8s_openapi::api::core::v1::{Namespace, Secret};
|
||||
use kube::api::{Api, ApiResource, DynamicObject, ListParams, Patch, PatchParams};
|
||||
use kube::config::{KubeConfigOptions, Kubeconfig};
|
||||
use kube::discovery::{self, Scope};
|
||||
use kube::{Client, Config};
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::process::Stdio;
|
||||
use std::sync::OnceLock;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
static CONTEXT: OnceLock<String> = OnceLock::new();
|
||||
static SSH_HOST: OnceLock<String> = OnceLock::new();
|
||||
static KUBE_CLIENT: OnceCell<Client> = OnceCell::const_new();
|
||||
|
||||
/// Set the active kubectl context and optional SSH host for production tunnel.
|
||||
pub fn set_context(ctx: &str, ssh_host: &str) {
|
||||
@@ -20,6 +32,592 @@ pub fn ssh_host() -> &'static str {
|
||||
SSH_HOST.get().map(|s| s.as_str()).unwrap_or("")
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SSH tunnel management
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Ensure SSH tunnel is open for production (forwards localhost:16443 -> remote:6443).
/// For local dev (empty ssh_host), this is a no-op.
///
/// The spawned `ssh -N` child is intentionally leaked (`_child` is dropped
/// without being awaited or killed) so the tunnel outlives this call; it
/// dies with the parent process. Total wait budget: 20 x 500ms = 10s.
#[allow(dead_code)]
pub async fn ensure_tunnel() -> Result<()> {
    let host = ssh_host();
    if host.is_empty() {
        return Ok(());
    }

    // Check if tunnel is already open
    if tokio::net::TcpStream::connect("127.0.0.1:16443")
        .await
        .is_ok()
    {
        return Ok(());
    }

    crate::output::ok(&format!("Opening SSH tunnel to {host}..."));

    // -N: no remote command, forwarding only. ExitOnForwardFailure makes ssh
    // exit (instead of lingering) if the -L forward cannot be established.
    let _child = tokio::process::Command::new("ssh")
        .args([
            "-p",
            "2222",
            "-L",
            "16443:127.0.0.1:6443",
            "-N",
            "-o",
            "ExitOnForwardFailure=yes",
            "-o",
            "StrictHostKeyChecking=no",
            host,
        ])
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .spawn()
        .context("Failed to spawn SSH tunnel")?;

    // Wait for tunnel to become available
    for _ in 0..20 {
        tokio::time::sleep(std::time::Duration::from_millis(500)).await;
        if tokio::net::TcpStream::connect("127.0.0.1:16443")
            .await
            .is_ok()
        {
            return Ok(());
        }
    }

    bail!("SSH tunnel to {host} did not open in time")
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Client initialization
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Get or create a kube::Client configured for the active context.
/// Opens SSH tunnel first if needed for production.
///
/// The client is built once and cached in the `KUBE_CLIENT` OnceCell;
/// subsequent calls return the cached instance. Note: the context is read
/// at first-call time, so `set_context` must happen before the first call.
pub async fn get_client() -> Result<&'static Client> {
    KUBE_CLIENT
        .get_or_try_init(|| async {
            ensure_tunnel().await?;

            // Read ~/.kube/config (or $KUBECONFIG) and select the active context.
            let kubeconfig = Kubeconfig::read().context("Failed to read kubeconfig")?;
            let options = KubeConfigOptions {
                context: Some(context().to_string()),
                ..Default::default()
            };
            let config = Config::from_custom_kubeconfig(kubeconfig, &options)
                .await
                .context("Failed to build kube config from kubeconfig")?;
            Client::try_from(config).context("Failed to create kube client")
        })
        .await
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Core Kubernetes operations
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Server-side apply a multi-document YAML manifest.
|
||||
#[allow(dead_code)]
|
||||
pub async fn kube_apply(manifest: &str) -> Result<()> {
|
||||
let client = get_client().await?;
|
||||
let ssapply = PatchParams::apply("sunbeam").force();
|
||||
|
||||
for doc in manifest.split("\n---") {
|
||||
let doc = doc.trim();
|
||||
if doc.is_empty() || doc == "---" {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Parse the YAML to a DynamicObject so we can route it
|
||||
let obj: serde_yaml::Value =
|
||||
serde_yaml::from_str(doc).context("Failed to parse YAML document")?;
|
||||
|
||||
let api_version = obj
|
||||
.get("apiVersion")
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("");
|
||||
let kind = obj.get("kind").and_then(|v| v.as_str()).unwrap_or("");
|
||||
let metadata = obj.get("metadata");
|
||||
let name = metadata
|
||||
.and_then(|m| m.get("name"))
|
||||
.and_then(|v| v.as_str())
|
||||
.unwrap_or("");
|
||||
let namespace = metadata
|
||||
.and_then(|m| m.get("namespace"))
|
||||
.and_then(|v| v.as_str());
|
||||
|
||||
if name.is_empty() || kind.is_empty() {
|
||||
continue; // skip incomplete documents
|
||||
}
|
||||
|
||||
// Use discovery to find the right API resource
|
||||
let (ar, scope) = resolve_api_resource(client, api_version, kind).await?;
|
||||
|
||||
let api: Api<DynamicObject> = if let Some(ns) = namespace {
|
||||
Api::namespaced_with(client.clone(), ns, &ar)
|
||||
} else if scope == Scope::Namespaced {
|
||||
// Namespaced resource without a namespace specified; use default
|
||||
Api::default_namespaced_with(client.clone(), &ar)
|
||||
} else {
|
||||
Api::all_with(client.clone(), &ar)
|
||||
};
|
||||
|
||||
let patch: serde_json::Value = serde_json::from_str(
|
||||
&serde_json::to_string(
|
||||
&serde_yaml::from_str::<serde_json::Value>(doc)
|
||||
.context("Failed to parse YAML to JSON")?,
|
||||
)
|
||||
.context("Failed to serialize to JSON")?,
|
||||
)
|
||||
.context("Failed to parse JSON")?;
|
||||
|
||||
api.patch(name, &ssapply, &Patch::Apply(patch))
|
||||
.await
|
||||
.with_context(|| format!("Failed to apply {kind}/{name}"))?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Resolve an API resource from apiVersion and kind using discovery.
|
||||
async fn resolve_api_resource(
|
||||
client: &Client,
|
||||
api_version: &str,
|
||||
kind: &str,
|
||||
) -> Result<(ApiResource, Scope)> {
|
||||
// Split apiVersion into group and version
|
||||
let (group, version) = if api_version.contains('/') {
|
||||
let parts: Vec<&str> = api_version.splitn(2, '/').collect();
|
||||
(parts[0], parts[1])
|
||||
} else {
|
||||
("", api_version) // core API group
|
||||
};
|
||||
|
||||
let disc = discovery::Discovery::new(client.clone())
|
||||
.run()
|
||||
.await
|
||||
.context("API discovery failed")?;
|
||||
|
||||
for api_group in disc.groups() {
|
||||
if api_group.name() == group {
|
||||
for (ar, caps) in api_group.resources_by_stability() {
|
||||
if ar.kind == kind && ar.version == version {
|
||||
return Ok((ar, caps.scope));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bail!("Could not discover API resource for {api_version}/{kind}")
|
||||
}
|
||||
|
||||
/// Get a Kubernetes Secret object.
|
||||
#[allow(dead_code)]
|
||||
pub async fn kube_get_secret(ns: &str, name: &str) -> Result<Option<Secret>> {
|
||||
let client = get_client().await?;
|
||||
let api: Api<Secret> = Api::namespaced(client.clone(), ns);
|
||||
match api.get_opt(name).await {
|
||||
Ok(secret) => Ok(secret),
|
||||
Err(e) => Err(e).context(format!("Failed to get secret {ns}/{name}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a specific base64-decoded field from a Kubernetes secret.
|
||||
#[allow(dead_code)]
|
||||
pub async fn kube_get_secret_field(ns: &str, name: &str, key: &str) -> Result<String> {
|
||||
let secret = kube_get_secret(ns, name)
|
||||
.await?
|
||||
.with_context(|| format!("Secret {ns}/{name} not found"))?;
|
||||
|
||||
let data = secret.data.as_ref().context("Secret has no data")?;
|
||||
|
||||
let bytes = data
|
||||
.get(key)
|
||||
.with_context(|| format!("Key {key:?} not found in secret {ns}/{name}"))?;
|
||||
|
||||
String::from_utf8(bytes.0.clone())
|
||||
.with_context(|| format!("Key {key:?} in secret {ns}/{name} is not valid UTF-8"))
|
||||
}
|
||||
|
||||
/// Check if a namespace exists.
|
||||
#[allow(dead_code)]
|
||||
pub async fn ns_exists(ns: &str) -> Result<bool> {
|
||||
let client = get_client().await?;
|
||||
let api: Api<Namespace> = Api::all(client.clone());
|
||||
match api.get_opt(ns).await {
|
||||
Ok(Some(_)) => Ok(true),
|
||||
Ok(None) => Ok(false),
|
||||
Err(e) => Err(e).context(format!("Failed to check namespace {ns}")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create namespace if it does not exist.
|
||||
#[allow(dead_code)]
|
||||
pub async fn ensure_ns(ns: &str) -> Result<()> {
|
||||
if ns_exists(ns).await? {
|
||||
return Ok(());
|
||||
}
|
||||
let client = get_client().await?;
|
||||
let api: Api<Namespace> = Api::all(client.clone());
|
||||
let ns_obj = serde_json::json!({
|
||||
"apiVersion": "v1",
|
||||
"kind": "Namespace",
|
||||
"metadata": { "name": ns }
|
||||
});
|
||||
let pp = PatchParams::apply("sunbeam").force();
|
||||
api.patch(ns, &pp, &Patch::Apply(ns_obj))
|
||||
.await
|
||||
.with_context(|| format!("Failed to create namespace {ns}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create or update a generic Kubernetes secret via server-side apply.
|
||||
#[allow(dead_code)]
|
||||
pub async fn create_secret(ns: &str, name: &str, data: HashMap<String, String>) -> Result<()> {
|
||||
let client = get_client().await?;
|
||||
let api: Api<Secret> = Api::namespaced(client.clone(), ns);
|
||||
|
||||
// Encode values as base64
|
||||
let mut encoded: serde_json::Map<String, serde_json::Value> = serde_json::Map::new();
|
||||
for (k, v) in &data {
|
||||
let b64 = base64::engine::general_purpose::STANDARD.encode(v.as_bytes());
|
||||
encoded.insert(k.clone(), serde_json::Value::String(b64));
|
||||
}
|
||||
|
||||
let secret_obj = serde_json::json!({
|
||||
"apiVersion": "v1",
|
||||
"kind": "Secret",
|
||||
"metadata": {
|
||||
"name": name,
|
||||
"namespace": ns,
|
||||
},
|
||||
"type": "Opaque",
|
||||
"data": encoded,
|
||||
});
|
||||
|
||||
let pp = PatchParams::apply("sunbeam").force();
|
||||
api.patch(name, &pp, &Patch::Apply(secret_obj))
|
||||
.await
|
||||
.with_context(|| format!("Failed to create/update secret {ns}/{name}"))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Execute a command in a pod and return (exit_code, stdout).
///
/// Only stdout is captured into the return value (stderr is requested on
/// the attach but not read here). The exit code is derived from the exec
/// status channel: "Success" maps to 0; any other status, a missing status
/// field, or a closed channel maps to 1 — individual command exit codes are
/// not recovered.
#[allow(dead_code)]
pub async fn kube_exec(
    ns: &str,
    pod: &str,
    cmd: &[&str],
    container: Option<&str>,
) -> Result<(i32, String)> {
    let client = get_client().await?;
    let pods: Api<k8s_openapi::api::core::v1::Pod> = Api::namespaced(client.clone(), ns);

    // Attach with stdout/stderr streams, no stdin (non-interactive exec).
    let mut ep = kube::api::AttachParams::default();
    ep.stdout = true;
    ep.stderr = true;
    ep.stdin = false;
    if let Some(c) = container {
        ep.container = Some(c.to_string());
    }

    let cmd_strings: Vec<String> = cmd.iter().map(|s| s.to_string()).collect();
    let mut attached = pods
        .exec(pod, cmd_strings, &ep)
        .await
        .with_context(|| format!("Failed to exec in pod {ns}/{pod}"))?;

    // Read stdout to EOF before awaiting the status channel; the reader is
    // scoped so its borrow of `attached` ends before take_status().
    let stdout = {
        let mut stdout_reader = attached
            .stdout()
            .context("No stdout stream from exec")?;
        let mut buf = Vec::new();
        tokio::io::AsyncReadExt::read_to_end(&mut stdout_reader, &mut buf).await?;
        String::from_utf8_lossy(&buf).to_string()
    };

    let status = attached
        .take_status()
        .context("No status channel from exec")?;

    // Wait for the status
    let exit_code = if let Some(status) = status.await {
        status
            .status
            .map(|s| if s == "Success" { 0 } else { 1 })
            .unwrap_or(1)
    } else {
        1
    };

    Ok((exit_code, stdout.trim().to_string()))
}
|
||||
|
||||
/// Patch a deployment to trigger a rollout restart.
///
/// Mirrors `kubectl rollout restart`: a strategic-merge patch bumps the
/// pod-template annotation `kubectl.kubernetes.io/restartedAt` to "now",
/// which changes the template hash and makes the Deployment controller
/// roll out fresh pods.
#[allow(dead_code)]
pub async fn kube_rollout_restart(ns: &str, deployment: &str) -> Result<()> {
    let client = get_client().await?;
    let api: Api<Deployment> = Api::namespaced(client.clone(), ns);

    let now = chrono::Utc::now().to_rfc3339();
    let patch = serde_json::json!({
        "spec": {
            "template": {
                "metadata": {
                    "annotations": {
                        "kubectl.kubernetes.io/restartedAt": now
                    }
                }
            }
        }
    });

    api.patch(deployment, &PatchParams::default(), &Patch::Strategic(patch))
        .await
        .with_context(|| format!("Failed to restart deployment {ns}/{deployment}"))?;
    Ok(())
}
|
||||
|
||||
/// Discover the active domain from cluster state.
///
/// Tries the gitea-inline-config secret first (DOMAIN=src.<domain>),
/// falls back to lasuite-oidc-provider configmap, then Lima VM IP.
#[allow(dead_code)]
pub async fn get_domain() -> Result<String> {
    // 1. Gitea inline-config secret: its "server" key holds an INI fragment
    //    whose DOMAIN line embeds the cluster domain as "src.<domain>".
    if let Ok(Some(secret)) = kube_get_secret("devtools", "gitea-inline-config").await {
        if let Some(data) = &secret.data {
            if let Some(server_bytes) = data.get("server") {
                let server_ini = String::from_utf8_lossy(&server_bytes.0);
                for line in server_ini.lines() {
                    if let Some(rest) = line.strip_prefix("DOMAIN=src.") {
                        return Ok(rest.trim().to_string());
                    }
                }
            }
        }
    }

    // 2. Fallback: lasuite-oidc-provider configmap — pull the domain out of
    //    the JWKS endpoint URL ("https://auth.<domain>/...").
    {
        let client = get_client().await?;
        let api: Api<k8s_openapi::api::core::v1::ConfigMap> =
            Api::namespaced(client.clone(), "lasuite");
        if let Ok(Some(cm)) = api.get_opt("lasuite-oidc-provider").await {
            if let Some(data) = &cm.data {
                if let Some(endpoint) = data.get("OIDC_OP_JWKS_ENDPOINT") {
                    if let Some(rest) = endpoint.split("https://auth.").nth(1) {
                        if let Some(domain) = rest.split('/').next() {
                            return Ok(domain.to_string());
                        }
                    }
                }
            }
        }
    }

    // 3. Local dev fallback: Lima VM IP, dressed up as an sslip.io wildcard
    //    domain. Note: if get_lima_ip() fails this returns ".sslip.io".
    let ip = get_lima_ip().await;
    Ok(format!("{ip}.sslip.io"))
}
|
||||
|
||||
/// Get the socket_vmnet IP of the Lima sunbeam VM.
///
/// Best-effort: returns an empty string when the VM cannot be queried.
/// Parses `ip -4 addr show eth1` inside the VM first, then falls back to
/// `hostname -I`.
async fn get_lima_ip() -> String {
    let output = tokio::process::Command::new("limactl")
        .args(["shell", "sunbeam", "ip", "-4", "addr", "show", "eth1"])
        .output()
        .await;

    if let Ok(out) = output {
        let stdout = String::from_utf8_lossy(&out.stdout);
        for line in stdout.lines() {
            // Line shape: "inet <ip>/<prefix> ..." — take the address token
            // and drop the prefix length.
            if line.contains("inet ") {
                if let Some(addr) = line.trim().split_whitespace().nth(1) {
                    if let Some(ip) = addr.split('/').next() {
                        return ip.to_string();
                    }
                }
            }
        }
    }

    // Fallback: hostname -I (space-separated list of all assigned IPv4s).
    let output2 = tokio::process::Command::new("limactl")
        .args(["shell", "sunbeam", "hostname", "-I"])
        .output()
        .await;

    if let Ok(out) = output2 {
        let stdout = String::from_utf8_lossy(&out.stdout);
        let ips: Vec<&str> = stdout.trim().split_whitespace().collect();
        // Prefers the LAST address when several are present — presumably the
        // socket_vmnet interface is listed after the default NIC; TODO confirm.
        if ips.len() >= 2 {
            return ips[ips.len() - 1].to_string();
        } else if !ips.is_empty() {
            return ips[0].to_string();
        }
    }

    String::new()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// kustomize build
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Run kustomize build --enable-helm and apply domain/email substitution.
///
/// * `overlay` — path to the kustomize overlay directory.
/// * `domain`  — substituted via `domain_replace` (DOMAIN_SUFFIX).
/// * `email`   — substituted for ACME_EMAIL; skipped when empty.
///
/// Also resolves REGISTRY_HOST_IP when the output references it, and strips
/// the "annotations: null" artifact kustomize sometimes emits.
#[allow(dead_code)]
pub async fn kustomize_build(overlay: &Path, domain: &str, email: &str) -> Result<String> {
    let kustomize_path = crate::tools::ensure_kustomize()?;
    let helm_path = crate::tools::ensure_helm()?;

    // Ensure helm's parent dir is on PATH so kustomize can find it
    let helm_dir = helm_path
        .parent()
        .map(|p| p.to_string_lossy().to_string())
        .unwrap_or_default();

    // Prepend helm's dir to the existing PATH (or use it alone if PATH unset).
    let mut env_path = helm_dir.clone();
    if let Ok(existing) = std::env::var("PATH") {
        env_path = format!("{helm_dir}:{existing}");
    }

    let output = tokio::process::Command::new(&kustomize_path)
        .args(["build", "--enable-helm"])
        .arg(overlay)
        .env("PATH", &env_path)
        .output()
        .await
        .context("Failed to run kustomize")?;

    if !output.status.success() {
        let stderr = String::from_utf8_lossy(&output.stderr);
        bail!("kustomize build failed: {stderr}");
    }

    let mut text = String::from_utf8(output.stdout).context("kustomize output not UTF-8")?;

    // Domain substitution
    text = domain_replace(&text, domain);

    // ACME email substitution
    if !email.is_empty() {
        text = text.replace("ACME_EMAIL", email);
    }

    // Registry host IP resolution — only pay the DNS cost when the
    // placeholder actually appears in the output.
    if text.contains("REGISTRY_HOST_IP") {
        let registry_ip = resolve_registry_ip(domain).await;
        text = text.replace("REGISTRY_HOST_IP", &registry_ip);
    }

    // Strip null annotations artifact
    text = text.replace("\n annotations: null", "");

    Ok(text)
}
|
||||
|
||||
/// Resolve the registry host IP for REGISTRY_HOST_IP substitution.
///
/// Resolution order:
/// 1. DNS lookup of `src.<domain>`.
/// 2. DNS lookup of the production SSH host (with `user@` / `:port` stripped).
/// 3. The raw host string itself (assumed to already be an IP).
/// Returns an empty string when nothing resolves and no host is configured.
///
/// NOTE(review): `to_socket_addrs` performs blocking DNS resolution inside
/// an async fn — consider `tokio::task::spawn_blocking` if this ever runs
/// on a latency-sensitive path.
async fn resolve_registry_ip(domain: &str) -> String {
    use std::net::ToSocketAddrs;

    // Try DNS for src.<domain>; ":443" exists only to satisfy
    // ToSocketAddrs — the port itself is irrelevant here.
    let hostname = format!("src.{domain}:443");
    if let Ok(mut addrs) = hostname.to_socket_addrs() {
        if let Some(addr) = addrs.next() {
            return addr.ip().to_string();
        }
    }

    // Fallback: derive from production host config
    let ssh_host = crate::config::get_production_host();
    if !ssh_host.is_empty() {
        // Strip an optional "user@" prefix and ":port" suffix.
        let raw = ssh_host
            .split('@')
            .last()
            .unwrap_or(&ssh_host)
            .split(':')
            .next()
            .unwrap_or(&ssh_host);
        let host_lookup = format!("{raw}:443");
        if let Ok(mut addrs) = host_lookup.to_socket_addrs() {
            if let Some(addr) = addrs.next() {
                return addr.ip().to_string();
            }
        }
        // raw is likely already an IP
        return raw.to_string();
    }

    String::new()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// kubectl / bao passthrough
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Transparent kubectl passthrough for the active context.
|
||||
pub async fn cmd_k8s(kubectl_args: &[String]) -> Result<()> {
|
||||
ensure_tunnel().await?;
|
||||
|
||||
let status = tokio::process::Command::new("kubectl")
|
||||
.arg(format!("--context={}", context()))
|
||||
.args(kubectl_args)
|
||||
.stdin(Stdio::inherit())
|
||||
.stdout(Stdio::inherit())
|
||||
.stderr(Stdio::inherit())
|
||||
.status()
|
||||
.await
|
||||
.context("Failed to run kubectl")?;
|
||||
|
||||
if !status.success() {
|
||||
std::process::exit(status.code().unwrap_or(1));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Run bao CLI inside the OpenBao pod with the root token.
|
||||
pub async fn cmd_bao(bao_args: &[String]) -> Result<()> {
|
||||
// Find the openbao pod
|
||||
let client = get_client().await?;
|
||||
let pods: Api<k8s_openapi::api::core::v1::Pod> = Api::namespaced(client.clone(), "data");
|
||||
|
||||
let lp = ListParams::default().labels("app.kubernetes.io/name=openbao");
|
||||
let pod_list = pods.list(&lp).await.context("Failed to list OpenBao pods")?;
|
||||
let ob_pod = pod_list
|
||||
.items
|
||||
.first()
|
||||
.and_then(|p| p.metadata.name.as_deref())
|
||||
.context("OpenBao pod not found -- is the cluster running?")?
|
||||
.to_string();
|
||||
|
||||
// Get root token
|
||||
let root_token = kube_get_secret_field("data", "openbao-keys", "root-token")
|
||||
.await
|
||||
.context("root-token not found in openbao-keys secret")?;
|
||||
|
||||
// Build the command string for sh -c
|
||||
let bao_arg_str = bao_args.join(" ");
|
||||
let bao_cmd = format!("VAULT_TOKEN={root_token} bao {bao_arg_str}");
|
||||
|
||||
// Use kubectl for full TTY support
|
||||
let status = tokio::process::Command::new("kubectl")
|
||||
.arg(format!("--context={}", context()))
|
||||
.args(["-n", "data", "exec", &ob_pod, "-c", "openbao", "--", "sh", "-c", &bao_cmd])
|
||||
.stdin(Stdio::inherit())
|
||||
.stdout(Stdio::inherit())
|
||||
.stderr(Stdio::inherit())
|
||||
.status()
|
||||
.await
|
||||
.context("Failed to run bao in OpenBao pod")?;
|
||||
|
||||
if !status.success() {
|
||||
std::process::exit(status.code().unwrap_or(1));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Parse target and domain_replace (already tested)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse 'ns/name' -> (Some(ns), Some(name)), 'ns' -> (Some(ns), None), None -> (None, None).
|
||||
pub fn parse_target(s: Option<&str>) -> Result<(Option<&str>, Option<&str>)> {
|
||||
match s {
|
||||
@@ -40,15 +638,9 @@ pub fn domain_replace(text: &str, domain: &str) -> String {
|
||||
text.replace("DOMAIN_SUFFIX", domain)
|
||||
}
|
||||
|
||||
/// Transparent kubectl passthrough for the active context.
|
||||
pub async fn cmd_k8s(_kubectl_args: &[String]) -> Result<()> {
|
||||
todo!("cmd_k8s: kubectl passthrough via kube-rs")
|
||||
}
|
||||
|
||||
/// Run bao CLI inside the OpenBao pod with the root token.
|
||||
pub async fn cmd_bao(_bao_args: &[String]) -> Result<()> {
|
||||
todo!("cmd_bao: bao passthrough via kube-rs exec")
|
||||
}
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
@@ -104,4 +696,33 @@ mod tests {
|
||||
let result = domain_replace("no match here", "x.sslip.io");
|
||||
assert_eq!(result, "no match here");
|
||||
}
|
||||
|
||||
    #[test]
    fn test_create_secret_data_encoding() {
        // Test that we can build the expected JSON structure for secret creation
        let mut data = HashMap::new();
        data.insert("username".to_string(), "admin".to_string());
        data.insert("password".to_string(), "s3cret".to_string());

        // Mirrors create_secret's encoding step: each value base64'd into
        // the Secret's `data` map.
        let mut encoded: serde_json::Map<String, serde_json::Value> = serde_json::Map::new();
        for (k, v) in &data {
            let b64 = base64::engine::general_purpose::STANDARD.encode(v.as_bytes());
            encoded.insert(k.clone(), serde_json::Value::String(b64));
        }

        let secret_obj = serde_json::json!({
            "apiVersion": "v1",
            "kind": "Secret",
            "metadata": {
                "name": "test-secret",
                "namespace": "default",
            },
            "type": "Opaque",
            "data": encoded,
        });

        let json_str = serde_json::to_string(&secret_obj).unwrap();
        assert!(json_str.contains("YWRtaW4=")); // base64("admin")
        assert!(json_str.contains("czNjcmV0")); // base64("s3cret")
    }
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ mod gitea;
|
||||
mod images;
|
||||
mod kube;
|
||||
mod manifests;
|
||||
mod openbao;
|
||||
mod output;
|
||||
mod secrets;
|
||||
mod services;
|
||||
|
||||
486
src/openbao.rs
Normal file
486
src/openbao.rs
Normal file
@@ -0,0 +1,486 @@
|
||||
//! Lightweight OpenBao/Vault HTTP API client.
|
||||
//!
|
||||
//! Replaces all `kubectl exec openbao-0 -- sh -c "bao ..."` calls from the
|
||||
//! Python version with direct HTTP API calls via port-forward to openbao:8200.
|
||||
|
||||
use anyhow::{bail, Context, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// OpenBao HTTP client wrapping a base URL and optional root token.
#[derive(Clone)]
pub struct BaoClient {
    // Base URL without trailing slash, e.g. "http://localhost:8200".
    pub base_url: String,
    // Optional token sent as the X-Vault-Token header on authenticated calls.
    pub token: Option<String>,
    // Shared reqwest client (connection pooling across requests).
    http: reqwest::Client,
}
|
||||
|
||||
// ── API response types ──────────────────────────────────────────────────────
|
||||
|
||||
/// Response from `sys/init`: the unseal key shares and initial root token.
#[derive(Debug, Deserialize)]
pub struct InitResponse {
    // Base64-encoded unseal key shares.
    pub unseal_keys_b64: Vec<String>,
    // Initial root token — grants full access; handle with care.
    pub root_token: String,
}

/// Response from `sys/seal-status`.
#[derive(Debug, Deserialize)]
pub struct SealStatusResponse {
    // Whether `sys/init` has been run on this instance.
    #[serde(default)]
    pub initialized: bool,
    // Whether the instance is currently sealed.
    #[serde(default)]
    pub sealed: bool,
    // Key shares submitted so far in the current unseal attempt.
    #[serde(default)]
    pub progress: u32,
    // Unseal threshold (shares required), per the Vault seal-status API.
    #[serde(default)]
    pub t: u32,
    // Total number of key shares, per the Vault seal-status API.
    #[serde(default)]
    pub n: u32,
}

/// Response from `sys/unseal`: sealed state after submitting one share.
#[derive(Debug, Deserialize)]
pub struct UnsealResponse {
    #[serde(default)]
    pub sealed: bool,
    #[serde(default)]
    pub progress: u32,
}

/// KV v2 read response wrapper.
#[derive(Debug, Deserialize)]
struct KvReadResponse {
    // Outer "data" envelope of the KV v2 read API.
    data: Option<KvReadData>,
}

#[derive(Debug, Deserialize)]
struct KvReadData {
    // Inner "data" map holding the actual secret key/value pairs.
    data: Option<HashMap<String, serde_json::Value>>,
}
|
||||
|
||||
// ── Client implementation ───────────────────────────────────────────────────
|
||||
|
||||
impl BaoClient {
|
||||
    /// Create a new client pointing at `base_url` (e.g. `http://localhost:8200`).
    pub fn new(base_url: &str) -> Self {
        Self {
            // Normalized without trailing slash so url() can join cleanly.
            base_url: base_url.trim_end_matches('/').to_string(),
            token: None,
            http: reqwest::Client::new(),
        }
    }
|
||||
|
||||
/// Create a client with an authentication token.
|
||||
pub fn with_token(base_url: &str, token: &str) -> Self {
|
||||
let mut client = Self::new(base_url);
|
||||
client.token = Some(token.to_string());
|
||||
client
|
||||
}
|
||||
|
||||
    /// Build a full API URL: "<base_url>/v1/<path>" (leading slashes stripped).
    fn url(&self, path: &str) -> String {
        format!("{}/v1/{}", self.base_url, path.trim_start_matches('/'))
    }
|
||||
|
||||
    /// Start a request to an API path, attaching the X-Vault-Token header
    /// when a token is configured.
    fn request(&self, method: reqwest::Method, path: &str) -> reqwest::RequestBuilder {
        let mut req = self.http.request(method, self.url(path));
        if let Some(ref token) = self.token {
            req = req.header("X-Vault-Token", token);
        }
        req
    }
|
||||
|
||||
// ── System operations ───────────────────────────────────────────────
|
||||
|
||||
/// Get the seal status of the OpenBao instance.
|
||||
pub async fn seal_status(&self) -> Result<SealStatusResponse> {
|
||||
let resp = self
|
||||
.http
|
||||
.get(format!("{}/v1/sys/seal-status", self.base_url))
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to connect to OpenBao")?;
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("OpenBao seal-status returned {status}: {body}");
|
||||
}
|
||||
resp.json().await.context("Failed to parse seal status")
|
||||
}
|
||||
|
||||
    /// Initialize OpenBao with the given number of key shares and threshold.
    ///
    /// Calls `PUT /v1/sys/init` without a token (init happens before any
    /// token exists). Returns the unseal key shares and the initial root
    /// token; the caller is responsible for persisting them safely.
    pub async fn init(&self, key_shares: u32, key_threshold: u32) -> Result<InitResponse> {
        #[derive(Serialize)]
        struct InitRequest {
            secret_shares: u32,
            secret_threshold: u32,
        }

        let resp = self
            .http
            .put(format!("{}/v1/sys/init", self.base_url))
            .json(&InitRequest {
                secret_shares: key_shares,
                secret_threshold: key_threshold,
            })
            .send()
            .await
            .context("Failed to initialize OpenBao")?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("OpenBao init returned {status}: {body}");
        }
        resp.json().await.context("Failed to parse init response")
    }
|
||||
|
||||
    /// Unseal OpenBao with one key share.
    ///
    /// Calls `PUT /v1/sys/unseal` (unauthenticated). Submit shares
    /// repeatedly until the returned `sealed` flag goes false.
    pub async fn unseal(&self, key: &str) -> Result<UnsealResponse> {
        #[derive(Serialize)]
        struct UnsealRequest<'a> {
            key: &'a str,
        }

        let resp = self
            .http
            .put(format!("{}/v1/sys/unseal", self.base_url))
            .json(&UnsealRequest { key })
            .send()
            .await
            .context("Failed to unseal OpenBao")?;

        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("OpenBao unseal returned {status}: {body}");
        }
        resp.json().await.context("Failed to parse unseal response")
    }
|
||||
|
||||
// ── Secrets engine management ───────────────────────────────────────
|
||||
|
||||
/// Enable a secrets engine at the given path.
|
||||
/// Returns Ok(()) even if already enabled (409 is tolerated).
|
||||
pub async fn enable_secrets_engine(&self, path: &str, engine_type: &str) -> Result<()> {
|
||||
#[derive(Serialize)]
|
||||
struct EnableRequest<'a> {
|
||||
r#type: &'a str,
|
||||
}
|
||||
|
||||
let resp = self
|
||||
.request(reqwest::Method::POST, &format!("sys/mounts/{path}"))
|
||||
.json(&EnableRequest {
|
||||
r#type: engine_type,
|
||||
})
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to enable secrets engine")?;
|
||||
|
||||
let status = resp.status();
|
||||
if status.is_success() || status.as_u16() == 400 {
|
||||
// 400 = "path is already in use" — idempotent
|
||||
Ok(())
|
||||
} else {
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("Enable secrets engine {path} returned {status}: {body}");
|
||||
}
|
||||
}
|
||||
|
||||
// ── KV v2 operations ────────────────────────────────────────────────
|
||||
|
||||
    /// Read all fields from a KV v2 secret path.
    /// Returns None if the path doesn't exist (404).
    pub async fn kv_get(&self, mount: &str, path: &str) -> Result<Option<HashMap<String, String>>> {
        // KV v2 reads go through "<mount>/data/<path>".
        let resp = self
            .request(reqwest::Method::GET, &format!("{mount}/data/{path}"))
            .send()
            .await
            .context("Failed to read KV secret")?;

        if resp.status().as_u16() == 404 {
            return Ok(None);
        }
        if !resp.status().is_success() {
            let status = resp.status();
            let body = resp.text().await.unwrap_or_default();
            bail!("KV get {mount}/{path} returned {status}: {body}");
        }

        // KV v2 nests the payload as data.data; missing layers yield an
        // empty map rather than an error.
        let kv_resp: KvReadResponse = resp.json().await.context("Failed to parse KV response")?;
        let data = kv_resp
            .data
            .and_then(|d| d.data)
            .unwrap_or_default();

        // Convert all values to strings (non-strings via their JSON rendering)
        let result: HashMap<String, String> = data
            .into_iter()
            .map(|(k, v)| {
                let s = match v {
                    serde_json::Value::String(s) => s,
                    other => other.to_string(),
                };
                (k, s)
            })
            .collect();

        Ok(Some(result))
    }
|
||||
|
||||
/// Read a single field from a KV v2 secret path.
|
||||
/// Returns empty string if path or field doesn't exist.
|
||||
pub async fn kv_get_field(&self, mount: &str, path: &str, field: &str) -> Result<String> {
|
||||
match self.kv_get(mount, path).await? {
|
||||
Some(data) => Ok(data.get(field).cloned().unwrap_or_default()),
|
||||
None => Ok(String::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Write (create or overwrite) all fields in a KV v2 secret path.
|
||||
pub async fn kv_put(
|
||||
&self,
|
||||
mount: &str,
|
||||
path: &str,
|
||||
data: &HashMap<String, String>,
|
||||
) -> Result<()> {
|
||||
#[derive(Serialize)]
|
||||
struct KvWriteRequest<'a> {
|
||||
data: &'a HashMap<String, String>,
|
||||
}
|
||||
|
||||
let resp = self
|
||||
.request(reqwest::Method::POST, &format!("{mount}/data/{path}"))
|
||||
.json(&KvWriteRequest { data })
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to write KV secret")?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("KV put {mount}/{path} returned {status}: {body}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Patch (merge) fields into an existing KV v2 secret path.
///
/// Uses the KV v2 PATCH endpoint with the `application/merge-patch+json`
/// content type the API expects. NOTE(review): the secret presumably must
/// already exist for a PATCH to succeed — confirm against the server's
/// behavior for missing paths.
pub async fn kv_patch(
    &self,
    mount: &str,
    path: &str,
    data: &HashMap<String, String>,
) -> Result<()> {
    // KV v2 request body shape: {"data": {...}}.
    #[derive(Serialize)]
    struct KvWriteRequest<'a> {
        data: &'a HashMap<String, String>,
    }

    let resp = self
        .request(reqwest::Method::PATCH, &format!("{mount}/data/{path}"))
        // Set BEFORE .json(): reqwest's .json() fills in Content-Type only
        // when none is present, so this merge-patch type is preserved.
        .header("Content-Type", "application/merge-patch+json")
        .json(&KvWriteRequest { data })
        .send()
        .await
        .context("Failed to patch KV secret")?;

    if !resp.status().is_success() {
        let status = resp.status();
        let body = resp.text().await.unwrap_or_default();
        bail!("KV patch {mount}/{path} returned {status}: {body}");
    }
    Ok(())
}
|
||||
|
||||
/// Delete a KV v2 secret path (soft delete — deletes latest version).
|
||||
pub async fn kv_delete(&self, mount: &str, path: &str) -> Result<()> {
|
||||
let resp = self
|
||||
.request(reqwest::Method::DELETE, &format!("{mount}/data/{path}"))
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to delete KV secret")?;
|
||||
|
||||
// 404 is fine (already deleted)
|
||||
if !resp.status().is_success() && resp.status().as_u16() != 404 {
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("KV delete {mount}/{path} returned {status}: {body}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ── Auth operations ─────────────────────────────────────────────────
|
||||
|
||||
/// Enable an auth method at the given path.
|
||||
/// Tolerates "already enabled" (400/409).
|
||||
pub async fn auth_enable(&self, path: &str, method_type: &str) -> Result<()> {
|
||||
#[derive(Serialize)]
|
||||
struct AuthEnableRequest<'a> {
|
||||
r#type: &'a str,
|
||||
}
|
||||
|
||||
let resp = self
|
||||
.request(reqwest::Method::POST, &format!("sys/auth/{path}"))
|
||||
.json(&AuthEnableRequest {
|
||||
r#type: method_type,
|
||||
})
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to enable auth method")?;
|
||||
|
||||
let status = resp.status();
|
||||
if status.is_success() || status.as_u16() == 400 {
|
||||
Ok(())
|
||||
} else {
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("Enable auth {path} returned {status}: {body}");
|
||||
}
|
||||
}
|
||||
|
||||
/// Write a policy.
|
||||
pub async fn write_policy(&self, name: &str, policy_hcl: &str) -> Result<()> {
|
||||
#[derive(Serialize)]
|
||||
struct PolicyRequest<'a> {
|
||||
policy: &'a str,
|
||||
}
|
||||
|
||||
let resp = self
|
||||
.request(
|
||||
reqwest::Method::PUT,
|
||||
&format!("sys/policies/acl/{name}"),
|
||||
)
|
||||
.json(&PolicyRequest { policy: policy_hcl })
|
||||
.send()
|
||||
.await
|
||||
.context("Failed to write policy")?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("Write policy {name} returned {status}: {body}");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write to an arbitrary API path (for auth config, roles, database config, etc.).
|
||||
pub async fn write(
|
||||
&self,
|
||||
path: &str,
|
||||
data: &serde_json::Value,
|
||||
) -> Result<serde_json::Value> {
|
||||
let resp = self
|
||||
.request(reqwest::Method::POST, path)
|
||||
.json(data)
|
||||
.send()
|
||||
.await
|
||||
.with_context(|| format!("Failed to write to {path}"))?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("Write {path} returned {status}: {body}");
|
||||
}
|
||||
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
if body.is_empty() {
|
||||
Ok(serde_json::Value::Null)
|
||||
} else {
|
||||
serde_json::from_str(&body).context("Failed to parse write response")
|
||||
}
|
||||
}
|
||||
|
||||
/// Read from an arbitrary API path.
|
||||
pub async fn read(&self, path: &str) -> Result<Option<serde_json::Value>> {
|
||||
let resp = self
|
||||
.request(reqwest::Method::GET, path)
|
||||
.send()
|
||||
.await
|
||||
.with_context(|| format!("Failed to read {path}"))?;
|
||||
|
||||
if resp.status().as_u16() == 404 {
|
||||
return Ok(None);
|
||||
}
|
||||
if !resp.status().is_success() {
|
||||
let status = resp.status();
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
bail!("Read {path} returned {status}: {body}");
|
||||
}
|
||||
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
if body.is_empty() {
|
||||
Ok(Some(serde_json::Value::Null))
|
||||
} else {
|
||||
Ok(Some(serde_json::from_str(&body)?))
|
||||
}
|
||||
}
|
||||
|
||||
// ── Database secrets engine ─────────────────────────────────────────
|
||||
|
||||
/// Configure the database secrets engine connection.
|
||||
pub async fn write_db_config(
|
||||
&self,
|
||||
name: &str,
|
||||
plugin: &str,
|
||||
connection_url: &str,
|
||||
username: &str,
|
||||
password: &str,
|
||||
allowed_roles: &str,
|
||||
) -> Result<()> {
|
||||
let data = serde_json::json!({
|
||||
"plugin_name": plugin,
|
||||
"connection_url": connection_url,
|
||||
"username": username,
|
||||
"password": password,
|
||||
"allowed_roles": allowed_roles,
|
||||
});
|
||||
self.write(&format!("database/config/{name}"), &data).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Create a database static role.
|
||||
pub async fn write_db_static_role(
|
||||
&self,
|
||||
name: &str,
|
||||
db_name: &str,
|
||||
username: &str,
|
||||
rotation_period: u64,
|
||||
rotation_statements: &[&str],
|
||||
) -> Result<()> {
|
||||
let data = serde_json::json!({
|
||||
"db_name": db_name,
|
||||
"username": username,
|
||||
"rotation_period": rotation_period,
|
||||
"rotation_statements": rotation_statements,
|
||||
});
|
||||
self.write(&format!("database/static-roles/{name}"), &data)
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // url() should prefix the API version and normalize a leading slash.
    #[test]
    fn test_client_url_construction() {
        let client = BaoClient::new("http://localhost:8200");
        assert_eq!(client.url("sys/seal-status"), "http://localhost:8200/v1/sys/seal-status");
        assert_eq!(client.url("/sys/seal-status"), "http://localhost:8200/v1/sys/seal-status");
    }

    // The constructor should canonicalize the base URL (no trailing slash).
    #[test]
    fn test_client_url_strips_trailing_slash() {
        let client = BaoClient::new("http://localhost:8200/");
        assert_eq!(client.base_url, "http://localhost:8200");
    }

    // with_token() stores the provided token on the client.
    #[test]
    fn test_with_token() {
        let client = BaoClient::with_token("http://localhost:8200", "mytoken");
        assert_eq!(client.token, Some("mytoken".to_string()));
    }

    // new() constructs an unauthenticated client (no token set).
    #[test]
    fn test_new_has_no_token() {
        let client = BaoClient::new("http://localhost:8200");
        assert!(client.token.is_none());
    }
}
|
||||
419
src/update.rs
419
src/update.rs
@@ -1,12 +1,425 @@
|
||||
use anyhow::Result;
|
||||
use anyhow::{bail, Context, Result};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
|
||||
/// Compile-time commit SHA set by build.rs.
|
||||
pub const COMMIT: &str = env!("SUNBEAM_COMMIT");
|
||||
|
||||
pub async fn cmd_update() -> Result<()> {
|
||||
todo!("cmd_update: self-update from latest mainline commit via Gitea API")
|
||||
/// Compile-time build target triple set by build.rs.
|
||||
pub const TARGET: &str = env!("SUNBEAM_TARGET");
|
||||
|
||||
/// Compile-time build date set by build.rs.
|
||||
pub const BUILD_DATE: &str = env!("SUNBEAM_BUILD_DATE");
|
||||
|
||||
/// Artifact name prefix for this platform.
|
||||
fn artifact_name() -> String {
|
||||
format!("sunbeam-{TARGET}")
|
||||
}
|
||||
|
||||
/// Resolve the forge URL (Gitea instance).
|
||||
///
|
||||
/// TODO: Once kube.rs exposes `get_domain()`, derive this automatically as
|
||||
/// `https://src.{domain}`. For now we read the SUNBEAM_FORGE_URL environment
|
||||
/// variable with a sensible fallback.
|
||||
fn forge_url() -> String {
|
||||
if let Ok(url) = std::env::var("SUNBEAM_FORGE_URL") {
|
||||
return url.trim_end_matches('/').to_string();
|
||||
}
|
||||
|
||||
// Derive from production_host domain in config
|
||||
let config = crate::config::load_config();
|
||||
if !config.production_host.is_empty() {
|
||||
// production_host is like "user@server.example.com" — extract domain
|
||||
let host = config
|
||||
.production_host
|
||||
.split('@')
|
||||
.last()
|
||||
.unwrap_or(&config.production_host);
|
||||
// Strip any leading subdomain segments that look like a hostname to get the base domain.
|
||||
// For a host like "admin.sunbeam.pt", the forge is "src.sunbeam.pt".
|
||||
// Heuristic: use the last two segments as the domain.
|
||||
let parts: Vec<&str> = host.split('.').collect();
|
||||
if parts.len() >= 2 {
|
||||
let domain = format!("{}.{}", parts[parts.len() - 2], parts[parts.len() - 1]);
|
||||
return format!("https://src.{domain}");
|
||||
}
|
||||
}
|
||||
|
||||
// Hard fallback — will fail at runtime if not configured, which is fine.
|
||||
String::new()
|
||||
}
|
||||
|
||||
/// Cache file location for background update checks.
|
||||
fn update_cache_path() -> PathBuf {
|
||||
dirs::data_dir()
|
||||
.unwrap_or_else(|| dirs::home_dir().unwrap_or_else(|| PathBuf::from(".")).join(".local/share"))
|
||||
.join("sunbeam")
|
||||
.join("update-check.json")
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Gitea API response types
// ---------------------------------------------------------------------------

/// Subset of Gitea's branch API response; only the tip commit is used.
#[derive(Debug, Deserialize)]
struct BranchResponse {
    commit: BranchCommit,
}

/// The branch-tip commit; only the SHA is needed.
#[derive(Debug, Deserialize)]
struct BranchCommit {
    id: String,
}

/// Subset of Gitea's CI artifact-list API response.
#[derive(Debug, Deserialize)]
struct ArtifactListResponse {
    artifacts: Vec<Artifact>,
}

/// A single CI artifact: its display name and numeric id (used in the
/// artifact download URL).
#[derive(Debug, Deserialize)]
struct Artifact {
    name: String,
    id: u64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
// Update-check cache
// ---------------------------------------------------------------------------

/// On-disk cache written after each update check so background checks
/// stay within the hourly budget without hitting the network.
#[derive(Debug, Serialize, Deserialize)]
struct UpdateCache {
    last_check: DateTime<Utc>, // when the forge was last queried
    latest_commit: String,     // mainline tip SHA observed at that time
    current_commit: String,    // commit recorded for the local binary
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public API
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Print version information.
|
||||
pub fn cmd_version() {
|
||||
println!("sunbeam {COMMIT}");
|
||||
println!(" target: {TARGET}");
|
||||
println!(" built: {BUILD_DATE}");
|
||||
}
|
||||
|
||||
/// Self-update from the latest mainline commit via Gitea CI artifacts.
|
||||
pub async fn cmd_update() -> Result<()> {
|
||||
let base = forge_url();
|
||||
if base.is_empty() {
|
||||
bail!(
|
||||
"Forge URL not configured. Set SUNBEAM_FORGE_URL or configure a \
|
||||
production host via `sunbeam config set --host`."
|
||||
);
|
||||
}
|
||||
|
||||
crate::output::step("Checking for updates...");
|
||||
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
// 1. Check latest commit on mainline
|
||||
let latest_commit = fetch_latest_commit(&client, &base).await?;
|
||||
let short_latest = &latest_commit[..std::cmp::min(8, latest_commit.len())];
|
||||
|
||||
crate::output::ok(&format!("Current: {COMMIT}"));
|
||||
crate::output::ok(&format!("Latest: {short_latest}"));
|
||||
|
||||
if latest_commit.starts_with(COMMIT) || COMMIT.starts_with(&latest_commit[..std::cmp::min(COMMIT.len(), latest_commit.len())]) {
|
||||
crate::output::ok("Already up to date.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// 2. Find the CI artifact for our platform
|
||||
crate::output::step("Downloading update...");
|
||||
let wanted = artifact_name();
|
||||
|
||||
let artifacts = fetch_artifacts(&client, &base).await?;
|
||||
let binary_artifact = artifacts
|
||||
.iter()
|
||||
.find(|a| a.name == wanted)
|
||||
.with_context(|| format!("No artifact found for platform '{wanted}'"))?;
|
||||
|
||||
let checksums_artifact = artifacts
|
||||
.iter()
|
||||
.find(|a| a.name == "checksums.txt" || a.name == "checksums");
|
||||
|
||||
// 3. Download the binary
|
||||
let binary_url = format!(
|
||||
"{base}/api/v1/repos/studio/cli/actions/artifacts/{id}",
|
||||
id = binary_artifact.id
|
||||
);
|
||||
let binary_bytes = client
|
||||
.get(&binary_url)
|
||||
.send()
|
||||
.await?
|
||||
.error_for_status()
|
||||
.context("Failed to download binary artifact")?
|
||||
.bytes()
|
||||
.await?;
|
||||
|
||||
crate::output::ok(&format!("Downloaded {} bytes", binary_bytes.len()));
|
||||
|
||||
// 4. Verify SHA256 if checksums artifact exists
|
||||
if let Some(checksums) = checksums_artifact {
|
||||
let checksums_url = format!(
|
||||
"{base}/api/v1/repos/studio/cli/actions/artifacts/{id}",
|
||||
id = checksums.id
|
||||
);
|
||||
let checksums_text = client
|
||||
.get(&checksums_url)
|
||||
.send()
|
||||
.await?
|
||||
.error_for_status()
|
||||
.context("Failed to download checksums")?
|
||||
.text()
|
||||
.await?;
|
||||
|
||||
verify_checksum(&binary_bytes, &wanted, &checksums_text)?;
|
||||
crate::output::ok("SHA256 checksum verified.");
|
||||
} else {
|
||||
crate::output::warn("No checksums artifact found; skipping verification.");
|
||||
}
|
||||
|
||||
// 5. Atomic self-replace
|
||||
crate::output::step("Installing update...");
|
||||
let current_exe = std::env::current_exe().context("Failed to determine current executable path")?;
|
||||
atomic_replace(¤t_exe, &binary_bytes)?;
|
||||
|
||||
crate::output::ok(&format!(
|
||||
"Updated sunbeam {COMMIT} -> {short_latest}"
|
||||
));
|
||||
|
||||
// Update the cache so background check knows we are current
|
||||
let _ = write_cache(&UpdateCache {
|
||||
last_check: Utc::now(),
|
||||
latest_commit: latest_commit.clone(),
|
||||
current_commit: latest_commit,
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Background update check. Returns a notification message if a newer version
|
||||
/// is available, or None if up-to-date / on error / checked too recently.
|
||||
///
|
||||
/// This function never blocks for long and never returns errors — it silently
|
||||
/// returns None on any failure.
|
||||
pub async fn check_update_background() -> Option<String> {
|
||||
// Read cache
|
||||
let cache_path = update_cache_path();
|
||||
if let Ok(data) = fs::read_to_string(&cache_path) {
|
||||
if let Ok(cache) = serde_json::from_str::<UpdateCache>(&data) {
|
||||
let age = Utc::now().signed_duration_since(cache.last_check);
|
||||
if age.num_seconds() < 3600 {
|
||||
// Checked recently — just compare cached values
|
||||
if cache.latest_commit.starts_with(COMMIT)
|
||||
|| COMMIT.starts_with(&cache.latest_commit[..std::cmp::min(COMMIT.len(), cache.latest_commit.len())])
|
||||
{
|
||||
return None; // up to date
|
||||
}
|
||||
let short = &cache.latest_commit[..std::cmp::min(8, cache.latest_commit.len())];
|
||||
return Some(format!(
|
||||
"A newer version of sunbeam is available ({short}). Run `sunbeam update` to upgrade."
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Time to check again
|
||||
let base = forge_url();
|
||||
if base.is_empty() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(5))
|
||||
.build()
|
||||
.ok()?;
|
||||
|
||||
let latest = fetch_latest_commit(&client, &base).await.ok()?;
|
||||
|
||||
let cache = UpdateCache {
|
||||
last_check: Utc::now(),
|
||||
latest_commit: latest.clone(),
|
||||
current_commit: COMMIT.to_string(),
|
||||
};
|
||||
let _ = write_cache(&cache);
|
||||
|
||||
if latest.starts_with(COMMIT)
|
||||
|| COMMIT.starts_with(&latest[..std::cmp::min(COMMIT.len(), latest.len())])
|
||||
{
|
||||
return None;
|
||||
}
|
||||
|
||||
let short = &latest[..std::cmp::min(8, latest.len())];
|
||||
Some(format!(
|
||||
"A newer version of sunbeam is available ({short}). Run `sunbeam update` to upgrade."
|
||||
))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Internal helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Fetch the latest commit SHA on the mainline branch.
|
||||
async fn fetch_latest_commit(client: &reqwest::Client, forge_url: &str) -> Result<String> {
|
||||
let url = format!("{forge_url}/api/v1/repos/studio/cli/branches/mainline");
|
||||
let resp: BranchResponse = client
|
||||
.get(&url)
|
||||
.send()
|
||||
.await?
|
||||
.error_for_status()
|
||||
.context("Failed to query mainline branch")?
|
||||
.json()
|
||||
.await?;
|
||||
Ok(resp.commit.id)
|
||||
}
|
||||
|
||||
/// Fetch the list of CI artifacts for the repo.
|
||||
async fn fetch_artifacts(client: &reqwest::Client, forge_url: &str) -> Result<Vec<Artifact>> {
|
||||
let url = format!("{forge_url}/api/v1/repos/studio/cli/actions/artifacts");
|
||||
let resp: ArtifactListResponse = client
|
||||
.get(&url)
|
||||
.send()
|
||||
.await?
|
||||
.error_for_status()
|
||||
.context("Failed to query CI artifacts")?
|
||||
.json()
|
||||
.await?;
|
||||
Ok(resp.artifacts)
|
||||
}
|
||||
|
||||
/// Verify that the downloaded binary matches the expected SHA256 from checksums text.
|
||||
///
|
||||
/// Checksums file format (one per line):
|
||||
/// <hex-sha256> <filename>
|
||||
fn verify_checksum(binary: &[u8], artifact_name: &str, checksums_text: &str) -> Result<()> {
|
||||
let actual = {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(binary);
|
||||
format!("{:x}", hasher.finalize())
|
||||
};
|
||||
|
||||
for line in checksums_text.lines() {
|
||||
// Split on whitespace — format is "<hash> <name>" or "<hash> <name>"
|
||||
let mut parts = line.split_whitespace();
|
||||
if let (Some(expected_hash), Some(name)) = (parts.next(), parts.next()) {
|
||||
if name == artifact_name {
|
||||
if actual != expected_hash {
|
||||
bail!(
|
||||
"Checksum mismatch for {artifact_name}:\n expected: {expected_hash}\n actual: {actual}"
|
||||
);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bail!("No checksum entry found for '{artifact_name}' in checksums file");
|
||||
}
|
||||
|
||||
/// Atomically replace the binary at `target` with `new_bytes`.
|
||||
///
|
||||
/// Writes to a temp file in the same directory, sets executable permissions,
|
||||
/// then renames over the original.
|
||||
fn atomic_replace(target: &std::path::Path, new_bytes: &[u8]) -> Result<()> {
|
||||
let parent = target
|
||||
.parent()
|
||||
.context("Cannot determine parent directory of current executable")?;
|
||||
|
||||
let tmp_path = parent.join(".sunbeam-update.tmp");
|
||||
|
||||
// Write new binary
|
||||
fs::write(&tmp_path, new_bytes).context("Failed to write temporary update file")?;
|
||||
|
||||
// Set executable permissions (unix)
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
fs::set_permissions(&tmp_path, fs::Permissions::from_mode(0o755))
|
||||
.context("Failed to set executable permissions")?;
|
||||
}
|
||||
|
||||
// Atomic rename
|
||||
fs::rename(&tmp_path, target).context("Failed to replace current executable")?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Write the update-check cache to disk.
|
||||
fn write_cache(cache: &UpdateCache) -> Result<()> {
|
||||
let path = update_cache_path();
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
let json = serde_json::to_string_pretty(cache)?;
|
||||
fs::write(&path, json)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // build.rs must have injected all three compile-time env vars.
    #[test]
    fn test_version_consts() {
        // COMMIT, TARGET, BUILD_DATE are set at compile time
        assert!(!COMMIT.is_empty());
        assert!(!TARGET.is_empty());
        assert!(!BUILD_DATE.is_empty());
    }

    // Artifact names follow the "sunbeam-<target-triple>" convention.
    #[test]
    fn test_artifact_name() {
        let name = artifact_name();
        assert!(name.starts_with("sunbeam-"));
        assert!(name.contains(TARGET));
    }

    // A matching hash entry passes verification.
    #[test]
    fn test_verify_checksum_ok() {
        let data = b"hello world";
        let mut hasher = Sha256::new();
        hasher.update(data);
        let hash = format!("{:x}", hasher.finalize());

        let checksums = format!("{hash} sunbeam-test");
        assert!(verify_checksum(data, "sunbeam-test", &checksums).is_ok());
    }

    // A wrong hash must be rejected.
    #[test]
    fn test_verify_checksum_mismatch() {
        let checksums = "0000000000000000000000000000000000000000000000000000000000000000 sunbeam-test";
        assert!(verify_checksum(b"hello", "sunbeam-test", checksums).is_err());
    }

    // An entry for a different artifact name does not count as a match.
    #[test]
    fn test_verify_checksum_missing_entry() {
        let checksums = "abcdef1234567890 sunbeam-other";
        assert!(verify_checksum(b"hello", "sunbeam-test", checksums).is_err());
    }

    // The cache lives under .../sunbeam/update-check.json.
    #[test]
    fn test_update_cache_path() {
        let path = update_cache_path();
        assert!(path.to_string_lossy().contains("sunbeam"));
        assert!(path.to_string_lossy().ends_with("update-check.json"));
    }

    // UpdateCache serializes and deserializes losslessly.
    #[test]
    fn test_cache_roundtrip() {
        let cache = UpdateCache {
            last_check: Utc::now(),
            latest_commit: "abc12345".to_string(),
            current_commit: "def67890".to_string(),
        };
        let json = serde_json::to_string(&cache).unwrap();
        let loaded: UpdateCache = serde_json::from_str(&json).unwrap();
        assert_eq!(loaded.latest_commit, "abc12345");
        assert_eq!(loaded.current_commit, "def67890");
    }
}
|
||||
|
||||
Reference in New Issue
Block a user