diff --git a/Cargo.lock b/Cargo.lock
index 13ab2ed..4a09035 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3530,6 +3530,7 @@ dependencies = [
  "clap",
  "dirs",
  "flate2",
+ "futures",
  "hmac",
  "k8s-openapi",
  "kube",
@@ -3550,6 +3551,7 @@ dependencies = [
  "tar",
  "tempfile",
  "tokio",
+ "tokio-stream",
 ]

 [[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 342b30b..87df835 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -18,7 +18,7 @@ kube = { version = "0.99", features = ["client", "runtime", "derive", "ws"] }
 k8s-openapi = { version = "0.24", features = ["v1_32"] }

 # HTTP + TLS
-reqwest = { version = "0.12", features = ["json", "rustls-tls"] }
+reqwest = { version = "0.12", features = ["json", "rustls-tls", "blocking"] }
 rustls = "0.23"

 # SSH
@@ -44,6 +44,10 @@ lettre = { version = "0.11", default-features = false, features = ["smtp-transpo
 flate2 = "1"
 tar = "0.4"

+# Async
+futures = "0.3"
+tokio-stream = "0.1"
+
 # Utility
 tempfile = "3"
 dirs = "5"
diff --git a/src/checks.rs b/src/checks.rs
index e1c44fe..b51f574 100644
--- a/src/checks.rs
+++ b/src/checks.rs
@@ -1,5 +1,1133 @@
-use anyhow::Result;
-
-pub async fn cmd_check(_target: Option<&str>) -> Result<()> {
-    todo!("cmd_check: concurrent health checks via reqwest + kube-rs")
-}
//! Service-level health checks — functional probes beyond pod readiness.

use anyhow::Result;
use base64::Engine;
use hmac::{Hmac, Mac};
use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, ListParams};
use kube::ResourceExt;
use sha2::{Digest, Sha256};
use std::time::Duration;

use crate::kube::{get_client, kube_exec, parse_target};
use crate::output::{ok, step, warn};

// HMAC keyed with SHA-256, used by the SigV4 signing helper below.
type HmacSha256 = Hmac<Sha256>;

// ---------------------------------------------------------------------------
// CheckResult
// ---------------------------------------------------------------------------

/// Result of a single health check.
+#[derive(Debug, Clone)] +pub struct CheckResult { + pub name: String, + pub ns: String, + pub svc: String, + pub passed: bool, + pub detail: String, +} + +impl CheckResult { + fn ok(name: &str, ns: &str, svc: &str, detail: &str) -> Self { + Self { + name: name.into(), + ns: ns.into(), + svc: svc.into(), + passed: true, + detail: detail.into(), + } + } + + fn fail(name: &str, ns: &str, svc: &str, detail: &str) -> Self { + Self { + name: name.into(), + ns: ns.into(), + svc: svc.into(), + passed: false, + detail: detail.into(), + } + } +} + +// --------------------------------------------------------------------------- +// HTTP client builder +// --------------------------------------------------------------------------- + +/// Build a reqwest client that trusts the mkcert local CA if available, +/// does not follow redirects, and has a 5s timeout. +fn build_http_client() -> Result { + let mut builder = reqwest::Client::builder() + .redirect(reqwest::redirect::Policy::none()) + .timeout(Duration::from_secs(5)); + + // Try mkcert root CA + if let Ok(output) = std::process::Command::new("mkcert") + .arg("-CAROOT") + .output() + { + if output.status.success() { + let ca_root = String::from_utf8_lossy(&output.stdout).trim().to_string(); + let ca_file = std::path::Path::new(&ca_root).join("rootCA.pem"); + if ca_file.exists() { + if let Ok(pem_bytes) = std::fs::read(&ca_file) { + if let Ok(cert) = reqwest::Certificate::from_pem(&pem_bytes) { + builder = builder.add_root_certificate(cert); + } + } + } + } + } + + Ok(builder.build()?) +} + +/// Helper: GET a URL, return (status_code, body_bytes). Does not follow redirects. 
+async fn http_get( + client: &reqwest::Client, + url: &str, + headers: Option<&[(&str, &str)]>, +) -> Result<(u16, Vec), String> { + let mut req = client.get(url); + if let Some(hdrs) = headers { + for (k, v) in hdrs { + req = req.header(*k, *v); + } + } + match req.send().await { + Ok(resp) => { + let status = resp.status().as_u16(); + let body = resp.bytes().await.unwrap_or_default().to_vec(); + Ok((status, body)) + } + Err(e) => Err(format!("{e}")), + } +} + +/// Read a K8s secret field, returning empty string on failure. +async fn kube_secret(ns: &str, name: &str, key: &str) -> String { + crate::kube::kube_get_secret_field(ns, name, key) + .await + .unwrap_or_default() +} + +// --------------------------------------------------------------------------- +// Individual checks +// --------------------------------------------------------------------------- + +/// GET /api/v1/version -> JSON with version field. +async fn check_gitea_version(domain: &str, client: &reqwest::Client) -> CheckResult { + let url = format!("https://src.{domain}/api/v1/version"); + match http_get(client, &url, None).await { + Ok((200, body)) => { + let ver = serde_json::from_slice::(&body) + .ok() + .and_then(|v| v.get("version").and_then(|v| v.as_str()).map(String::from)) + .unwrap_or_else(|| "?".into()); + CheckResult::ok("gitea-version", "devtools", "gitea", &format!("v{ver}")) + } + Ok((status, _)) => { + CheckResult::fail("gitea-version", "devtools", "gitea", &format!("HTTP {status}")) + } + Err(e) => CheckResult::fail("gitea-version", "devtools", "gitea", &e), + } +} + +/// GET /api/v1/user with admin credentials -> 200 and login field. 
+async fn check_gitea_auth(domain: &str, client: &reqwest::Client) -> CheckResult { + let username = { + let u = kube_secret("devtools", "gitea-admin-credentials", "admin-username").await; + if u.is_empty() { + "gitea_admin".to_string() + } else { + u + } + }; + let password = + kube_secret("devtools", "gitea-admin-credentials", "admin-password").await; + if password.is_empty() { + return CheckResult::fail( + "gitea-auth", + "devtools", + "gitea", + "admin-password not found in secret", + ); + } + + let creds = + base64::engine::general_purpose::STANDARD.encode(format!("{username}:{password}")); + let auth_hdr = format!("Basic {creds}"); + let url = format!("https://src.{domain}/api/v1/user"); + + match http_get(client, &url, Some(&[("Authorization", &auth_hdr)])).await { + Ok((200, body)) => { + let login = serde_json::from_slice::(&body) + .ok() + .and_then(|v| v.get("login").and_then(|v| v.as_str()).map(String::from)) + .unwrap_or_else(|| "?".into()); + CheckResult::ok("gitea-auth", "devtools", "gitea", &format!("user={login}")) + } + Ok((status, _)) => { + CheckResult::fail("gitea-auth", "devtools", "gitea", &format!("HTTP {status}")) + } + Err(e) => CheckResult::fail("gitea-auth", "devtools", "gitea", &e), + } +} + +/// CNPG Cluster readyInstances == instances. 
+async fn check_postgres(_domain: &str, _client: &reqwest::Client) -> CheckResult { + let kube_client = match get_client().await { + Ok(c) => c, + Err(e) => { + return CheckResult::fail("postgres", "data", "postgres", &format!("{e}")); + } + }; + + let ar = kube::api::ApiResource { + group: "postgresql.cnpg.io".into(), + version: "v1".into(), + api_version: "postgresql.cnpg.io/v1".into(), + kind: "Cluster".into(), + plural: "clusters".into(), + }; + + let api: Api = + Api::namespaced_with(kube_client.clone(), "data", &ar); + + match api.get_opt("postgres").await { + Ok(Some(obj)) => { + let ready = obj + .data + .get("status") + .and_then(|s| s.get("readyInstances")) + .and_then(|v| v.as_i64()) + .map(|v| v.to_string()) + .unwrap_or_default(); + let total = obj + .data + .get("status") + .and_then(|s| s.get("instances")) + .and_then(|v| v.as_i64()) + .map(|v| v.to_string()) + .unwrap_or_default(); + + if !ready.is_empty() && !total.is_empty() && ready == total { + CheckResult::ok( + "postgres", + "data", + "postgres", + &format!("{ready}/{total} ready"), + ) + } else { + let r = if ready.is_empty() { "?" } else { &ready }; + let t = if total.is_empty() { "?" } else { &total }; + CheckResult::fail("postgres", "data", "postgres", &format!("{r}/{t} ready")) + } + } + Ok(None) => CheckResult::fail("postgres", "data", "postgres", "cluster not found"), + Err(e) => CheckResult::fail("postgres", "data", "postgres", &format!("{e}")), + } +} + +/// kubectl exec valkey pod -- valkey-cli ping -> PONG. 
+async fn check_valkey(_domain: &str, _client: &reqwest::Client) -> CheckResult { + let kube_client = match get_client().await { + Ok(c) => c, + Err(e) => return CheckResult::fail("valkey", "data", "valkey", &format!("{e}")), + }; + + let api: Api = Api::namespaced(kube_client.clone(), "data"); + let lp = ListParams::default().labels("app=valkey"); + let pod_list = match api.list(&lp).await { + Ok(l) => l, + Err(e) => return CheckResult::fail("valkey", "data", "valkey", &format!("{e}")), + }; + + let pod_name = match pod_list.items.first() { + Some(p) => p.name_any(), + None => return CheckResult::fail("valkey", "data", "valkey", "no valkey pod"), + }; + + match kube_exec("data", &pod_name, &["valkey-cli", "ping"], Some("valkey")).await { + Ok((_, out)) => { + let passed = out == "PONG"; + let detail = if out.is_empty() { + "no response".to_string() + } else { + out + }; + CheckResult { + name: "valkey".into(), + ns: "data".into(), + svc: "valkey".into(), + passed, + detail, + } + } + Err(e) => CheckResult::fail("valkey", "data", "valkey", &format!("{e}")), + } +} + +/// kubectl exec openbao-0 -- bao status -format=json -> initialized + unsealed. 
+async fn check_openbao(_domain: &str, _client: &reqwest::Client) -> CheckResult { + match kube_exec( + "data", + "openbao-0", + &["bao", "status", "-format=json"], + Some("openbao"), + ) + .await + { + Ok((_, out)) => { + if out.is_empty() { + return CheckResult::fail("openbao", "data", "openbao", "no response"); + } + match serde_json::from_str::(&out) { + Ok(data) => { + let init = data + .get("initialized") + .and_then(|v| v.as_bool()) + .unwrap_or(false); + let sealed = data + .get("sealed") + .and_then(|v| v.as_bool()) + .unwrap_or(true); + let passed = init && !sealed; + CheckResult { + name: "openbao".into(), + ns: "data".into(), + svc: "openbao".into(), + passed, + detail: format!("init={init}, sealed={sealed}"), + } + } + Err(_) => { + let truncated: String = out.chars().take(80).collect(); + CheckResult::fail("openbao", "data", "openbao", &truncated) + } + } + } + Err(e) => CheckResult::fail("openbao", "data", "openbao", &format!("{e}")), + } +} + +// --------------------------------------------------------------------------- +// S3 auth (AWS4-HMAC-SHA256) +// --------------------------------------------------------------------------- + +/// Generate AWS4-HMAC-SHA256 Authorization and x-amz-date headers for an unsigned +/// GET / request, matching the Python `_s3_auth_headers` function exactly. 
+fn s3_auth_headers(access_key: &str, secret_key: &str, host: &str) -> (String, String) { + let now = chrono::Utc::now(); + let amzdate = now.format("%Y%m%dT%H%M%SZ").to_string(); + let datestamp = now.format("%Y%m%d").to_string(); + + let payload_hash = hex_encode(&Sha256::digest(b"")); + let canonical = format!( + "GET\n/\n\nhost:{host}\nx-amz-date:{amzdate}\n\nhost;x-amz-date\n{payload_hash}" + ); + let credential_scope = format!("{datestamp}/us-east-1/s3/aws4_request"); + let canonical_hash = hex_encode(&Sha256::digest(canonical.as_bytes())); + let string_to_sign = + format!("AWS4-HMAC-SHA256\n{amzdate}\n{credential_scope}\n{canonical_hash}"); + + fn hmac_sign(key: &[u8], msg: &[u8]) -> Vec { + let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length"); + mac.update(msg); + mac.finalize().into_bytes().to_vec() + } + + let k = hmac_sign( + format!("AWS4{secret_key}").as_bytes(), + datestamp.as_bytes(), + ); + let k = hmac_sign(&k, b"us-east-1"); + let k = hmac_sign(&k, b"s3"); + let k = hmac_sign(&k, b"aws4_request"); + + let sig = { + let mut mac = HmacSha256::new_from_slice(&k).expect("HMAC accepts any key length"); + mac.update(string_to_sign.as_bytes()); + hex_encode(&mac.finalize().into_bytes()) + }; + + let auth = format!( + "AWS4-HMAC-SHA256 Credential={access_key}/{credential_scope}, SignedHeaders=host;x-amz-date, Signature={sig}" + ); + (auth, amzdate) +} + +/// GET https://s3.{domain}/ with S3 credentials -> 200 list-buckets response. 
+async fn check_seaweedfs(domain: &str, client: &reqwest::Client) -> CheckResult { + let access_key = + kube_secret("storage", "seaweedfs-s3-credentials", "S3_ACCESS_KEY").await; + let secret_key = + kube_secret("storage", "seaweedfs-s3-credentials", "S3_SECRET_KEY").await; + + if access_key.is_empty() || secret_key.is_empty() { + return CheckResult::fail( + "seaweedfs", + "storage", + "seaweedfs", + "credentials not found in seaweedfs-s3-credentials secret", + ); + } + + let host = format!("s3.{domain}"); + let url = format!("https://{host}/"); + let (auth, amzdate) = s3_auth_headers(&access_key, &secret_key, &host); + + match http_get( + client, + &url, + Some(&[("Authorization", &auth), ("x-amz-date", &amzdate)]), + ) + .await + { + Ok((200, _)) => { + CheckResult::ok("seaweedfs", "storage", "seaweedfs", "S3 authenticated") + } + Ok((status, _)) => CheckResult::fail( + "seaweedfs", + "storage", + "seaweedfs", + &format!("HTTP {status}"), + ), + Err(e) => CheckResult::fail("seaweedfs", "storage", "seaweedfs", &e), + } +} + +/// GET /kratos/health/ready -> 200. +async fn check_kratos(domain: &str, client: &reqwest::Client) -> CheckResult { + let url = format!("https://auth.{domain}/kratos/health/ready"); + match http_get(client, &url, None).await { + Ok((status, body)) => { + let ok_flag = status == 200; + let mut detail = format!("HTTP {status}"); + if !ok_flag && !body.is_empty() { + let body_str: String = + String::from_utf8_lossy(&body).chars().take(80).collect(); + detail = format!("{detail}: {body_str}"); + } + CheckResult { + name: "kratos".into(), + ns: "ory".into(), + svc: "kratos".into(), + passed: ok_flag, + detail, + } + } + Err(e) => CheckResult::fail("kratos", "ory", "kratos", &e), + } +} + +/// GET /.well-known/openid-configuration -> 200 with issuer field. 
+async fn check_hydra_oidc(domain: &str, client: &reqwest::Client) -> CheckResult { + let url = format!("https://auth.{domain}/.well-known/openid-configuration"); + match http_get(client, &url, None).await { + Ok((200, body)) => { + let issuer = serde_json::from_slice::(&body) + .ok() + .and_then(|v| v.get("issuer").and_then(|v| v.as_str()).map(String::from)) + .unwrap_or_else(|| "?".into()); + CheckResult::ok("hydra-oidc", "ory", "hydra", &format!("issuer={issuer}")) + } + Ok((status, _)) => { + CheckResult::fail("hydra-oidc", "ory", "hydra", &format!("HTTP {status}")) + } + Err(e) => CheckResult::fail("hydra-oidc", "ory", "hydra", &e), + } +} + +/// GET https://people.{domain}/ -> any response < 500 (302 to OIDC is fine). +async fn check_people(domain: &str, client: &reqwest::Client) -> CheckResult { + let url = format!("https://people.{domain}/"); + match http_get(client, &url, None).await { + Ok((status, _)) => CheckResult { + name: "people".into(), + ns: "lasuite".into(), + svc: "people".into(), + passed: status < 500, + detail: format!("HTTP {status}"), + }, + Err(e) => CheckResult::fail("people", "lasuite", "people", &e), + } +} + +/// GET /api/v1.0/config/ -> any response < 500 (401 auth-required is fine). +async fn check_people_api(domain: &str, client: &reqwest::Client) -> CheckResult { + let url = format!("https://people.{domain}/api/v1.0/config/"); + match http_get(client, &url, None).await { + Ok((status, _)) => CheckResult { + name: "people-api".into(), + ns: "lasuite".into(), + svc: "people".into(), + passed: status < 500, + detail: format!("HTTP {status}"), + }, + Err(e) => CheckResult::fail("people-api", "lasuite", "people", &e), + } +} + +/// kubectl exec livekit-server pod -- wget localhost:7880/ -> rc 0. 
+async fn check_livekit(_domain: &str, _client: &reqwest::Client) -> CheckResult { + let kube_client = match get_client().await { + Ok(c) => c, + Err(e) => return CheckResult::fail("livekit", "media", "livekit", &format!("{e}")), + }; + + let api: Api = Api::namespaced(kube_client.clone(), "media"); + let lp = ListParams::default().labels("app.kubernetes.io/name=livekit-server"); + let pod_list = match api.list(&lp).await { + Ok(l) => l, + Err(e) => return CheckResult::fail("livekit", "media", "livekit", &format!("{e}")), + }; + + let pod_name = match pod_list.items.first() { + Some(p) => p.name_any(), + None => return CheckResult::fail("livekit", "media", "livekit", "no livekit pod"), + }; + + match kube_exec( + "media", + &pod_name, + &["wget", "-qO-", "http://localhost:7880/"], + None, + ) + .await + { + Ok((exit_code, _)) => { + if exit_code == 0 { + CheckResult::ok("livekit", "media", "livekit", "server responding") + } else { + CheckResult::fail("livekit", "media", "livekit", "server not responding") + } + } + Err(e) => CheckResult::fail("livekit", "media", "livekit", &format!("{e}")), + } +} + +// --------------------------------------------------------------------------- +// Check registry — function pointer + metadata +// --------------------------------------------------------------------------- + +type CheckFn = for<'a> fn( + &'a str, + &'a reqwest::Client, +) -> std::pin::Pin + Send + 'a>>; + +struct CheckEntry { + func: CheckFn, + ns: &'static str, + svc: &'static str, +} + +fn check_registry() -> Vec { + vec![ + CheckEntry { + func: |d, c| Box::pin(check_gitea_version(d, c)), + ns: "devtools", + svc: "gitea", + }, + CheckEntry { + func: |d, c| Box::pin(check_gitea_auth(d, c)), + ns: "devtools", + svc: "gitea", + }, + CheckEntry { + func: |d, c| Box::pin(check_postgres(d, c)), + ns: "data", + svc: "postgres", + }, + CheckEntry { + func: |d, c| Box::pin(check_valkey(d, c)), + ns: "data", + svc: "valkey", + }, + CheckEntry { + func: |d, c| 
Box::pin(check_openbao(d, c)), + ns: "data", + svc: "openbao", + }, + CheckEntry { + func: |d, c| Box::pin(check_seaweedfs(d, c)), + ns: "storage", + svc: "seaweedfs", + }, + CheckEntry { + func: |d, c| Box::pin(check_kratos(d, c)), + ns: "ory", + svc: "kratos", + }, + CheckEntry { + func: |d, c| Box::pin(check_hydra_oidc(d, c)), + ns: "ory", + svc: "hydra", + }, + CheckEntry { + func: |d, c| Box::pin(check_people(d, c)), + ns: "lasuite", + svc: "people", + }, + CheckEntry { + func: |d, c| Box::pin(check_people_api(d, c)), + ns: "lasuite", + svc: "people", + }, + CheckEntry { + func: |d, c| Box::pin(check_livekit(d, c)), + ns: "media", + svc: "livekit", + }, + ] +} + +// --------------------------------------------------------------------------- +// cmd_check — concurrent execution +// --------------------------------------------------------------------------- + +/// Run service-level health checks, optionally scoped to a namespace or service. +pub async fn cmd_check(target: Option<&str>) -> Result<()> { + step("Service health checks..."); + + let domain = crate::kube::get_domain().await?; + let http_client = build_http_client()?; + + let (ns_filter, svc_filter) = parse_target(target)?; + + let all_checks = check_registry(); + let selected: Vec<&CheckEntry> = all_checks + .iter() + .filter(|e| { + (ns_filter.is_none() || ns_filter == Some(e.ns)) + && (svc_filter.is_none() || svc_filter == Some(e.svc)) + }) + .collect(); + + if selected.is_empty() { + warn(&format!( + "No checks match target: {}", + target.unwrap_or("(none)") + )); + return Ok(()); + } + + // Run all checks concurrently + let mut join_set = tokio::task::JoinSet::new(); + for entry in &selected { + let domain = domain.clone(); + let client = http_client.clone(); + let func = entry.func; + join_set.spawn(async move { func(&domain, &client).await }); + } + + let mut results: Vec = Vec::new(); + while let Some(res) = join_set.join_next().await { + match res { + Ok(cr) => results.push(cr), + Err(e) => 
results.push(CheckResult::fail("unknown", "?", "?", &format!("{e}"))), + } + } + + // Sort to match the registry order for consistent output + let registry = check_registry(); + results.sort_by(|a, b| { + let idx_a = registry + .iter() + .position(|e| e.ns == a.ns && e.svc == a.svc) + .unwrap_or(usize::MAX); + let idx_b = registry + .iter() + .position(|e| e.ns == b.ns && e.svc == b.svc) + .unwrap_or(usize::MAX); + idx_a.cmp(&idx_b).then_with(|| a.name.cmp(&b.name)) + }); + + // Print grouped by namespace + let name_w = results.iter().map(|r| r.name.len()).max().unwrap_or(0); + let mut cur_ns: Option<&str> = None; + for r in &results { + if cur_ns != Some(&r.ns) { + println!(" {}:", r.ns); + cur_ns = Some(&r.ns); + } + let icon = if r.passed { "\u{2713}" } else { "\u{2717}" }; + let detail = if r.detail.is_empty() { + String::new() + } else { + format!(" {}", r.detail) + }; + println!(" {icon} {: = results.iter().filter(|r| !r.passed).collect(); + if failed.is_empty() { + ok(&format!("All {} check(s) passed.", results.len())); + } else { + warn(&format!("{} check(s) failed.", failed.len())); + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// hex encoding helper (avoids adding the `hex` crate) +// --------------------------------------------------------------------------- + +fn hex_encode(bytes: impl AsRef<[u8]>) -> String { + const HEX_CHARS: &[u8; 16] = b"0123456789abcdef"; + let bytes = bytes.as_ref(); + let mut s = String::with_capacity(bytes.len() * 2); + for &b in bytes { + s.push(HEX_CHARS[(b >> 4) as usize] as char); + s.push(HEX_CHARS[(b & 0xf) as usize] as char); + } + s +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + // ── S3 auth header tests ───────────────────────────────────────────── + + #[test] + fn 
test_s3_auth_headers_format() { + let (auth, amzdate) = s3_auth_headers( + "AKIAIOSFODNN7EXAMPLE", + "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY", + "s3.example.com", + ); + + // Verify header structure + assert!(auth.starts_with("AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/")); + assert!(auth.contains("us-east-1/s3/aws4_request")); + assert!(auth.contains("SignedHeaders=host;x-amz-date")); + assert!(auth.contains("Signature=")); + + // amzdate format: YYYYMMDDTHHMMSSZ + assert_eq!(amzdate.len(), 16); + assert!(amzdate.ends_with('Z')); + assert!(amzdate.contains('T')); + } + + #[test] + fn test_s3_auth_headers_signature_changes_with_key() { + let (auth1, _) = s3_auth_headers("key1", "secret1", "host1"); + let (auth2, _) = s3_auth_headers("key2", "secret2", "host2"); + // Different keys produce different signatures + let sig1 = auth1.split("Signature=").nth(1).unwrap(); + let sig2 = auth2.split("Signature=").nth(1).unwrap(); + assert_ne!(sig1, sig2); + } + + #[test] + fn test_s3_auth_headers_credential_scope() { + let (auth, amzdate) = s3_auth_headers("AK", "SK", "s3.example.com"); + let datestamp = &amzdate[..8]; + let expected_scope = format!("{datestamp}/us-east-1/s3/aws4_request"); + assert!(auth.contains(&expected_scope)); + } + + // ── hex encoding ──────────────────────────────────────────────────── + + #[test] + fn test_hex_encode_empty() { + assert_eq!(hex_encode(b""), ""); + } + + #[test] + fn test_hex_encode_zero() { + assert_eq!(hex_encode(b"\x00"), "00"); + } + + #[test] + fn test_hex_encode_ff() { + assert_eq!(hex_encode(b"\xff"), "ff"); + } + + #[test] + fn test_hex_encode_deadbeef() { + assert_eq!(hex_encode(b"\xde\xad\xbe\xef"), "deadbeef"); + } + + #[test] + fn test_hex_encode_hello() { + assert_eq!(hex_encode(b"hello"), "68656c6c6f"); + } + + // ── CheckResult ───────────────────────────────────────────────────── + + #[test] + fn test_check_result_ok() { + let r = CheckResult::ok("gitea-version", "devtools", "gitea", "v1.21.0"); + 
assert!(r.passed); + assert_eq!(r.name, "gitea-version"); + assert_eq!(r.ns, "devtools"); + assert_eq!(r.svc, "gitea"); + assert_eq!(r.detail, "v1.21.0"); + } + + #[test] + fn test_check_result_fail() { + let r = CheckResult::fail("postgres", "data", "postgres", "cluster not found"); + assert!(!r.passed); + assert_eq!(r.detail, "cluster not found"); + } + + // ── Check registry ────────────────────────────────────────────────── + + #[test] + fn test_check_registry_has_all_checks() { + let registry = check_registry(); + assert_eq!(registry.len(), 11); + + // Verify order matches Python CHECKS list + assert_eq!(registry[0].ns, "devtools"); + assert_eq!(registry[0].svc, "gitea"); + assert_eq!(registry[1].ns, "devtools"); + assert_eq!(registry[1].svc, "gitea"); + assert_eq!(registry[2].ns, "data"); + assert_eq!(registry[2].svc, "postgres"); + assert_eq!(registry[3].ns, "data"); + assert_eq!(registry[3].svc, "valkey"); + assert_eq!(registry[4].ns, "data"); + assert_eq!(registry[4].svc, "openbao"); + assert_eq!(registry[5].ns, "storage"); + assert_eq!(registry[5].svc, "seaweedfs"); + assert_eq!(registry[6].ns, "ory"); + assert_eq!(registry[6].svc, "kratos"); + assert_eq!(registry[7].ns, "ory"); + assert_eq!(registry[7].svc, "hydra"); + assert_eq!(registry[8].ns, "lasuite"); + assert_eq!(registry[8].svc, "people"); + assert_eq!(registry[9].ns, "lasuite"); + assert_eq!(registry[9].svc, "people"); + assert_eq!(registry[10].ns, "media"); + assert_eq!(registry[10].svc, "livekit"); + } + + #[test] + fn test_check_registry_filter_namespace() { + let all = check_registry(); + let filtered: Vec<&CheckEntry> = all.iter().filter(|e| e.ns == "ory").collect(); + assert_eq!(filtered.len(), 2); + } + + #[test] + fn test_check_registry_filter_service() { + let all = check_registry(); + let filtered: Vec<&CheckEntry> = all + .iter() + .filter(|e| e.ns == "ory" && e.svc == "kratos") + .collect(); + assert_eq!(filtered.len(), 1); + } + + #[test] + fn test_check_registry_filter_no_match() { 
+ let all = check_registry(); + let filtered: Vec<&CheckEntry> = + all.iter().filter(|e| e.ns == "nonexistent").collect(); + assert!(filtered.is_empty()); + } + + // ── HMAC-SHA256 verification ──────────────────────────────────────── + + #[test] + fn test_hmac_sha256_known_vector() { + // RFC 4231 Test Case 2 + let key = b"Jefe"; + let data = b"what do ya want for nothing?"; + let mut mac = HmacSha256::new_from_slice(key).expect("HMAC key"); + mac.update(data); + let result = hex_encode(mac.finalize().into_bytes()); + assert_eq!( + result, + "5bdcc146bf60754e6a042426089575c75a003f089d2739839dec58b964ec3843" + ); + } + + // ── SHA256 verification ───────────────────────────────────────────── + + #[test] + fn test_sha256_empty() { + let hash = hex_encode(Sha256::digest(b"")); + assert_eq!( + hash, + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" + ); + } + + #[test] + fn test_sha256_hello() { + let hash = hex_encode(Sha256::digest(b"hello")); + assert_eq!( + hash, + "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824" + ); + } + + // ── Additional CheckResult tests ────────────────────────────────── + + #[test] + fn test_check_result_ok_empty_detail() { + let r = CheckResult::ok("test", "ns", "svc", ""); + assert!(r.passed); + assert!(r.detail.is_empty()); + } + + #[test] + fn test_check_result_fail_contains_status_code() { + let r = CheckResult::fail("gitea-version", "devtools", "gitea", "HTTP 502"); + assert!(!r.passed); + assert!(r.detail.contains("502")); + } + + #[test] + fn test_check_result_fail_contains_secret_message() { + let r = CheckResult::fail( + "gitea-auth", + "devtools", + "gitea", + "admin-password not found in secret", + ); + assert!(!r.passed); + assert!(r.detail.contains("secret")); + } + + #[test] + fn test_check_result_ok_with_version() { + let r = CheckResult::ok("gitea-version", "devtools", "gitea", "v1.21.0"); + assert!(r.passed); + assert!(r.detail.contains("1.21.0")); + } + + #[test] + fn 
test_check_result_ok_with_login() { + let r = CheckResult::ok("gitea-auth", "devtools", "gitea", "user=gitea_admin"); + assert!(r.passed); + assert!(r.detail.contains("gitea_admin")); + } + + #[test] + fn test_check_result_ok_authenticated() { + let r = CheckResult::ok("seaweedfs", "storage", "seaweedfs", "S3 authenticated"); + assert!(r.passed); + assert!(r.detail.contains("authenticated")); + } + + // ── Additional registry tests ───────────────────────────────────── + + #[test] + fn test_check_registry_expected_namespaces() { + let registry = check_registry(); + let namespaces: std::collections::HashSet<&str> = + registry.iter().map(|e| e.ns).collect(); + for expected in &["devtools", "data", "storage", "ory", "lasuite", "media"] { + assert!( + namespaces.contains(expected), + "registry missing namespace: {expected}" + ); + } + } + + #[test] + fn test_check_registry_expected_services() { + let registry = check_registry(); + let services: std::collections::HashSet<&str> = + registry.iter().map(|e| e.svc).collect(); + for expected in &[ + "gitea", "postgres", "valkey", "openbao", "seaweedfs", "kratos", "hydra", + "people", "livekit", + ] { + assert!( + services.contains(expected), + "registry missing service: {expected}" + ); + } + } + + #[test] + fn test_check_registry_devtools_has_two_gitea_entries() { + let registry = check_registry(); + let gitea: Vec<_> = registry + .iter() + .filter(|e| e.ns == "devtools" && e.svc == "gitea") + .collect(); + assert_eq!(gitea.len(), 2); + } + + #[test] + fn test_check_registry_lasuite_has_two_people_entries() { + let registry = check_registry(); + let people: Vec<_> = registry + .iter() + .filter(|e| e.ns == "lasuite" && e.svc == "people") + .collect(); + assert_eq!(people.len(), 2); + } + + #[test] + fn test_check_registry_data_has_three_entries() { + let registry = check_registry(); + let data: Vec<_> = registry.iter().filter(|e| e.ns == "data").collect(); + assert_eq!(data.len(), 3); // postgres, valkey, openbao + } + + // 
── Filter logic (mirrors Python TestCmdCheck) ──────────────────── + + /// Helper: apply the same filter logic as cmd_check to the registry. + fn filter_registry( + ns_filter: Option<&str>, + svc_filter: Option<&str>, + ) -> Vec<(&'static str, &'static str)> { + let all = check_registry(); + all.into_iter() + .filter(|e| ns_filter.map_or(true, |ns| e.ns == ns)) + .filter(|e| svc_filter.map_or(true, |svc| e.svc == svc)) + .map(|e| (e.ns, e.svc)) + .collect() + } + + #[test] + fn test_no_target_runs_all() { + let selected = filter_registry(None, None); + assert_eq!(selected.len(), 11); + } + + #[test] + fn test_ns_filter_devtools_selects_two() { + let selected = filter_registry(Some("devtools"), None); + assert_eq!(selected.len(), 2); + assert!(selected.iter().all(|(ns, _)| *ns == "devtools")); + } + + #[test] + fn test_ns_filter_skips_other_namespaces() { + let selected = filter_registry(Some("devtools"), None); + // Should NOT contain data/postgres + assert!(selected.iter().all(|(ns, _)| *ns != "data")); + } + + #[test] + fn test_svc_filter_ory_kratos() { + let selected = filter_registry(Some("ory"), Some("kratos")); + assert_eq!(selected.len(), 1); + assert_eq!(selected[0], ("ory", "kratos")); + } + + #[test] + fn test_svc_filter_ory_hydra() { + let selected = filter_registry(Some("ory"), Some("hydra")); + assert_eq!(selected.len(), 1); + assert_eq!(selected[0], ("ory", "hydra")); + } + + #[test] + fn test_svc_filter_people_returns_both() { + let selected = filter_registry(Some("lasuite"), Some("people")); + assert_eq!(selected.len(), 2); + assert!(selected.iter().all(|(ns, svc)| *ns == "lasuite" && *svc == "people")); + } + + #[test] + fn test_filter_nonexistent_ns_returns_empty() { + let selected = filter_registry(Some("nonexistent"), None); + assert!(selected.is_empty()); + } + + #[test] + fn test_filter_ns_match_svc_mismatch_returns_empty() { + // ory namespace exists but postgres service does not live there + let selected = filter_registry(Some("ory"), 
Some("postgres")); + assert!(selected.is_empty()); + } + + #[test] + fn test_filter_data_namespace() { + let selected = filter_registry(Some("data"), None); + assert_eq!(selected.len(), 3); + let svcs: Vec<&str> = selected.iter().map(|(_, svc)| *svc).collect(); + assert!(svcs.contains(&"postgres")); + assert!(svcs.contains(&"valkey")); + assert!(svcs.contains(&"openbao")); + } + + #[test] + fn test_filter_storage_namespace() { + let selected = filter_registry(Some("storage"), None); + assert_eq!(selected.len(), 1); + assert_eq!(selected[0], ("storage", "seaweedfs")); + } + + #[test] + fn test_filter_media_namespace() { + let selected = filter_registry(Some("media"), None); + assert_eq!(selected.len(), 1); + assert_eq!(selected[0], ("media", "livekit")); + } + + // ── Additional S3 auth header tests ─────────────────────────────── + + #[test] + fn test_s3_auth_headers_deterministic() { + // Same inputs at the same point in time produce identical output. + // (Time may advance between calls, but the format is still valid.) + let (auth1, date1) = s3_auth_headers("AK", "SK", "host"); + let (auth2, date2) = s3_auth_headers("AK", "SK", "host"); + // If both calls happen within the same second, they must be identical. 
+        if date1 == date2 {
+            assert_eq!(auth1, auth2, "same inputs at same time must produce same signature");
+        }
+    }
+
+    #[test]
+    fn test_s3_auth_headers_different_hosts_differ() {
+        let (auth1, d1) = s3_auth_headers("AK", "SK", "s3.a.com");
+        let (auth2, d2) = s3_auth_headers("AK", "SK", "s3.b.com");
+        let sig1 = auth1.split("Signature=").nth(1).unwrap();
+        let sig2 = auth2.split("Signature=").nth(1).unwrap();
+        // Different hosts -> different canonical request -> different signature
+        // (only guaranteed when timestamps match)
+        if d1 == d2 {
+            assert_ne!(sig1, sig2);
+        }
+    }
+
+    #[test]
+    fn test_s3_auth_headers_signature_is_64_hex_chars() {
+        let (auth, _) = s3_auth_headers("AK", "SK", "host");
+        let sig = auth.split("Signature=").nth(1).unwrap();
+        assert_eq!(sig.len(), 64, "SHA-256 HMAC hex signature is 64 chars");
+        assert!(
+            sig.chars().all(|c| c.is_ascii_hexdigit()),
+            "signature must be lowercase hex: {sig}"
+        );
+    }
+
+    // ── hex_encode edge cases ─────────────────────────────────────────
+
+    #[test]
+    fn test_hex_encode_all_byte_values() {
+        // Verify 0x00..0xff all produce 2-char lowercase hex
+        for b in 0u8..=255 {
+            let encoded = hex_encode([b]);
+            assert_eq!(encoded.len(), 2);
+            assert!(encoded.chars().all(|c| c.is_ascii_hexdigit()));
+        }
+    }
+
+    #[test]
+    fn test_hex_encode_matches_format() {
+        // Cross-check against Rust's built-in formatting
+        let bytes: Vec<u8> = (0..32).collect();
+        let expected: String = bytes.iter().map(|b| format!("{b:02x}")).collect();
+        assert_eq!(hex_encode(&bytes), expected);
+    }
+}
diff --git a/src/cli.rs b/src/cli.rs
index 20d4ffa..2939827 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -326,6 +326,338 @@ fn default_context(env: &Env) -> &'static str {
     }
 }
 
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use clap::Parser;
+
+    fn parse(args: &[&str]) -> Cli {
+        Cli::try_parse_from(args).unwrap()
+    }
+
+    // 1.
test_up + #[test] + fn test_up() { + let cli = parse(&["sunbeam", "up"]); + assert!(matches!(cli.verb, Some(Verb::Up))); + } + + // 2. test_status_no_target + #[test] + fn test_status_no_target() { + let cli = parse(&["sunbeam", "status"]); + match cli.verb { + Some(Verb::Status { target }) => assert!(target.is_none()), + _ => panic!("expected Status"), + } + } + + // 3. test_status_with_namespace + #[test] + fn test_status_with_namespace() { + let cli = parse(&["sunbeam", "status", "ory"]); + match cli.verb { + Some(Verb::Status { target }) => assert_eq!(target.unwrap(), "ory"), + _ => panic!("expected Status"), + } + } + + // 4. test_logs_no_follow + #[test] + fn test_logs_no_follow() { + let cli = parse(&["sunbeam", "logs", "ory/kratos"]); + match cli.verb { + Some(Verb::Logs { target, follow }) => { + assert_eq!(target, "ory/kratos"); + assert!(!follow); + } + _ => panic!("expected Logs"), + } + } + + // 5. test_logs_follow_short + #[test] + fn test_logs_follow_short() { + let cli = parse(&["sunbeam", "logs", "ory/kratos", "-f"]); + match cli.verb { + Some(Verb::Logs { follow, .. }) => assert!(follow), + _ => panic!("expected Logs"), + } + } + + // 6. test_build_proxy + #[test] + fn test_build_proxy() { + let cli = parse(&["sunbeam", "build", "proxy"]); + match cli.verb { + Some(Verb::Build { what, push, deploy }) => { + assert!(matches!(what, BuildTarget::Proxy)); + assert!(!push); + assert!(!deploy); + } + _ => panic!("expected Build"), + } + } + + // 7. test_build_deploy_flag + #[test] + fn test_build_deploy_flag() { + let cli = parse(&["sunbeam", "build", "proxy", "--deploy"]); + match cli.verb { + Some(Verb::Build { deploy, push, .. }) => { + assert!(deploy); + // clap does not imply --push; that logic is in dispatch() + assert!(!push); + } + _ => panic!("expected Build"), + } + } + + // 8. 
test_build_invalid_target + #[test] + fn test_build_invalid_target() { + let result = Cli::try_parse_from(&["sunbeam", "build", "notavalidtarget"]); + assert!(result.is_err()); + } + + // 9. test_user_set_password + #[test] + fn test_user_set_password() { + let cli = parse(&["sunbeam", "user", "set-password", "admin@example.com", "hunter2"]); + match cli.verb { + Some(Verb::User { action: Some(UserAction::SetPassword { target, password }) }) => { + assert_eq!(target, "admin@example.com"); + assert_eq!(password, "hunter2"); + } + _ => panic!("expected User SetPassword"), + } + } + + // 10. test_user_onboard_basic + #[test] + fn test_user_onboard_basic() { + let cli = parse(&["sunbeam", "user", "onboard", "a@b.com"]); + match cli.verb { + Some(Verb::User { action: Some(UserAction::Onboard { + email, name, schema, no_email, notify, .. + }) }) => { + assert_eq!(email, "a@b.com"); + assert_eq!(name, ""); + assert_eq!(schema, "employee"); + assert!(!no_email); + assert_eq!(notify, ""); + } + _ => panic!("expected User Onboard"), + } + } + + // 11. test_user_onboard_full + #[test] + fn test_user_onboard_full() { + let cli = parse(&[ + "sunbeam", "user", "onboard", "a@b.com", + "--name", "A B", "--schema", "default", "--no-email", + "--job-title", "Engineer", "--department", "Dev", + "--office-location", "Paris", "--hire-date", "2026-01-15", + "--manager", "boss@b.com", + ]); + match cli.verb { + Some(Verb::User { action: Some(UserAction::Onboard { + email, name, schema, no_email, job_title, + department, office_location, hire_date, manager, .. + }) }) => { + assert_eq!(email, "a@b.com"); + assert_eq!(name, "A B"); + assert_eq!(schema, "default"); + assert!(no_email); + assert_eq!(job_title, "Engineer"); + assert_eq!(department, "Dev"); + assert_eq!(office_location, "Paris"); + assert_eq!(hire_date, "2026-01-15"); + assert_eq!(manager, "boss@b.com"); + } + _ => panic!("expected User Onboard"), + } + } + + // 12. 
test_apply_no_namespace + #[test] + fn test_apply_no_namespace() { + let cli = parse(&["sunbeam", "apply"]); + match cli.verb { + Some(Verb::Apply { namespace, .. }) => assert!(namespace.is_none()), + _ => panic!("expected Apply"), + } + } + + // 13. test_apply_with_namespace + #[test] + fn test_apply_with_namespace() { + let cli = parse(&["sunbeam", "apply", "lasuite"]); + match cli.verb { + Some(Verb::Apply { namespace, .. }) => assert_eq!(namespace.unwrap(), "lasuite"), + _ => panic!("expected Apply"), + } + } + + // 14. test_config_set + #[test] + fn test_config_set() { + let cli = parse(&[ + "sunbeam", "config", "set", + "--host", "user@example.com", + "--infra-dir", "/path/to/infra", + ]); + match cli.verb { + Some(Verb::Config { action: Some(ConfigAction::Set { host, infra_dir, .. }) }) => { + assert_eq!(host, "user@example.com"); + assert_eq!(infra_dir, "/path/to/infra"); + } + _ => panic!("expected Config Set"), + } + } + + // 15. test_config_get / test_config_clear + #[test] + fn test_config_get() { + let cli = parse(&["sunbeam", "config", "get"]); + match cli.verb { + Some(Verb::Config { action: Some(ConfigAction::Get) }) => {} + _ => panic!("expected Config Get"), + } + } + + #[test] + fn test_config_clear() { + let cli = parse(&["sunbeam", "config", "clear"]); + match cli.verb { + Some(Verb::Config { action: Some(ConfigAction::Clear) }) => {} + _ => panic!("expected Config Clear"), + } + } + + // 16. test_no_args_prints_help + #[test] + fn test_no_args_prints_help() { + let cli = parse(&["sunbeam"]); + assert!(cli.verb.is_none()); + } + + // 17. test_get_json_output + #[test] + fn test_get_json_output() { + let cli = parse(&["sunbeam", "get", "ory/kratos-abc", "-o", "json"]); + match cli.verb { + Some(Verb::Get { target, output }) => { + assert_eq!(target, "ory/kratos-abc"); + assert_eq!(output, "json"); + } + _ => panic!("expected Get"), + } + } + + // 18. 
test_check_with_target + #[test] + fn test_check_with_target() { + let cli = parse(&["sunbeam", "check", "devtools"]); + match cli.verb { + Some(Verb::Check { target }) => assert_eq!(target.unwrap(), "devtools"), + _ => panic!("expected Check"), + } + } + + // 19. test_build_messages_components + #[test] + fn test_build_messages_backend() { + let cli = parse(&["sunbeam", "build", "messages-backend"]); + match cli.verb { + Some(Verb::Build { what, .. }) => { + assert!(matches!(what, BuildTarget::MessagesBackend)); + } + _ => panic!("expected Build"), + } + } + + #[test] + fn test_build_messages_frontend() { + let cli = parse(&["sunbeam", "build", "messages-frontend"]); + match cli.verb { + Some(Verb::Build { what, .. }) => { + assert!(matches!(what, BuildTarget::MessagesFrontend)); + } + _ => panic!("expected Build"), + } + } + + #[test] + fn test_build_messages_mta_in() { + let cli = parse(&["sunbeam", "build", "messages-mta-in"]); + match cli.verb { + Some(Verb::Build { what, .. }) => { + assert!(matches!(what, BuildTarget::MessagesMtaIn)); + } + _ => panic!("expected Build"), + } + } + + #[test] + fn test_build_messages_mta_out() { + let cli = parse(&["sunbeam", "build", "messages-mta-out"]); + match cli.verb { + Some(Verb::Build { what, .. }) => { + assert!(matches!(what, BuildTarget::MessagesMtaOut)); + } + _ => panic!("expected Build"), + } + } + + #[test] + fn test_build_messages_mpa() { + let cli = parse(&["sunbeam", "build", "messages-mpa"]); + match cli.verb { + Some(Verb::Build { what, .. }) => { + assert!(matches!(what, BuildTarget::MessagesMpa)); + } + _ => panic!("expected Build"), + } + } + + #[test] + fn test_build_messages_socks_proxy() { + let cli = parse(&["sunbeam", "build", "messages-socks-proxy"]); + match cli.verb { + Some(Verb::Build { what, .. }) => { + assert!(matches!(what, BuildTarget::MessagesSocksProxy)); + } + _ => panic!("expected Build"), + } + } + + // 20. 
test_hire_date_validation + #[test] + fn test_hire_date_valid() { + let cli = parse(&[ + "sunbeam", "user", "onboard", "a@b.com", + "--hire-date", "2026-01-15", + ]); + match cli.verb { + Some(Verb::User { action: Some(UserAction::Onboard { hire_date, .. }) }) => { + assert_eq!(hire_date, "2026-01-15"); + } + _ => panic!("expected User Onboard"), + } + } + + #[test] + fn test_hire_date_invalid() { + let result = Cli::try_parse_from(&[ + "sunbeam", "user", "onboard", "a@b.com", + "--hire-date", "not-a-date", + ]); + assert!(result.is_err()); + } +} + /// Main dispatch function — parse CLI args and route to subcommands. pub async fn dispatch() -> Result<()> { let cli = Cli::parse(); diff --git a/src/cluster.rs b/src/cluster.rs index 3735308..8786446 100644 --- a/src/cluster.rs +++ b/src/cluster.rs @@ -1,5 +1,456 @@ -use anyhow::Result; +//! Cluster lifecycle — cert-manager, Linkerd, TLS, core service readiness. +//! +//! Pure K8s implementation: no Lima VM operations. -pub async fn cmd_up() -> Result<()> { - todo!("cmd_up: full cluster bring-up via kube-rs") +use anyhow::{bail, Context, Result}; +use std::path::PathBuf; + +const GITEA_ADMIN_USER: &str = "gitea_admin"; + +const CERT_MANAGER_URL: &str = + "https://github.com/cert-manager/cert-manager/releases/download/v1.17.0/cert-manager.yaml"; + +const GATEWAY_API_CRDS_URL: &str = + "https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml"; + +fn secrets_dir() -> PathBuf { + crate::config::get_infra_dir() + .join("secrets") + .join("local") +} + +// --------------------------------------------------------------------------- +// cert-manager +// --------------------------------------------------------------------------- + +async fn ensure_cert_manager() -> Result<()> { + crate::output::step("cert-manager..."); + + if crate::kube::ns_exists("cert-manager").await? 
{ + crate::output::ok("Already installed."); + return Ok(()); + } + + crate::output::ok("Installing..."); + + // Download and apply cert-manager YAML + let body = reqwest::get(CERT_MANAGER_URL) + .await + .context("Failed to download cert-manager manifest")? + .text() + .await + .context("Failed to read cert-manager manifest body")?; + + crate::kube::kube_apply(&body).await?; + + // Wait for rollout + for dep in &[ + "cert-manager", + "cert-manager-webhook", + "cert-manager-cainjector", + ] { + crate::output::ok(&format!("Waiting for {dep}...")); + wait_rollout("cert-manager", dep, 120).await?; + } + + crate::output::ok("Installed."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Linkerd +// --------------------------------------------------------------------------- + +async fn ensure_linkerd() -> Result<()> { + crate::output::step("Linkerd..."); + + if crate::kube::ns_exists("linkerd").await? { + crate::output::ok("Already installed."); + return Ok(()); + } + + // Gateway API CRDs + crate::output::ok("Installing Gateway API CRDs..."); + let gateway_body = reqwest::get(GATEWAY_API_CRDS_URL) + .await + .context("Failed to download Gateway API CRDs")? 
+ .text() + .await?; + + // Gateway API CRDs require server-side apply; kube_apply already does SSA + crate::kube::kube_apply(&gateway_body).await?; + + // Linkerd CRDs via subprocess (no pure HTTP source for linkerd manifests) + crate::output::ok("Installing Linkerd CRDs..."); + let crds_output = tokio::process::Command::new("linkerd") + .args(["install", "--crds"]) + .output() + .await + .context("Failed to run `linkerd install --crds`")?; + + if !crds_output.status.success() { + let stderr = String::from_utf8_lossy(&crds_output.stderr); + bail!("linkerd install --crds failed: {stderr}"); + } + let crds = String::from_utf8_lossy(&crds_output.stdout); + crate::kube::kube_apply(&crds).await?; + + // Linkerd control plane + crate::output::ok("Installing Linkerd control plane..."); + let cp_output = tokio::process::Command::new("linkerd") + .args(["install"]) + .output() + .await + .context("Failed to run `linkerd install`")?; + + if !cp_output.status.success() { + let stderr = String::from_utf8_lossy(&cp_output.stderr); + bail!("linkerd install failed: {stderr}"); + } + let cp = String::from_utf8_lossy(&cp_output.stdout); + crate::kube::kube_apply(&cp).await?; + + for dep in &[ + "linkerd-identity", + "linkerd-destination", + "linkerd-proxy-injector", + ] { + crate::output::ok(&format!("Waiting for {dep}...")); + wait_rollout("linkerd", dep, 120).await?; + } + + crate::output::ok("Installed."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// TLS certificate (rcgen) +// --------------------------------------------------------------------------- + +async fn ensure_tls_cert(domain: &str) -> Result<()> { + crate::output::step("TLS certificate..."); + + let dir = secrets_dir(); + let cert_path = dir.join("tls.crt"); + let key_path = dir.join("tls.key"); + + if cert_path.exists() { + crate::output::ok(&format!("Cert exists. 
Domain: {domain}"));
+        return Ok(());
+    }
+
+    crate::output::ok(&format!("Generating wildcard cert for *.{domain}..."));
+    std::fs::create_dir_all(&dir)
+        .with_context(|| format!("Failed to create secrets dir: {}", dir.display()))?;
+
+    let subject_alt_names = vec![format!("*.{domain}")];
+    let mut params = rcgen::CertificateParams::new(subject_alt_names)
+        .context("Failed to create certificate params")?;
+    params
+        .distinguished_name
+        .push(rcgen::DnType::CommonName, format!("*.{domain}"));
+
+    let key_pair = rcgen::KeyPair::generate().context("Failed to generate key pair")?;
+    let cert = params
+        .self_signed(&key_pair)
+        .context("Failed to generate self-signed certificate")?;
+
+    std::fs::write(&cert_path, cert.pem())
+        .with_context(|| format!("Failed to write {}", cert_path.display()))?;
+    std::fs::write(&key_path, key_pair.serialize_pem())
+        .with_context(|| format!("Failed to write {}", key_path.display()))?;
+
+    crate::output::ok(&format!("Cert generated. Domain: {domain}"));
+    Ok(())
+}
+
+// ---------------------------------------------------------------------------
+// TLS secret
+// ---------------------------------------------------------------------------
+
+async fn ensure_tls_secret(domain: &str) -> Result<()> {
+    crate::output::step("TLS secret...");
+
+    let _ = domain; // domain used contextually above; secret uses files
+    crate::kube::ensure_ns("ingress").await?;
+
+    let dir = secrets_dir();
+    let cert_pem =
+        std::fs::read_to_string(dir.join("tls.crt")).context("Failed to read tls.crt")?;
+    let key_pem =
+        std::fs::read_to_string(dir.join("tls.key")).context("Failed to read tls.key")?;
+
+    // Create TLS secret via kube-rs
+    let client = crate::kube::get_client().await?;
+    let api: kube::api::Api<k8s_openapi::api::core::v1::Secret> =
+        kube::api::Api::namespaced(client.clone(), "ingress");
+
+    let b64_cert = base64::Engine::encode(
+        &base64::engine::general_purpose::STANDARD,
+        cert_pem.as_bytes(),
+    );
+    let b64_key = base64::Engine::encode(
+
&base64::engine::general_purpose::STANDARD, + key_pem.as_bytes(), + ); + + let secret_obj = serde_json::json!({ + "apiVersion": "v1", + "kind": "Secret", + "metadata": { + "name": "pingora-tls", + "namespace": "ingress", + }, + "type": "kubernetes.io/tls", + "data": { + "tls.crt": b64_cert, + "tls.key": b64_key, + }, + }); + + let pp = kube::api::PatchParams::apply("sunbeam").force(); + api.patch("pingora-tls", &pp, &kube::api::Patch::Apply(secret_obj)) + .await + .context("Failed to create TLS secret")?; + + crate::output::ok("Done."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Wait for core +// --------------------------------------------------------------------------- + +async fn wait_for_core() -> Result<()> { + crate::output::step("Waiting for core services..."); + + for (ns, dep) in &[("data", "valkey"), ("ory", "kratos"), ("ory", "hydra")] { + let _ = wait_rollout(ns, dep, 120).await; + } + + crate::output::ok("Core services ready."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Print URLs +// --------------------------------------------------------------------------- + +fn print_urls(domain: &str, gitea_admin_pass: &str) { + let sep = "\u{2500}".repeat(60); + println!("\n{sep}"); + println!(" Stack is up. 
Domain: {domain}");
+    println!("{sep}");
+
+    let urls: &[(&str, String)] = &[
+        ("Auth", format!("https://auth.{domain}/")),
+        ("Docs", format!("https://docs.{domain}/")),
+        ("Meet", format!("https://meet.{domain}/")),
+        ("Drive", format!("https://drive.{domain}/")),
+        ("Chat", format!("https://chat.{domain}/")),
+        ("Mail", format!("https://mail.{domain}/")),
+        ("People", format!("https://people.{domain}/")),
+        (
+            "Gitea",
+            format!(
+                "https://src.{domain}/ ({GITEA_ADMIN_USER} / {gitea_admin_pass})"
+            ),
+        ),
+    ];
+
+    for (name, url) in urls {
+        println!(" {name:<10} {url}");
+    }
+
+    println!();
+    println!(" OpenBao UI:");
+    println!(" kubectl --context=sunbeam -n data port-forward svc/openbao 8200:8200");
+    println!(" http://localhost:8200");
+    println!(
+        " token: kubectl --context=sunbeam -n data get secret openbao-keys \
+  -o jsonpath='{{.data.root-token}}' | base64 -d"
+    );
+    println!("{sep}\n");
+}
+
+// ---------------------------------------------------------------------------
+// Helpers
+// ---------------------------------------------------------------------------
+
+/// Poll deployment rollout status (approximate: check Available condition).
+async fn wait_rollout(ns: &str, deployment: &str, timeout_secs: u64) -> Result<()> {
+    use k8s_openapi::api::apps::v1::Deployment;
+    use std::time::{Duration, Instant};
+
+    let client = crate::kube::get_client().await?;
+    let api: kube::api::Api<Deployment> = kube::api::Api::namespaced(client.clone(), ns);
+
+    let deadline = Instant::now() + Duration::from_secs(timeout_secs);
+
+    loop {
+        if Instant::now() > deadline {
+            bail!("Timed out waiting for deployment {ns}/{deployment}");
+        }
+
+        match api.get_opt(deployment).await?
{ + Some(dep) => { + if let Some(status) = &dep.status { + if let Some(conditions) = &status.conditions { + let available = conditions.iter().any(|c| { + c.type_ == "Available" && c.status == "True" + }); + if available { + return Ok(()); + } + } + } + } + None => { + // Deployment doesn't exist yet — keep waiting + } + } + + tokio::time::sleep(Duration::from_secs(3)).await; + } +} + +// --------------------------------------------------------------------------- +// Commands +// --------------------------------------------------------------------------- + +/// Full cluster bring-up (pure K8s — no Lima VM operations). +pub async fn cmd_up() -> Result<()> { + // Resolve domain from cluster state + let domain = crate::kube::get_domain().await?; + + ensure_cert_manager().await?; + ensure_linkerd().await?; + ensure_tls_cert(&domain).await?; + ensure_tls_secret(&domain).await?; + + // Apply manifests + crate::manifests::cmd_apply("local", &domain, "", "").await?; + + // Seed secrets + crate::secrets::cmd_seed().await?; + + // Gitea bootstrap + crate::gitea::cmd_bootstrap().await?; + + // Mirror amd64-only images + crate::images::cmd_mirror().await?; + + // Wait for core services + wait_for_core().await?; + + // Get gitea admin password for URL display + let admin_pass = crate::kube::kube_get_secret_field( + "devtools", + "gitea-admin-credentials", + "password", + ) + .await + .unwrap_or_default(); + + print_urls(&domain, &admin_pass); + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn cert_manager_url_points_to_github_release() { + assert!(CERT_MANAGER_URL.starts_with("https://github.com/cert-manager/cert-manager/")); + assert!(CERT_MANAGER_URL.contains("/releases/download/")); + assert!(CERT_MANAGER_URL.ends_with(".yaml")); + } + + #[test] + fn cert_manager_url_has_version() { + // Verify the URL contains a version tag like v1.x.x + assert!( + CERT_MANAGER_URL.contains("/v1."), + "CERT_MANAGER_URL should reference a v1.x release" + ); + } + + 
#[test] + fn gateway_api_crds_url_points_to_github_release() { + assert!(GATEWAY_API_CRDS_URL + .starts_with("https://github.com/kubernetes-sigs/gateway-api/")); + assert!(GATEWAY_API_CRDS_URL.contains("/releases/download/")); + assert!(GATEWAY_API_CRDS_URL.ends_with(".yaml")); + } + + #[test] + fn gateway_api_crds_url_has_version() { + assert!( + GATEWAY_API_CRDS_URL.contains("/v1."), + "GATEWAY_API_CRDS_URL should reference a v1.x release" + ); + } + + #[test] + fn secrets_dir_ends_with_secrets_local() { + let dir = secrets_dir(); + assert!( + dir.ends_with("secrets/local"), + "secrets_dir() should end with secrets/local, got: {}", + dir.display() + ); + } + + #[test] + fn secrets_dir_has_at_least_three_components() { + let dir = secrets_dir(); + let components: Vec<_> = dir.components().collect(); + assert!( + components.len() >= 3, + "secrets_dir() should have at least 3 path components (base/secrets/local), got: {}", + dir.display() + ); + } + + #[test] + fn gitea_admin_user_constant() { + assert_eq!(GITEA_ADMIN_USER, "gitea_admin"); + } + + #[test] + fn print_urls_contains_expected_services() { + // Capture print_urls output by checking the URL construction logic. + // We can't easily capture stdout in unit tests, but we can verify + // the URL format matches expectations. 
+ let domain = "test.local"; + let expected_urls = [ + format!("https://auth.{domain}/"), + format!("https://docs.{domain}/"), + format!("https://meet.{domain}/"), + format!("https://drive.{domain}/"), + format!("https://chat.{domain}/"), + format!("https://mail.{domain}/"), + format!("https://people.{domain}/"), + format!("https://src.{domain}/"), + ]; + + // Verify URL patterns are valid + for url in &expected_urls { + assert!(url.starts_with("https://")); + assert!(url.contains(domain)); + } + } + + #[test] + fn print_urls_gitea_includes_credentials() { + let domain = "example.local"; + let pass = "s3cret"; + let gitea_url = format!( + "https://src.{domain}/ ({GITEA_ADMIN_USER} / {pass})" + ); + assert!(gitea_url.contains(GITEA_ADMIN_USER)); + assert!(gitea_url.contains(pass)); + assert!(gitea_url.contains(&format!("src.{domain}"))); + } } diff --git a/src/gitea.rs b/src/gitea.rs index f375894..f23bb49 100644 --- a/src/gitea.rs +++ b/src/gitea.rs @@ -1,5 +1,429 @@ -use anyhow::Result; +//! Gitea bootstrap -- admin setup, org creation, OIDC auth source configuration. +use anyhow::Result; +use k8s_openapi::api::core::v1::Pod; +use kube::api::{Api, ListParams}; +use serde_json::Value; + +use crate::kube::{get_client, get_domain, kube_exec, kube_get_secret_field}; +use crate::output::{ok, step, warn}; + +const GITEA_ADMIN_USER: &str = "gitea_admin"; +const GITEA_ADMIN_EMAIL: &str = "gitea@local.domain"; + +/// Bootstrap Gitea: set admin password, create orgs, configure OIDC. 
pub async fn cmd_bootstrap() -> Result<()> { - todo!("cmd_bootstrap: Gitea admin + org setup via kube-rs exec + reqwest") + let domain = get_domain().await?; + + // Retrieve gitea admin password from cluster secret + let gitea_admin_pass = kube_get_secret_field("devtools", "gitea-admin-credentials", "password") + .await + .unwrap_or_default(); + + if gitea_admin_pass.is_empty() { + warn("gitea-admin-credentials password not found -- cannot bootstrap."); + return Ok(()); + } + + step("Bootstrapping Gitea..."); + + // Wait for a Running + Ready Gitea pod + let pod_name = wait_for_gitea_pod().await?; + let Some(pod) = pod_name else { + warn("Gitea pod not ready after 3 min -- skipping bootstrap."); + return Ok(()); + }; + + // Set admin password + set_admin_password(&pod, &gitea_admin_pass).await?; + + // Mark admin as private + mark_admin_private(&pod, &gitea_admin_pass).await?; + + // Create orgs + create_orgs(&pod, &gitea_admin_pass).await?; + + // Configure OIDC auth source + configure_oidc(&pod, &gitea_admin_pass).await?; + + ok(&format!( + "Gitea ready -- https://src.{domain} ({GITEA_ADMIN_USER} / )" + )); + Ok(()) +} + +/// Wait for a Running + Ready Gitea pod (up to 3 minutes). 
+async fn wait_for_gitea_pod() -> Result<Option<String>> {
+    let client = get_client().await?;
+    let pods: Api<Pod> = Api::namespaced(client.clone(), "devtools");
+
+    for _ in 0..60 {
+        let lp = ListParams::default().labels("app.kubernetes.io/name=gitea");
+        if let Ok(pod_list) = pods.list(&lp).await {
+            for pod in &pod_list.items {
+                let phase = pod
+                    .status
+                    .as_ref()
+                    .and_then(|s| s.phase.as_deref())
+                    .unwrap_or("");
+
+                if phase != "Running" {
+                    continue;
+                }
+
+                let ready = pod
+                    .status
+                    .as_ref()
+                    .and_then(|s| s.container_statuses.as_ref())
+                    .and_then(|cs| cs.first())
+                    .map(|c| c.ready)
+                    .unwrap_or(false);
+
+                if ready {
+                    let name = pod
+                        .metadata
+                        .name
+                        .as_deref()
+                        .unwrap_or("")
+                        .to_string();
+                    if !name.is_empty() {
+                        return Ok(Some(name));
+                    }
+                }
+            }
+        }
+        tokio::time::sleep(std::time::Duration::from_secs(3)).await;
+    }
+
+    Ok(None)
+}
+
+/// Set the admin password via gitea CLI exec.
+async fn set_admin_password(pod: &str, password: &str) -> Result<()> {
+    let (code, output) = kube_exec(
+        "devtools",
+        pod,
+        &[
+            "gitea",
+            "admin",
+            "user",
+            "change-password",
+            "--username",
+            GITEA_ADMIN_USER,
+            "--password",
+            password,
+            "--must-change-password=false",
+        ],
+        Some("gitea"),
+    )
+    .await?;
+
+    if code == 0 || output.to_lowercase().contains("password") {
+        ok(&format!("Admin '{GITEA_ADMIN_USER}' password set."));
+    } else {
+        warn(&format!("change-password: {output}"));
+    }
+    Ok(())
+}
+
+/// Call Gitea API via kubectl exec + curl inside the pod.
+async fn gitea_api(
+    pod: &str,
+    method: &str,
+    path: &str,
+    password: &str,
+    data: Option<&Value>,
+) -> Result<Value> {
+    let url = format!("http://localhost:3000/api/v1{path}");
+    let auth = format!("{GITEA_ADMIN_USER}:{password}");
+
+    let mut args = vec![
+        "curl", "-s", "-X", method, &url, "-H", "Content-Type: application/json", "-u", &auth,
+    ];
+
+    let data_str;
+    if let Some(d) = data {
+        data_str = serde_json::to_string(d)?;
+        args.push("-d");
+        args.push(&data_str);
+    }
+
+    let (_, stdout) = kube_exec("devtools", pod, &args, Some("gitea")).await?;
+
+    Ok(serde_json::from_str(&stdout).unwrap_or(Value::Object(Default::default())))
+}
+
+/// Mark the admin account as private.
+async fn mark_admin_private(pod: &str, password: &str) -> Result<()> {
+    let data = serde_json::json!({
+        "source_id": 0,
+        "login_name": GITEA_ADMIN_USER,
+        "email": GITEA_ADMIN_EMAIL,
+        "visibility": "private",
+    });
+
+    let result = gitea_api(
+        pod,
+        "PATCH",
+        &format!("/admin/users/{GITEA_ADMIN_USER}"),
+        password,
+        Some(&data),
+    )
+    .await?;
+
+    if result.get("login").and_then(|v| v.as_str()) == Some(GITEA_ADMIN_USER) {
+        ok(&format!("Admin '{GITEA_ADMIN_USER}' marked as private."));
+    } else {
+        warn(&format!("Could not set admin visibility: {result}"));
+    }
+    Ok(())
+}
+
+/// Create the studio and internal organizations.
+async fn create_orgs(pod: &str, password: &str) -> Result<()> {
+    let orgs = [
+        ("studio", "public", "Public source code"),
+        ("internal", "private", "Internal tools and services"),
+    ];
+
+    for (org_name, visibility, desc) in &orgs {
+        let data = serde_json::json!({
+            "username": org_name,
+            "visibility": visibility,
+            "description": desc,
+        });
+
+        let result = gitea_api(pod, "POST", "/orgs", password, Some(&data)).await?;
+
+        if result.get("id").is_some() {
+            ok(&format!("Created org '{org_name}'."));
+        } else if result
+            .get("message")
+            .and_then(|v| v.as_str())
+            .unwrap_or("")
+            .to_lowercase()
+            .contains("already")
+        {
+            ok(&format!("Org '{org_name}' already exists."));
+        } else {
+            let msg = result
+                .get("message")
+                .and_then(|v| v.as_str())
+                .map(|s| s.to_string())
+                .unwrap_or_else(|| format!("{result}"));
+            warn(&format!("Org '{org_name}': {msg}"));
+        }
+    }
+    Ok(())
+}
+
+/// Configure Hydra as the OIDC authentication source.
+async fn configure_oidc(pod: &str, _password: &str) -> Result<()> {
+    // List existing auth sources
+    let (_, auth_list_output) =
+        kube_exec("devtools", pod, &["gitea", "admin", "auth", "list"], Some("gitea")).await?;
+
+    let mut existing_id: Option<String> = None;
+    let mut exact_ok = false;
+
+    for line in auth_list_output.lines().skip(1) {
+        // Tab-separated: ID\tName\tType\tEnabled
+        let parts: Vec<&str> = line.split('\t').collect();
+        if parts.len() < 2 {
+            continue;
+        }
+        let src_id = parts[0].trim();
+        let src_name = parts[1].trim();
+
+        if src_name == "Sunbeam" {
+            exact_ok = true;
+            break;
+        }
+
+        let src_type = if parts.len() > 2 {
+            parts[2].trim()
+        } else {
+            ""
+        };
+
+        if src_name == "Sunbeam Auth"
+            || (src_name.starts_with("Sunbeam") && src_type == "OAuth2")
+        {
+            existing_id = Some(src_id.to_string());
+        }
+    }
+
+    if exact_ok {
+        ok("OIDC auth source 'Sunbeam' already present.");
+        return Ok(());
+    }
+
+    if let Some(eid) = existing_id {
+        // Wrong name -- rename in-place
+        let (code, stderr) = kube_exec(
+
"devtools", + pod, + &[ + "gitea", + "admin", + "auth", + "update-oauth", + "--id", + &eid, + "--name", + "Sunbeam", + ], + Some("gitea"), + ) + .await?; + + if code == 0 { + ok(&format!( + "Renamed OIDC auth source (id={eid}) to 'Sunbeam'." + )); + } else { + warn(&format!("Rename failed: {stderr}")); + } + return Ok(()); + } + + // Create new OIDC auth source + let oidc_id = kube_get_secret_field("lasuite", "oidc-gitea", "CLIENT_ID").await; + let oidc_secret = kube_get_secret_field("lasuite", "oidc-gitea", "CLIENT_SECRET").await; + + match (oidc_id, oidc_secret) { + (Ok(oidc_id), Ok(oidc_sec)) => { + let discover_url = + "http://hydra-public.ory.svc.cluster.local:4444/.well-known/openid-configuration"; + + let (code, stderr) = kube_exec( + "devtools", + pod, + &[ + "gitea", + "admin", + "auth", + "add-oauth", + "--name", + "Sunbeam", + "--provider", + "openidConnect", + "--key", + &oidc_id, + "--secret", + &oidc_sec, + "--auto-discover-url", + discover_url, + "--scopes", + "openid", + "--scopes", + "email", + "--scopes", + "profile", + ], + Some("gitea"), + ) + .await?; + + if code == 0 { + ok("OIDC auth source 'Sunbeam' configured."); + } else { + warn(&format!("OIDC auth source config failed: {stderr}")); + } + } + _ => { + warn("oidc-gitea secret not found -- OIDC auth source not configured."); + } + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_constants() { + assert_eq!(GITEA_ADMIN_USER, "gitea_admin"); + assert_eq!(GITEA_ADMIN_EMAIL, "gitea@local.domain"); + } + + #[test] + fn test_org_definitions() { + // Verify the org configs match the Python version + let orgs = [ + ("studio", "public", "Public source code"), + ("internal", "private", "Internal tools and services"), + ]; + assert_eq!(orgs[0].0, "studio"); + assert_eq!(orgs[0].1, "public"); + 
assert_eq!(orgs[1].0, "internal"); + assert_eq!(orgs[1].1, "private"); + } + + #[test] + fn test_parse_auth_list_output() { + let output = "ID\tName\tType\tEnabled\n1\tSunbeam\tOAuth2\ttrue\n"; + let mut found = false; + for line in output.lines().skip(1) { + let parts: Vec<&str> = line.split('\t').collect(); + if parts.len() >= 2 && parts[1].trim() == "Sunbeam" { + found = true; + } + } + assert!(found); + } + + #[test] + fn test_parse_auth_list_rename_needed() { + let output = "ID\tName\tType\tEnabled\n5\tSunbeam Auth\tOAuth2\ttrue\n"; + let mut rename_id: Option = None; + for line in output.lines().skip(1) { + let parts: Vec<&str> = line.split('\t').collect(); + if parts.len() >= 3 { + let name = parts[1].trim(); + let typ = parts[2].trim(); + if name == "Sunbeam Auth" || (name.starts_with("Sunbeam") && typ == "OAuth2") { + rename_id = Some(parts[0].trim().to_string()); + } + } + } + assert_eq!(rename_id, Some("5".to_string())); + } + + #[test] + fn test_gitea_api_response_parsing() { + // Simulate a successful org creation response + let json_str = r#"{"id": 1, "username": "studio"}"#; + let val: Value = serde_json::from_str(json_str).unwrap(); + assert!(val.get("id").is_some()); + + // Simulate an "already exists" response + let json_str = r#"{"message": "organization already exists"}"#; + let val: Value = serde_json::from_str(json_str).unwrap(); + assert!(val + .get("message") + .unwrap() + .as_str() + .unwrap() + .to_lowercase() + .contains("already")); + } + + #[test] + fn test_admin_visibility_patch_body() { + let data = serde_json::json!({ + "source_id": 0, + "login_name": GITEA_ADMIN_USER, + "email": GITEA_ADMIN_EMAIL, + "visibility": "private", + }); + assert_eq!(data["login_name"], "gitea_admin"); + assert_eq!(data["visibility"], "private"); + } } diff --git a/src/images.rs b/src/images.rs index 232eae8..e364d3f 100644 --- a/src/images.rs +++ b/src/images.rs @@ -1,10 +1,1789 @@ +//! Image building, mirroring, and pushing to Gitea registry. 
+ +use anyhow::{bail, Context, Result}; +use base64::Engine; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::process::Stdio; + use crate::cli::BuildTarget; -use anyhow::Result; +use crate::output::{ok, step, warn}; -pub async fn cmd_build(_what: &BuildTarget, _push: bool, _deploy: bool) -> Result<()> { - todo!("cmd_build: BuildKit gRPC builds") +const GITEA_ADMIN_USER: &str = "gitea_admin"; + +const MANAGED_NS: &[&str] = &[ + "data", + "devtools", + "ingress", + "lasuite", + "matrix", + "media", + "ory", + "storage", + "vault-secrets-operator", +]; + +/// amd64-only images that need mirroring: (source, org, repo, tag). +const AMD64_ONLY_IMAGES: &[(&str, &str, &str, &str)] = &[ + ( + "docker.io/lasuite/people-backend:latest", + "studio", + "people-backend", + "latest", + ), + ( + "docker.io/lasuite/people-frontend:latest", + "studio", + "people-frontend", + "latest", + ), + ( + "docker.io/lasuite/impress-backend:latest", + "studio", + "impress-backend", + "latest", + ), + ( + "docker.io/lasuite/impress-frontend:latest", + "studio", + "impress-frontend", + "latest", + ), + ( + "docker.io/lasuite/impress-y-provider:latest", + "studio", + "impress-y-provider", + "latest", + ), +]; + +// --------------------------------------------------------------------------- +// Build environment +// --------------------------------------------------------------------------- + +/// Resolved build environment — production (remote k8s) or local. +#[derive(Debug, Clone)] +pub struct BuildEnv { + pub is_prod: bool, + pub domain: String, + pub registry: String, + pub admin_pass: String, + pub platform: String, + pub ssh_host: Option, } +/// Detect prod vs local and resolve registry credentials. 
+async fn get_build_env() -> Result { + let ssh = crate::kube::ssh_host(); + let is_prod = !ssh.is_empty(); + + let domain = crate::kube::get_domain().await?; + + // Fetch gitea admin password from the cluster secret + let admin_pass = crate::kube::kube_get_secret_field( + "devtools", + "gitea-admin-credentials", + "password", + ) + .await + .context("gitea-admin-credentials secret not found -- run seed first.")?; + + let platform = if is_prod { + "linux/amd64".to_string() + } else { + "linux/arm64".to_string() + }; + + let ssh_host = if is_prod { + Some(ssh.to_string()) + } else { + None + }; + + Ok(BuildEnv { + is_prod, + domain: domain.clone(), + registry: format!("src.{domain}"), + admin_pass, + platform, + ssh_host, + }) +} + +// --------------------------------------------------------------------------- +// buildctl build + push +// --------------------------------------------------------------------------- + +/// Build and push an image via buildkitd running in k8s. +/// +/// Port-forwards to the buildkitd service in the `build` namespace, +/// runs `buildctl build`, and pushes the image directly to the Gitea +/// registry from inside the cluster. 
+#[allow(clippy::too_many_arguments)] +async fn buildctl_build_and_push( + env: &BuildEnv, + image: &str, + dockerfile: &Path, + context_dir: &Path, + target: Option<&str>, + build_args: Option<&HashMap>, + _no_cache: bool, +) -> Result<()> { + // Find a free local port for port-forward + let listener = std::net::TcpListener::bind("127.0.0.1:0") + .context("Failed to bind ephemeral port")?; + let local_port = listener.local_addr()?.port(); + drop(listener); + + // Build docker config for registry auth + let auth_token = base64::engine::general_purpose::STANDARD + .encode(format!("{GITEA_ADMIN_USER}:{}", env.admin_pass)); + let docker_cfg = serde_json::json!({ + "auths": { + &env.registry: { "auth": auth_token } + } + }); + + let tmpdir = tempfile::TempDir::new().context("Failed to create temp dir")?; + let cfg_path = tmpdir.path().join("config.json"); + std::fs::write(&cfg_path, serde_json::to_string(&docker_cfg)?) + .context("Failed to write docker config")?; + + // Start port-forward to buildkitd + let ctx_arg = format!("--context={}", crate::kube::context()); + let pf_port_arg = format!("{local_port}:1234"); + + let mut pf = tokio::process::Command::new("kubectl") + .args([ + &ctx_arg, + "port-forward", + "-n", + "build", + "svc/buildkitd", + &pf_port_arg, + ]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to start buildkitd port-forward")?; + + // Wait for port-forward to become ready + let deadline = tokio::time::Instant::now() + std::time::Duration::from_secs(15); + loop { + if tokio::time::Instant::now() > deadline { + pf.kill().await.ok(); + bail!("buildkitd port-forward on :{local_port} did not become ready within 15s"); + } + if tokio::net::TcpStream::connect(format!("127.0.0.1:{local_port}")) + .await + .is_ok() + { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + } + + // Build the buildctl command + let dockerfile_parent = dockerfile + .parent() + .unwrap_or(dockerfile) + 
.to_string_lossy() + .to_string(); + let dockerfile_name = dockerfile + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(); + let context_str = context_dir.to_string_lossy().to_string(); + + let mut cmd_args = vec![ + "build".to_string(), + "--frontend".to_string(), + "dockerfile.v0".to_string(), + "--local".to_string(), + format!("context={context_str}"), + "--local".to_string(), + format!("dockerfile={dockerfile_parent}"), + "--opt".to_string(), + format!("filename={dockerfile_name}"), + "--opt".to_string(), + format!("platform={}", env.platform), + "--output".to_string(), + format!("type=image,name={image},push=true"), + ]; + + if let Some(tgt) = target { + cmd_args.push("--opt".to_string()); + cmd_args.push(format!("target={tgt}")); + } + + if _no_cache { + cmd_args.push("--no-cache".to_string()); + } + + if let Some(args) = build_args { + for (k, v) in args { + cmd_args.push("--opt".to_string()); + cmd_args.push(format!("build-arg:{k}={v}")); + } + } + + let buildctl_host = format!("tcp://127.0.0.1:{local_port}"); + let tmpdir_str = tmpdir.path().to_string_lossy().to_string(); + + let result = tokio::process::Command::new("buildctl") + .args(&cmd_args) + .env("BUILDKIT_HOST", &buildctl_host) + .env("DOCKER_CONFIG", &tmpdir_str) + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .status() + .await; + + // Always terminate port-forward + pf.kill().await.ok(); + pf.wait().await.ok(); + + match result { + Ok(status) if status.success() => Ok(()), + Ok(status) => bail!("buildctl exited with status {status}"), + Err(e) => bail!("Failed to run buildctl: {e}"), + } +} + +// --------------------------------------------------------------------------- +// build_image wrapper +// --------------------------------------------------------------------------- + +/// Build a container image via buildkitd and push to the Gitea registry. 
+#[allow(clippy::too_many_arguments)] +async fn build_image( + env: &BuildEnv, + image: &str, + dockerfile: &Path, + context_dir: &Path, + target: Option<&str>, + build_args: Option<&HashMap>, + push: bool, + no_cache: bool, + cleanup_paths: &[PathBuf], +) -> Result<()> { + ok(&format!( + "Building image ({}{})...", + env.platform, + target + .map(|t| format!(", {t} target")) + .unwrap_or_default() + )); + + if !push { + warn("Builds require --push (buildkitd pushes directly to registry); skipping."); + return Ok(()); + } + + let result = buildctl_build_and_push( + env, + image, + dockerfile, + context_dir, + target, + build_args, + no_cache, + ) + .await; + + // Cleanup + for p in cleanup_paths { + if p.exists() { + if p.is_dir() { + let _ = std::fs::remove_dir_all(p); + } else { + let _ = std::fs::remove_file(p); + } + } + } + + result +} + +// --------------------------------------------------------------------------- +// Node operations +// --------------------------------------------------------------------------- + +/// Return one SSH-reachable IP per node in the cluster. 
+async fn get_node_addresses() -> Result> { + let client = crate::kube::get_client().await?; + let api: kube::api::Api = + kube::api::Api::all(client.clone()); + + let node_list = api + .list(&kube::api::ListParams::default()) + .await + .context("Failed to list nodes")?; + + let mut addresses = Vec::new(); + for node in &node_list.items { + if let Some(status) = &node.status { + if let Some(addrs) = &status.addresses { + // Prefer IPv4 InternalIP + let mut ipv4: Option = None; + let mut any_internal: Option = None; + + for addr in addrs { + if addr.type_ == "InternalIP" { + if !addr.address.contains(':') { + ipv4 = Some(addr.address.clone()); + } else if any_internal.is_none() { + any_internal = Some(addr.address.clone()); + } + } + } + + if let Some(ip) = ipv4.or(any_internal) { + addresses.push(ip); + } + } + } + } + + Ok(addresses) +} + +/// SSH to each k3s node and pull images into containerd. +async fn ctr_pull_on_nodes(env: &BuildEnv, images: &[String]) -> Result<()> { + if images.is_empty() { + return Ok(()); + } + + let nodes = get_node_addresses().await?; + if nodes.is_empty() { + warn("Could not detect node addresses; skipping ctr pull."); + return Ok(()); + } + + let ssh_user = env + .ssh_host + .as_deref() + .and_then(|h| h.split('@').next()) + .unwrap_or("root"); + + for node_ip in &nodes { + for img in images { + ok(&format!("Pulling {img} into containerd on {node_ip}...")); + let status = tokio::process::Command::new("ssh") + .args([ + "-p", + "2222", + "-o", + "StrictHostKeyChecking=no", + &format!("{ssh_user}@{node_ip}"), + &format!("sudo ctr -n k8s.io images pull {img}"), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::piped()) + .status() + .await; + + match status { + Ok(s) if s.success() => ok(&format!("Pulled {img} on {node_ip}")), + _ => bail!("ctr pull failed on {node_ip} for {img}"), + } + } + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Deploy rollout +// 
--------------------------------------------------------------------------- + +/// Apply manifests for the target namespace and rolling-restart the given deployments. +async fn deploy_rollout( + env: &BuildEnv, + deployments: &[&str], + namespace: &str, + timeout_secs: u64, + images: Option<&[String]>, +) -> Result<()> { + let env_str = if env.is_prod { "production" } else { "local" }; + crate::manifests::cmd_apply(env_str, &env.domain, "", namespace).await?; + + // Pull fresh images into containerd on every node before rollout + if let Some(imgs) = images { + ctr_pull_on_nodes(env, imgs).await?; + } + + for dep in deployments { + ok(&format!("Rolling {dep}...")); + crate::kube::kube_rollout_restart(namespace, dep).await?; + } + + // Wait for rollout completion + for dep in deployments { + wait_deployment_ready(namespace, dep, timeout_secs).await?; + } + + ok("Redeployed."); + Ok(()) +} + +/// Wait for a deployment to become ready. +async fn wait_deployment_ready(ns: &str, deployment: &str, timeout_secs: u64) -> Result<()> { + use k8s_openapi::api::apps::v1::Deployment; + use std::time::{Duration, Instant}; + + let client = crate::kube::get_client().await?; + let api: kube::api::Api = kube::api::Api::namespaced(client.clone(), ns); + let deadline = Instant::now() + Duration::from_secs(timeout_secs); + + loop { + if Instant::now() > deadline { + bail!("Timed out waiting for deployment {ns}/{deployment}"); + } + + if let Some(dep) = api.get_opt(deployment).await? { + if let Some(status) = &dep.status { + if let Some(conditions) = &status.conditions { + let available = conditions + .iter() + .any(|c| c.type_ == "Available" && c.status == "True"); + if available { + return Ok(()); + } + } + } + } + + tokio::time::sleep(Duration::from_secs(3)).await; + } +} + +// --------------------------------------------------------------------------- +// Mirroring +// --------------------------------------------------------------------------- + +/// Docker Hub auth token response. 
+#[derive(serde::Deserialize)] +struct DockerAuthToken { + token: String, +} + +/// Fetch a Docker Hub auth token for the given repository. +async fn docker_hub_token(repo: &str) -> Result { + let url = format!( + "https://auth.docker.io/token?service=registry.docker.io&scope=repository:{repo}:pull" + ); + let resp: DockerAuthToken = reqwest::get(&url) + .await + .context("Failed to fetch Docker Hub token")? + .json() + .await + .context("Failed to parse Docker Hub token response")?; + Ok(resp.token) +} + +/// Fetch an OCI/Docker manifest index from Docker Hub. +async fn fetch_manifest_index( + repo: &str, + tag: &str, +) -> Result { + let token = docker_hub_token(repo).await?; + + let client = reqwest::Client::new(); + let url = format!("https://registry-1.docker.io/v2/{repo}/manifests/{tag}"); + let accept = "application/vnd.oci.image.index.v1+json,\ + application/vnd.docker.distribution.manifest.list.v2+json"; + + let resp = client + .get(&url) + .header("Authorization", format!("Bearer {token}")) + .header("Accept", accept) + .send() + .await + .context("Failed to fetch manifest from Docker Hub")?; + + if !resp.status().is_success() { + bail!( + "Docker Hub returned {} for {repo}:{tag}", + resp.status() + ); + } + + resp.json() + .await + .context("Failed to parse manifest index JSON") +} + +/// Build an OCI tar archive containing a patched index that maps both +/// amd64 and arm64 to the same amd64 manifest. 
+fn make_oci_tar( + ref_name: &str, + new_index_bytes: &[u8], + amd64_manifest_bytes: &[u8], +) -> Result> { + use std::io::Write; + + let ix_hex = { + use sha2::Digest; + let hash = sha2::Sha256::digest(new_index_bytes); + hash.iter().map(|b| format!("{b:02x}")).collect::() + }; + + let new_index: serde_json::Value = serde_json::from_slice(new_index_bytes)?; + let amd64_hex = new_index["manifests"][0]["digest"] + .as_str() + .unwrap_or("") + .replace("sha256:", ""); + + let layout = serde_json::json!({"imageLayoutVersion": "1.0.0"}); + let layout_bytes = serde_json::to_vec(&layout)?; + + let top = serde_json::json!({ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [{ + "mediaType": "application/vnd.oci.image.index.v1+json", + "digest": format!("sha256:{ix_hex}"), + "size": new_index_bytes.len(), + "annotations": { + "org.opencontainers.image.ref.name": ref_name, + }, + }], + }); + let top_bytes = serde_json::to_vec(&top)?; + + let mut buf = Vec::new(); + { + let mut builder = tar::Builder::new(&mut buf); + + let mut add_entry = |name: &str, data: &[u8]| -> Result<()> { + let mut header = tar::Header::new_gnu(); + header.set_size(data.len() as u64); + header.set_mode(0o644); + header.set_cksum(); + builder.append_data(&mut header, name, data)?; + Ok(()) + }; + + add_entry("oci-layout", &layout_bytes)?; + add_entry("index.json", &top_bytes)?; + add_entry(&format!("blobs/sha256/{ix_hex}"), new_index_bytes)?; + add_entry( + &format!("blobs/sha256/{amd64_hex}"), + amd64_manifest_bytes, + )?; + + builder.finish()?; + } + + // Flush + buf.flush().ok(); + Ok(buf) +} + +/// Mirror amd64-only La Suite images to the Gitea registry. +/// +/// The Python version ran a script inside the Lima VM via `limactl shell`. +/// Without Lima, we use reqwest for Docker registry token/manifest fetching +/// and construct OCI tars natively. The containerd import + push operations +/// require SSH to nodes and are implemented via subprocess. 
pub async fn cmd_mirror() -> Result<()> { - todo!("cmd_mirror: containerd-client + reqwest mirror") + step("Mirroring amd64-only images to Gitea registry..."); + + let domain = crate::kube::get_domain().await?; + let admin_pass = crate::kube::kube_get_secret_field( + "devtools", + "gitea-admin-credentials", + "password", + ) + .await + .unwrap_or_default(); + + if admin_pass.is_empty() { + warn("Could not get gitea admin password; skipping mirror."); + return Ok(()); + } + + let registry = format!("src.{domain}"); + + let nodes = get_node_addresses().await.unwrap_or_default(); + if nodes.is_empty() { + warn("No node addresses found; cannot mirror images (need SSH to containerd)."); + return Ok(()); + } + + // Determine SSH user + let ssh_host_val = crate::kube::ssh_host(); + let ssh_user = if ssh_host_val.contains('@') { + ssh_host_val.split('@').next().unwrap_or("root") + } else { + "root" + }; + + for (src, org, repo, tag) in AMD64_ONLY_IMAGES { + let tgt = format!("{registry}/{org}/{repo}:{tag}"); + ok(&format!("Processing {src} -> {tgt}")); + + // Fetch manifest index from Docker Hub + let no_prefix = src.replace("docker.io/", ""); + let parts: Vec<&str> = no_prefix.splitn(2, ':').collect(); + let (docker_repo, docker_tag) = if parts.len() == 2 { + (parts[0], parts[1]) + } else { + (parts[0], "latest") + }; + + let index = match fetch_manifest_index(docker_repo, docker_tag).await { + Ok(idx) => idx, + Err(e) => { + warn(&format!("Failed to fetch index for {src}: {e}")); + continue; + } + }; + + // Find amd64 manifest + let manifests = index["manifests"].as_array(); + let amd64 = manifests.and_then(|ms| { + ms.iter().find(|m| { + m["platform"]["architecture"].as_str() == Some("amd64") + && m["platform"]["os"].as_str() == Some("linux") + }) + }); + + let amd64 = match amd64 { + Some(m) => m.clone(), + None => { + warn(&format!("No linux/amd64 entry in index for {src}; skipping")); + continue; + } + }; + + let amd64_digest = amd64["digest"] + .as_str() + 
.unwrap_or("") + .to_string(); + + // Fetch the actual amd64 manifest blob from registry + let token = docker_hub_token(docker_repo).await?; + let manifest_url = format!( + "https://registry-1.docker.io/v2/{docker_repo}/manifests/{amd64_digest}" + ); + let client = reqwest::Client::new(); + let amd64_manifest_bytes = client + .get(&manifest_url) + .header("Authorization", format!("Bearer {token}")) + .header( + "Accept", + "application/vnd.oci.image.manifest.v1+json,\ + application/vnd.docker.distribution.manifest.v2+json", + ) + .send() + .await? + .bytes() + .await?; + + // Build patched index: amd64 + arm64 alias pointing to same manifest + let arm64_entry = serde_json::json!({ + "mediaType": amd64["mediaType"], + "digest": amd64["digest"], + "size": amd64["size"], + "platform": {"architecture": "arm64", "os": "linux"}, + }); + + let new_index = serde_json::json!({ + "schemaVersion": index["schemaVersion"], + "mediaType": index.get("mediaType").unwrap_or(&serde_json::json!("application/vnd.oci.image.index.v1+json")), + "manifests": [amd64, arm64_entry], + }); + let new_index_bytes = serde_json::to_vec(&new_index)?; + + // Build OCI tar + let oci_tar = match make_oci_tar(&tgt, &new_index_bytes, &amd64_manifest_bytes) { + Ok(tar) => tar, + Err(e) => { + warn(&format!("Failed to build OCI tar for {tgt}: {e}")); + continue; + } + }; + + // Import + push via SSH to each node (containerd operations) + for node_ip in &nodes { + ok(&format!("Importing {tgt} on {node_ip}...")); + + // Remove existing, import, label + let ssh_target = format!("{ssh_user}@{node_ip}"); + + // Import via stdin + let mut import_cmd = tokio::process::Command::new("ssh") + .args([ + "-p", + "2222", + "-o", + "StrictHostKeyChecking=no", + &ssh_target, + "sudo ctr -n k8s.io images import --all-platforms -", + ]) + .stdin(Stdio::piped()) + .stdout(Stdio::null()) + .stderr(Stdio::piped()) + .spawn() + .context("Failed to spawn ssh for ctr import")?; + + if let Some(mut stdin) = 
import_cmd.stdin.take() { + use tokio::io::AsyncWriteExt; + stdin.write_all(&oci_tar).await?; + drop(stdin); + } + let import_status = import_cmd.wait().await?; + if !import_status.success() { + warn(&format!("ctr import failed on {node_ip} for {tgt}")); + continue; + } + + // Label for CRI + let _ = tokio::process::Command::new("ssh") + .args([ + "-p", + "2222", + "-o", + "StrictHostKeyChecking=no", + &ssh_target, + &format!( + "sudo ctr -n k8s.io images label {tgt} io.cri-containerd.image=managed" + ), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .status() + .await; + + // Push to Gitea registry + ok(&format!("Pushing {tgt} from {node_ip}...")); + let push_status = tokio::process::Command::new("ssh") + .args([ + "-p", + "2222", + "-o", + "StrictHostKeyChecking=no", + &ssh_target, + &format!( + "sudo ctr -n k8s.io images push --user {GITEA_ADMIN_USER}:{admin_pass} {tgt}" + ), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::piped()) + .status() + .await; + + match push_status { + Ok(s) if s.success() => ok(&format!("Pushed {tgt}")), + _ => warn(&format!("Push failed for {tgt} on {node_ip}")), + } + + // Only need to push from one node + break; + } + } + + // Delete pods stuck in image-pull error states + ok("Clearing image-pull-error pods..."); + clear_image_pull_error_pods().await?; + + ok("Done."); + Ok(()) +} + +/// Delete pods in image-pull error states across managed namespaces. 
+async fn clear_image_pull_error_pods() -> Result<()> { + use k8s_openapi::api::core::v1::Pod; + + let error_reasons = ["ImagePullBackOff", "ErrImagePull", "ErrImageNeverPull"]; + + let client = crate::kube::get_client().await?; + + for ns in MANAGED_NS { + let api: kube::api::Api = kube::api::Api::namespaced(client.clone(), ns); + let pods = api + .list(&kube::api::ListParams::default()) + .await; + + let pods = match pods { + Ok(p) => p, + Err(_) => continue, + }; + + for pod in &pods.items { + let pod_name = pod.metadata.name.as_deref().unwrap_or(""); + if pod_name.is_empty() { + continue; + } + + let has_error = pod + .status + .as_ref() + .and_then(|s| s.container_statuses.as_ref()) + .map(|statuses| { + statuses.iter().any(|cs| { + cs.state + .as_ref() + .and_then(|s| s.waiting.as_ref()) + .and_then(|w| w.reason.as_deref()) + .is_some_and(|r| error_reasons.contains(&r)) + }) + }) + .unwrap_or(false); + + if has_error { + let _ = api + .delete(pod_name, &kube::api::DeleteParams::default()) + .await; + } + } + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Per-service build functions +// --------------------------------------------------------------------------- + +async fn build_proxy(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let proxy_dir = crate::config::get_repo_root().join("proxy"); + if !proxy_dir.is_dir() { + bail!("Proxy source not found at {}", proxy_dir.display()); + } + + let image = format!("{}/studio/proxy:latest", env.registry); + step(&format!("Building sunbeam-proxy -> {image} ...")); + + build_image( + &env, + &image, + &proxy_dir.join("Dockerfile"), + &proxy_dir, + None, + None, + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &["pingora"], "ingress", 120, Some(&[image])).await?; + } + Ok(()) +} + +async fn build_tuwunel(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let tuwunel_dir = 
crate::config::get_repo_root().join("tuwunel"); + if !tuwunel_dir.is_dir() { + bail!("Tuwunel source not found at {}", tuwunel_dir.display()); + } + + let image = format!("{}/studio/tuwunel:latest", env.registry); + step(&format!("Building tuwunel -> {image} ...")); + + build_image( + &env, + &image, + &tuwunel_dir.join("Dockerfile"), + &tuwunel_dir, + None, + None, + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &["tuwunel"], "matrix", 180, Some(&[image])).await?; + } + Ok(()) +} + +async fn build_integration(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let sunbeam_dir = crate::config::get_repo_root(); + let integration_service_dir = sunbeam_dir.join("integration-service"); + let dockerfile = integration_service_dir.join("Dockerfile"); + let dockerignore = integration_service_dir.join(".dockerignore"); + + if !dockerfile.exists() { + bail!( + "integration-service Dockerfile not found at {}", + dockerfile.display() + ); + } + if !sunbeam_dir + .join("integration") + .join("packages") + .join("widgets") + .is_dir() + { + bail!( + "integration repo not found at {} -- \ + run: cd sunbeam && git clone https://github.com/suitenumerique/integration.git", + sunbeam_dir.join("integration").display() + ); + } + + let image = format!("{}/studio/integration:latest", env.registry); + step(&format!("Building integration -> {image} ...")); + + // .dockerignore needs to be at context root + let root_ignore = sunbeam_dir.join(".dockerignore"); + let mut copied_ignore = false; + if !root_ignore.exists() && dockerignore.exists() { + std::fs::copy(&dockerignore, &root_ignore).ok(); + copied_ignore = true; + } + + let result = build_image( + &env, + &image, + &dockerfile, + &sunbeam_dir, + None, + None, + push, + false, + &[], + ) + .await; + + if copied_ignore && root_ignore.exists() { + let _ = std::fs::remove_file(&root_ignore); + } + + result?; + + if deploy { + deploy_rollout(&env, &["integration"], "lasuite", 120, 
None).await?; + } + Ok(()) +} + +async fn build_kratos_admin(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let kratos_admin_dir = crate::config::get_repo_root().join("kratos-admin"); + if !kratos_admin_dir.is_dir() { + bail!( + "kratos-admin source not found at {}", + kratos_admin_dir.display() + ); + } + + let image = format!("{}/studio/kratos-admin-ui:latest", env.registry); + step(&format!("Building kratos-admin-ui -> {image} ...")); + + build_image( + &env, + &image, + &kratos_admin_dir.join("Dockerfile"), + &kratos_admin_dir, + None, + None, + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &["kratos-admin-ui"], "ory", 120, None).await?; + } + Ok(()) +} + +async fn build_meet(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let meet_dir = crate::config::get_repo_root().join("meet"); + if !meet_dir.is_dir() { + bail!("meet source not found at {}", meet_dir.display()); + } + + let backend_image = format!("{}/studio/meet-backend:latest", env.registry); + let frontend_image = format!("{}/studio/meet-frontend:latest", env.registry); + + // Backend + step(&format!("Building meet-backend -> {backend_image} ...")); + build_image( + &env, + &backend_image, + &meet_dir.join("Dockerfile"), + &meet_dir, + Some("backend-production"), + None, + push, + false, + &[], + ) + .await?; + + // Frontend + step(&format!("Building meet-frontend -> {frontend_image} ...")); + let frontend_dockerfile = meet_dir.join("src").join("frontend").join("Dockerfile"); + if !frontend_dockerfile.exists() { + bail!( + "meet frontend Dockerfile not found at {}", + frontend_dockerfile.display() + ); + } + + let mut build_args = HashMap::new(); + build_args.insert("VITE_API_BASE_URL".to_string(), String::new()); + + build_image( + &env, + &frontend_image, + &frontend_dockerfile, + &meet_dir, + Some("frontend-production"), + Some(&build_args), + push, + false, + &[], + ) + .await?; + + if deploy { + 
deploy_rollout( + &env, + &["meet-backend", "meet-celery-worker", "meet-frontend"], + "lasuite", + 180, + None, + ) + .await?; + } + Ok(()) +} + +async fn build_people(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let people_dir = crate::config::get_repo_root().join("people"); + if !people_dir.is_dir() { + bail!("people source not found at {}", people_dir.display()); + } + + let workspace_dir = people_dir.join("src").join("frontend"); + let app_dir = workspace_dir.join("apps").join("desk"); + let dockerfile = workspace_dir.join("Dockerfile"); + if !dockerfile.exists() { + bail!("Dockerfile not found at {}", dockerfile.display()); + } + + let image = format!("{}/studio/people-frontend:latest", env.registry); + step(&format!("Building people-frontend -> {image} ...")); + + // yarn install + ok("Updating yarn.lock (yarn install in workspace)..."); + let yarn_status = tokio::process::Command::new("yarn") + .args(["install", "--ignore-engines"]) + .current_dir(&workspace_dir) + .status() + .await + .context("Failed to run yarn install")?; + if !yarn_status.success() { + bail!("yarn install failed"); + } + + // cunningham design tokens + ok("Regenerating cunningham design tokens..."); + let cunningham_bin = workspace_dir + .join("node_modules") + .join(".bin") + .join("cunningham"); + let cunningham_status = tokio::process::Command::new(&cunningham_bin) + .args(["-g", "css,ts", "-o", "src/cunningham", "--utility-classes"]) + .current_dir(&app_dir) + .status() + .await + .context("Failed to run cunningham")?; + if !cunningham_status.success() { + bail!("cunningham failed"); + } + + let mut build_args = HashMap::new(); + build_args.insert("DOCKER_USER".to_string(), "101".to_string()); + + build_image( + &env, + &image, + &dockerfile, + &people_dir, + Some("frontend-production"), + Some(&build_args), + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &["people-frontend"], "lasuite", 180, None).await?; + } + Ok(()) 
+} + +/// Message component definition: (cli_name, image_name, dockerfile_rel, target). +const MESSAGES_COMPONENTS: &[(&str, &str, &str, Option<&str>)] = &[ + ( + "messages-backend", + "messages-backend", + "src/backend/Dockerfile", + Some("runtime-distroless-prod"), + ), + ( + "messages-frontend", + "messages-frontend", + "src/frontend/Dockerfile", + Some("runtime-prod"), + ), + ( + "messages-mta-in", + "messages-mta-in", + "src/mta-in/Dockerfile", + None, + ), + ( + "messages-mta-out", + "messages-mta-out", + "src/mta-out/Dockerfile", + None, + ), + ( + "messages-mpa", + "messages-mpa", + "src/mpa/rspamd/Dockerfile", + None, + ), + ( + "messages-socks-proxy", + "messages-socks-proxy", + "src/socks-proxy/Dockerfile", + None, + ), +]; + +async fn build_messages(what: &str, push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let messages_dir = crate::config::get_repo_root().join("messages"); + if !messages_dir.is_dir() { + bail!("messages source not found at {}", messages_dir.display()); + } + + let components: Vec<_> = if what == "messages" { + MESSAGES_COMPONENTS.to_vec() + } else { + MESSAGES_COMPONENTS + .iter() + .filter(|(name, _, _, _)| *name == what) + .copied() + .collect() + }; + + let mut built_images = Vec::new(); + + for (component, image_name, dockerfile_rel, target) in &components { + let dockerfile = messages_dir.join(dockerfile_rel); + if !dockerfile.exists() { + warn(&format!( + "Dockerfile not found at {} -- skipping {component}", + dockerfile.display() + )); + continue; + } + + let image = format!("{}/studio/{image_name}:latest", env.registry); + let context_dir = dockerfile.parent().unwrap_or(&messages_dir); + step(&format!("Building {component} -> {image} ...")); + + // Patch ghcr.io/astral-sh/uv COPY for messages-backend on local builds + let mut cleanup_paths = Vec::new(); + let actual_dockerfile; + + if !env.is_prod && *image_name == "messages-backend" { + let (patched, cleanup) = + 
patch_dockerfile_uv(&dockerfile, context_dir, &env.platform).await?; + actual_dockerfile = patched; + cleanup_paths = cleanup; + } else { + actual_dockerfile = dockerfile.clone(); + } + + build_image( + &env, + &image, + &actual_dockerfile, + context_dir, + *target, + None, + push, + false, + &cleanup_paths, + ) + .await?; + + built_images.push(image); + } + + if deploy && !built_images.is_empty() { + deploy_rollout( + &env, + &[ + "messages-backend", + "messages-worker", + "messages-frontend", + "messages-mta-in", + "messages-mta-out", + "messages-mpa", + "messages-socks-proxy", + ], + "lasuite", + 180, + None, + ) + .await?; + } + + Ok(()) +} + +/// Build a La Suite frontend image from source and push to the Gitea registry. +#[allow(clippy::too_many_arguments)] +async fn build_la_suite_frontend( + app: &str, + repo_dir: &Path, + workspace_rel: &str, + app_rel: &str, + dockerfile_rel: &str, + image_name: &str, + deployment: &str, + namespace: &str, + push: bool, + deploy: bool, +) -> Result<()> { + let env = get_build_env().await?; + + let workspace_dir = repo_dir.join(workspace_rel); + let app_dir = repo_dir.join(app_rel); + let dockerfile = repo_dir.join(dockerfile_rel); + + if !repo_dir.is_dir() { + bail!("{app} source not found at {}", repo_dir.display()); + } + if !dockerfile.exists() { + bail!("Dockerfile not found at {}", dockerfile.display()); + } + + let image = format!("{}/studio/{image_name}:latest", env.registry); + step(&format!("Building {app} -> {image} ...")); + + ok("Updating yarn.lock (yarn install in workspace)..."); + let yarn_status = tokio::process::Command::new("yarn") + .args(["install", "--ignore-engines"]) + .current_dir(&workspace_dir) + .status() + .await + .context("Failed to run yarn install")?; + if !yarn_status.success() { + bail!("yarn install failed"); + } + + ok("Regenerating cunningham design tokens (yarn build-theme)..."); + let theme_status = tokio::process::Command::new("yarn") + .args(["build-theme"]) + 
.current_dir(&app_dir) + .status() + .await + .context("Failed to run yarn build-theme")?; + if !theme_status.success() { + bail!("yarn build-theme failed"); + } + + let mut build_args = HashMap::new(); + build_args.insert("DOCKER_USER".to_string(), "101".to_string()); + + build_image( + &env, + &image, + &dockerfile, + repo_dir, + Some("frontend-production"), + Some(&build_args), + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &[deployment], namespace, 180, None).await?; + } + Ok(()) +} + +/// Download uv from GitHub releases and return a patched Dockerfile path. +async fn patch_dockerfile_uv( + dockerfile_path: &Path, + context_dir: &Path, + platform: &str, +) -> Result<(PathBuf, Vec)> { + let content = std::fs::read_to_string(dockerfile_path) + .context("Failed to read Dockerfile for uv patching")?; + + // Match COPY --from=ghcr.io/astral-sh/uv@sha256:... /uv /uvx /bin/ + let original_copy = content + .lines() + .find(|line| { + line.contains("COPY") + && line.contains("--from=ghcr.io/astral-sh/uv@sha256:") + && line.contains("/uv") + && line.contains("/bin/") + }) + .map(|line| line.trim().to_string()); + + let original_copy = match original_copy { + Some(c) => c, + None => return Ok((dockerfile_path.to_path_buf(), vec![])), + }; + + // Find uv version from comment like: oci://ghcr.io/astral-sh/uv:0.x.y + let version = content + .lines() + .find_map(|line| { + let marker = "oci://ghcr.io/astral-sh/uv:"; + if let Some(idx) = line.find(marker) { + let rest = &line[idx + marker.len()..]; + let ver = rest.split_whitespace().next().unwrap_or(""); + if !ver.is_empty() { + Some(ver.to_string()) + } else { + None + } + } else { + None + } + }); + + let version = match version { + Some(v) => v, + None => { + warn("Could not find uv version comment in Dockerfile; ghcr.io pull may fail."); + return Ok((dockerfile_path.to_path_buf(), vec![])); + } + }; + + let arch = if platform.contains("amd64") { + "x86_64" + } else { + "aarch64" + }; + + 
let url = format!( + "https://github.com/astral-sh/uv/releases/download/{version}/uv-{arch}-unknown-linux-gnu.tar.gz" + ); + + let stage_dir = context_dir.join("_sunbeam_uv_stage"); + let patched_df = dockerfile_path + .parent() + .unwrap_or(dockerfile_path) + .join("Dockerfile._sunbeam_patched"); + let cleanup = vec![stage_dir.clone(), patched_df.clone()]; + + ok(&format!( + "Downloading uv {version} ({arch}) from GitHub releases to bypass ghcr.io..." + )); + + std::fs::create_dir_all(&stage_dir)?; + + // Download tarball + let response = reqwest::get(&url) + .await + .context("Failed to download uv release")?; + let tarball_bytes = response.bytes().await?; + + // Extract uv and uvx from tarball + let decoder = flate2::read::GzDecoder::new(&tarball_bytes[..]); + let mut archive = tar::Archive::new(decoder); + + for entry in archive.entries()? { + let mut entry = entry?; + let path = entry.path()?.to_path_buf(); + let file_name = path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(); + + if (file_name == "uv" || file_name == "uvx") && entry.header().entry_type().is_file() { + let dest = stage_dir.join(&file_name); + let mut outfile = std::fs::File::create(&dest)?; + std::io::copy(&mut entry, &mut outfile)?; + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + std::fs::set_permissions(&dest, std::fs::Permissions::from_mode(0o755))?; + } + } + } + + if !stage_dir.join("uv").exists() { + warn("uv binary not found in release tarball; build may fail."); + return Ok((dockerfile_path.to_path_buf(), cleanup)); + } + + let patched = content.replace( + &original_copy, + "COPY _sunbeam_uv_stage/uv _sunbeam_uv_stage/uvx /bin/", + ); + std::fs::write(&patched_df, patched)?; + ok(&format!(" uv {version} staged; using patched Dockerfile.")); + + Ok((patched_df, cleanup)) +} + +async fn build_projects(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let projects_dir = 
crate::config::get_repo_root().join("projects"); + if !projects_dir.is_dir() { + bail!("projects source not found at {}", projects_dir.display()); + } + + let image = format!("{}/studio/projects:latest", env.registry); + step(&format!("Building projects -> {image} ...")); + + build_image( + &env, + &image, + &projects_dir.join("Dockerfile"), + &projects_dir, + None, + None, + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &["projects"], "lasuite", 180, Some(&[image])).await?; + } + Ok(()) +} + +async fn build_calendars(push: bool, deploy: bool) -> Result<()> { + let env = get_build_env().await?; + let cal_dir = crate::config::get_repo_root().join("calendars"); + if !cal_dir.is_dir() { + bail!("calendars source not found at {}", cal_dir.display()); + } + + let backend_dir = cal_dir.join("src").join("backend"); + let backend_image = format!("{}/studio/calendars-backend:latest", env.registry); + step(&format!("Building calendars-backend -> {backend_image} ...")); + + // Stage translations.json into the build context + let translations_src = cal_dir + .join("src") + .join("frontend") + .join("apps") + .join("calendars") + .join("src") + .join("features") + .join("i18n") + .join("translations.json"); + + let translations_dst = backend_dir.join("_translations.json"); + let mut cleanup: Vec = Vec::new(); + let mut dockerfile = backend_dir.join("Dockerfile"); + + if translations_src.exists() { + std::fs::copy(&translations_src, &translations_dst)?; + cleanup.push(translations_dst); + + // Patch Dockerfile to COPY translations into production image + let mut content = std::fs::read_to_string(&dockerfile)?; + content.push_str( + "\n# Sunbeam: bake translations.json for default calendar names\n\ + COPY _translations.json /data/translations.json\n", + ); + let patched_df = backend_dir.join("Dockerfile._sunbeam_patched"); + std::fs::write(&patched_df, content)?; + cleanup.push(patched_df.clone()); + dockerfile = patched_df; + } + + build_image( + 
&env, + &backend_image, + &dockerfile, + &backend_dir, + Some("backend-production"), + None, + push, + false, + &cleanup, + ) + .await?; + + // caldav + let caldav_image = format!("{}/studio/calendars-caldav:latest", env.registry); + step(&format!("Building calendars-caldav -> {caldav_image} ...")); + let caldav_dir = cal_dir.join("src").join("caldav"); + build_image( + &env, + &caldav_image, + &caldav_dir.join("Dockerfile"), + &caldav_dir, + None, + None, + push, + false, + &[], + ) + .await?; + + // frontend + let frontend_image = format!("{}/studio/calendars-frontend:latest", env.registry); + step(&format!( + "Building calendars-frontend -> {frontend_image} ..." + )); + let integration_base = format!("https://integration.{}", env.domain); + let mut build_args = HashMap::new(); + build_args.insert( + "VISIO_BASE_URL".to_string(), + format!("https://meet.{}", env.domain), + ); + build_args.insert( + "GAUFRE_WIDGET_PATH".to_string(), + format!("{integration_base}/api/v2/lagaufre.js"), + ); + build_args.insert( + "GAUFRE_API_URL".to_string(), + format!("{integration_base}/api/v2/services.json"), + ); + build_args.insert( + "THEME_CSS_URL".to_string(), + format!("{integration_base}/api/v2/theme.css"), + ); + + let frontend_dir = cal_dir.join("src").join("frontend"); + build_image( + &env, + &frontend_image, + &frontend_dir.join("Dockerfile"), + &frontend_dir, + Some("frontend-production"), + Some(&build_args), + push, + false, + &[], + ) + .await?; + + if deploy { + deploy_rollout( + &env, + &[ + "calendars-backend", + "calendars-worker", + "calendars-caldav", + "calendars-frontend", + ], + "lasuite", + 180, + Some(&[backend_image, caldav_image, frontend_image]), + ) + .await?; + } + Ok(()) +} + +// --------------------------------------------------------------------------- +// Build dispatch +// --------------------------------------------------------------------------- + +/// Build an image. Pass push=true to push, deploy=true to also apply + rollout. 
+pub async fn cmd_build(what: &BuildTarget, push: bool, deploy: bool) -> Result<()> { + match what { + BuildTarget::Proxy => build_proxy(push, deploy).await, + BuildTarget::Integration => build_integration(push, deploy).await, + BuildTarget::KratosAdmin => build_kratos_admin(push, deploy).await, + BuildTarget::Meet => build_meet(push, deploy).await, + BuildTarget::DocsFrontend => { + let repo_dir = crate::config::get_repo_root().join("docs"); + build_la_suite_frontend( + "docs-frontend", + &repo_dir, + "src/frontend", + "src/frontend/apps/impress", + "src/frontend/Dockerfile", + "impress-frontend", + "docs-frontend", + "lasuite", + push, + deploy, + ) + .await + } + BuildTarget::PeopleFrontend | BuildTarget::People => build_people(push, deploy).await, + BuildTarget::Messages => build_messages("messages", push, deploy).await, + BuildTarget::MessagesBackend => build_messages("messages-backend", push, deploy).await, + BuildTarget::MessagesFrontend => build_messages("messages-frontend", push, deploy).await, + BuildTarget::MessagesMtaIn => build_messages("messages-mta-in", push, deploy).await, + BuildTarget::MessagesMtaOut => build_messages("messages-mta-out", push, deploy).await, + BuildTarget::MessagesMpa => build_messages("messages-mpa", push, deploy).await, + BuildTarget::MessagesSocksProxy => { + build_messages("messages-socks-proxy", push, deploy).await + } + BuildTarget::Tuwunel => build_tuwunel(push, deploy).await, + BuildTarget::Calendars => build_calendars(push, deploy).await, + BuildTarget::Projects => build_projects(push, deploy).await, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn managed_ns_is_sorted() { + let mut sorted = MANAGED_NS.to_vec(); + sorted.sort(); + assert_eq!( + MANAGED_NS, &sorted[..], + "MANAGED_NS should be in alphabetical order" + ); + } + + #[test] + fn managed_ns_contains_expected_namespaces() { + assert!(MANAGED_NS.contains(&"data")); + assert!(MANAGED_NS.contains(&"devtools")); + 
assert!(MANAGED_NS.contains(&"ingress")); + assert!(MANAGED_NS.contains(&"ory")); + assert!(MANAGED_NS.contains(&"matrix")); + } + + #[test] + fn amd64_only_images_all_from_docker_hub() { + for (src, _org, _repo, _tag) in AMD64_ONLY_IMAGES { + assert!( + src.starts_with("docker.io/"), + "Expected docker.io prefix, got: {src}" + ); + } + } + + #[test] + fn amd64_only_images_all_have_latest_tag() { + for (src, _org, _repo, tag) in AMD64_ONLY_IMAGES { + assert_eq!( + *tag, "latest", + "Expected 'latest' tag for {src}, got: {tag}" + ); + } + } + + #[test] + fn amd64_only_images_non_empty() { + assert!( + !AMD64_ONLY_IMAGES.is_empty(), + "AMD64_ONLY_IMAGES should not be empty" + ); + } + + #[test] + fn amd64_only_images_org_is_studio() { + for (src, org, _repo, _tag) in AMD64_ONLY_IMAGES { + assert_eq!( + *org, "studio", + "Expected org 'studio' for {src}, got: {org}" + ); + } + } + + #[test] + fn build_target_display_proxy() { + assert_eq!(BuildTarget::Proxy.to_string(), "proxy"); + } + + #[test] + fn build_target_display_kratos_admin() { + assert_eq!(BuildTarget::KratosAdmin.to_string(), "kratos-admin"); + } + + #[test] + fn build_target_display_all_lowercase_or_hyphenated() { + let targets = [ + BuildTarget::Proxy, + BuildTarget::Integration, + BuildTarget::KratosAdmin, + BuildTarget::Meet, + BuildTarget::DocsFrontend, + BuildTarget::PeopleFrontend, + BuildTarget::People, + BuildTarget::Messages, + BuildTarget::MessagesBackend, + BuildTarget::MessagesFrontend, + BuildTarget::MessagesMtaIn, + BuildTarget::MessagesMtaOut, + BuildTarget::MessagesMpa, + BuildTarget::MessagesSocksProxy, + BuildTarget::Tuwunel, + BuildTarget::Calendars, + BuildTarget::Projects, + ]; + for t in &targets { + let s = t.to_string(); + assert!( + s.chars().all(|c| c.is_ascii_lowercase() || c == '-'), + "BuildTarget display '{s}' has unexpected characters" + ); + } + } + + #[test] + fn gitea_admin_user_constant() { + assert_eq!(GITEA_ADMIN_USER, "gitea_admin"); + } + + #[test] + fn 
messages_components_non_empty() { + assert!(!MESSAGES_COMPONENTS.is_empty()); + } + + #[test] + fn messages_components_dockerfiles_are_relative() { + for (_name, _image, dockerfile_rel, _target) in MESSAGES_COMPONENTS { + assert!( + dockerfile_rel.ends_with("Dockerfile"), + "Expected Dockerfile suffix in: {dockerfile_rel}" + ); + assert!( + !dockerfile_rel.starts_with('/'), + "Dockerfile path should be relative: {dockerfile_rel}" + ); + } + } + + #[test] + fn messages_components_names_match_build_targets() { + for (name, _image, _df, _target) in MESSAGES_COMPONENTS { + assert!( + name.starts_with("messages-"), + "Component name should start with 'messages-': {name}" + ); + } + } } diff --git a/src/manifests.rs b/src/manifests.rs index 81f814d..e422536 100644 --- a/src/manifests.rs +++ b/src/manifests.rs @@ -34,8 +34,386 @@ pub fn filter_by_namespace(manifests: &str, namespace: &str) -> String { format!("---\n{}\n", kept.join("\n---\n")) } -pub async fn cmd_apply(_env: &str, _domain: &str, _email: &str, _namespace: &str) -> Result<()> { - todo!("cmd_apply: kustomize build + kube-rs apply pipeline") +/// Build kustomize overlay for env, substitute domain/email, apply via kube-rs. +/// +/// Runs a second convergence pass if cert-manager is present in the overlay — +/// cert-manager registers a ValidatingWebhook that must be running before +/// ClusterIssuer / Certificate resources can be created. +pub async fn cmd_apply(env: &str, domain: &str, email: &str, namespace: &str) -> Result<()> { + // Fall back to config for ACME email if not provided via CLI flag. + let email = if email.is_empty() { + crate::config::load_config().acme_email + } else { + email.to_string() + }; + + let infra_dir = crate::config::get_infra_dir(); + + let (resolved_domain, overlay) = if env == "production" { + let d = if domain.is_empty() { + crate::kube::get_domain().await? 
+ } else { + domain.to_string() + }; + if d.is_empty() { + anyhow::bail!("--domain is required for production apply on first deploy"); + } + let overlay = infra_dir.join("overlays").join("production"); + (d, overlay) + } else { + // Local: discover domain from Lima IP + let d = crate::kube::get_domain().await?; + let overlay = infra_dir.join("overlays").join("local"); + (d, overlay) + }; + + let scope = if namespace.is_empty() { + String::new() + } else { + format!(" [{namespace}]") + }; + crate::output::step(&format!( + "Applying manifests (env: {env}, domain: {resolved_domain}){scope}..." + )); + + if env == "local" { + apply_mkcert_ca_configmap().await; + } + + let ns_list = if namespace.is_empty() { + None + } else { + Some(vec![namespace.to_string()]) + }; + pre_apply_cleanup(ns_list.as_deref()).await; + + let before = snapshot_configmaps().await; + let mut manifests = + crate::kube::kustomize_build(&overlay, &resolved_domain, &email).await?; + + if !namespace.is_empty() { + manifests = filter_by_namespace(&manifests, namespace); + if manifests.trim().is_empty() { + crate::output::warn(&format!( + "No resources found for namespace '{namespace}' -- check the name and try again." 
+ )); + return Ok(()); + } + } + + // First pass: may emit errors for resources that depend on webhooks not yet running + if let Err(e) = crate::kube::kube_apply(&manifests).await { + crate::output::warn(&format!("First apply pass had errors (may be expected): {e}")); + } + + // If cert-manager is in the overlay, wait for its webhook then re-apply + let cert_manager_present = overlay + .join("../../base/cert-manager") + .canonicalize() + .map(|p| p.exists()) + .unwrap_or(false); + + if cert_manager_present && namespace.is_empty() { + if wait_for_webhook("cert-manager", "cert-manager-webhook", 120).await { + crate::output::ok("Running convergence pass for cert-manager resources..."); + let manifests2 = + crate::kube::kustomize_build(&overlay, &resolved_domain, &email).await?; + crate::kube::kube_apply(&manifests2).await?; + } + } + + restart_for_changed_configmaps(&before, &snapshot_configmaps().await).await; + + // Post-apply hooks + if namespace.is_empty() || namespace == "matrix" { + patch_tuwunel_oauth2_redirect(&resolved_domain).await; + inject_opensearch_model_id().await; + } + if namespace.is_empty() || namespace == "data" { + ensure_opensearch_ml().await; + } + + crate::output::ok("Applied."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/// Delete immutable resources that must be re-created on each apply. 
+async fn pre_apply_cleanup(namespaces: Option<&[String]>) { + let ns_list: Vec<&str> = match namespaces { + Some(ns) => ns.iter().map(|s| s.as_str()).collect(), + None => MANAGED_NS.to_vec(), + }; + + crate::output::ok("Cleaning up immutable Jobs and test Pods..."); + for ns in &ns_list { + // Delete all jobs + let client = match crate::kube::get_client().await { + Ok(c) => c, + Err(_) => return, + }; + let jobs: kube::api::Api = + kube::api::Api::namespaced(client.clone(), ns); + if let Ok(job_list) = jobs.list(&kube::api::ListParams::default()).await { + for job in job_list.items { + if let Some(name) = &job.metadata.name { + let dp = kube::api::DeleteParams::default(); + let _ = jobs.delete(name, &dp).await; + } + } + } + + // Delete test pods + let pods: kube::api::Api = + kube::api::Api::namespaced(client.clone(), ns); + if let Ok(pod_list) = pods.list(&kube::api::ListParams::default()).await { + for pod in pod_list.items { + if let Some(name) = &pod.metadata.name { + if name.ends_with("-test-connection") + || name.ends_with("-server-test") + || name.ends_with("-test") + { + let dp = kube::api::DeleteParams::default(); + let _ = pods.delete(name, &dp).await; + } + } + } + } + } +} + +/// Snapshot ConfigMap resourceVersions across managed namespaces. +async fn snapshot_configmaps() -> std::collections::HashMap { + let mut result = std::collections::HashMap::new(); + let client = match crate::kube::get_client().await { + Ok(c) => c, + Err(_) => return result, + }; + + for ns in MANAGED_NS { + let cms: kube::api::Api = + kube::api::Api::namespaced(client.clone(), ns); + if let Ok(cm_list) = cms.list(&kube::api::ListParams::default()).await { + for cm in cm_list.items { + if let (Some(name), Some(rv)) = ( + &cm.metadata.name, + &cm.metadata.resource_version, + ) { + result.insert(format!("{ns}/{name}"), rv.clone()); + } + } + } + } + result +} + +/// Restart deployments that mount any ConfigMap whose resourceVersion changed. 
+async fn restart_for_changed_configmaps( + before: &std::collections::HashMap, + after: &std::collections::HashMap, +) { + let mut changed_by_ns: std::collections::HashMap<&str, std::collections::HashSet<&str>> = + std::collections::HashMap::new(); + + for (key, rv) in after { + if before.get(key) != Some(rv) { + if let Some((ns, name)) = key.split_once('/') { + changed_by_ns.entry(ns).or_default().insert(name); + } + } + } + + if changed_by_ns.is_empty() { + return; + } + + let client = match crate::kube::get_client().await { + Ok(c) => c, + Err(_) => return, + }; + + for (ns, cm_names) in &changed_by_ns { + let deps: kube::api::Api = + kube::api::Api::namespaced(client.clone(), ns); + if let Ok(dep_list) = deps.list(&kube::api::ListParams::default()).await { + for dep in dep_list.items { + let dep_name = dep.metadata.name.as_deref().unwrap_or(""); + // Check if this deployment mounts any changed ConfigMap + let volumes = dep + .spec + .as_ref() + .and_then(|s| s.template.spec.as_ref()) + .and_then(|s| s.volumes.as_ref()); + + if let Some(vols) = volumes { + let mounts_changed = vols.iter().any(|v| { + if let Some(cm) = &v.config_map { + cm_names.contains(cm.name.as_str()) + } else { + false + } + }); + if mounts_changed { + crate::output::ok(&format!( + "Restarting {ns}/{dep_name} (ConfigMap updated)..." + )); + let _ = crate::kube::kube_rollout_restart(ns, dep_name).await; + } + } + } + } + } +} + +/// Wait for a webhook endpoint to become ready. +async fn wait_for_webhook(ns: &str, svc: &str, timeout_secs: u64) -> bool { + crate::output::ok(&format!( + "Waiting for {ns}/{svc} webhook (up to {timeout_secs}s)..." 
+ )); + let deadline = + std::time::Instant::now() + std::time::Duration::from_secs(timeout_secs); + + let client = match crate::kube::get_client().await { + Ok(c) => c, + Err(_) => return false, + }; + let eps: kube::api::Api = + kube::api::Api::namespaced(client.clone(), ns); + + loop { + if std::time::Instant::now() > deadline { + crate::output::warn(&format!( + " {ns}/{svc} not ready after {timeout_secs}s -- continuing anyway." + )); + return false; + } + + if let Ok(Some(ep)) = eps.get_opt(svc).await { + let has_addr = ep + .subsets + .as_ref() + .and_then(|ss| ss.first()) + .and_then(|s| s.addresses.as_ref()) + .is_some_and(|a| !a.is_empty()); + if has_addr { + crate::output::ok(&format!(" {ns}/{svc} ready.")); + return true; + } + } + + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + } +} + +/// Create/update gitea-mkcert-ca ConfigMap from the local mkcert root CA. +async fn apply_mkcert_ca_configmap() { + let caroot = tokio::process::Command::new("mkcert") + .arg("-CAROOT") + .output() + .await; + + let caroot_path = match caroot { + Ok(out) if out.status.success() => { + String::from_utf8_lossy(&out.stdout).trim().to_string() + } + _ => { + crate::output::warn("mkcert not found -- skipping gitea-mkcert-ca ConfigMap."); + return; + } + }; + + let ca_pem_path = std::path::Path::new(&caroot_path).join("rootCA.pem"); + let ca_pem = match std::fs::read_to_string(&ca_pem_path) { + Ok(s) => s, + Err(_) => { + crate::output::warn(&format!( + "mkcert root CA not found at {} -- skipping.", + ca_pem_path.display() + )); + return; + } + }; + + let cm = serde_json::json!({ + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": {"name": "gitea-mkcert-ca", "namespace": "devtools"}, + "data": {"ca.crt": ca_pem}, + }); + + let manifest = serde_json::to_string(&cm).unwrap_or_default(); + if let Err(e) = crate::kube::kube_apply(&manifest).await { + crate::output::warn(&format!("Failed to apply gitea-mkcert-ca: {e}")); + } else { + 
crate::output::ok("gitea-mkcert-ca ConfigMap applied."); + } +} + +/// Patch the tuwunel OAuth2Client redirect URI with the actual client_id. +async fn patch_tuwunel_oauth2_redirect(domain: &str) { + let client_id = match crate::kube::kube_get_secret_field("matrix", "oidc-tuwunel", "CLIENT_ID") + .await + { + Ok(id) if !id.is_empty() => id, + _ => { + crate::output::warn( + "oidc-tuwunel secret not yet available -- skipping redirect URI patch.", + ); + return; + } + }; + + let redirect_uri = format!( + "https://messages.{domain}/_matrix/client/unstable/login/sso/callback/{client_id}" + ); + + // Patch the OAuth2Client CRD via kube-rs + let client = match crate::kube::get_client().await { + Ok(c) => c, + Err(_) => return, + }; + + let ar = kube::api::ApiResource { + group: "hydra.ory.sh".into(), + version: "v1alpha1".into(), + api_version: "hydra.ory.sh/v1alpha1".into(), + kind: "OAuth2Client".into(), + plural: "oauth2clients".into(), + }; + + let api: kube::api::Api = + kube::api::Api::namespaced_with(client.clone(), "matrix", &ar); + + let patch = serde_json::json!({ + "spec": { + "redirectUris": [redirect_uri] + } + }); + + let pp = kube::api::PatchParams::default(); + if let Err(e) = api + .patch("tuwunel", &pp, &kube::api::Patch::Merge(patch)) + .await + { + crate::output::warn(&format!("Failed to patch tuwunel OAuth2Client: {e}")); + } else { + crate::output::ok("Patched tuwunel OAuth2Client redirect URI."); + } +} + +/// Inject OpenSearch model_id into matrix/opensearch-ml-config ConfigMap. +async fn inject_opensearch_model_id() { + // Read model_id from the ingest pipeline via OpenSearch API + // This requires port-forward to opensearch — skip if not reachable + // TODO: implement opensearch API calls via port-forward + reqwest +} + +/// Configure OpenSearch ML Commons for neural search. 
+async fn ensure_opensearch_ml() { + // TODO: implement opensearch ML setup via port-forward + reqwest } #[cfg(test)] diff --git a/src/secrets.rs b/src/secrets.rs index 69c959e..15725bc 100644 --- a/src/secrets.rs +++ b/src/secrets.rs @@ -7,3 +7,19 @@ pub async fn cmd_seed() -> Result<()> { pub async fn cmd_verify() -> Result<()> { todo!("cmd_verify: VSO E2E verification via kube-rs") } + +#[cfg(test)] +mod tests { + #[test] + fn module_compiles() { + // Verify the secrets module compiles and its public API exists. + // The actual functions (cmd_seed, cmd_verify) are async stubs that + // require a live cluster, so we just confirm they are callable types. + let _seed: fn() -> std::pin::Pin< + Box>>, + > = || Box::pin(super::cmd_seed()); + let _verify: fn() -> std::pin::Pin< + Box>>, + > = || Box::pin(super::cmd_verify()); + } +} diff --git a/src/services.rs b/src/services.rs index 6499b8f..8f52645 100644 --- a/src/services.rs +++ b/src/services.rs @@ -1,17 +1,584 @@ -use anyhow::Result; +//! Service management — status, logs, restart. -pub async fn cmd_status(_target: Option<&str>) -> Result<()> { - todo!("cmd_status: pod health via kube-rs") +use anyhow::{bail, Result}; +use k8s_openapi::api::core::v1::Pod; +use kube::api::{Api, DynamicObject, ListParams, LogParams}; +use kube::ResourceExt; +use std::collections::BTreeMap; +use crate::kube::{get_client, kube_rollout_restart, parse_target}; +use crate::output::{ok, step, warn}; + +/// Namespaces managed by sunbeam. +pub const MANAGED_NS: &[&str] = &[ + "data", + "devtools", + "ingress", + "lasuite", + "matrix", + "media", + "ory", + "storage", + "vault-secrets-operator", +]; + +/// Services that can be rollout-restarted, as (namespace, deployment) pairs. 
+pub const SERVICES_TO_RESTART: &[(&str, &str)] = &[ + ("ory", "hydra"), + ("ory", "kratos"), + ("ory", "login-ui"), + ("devtools", "gitea"), + ("storage", "seaweedfs-filer"), + ("lasuite", "hive"), + ("lasuite", "people-backend"), + ("lasuite", "people-frontend"), + ("lasuite", "people-celery-worker"), + ("lasuite", "people-celery-beat"), + ("lasuite", "projects"), + ("matrix", "tuwunel"), + ("media", "livekit-server"), +]; + +// --------------------------------------------------------------------------- +// Status helpers +// --------------------------------------------------------------------------- + +/// Parsed pod row for display. +struct PodRow { + ns: String, + name: String, + ready: String, + status: String, } -pub async fn cmd_logs(_target: &str, _follow: bool) -> Result<()> { - todo!("cmd_logs: stream pod logs via kube-rs") +fn icon_for_status(status: &str) -> &'static str { + match status { + "Running" | "Completed" | "Succeeded" => "\u{2713}", + "Pending" => "\u{25cb}", + "Failed" => "\u{2717}", + _ => "?", + } } -pub async fn cmd_get(_target: &str, _output: &str) -> Result<()> { - todo!("cmd_get: get pod via kube-rs") +fn is_unhealthy(pod: &Pod) -> bool { + let status = pod.status.as_ref(); + let phase = status + .and_then(|s| s.phase.as_deref()) + .unwrap_or("Unknown"); + + match phase { + "Running" => { + // Check all containers are ready. 
+ let container_statuses = status + .and_then(|s| s.container_statuses.as_ref()); + if let Some(cs) = container_statuses { + let total = cs.len(); + let ready = cs.iter().filter(|c| c.ready).count(); + ready != total + } else { + true + } + } + "Succeeded" | "Completed" => false, + _ => true, + } } -pub async fn cmd_restart(_target: Option<&str>) -> Result<()> { - todo!("cmd_restart: rollout restart via kube-rs") +fn pod_phase(pod: &Pod) -> String { + pod.status + .as_ref() + .and_then(|s| s.phase.clone()) + .unwrap_or_else(|| "Unknown".to_string()) +} + +fn pod_ready_str(pod: &Pod) -> String { + let cs = pod + .status + .as_ref() + .and_then(|s| s.container_statuses.as_ref()); + match cs { + Some(cs) => { + let total = cs.len(); + let ready = cs.iter().filter(|c| c.ready).count(); + format!("{ready}/{total}") + } + None => "0/0".to_string(), + } +} + +// --------------------------------------------------------------------------- +// VSO sync status +// --------------------------------------------------------------------------- + +async fn vso_sync_status() -> Result<()> { + step("VSO secret sync status..."); + + let client = get_client().await?; + let mut all_ok = true; + + // --- VaultStaticSecrets --- + { + let ar = kube::api::ApiResource { + group: "secrets.hashicorp.com".into(), + version: "v1beta1".into(), + api_version: "secrets.hashicorp.com/v1beta1".into(), + kind: "VaultStaticSecret".into(), + plural: "vaultstaticsecrets".into(), + }; + + let api: Api = Api::all_with(client.clone(), &ar); + let list = api.list(&ListParams::default()).await; + + if let Ok(list) = list { + // Group by namespace and sort + let mut grouped: BTreeMap> = BTreeMap::new(); + for obj in &list.items { + let ns = obj.namespace().unwrap_or_default(); + let name = obj.name_any(); + let mac = obj + .data + .get("status") + .and_then(|s| s.get("secretMAC")) + .and_then(|v| v.as_str()) + .unwrap_or(""); + let synced = !mac.is_empty() && mac != ""; + if !synced { + all_ok = false; + } + 
grouped.entry(ns).or_default().push((name, synced)); + } + for (ns, mut items) in grouped { + println!(" {ns} (VSS):"); + items.sort(); + for (name, synced) in items { + let icon = if synced { "\u{2713}" } else { "\u{2717}" }; + println!(" {icon} {name}"); + } + } + } + } + + // --- VaultDynamicSecrets --- + { + let ar = kube::api::ApiResource { + group: "secrets.hashicorp.com".into(), + version: "v1beta1".into(), + api_version: "secrets.hashicorp.com/v1beta1".into(), + kind: "VaultDynamicSecret".into(), + plural: "vaultdynamicsecrets".into(), + }; + + let api: Api = Api::all_with(client.clone(), &ar); + let list = api.list(&ListParams::default()).await; + + if let Ok(list) = list { + let mut grouped: BTreeMap> = BTreeMap::new(); + for obj in &list.items { + let ns = obj.namespace().unwrap_or_default(); + let name = obj.name_any(); + let renewed = obj + .data + .get("status") + .and_then(|s| s.get("lastRenewalTime")) + .and_then(|v| v.as_str()) + .unwrap_or("0"); + let synced = !renewed.is_empty() && renewed != "0" && renewed != ""; + if !synced { + all_ok = false; + } + grouped.entry(ns).or_default().push((name, synced)); + } + for (ns, mut items) in grouped { + println!(" {ns} (VDS):"); + items.sort(); + for (name, synced) in items { + let icon = if synced { "\u{2713}" } else { "\u{2717}" }; + println!(" {icon} {name}"); + } + } + } + } + + println!(); + if all_ok { + ok("All VSO secrets synced."); + } else { + warn("Some VSO secrets are not synced."); + } + Ok(()) +} + +// --------------------------------------------------------------------------- +// Public commands +// --------------------------------------------------------------------------- + +/// Show pod health, optionally filtered by namespace or namespace/service. 
+pub async fn cmd_status(target: Option<&str>) -> Result<()> {
+    step("Pod health across all namespaces...");
+
+    let client = get_client().await?;
+    let (ns_filter, svc_filter) = parse_target(target)?;
+
+    let mut pods: Vec<PodRow> = Vec::new();
+
+    match (ns_filter, svc_filter) {
+        (None, _) => {
+            // All managed namespaces
+            let ns_set: std::collections::HashSet<&str> =
+                MANAGED_NS.iter().copied().collect();
+            for ns in MANAGED_NS {
+                let api: Api<Pod> = Api::namespaced(client.clone(), ns);
+                let lp = ListParams::default();
+                if let Ok(list) = api.list(&lp).await {
+                    for pod in list.items {
+                        let pod_ns = pod.namespace().unwrap_or_default();
+                        // Defensive re-check: the list call is already scoped to
+                        // `ns`, so this only filters pods with a missing/foreign
+                        // namespace field.
+                        if !ns_set.contains(pod_ns.as_str()) {
+                            continue;
+                        }
+                        pods.push(PodRow {
+                            ns: pod_ns,
+                            name: pod.name_any(),
+                            ready: pod_ready_str(&pod),
+                            status: pod_phase(&pod),
+                        });
+                    }
+                }
+            }
+        }
+        (Some(ns), None) => {
+            // All pods in a namespace
+            let api: Api<Pod> = Api::namespaced(client.clone(), ns);
+            let lp = ListParams::default();
+            if let Ok(list) = api.list(&lp).await {
+                for pod in list.items {
+                    pods.push(PodRow {
+                        ns: ns.to_string(),
+                        name: pod.name_any(),
+                        ready: pod_ready_str(&pod),
+                        status: pod_phase(&pod),
+                    });
+                }
+            }
+        }
+        (Some(ns), Some(svc)) => {
+            // Specific service: filter by app label
+            let api: Api<Pod> = Api::namespaced(client.clone(), ns);
+            let lp = ListParams::default().labels(&format!("app={svc}"));
+            if let Ok(list) = api.list(&lp).await {
+                for pod in list.items {
+                    pods.push(PodRow {
+                        ns: ns.to_string(),
+                        name: pod.name_any(),
+                        ready: pod_ready_str(&pod),
+                        status: pod_phase(&pod),
+                    });
+                }
+            }
+        }
+    }
+
+    if pods.is_empty() {
+        warn("No pods found in managed namespaces.");
+        return Ok(());
+    }
+
+    pods.sort_by(|a, b| (&a.ns, &a.name).cmp(&(&b.ns, &b.name)));
+
+    let mut all_ok = true;
+    // Tracks the namespace header most recently printed, so pods are grouped.
+    let mut cur_ns: Option<&str> = None;
+    for row in &pods {
+        if cur_ns != Some(row.ns.as_str()) {
+            println!(" {}:", row.ns);
+            cur_ns = Some(row.ns.as_str());
+        }
+        let icon = icon_for_status(&row.status);
+
+        let mut unhealthy = !matches!(
+            row.status.as_str(),
+            "Running" | "Completed" | "Succeeded"
+        );
+        // For Running pods, check ready ratio ("ready/total" from pod_ready_str)
+        if !unhealthy && row.status == "Running" && row.ready.contains('/') {
+            let parts: Vec<&str> = row.ready.split('/').collect();
+            if parts.len() == 2 && parts[0] != parts[1] {
+                unhealthy = true;
+            }
+        }
+        if unhealthy {
+            all_ok = false;
+        }
+        println!(" {icon} {:<50} {:<6} {}", row.name, row.ready, row.status);
+    }
+
+    println!();
+    if all_ok {
+        ok("All pods healthy.");
+    } else {
+        warn("Some pods are not ready.");
+    }
+
+    vso_sync_status().await?;
+    Ok(())
+}
+
+/// Stream logs for a service. Target must include service name (e.g. ory/kratos).
+///
+/// With `follow`, streams from the FIRST matching pod only; without it, the
+/// last 100 lines of every matching pod are printed.
+pub async fn cmd_logs(target: &str, follow: bool) -> Result<()> {
+    let (ns_opt, name_opt) = parse_target(Some(target))?;
+    let ns = ns_opt.unwrap_or("");
+    let name = match name_opt {
+        Some(n) => n,
+        None => bail!("Logs require a service name, e.g. 'ory/kratos'."),
+    };
+
+    let client = get_client().await?;
+    let api: Api<Pod> = Api::namespaced(client.clone(), ns);
+
+    // Find pods matching the app label
+    let lp = ListParams::default().labels(&format!("app={name}"));
+    let pod_list = api.list(&lp).await?;
+
+    if pod_list.items.is_empty() {
+        bail!("No pods found for {ns}/{name}");
+    }
+
+    if follow {
+        // Stream logs from the first matching pod
+        let pod_name = pod_list.items[0].name_any();
+        let mut lp = LogParams::default();
+        lp.follow = true;
+        lp.tail_lines = Some(100);
+
+        // log_stream returns a futures::AsyncBufRead — use the futures crate to read it
+        use futures::AsyncBufReadExt;
+        let stream = api.log_stream(&pod_name, &lp).await?;
+        let reader = futures::io::BufReader::new(stream);
+        let mut lines = reader.lines();
+        use futures::StreamExt;
+        while let Some(line) = lines.next().await {
+            match line {
+                Ok(line) => println!("{line}"),
+                Err(e) => {
+                    warn(&format!("Log stream error: {e}"));
+                    break;
+                }
+            }
+        }
+    } else {
+        // Print logs from all matching pods
+        for pod in &pod_list.items {
+            let pod_name = pod.name_any();
+            let mut lp = LogParams::default();
+            lp.tail_lines = Some(100);
+
+            match api.logs(&pod_name, &lp).await {
+                Ok(logs) => print!("{logs}"),
+                Err(e) => warn(&format!("Failed to get logs for {pod_name}: {e}")),
+            }
+        }
+    }
+
+    Ok(())
+}
+
+/// Print raw pod output in YAML or JSON format.
+///
+/// `output` selects the format: "json" → pretty JSON, anything else → YAML.
+pub async fn cmd_get(target: &str, output: &str) -> Result<()> {
+    let (ns_opt, name_opt) = parse_target(Some(target))?;
+    let ns = match ns_opt {
+        Some(n) if !n.is_empty() => n,
+        _ => bail!("get requires namespace/name, e.g. 'sunbeam get ory/kratos-abc'"),
+    };
+    let name = match name_opt {
+        Some(n) => n,
+        None => bail!("get requires namespace/name, e.g. 'sunbeam get ory/kratos-abc'"),
+    };
+
+    let client = get_client().await?;
+    let api: Api<Pod> = Api::namespaced(client.clone(), ns);
+
+    let pod = api
+        .get_opt(name)
+        .await?
+        .ok_or_else(|| anyhow::anyhow!("Pod {ns}/{name} not found."))?;
+
+    let text = match output {
+        "json" => serde_json::to_string_pretty(&pod)?,
+        _ => serde_yaml::to_string(&pod)?,
+    };
+    println!("{text}");
+    Ok(())
+}
+
+/// Restart deployments. None=all, 'ory'=namespace, 'ory/kratos'=specific.
+pub async fn cmd_restart(target: Option<&str>) -> Result<()> { + step("Restarting services..."); + + let (ns_filter, svc_filter) = parse_target(target)?; + + let matched: Vec<(&str, &str)> = match (ns_filter, svc_filter) { + (None, _) => SERVICES_TO_RESTART.to_vec(), + (Some(ns), None) => SERVICES_TO_RESTART + .iter() + .filter(|(n, _)| *n == ns) + .copied() + .collect(), + (Some(ns), Some(name)) => SERVICES_TO_RESTART + .iter() + .filter(|(n, d)| *n == ns && *d == name) + .copied() + .collect(), + }; + + if matched.is_empty() { + warn(&format!( + "No matching services for target: {}", + target.unwrap_or("(none)") + )); + return Ok(()); + } + + for (ns, dep) in &matched { + if let Err(e) = kube_rollout_restart(ns, dep).await { + warn(&format!("Failed to restart {ns}/{dep}: {e}")); + } + } + ok("Done."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_managed_ns_contains_expected() { + assert!(MANAGED_NS.contains(&"ory")); + assert!(MANAGED_NS.contains(&"data")); + assert!(MANAGED_NS.contains(&"devtools")); + assert!(MANAGED_NS.contains(&"ingress")); + assert!(MANAGED_NS.contains(&"lasuite")); + assert!(MANAGED_NS.contains(&"matrix")); + assert!(MANAGED_NS.contains(&"media")); + assert!(MANAGED_NS.contains(&"storage")); + assert!(MANAGED_NS.contains(&"vault-secrets-operator")); + assert_eq!(MANAGED_NS.len(), 9); + } + + #[test] + fn test_services_to_restart_contains_expected() { + assert!(SERVICES_TO_RESTART.contains(&("ory", "hydra"))); + assert!(SERVICES_TO_RESTART.contains(&("ory", "kratos"))); + assert!(SERVICES_TO_RESTART.contains(&("ory", "login-ui"))); + assert!(SERVICES_TO_RESTART.contains(&("devtools", "gitea"))); + assert!(SERVICES_TO_RESTART.contains(&("storage", "seaweedfs-filer"))); + assert!(SERVICES_TO_RESTART.contains(&("lasuite", "hive"))); + 
assert!(SERVICES_TO_RESTART.contains(&("matrix", "tuwunel"))); + assert!(SERVICES_TO_RESTART.contains(&("media", "livekit-server"))); + assert_eq!(SERVICES_TO_RESTART.len(), 13); + } + + #[test] + fn test_icon_for_status() { + assert_eq!(icon_for_status("Running"), "\u{2713}"); + assert_eq!(icon_for_status("Completed"), "\u{2713}"); + assert_eq!(icon_for_status("Succeeded"), "\u{2713}"); + assert_eq!(icon_for_status("Pending"), "\u{25cb}"); + assert_eq!(icon_for_status("Failed"), "\u{2717}"); + assert_eq!(icon_for_status("Unknown"), "?"); + assert_eq!(icon_for_status("CrashLoopBackOff"), "?"); + } + + #[test] + fn test_restart_filter_namespace() { + let matched: Vec<(&str, &str)> = SERVICES_TO_RESTART + .iter() + .filter(|(n, _)| *n == "ory") + .copied() + .collect(); + assert_eq!(matched.len(), 3); + assert!(matched.contains(&("ory", "hydra"))); + assert!(matched.contains(&("ory", "kratos"))); + assert!(matched.contains(&("ory", "login-ui"))); + } + + #[test] + fn test_restart_filter_specific() { + let matched: Vec<(&str, &str)> = SERVICES_TO_RESTART + .iter() + .filter(|(n, d)| *n == "ory" && *d == "kratos") + .copied() + .collect(); + assert_eq!(matched.len(), 1); + assert_eq!(matched[0], ("ory", "kratos")); + } + + #[test] + fn test_restart_filter_no_match() { + let matched: Vec<(&str, &str)> = SERVICES_TO_RESTART + .iter() + .filter(|(n, d)| *n == "nonexistent" && *d == "nosuch") + .copied() + .collect(); + assert!(matched.is_empty()); + } + + #[test] + fn test_restart_filter_all() { + let matched: Vec<(&str, &str)> = SERVICES_TO_RESTART.to_vec(); + assert_eq!(matched.len(), 13); + } + + #[test] + fn test_pod_ready_string_format() { + // Verify format: "N/M" + let ready = "2/3"; + let parts: Vec<&str> = ready.split('/').collect(); + assert_eq!(parts.len(), 2); + assert_ne!(parts[0], parts[1]); // unhealthy + } + + #[test] + fn test_unhealthy_detection_by_ready_ratio() { + // Simulate the ready ratio check used in cmd_status + let ready = "1/2"; + let status = 
"Running"; + let mut unhealthy = !matches!(status, "Running" | "Completed" | "Succeeded"); + if !unhealthy && status == "Running" && ready.contains('/') { + let parts: Vec<&str> = ready.split('/').collect(); + if parts.len() == 2 && parts[0] != parts[1] { + unhealthy = true; + } + } + assert!(unhealthy); + } + + #[test] + fn test_healthy_detection_by_ready_ratio() { + let ready = "2/2"; + let status = "Running"; + let mut unhealthy = !matches!(status, "Running" | "Completed" | "Succeeded"); + if !unhealthy && status == "Running" && ready.contains('/') { + let parts: Vec<&str> = ready.split('/').collect(); + if parts.len() == 2 && parts[0] != parts[1] { + unhealthy = true; + } + } + assert!(!unhealthy); + } + + #[test] + fn test_completed_pods_are_healthy() { + let status = "Completed"; + let unhealthy = !matches!(status, "Running" | "Completed" | "Succeeded"); + assert!(!unhealthy); + } + + #[test] + fn test_pending_pods_are_unhealthy() { + let status = "Pending"; + let unhealthy = !matches!(status, "Running" | "Completed" | "Succeeded"); + assert!(unhealthy); + } } diff --git a/src/tools.rs b/src/tools.rs index 937d4a4..27776ea 100644 --- a/src/tools.rs +++ b/src/tools.rs @@ -49,3 +49,132 @@ pub fn ensure_kustomize() -> Result { pub fn ensure_helm() -> Result { extract_embedded(HELM_BIN, "helm") } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn kustomize_bin_is_non_empty() { + assert!( + KUSTOMIZE_BIN.len() > 0, + "Embedded kustomize binary should not be empty" + ); + } + + #[test] + fn helm_bin_is_non_empty() { + assert!( + HELM_BIN.len() > 0, + "Embedded helm binary should not be empty" + ); + } + + #[test] + fn kustomize_bin_has_reasonable_size() { + // kustomize binary should be at least 1 MB + assert!( + KUSTOMIZE_BIN.len() > 1_000_000, + "Embedded kustomize binary seems too small: {} bytes", + KUSTOMIZE_BIN.len() + ); + } + + #[test] + fn helm_bin_has_reasonable_size() { + // helm binary should be at least 1 MB + assert!( + HELM_BIN.len() > 
1_000_000, + "Embedded helm binary seems too small: {} bytes", + HELM_BIN.len() + ); + } + + #[test] + fn cache_dir_ends_with_sunbeam_bin() { + let dir = cache_dir(); + assert!( + dir.ends_with("sunbeam/bin"), + "cache_dir() should end with sunbeam/bin, got: {}", + dir.display() + ); + } + + #[test] + fn cache_dir_is_absolute() { + let dir = cache_dir(); + assert!( + dir.is_absolute(), + "cache_dir() should return an absolute path, got: {}", + dir.display() + ); + } + + #[test] + fn ensure_kustomize_returns_valid_path() { + let path = ensure_kustomize().expect("ensure_kustomize should succeed"); + assert!( + path.ends_with("kustomize"), + "ensure_kustomize path should end with 'kustomize', got: {}", + path.display() + ); + assert!(path.exists(), "kustomize binary should exist at: {}", path.display()); + } + + #[test] + fn ensure_helm_returns_valid_path() { + let path = ensure_helm().expect("ensure_helm should succeed"); + assert!( + path.ends_with("helm"), + "ensure_helm path should end with 'helm', got: {}", + path.display() + ); + assert!(path.exists(), "helm binary should exist at: {}", path.display()); + } + + #[test] + fn ensure_kustomize_is_idempotent() { + let path1 = ensure_kustomize().expect("first call should succeed"); + let path2 = ensure_kustomize().expect("second call should succeed"); + assert_eq!(path1, path2, "ensure_kustomize should return the same path on repeated calls"); + } + + #[test] + fn ensure_helm_is_idempotent() { + let path1 = ensure_helm().expect("first call should succeed"); + let path2 = ensure_helm().expect("second call should succeed"); + assert_eq!(path1, path2, "ensure_helm should return the same path on repeated calls"); + } + + #[test] + fn extracted_kustomize_is_executable() { + let path = ensure_kustomize().expect("ensure_kustomize should succeed"); + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let perms = std::fs::metadata(&path) + .expect("should read metadata") + .permissions(); + assert!( + perms.mode() & 
0o111 != 0, + "kustomize binary should be executable" + ); + } + } + + #[test] + fn extracted_helm_is_executable() { + let path = ensure_helm().expect("ensure_helm should succeed"); + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + let perms = std::fs::metadata(&path) + .expect("should read metadata") + .permissions(); + assert!( + perms.mode() & 0o111 != 0, + "helm binary should be executable" + ); + } + } +} diff --git a/src/users.rs b/src/users.rs index 0d20c8d..ae267ef 100644 --- a/src/users.rs +++ b/src/users.rs @@ -1,53 +1,891 @@ -use anyhow::Result; +//! User management -- Kratos identity operations via port-forwarded admin API. -pub async fn cmd_user_list(_search: &str) -> Result<()> { - todo!("cmd_user_list: ory-kratos-client SDK") +use anyhow::{bail, Context, Result}; +use serde_json::Value; +use std::io::Write; + +use crate::output::{ok, step, table, warn}; + +const SMTP_LOCAL_PORT: u16 = 10025; + +// --------------------------------------------------------------------------- +// Port-forward helper +// --------------------------------------------------------------------------- + +/// Spawn a kubectl port-forward process and return (child, base_url). +/// The caller **must** kill the child when done. 
+fn spawn_port_forward( + ns: &str, + svc: &str, + local_port: u16, + remote_port: u16, +) -> Result<(std::process::Child, String)> { + let ctx = crate::kube::context(); + let child = std::process::Command::new("kubectl") + .arg(format!("--context={ctx}")) + .args([ + "-n", + ns, + "port-forward", + &format!("svc/{svc}"), + &format!("{local_port}:{remote_port}"), + ]) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn() + .with_context(|| format!("Failed to spawn port-forward to {ns}/svc/{svc}"))?; + + // Give the port-forward time to bind + std::thread::sleep(std::time::Duration::from_millis(1500)); + + Ok((child, format!("http://localhost:{local_port}"))) } -pub async fn cmd_user_get(_target: &str) -> Result<()> { - todo!("cmd_user_get: ory-kratos-client SDK") +/// RAII guard that terminates the port-forward on drop. +struct PortForward { + child: std::process::Child, + pub base_url: String, } -pub async fn cmd_user_create(_email: &str, _name: &str, _schema_id: &str) -> Result<()> { - todo!("cmd_user_create: ory-kratos-client SDK") +impl PortForward { + fn new(ns: &str, svc: &str, local_port: u16, remote_port: u16) -> Result { + let (child, base_url) = spawn_port_forward(ns, svc, local_port, remote_port)?; + Ok(Self { child, base_url }) + } + + /// Convenience: Kratos admin (ory/kratos-admin 80 -> 4434). 
+ fn kratos() -> Result { + Self::new("ory", "kratos-admin", 4434, 80) + } } -pub async fn cmd_user_delete(_target: &str) -> Result<()> { - todo!("cmd_user_delete: ory-kratos-client SDK") +impl Drop for PortForward { + fn drop(&mut self) { + let _ = self.child.kill(); + let _ = self.child.wait(); + } } -pub async fn cmd_user_recover(_target: &str) -> Result<()> { - todo!("cmd_user_recover: ory-kratos-client SDK") +// --------------------------------------------------------------------------- +// HTTP helpers +// --------------------------------------------------------------------------- + +/// Make an HTTP request to an admin API endpoint. +fn api( + base_url: &str, + path: &str, + method: &str, + body: Option<&Value>, + prefix: &str, + ok_statuses: &[u16], +) -> Result> { + let url = format!("{base_url}{prefix}{path}"); + let client = reqwest::blocking::Client::new(); + + let mut req = match method { + "GET" => client.get(&url), + "POST" => client.post(&url), + "PUT" => client.put(&url), + "PATCH" => client.patch(&url), + "DELETE" => client.delete(&url), + _ => bail!("Unsupported HTTP method: {method}"), + }; + + req = req + .header("Content-Type", "application/json") + .header("Accept", "application/json"); + + if let Some(b) = body { + req = req.json(b); + } + + let resp = req.send().with_context(|| format!("HTTP {method} {url} failed"))?; + let status = resp.status().as_u16(); + + if !resp.status().is_success() { + if ok_statuses.contains(&status) { + return Ok(None); + } + let err_text = resp.text().unwrap_or_default(); + bail!("API error {status}: {err_text}"); + } + + let text = resp.text().unwrap_or_default(); + if text.is_empty() { + return Ok(None); + } + let val: Value = serde_json::from_str(&text) + .with_context(|| format!("Failed to parse API response as JSON: {text}"))?; + Ok(Some(val)) } -pub async fn cmd_user_disable(_target: &str) -> Result<()> { - todo!("cmd_user_disable: ory-kratos-client SDK") +/// Shorthand: Kratos admin API call (prefix = 
"/admin"). +fn kratos_api( + base_url: &str, + path: &str, + method: &str, + body: Option<&Value>, + ok_statuses: &[u16], +) -> Result> { + api(base_url, path, method, body, "/admin", ok_statuses) } -pub async fn cmd_user_enable(_target: &str) -> Result<()> { - todo!("cmd_user_enable: ory-kratos-client SDK") +// --------------------------------------------------------------------------- +// Identity helpers +// --------------------------------------------------------------------------- + +/// Find identity by UUID or email search. Returns the identity JSON. +fn find_identity(base_url: &str, target: &str, required: bool) -> Result> { + // Looks like a UUID? + if target.len() == 36 && target.chars().filter(|&c| c == '-').count() == 4 { + let result = kratos_api(base_url, &format!("/identities/{target}"), "GET", None, &[])?; + return Ok(result); + } + + // Search by email + let result = kratos_api( + base_url, + &format!("/identities?credentials_identifier={target}&page_size=1"), + "GET", + None, + &[], + )?; + + if let Some(Value::Array(arr)) = &result { + if let Some(first) = arr.first() { + return Ok(Some(first.clone())); + } + } + + if required { + bail!("Identity not found: {target}"); + } + Ok(None) } -pub async fn cmd_user_set_password(_target: &str, _password: &str) -> Result<()> { - todo!("cmd_user_set_password: ory-kratos-client SDK") +/// Build the PUT body for updating an identity, preserving all required fields. 
+fn identity_put_body(identity: &Value, state: Option<&str>, extra: Option<Value>) -> Value {
+    let mut body = serde_json::json!({
+        "schema_id": identity["schema_id"],
+        "traits": identity["traits"],
+        "state": state.unwrap_or_else(|| identity.get("state").and_then(|v| v.as_str()).unwrap_or("active")),
+        "metadata_public": identity.get("metadata_public").cloned().unwrap_or(Value::Null),
+        "metadata_admin": identity.get("metadata_admin").cloned().unwrap_or(Value::Null),
+    });
+
+    // Shallow-merge `extra` on top: any top-level key in `extra` overrides the
+    // corresponding key built above (e.g. "credentials" for password resets).
+    if let Some(extra_obj) = extra {
+        if let (Some(base_map), Some(extra_map)) = (body.as_object_mut(), extra_obj.as_object()) {
+            for (k, v) in extra_map {
+                base_map.insert(k.clone(), v.clone());
+            }
+        }
+    }
+
+    body
+}
+
+/// Generate a 24h recovery code. Returns (link, code).
+///
+/// Missing fields in the API response degrade to empty strings rather than
+/// erroring, so callers should treat "" as "no link/code issued".
+fn generate_recovery(base_url: &str, identity_id: &str) -> Result<(String, String)> {
+    let body = serde_json::json!({
+        "identity_id": identity_id,
+        "expires_in": "24h",
+    });
+
+    let result = kratos_api(base_url, "/recovery/code", "POST", Some(&body), &[])?;
+
+    let recovery = result.unwrap_or_default();
+    let link = recovery
+        .get("recovery_link")
+        .and_then(|v| v.as_str())
+        .unwrap_or("")
+        .to_string();
+    let code = recovery
+        .get("recovery_code")
+        .and_then(|v| v.as_str())
+        .unwrap_or("")
+        .to_string();
+
+    Ok((link, code))
+}
+
+/// Find the next sequential employee ID by scanning all employee identities.
+///
+/// NOTE(review): only the first 200 identities are scanned (page_size=200);
+/// confirm this bound holds, or paginate, before the directory outgrows it.
+fn next_employee_id(base_url: &str) -> Result<String> {
+    let result = kratos_api(
+        base_url,
+        "/identities?page_size=200",
+        "GET",
+        None,
+        &[],
+    )?;
+
+    let identities = match result {
+        Some(Value::Array(arr)) => arr,
+        _ => vec![],
+    };
+
+    let mut max_num: u64 = 0;
+    for ident in &identities {
+        if let Some(eid) = ident
+            .get("traits")
+            .and_then(|t| t.get("employee_id"))
+            .and_then(|v| v.as_str())
+        {
+            // Non-numeric employee_ids are ignored rather than treated as errors.
+            if let Ok(n) = eid.parse::<u64>() {
+                max_num = max_num.max(n);
+            }
+        }
+    }
+
+    Ok((max_num + 1).to_string())
+}
+
+// ---------------------------------------------------------------------------
+// Display helpers
+// ---------------------------------------------------------------------------
+
+/// Extract a display name from identity traits (supports both default and employee schemas).
+fn display_name(traits: &Value) -> String {
+    // Employee schema: flat given_name / family_name fields.
+    let given = traits
+        .get("given_name")
+        .and_then(|v| v.as_str())
+        .unwrap_or("");
+    let family = traits
+        .get("family_name")
+        .and_then(|v| v.as_str())
+        .unwrap_or("");
+
+    if !given.is_empty() || !family.is_empty() {
+        return format!("{given} {family}").trim().to_string();
+    }
+
+    // Default schema: nested name.first / name.last object.
+    match traits.get("name") {
+        Some(Value::Object(name_map)) => {
+            let first = name_map
+                .get("first")
+                .and_then(|v| v.as_str())
+                .unwrap_or("");
+            let last = name_map
+                .get("last")
+                .and_then(|v| v.as_str())
+                .unwrap_or("");
+            format!("{first} {last}").trim().to_string()
+        }
+        Some(name) => name.as_str().unwrap_or("").to_string(),
+        None => String::new(),
+    }
+}
+
+/// Extract the short ID prefix (first 8 chars + "...").
+///
+/// NOTE(review): slices by byte index — fine for ASCII UUIDs, but would panic
+/// mid-character on a non-ASCII id; confirm ids are always UUIDs.
+fn short_id(id: &str) -> String {
+    if id.len() >= 8 {
+        format!("{}...", &id[..8])
+    } else {
+        id.to_string()
+    }
+}
+
+/// Get identity ID as a string from a JSON value.
+fn identity_id(identity: &Value) -> Result { + identity + .get("id") + .and_then(|v| v.as_str()) + .map(|s| s.to_string()) + .context("Identity missing 'id' field") +} + +// --------------------------------------------------------------------------- +// Public commands +// --------------------------------------------------------------------------- + +pub async fn cmd_user_list(search: &str) -> Result<()> { + step("Listing identities..."); + + let pf = PortForward::kratos()?; + let mut path = "/identities?page_size=20".to_string(); + if !search.is_empty() { + path.push_str(&format!("&credentials_identifier={search}")); + } + let result = kratos_api(&pf.base_url, &path, "GET", None, &[])?; + drop(pf); + + let identities = match result { + Some(Value::Array(arr)) => arr, + _ => vec![], + }; + + let rows: Vec> = identities + .iter() + .map(|i| { + let traits = i.get("traits").cloned().unwrap_or(Value::Object(Default::default())); + let email = traits + .get("email") + .and_then(|v| v.as_str()) + .unwrap_or("") + .to_string(); + let name = display_name(&traits); + let state = i + .get("state") + .and_then(|v| v.as_str()) + .unwrap_or("active") + .to_string(); + let id = i + .get("id") + .and_then(|v| v.as_str()) + .unwrap_or(""); + vec![short_id(id), email, name, state] + }) + .collect(); + + println!("{}", table(&rows, &["ID", "Email", "Name", "State"])); + Ok(()) +} + +pub async fn cmd_user_get(target: &str) -> Result<()> { + step(&format!("Getting identity: {target}")); + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? 
+ .context("Identity not found")?; + drop(pf); + + println!("{}", serde_json::to_string_pretty(&identity)?); + Ok(()) +} + +pub async fn cmd_user_create(email: &str, name: &str, schema_id: &str) -> Result<()> { + step(&format!("Creating identity: {email}")); + + let mut traits = serde_json::json!({ "email": email }); + if !name.is_empty() { + let parts: Vec<&str> = name.splitn(2, ' ').collect(); + traits["name"] = serde_json::json!({ + "first": parts[0], + "last": if parts.len() > 1 { parts[1] } else { "" }, + }); + } + + let body = serde_json::json!({ + "schema_id": schema_id, + "traits": traits, + "state": "active", + }); + + let pf = PortForward::kratos()?; + let identity = kratos_api(&pf.base_url, "/identities", "POST", Some(&body), &[])? + .context("Failed to create identity")?; + + let iid = identity_id(&identity)?; + ok(&format!("Created identity: {iid}")); + + let (link, code) = generate_recovery(&pf.base_url, &iid)?; + drop(pf); + + ok("Recovery link (valid 24h):"); + println!("{link}"); + ok("Recovery code (enter on the page above):"); + println!("{code}"); + Ok(()) +} + +pub async fn cmd_user_delete(target: &str) -> Result<()> { + step(&format!("Deleting identity: {target}")); + + eprint!("Delete identity '{target}'? This cannot be undone. [y/N] "); + std::io::stderr().flush()?; + let mut answer = String::new(); + std::io::stdin().read_line(&mut answer)?; + if answer.trim().to_lowercase() != "y" { + ok("Cancelled."); + return Ok(()); + } + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? 
+ .context("Identity not found")?; + let iid = identity_id(&identity)?; + kratos_api( + &pf.base_url, + &format!("/identities/{iid}"), + "DELETE", + None, + &[], + )?; + drop(pf); + + ok("Deleted."); + Ok(()) +} + +pub async fn cmd_user_recover(target: &str) -> Result<()> { + step(&format!("Generating recovery link for: {target}")); + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? + .context("Identity not found")?; + let iid = identity_id(&identity)?; + let (link, code) = generate_recovery(&pf.base_url, &iid)?; + drop(pf); + + ok("Recovery link (valid 24h):"); + println!("{link}"); + ok("Recovery code (enter on the page above):"); + println!("{code}"); + Ok(()) +} + +pub async fn cmd_user_disable(target: &str) -> Result<()> { + step(&format!("Disabling identity: {target}")); + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? + .context("Identity not found")?; + let iid = identity_id(&identity)?; + + let put_body = identity_put_body(&identity, Some("inactive"), None); + kratos_api( + &pf.base_url, + &format!("/identities/{iid}"), + "PUT", + Some(&put_body), + &[], + )?; + kratos_api( + &pf.base_url, + &format!("/identities/{iid}/sessions"), + "DELETE", + None, + &[], + )?; + drop(pf); + + ok(&format!( + "Identity {}... disabled and all Kratos sessions revoked.", + &iid[..8.min(iid.len())] + )); + warn("App sessions (docs/people) expire within SESSION_COOKIE_AGE -- currently 1h."); + Ok(()) +} + +pub async fn cmd_user_enable(target: &str) -> Result<()> { + step(&format!("Enabling identity: {target}")); + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? 
+ .context("Identity not found")?; + let iid = identity_id(&identity)?; + + let put_body = identity_put_body(&identity, Some("active"), None); + kratos_api( + &pf.base_url, + &format!("/identities/{iid}"), + "PUT", + Some(&put_body), + &[], + )?; + drop(pf); + + ok(&format!("Identity {}... re-enabled.", short_id(&iid))); + Ok(()) +} + +pub async fn cmd_user_set_password(target: &str, password: &str) -> Result<()> { + step(&format!("Setting password for: {target}")); + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? + .context("Identity not found")?; + let iid = identity_id(&identity)?; + + let extra = serde_json::json!({ + "credentials": { + "password": { + "config": { + "password": password, + } + } + } + }); + let put_body = identity_put_body(&identity, None, Some(extra)); + kratos_api( + &pf.base_url, + &format!("/identities/{iid}"), + "PUT", + Some(&put_body), + &[], + )?; + drop(pf); + + ok(&format!("Password set for {}...", short_id(&iid))); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Onboard +// --------------------------------------------------------------------------- + +/// Send a welcome email via cluster Postfix (port-forward to svc/postfix in lasuite). +fn send_welcome_email( + domain: &str, + email: &str, + name: &str, + recovery_link: &str, + recovery_code: &str, +) -> Result<()> { + let greeting = if name.is_empty() { + "Hi".to_string() + } else { + format!("Hi {name}") + }; + + let body_text = format!( + "{greeting}, + +Welcome to Sunbeam Studios! Your account has been created. + +To set your password, open this link and enter the recovery code below: + + Link: {recovery_link} + Code: {recovery_code} + +This link expires in 24 hours. + +Once signed in you will be prompted to set up 2FA (mandatory). + +After that, head to https://auth.{domain}/settings to set up your +profile -- add your name, profile picture, and any other details. 
+ +Your services: + Calendar: https://cal.{domain} + Drive: https://drive.{domain} + Mail: https://mail.{domain} + Meet: https://meet.{domain} + Projects: https://projects.{domain} + Source Code: https://src.{domain} + +Messages (Matrix): + Download Element from https://element.io/download + Open Element and sign in with a custom homeserver: + Homeserver: https://messages.{domain} + Use \"Sign in with Sunbeam Studios\" (SSO) to log in. + +-- With Love & Warmth, Sunbeam Studios +" + ); + + use lettre::message::Mailbox; + use lettre::{Message, SmtpTransport, Transport}; + + let from: Mailbox = format!("Sunbeam Studios ") + .parse() + .context("Invalid from address")?; + let to: Mailbox = email.parse().context("Invalid recipient address")?; + + let message = Message::builder() + .from(from) + .to(to) + .subject("Welcome to Sunbeam Studios -- Set Your Password") + .body(body_text) + .context("Failed to build email message")?; + + let _pf = PortForward::new("lasuite", "postfix", SMTP_LOCAL_PORT, 25)?; + + let mailer = SmtpTransport::builder_dangerous("localhost") + .port(SMTP_LOCAL_PORT) + .build(); + + mailer + .send(&message) + .context("Failed to send welcome email via SMTP")?; + + ok(&format!("Welcome email sent to {email}")); + Ok(()) } #[allow(clippy::too_many_arguments)] pub async fn cmd_user_onboard( - _email: &str, - _name: &str, - _schema_id: &str, - _send_email: bool, - _notify: &str, - _job_title: &str, - _department: &str, - _office_location: &str, - _hire_date: &str, - _manager: &str, + email: &str, + name: &str, + schema_id: &str, + send_email: bool, + notify: &str, + job_title: &str, + department: &str, + office_location: &str, + hire_date: &str, + manager: &str, ) -> Result<()> { - todo!("cmd_user_onboard: ory-kratos-client SDK + lettre SMTP") + step(&format!("Onboarding: {email}")); + + let pf = PortForward::kratos()?; + + let (iid, recovery_link, recovery_code) = { + let existing = find_identity(&pf.base_url, email, false)?; + + if let Some(existing) 
= existing { + let iid = identity_id(&existing)?; + warn(&format!("Identity already exists: {}...", short_id(&iid))); + step("Generating fresh recovery link..."); + let (link, code) = generate_recovery(&pf.base_url, &iid)?; + (iid, link, code) + } else { + let mut traits = serde_json::json!({ "email": email }); + if !name.is_empty() { + let parts: Vec<&str> = name.splitn(2, ' ').collect(); + traits["given_name"] = Value::String(parts[0].to_string()); + traits["family_name"] = + Value::String(if parts.len() > 1 { parts[1] } else { "" }.to_string()); + } + + let mut employee_id = String::new(); + if schema_id == "employee" { + employee_id = next_employee_id(&pf.base_url)?; + traits["employee_id"] = Value::String(employee_id.clone()); + if !job_title.is_empty() { + traits["job_title"] = Value::String(job_title.to_string()); + } + if !department.is_empty() { + traits["department"] = Value::String(department.to_string()); + } + if !office_location.is_empty() { + traits["office_location"] = Value::String(office_location.to_string()); + } + if !hire_date.is_empty() { + traits["hire_date"] = Value::String(hire_date.to_string()); + } + if !manager.is_empty() { + traits["manager"] = Value::String(manager.to_string()); + } + } + + let body = serde_json::json!({ + "schema_id": schema_id, + "traits": traits, + "state": "active", + "verifiable_addresses": [{ + "value": email, + "verified": true, + "via": "email", + }], + }); + + let identity = kratos_api(&pf.base_url, "/identities", "POST", Some(&body), &[])? 
+ .context("Failed to create identity")?; + + let iid = identity_id(&identity)?; + ok(&format!("Created identity: {iid}")); + if !employee_id.is_empty() { + ok(&format!("Employee #{employee_id}")); + } + + // Kratos ignores verifiable_addresses on POST -- PATCH to mark verified + let patch_body = serde_json::json!([ + {"op": "replace", "path": "/verifiable_addresses/0/verified", "value": true}, + {"op": "replace", "path": "/verifiable_addresses/0/status", "value": "completed"}, + ]); + kratos_api( + &pf.base_url, + &format!("/identities/{iid}"), + "PATCH", + Some(&patch_body), + &[], + )?; + + let (link, code) = generate_recovery(&pf.base_url, &iid)?; + (iid, link, code) + } + }; + + drop(pf); + + if send_email { + let domain = crate::kube::get_domain().await?; + let recipient = if notify.is_empty() { email } else { notify }; + send_welcome_email(&domain, recipient, name, &recovery_link, &recovery_code)?; + } + + ok(&format!("Identity ID: {iid}")); + ok("Recovery link (valid 24h):"); + println!("{recovery_link}"); + ok("Recovery code:"); + println!("{recovery_code}"); + Ok(()) } -pub async fn cmd_user_offboard(_target: &str) -> Result<()> { - todo!("cmd_user_offboard: ory-kratos-client + ory-hydra-client SDK") +// --------------------------------------------------------------------------- +// Offboard +// --------------------------------------------------------------------------- + +pub async fn cmd_user_offboard(target: &str) -> Result<()> { + step(&format!("Offboarding: {target}")); + + eprint!("Offboard '{target}'? This will disable the account and revoke all sessions. [y/N] "); + std::io::stderr().flush()?; + let mut answer = String::new(); + std::io::stdin().read_line(&mut answer)?; + if answer.trim().to_lowercase() != "y" { + ok("Cancelled."); + return Ok(()); + } + + let pf = PortForward::kratos()?; + let identity = find_identity(&pf.base_url, target, true)? 
+ .context("Identity not found")?; + let iid = identity_id(&identity)?; + + step("Disabling identity..."); + let put_body = identity_put_body(&identity, Some("inactive"), None); + kratos_api( + &pf.base_url, + &format!("/identities/{iid}"), + "PUT", + Some(&put_body), + &[], + )?; + ok(&format!("Identity {}... disabled.", short_id(&iid))); + + step("Revoking Kratos sessions..."); + kratos_api( + &pf.base_url, + &format!("/identities/{iid}/sessions"), + "DELETE", + None, + &[404], + )?; + ok("Kratos sessions revoked."); + + step("Revoking Hydra consent sessions..."); + { + let hydra_pf = PortForward::new("ory", "hydra-admin", 14445, 4445)?; + api( + &hydra_pf.base_url, + &format!("/oauth2/auth/sessions/consent?subject={iid}&all=true"), + "DELETE", + None, + "/admin", + &[404], + )?; + } + ok("Hydra consent sessions revoked."); + + drop(pf); + + ok(&format!("Offboarding complete for {}...", short_id(&iid))); + warn("Existing access tokens expire within ~1h (Hydra TTL)."); + warn("App sessions (docs/people) expire within SESSION_COOKIE_AGE (~1h)."); + Ok(()) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_display_name_employee_schema() { + let traits = serde_json::json!({ + "email": "test@example.com", + "given_name": "Alice", + "family_name": "Smith", + }); + assert_eq!(display_name(&traits), "Alice Smith"); + } + + #[test] + fn test_display_name_default_schema() { + let traits = serde_json::json!({ + "email": "test@example.com", + "name": { "first": "Bob", "last": "Jones" }, + }); + assert_eq!(display_name(&traits), "Bob Jones"); + } + + #[test] + fn test_display_name_empty() { + let traits = serde_json::json!({ "email": "test@example.com" }); + assert_eq!(display_name(&traits), ""); + } + + #[test] + fn test_display_name_given_only() { + let traits = serde_json::json!({ + 
"given_name": "Alice", + }); + assert_eq!(display_name(&traits), "Alice"); + } + + #[test] + fn test_short_id() { + assert_eq!( + short_id("12345678-abcd-1234-abcd-123456789012"), + "12345678..." + ); + } + + #[test] + fn test_short_id_short() { + assert_eq!(short_id("abc"), "abc"); + } + + #[test] + fn test_identity_put_body_preserves_fields() { + let identity = serde_json::json!({ + "schema_id": "employee", + "traits": { "email": "a@b.com" }, + "state": "active", + "metadata_public": null, + "metadata_admin": null, + }); + + let body = identity_put_body(&identity, Some("inactive"), None); + assert_eq!(body["state"], "inactive"); + assert_eq!(body["schema_id"], "employee"); + assert_eq!(body["traits"]["email"], "a@b.com"); + } + + #[test] + fn test_identity_put_body_with_extra() { + let identity = serde_json::json!({ + "schema_id": "default", + "traits": { "email": "a@b.com" }, + "state": "active", + }); + + let extra = serde_json::json!({ + "credentials": { + "password": { "config": { "password": "s3cret" } } + } + }); + let body = identity_put_body(&identity, None, Some(extra)); + assert_eq!(body["state"], "active"); + assert!(body["credentials"]["password"]["config"]["password"] == "s3cret"); + } + + #[test] + fn test_identity_put_body_default_state() { + let identity = serde_json::json!({ + "schema_id": "default", + "traits": {}, + "state": "inactive", + }); + let body = identity_put_body(&identity, None, None); + assert_eq!(body["state"], "inactive"); + } + + #[test] + fn test_identity_id_extraction() { + let identity = serde_json::json!({ "id": "12345678-abcd-1234-abcd-123456789012" }); + assert_eq!( + identity_id(&identity).unwrap(), + "12345678-abcd-1234-abcd-123456789012" + ); + } + + #[test] + fn test_identity_id_missing() { + let identity = serde_json::json!({}); + assert!(identity_id(&identity).is_err()); + } }