From bc65b9157dccefa03553d619f847c3d7af85d0f9 Mon Sep 17 00:00:00 2001 From: Sienna Meridian Satterwhite Date: Sat, 21 Mar 2026 14:37:47 +0000 Subject: [PATCH] refactor: SDK images and secrets modules with submodule splits Split images.rs (1809L) into mod.rs + builders.rs (per-service build functions). Split secrets.rs (1727L) into mod.rs + seeding.rs (KV get_or_create, seed_openbao) + db_engine.rs (PostgreSQL static roles). Moves BuildTarget enum from cli.rs into images/mod.rs with conditional clap::ValueEnum derive behind the "cli" feature. --- sunbeam-sdk/src/images/builders.rs | 806 +++++++++++++++++++ sunbeam-sdk/src/images/mod.rs | 1070 +++++++++++++++++++++++++ sunbeam-sdk/src/secrets/db_engine.rs | 107 +++ sunbeam-sdk/src/secrets/mod.rs | 1106 ++++++++++++++++++++++++++ sunbeam-sdk/src/secrets/seeding.rs | 542 +++++++++++++ 5 files changed, 3631 insertions(+) create mode 100644 sunbeam-sdk/src/images/builders.rs create mode 100644 sunbeam-sdk/src/images/mod.rs create mode 100644 sunbeam-sdk/src/secrets/db_engine.rs create mode 100644 sunbeam-sdk/src/secrets/mod.rs create mode 100644 sunbeam-sdk/src/secrets/seeding.rs diff --git a/sunbeam-sdk/src/images/builders.rs b/sunbeam-sdk/src/images/builders.rs new file mode 100644 index 0000000..a0bd9c9 --- /dev/null +++ b/sunbeam-sdk/src/images/builders.rs @@ -0,0 +1,806 @@ +//! Per-service image build functions. + +use crate::error::{Result, ResultExt, SunbeamError}; +use crate::output::{ok, step, warn}; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; + +use super::{build_image, deploy_rollout, get_build_env}; + +/// Message component definition: (cli_name, image_name, dockerfile_rel, target). 
pub const MESSAGES_COMPONENTS: &[(&str, &str, &str, Option<&str>)] = &[
    (
        "messages-backend",
        "messages-backend",
        "src/backend/Dockerfile",
        Some("runtime-distroless-prod"),
    ),
    (
        "messages-frontend",
        "messages-frontend",
        "src/frontend/Dockerfile",
        Some("runtime-prod"),
    ),
    (
        "messages-mta-in",
        "messages-mta-in",
        "src/mta-in/Dockerfile",
        None,
    ),
    (
        "messages-mta-out",
        "messages-mta-out",
        "src/mta-out/Dockerfile",
        None,
    ),
    (
        "messages-mpa",
        "messages-mpa",
        "src/mpa/rspamd/Dockerfile",
        None,
    ),
    (
        "messages-socks-proxy",
        "messages-socks-proxy",
        "src/socks-proxy/Dockerfile",
        None,
    ),
];

/// Build the sunbeam-proxy image; on `deploy`, roll the `pingora`
/// deployment in the `ingress` namespace (pre-pulling the image on nodes).
pub async fn build_proxy(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let proxy_dir = crate::config::get_repo_root().join("proxy");
    if !proxy_dir.is_dir() {
        return Err(SunbeamError::build(format!("Proxy source not found at {}", proxy_dir.display())));
    }

    let image = format!("{}/studio/proxy:latest", env.registry);
    step(&format!("Building sunbeam-proxy -> {image} ..."));

    build_image(
        &env,
        &image,
        &proxy_dir.join("Dockerfile"),
        &proxy_dir,
        None,
        None,
        push,
        no_cache,
        &[],
    )
    .await?;

    if deploy {
        // Passing the image makes deploy_rollout ctr-pull it on every node first.
        deploy_rollout(&env, &["pingora"], "ingress", 120, Some(&[image])).await?;
    }
    Ok(())
}

/// Build the tuwunel (Matrix homeserver) image; on `deploy`, roll the
/// `tuwunel` deployment in the `matrix` namespace.
pub async fn build_tuwunel(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let tuwunel_dir = crate::config::get_repo_root().join("tuwunel");
    if !tuwunel_dir.is_dir() {
        return Err(SunbeamError::build(format!("Tuwunel source not found at {}", tuwunel_dir.display())));
    }

    let image = format!("{}/studio/tuwunel:latest", env.registry);
    step(&format!("Building tuwunel -> {image} ..."));

    build_image(
        &env,
        &image,
        &tuwunel_dir.join("Dockerfile"),
        &tuwunel_dir,
        None,
        None,
        push,
        no_cache,
        &[],
    )
    .await?;

    if deploy {
        deploy_rollout(&env, &["tuwunel"], "matrix", 180, Some(&[image])).await?;
    }
    Ok(())
}

/// Build the integration-service image. The Docker build context is the
/// whole sunbeam repo root (the Dockerfile copies from the `integration`
/// sibling checkout), so the service's `.dockerignore` is temporarily
/// copied to the context root for the duration of the build.
pub async fn build_integration(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let sunbeam_dir = crate::config::get_repo_root();
    let integration_service_dir = sunbeam_dir.join("integration-service");
    let dockerfile = integration_service_dir.join("Dockerfile");
    let dockerignore = integration_service_dir.join(".dockerignore");

    if !dockerfile.exists() {
        return Err(SunbeamError::build(format!(
            "integration-service Dockerfile not found at {}",
            dockerfile.display()
        )));
    }
    if !sunbeam_dir
        .join("integration")
        .join("packages")
        .join("widgets")
        .is_dir()
    {
        return Err(SunbeamError::build(format!(
            "integration repo not found at {} -- \
            run: cd sunbeam && git clone https://github.com/suitenumerique/integration.git",
            sunbeam_dir.join("integration").display()
        )));
    }

    let image = format!("{}/studio/integration:latest", env.registry);
    step(&format!("Building integration -> {image} ..."));

    // .dockerignore needs to be at context root
    let root_ignore = sunbeam_dir.join(".dockerignore");
    let mut copied_ignore = false;
    if !root_ignore.exists() && dockerignore.exists() {
        // NOTE(review): the flag is set even if the copy fails; removal below
        // is guarded by exists(), so a failed copy is harmless but silent.
        std::fs::copy(&dockerignore, &root_ignore).ok();
        copied_ignore = true;
    }

    let result = build_image(
        &env,
        &image,
        &dockerfile,
        &sunbeam_dir,
        None,
        None,
        push,
        no_cache,
        &[],
    )
    .await;

    // Remove the temporary root .dockerignore before propagating any error.
    if copied_ignore && root_ignore.exists() {
        let _ = std::fs::remove_file(&root_ignore);
    }

    result?;

    if deploy {
        deploy_rollout(&env, &["integration"], "lasuite", 120, None).await?;
    }
    Ok(())
}

/// Build the kratos-admin-ui image; on `deploy`, roll it in the `ory` namespace.
pub async fn build_kratos_admin(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let kratos_admin_dir = crate::config::get_repo_root().join("kratos-admin");
    if !kratos_admin_dir.is_dir() {
        return Err(SunbeamError::build(format!(
            "kratos-admin source not found at {}",
            kratos_admin_dir.display()
        )));
    }

    let image = format!("{}/studio/kratos-admin-ui:latest", env.registry);
    step(&format!("Building kratos-admin-ui -> {image} ..."));

    build_image(
        &env,
        &image,
        &kratos_admin_dir.join("Dockerfile"),
        &kratos_admin_dir,
        None,
        None,
        push,
        no_cache,
        &[],
    )
    .await?;

    if deploy {
        deploy_rollout(&env, &["kratos-admin-ui"], "ory", 120, None).await?;
    }
    Ok(())
}

/// Build both meet images (backend from the repo-root Dockerfile, frontend
/// from src/frontend) and optionally roll the three meet deployments.
pub async fn build_meet(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let meet_dir = crate::config::get_repo_root().join("meet");
    if !meet_dir.is_dir() {
        return Err(SunbeamError::build(format!("meet source not found at {}", meet_dir.display())));
    }

    let backend_image = format!("{}/studio/meet-backend:latest", env.registry);
    let frontend_image = format!("{}/studio/meet-frontend:latest", env.registry);

    // Backend
    step(&format!("Building meet-backend -> {backend_image} ..."));
    build_image(
        &env,
        &backend_image,
        &meet_dir.join("Dockerfile"),
        &meet_dir,
        Some("backend-production"),
        None,
        push,
        no_cache,
        &[],
    )
    .await?;

    // Frontend
    step(&format!("Building meet-frontend -> {frontend_image} ..."));
    let frontend_dockerfile = meet_dir.join("src").join("frontend").join("Dockerfile");
    if !frontend_dockerfile.exists() {
        return Err(SunbeamError::build(format!(
            "meet frontend Dockerfile not found at {}",
            frontend_dockerfile.display()
        )));
    }

    // Empty value: the frontend is built with a same-origin API base URL.
    let mut build_args = HashMap::new();
    build_args.insert("VITE_API_BASE_URL".to_string(), String::new());

    build_image(
        &env,
        &frontend_image,
        &frontend_dockerfile,
        &meet_dir,
        Some("frontend-production"),
        Some(&build_args),
        push,
        no_cache,
        &[],
    )
    .await?;

    if deploy {
        deploy_rollout(
            &env,
            &["meet-backend", "meet-celery-worker", "meet-frontend"],
            "lasuite",
            180,
            None,
        )
        .await?;
    }
    Ok(())
}

/// Build the people-frontend image. Runs `yarn install` in the frontend
/// workspace and regenerates cunningham design tokens in the desk app
/// before invoking the Docker build.
pub async fn build_people(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let people_dir = crate::config::get_repo_root().join("people");
    if !people_dir.is_dir() {
        return Err(SunbeamError::build(format!("people source not found at {}", people_dir.display())));
    }

    let workspace_dir = people_dir.join("src").join("frontend");
    let app_dir = workspace_dir.join("apps").join("desk");
    let dockerfile = workspace_dir.join("Dockerfile");
    if !dockerfile.exists() {
        return Err(SunbeamError::build(format!("Dockerfile not found at {}", dockerfile.display())));
    }

    let image = format!("{}/studio/people-frontend:latest", env.registry);
    step(&format!("Building people-frontend -> {image} ..."));

    // yarn install
    ok("Updating yarn.lock (yarn install in workspace)...");
    let yarn_status = tokio::process::Command::new("yarn")
        .args(["install", "--ignore-engines"])
        .current_dir(&workspace_dir)
        .status()
        .await
        .ctx("Failed to run yarn install")?;
    if !yarn_status.success() {
        return Err(SunbeamError::tool("yarn", "install failed"));
    }

    // cunningham design tokens
    ok("Regenerating cunningham design tokens...");
    let cunningham_bin = workspace_dir
        .join("node_modules")
        .join(".bin")
        .join("cunningham");
    let cunningham_status = tokio::process::Command::new(&cunningham_bin)
        .args(["-g", "css,ts", "-o", "src/cunningham", "--utility-classes"])
        .current_dir(&app_dir)
        .status()
        .await
        .ctx("Failed to run cunningham")?;
    if !cunningham_status.success() {
        return Err(SunbeamError::tool("cunningham", "design token generation failed"));
    }

    let mut build_args = HashMap::new();
    build_args.insert("DOCKER_USER".to_string(), "101".to_string());

    build_image(
        &env,
        &image,
        &dockerfile,
        &people_dir,
        Some("frontend-production"),
        Some(&build_args),
        push,
        no_cache,
        &[],
    )
    .await?;

    if deploy {
        deploy_rollout(&env, &["people-frontend"], "lasuite", 180, None).await?;
    }
    Ok(())
}

/// Build one or all messages components. `what` is either the literal
/// "messages" (build every entry of [`MESSAGES_COMPONENTS`]) or a single
/// component's cli name. Missing Dockerfiles are skipped with a warning.
pub async fn build_messages(what: &str, push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let messages_dir = crate::config::get_repo_root().join("messages");
    if !messages_dir.is_dir() {
        return Err(SunbeamError::build(format!("messages source not found at {}", messages_dir.display())));
    }

    let components: Vec<_> = if what == "messages" {
        MESSAGES_COMPONENTS.to_vec()
    } else {
        MESSAGES_COMPONENTS
            .iter()
            .filter(|(name, _, _, _)| *name == what)
            .copied()
            .collect()
    };

    let mut built_images = Vec::new();

    for (component, image_name, dockerfile_rel, target) in &components {
        let dockerfile = messages_dir.join(dockerfile_rel);
        if !dockerfile.exists() {
            warn(&format!(
                "Dockerfile not found at {} -- skipping {component}",
                dockerfile.display()
            ));
            continue;
        }

        let image = format!("{}/studio/{image_name}:latest", env.registry);
        let context_dir = dockerfile.parent().unwrap_or(&messages_dir);
        step(&format!("Building {component} -> {image} ..."));

        // Patch ghcr.io/astral-sh/uv COPY for messages-backend on local builds
        let mut cleanup_paths = Vec::new();
        let actual_dockerfile;

        if !env.is_prod && *image_name == "messages-backend" {
            // The patched Dockerfile (and staged uv binaries) are removed by
            // build_image via cleanup_paths after the build finishes.
            let (patched, cleanup) =
                patch_dockerfile_uv(&dockerfile, context_dir, &env.platform).await?;
            actual_dockerfile = patched;
            cleanup_paths = cleanup;
        } else {
            actual_dockerfile = dockerfile.clone();
        }

        build_image(
            &env,
            &image,
            &actual_dockerfile,
            context_dir,
            *target,
            None,
            push,
            no_cache,
            &cleanup_paths,
        )
        .await?;

        built_images.push(image);
    }

    if deploy && !built_images.is_empty() {
        // Always roll the full messages stack, even for a single-component build.
        deploy_rollout(
            &env,
            &[
                "messages-backend",
                "messages-worker",
                "messages-frontend",
                "messages-mta-in",
                "messages-mta-out",
                "messages-mpa",
                "messages-socks-proxy",
            ],
            "lasuite",
            180,
            None,
        )
        .await?;
    }

    Ok(())
}

/// Build a La Suite frontend image from source and push to the Gitea registry.
+#[allow(clippy::too_many_arguments)] +pub async fn build_la_suite_frontend( + app: &str, + repo_dir: &Path, + workspace_rel: &str, + app_rel: &str, + dockerfile_rel: &str, + image_name: &str, + deployment: &str, + namespace: &str, + push: bool, + deploy: bool, + no_cache: bool, +) -> Result<()> { + let env = get_build_env().await?; + + let workspace_dir = repo_dir.join(workspace_rel); + let app_dir = repo_dir.join(app_rel); + let dockerfile = repo_dir.join(dockerfile_rel); + + if !repo_dir.is_dir() { + return Err(SunbeamError::build(format!("{app} source not found at {}", repo_dir.display()))); + } + if !dockerfile.exists() { + return Err(SunbeamError::build(format!("Dockerfile not found at {}", dockerfile.display()))); + } + + let image = format!("{}/studio/{image_name}:latest", env.registry); + step(&format!("Building {app} -> {image} ...")); + + ok("Updating yarn.lock (yarn install in workspace)..."); + let yarn_status = tokio::process::Command::new("yarn") + .args(["install", "--ignore-engines"]) + .current_dir(&workspace_dir) + .status() + .await + .ctx("Failed to run yarn install")?; + if !yarn_status.success() { + return Err(SunbeamError::tool("yarn", "install failed")); + } + + ok("Regenerating cunningham design tokens (yarn build-theme)..."); + let theme_status = tokio::process::Command::new("yarn") + .args(["build-theme"]) + .current_dir(&app_dir) + .status() + .await + .ctx("Failed to run yarn build-theme")?; + if !theme_status.success() { + return Err(SunbeamError::tool("yarn", "build-theme failed")); + } + + let mut build_args = HashMap::new(); + build_args.insert("DOCKER_USER".to_string(), "101".to_string()); + + build_image( + &env, + &image, + &dockerfile, + repo_dir, + Some("frontend-production"), + Some(&build_args), + push, + no_cache, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &[deployment], namespace, 180, None).await?; + } + Ok(()) +} + +/// Download uv from GitHub releases and return a patched Dockerfile path. 
+pub async fn patch_dockerfile_uv( + dockerfile_path: &Path, + context_dir: &Path, + platform: &str, +) -> Result<(PathBuf, Vec)> { + let content = std::fs::read_to_string(dockerfile_path) + .ctx("Failed to read Dockerfile for uv patching")?; + + // Match COPY --from=ghcr.io/astral-sh/uv@sha256:... /uv /uvx /bin/ + let original_copy = content + .lines() + .find(|line| { + line.contains("COPY") + && line.contains("--from=ghcr.io/astral-sh/uv@sha256:") + && line.contains("/uv") + && line.contains("/bin/") + }) + .map(|line| line.trim().to_string()); + + let original_copy = match original_copy { + Some(c) => c, + None => return Ok((dockerfile_path.to_path_buf(), vec![])), + }; + + // Find uv version from comment like: oci://ghcr.io/astral-sh/uv:0.x.y + let version = content + .lines() + .find_map(|line| { + let marker = "oci://ghcr.io/astral-sh/uv:"; + if let Some(idx) = line.find(marker) { + let rest = &line[idx + marker.len()..]; + let ver = rest.split_whitespace().next().unwrap_or(""); + if !ver.is_empty() { + Some(ver.to_string()) + } else { + None + } + } else { + None + } + }); + + let version = match version { + Some(v) => v, + None => { + warn("Could not find uv version comment in Dockerfile; ghcr.io pull may fail."); + return Ok((dockerfile_path.to_path_buf(), vec![])); + } + }; + + let arch = if platform.contains("amd64") { + "x86_64" + } else { + "aarch64" + }; + + let url = format!( + "https://github.com/astral-sh/uv/releases/download/{version}/uv-{arch}-unknown-linux-gnu.tar.gz" + ); + + let stage_dir = context_dir.join("_sunbeam_uv_stage"); + let patched_df = dockerfile_path + .parent() + .unwrap_or(dockerfile_path) + .join("Dockerfile._sunbeam_patched"); + let cleanup = vec![stage_dir.clone(), patched_df.clone()]; + + ok(&format!( + "Downloading uv {version} ({arch}) from GitHub releases to bypass ghcr.io..." 
+ )); + + std::fs::create_dir_all(&stage_dir)?; + + // Download tarball + let response = reqwest::get(&url) + .await + .ctx("Failed to download uv release")?; + let tarball_bytes = response.bytes().await?; + + // Extract uv and uvx from tarball + let decoder = flate2::read::GzDecoder::new(&tarball_bytes[..]); + let mut archive = tar::Archive::new(decoder); + + for entry in archive.entries()? { + let mut entry = entry?; + let path = entry.path()?.to_path_buf(); + let file_name = path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(); + + if (file_name == "uv" || file_name == "uvx") && entry.header().entry_type().is_file() { + let dest = stage_dir.join(&file_name); + let mut outfile = std::fs::File::create(&dest)?; + std::io::copy(&mut entry, &mut outfile)?; + + #[cfg(unix)] + { + use std::os::unix::fs::PermissionsExt; + std::fs::set_permissions(&dest, std::fs::Permissions::from_mode(0o755))?; + } + } + } + + if !stage_dir.join("uv").exists() { + warn("uv binary not found in release tarball; build may fail."); + return Ok((dockerfile_path.to_path_buf(), cleanup)); + } + + let patched = content.replace( + &original_copy, + "COPY _sunbeam_uv_stage/uv _sunbeam_uv_stage/uvx /bin/", + ); + std::fs::write(&patched_df, patched)?; + ok(&format!(" uv {version} staged; using patched Dockerfile.")); + + Ok((patched_df, cleanup)) +} + +pub async fn build_projects(push: bool, deploy: bool, no_cache: bool) -> Result<()> { + let env = get_build_env().await?; + let projects_dir = crate::config::get_repo_root().join("projects"); + if !projects_dir.is_dir() { + return Err(SunbeamError::build(format!("projects source not found at {}", projects_dir.display()))); + } + + let image = format!("{}/studio/projects:latest", env.registry); + step(&format!("Building projects -> {image} ...")); + + build_image( + &env, + &image, + &projects_dir.join("Dockerfile"), + &projects_dir, + None, + None, + push, + no_cache, + &[], + ) + .await?; + + if deploy { + 
deploy_rollout(&env, &["projects"], "lasuite", 180, Some(&[image])).await?; + } + Ok(()) +} + +// TODO: first deploy requires registration enabled on tuwunel to create +// the @sol:sunbeam.pt bot account. Flow: +// 1. Set allow_registration = true in tuwunel-config.yaml +// 2. Apply + restart tuwunel +// 3. Register bot via POST /_matrix/client/v3/register with registration token +// 4. Store access_token + device_id in OpenBao at secret/sol +// 5. Set allow_registration = false, re-apply +// 6. Then build + deploy sol +// This should be automated as `sunbeam user create-bot `. +pub async fn build_sol(push: bool, deploy: bool, no_cache: bool) -> Result<()> { + let env = get_build_env().await?; + let sol_dir = crate::config::get_repo_root().join("sol"); + if !sol_dir.is_dir() { + return Err(SunbeamError::build(format!("Sol source not found at {}", sol_dir.display()))); + } + + let image = format!("{}/studio/sol:latest", env.registry); + step(&format!("Building sol -> {image} ...")); + + build_image( + &env, + &image, + &sol_dir.join("Dockerfile"), + &sol_dir, + None, + None, + push, + no_cache, + &[], + ) + .await?; + + if deploy { + deploy_rollout(&env, &["sol"], "matrix", 120, None).await?; + } + Ok(()) +} + +pub async fn build_calendars(push: bool, deploy: bool, no_cache: bool) -> Result<()> { + let env = get_build_env().await?; + let cal_dir = crate::config::get_repo_root().join("calendars"); + if !cal_dir.is_dir() { + return Err(SunbeamError::build(format!("calendars source not found at {}", cal_dir.display()))); + } + + let backend_dir = cal_dir.join("src").join("backend"); + let backend_image = format!("{}/studio/calendars-backend:latest", env.registry); + step(&format!("Building calendars-backend -> {backend_image} ...")); + + // Stage translations.json into the build context + let translations_src = cal_dir + .join("src") + .join("frontend") + .join("apps") + .join("calendars") + .join("src") + .join("features") + .join("i18n") + .join("translations.json"); 
+ + let translations_dst = backend_dir.join("_translations.json"); + let mut cleanup: Vec = Vec::new(); + let mut dockerfile = backend_dir.join("Dockerfile"); + + if translations_src.exists() { + std::fs::copy(&translations_src, &translations_dst)?; + cleanup.push(translations_dst); + + // Patch Dockerfile to COPY translations into production image + let mut content = std::fs::read_to_string(&dockerfile)?; + content.push_str( + "\n# Sunbeam: bake translations.json for default calendar names\n\ + COPY _translations.json /data/translations.json\n", + ); + let patched_df = backend_dir.join("Dockerfile._sunbeam_patched"); + std::fs::write(&patched_df, content)?; + cleanup.push(patched_df.clone()); + dockerfile = patched_df; + } + + build_image( + &env, + &backend_image, + &dockerfile, + &backend_dir, + Some("backend-production"), + None, + push, + no_cache, + &cleanup, + ) + .await?; + + // caldav + let caldav_image = format!("{}/studio/calendars-caldav:latest", env.registry); + step(&format!("Building calendars-caldav -> {caldav_image} ...")); + let caldav_dir = cal_dir.join("src").join("caldav"); + build_image( + &env, + &caldav_image, + &caldav_dir.join("Dockerfile"), + &caldav_dir, + None, + None, + push, + no_cache, + &[], + ) + .await?; + + // frontend + let frontend_image = format!("{}/studio/calendars-frontend:latest", env.registry); + step(&format!( + "Building calendars-frontend -> {frontend_image} ..." 
+ )); + let integration_base = format!("https://integration.{}", env.domain); + let mut build_args = HashMap::new(); + build_args.insert( + "VISIO_BASE_URL".to_string(), + format!("https://meet.{}", env.domain), + ); + build_args.insert( + "GAUFRE_WIDGET_PATH".to_string(), + format!("{integration_base}/api/v2/lagaufre.js"), + ); + build_args.insert( + "GAUFRE_API_URL".to_string(), + format!("{integration_base}/api/v2/services.json"), + ); + build_args.insert( + "THEME_CSS_URL".to_string(), + format!("{integration_base}/api/v2/theme.css"), + ); + + let frontend_dir = cal_dir.join("src").join("frontend"); + build_image( + &env, + &frontend_image, + &frontend_dir.join("Dockerfile"), + &frontend_dir, + Some("frontend-production"), + Some(&build_args), + push, + no_cache, + &[], + ) + .await?; + + if deploy { + deploy_rollout( + &env, + &[ + "calendars-backend", + "calendars-worker", + "calendars-caldav", + "calendars-frontend", + ], + "lasuite", + 180, + Some(&[backend_image, caldav_image, frontend_image]), + ) + .await?; + } + Ok(()) +} diff --git a/sunbeam-sdk/src/images/mod.rs b/sunbeam-sdk/src/images/mod.rs new file mode 100644 index 0000000..b71f65d --- /dev/null +++ b/sunbeam-sdk/src/images/mod.rs @@ -0,0 +1,1070 @@ +//! Image building, mirroring, and pushing to Gitea registry. 
+ +pub mod builders; + +use crate::error::{Result, ResultExt, SunbeamError}; +use base64::Engine; +use std::collections::HashMap; +use std::path::{Path, PathBuf}; +use std::process::Stdio; + +use crate::constants::{GITEA_ADMIN_USER, MANAGED_NS}; +use crate::output::{ok, step, warn}; + +// --------------------------------------------------------------------------- +// BuildTarget enum (moved from cli.rs) +// --------------------------------------------------------------------------- + +#[derive(Debug, Clone)] +#[cfg_attr(feature = "cli", derive(clap::ValueEnum))] +pub enum BuildTarget { + Proxy, + Integration, + KratosAdmin, + Meet, + DocsFrontend, + PeopleFrontend, + People, + Messages, + MessagesBackend, + MessagesFrontend, + MessagesMtaIn, + MessagesMtaOut, + MessagesMpa, + MessagesSocksProxy, + Tuwunel, + Calendars, + Projects, + Sol, +} + +impl std::fmt::Display for BuildTarget { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let s = match self { + BuildTarget::Proxy => "proxy", + BuildTarget::Integration => "integration", + BuildTarget::KratosAdmin => "kratos-admin", + BuildTarget::Meet => "meet", + BuildTarget::DocsFrontend => "docs-frontend", + BuildTarget::PeopleFrontend => "people-frontend", + BuildTarget::People => "people", + BuildTarget::Messages => "messages", + BuildTarget::MessagesBackend => "messages-backend", + BuildTarget::MessagesFrontend => "messages-frontend", + BuildTarget::MessagesMtaIn => "messages-mta-in", + BuildTarget::MessagesMtaOut => "messages-mta-out", + BuildTarget::MessagesMpa => "messages-mpa", + BuildTarget::MessagesSocksProxy => "messages-socks-proxy", + BuildTarget::Tuwunel => "tuwunel", + BuildTarget::Calendars => "calendars", + BuildTarget::Projects => "projects", + BuildTarget::Sol => "sol", + }; + write!(f, "{s}") + } +} + +/// amd64-only images that need mirroring: (source, org, repo, tag). 
const AMD64_ONLY_IMAGES: &[(&str, &str, &str, &str)] = &[
    (
        "docker.io/lasuite/people-backend:latest",
        "studio",
        "people-backend",
        "latest",
    ),
    (
        "docker.io/lasuite/people-frontend:latest",
        "studio",
        "people-frontend",
        "latest",
    ),
    (
        "docker.io/lasuite/impress-backend:latest",
        "studio",
        "impress-backend",
        "latest",
    ),
    (
        "docker.io/lasuite/impress-frontend:latest",
        "studio",
        "impress-frontend",
        "latest",
    ),
    (
        "docker.io/lasuite/impress-y-provider:latest",
        "studio",
        "impress-y-provider",
        "latest",
    ),
];

// ---------------------------------------------------------------------------
// Build environment
// ---------------------------------------------------------------------------

/// Resolved build environment — production (remote k8s) or local.
#[derive(Debug, Clone)]
pub struct BuildEnv {
    // True when an SSH host is configured (remote cluster).
    pub is_prod: bool,
    // Cluster base domain.
    pub domain: String,
    // Registry host, `src.<domain>`.
    pub registry: String,
    // Gitea admin password used for registry auth.
    pub admin_pass: String,
    // Build platform: `linux/amd64` in prod, `linux/arm64` locally.
    pub platform: String,
    // SSH host for node access in prod; `None` locally.
    // Fix: the extracted patch lost the generic parameter (`Option,`).
    pub ssh_host: Option<String>,
}

// Detect prod vs local and resolve registry credentials.
+pub(crate) async fn get_build_env() -> Result { + let ssh = crate::kube::ssh_host(); + let is_prod = !ssh.is_empty(); + + let domain = crate::kube::get_domain().await?; + + // Fetch gitea admin password from the cluster secret + let admin_pass = crate::kube::kube_get_secret_field( + "devtools", + "gitea-admin-credentials", + "password", + ) + .await + .ctx("gitea-admin-credentials secret not found -- run seed first.")?; + + let platform = if is_prod { + "linux/amd64".to_string() + } else { + "linux/arm64".to_string() + }; + + let ssh_host = if is_prod { + Some(ssh.to_string()) + } else { + None + }; + + Ok(BuildEnv { + is_prod, + domain: domain.clone(), + registry: format!("src.{domain}"), + admin_pass, + platform, + ssh_host, + }) +} + +// --------------------------------------------------------------------------- +// buildctl build + push +// --------------------------------------------------------------------------- + +/// Build and push an image via buildkitd running in k8s. +/// +/// Port-forwards to the buildkitd service in the `build` namespace, +/// runs `buildctl build`, and pushes the image directly to the Gitea +/// registry from inside the cluster. 
+#[allow(clippy::too_many_arguments)] +pub(crate) async fn buildctl_build_and_push( + env: &BuildEnv, + image: &str, + dockerfile: &Path, + context_dir: &Path, + target: Option<&str>, + build_args: Option<&HashMap>, + _no_cache: bool, +) -> Result<()> { + // Find a free local port for port-forward + let listener = std::net::TcpListener::bind("127.0.0.1:0") + .ctx("Failed to bind ephemeral port")?; + let local_port = listener.local_addr()?.port(); + drop(listener); + + // Build docker config for registry auth + let auth_token = base64::engine::general_purpose::STANDARD + .encode(format!("{GITEA_ADMIN_USER}:{}", env.admin_pass)); + let docker_cfg = serde_json::json!({ + "auths": { + &env.registry: { "auth": auth_token } + } + }); + + let tmpdir = tempfile::TempDir::new().ctx("Failed to create temp dir")?; + let cfg_path = tmpdir.path().join("config.json"); + std::fs::write(&cfg_path, serde_json::to_string(&docker_cfg)?) + .ctx("Failed to write docker config")?; + + // Start port-forward to buildkitd + let ctx_arg = format!("--context={}", crate::kube::context()); + let pf_port_arg = format!("{local_port}:1234"); + + let mut pf = tokio::process::Command::new("kubectl") + .args([ + &ctx_arg, + "port-forward", + "-n", + "build", + "svc/buildkitd", + &pf_port_arg, + ]) + .stdout(Stdio::null()) + .stderr(Stdio::null()) + .spawn() + .ctx("Failed to start buildkitd port-forward")?; + + // Wait for port-forward to become ready + let deadline = tokio::time::Instant::now() + std::time::Duration::from_secs(15); + loop { + if tokio::time::Instant::now() > deadline { + pf.kill().await.ok(); + return Err(SunbeamError::tool("buildctl", format!("buildkitd port-forward on :{local_port} did not become ready within 15s"))); + } + if tokio::net::TcpStream::connect(format!("127.0.0.1:{local_port}")) + .await + .is_ok() + { + break; + } + tokio::time::sleep(std::time::Duration::from_millis(300)).await; + } + + // Build the buildctl command + let dockerfile_parent = dockerfile + .parent() 
+ .unwrap_or(dockerfile) + .to_string_lossy() + .to_string(); + let dockerfile_name = dockerfile + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(); + let context_str = context_dir.to_string_lossy().to_string(); + + let mut cmd_args = vec![ + "build".to_string(), + "--frontend".to_string(), + "dockerfile.v0".to_string(), + "--local".to_string(), + format!("context={context_str}"), + "--local".to_string(), + format!("dockerfile={dockerfile_parent}"), + "--opt".to_string(), + format!("filename={dockerfile_name}"), + "--opt".to_string(), + format!("platform={}", env.platform), + "--output".to_string(), + format!("type=image,name={image},push=true"), + ]; + + if let Some(tgt) = target { + cmd_args.push("--opt".to_string()); + cmd_args.push(format!("target={tgt}")); + } + + if _no_cache { + cmd_args.push("--no-cache".to_string()); + } + + if let Some(args) = build_args { + for (k, v) in args { + cmd_args.push("--opt".to_string()); + cmd_args.push(format!("build-arg:{k}={v}")); + } + } + + let buildctl_host = format!("tcp://127.0.0.1:{local_port}"); + let tmpdir_str = tmpdir.path().to_string_lossy().to_string(); + + let result = tokio::process::Command::new("buildctl") + .args(&cmd_args) + .env("BUILDKIT_HOST", &buildctl_host) + .env("DOCKER_CONFIG", &tmpdir_str) + .stdin(Stdio::null()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .status() + .await; + + // Always terminate port-forward + pf.kill().await.ok(); + pf.wait().await.ok(); + + match result { + Ok(status) if status.success() => Ok(()), + Ok(status) => return Err(SunbeamError::tool("buildctl", format!("exited with status {status}"))), + Err(e) => return Err(SunbeamError::tool("buildctl", format!("failed to run: {e}"))), + } +} + +// --------------------------------------------------------------------------- +// build_image wrapper +// --------------------------------------------------------------------------- + +/// Build a container image via buildkitd and push to the Gitea 
registry. +#[allow(clippy::too_many_arguments)] +pub(crate) async fn build_image( + env: &BuildEnv, + image: &str, + dockerfile: &Path, + context_dir: &Path, + target: Option<&str>, + build_args: Option<&HashMap>, + push: bool, + no_cache: bool, + cleanup_paths: &[PathBuf], +) -> Result<()> { + ok(&format!( + "Building image ({}{})...", + env.platform, + target + .map(|t| format!(", {t} target")) + .unwrap_or_default() + )); + + if !push { + warn("Builds require --push (buildkitd pushes directly to registry); skipping."); + return Ok(()); + } + + let result = buildctl_build_and_push( + env, + image, + dockerfile, + context_dir, + target, + build_args, + no_cache, + ) + .await; + + // Cleanup + for p in cleanup_paths { + if p.exists() { + if p.is_dir() { + let _ = std::fs::remove_dir_all(p); + } else { + let _ = std::fs::remove_file(p); + } + } + } + + result +} + +// --------------------------------------------------------------------------- +// Node operations +// --------------------------------------------------------------------------- + +/// Return one SSH-reachable IP per node in the cluster. 
+async fn get_node_addresses() -> Result> { + let client = crate::kube::get_client().await?; + let api: kube::api::Api = + kube::api::Api::all(client.clone()); + + let node_list = api + .list(&kube::api::ListParams::default()) + .await + .ctx("Failed to list nodes")?; + + let mut addresses = Vec::new(); + for node in &node_list.items { + if let Some(status) = &node.status { + if let Some(addrs) = &status.addresses { + // Prefer IPv4 InternalIP + let mut ipv4: Option = None; + let mut any_internal: Option = None; + + for addr in addrs { + if addr.type_ == "InternalIP" { + if !addr.address.contains(':') { + ipv4 = Some(addr.address.clone()); + } else if any_internal.is_none() { + any_internal = Some(addr.address.clone()); + } + } + } + + if let Some(ip) = ipv4.or(any_internal) { + addresses.push(ip); + } + } + } + } + + Ok(addresses) +} + +/// SSH to each k3s node and pull images into containerd. +pub(crate) async fn ctr_pull_on_nodes(env: &BuildEnv, images: &[String]) -> Result<()> { + if images.is_empty() { + return Ok(()); + } + + let nodes = get_node_addresses().await?; + if nodes.is_empty() { + warn("Could not detect node addresses; skipping ctr pull."); + return Ok(()); + } + + let ssh_user = env + .ssh_host + .as_deref() + .and_then(|h| h.split('@').next()) + .unwrap_or("root"); + + for node_ip in &nodes { + for img in images { + ok(&format!("Pulling {img} into containerd on {node_ip}...")); + let status = tokio::process::Command::new("ssh") + .args([ + "-p", + "2222", + "-o", + "StrictHostKeyChecking=no", + &format!("{ssh_user}@{node_ip}"), + &format!("sudo ctr -n k8s.io images pull {img}"), + ]) + .stdout(Stdio::null()) + .stderr(Stdio::piped()) + .status() + .await; + + match status { + Ok(s) if s.success() => ok(&format!("Pulled {img} on {node_ip}")), + _ => return Err(SunbeamError::tool("ctr", format!("pull failed on {node_ip} for {img}"))), + } + } + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// 
Deploy rollout +// --------------------------------------------------------------------------- + +/// Apply manifests for the target namespace and rolling-restart the given deployments. +pub(crate) async fn deploy_rollout( + env: &BuildEnv, + deployments: &[&str], + namespace: &str, + timeout_secs: u64, + images: Option<&[String]>, +) -> Result<()> { + let env_str = if env.is_prod { "production" } else { "local" }; + crate::manifests::cmd_apply(env_str, &env.domain, "", namespace).await?; + + // Pull fresh images into containerd on every node before rollout + if let Some(imgs) = images { + ctr_pull_on_nodes(env, imgs).await?; + } + + for dep in deployments { + ok(&format!("Rolling {dep}...")); + crate::kube::kube_rollout_restart(namespace, dep).await?; + } + + // Wait for rollout completion + for dep in deployments { + wait_deployment_ready(namespace, dep, timeout_secs).await?; + } + + ok("Redeployed."); + Ok(()) +} + +/// Wait for a deployment to become ready. +async fn wait_deployment_ready(ns: &str, deployment: &str, timeout_secs: u64) -> Result<()> { + use k8s_openapi::api::apps::v1::Deployment; + use std::time::{Duration, Instant}; + + let client = crate::kube::get_client().await?; + let api: kube::api::Api = kube::api::Api::namespaced(client.clone(), ns); + let deadline = Instant::now() + Duration::from_secs(timeout_secs); + + loop { + if Instant::now() > deadline { + return Err(SunbeamError::build(format!("Timed out waiting for deployment {ns}/{deployment}"))); + } + + if let Some(dep) = api.get_opt(deployment).await? 
{ + if let Some(status) = &dep.status { + if let Some(conditions) = &status.conditions { + let available = conditions + .iter() + .any(|c| c.type_ == "Available" && c.status == "True"); + if available { + return Ok(()); + } + } + } + } + + tokio::time::sleep(Duration::from_secs(3)).await; + } +} + +// --------------------------------------------------------------------------- +// Mirroring +// --------------------------------------------------------------------------- + +/// Docker Hub auth token response. +#[derive(serde::Deserialize)] +struct DockerAuthToken { + token: String, +} + +/// Fetch a Docker Hub auth token for the given repository. +async fn docker_hub_token(repo: &str) -> Result { + let url = format!( + "https://auth.docker.io/token?service=registry.docker.io&scope=repository:{repo}:pull" + ); + let resp: DockerAuthToken = reqwest::get(&url) + .await + .ctx("Failed to fetch Docker Hub token")? + .json() + .await + .ctx("Failed to parse Docker Hub token response")?; + Ok(resp.token) +} + +/// Fetch an OCI/Docker manifest index from Docker Hub. +async fn fetch_manifest_index( + repo: &str, + tag: &str, +) -> Result { + let token = docker_hub_token(repo).await?; + + let client = reqwest::Client::new(); + let url = format!("https://registry-1.docker.io/v2/{repo}/manifests/{tag}"); + let accept = "application/vnd.oci.image.index.v1+json,\ + application/vnd.docker.distribution.manifest.list.v2+json"; + + let resp = client + .get(&url) + .header("Authorization", format!("Bearer {token}")) + .header("Accept", accept) + .send() + .await + .ctx("Failed to fetch manifest from Docker Hub")?; + + if !resp.status().is_success() { + return Err(SunbeamError::build(format!( + "Docker Hub returned {} for {repo}:{tag}", + resp.status() + ))); + } + + resp.json() + .await + .ctx("Failed to parse manifest index JSON") +} + +/// Build an OCI tar archive containing a patched index that maps both +/// amd64 and arm64 to the same amd64 manifest. 
+fn make_oci_tar( + ref_name: &str, + new_index_bytes: &[u8], + amd64_manifest_bytes: &[u8], +) -> Result> { + use std::io::Write; + + let ix_hex = { + use sha2::Digest; + let hash = sha2::Sha256::digest(new_index_bytes); + hash.iter().map(|b| format!("{b:02x}")).collect::() + }; + + let new_index: serde_json::Value = serde_json::from_slice(new_index_bytes)?; + let amd64_hex = new_index["manifests"][0]["digest"] + .as_str() + .unwrap_or("") + .replace("sha256:", ""); + + let layout = serde_json::json!({"imageLayoutVersion": "1.0.0"}); + let layout_bytes = serde_json::to_vec(&layout)?; + + let top = serde_json::json!({ + "schemaVersion": 2, + "mediaType": "application/vnd.oci.image.index.v1+json", + "manifests": [{ + "mediaType": "application/vnd.oci.image.index.v1+json", + "digest": format!("sha256:{ix_hex}"), + "size": new_index_bytes.len(), + "annotations": { + "org.opencontainers.image.ref.name": ref_name, + }, + }], + }); + let top_bytes = serde_json::to_vec(&top)?; + + let mut buf = Vec::new(); + { + let mut builder = tar::Builder::new(&mut buf); + + let mut add_entry = |name: &str, data: &[u8]| -> Result<()> { + let mut header = tar::Header::new_gnu(); + header.set_size(data.len() as u64); + header.set_mode(0o644); + header.set_cksum(); + builder.append_data(&mut header, name, data)?; + Ok(()) + }; + + add_entry("oci-layout", &layout_bytes)?; + add_entry("index.json", &top_bytes)?; + add_entry(&format!("blobs/sha256/{ix_hex}"), new_index_bytes)?; + add_entry( + &format!("blobs/sha256/{amd64_hex}"), + amd64_manifest_bytes, + )?; + + builder.finish()?; + } + + // Flush + buf.flush().ok(); + Ok(buf) +} + +/// Mirror amd64-only La Suite images to the Gitea registry. +/// +/// The Python version ran a script inside the Lima VM via `limactl shell`. +/// Without Lima, we use reqwest for Docker registry token/manifest fetching +/// and construct OCI tars natively. The containerd import + push operations +/// require SSH to nodes and are implemented via subprocess. 
pub async fn cmd_mirror() -> Result<()> {
    step("Mirroring amd64-only images to Gitea registry...");

    let domain = crate::kube::get_domain().await?;
    // Gitea admin password is required to push into the target registry.
    let admin_pass = crate::kube::kube_get_secret_field(
        "devtools",
        "gitea-admin-credentials",
        "password",
    )
    .await
    .unwrap_or_default();

    if admin_pass.is_empty() {
        warn("Could not get gitea admin password; skipping mirror.");
        return Ok(());
    }

    let registry = format!("src.{domain}");

    let nodes = get_node_addresses().await.unwrap_or_default();
    if nodes.is_empty() {
        warn("No node addresses found; cannot mirror images (need SSH to containerd).");
        return Ok(());
    }

    // Determine SSH user
    let ssh_host_val = crate::kube::ssh_host();
    let ssh_user = if ssh_host_val.contains('@') {
        ssh_host_val.split('@').next().unwrap_or("root")
    } else {
        "root"
    };

    for (src, org, repo, tag) in AMD64_ONLY_IMAGES {
        let tgt = format!("{registry}/{org}/{repo}:{tag}");
        ok(&format!("Processing {src} -> {tgt}"));

        // Fetch manifest index from Docker Hub.
        // Split "repo:tag"; a missing tag defaults to "latest".
        let no_prefix = src.replace("docker.io/", "");
        let parts: Vec<&str> = no_prefix.splitn(2, ':').collect();
        let (docker_repo, docker_tag) = if parts.len() == 2 {
            (parts[0], parts[1])
        } else {
            (parts[0], "latest")
        };

        let index = match fetch_manifest_index(docker_repo, docker_tag).await {
            Ok(idx) => idx,
            Err(e) => {
                warn(&format!("Failed to fetch index for {src}: {e}"));
                continue;
            }
        };

        // Find amd64 manifest entry in the index
        let manifests = index["manifests"].as_array();
        let amd64 = manifests.and_then(|ms| {
            ms.iter().find(|m| {
                m["platform"]["architecture"].as_str() == Some("amd64")
                    && m["platform"]["os"].as_str() == Some("linux")
            })
        });

        let amd64 = match amd64 {
            Some(m) => m.clone(),
            None => {
                warn(&format!("No linux/amd64 entry in index for {src}; skipping"));
                continue;
            }
        };

        let amd64_digest = amd64["digest"]
            .as_str()
            .unwrap_or("")
            .to_string();

        // Fetch the actual amd64 manifest blob from registry.
        // NOTE(review): this `?` aborts the whole mirror run on a token
        // failure, unlike the warn-and-continue handling used everywhere
        // else in this loop — confirm that is intended.
        let token = docker_hub_token(docker_repo).await?;
        let manifest_url = format!(
            "https://registry-1.docker.io/v2/{docker_repo}/manifests/{amd64_digest}"
        );
        let client = reqwest::Client::new();
        let amd64_manifest_bytes = client
            .get(&manifest_url)
            .header("Authorization", format!("Bearer {token}"))
            .header(
                "Accept",
                "application/vnd.oci.image.manifest.v1+json,\
                application/vnd.docker.distribution.manifest.v2+json",
            )
            .send()
            .await?
            .bytes()
            .await?;

        // Build patched index: amd64 + arm64 alias pointing to same manifest,
        // so arm64 nodes can run the amd64 image (emulated).
        let arm64_entry = serde_json::json!({
            "mediaType": amd64["mediaType"],
            "digest": amd64["digest"],
            "size": amd64["size"],
            "platform": {"architecture": "arm64", "os": "linux"},
        });

        let new_index = serde_json::json!({
            "schemaVersion": index["schemaVersion"],
            "mediaType": index.get("mediaType").unwrap_or(&serde_json::json!("application/vnd.oci.image.index.v1+json")),
            "manifests": [amd64, arm64_entry],
        });
        let new_index_bytes = serde_json::to_vec(&new_index)?;

        // Build OCI tar
        let oci_tar = match make_oci_tar(&tgt, &new_index_bytes, &amd64_manifest_bytes) {
            Ok(tar) => tar,
            Err(e) => {
                warn(&format!("Failed to build OCI tar for {tgt}: {e}"));
                continue;
            }
        };

        // Import + push via SSH to each node (containerd operations)
        for node_ip in &nodes {
            ok(&format!("Importing {tgt} on {node_ip}..."));

            let ssh_target = format!("{ssh_user}@{node_ip}");

            // Import the OCI tar via the ssh child's stdin
            let mut import_cmd = tokio::process::Command::new("ssh")
                .args([
                    "-p",
                    "2222",
                    "-o",
                    "StrictHostKeyChecking=no",
                    &ssh_target,
                    "sudo ctr -n k8s.io images import --all-platforms -",
                ])
                .stdin(Stdio::piped())
                .stdout(Stdio::null())
                .stderr(Stdio::piped())
                .spawn()
                .ctx("Failed to spawn ssh for ctr import")?;

            if let Some(mut stdin) = import_cmd.stdin.take() {
                use tokio::io::AsyncWriteExt;
                stdin.write_all(&oci_tar).await?;
                // Dropping stdin closes the pipe so `ctr import` sees EOF.
                drop(stdin);
            }
            let import_status = import_cmd.wait().await?;
            if !import_status.success() {
                warn(&format!("ctr import failed on {node_ip} for {tgt}"));
                continue;
            }

            // Label for CRI so kubelet treats the image as managed (best-effort)
            let _ = tokio::process::Command::new("ssh")
                .args([
                    "-p",
                    "2222",
                    "-o",
                    "StrictHostKeyChecking=no",
                    &ssh_target,
                    &format!(
                        "sudo ctr -n k8s.io images label {tgt} io.cri-containerd.image=managed"
                    ),
                ])
                .stdout(Stdio::null())
                .stderr(Stdio::null())
                .status()
                .await;

            // Push to Gitea registry using the admin credentials
            ok(&format!("Pushing {tgt} from {node_ip}..."));
            let push_status = tokio::process::Command::new("ssh")
                .args([
                    "-p",
                    "2222",
                    "-o",
                    "StrictHostKeyChecking=no",
                    &ssh_target,
                    &format!(
                        "sudo ctr -n k8s.io images push --user {GITEA_ADMIN_USER}:{admin_pass} {tgt}"
                    ),
                ])
                .stdout(Stdio::null())
                .stderr(Stdio::piped())
                .status()
                .await;

            match push_status {
                Ok(s) if s.success() => ok(&format!("Pushed {tgt}")),
                _ => warn(&format!("Push failed for {tgt} on {node_ip}")),
            }

            // Only need to push from one node
            break;
        }
    }

    // Delete pods stuck in image-pull error states
    ok("Clearing image-pull-error pods...");
    clear_image_pull_error_pods().await?;

    ok("Done.");
    Ok(())
}

/// Delete pods in image-pull error states across managed namespaces.
+async fn clear_image_pull_error_pods() -> Result<()> { + use k8s_openapi::api::core::v1::Pod; + + let error_reasons = ["ImagePullBackOff", "ErrImagePull", "ErrImageNeverPull"]; + + let client = crate::kube::get_client().await?; + + for ns in MANAGED_NS { + let api: kube::api::Api = kube::api::Api::namespaced(client.clone(), ns); + let pods = api + .list(&kube::api::ListParams::default()) + .await; + + let pods = match pods { + Ok(p) => p, + Err(_) => continue, + }; + + for pod in &pods.items { + let pod_name = pod.metadata.name.as_deref().unwrap_or(""); + if pod_name.is_empty() { + continue; + } + + let has_error = pod + .status + .as_ref() + .and_then(|s| s.container_statuses.as_ref()) + .map(|statuses| { + statuses.iter().any(|cs| { + cs.state + .as_ref() + .and_then(|s| s.waiting.as_ref()) + .and_then(|w| w.reason.as_deref()) + .is_some_and(|r| error_reasons.contains(&r)) + }) + }) + .unwrap_or(false); + + if has_error { + let _ = api + .delete(pod_name, &kube::api::DeleteParams::default()) + .await; + } + } + } + + Ok(()) +} + +// --------------------------------------------------------------------------- +// Build dispatch +// --------------------------------------------------------------------------- + +/// Build an image. Pass push=true to push, deploy=true to also apply + rollout. 
// Thin dispatcher: maps each `BuildTarget` variant onto its builder in the
// `builders` submodule. The string arguments passed to `build_messages` must
// match the component names in `builders::MESSAGES_COMPONENTS` exactly.
pub async fn cmd_build(what: &BuildTarget, push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    match what {
        BuildTarget::Proxy => builders::build_proxy(push, deploy, no_cache).await,
        BuildTarget::Integration => builders::build_integration(push, deploy, no_cache).await,
        BuildTarget::KratosAdmin => builders::build_kratos_admin(push, deploy, no_cache).await,
        BuildTarget::Meet => builders::build_meet(push, deploy, no_cache).await,
        // Docs is built through the generic La Suite frontend helper.
        BuildTarget::DocsFrontend => {
            let repo_dir = crate::config::get_repo_root().join("docs");
            builders::build_la_suite_frontend(
                "docs-frontend",
                &repo_dir,
                "src/frontend",
                "src/frontend/apps/impress",
                "src/frontend/Dockerfile",
                "impress-frontend",
                "docs-frontend",
                "lasuite",
                push,
                deploy,
                no_cache,
            )
            .await
        }
        // Both People variants build the same image set.
        BuildTarget::PeopleFrontend | BuildTarget::People => builders::build_people(push, deploy, no_cache).await,
        // "messages" builds every component; the suffixed variants build one.
        BuildTarget::Messages => builders::build_messages("messages", push, deploy, no_cache).await,
        BuildTarget::MessagesBackend => builders::build_messages("messages-backend", push, deploy, no_cache).await,
        BuildTarget::MessagesFrontend => builders::build_messages("messages-frontend", push, deploy, no_cache).await,
        BuildTarget::MessagesMtaIn => builders::build_messages("messages-mta-in", push, deploy, no_cache).await,
        BuildTarget::MessagesMtaOut => builders::build_messages("messages-mta-out", push, deploy, no_cache).await,
        BuildTarget::MessagesMpa => builders::build_messages("messages-mpa", push, deploy, no_cache).await,
        BuildTarget::MessagesSocksProxy => {
            builders::build_messages("messages-socks-proxy", push, deploy, no_cache).await
        }
        BuildTarget::Tuwunel => builders::build_tuwunel(push, deploy, no_cache).await,
        BuildTarget::Calendars => builders::build_calendars(push, deploy, no_cache).await,
        BuildTarget::Projects => builders::build_projects(push, deploy, no_cache).await,
        BuildTarget::Sol => builders::build_sol(push, deploy, no_cache).await,
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    // Keeping MANAGED_NS sorted gives deterministic iteration/reporting order.
    #[test]
    fn managed_ns_is_sorted() {
        let mut sorted = MANAGED_NS.to_vec();
        sorted.sort();
        assert_eq!(
            MANAGED_NS, &sorted[..],
            "MANAGED_NS should be in alphabetical order"
        );
    }

    #[test]
    fn managed_ns_contains_expected_namespaces() {
        assert!(MANAGED_NS.contains(&"data"));
        assert!(MANAGED_NS.contains(&"devtools"));
        assert!(MANAGED_NS.contains(&"ingress"));
        assert!(MANAGED_NS.contains(&"ory"));
        assert!(MANAGED_NS.contains(&"matrix"));
    }

    // cmd_mirror strips the "docker.io/" prefix, so every source must carry it.
    #[test]
    fn amd64_only_images_all_from_docker_hub() {
        for (src, _org, _repo, _tag) in AMD64_ONLY_IMAGES {
            assert!(
                src.starts_with("docker.io/"),
                "Expected docker.io prefix, got: {src}"
            );
        }
    }

    #[test]
    fn amd64_only_images_all_have_latest_tag() {
        for (src, _org, _repo, tag) in AMD64_ONLY_IMAGES {
            assert_eq!(
                *tag, "latest",
                "Expected 'latest' tag for {src}, got: {tag}"
            );
        }
    }

    #[test]
    fn amd64_only_images_non_empty() {
        assert!(
            !AMD64_ONLY_IMAGES.is_empty(),
            "AMD64_ONLY_IMAGES should not be empty"
        );
    }

    #[test]
    fn amd64_only_images_org_is_studio() {
        for (src, org, _repo, _tag) in AMD64_ONLY_IMAGES {
            assert_eq!(
                *org, "studio",
                "Expected org 'studio' for {src}, got: {org}"
            );
        }
    }

    #[test]
    fn build_target_display_proxy() {
        assert_eq!(BuildTarget::Proxy.to_string(), "proxy");
    }

    #[test]
    fn build_target_display_kratos_admin() {
        assert_eq!(BuildTarget::KratosAdmin.to_string(), "kratos-admin");
    }

    // Display values double as CLI names, so they must be kebab-case.
    #[test]
    fn build_target_display_all_lowercase_or_hyphenated() {
        let targets = [
            BuildTarget::Proxy,
            BuildTarget::Integration,
            BuildTarget::KratosAdmin,
            BuildTarget::Meet,
            BuildTarget::DocsFrontend,
            BuildTarget::PeopleFrontend,
            BuildTarget::People,
            BuildTarget::Messages,
            BuildTarget::MessagesBackend,
            BuildTarget::MessagesFrontend,
            BuildTarget::MessagesMtaIn,
            BuildTarget::MessagesMtaOut,
            BuildTarget::MessagesMpa,
            BuildTarget::MessagesSocksProxy,
            BuildTarget::Tuwunel,
            BuildTarget::Calendars,
            BuildTarget::Projects,
            BuildTarget::Sol,
        ];
        for t in &targets {
            let s = t.to_string();
            assert!(
                s.chars().all(|c| c.is_ascii_lowercase() || c == '-'),
                "BuildTarget display '{s}' has unexpected characters"
            );
        }
    }

    #[test]
    fn gitea_admin_user_constant() {
        assert_eq!(GITEA_ADMIN_USER, "gitea_admin");
    }

    #[test]
    fn messages_components_non_empty() {
        assert!(!builders::MESSAGES_COMPONENTS.is_empty());
    }

    // Dockerfile paths are joined onto the repo root, so they must be relative.
    #[test]
    fn messages_components_dockerfiles_are_relative() {
        for (_name, _image, dockerfile_rel, _target) in builders::MESSAGES_COMPONENTS {
            assert!(
                dockerfile_rel.ends_with("Dockerfile"),
                "Expected Dockerfile suffix in: {dockerfile_rel}"
            );
            assert!(
                !dockerfile_rel.starts_with('/'),
                "Dockerfile path should be relative: {dockerfile_rel}"
            );
        }
    }

    #[test]
    fn messages_components_names_match_build_targets() {
        for (name, _image, _df, _target) in builders::MESSAGES_COMPONENTS {
            assert!(
                name.starts_with("messages-"),
                "Component name should start with 'messages-': {name}"
            );
        }
    }
}
diff --git a/sunbeam-sdk/src/secrets/db_engine.rs b/sunbeam-sdk/src/secrets/db_engine.rs
new file mode 100644
index 0000000..2aeb9a7
--- /dev/null
+++ b/sunbeam-sdk/src/secrets/db_engine.rs
@@ -0,0 +1,107 @@
//! OpenBao database secrets engine configuration.

use std::collections::HashMap;

use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, ListParams};

use crate::error::{Result, ResultExt};
use crate::kube as k;
use crate::openbao::BaoClient;
use crate::output::ok;

use super::{rand_token, PG_USERS};

/// Enable OpenBao database secrets engine and create PostgreSQL static roles.
pub async fn configure_db_engine(bao: &BaoClient) -> Result<()> {
    ok("Configuring OpenBao database secrets engine...");
    let pg_rw = "postgres-rw.data.svc.cluster.local:5432";

    // Idempotent: re-enabling an already-mounted engine errors; ignore it.
    let _ = bao.enable_secrets_engine("database", "database").await;

    // ── vault PG user setup ───────────────────────────────────────────────
    // Find the CNPG primary pod so SQL can be executed via kube exec.
    let client = k::get_client().await?;
    let pods: Api<Pod> = Api::namespaced(client.clone(), "data");
    let lp = ListParams::default().labels("cnpg.io/cluster=postgres,role=primary");
    let pod_list = pods.list(&lp).await?;
    let cnpg_pod = pod_list
        .items
        .first()
        .and_then(|p| p.metadata.name.as_deref())
        .ctx("Could not find CNPG primary pod for vault user setup.")?
        .to_string();

    // Reuse the existing vault PG password from KV; generate + store one
    // only when absent (keeps reruns idempotent).
    let existing_vault_pass = bao.kv_get_field("secret", "vault", "pg-password").await?;
    let vault_pg_pass = if existing_vault_pass.is_empty() {
        let new_pass = rand_token();
        let mut vault_data = HashMap::new();
        vault_data.insert("pg-password".to_string(), new_pass.clone());
        bao.kv_put("secret", "vault", &vault_data).await?;
        ok("vault KV entry written.");
        new_pass
    } else {
        ok("vault KV entry already present -- skipping write.");
        existing_vault_pass
    };

    // CREATEROLE lets the vault user rotate the service users' passwords.
    let create_vault_sql = concat!(
        "DO $$ BEGIN ",
        "IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'vault') THEN ",
        "CREATE USER vault WITH LOGIN CREATEROLE; ",
        "END IF; ",
        "END $$;"
    );

    psql_exec(&cnpg_pod, create_vault_sql).await?;
    // Password is URL-safe base64 (no quotes), so direct interpolation into
    // the SQL literal is safe here.
    psql_exec(
        &cnpg_pod,
        &format!("ALTER USER vault WITH PASSWORD '{vault_pg_pass}';"),
    )
    .await?;

    for user in PG_USERS {
        psql_exec(
            &cnpg_pod,
            &format!("GRANT {user} TO vault WITH ADMIN OPTION;"),
        )
        .await?;
    }
    ok("vault PG user configured with ADMIN OPTION on all service roles.");

    // `{{username}}` / `{{password}}` are OpenBao connection-template
    // placeholders; the doubled braces in `format!` emit literal `{{...}}`.
    let conn_url = format!(
        "postgresql://{{{{username}}}}:{{{{password}}}}@{pg_rw}/postgres?sslmode=disable"
    );

    bao.write_db_config(
        "cnpg-postgres",
        "postgresql-database-plugin",
        &conn_url,
        "vault",
        &vault_pg_pass,
        "*",
    )
    .await?;
    ok("DB engine connection configured (vault user).");

    let rotation_stmt = r#"ALTER USER "{{name}}" WITH PASSWORD '{{password}}';"#;

    // One static role per service user; password rotated daily (86400 s).
    for user in PG_USERS {
        bao.write_db_static_role(user, "cnpg-postgres", user, 86400, &[rotation_stmt])
            .await?;
        ok(&format!(" static-role/{user}"));
    }

    ok("Database secrets engine configured.");
    Ok(())
}

/// Execute a psql command on the CNPG primary pod.
/// Returns the exec exit code and captured output.
async fn psql_exec(cnpg_pod: &str, sql: &str) -> Result<(i32, String)> {
    k::kube_exec(
        "data",
        cnpg_pod,
        &["psql", "-U", "postgres", "-c", sql],
        Some("postgres"),
    )
    .await
}
diff --git a/sunbeam-sdk/src/secrets/mod.rs b/sunbeam-sdk/src/secrets/mod.rs
new file mode 100644
index 0000000..faa81c3
--- /dev/null
+++ b/sunbeam-sdk/src/secrets/mod.rs
@@ -0,0 +1,1106 @@
//! Secrets management — OpenBao KV seeding, DB engine config, VSO verification.
//!
//! Replaces Python's `kubectl exec openbao-0 -- bao ...` pattern with:
//! 1. kube-rs port-forward to openbao pod on port 8200
//! 2. `crate::openbao::BaoClient` for all HTTP API calls

pub mod db_engine;
pub mod seeding;

use crate::error::{Result, ResultExt, SunbeamError};
use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, ApiResource, DynamicObject, ListParams};
use rand::RngCore;
use rsa::pkcs8::{EncodePrivateKey, EncodePublicKey};
use rsa::RsaPrivateKey;
use serde::Deserialize;
use std::collections::HashMap;
use tokio::net::TcpListener;

use crate::kube as k;
use crate::openbao::BaoClient;
use crate::output::{ok, step, warn};

// ── Constants ─────────────────────────────────────────────────────────────── 

const ADMIN_USERNAME: &str = "estudio-admin";
const GITEA_ADMIN_USER: &str = "gitea_admin";
// Service users that get a PostgreSQL role, a database, and a vault-managed
// static role. Shared with `db_engine` via `use super::PG_USERS`.
const PG_USERS: &[&str] = &[
    "kratos",
    "hydra",
    "gitea",
    "hive",
    "docs",
    "meet",
    "drive",
    "messages",
    "conversations",
    "people",
    "find",
    "calendars",
    "projects",
];

const SMTP_URI: &str = "smtp://postfix.lasuite.svc.cluster.local:25/?skip_ssl_verify=true";

// ── Key generation ──────────────────────────────────────────────────────────

/// Generate a Fernet-compatible key (32 random bytes, URL-safe base64).
fn gen_fernet_key() -> String {
    use base64::Engine;
    let mut buf = [0u8; 32];
    rand::thread_rng().fill_bytes(&mut buf);
    // Fernet requires padded URL-safe base64, hence URL_SAFE (not NO_PAD).
    base64::engine::general_purpose::URL_SAFE.encode(buf)
}

/// Generate an RSA 2048-bit DKIM key pair.
/// Returns (private_pem_pkcs8, public_pem). Returns ("", "") on failure.
+fn gen_dkim_key_pair() -> (String, String) { + let mut rng = rand::thread_rng(); + let bits = 2048; + let private_key = match RsaPrivateKey::new(&mut rng, bits) { + Ok(k) => k, + Err(e) => { + warn(&format!("RSA key generation failed: {e}")); + return (String::new(), String::new()); + } + }; + + let private_pem = match private_key.to_pkcs8_pem(rsa::pkcs8::LineEnding::LF) { + Ok(p) => p.to_string(), + Err(e) => { + warn(&format!("PKCS8 encoding failed: {e}")); + return (String::new(), String::new()); + } + }; + + let public_key = private_key.to_public_key(); + let public_pem = match public_key.to_public_key_pem(rsa::pkcs8::LineEnding::LF) { + Ok(p) => p.to_string(), + Err(e) => { + warn(&format!("Public key PEM encoding failed: {e}")); + return (private_pem, String::new()); + } + }; + + (private_pem, public_pem) +} + +/// Generate a URL-safe random token (32 bytes). +pub(crate) fn rand_token() -> String { + use base64::Engine; + let mut buf = [0u8; 32]; + rand::thread_rng().fill_bytes(&mut buf); + base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(buf) +} + +/// Generate a URL-safe random token with a specific byte count. +fn rand_token_n(n: usize) -> String { + use base64::Engine; + let mut buf = vec![0u8; n]; + rand::thread_rng().fill_bytes(&mut buf); + base64::engine::general_purpose::URL_SAFE_NO_PAD.encode(buf) +} + +// ── Port-forward helper ───────────────────────────────────────────────────── + +/// Port-forward guard — cancels the background forwarder on drop. +struct PortForwardGuard { + _abort_handle: tokio::task::AbortHandle, + pub local_port: u16, +} + +impl Drop for PortForwardGuard { + fn drop(&mut self) { + self._abort_handle.abort(); + } +} + +/// Open a kube-rs port-forward to `pod_name` in `namespace` on `remote_port`. +/// Binds a local TCP listener and proxies connections to the pod. 
async fn port_forward(
    namespace: &str,
    pod_name: &str,
    remote_port: u16,
) -> Result<PortForwardGuard> {
    let client = k::get_client().await?;
    let pods: Api<Pod> = Api::namespaced(client.clone(), namespace);

    // Bind to an ephemeral local port; the OS picks a free one.
    let listener = TcpListener::bind("127.0.0.1:0")
        .await
        .ctx("Failed to bind local TCP listener for port-forward")?;
    let local_port = listener
        .local_addr()
        .map_err(|e| SunbeamError::Other(format!("local_addr: {e}")))?
        .port();

    let pod_name = pod_name.to_string();
    let ns = namespace.to_string();
    // Background accept loop; aborted by the guard's Drop.
    let task = tokio::spawn(async move {
        let mut current_pod = pod_name;
        loop {
            let (mut client_stream, _) = match listener.accept().await {
                Ok(s) => s,
                Err(_) => break,
            };

            let pf_result = pods.portforward(&current_pod, &[remote_port]).await;
            let mut pf = match pf_result {
                Ok(pf) => pf,
                Err(e) => {
                    tracing::warn!("Port-forward failed, re-resolving pod: {e}");
                    // Re-resolve the pod in case it restarted with a new name.
                    // NOTE(review): matches any pod whose name starts with the
                    // segment before the first '-' (e.g. "openbao" from
                    // "openbao-0") — a loose heuristic; confirm it cannot pick
                    // an unrelated pod in this namespace.
                    if let Ok(new_client) = k::get_client().await {
                        let new_pods: Api<Pod> = Api::namespaced(new_client.clone(), &ns);
                        let lp = ListParams::default();
                        if let Ok(pod_list) = new_pods.list(&lp).await {
                            if let Some(name) = pod_list
                                .items
                                .iter()
                                .find(|p| {
                                    p.metadata
                                        .name
                                        .as_deref()
                                        .map(|n| n.starts_with(current_pod.split('-').next().unwrap_or("")))
                                        .unwrap_or(false)
                                })
                                .and_then(|p| p.metadata.name.clone())
                            {
                                current_pod = name;
                            }
                        }
                    }
                    // The accepted client connection is dropped (closed) here;
                    // the next accept() iteration will retry the forward.
                    continue;
                }
            };

            let mut upstream = match pf.take_stream(remote_port) {
                Some(s) => s,
                None => continue,
            };

            // Proxy bytes both ways until either side closes.
            tokio::spawn(async move {
                let _ = tokio::io::copy_bidirectional(&mut client_stream, &mut upstream).await;
            });
        }
    });

    let abort_handle = task.abort_handle();
    // Brief pause so the accept loop is running before callers connect.
    tokio::time::sleep(std::time::Duration::from_millis(100)).await;

    Ok(PortForwardGuard {
        _abort_handle: abort_handle,
        local_port,
    })
}

/// Port-forward to a service by finding a matching pod via label selector.
async fn port_forward_svc(
    namespace: &str,
    label_selector: &str,
    remote_port: u16,
) -> Result<PortForwardGuard> {
    let client = k::get_client().await?;
    let pods: Api<Pod> = Api::namespaced(client.clone(), namespace);
    let lp = ListParams::default().labels(label_selector);
    let pod_list = pods.list(&lp).await?;
    // First matching pod is good enough — the forward targets one pod anyway.
    let pod_name = pod_list
        .items
        .first()
        .and_then(|p| p.metadata.name.as_deref())
        .ctx("No pod found matching label selector")?
        .to_string();

    port_forward(namespace, &pod_name, remote_port).await
}

// ── Kratos admin identity seeding ───────────────────────────────────────────

/// Minimal slice of a Kratos identity response (only the id is needed).
#[derive(Debug, Deserialize)]
struct KratosIdentity {
    id: String,
}

/// Recovery link/code response; both fields default to empty when absent.
#[derive(Debug, Deserialize)]
struct KratosRecovery {
    #[serde(default)]
    recovery_link: String,
    #[serde(default)]
    recovery_code: String,
}

/// Ensure estudio-admin@ exists in Kratos and is the only admin identity.
///
/// Returns (recovery_link, recovery_code) for the admin identity, or a pair
/// of empty strings on any failure (Kratos not ready, etc.).
/// NOTE(review): this writes `admin-identity-ids` to KV but does not remove
/// other identities — confirm "only admin identity" is enforced elsewhere.
async fn seed_kratos_admin_identity(bao: &BaoClient) -> (String, String) {
    let domain = match k::get_domain().await {
        Ok(d) => d,
        Err(e) => {
            warn(&format!("Could not determine domain: {e}"));
            return (String::new(), String::new());
        }
    };
    let admin_email = format!("{ADMIN_USERNAME}@{domain}");
    ok(&format!(
        "Ensuring Kratos admin identity ({admin_email})..."
    ));

    // All fallible steps run in this async block so one `match` at the end
    // can degrade any failure to a warning + empty result.
    let result: std::result::Result<(String, String), SunbeamError> = async {
        // Prefer the dedicated kratos-admin service; fall back to the main
        // kratos pod's admin port (4434).
        let pf = match port_forward_svc("ory", "app.kubernetes.io/name=kratos-admin", 80).await {
            Ok(pf) => pf,
            Err(_) => port_forward_svc("ory", "app.kubernetes.io/name=kratos", 4434)
                .await
                .ctx("Could not port-forward to Kratos admin API")?,
        };
        let base = format!("http://127.0.0.1:{}", pf.local_port);
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;

        let http = reqwest::Client::new();

        // Look up an existing identity by credentials identifier (email).
        let resp = http
            .get(format!(
                "{base}/admin/identities?credentials_identifier={admin_email}&page_size=1"
            ))
            .header("Accept", "application/json")
            .send()
            .await?;

        let identities: Vec<KratosIdentity> = resp.json().await.unwrap_or_default();
        let identity_id = if let Some(existing) = identities.first() {
            ok(&format!(
                " admin identity exists ({}...)",
                &existing.id[..8.min(existing.id.len())]
            ));
            existing.id.clone()
        } else {
            // Create the admin identity with the employee schema.
            let resp = http
                .post(format!("{base}/admin/identities"))
                .header("Content-Type", "application/json")
                .header("Accept", "application/json")
                .json(&serde_json::json!({
                    "schema_id": "employee",
                    "traits": {"email": admin_email},
                    "state": "active",
                }))
                .send()
                .await?;

            let identity: KratosIdentity =
                resp.json().await.map_err(|e| SunbeamError::Other(e.to_string()))?;
            ok(&format!(
                " created admin identity ({}...)",
                &identity.id[..8.min(identity.id.len())]
            ));
            identity.id
        };

        // Issue a 24 h recovery code so the operator can set a password.
        let resp = http
            .post(format!("{base}/admin/recovery/code"))
            .header("Content-Type", "application/json")
            .header("Accept", "application/json")
            .json(&serde_json::json!({
                "identity_id": identity_id,
                "expires_in": "24h",
            }))
            .send()
            .await?;

        let recovery: KratosRecovery = resp.json().await.unwrap_or(KratosRecovery {
            recovery_link: String::new(),
            recovery_code: String::new(),
        });

        // Record the admin identity in KV (best-effort).
        let mut patch_data = HashMap::new();
        patch_data.insert("admin-identity-ids".to_string(), admin_email.clone());
        let _ = bao.kv_patch("secret", "kratos-admin", &patch_data).await;
        ok(&format!(" ADMIN_IDENTITY_IDS set to {admin_email}"));

        Ok((recovery.recovery_link, recovery.recovery_code))
    }
    .await;

    match result {
        Ok(r) => r,
        Err(e) => {
            warn(&format!(
                "Could not seed Kratos admin identity (Kratos may not be ready): {e}"
            ));
            (String::new(), String::new())
        }
    }
}

// ── cmd_seed — main entry point ─────────────────────────────────────────────

/// File-based advisory lock for `cmd_seed` to prevent concurrent runs.
struct SeedLock {
    path: std::path::PathBuf,
}

impl SeedLock {
    // Creates the lock file atomically (create_new). If it already exists,
    // the recorded PID is probed; a dead owner means a stale lock, which is
    // removed and re-acquired.
    fn acquire() -> Result<Self> {
        let lock_path = dirs::data_dir()
            .unwrap_or_else(|| dirs::home_dir().unwrap().join(".local/share"))
            .join("sunbeam")
            .join("seed.lock");
        std::fs::create_dir_all(lock_path.parent().unwrap())?;

        match std::fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(&lock_path)
        {
            Ok(mut f) => {
                use std::io::Write;
                write!(f, "{}", std::process::id())?;
                Ok(SeedLock { path: lock_path })
            }
            Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => {
                // Check if the PID in the file is still alive
                if let Ok(pid_str) = std::fs::read_to_string(&lock_path) {
                    if let Ok(pid) = pid_str.trim().parse::<i32>() {
                        // kill(pid, 0) checks if process exists without sending a signal
                        let alive = is_pid_alive(pid);
                        if alive {
                            return Err(SunbeamError::secrets(
                                "Another sunbeam seed is already running. Wait for it to finish.",
                            ));
                        }
                    }
                }
                // Stale lock, remove and retry.
                // NOTE(review): remove+create_new leaves a TOCTOU window
                // between two concurrent stale-lock claimants — confirm
                // acceptable for this advisory lock.
                std::fs::remove_file(&lock_path)?;
                let mut f = std::fs::OpenOptions::new()
                    .write(true)
                    .create_new(true)
                    .open(&lock_path)?;
                use std::io::Write;
                write!(f, "{}", std::process::id())?;
                Ok(SeedLock { path: lock_path })
            }
            Err(e) => Err(e.into()),
        }
    }
}

impl Drop for SeedLock {
    fn drop(&mut self) {
        // Best-effort release; a leftover file becomes a stale lock that
        // the next acquire() reclaims.
        let _ = std::fs::remove_file(&self.path);
    }
}

/// Check if a process with the given PID is still alive.
/// NOTE(review): `kill -0` also fails with EPERM for a live process owned by
/// another user, which would be misread as "dead" — confirm all seed runs
/// happen under the same user.
fn is_pid_alive(pid: i32) -> bool {
    std::process::Command::new("kill")
        .args(["-0", &pid.to_string()])
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .map(|s| s.success())
        .unwrap_or(false)
}

/// Seed OpenBao KV with crypto-random credentials, then mirror to K8s Secrets.
pub async fn cmd_seed() -> Result<()> {
    let _lock = SeedLock::acquire()?;
    step("Seeding secrets...");

    // seed_openbao() returns None when OpenBao is unavailable; fall back to
    // empty credentials so the remaining steps degrade gracefully.
    let seed_result = seeding::seed_openbao().await?;
    let (creds, ob_pod, root_token) = match seed_result {
        Some(r) => (r.creds, r.ob_pod, r.root_token),
        None => (HashMap::new(), String::new(), String::new()),
    };

    let s3_access_key = creds.get("s3-access-key").cloned().unwrap_or_default();
    let s3_secret_key = creds.get("s3-secret-key").cloned().unwrap_or_default();
    let hydra_system = creds
        .get("hydra-system-secret")
        .cloned()
        .unwrap_or_default();
    let hydra_cookie = creds
        .get("hydra-cookie-secret")
        .cloned()
        .unwrap_or_default();
    let hydra_pairwise = creds
        .get("hydra-pairwise-salt")
        .cloned()
        .unwrap_or_default();
    let kratos_secrets_default = creds
        .get("kratos-secrets-default")
        .cloned()
        .unwrap_or_default();
    let kratos_secrets_cookie = creds
        .get("kratos-secrets-cookie")
        .cloned()
        .unwrap_or_default();
    let hive_oidc_id = creds
        .get("hive-oidc-client-id")
        .cloned()
        .unwrap_or_else(|| "hive-local".into());
    let hive_oidc_sec = creds
        .get("hive-oidc-client-secret")
        .cloned()
        .unwrap_or_default();
    let django_secret = creds
.get("people-django-secret") + .cloned() + .unwrap_or_default(); + let gitea_admin_pass = creds + .get("gitea-admin-password") + .cloned() + .unwrap_or_default(); + + // ── Wait for Postgres ─────────────────────────────────────────────── + ok("Waiting for postgres cluster..."); + let mut pg_pod = String::new(); + + let client = k::get_client().await?; + let ar = ApiResource { + group: "postgresql.cnpg.io".into(), + version: "v1".into(), + api_version: "postgresql.cnpg.io/v1".into(), + kind: "Cluster".into(), + plural: "clusters".into(), + }; + let cnpg_api: Api = Api::namespaced_with(client.clone(), "data", &ar); + for _ in 0..60 { + if let Ok(cluster) = cnpg_api.get("postgres").await { + let phase = cluster + .data + .get("status") + .and_then(|s| s.get("phase")) + .and_then(|p| p.as_str()) + .unwrap_or(""); + if phase == "Cluster in healthy state" { + // Cluster is healthy — find the primary pod name + let pods: Api = Api::namespaced(client.clone(), "data"); + let lp = ListParams::default().labels("cnpg.io/cluster=postgres,role=primary"); + if let Ok(pod_list) = pods.list(&lp).await { + if let Some(name) = pod_list + .items + .first() + .and_then(|p| p.metadata.name.as_deref()) + { + pg_pod = name.to_string(); + ok(&format!("Postgres ready ({pg_pod}).")); + break; + } + } + } + } + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + } + + if pg_pod.is_empty() { + warn("Postgres not ready after 5 min -- continuing anyway."); + } + + if !pg_pod.is_empty() { + ok("Ensuring postgres roles and databases exist..."); + let db_map: HashMap<&str, &str> = [ + ("kratos", "kratos_db"), + ("hydra", "hydra_db"), + ("gitea", "gitea_db"), + ("hive", "hive_db"), + ("docs", "docs_db"), + ("meet", "meet_db"), + ("drive", "drive_db"), + ("messages", "messages_db"), + ("conversations", "conversations_db"), + ("people", "people_db"), + ("find", "find_db"), + ("calendars", "calendars_db"), + ("projects", "projects_db"), + ] + .into_iter() + .collect(); + + for user in 
PG_USERS { + let ensure_sql = format!( + "DO $$ BEGIN IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname='{user}') \ + THEN EXECUTE 'CREATE USER {user}'; END IF; END $$;" + ); + let _ = k::kube_exec( + "data", + &pg_pod, + &["psql", "-U", "postgres", "-c", &ensure_sql], + Some("postgres"), + ) + .await; + + let db = db_map.get(user).copied().unwrap_or("unknown_db"); + let create_db_sql = format!("CREATE DATABASE {db} OWNER {user};"); + let _ = k::kube_exec( + "data", + &pg_pod, + &["psql", "-U", "postgres", "-c", &create_db_sql], + Some("postgres"), + ) + .await; + } + + // Configure database secrets engine via port-forward + if !ob_pod.is_empty() && !root_token.is_empty() { + match port_forward("data", &ob_pod, 8200).await { + Ok(pf) => { + let bao_url = format!("http://127.0.0.1:{}", pf.local_port); + let bao = BaoClient::with_token(&bao_url, &root_token); + if let Err(e) = db_engine::configure_db_engine(&bao).await { + warn(&format!("DB engine config failed: {e}")); + } + } + Err(e) => warn(&format!("Port-forward to OpenBao failed: {e}")), + } + } else { + warn("Skipping DB engine config -- missing ob_pod or root_token."); + } + } + + // ── Create K8s secrets ────────────────────────────────────────────── + ok("Creating K8s secrets (VSO will overwrite on next sync)..."); + + k::ensure_ns("ory").await?; + k::create_secret( + "ory", + "hydra", + HashMap::from([ + ("secretsSystem".into(), hydra_system), + ("secretsCookie".into(), hydra_cookie), + ("pairwise-salt".into(), hydra_pairwise), + ]), + ) + .await?; + k::create_secret( + "ory", + "kratos-app-secrets", + HashMap::from([ + ("secretsDefault".into(), kratos_secrets_default), + ("secretsCookie".into(), kratos_secrets_cookie), + ]), + ) + .await?; + + k::ensure_ns("devtools").await?; + k::create_secret( + "devtools", + "gitea-s3-credentials", + HashMap::from([ + ("access-key".into(), s3_access_key.clone()), + ("secret-key".into(), s3_secret_key.clone()), + ]), + ) + .await?; + k::create_secret( + "devtools", + 
"gitea-admin-credentials", + HashMap::from([ + ("username".into(), GITEA_ADMIN_USER.into()), + ("password".into(), gitea_admin_pass.clone()), + ]), + ) + .await?; + + // Sync Gitea admin password to Gitea's own DB + if !gitea_admin_pass.is_empty() { + let gitea_pods: Api = Api::namespaced(client.clone(), "devtools"); + let lp = ListParams::default().labels("app.kubernetes.io/name=gitea"); + if let Ok(pod_list) = gitea_pods.list(&lp).await { + if let Some(gitea_pod) = pod_list + .items + .first() + .and_then(|p| p.metadata.name.as_deref()) + { + match k::kube_exec( + "devtools", + gitea_pod, + &[ + "gitea", + "admin", + "user", + "change-password", + "--username", + GITEA_ADMIN_USER, + "--password", + &gitea_admin_pass, + "--must-change-password=false", + ], + Some("gitea"), + ) + .await + { + Ok((0, _)) => ok("Gitea admin password synced to Gitea DB."), + Ok((_, stderr)) => { + warn(&format!("Could not sync Gitea admin password: {stderr}")) + } + Err(e) => warn(&format!("Could not sync Gitea admin password: {e}")), + } + } else { + warn("Gitea pod not found -- admin password NOT synced to Gitea DB. 
Run seed again after Gitea is deployed."); + } + } + } + + k::ensure_ns("storage").await?; + let s3_json = format!( + r#"{{"identities":[{{"name":"seaweed","credentials":[{{"accessKey":"{}","secretKey":"{}"}}],"actions":["Admin","Read","Write","List","Tagging"]}}]}}"#, + s3_access_key, s3_secret_key + ); + k::create_secret( + "storage", + "seaweedfs-s3-credentials", + HashMap::from([ + ("S3_ACCESS_KEY".into(), s3_access_key.clone()), + ("S3_SECRET_KEY".into(), s3_secret_key.clone()), + ]), + ) + .await?; + k::create_secret( + "storage", + "seaweedfs-s3-json", + HashMap::from([("s3.json".into(), s3_json)]), + ) + .await?; + + k::ensure_ns("lasuite").await?; + k::create_secret( + "lasuite", + "seaweedfs-s3-credentials", + HashMap::from([ + ("S3_ACCESS_KEY".into(), s3_access_key), + ("S3_SECRET_KEY".into(), s3_secret_key), + ]), + ) + .await?; + k::create_secret( + "lasuite", + "hive-oidc", + HashMap::from([ + ("client-id".into(), hive_oidc_id), + ("client-secret".into(), hive_oidc_sec), + ]), + ) + .await?; + k::create_secret( + "lasuite", + "people-django-secret", + HashMap::from([("DJANGO_SECRET_KEY".into(), django_secret)]), + ) + .await?; + + k::ensure_ns("matrix").await?; + k::ensure_ns("media").await?; + k::ensure_ns("monitoring").await?; + + // ── Kratos admin identity ─────────────────────────────────────────── + if !ob_pod.is_empty() && !root_token.is_empty() { + if let Ok(pf) = port_forward("data", &ob_pod, 8200).await { + let bao_url = format!("http://127.0.0.1:{}", pf.local_port); + let bao = BaoClient::with_token(&bao_url, &root_token); + let (recovery_link, recovery_code) = seed_kratos_admin_identity(&bao).await; + if !recovery_link.is_empty() { + ok("Admin recovery link (valid 24h):"); + println!(" {recovery_link}"); + } + if !recovery_code.is_empty() { + ok("Admin recovery code (enter on the page above):"); + println!(" {recovery_code}"); + } + } + } + + let dkim_pub = creds + .get("messages-dkim-public-key") + .cloned() + .unwrap_or_default(); + if 
!dkim_pub.is_empty() { + let b64_key: String = dkim_pub + .replace("-----BEGIN PUBLIC KEY-----", "") + .replace("-----END PUBLIC KEY-----", "") + .replace("-----BEGIN RSA PUBLIC KEY-----", "") + .replace("-----END RSA PUBLIC KEY-----", "") + .split_whitespace() + .collect(); + + if let Ok(domain) = k::get_domain().await { + ok("DKIM DNS record (add to DNS at your registrar):"); + println!( + " default._domainkey.{domain} TXT \"v=DKIM1; k=rsa; p={b64_key}\"" + ); + } + } + + ok("All secrets seeded."); + Ok(()) +} + +// ── cmd_verify — VSO E2E verification ─────────────────────────────────────── + +/// End-to-end test of VSO -> OpenBao integration. +pub async fn cmd_verify() -> Result<()> { + step("Verifying VSO -> OpenBao integration (E2E)..."); + + let client = k::get_client().await?; + let pods: Api = Api::namespaced(client.clone(), "data"); + let lp = ListParams::default().labels("app.kubernetes.io/name=openbao,component=server"); + let pod_list = pods.list(&lp).await?; + + let ob_pod = pod_list + .items + .first() + .and_then(|p| p.metadata.name.as_deref()) + .ctx("OpenBao pod not found -- run full bring-up first.")? 
+ .to_string(); + + let root_token = k::kube_get_secret_field("data", "openbao-keys", "root-token") + .await + .ctx("Could not read openbao-keys secret.")?; + + let pf = port_forward("data", &ob_pod, 8200).await?; + let bao_url = format!("http://127.0.0.1:{}", pf.local_port); + let bao = BaoClient::with_token(&bao_url, &root_token); + + let test_value = rand_token_n(16); + let test_ns = "ory"; + let test_name = "vso-verify"; + + let result: std::result::Result<(), SunbeamError> = async { + ok("Writing test sentinel to OpenBao secret/vso-test ..."); + let mut data = HashMap::new(); + data.insert("test-key".to_string(), test_value.clone()); + bao.kv_put("secret", "vso-test", &data).await?; + + ok(&format!("Creating VaultAuth {test_ns}/{test_name} ...")); + k::kube_apply(&format!( + r#" +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + name: {test_name} + namespace: {test_ns} +spec: + method: kubernetes + mount: kubernetes + kubernetes: + role: vso + serviceAccount: default +"# + )) + .await?; + + ok(&format!( + "Creating VaultStaticSecret {test_ns}/{test_name} ..." 
+ )); + k::kube_apply(&format!( + r#" +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: {test_name} + namespace: {test_ns} +spec: + vaultAuthRef: {test_name} + mount: secret + type: kv-v2 + path: vso-test + refreshAfter: 10s + destination: + name: {test_name} + create: true + overwrite: true +"# + )) + .await?; + + ok("Waiting for VSO to sync (up to 60s) ..."); + let deadline = tokio::time::Instant::now() + std::time::Duration::from_secs(60); + let mut synced = false; + + while tokio::time::Instant::now() < deadline { + let (code, mac) = kubectl_jsonpath( + test_ns, + "vaultstaticsecret", + test_name, + "{.status.secretMAC}", + ) + .await; + if code == 0 && !mac.is_empty() && mac != "" { + synced = true; + break; + } + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + } + + if !synced { + let (_, msg) = kubectl_jsonpath( + test_ns, + "vaultstaticsecret", + test_name, + "{.status.conditions[0].message}", + ) + .await; + return Err(SunbeamError::secrets(format!( + "VSO did not sync within 60s. Last status: {}", + if msg.is_empty() { + "unknown".to_string() + } else { + msg + } + ))); + } + + ok("Verifying K8s Secret contents ..."); + let secret = k::kube_get_secret(test_ns, test_name) + .await? 
.with_ctx(|| format!("K8s Secret {test_ns}/{test_name} not found."))?;

        let data = secret.data.as_ref().ctx("Secret has no data")?;
        let raw = data
            .get("test-key")
            .ctx("Missing key 'test-key' in secret")?;
        let actual = String::from_utf8(raw.0.clone())
            .map_err(|e| SunbeamError::Other(format!("UTF-8 error: {e}")))?;

        if actual != test_value {
            return Err(SunbeamError::secrets(format!(
                "Value mismatch!\n expected: {:?}\n got: {:?}",
                test_value, actual
            )));
        }

        ok("Sentinel value matches -- VSO -> OpenBao integration is working.");
        Ok(())
    }
    .await;

    // Always clean up, even when the verification block above failed.
    ok("Cleaning up test resources...");
    let _ = delete_crd(test_ns, "vaultstaticsecret", test_name).await;
    let _ = delete_crd(test_ns, "vaultauth", test_name).await;
    let _ = delete_k8s_secret(test_ns, test_name).await;
    let _ = bao.kv_delete("secret", "vso-test").await;

    match result {
        Ok(()) => {
            ok("VSO E2E verification passed.");
            Ok(())
        }
        Err(e) => Err(SunbeamError::secrets(format!(
            "VSO verification FAILED: {e}"
        ))),
    }
}

// ── Utility helpers ───────────────────────────────────────────────────────

/// Poll until `pod_name` in `ns` reaches phase "Running" or the timeout
/// expires. Returns `false` on timeout or when no K8s client can be built.
async fn wait_pod_running(ns: &str, pod_name: &str, timeout_secs: u64) -> bool {
    let client = match k::get_client().await {
        Ok(c) => c,
        Err(_) => return false,
    };
    let pods: Api<Pod> = Api::namespaced(client.clone(), ns);

    let deadline = tokio::time::Instant::now() + std::time::Duration::from_secs(timeout_secs);
    while tokio::time::Instant::now() < deadline {
        if let Ok(Some(pod)) = pods.get_opt(pod_name).await {
            let phase = pod.status.as_ref().and_then(|s| s.phase.as_deref());
            if phase == Some("Running") {
                return true;
            }
        }
        tokio::time::sleep(std::time::Duration::from_secs(2)).await;
    }
    false
}

/// Read one value from the Scaleway CLI config. Returns an empty string when
/// the `scw` binary is missing, exits nonzero, or the key is unset.
fn scw_config(key: &str) -> String {
    std::process::Command::new("scw")
        .args(["config", "get", key])
        .output()
        .ok()
        .filter(|o| o.status.success())
        .map(|o| String::from_utf8_lossy(&o.stdout).trim().to_string())
        .unwrap_or_default()
}

/// Delete an arbitrary namespaced resource via kubectl. Best-effort: errors
/// and "not found" are both ignored.
async fn delete_resource(ns: &str, kind: &str, name: &str) -> Result<()> {
    let ctx = format!("--context={}", k::context());
    let _ = tokio::process::Command::new("kubectl")
        .args([&ctx, "-n", ns, "delete", kind, name, "--ignore-not-found"])
        .output()
        .await;
    Ok(())
}

/// Delete a namespaced custom resource. Same behavior as [`delete_resource`];
/// kept as a separate name for call-site clarity (was a verbatim duplicate).
async fn delete_crd(ns: &str, kind: &str, name: &str) -> Result<()> {
    delete_resource(ns, kind, name).await
}

/// Delete a K8s Secret through the API. Best-effort: delete failures are
/// swallowed (used for cleanup paths).
async fn delete_k8s_secret(ns: &str, name: &str) -> Result<()> {
    let client = k::get_client().await?;
    let api: Api<k8s_openapi::api::core::v1::Secret> = Api::namespaced(client.clone(), ns);
    let _ = api
        .delete(name, &kube::api::DeleteParams::default())
        .await;
    Ok(())
}

/// Run `kubectl get <kind> <name> -o jsonpath=<expr>` and return
/// `(exit_code, trimmed_stdout)`; `(1, "")` when kubectl cannot be spawned.
async fn kubectl_jsonpath(ns: &str, kind: &str, name: &str, jsonpath: &str) -> (i32, String) {
    let ctx = format!("--context={}", k::context());
    let jp = format!("-o=jsonpath={jsonpath}");
    match tokio::process::Command::new("kubectl")
        .args([&ctx, "-n", ns, "get", kind, name, &jp, "--ignore-not-found"])
        .output()
        .await
    {
        Ok(output) => {
            let code = output.status.code().unwrap_or(1);
            let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string();
            (code, stdout)
        }
        Err(_) => (1, String::new()),
    }
}

// ── Tests ─────────────────────────────────────────────────────────────────

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_gen_fernet_key_length() {
        use base64::Engine;
        let key = gen_fernet_key();
        // 32 random bytes encode to 44 chars of padded URL-safe base64.
        assert_eq!(key.len(), 44);
        let decoded = base64::engine::general_purpose::URL_SAFE
            .decode(&key)
            .expect("should be valid URL-safe base64");
        assert_eq!(decoded.len(), 32);
    }

    #[test]
    fn test_gen_fernet_key_unique() {
        let k1 = gen_fernet_key();
        let k2 = gen_fernet_key();
        assert_ne!(k1, k2, "Two generated Fernet 
keys should differ"); + } + + #[test] + fn test_gen_dkim_key_pair_produces_pem() { + let (private_pem, public_pem) = gen_dkim_key_pair(); + assert!( + private_pem.contains("BEGIN PRIVATE KEY"), + "Private key should be PKCS8 PEM" + ); + assert!( + public_pem.contains("BEGIN PUBLIC KEY"), + "Public key should be SPKI PEM (not PKCS#1)" + ); + assert!( + !public_pem.contains("BEGIN RSA PUBLIC KEY"), + "Public key should NOT be PKCS#1 format" + ); + assert!(!private_pem.is_empty()); + assert!(!public_pem.is_empty()); + } + + #[test] + fn test_rand_token_nonempty_and_unique() { + let t1 = rand_token(); + let t2 = rand_token(); + assert!(!t1.is_empty()); + assert_ne!(t1, t2); + } + + #[test] + fn test_rand_token_n_length() { + use base64::Engine; + let t = rand_token_n(50); + let decoded = base64::engine::general_purpose::URL_SAFE_NO_PAD + .decode(&t) + .expect("should be valid URL-safe base64"); + assert_eq!(decoded.len(), 50); + } + + #[test] + fn test_constants() { + assert_eq!(ADMIN_USERNAME, "estudio-admin"); + assert_eq!(GITEA_ADMIN_USER, "gitea_admin"); + assert_eq!(PG_USERS.len(), 13); + assert!(PG_USERS.contains(&"kratos")); + assert!(PG_USERS.contains(&"projects")); + } + + #[test] + fn test_scw_config_returns_empty_on_missing_binary() { + let result = scw_config("nonexistent-key"); + let _ = result; + } + + #[test] + fn test_seed_result_structure() { + let mut creds = HashMap::new(); + creds.insert( + "hydra-system-secret".to_string(), + "existingvalue".to_string(), + ); + let result = seeding::SeedResult { + creds, + ob_pod: "openbao-0".to_string(), + root_token: "token123".to_string(), + }; + assert!(result.creds.contains_key("hydra-system-secret")); + assert_eq!(result.creds["hydra-system-secret"], "existingvalue"); + assert_eq!(result.ob_pod, "openbao-0"); + } + + #[test] + fn test_dkim_public_key_extraction() { + let pem = "-----BEGIN PUBLIC KEY-----\nMIIBCgKCAQ...\nbase64data\n-----END PUBLIC KEY-----"; + let b64_key: String = pem + .replace("-----BEGIN 
PUBLIC KEY-----", "") + .replace("-----END PUBLIC KEY-----", "") + .replace("-----BEGIN RSA PUBLIC KEY-----", "") + .replace("-----END RSA PUBLIC KEY-----", "") + .split_whitespace() + .collect(); + assert_eq!(b64_key, "MIIBCgKCAQ...base64data"); + } + + #[test] + fn test_smtp_uri() { + assert_eq!( + SMTP_URI, + "smtp://postfix.lasuite.svc.cluster.local:25/?skip_ssl_verify=true" + ); + } + + #[test] + fn test_pg_users_match_python() { + let expected = vec![ + "kratos", + "hydra", + "gitea", + "hive", + "docs", + "meet", + "drive", + "messages", + "conversations", + "people", + "find", + "calendars", + "projects", + ]; + assert_eq!(PG_USERS, &expected[..]); + } +} diff --git a/sunbeam-sdk/src/secrets/seeding.rs b/sunbeam-sdk/src/secrets/seeding.rs new file mode 100644 index 0000000..0f80a00 --- /dev/null +++ b/sunbeam-sdk/src/secrets/seeding.rs @@ -0,0 +1,542 @@ +//! OpenBao KV seeding — init/unseal, idempotent credential generation, VSO auth. + +use std::collections::{HashMap, HashSet}; + +use k8s_openapi::api::core::v1::Pod; +use kube::api::{Api, ListParams}; + +use crate::error::Result; +use crate::kube as k; +use crate::openbao::BaoClient; +use crate::output::{ok, warn}; + +use super::{ + gen_dkim_key_pair, gen_fernet_key, port_forward, rand_token, rand_token_n, scw_config, + wait_pod_running, delete_resource, GITEA_ADMIN_USER, SMTP_URI, +}; + +/// Internal result from seed_openbao, used by cmd_seed. +pub struct SeedResult { + pub creds: HashMap, + pub ob_pod: String, + pub root_token: String, +} + +/// Read-or-create pattern: reads existing KV values, only generates missing ones. 
+pub async fn get_or_create( + bao: &BaoClient, + path: &str, + fields: &[(&str, &dyn Fn() -> String)], + dirty_paths: &mut HashSet, +) -> Result> { + let existing = bao.kv_get("secret", path).await?.unwrap_or_default(); + let mut result = HashMap::new(); + for (key, default_fn) in fields { + let val = existing.get(*key).filter(|v| !v.is_empty()).cloned(); + if let Some(v) = val { + result.insert(key.to_string(), v); + } else { + result.insert(key.to_string(), default_fn()); + dirty_paths.insert(path.to_string()); + } + } + Ok(result) +} + +/// Initialize/unseal OpenBao, generate/read credentials idempotently, configure VSO auth. +pub async fn seed_openbao() -> Result> { + let client = k::get_client().await?; + let pods: Api = Api::namespaced(client.clone(), "data"); + let lp = ListParams::default().labels("app.kubernetes.io/name=openbao,component=server"); + let pod_list = pods.list(&lp).await?; + + let ob_pod = match pod_list + .items + .first() + .and_then(|p| p.metadata.name.as_deref()) + { + Some(name) => name.to_string(), + None => { + ok("OpenBao pod not found -- skipping."); + return Ok(None); + } + }; + + ok(&format!("OpenBao ({ob_pod})...")); + let _ = wait_pod_running("data", &ob_pod, 120).await; + + let pf = port_forward("data", &ob_pod, 8200).await?; + let bao_url = format!("http://127.0.0.1:{}", pf.local_port); + let bao = BaoClient::new(&bao_url); + + // ── Init / Unseal ─────────────────────────────────────────────────── + let mut unseal_key = String::new(); + let mut root_token = String::new(); + + let status = bao.seal_status().await.unwrap_or_else(|_| { + crate::openbao::SealStatusResponse { + initialized: false, + sealed: true, + progress: 0, + t: 0, + n: 0, + } + }); + + let mut already_initialized = status.initialized; + if !already_initialized { + if let Ok(Some(_)) = k::kube_get_secret("data", "openbao-keys").await { + already_initialized = true; + } + } + + if !already_initialized { + ok("Initializing OpenBao..."); + match bao.init(1, 
1).await { + Ok(init) => { + unseal_key = init.unseal_keys_b64[0].clone(); + root_token = init.root_token.clone(); + let mut data = HashMap::new(); + data.insert("key".to_string(), unseal_key.clone()); + data.insert("root-token".to_string(), root_token.clone()); + k::create_secret("data", "openbao-keys", data).await?; + ok("Initialized -- keys stored in secret/openbao-keys."); + } + Err(e) => { + warn(&format!( + "Init failed -- resetting OpenBao storage for local dev... ({e})" + )); + let _ = delete_resource("data", "pvc", "data-openbao-0").await; + let _ = delete_resource("data", "pod", &ob_pod).await; + warn("OpenBao storage reset. Run --seed again after the pod restarts."); + return Ok(None); + } + } + } else { + ok("Already initialized."); + if let Ok(key) = k::kube_get_secret_field("data", "openbao-keys", "key").await { + unseal_key = key; + } + if let Ok(token) = k::kube_get_secret_field("data", "openbao-keys", "root-token").await { + root_token = token; + } + } + + // Unseal if needed + let status = bao.seal_status().await.unwrap_or_else(|_| { + crate::openbao::SealStatusResponse { + initialized: true, + sealed: true, + progress: 0, + t: 0, + n: 0, + } + }); + if status.sealed && !unseal_key.is_empty() { + ok("Unsealing..."); + bao.unseal(&unseal_key).await?; + } + + if root_token.is_empty() { + warn("No root token available -- skipping KV seeding."); + return Ok(None); + } + + let bao = BaoClient::with_token(&bao_url, &root_token); + + // ── KV seeding ────────────────────────────────────────────────────── + ok("Seeding KV (idempotent -- existing values preserved)..."); + let _ = bao.enable_secrets_engine("secret", "kv").await; + let _ = bao + .write( + "sys/mounts/secret/tune", + &serde_json::json!({"options": {"version": "2"}}), + ) + .await; + + let mut dirty_paths: HashSet = HashSet::new(); + + let hydra = get_or_create( + &bao, + "hydra", + &[ + ("system-secret", &rand_token as &dyn Fn() -> String), + ("cookie-secret", &rand_token), + 
("pairwise-salt", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let smtp_uri_fn = || SMTP_URI.to_string(); + let kratos = get_or_create( + &bao, + "kratos", + &[ + ("secrets-default", &rand_token as &dyn Fn() -> String), + ("secrets-cookie", &rand_token), + ("smtp-connection-uri", &smtp_uri_fn), + ], + &mut dirty_paths, + ) + .await?; + + let seaweedfs = get_or_create( + &bao, + "seaweedfs", + &[ + ("access-key", &rand_token as &dyn Fn() -> String), + ("secret-key", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let gitea_admin_user_fn = || GITEA_ADMIN_USER.to_string(); + let gitea = get_or_create( + &bao, + "gitea", + &[ + ( + "admin-username", + &gitea_admin_user_fn as &dyn Fn() -> String, + ), + ("admin-password", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let hive_local_fn = || "hive-local".to_string(); + let hive = get_or_create( + &bao, + "hive", + &[ + ("oidc-client-id", &hive_local_fn as &dyn Fn() -> String), + ("oidc-client-secret", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let devkey_fn = || "devkey".to_string(); + let livekit = get_or_create( + &bao, + "livekit", + &[ + ("api-key", &devkey_fn as &dyn Fn() -> String), + ("api-secret", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let people = get_or_create( + &bao, + "people", + &[("django-secret-key", &rand_token as &dyn Fn() -> String)], + &mut dirty_paths, + ) + .await?; + + let login_ui = get_or_create( + &bao, + "login-ui", + &[ + ("cookie-secret", &rand_token as &dyn Fn() -> String), + ("csrf-cookie-secret", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let sw_access = seaweedfs.get("access-key").cloned().unwrap_or_default(); + let sw_secret = seaweedfs.get("secret-key").cloned().unwrap_or_default(); + let empty_fn = || String::new(); + let sw_access_fn = { + let v = sw_access.clone(); + move || v.clone() + }; + let sw_secret_fn = { + let v = sw_secret.clone(); + move || v.clone() + }; + + let kratos_admin = get_or_create( 
+ &bao, + "kratos-admin", + &[ + ("cookie-secret", &rand_token as &dyn Fn() -> String), + ("csrf-cookie-secret", &rand_token), + ("admin-identity-ids", &empty_fn), + ("s3-access-key", &sw_access_fn), + ("s3-secret-key", &sw_secret_fn), + ], + &mut dirty_paths, + ) + .await?; + + let docs = get_or_create( + &bao, + "docs", + &[ + ("django-secret-key", &rand_token as &dyn Fn() -> String), + ("collaboration-secret", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let meet = get_or_create( + &bao, + "meet", + &[ + ("django-secret-key", &rand_token as &dyn Fn() -> String), + ("application-jwt-secret-key", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let drive = get_or_create( + &bao, + "drive", + &[("django-secret-key", &rand_token as &dyn Fn() -> String)], + &mut dirty_paths, + ) + .await?; + + let projects = get_or_create( + &bao, + "projects", + &[("secret-key", &rand_token as &dyn Fn() -> String)], + &mut dirty_paths, + ) + .await?; + + let cal_django_fn = || rand_token_n(50); + let calendars = get_or_create( + &bao, + "calendars", + &[ + ("django-secret-key", &cal_django_fn as &dyn Fn() -> String), + ("salt-key", &rand_token), + ("caldav-inbound-api-key", &rand_token), + ("caldav-outbound-api-key", &rand_token), + ("caldav-internal-api-key", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + // DKIM key pair — generated together since keys are coupled. 
+ let existing_messages = bao.kv_get("secret", "messages").await?.unwrap_or_default(); + let (dkim_private, dkim_public) = if existing_messages + .get("dkim-private-key") + .filter(|v| !v.is_empty()) + .is_some() + { + ( + existing_messages + .get("dkim-private-key") + .cloned() + .unwrap_or_default(), + existing_messages + .get("dkim-public-key") + .cloned() + .unwrap_or_default(), + ) + } else { + gen_dkim_key_pair() + }; + + let dkim_priv_fn = { + let v = dkim_private.clone(); + move || v.clone() + }; + let dkim_pub_fn = { + let v = dkim_public.clone(); + move || v.clone() + }; + let socks_proxy_fn = || format!("sunbeam:{}", rand_token()); + let sunbeam_fn = || "sunbeam".to_string(); + + let messages = get_or_create( + &bao, + "messages", + &[ + ("django-secret-key", &rand_token as &dyn Fn() -> String), + ("salt-key", &rand_token), + ("mda-api-secret", &rand_token), + ( + "oidc-refresh-token-key", + &gen_fernet_key as &dyn Fn() -> String, + ), + ("dkim-private-key", &dkim_priv_fn), + ("dkim-public-key", &dkim_pub_fn), + ("rspamd-password", &rand_token), + ("socks-proxy-users", &socks_proxy_fn), + ("mta-out-smtp-username", &sunbeam_fn), + ("mta-out-smtp-password", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let admin_fn = || "admin".to_string(); + let collabora = get_or_create( + &bao, + "collabora", + &[ + ("username", &admin_fn as &dyn Fn() -> String), + ("password", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let tuwunel = get_or_create( + &bao, + "tuwunel", + &[ + ("oidc-client-id", &empty_fn as &dyn Fn() -> String), + ("oidc-client-secret", &empty_fn), + ("turn-secret", &empty_fn), + ("registration-token", &rand_token), + ], + &mut dirty_paths, + ) + .await?; + + let grafana = get_or_create( + &bao, + "grafana", + &[("admin-password", &rand_token as &dyn Fn() -> String)], + &mut dirty_paths, + ) + .await?; + + let scw_access_fn = || scw_config("access-key"); + let scw_secret_fn = || scw_config("secret-key"); + let scaleway_s3 = 
get_or_create( + &bao, + "scaleway-s3", + &[ + ("access-key-id", &scw_access_fn as &dyn Fn() -> String), + ("secret-access-key", &scw_secret_fn), + ], + &mut dirty_paths, + ) + .await?; + + // ── Write dirty paths ─────────────────────────────────────────────── + if dirty_paths.is_empty() { + ok("All OpenBao KV secrets already present -- skipping writes."); + } else { + let mut sorted_paths: Vec<&String> = dirty_paths.iter().collect(); + sorted_paths.sort(); + ok(&format!( + "Writing new secrets to OpenBao KV ({})...", + sorted_paths + .iter() + .map(|s| s.as_str()) + .collect::>() + .join(", ") + )); + + let all_paths: &[(&str, &HashMap)] = &[ + ("hydra", &hydra), + ("kratos", &kratos), + ("seaweedfs", &seaweedfs), + ("gitea", &gitea), + ("hive", &hive), + ("livekit", &livekit), + ("people", &people), + ("login-ui", &login_ui), + ("kratos-admin", &kratos_admin), + ("docs", &docs), + ("meet", &meet), + ("drive", &drive), + ("projects", &projects), + ("calendars", &calendars), + ("messages", &messages), + ("collabora", &collabora), + ("tuwunel", &tuwunel), + ("grafana", &grafana), + ("scaleway-s3", &scaleway_s3), + ]; + + for (path, data) in all_paths { + if dirty_paths.contains(*path) { + bao.kv_patch("secret", path, data).await?; + } + } + } + + // ── Kubernetes auth for VSO ───────────────────────────────────────── + ok("Configuring Kubernetes auth for VSO..."); + let _ = bao.auth_enable("kubernetes", "kubernetes").await; + + bao.write( + "auth/kubernetes/config", + &serde_json::json!({ + "kubernetes_host": "https://kubernetes.default.svc.cluster.local" + }), + ) + .await?; + + let policy_hcl = concat!( + "path \"secret/data/*\" { capabilities = [\"read\"] }\n", + "path \"secret/metadata/*\" { capabilities = [\"read\", \"list\"] }\n", + "path \"database/static-creds/*\" { capabilities = [\"read\"] }\n", + ); + bao.write_policy("vso-reader", policy_hcl).await?; + + bao.write( + "auth/kubernetes/role/vso", + &serde_json::json!({ + "bound_service_account_names": 
"default", + "bound_service_account_namespaces": "ory,devtools,storage,lasuite,matrix,media,data,monitoring", + "policies": "vso-reader", + "ttl": "1h" + }), + ) + .await?; + + // Build credentials map + let mut creds = HashMap::new(); + let field_map: &[(&str, &str, &HashMap)] = &[ + ("hydra-system-secret", "system-secret", &hydra), + ("hydra-cookie-secret", "cookie-secret", &hydra), + ("hydra-pairwise-salt", "pairwise-salt", &hydra), + ("kratos-secrets-default", "secrets-default", &kratos), + ("kratos-secrets-cookie", "secrets-cookie", &kratos), + ("s3-access-key", "access-key", &seaweedfs), + ("s3-secret-key", "secret-key", &seaweedfs), + ("gitea-admin-password", "admin-password", &gitea), + ("hive-oidc-client-id", "oidc-client-id", &hive), + ("hive-oidc-client-secret", "oidc-client-secret", &hive), + ("people-django-secret", "django-secret-key", &people), + ("livekit-api-key", "api-key", &livekit), + ("livekit-api-secret", "api-secret", &livekit), + ( + "kratos-admin-cookie-secret", + "cookie-secret", + &kratos_admin, + ), + ("messages-dkim-public-key", "dkim-public-key", &messages), + ]; + + for (cred_key, field_key, source) in field_map { + creds.insert( + cred_key.to_string(), + source.get(*field_key).cloned().unwrap_or_default(), + ); + } + + Ok(Some(SeedResult { + creds, + ob_pod, + root_token, + })) +}