refactor: SDK images and secrets modules with submodule splits

Split images.rs (1809L) into mod.rs + builders.rs (per-service build
functions). Split secrets.rs (1727L) into mod.rs + seeding.rs (KV
get_or_create, seed_openbao) + db_engine.rs (PostgreSQL static roles).
Moves BuildTarget enum from cli.rs into images/mod.rs with conditional
clap::ValueEnum derive behind the "cli" feature.
This commit is contained in:
2026-03-21 14:37:47 +00:00
parent 8e51e0b3ae
commit bc65b9157d
5 changed files with 3631 additions and 0 deletions

View File

@@ -0,0 +1,806 @@
//! Per-service image build functions.
use crate::error::{Result, ResultExt, SunbeamError};
use crate::output::{ok, step, warn};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use super::{build_image, deploy_rollout, get_build_env};
/// Message component definition: (cli_name, image_name, dockerfile_rel, target).
///
/// - `cli_name`: name accepted on the command line (matched by `build_messages`).
/// - `image_name`: image pushed as `<registry>/studio/<image_name>:latest`.
/// - `dockerfile_rel`: Dockerfile path relative to the `messages/` checkout;
///   the Dockerfile's parent directory is used as the build context.
/// - `target`: optional Dockerfile build stage (`None` builds the final stage).
pub const MESSAGES_COMPONENTS: &[(&str, &str, &str, Option<&str>)] = &[
    (
        "messages-backend",
        "messages-backend",
        "src/backend/Dockerfile",
        // Distroless production stage for the Django backend.
        Some("runtime-distroless-prod"),
    ),
    (
        "messages-frontend",
        "messages-frontend",
        "src/frontend/Dockerfile",
        Some("runtime-prod"),
    ),
    (
        "messages-mta-in",
        "messages-mta-in",
        "src/mta-in/Dockerfile",
        None,
    ),
    (
        "messages-mta-out",
        "messages-mta-out",
        "src/mta-out/Dockerfile",
        None,
    ),
    (
        "messages-mpa",
        "messages-mpa",
        "src/mpa/rspamd/Dockerfile",
        None,
    ),
    (
        "messages-socks-proxy",
        "messages-socks-proxy",
        "src/socks-proxy/Dockerfile",
        None,
    ),
];
/// Build the sunbeam-proxy image from the `proxy/` checkout and, when
/// `deploy` is set, restart the `pingora` deployment in the `ingress`
/// namespace with the freshly built image.
pub async fn build_proxy(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("proxy");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!("Proxy source not found at {}", src.display())));
    }
    let image = format!("{}/studio/proxy:latest", env.registry);
    step(&format!("Building sunbeam-proxy -> {image} ..."));
    let dockerfile = src.join("Dockerfile");
    build_image(&env, &image, &dockerfile, &src, None, None, push, no_cache, &[]).await?;
    if deploy {
        // 120s rollout timeout; pin the deployment to the new image.
        deploy_rollout(&env, &["pingora"], "ingress", 120, Some(&[image])).await?;
    }
    Ok(())
}
/// Build the tuwunel (Matrix homeserver) image and, when `deploy` is set,
/// restart the `tuwunel` deployment in the `matrix` namespace.
pub async fn build_tuwunel(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("tuwunel");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!("Tuwunel source not found at {}", src.display())));
    }
    let image = format!("{}/studio/tuwunel:latest", env.registry);
    step(&format!("Building tuwunel -> {image} ..."));
    let dockerfile = src.join("Dockerfile");
    build_image(&env, &image, &dockerfile, &src, None, None, push, no_cache, &[]).await?;
    if deploy {
        // Homeserver restarts are slower; allow 180s for the rollout.
        deploy_rollout(&env, &["tuwunel"], "matrix", 180, Some(&[image])).await?;
    }
    Ok(())
}
/// Build the integration-service image and, when `deploy` is set, restart
/// the `integration` deployment in the `lasuite` namespace.
///
/// The build context is the sunbeam repo root (the Dockerfile copies from
/// both `integration-service/` and the cloned `integration/` repo), so the
/// service's `.dockerignore` is temporarily copied to the context root if
/// one is not already present there.
pub async fn build_integration(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let sunbeam_dir = crate::config::get_repo_root();
    let integration_service_dir = sunbeam_dir.join("integration-service");
    let dockerfile = integration_service_dir.join("Dockerfile");
    let dockerignore = integration_service_dir.join(".dockerignore");
    if !dockerfile.exists() {
        return Err(SunbeamError::build(format!(
            "integration-service Dockerfile not found at {}",
            dockerfile.display()
        )));
    }
    // The upstream integration repo must be cloned alongside the service.
    if !sunbeam_dir
        .join("integration")
        .join("packages")
        .join("widgets")
        .is_dir()
    {
        return Err(SunbeamError::build(format!(
            "integration repo not found at {} -- \
run: cd sunbeam && git clone https://github.com/suitenumerique/integration.git",
            sunbeam_dir.join("integration").display()
        )));
    }
    let image = format!("{}/studio/integration:latest", env.registry);
    step(&format!("Building integration -> {image} ..."));
    // .dockerignore needs to be at context root. Only mark it as ours when
    // the copy actually succeeded, so we never delete a file we didn't create.
    // (Previously `.ok()` discarded the result and the flag was set
    // unconditionally.)
    let root_ignore = sunbeam_dir.join(".dockerignore");
    let mut copied_ignore = false;
    if !root_ignore.exists() && dockerignore.exists() {
        copied_ignore = std::fs::copy(&dockerignore, &root_ignore).is_ok();
    }
    let result = build_image(
        &env,
        &image,
        &dockerfile,
        &sunbeam_dir,
        None,
        None,
        push,
        no_cache,
        &[],
    )
    .await;
    // Best-effort cleanup of the temporary .dockerignore before propagating
    // any build error.
    if copied_ignore && root_ignore.exists() {
        let _ = std::fs::remove_file(&root_ignore);
    }
    result?;
    if deploy {
        deploy_rollout(&env, &["integration"], "lasuite", 120, None).await?;
    }
    Ok(())
}
/// Build the kratos-admin-ui image and, when `deploy` is set, restart the
/// `kratos-admin-ui` deployment in the `ory` namespace.
pub async fn build_kratos_admin(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("kratos-admin");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!(
            "kratos-admin source not found at {}",
            src.display()
        )));
    }
    let image = format!("{}/studio/kratos-admin-ui:latest", env.registry);
    step(&format!("Building kratos-admin-ui -> {image} ..."));
    let dockerfile = src.join("Dockerfile");
    build_image(&env, &image, &dockerfile, &src, None, None, push, no_cache, &[]).await?;
    if deploy {
        deploy_rollout(&env, &["kratos-admin-ui"], "ory", 120, None).await?;
    }
    Ok(())
}
/// Build the meet backend and frontend images and, when `deploy` is set,
/// restart all meet deployments in the `lasuite` namespace.
pub async fn build_meet(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("meet");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!("meet source not found at {}", src.display())));
    }
    let backend_image = format!("{}/studio/meet-backend:latest", env.registry);
    let frontend_image = format!("{}/studio/meet-frontend:latest", env.registry);

    // Backend: `backend-production` stage of the repo-root Dockerfile.
    step(&format!("Building meet-backend -> {backend_image} ..."));
    build_image(
        &env,
        &backend_image,
        &src.join("Dockerfile"),
        &src,
        Some("backend-production"),
        None,
        push,
        no_cache,
        &[],
    )
    .await?;

    // Frontend: dedicated Dockerfile under src/frontend.
    step(&format!("Building meet-frontend -> {frontend_image} ..."));
    let frontend_dockerfile = src.join("src").join("frontend").join("Dockerfile");
    if !frontend_dockerfile.exists() {
        return Err(SunbeamError::build(format!(
            "meet frontend Dockerfile not found at {}",
            frontend_dockerfile.display()
        )));
    }
    // Empty base URL makes the frontend use same-origin API calls.
    let mut args = HashMap::new();
    args.insert("VITE_API_BASE_URL".to_string(), String::new());
    build_image(
        &env,
        &frontend_image,
        &frontend_dockerfile,
        &src,
        Some("frontend-production"),
        Some(&args),
        push,
        no_cache,
        &[],
    )
    .await?;

    if deploy {
        let deployments = ["meet-backend", "meet-celery-worker", "meet-frontend"];
        deploy_rollout(&env, &deployments, "lasuite", 180, None).await?;
    }
    Ok(())
}
/// Build the people-frontend image. Refreshes the yarn workspace and
/// regenerates the cunningham design tokens before the Docker build, then
/// optionally rolls out the `people-frontend` deployment in `lasuite`.
pub async fn build_people(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("people");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!("people source not found at {}", src.display())));
    }
    let workspace = src.join("src").join("frontend");
    let desk_app = workspace.join("apps").join("desk");
    let dockerfile = workspace.join("Dockerfile");
    if !dockerfile.exists() {
        return Err(SunbeamError::build(format!("Dockerfile not found at {}", dockerfile.display())));
    }
    let image = format!("{}/studio/people-frontend:latest", env.registry);
    step(&format!("Building people-frontend -> {image} ..."));

    // Refresh the workspace lockfile first so the image build sees it.
    ok("Updating yarn.lock (yarn install in workspace)...");
    let status = tokio::process::Command::new("yarn")
        .args(["install", "--ignore-engines"])
        .current_dir(&workspace)
        .status()
        .await
        .ctx("Failed to run yarn install")?;
    if !status.success() {
        return Err(SunbeamError::tool("yarn", "install failed"));
    }

    // Regenerate design tokens with the workspace-local cunningham binary.
    ok("Regenerating cunningham design tokens...");
    let cunningham = workspace.join("node_modules").join(".bin").join("cunningham");
    let status = tokio::process::Command::new(&cunningham)
        .args(["-g", "css,ts", "-o", "src/cunningham", "--utility-classes"])
        .current_dir(&desk_app)
        .status()
        .await
        .ctx("Failed to run cunningham")?;
    if !status.success() {
        return Err(SunbeamError::tool("cunningham", "design token generation failed"));
    }

    let mut args = HashMap::new();
    args.insert("DOCKER_USER".to_string(), "101".to_string());
    build_image(
        &env,
        &image,
        &dockerfile,
        &src,
        Some("frontend-production"),
        Some(&args),
        push,
        no_cache,
        &[],
    )
    .await?;
    if deploy {
        deploy_rollout(&env, &["people-frontend"], "lasuite", 180, None).await?;
    }
    Ok(())
}
/// Build one or all messages components.
///
/// `what` is either the umbrella name "messages" (build every component in
/// [`MESSAGES_COMPONENTS`]) or a single component CLI name. An unknown name
/// is an error; a matched component whose Dockerfile is missing is skipped
/// with a warning. When `deploy` is set and at least one image was built,
/// all messages deployments in the `lasuite` namespace are rolled out.
pub async fn build_messages(what: &str, push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let messages_dir = crate::config::get_repo_root().join("messages");
    if !messages_dir.is_dir() {
        return Err(SunbeamError::build(format!("messages source not found at {}", messages_dir.display())));
    }
    let components: Vec<_> = if what == "messages" {
        MESSAGES_COMPONENTS.to_vec()
    } else {
        MESSAGES_COMPONENTS
            .iter()
            .filter(|(name, _, _, _)| *name == what)
            .copied()
            .collect()
    };
    // Previously an unrecognized name silently built nothing and returned
    // Ok(()); fail loudly instead so typos are caught.
    if components.is_empty() {
        return Err(SunbeamError::build(format!(
            "unknown messages component '{what}' -- expected \"messages\" or one of the component names"
        )));
    }
    let mut built_images = Vec::new();
    for (component, image_name, dockerfile_rel, target) in &components {
        let dockerfile = messages_dir.join(dockerfile_rel);
        if !dockerfile.exists() {
            warn(&format!(
                "Dockerfile not found at {} -- skipping {component}",
                dockerfile.display()
            ));
            continue;
        }
        let image = format!("{}/studio/{image_name}:latest", env.registry);
        let context_dir = dockerfile.parent().unwrap_or(&messages_dir);
        step(&format!("Building {component} -> {image} ..."));
        // Patch ghcr.io/astral-sh/uv COPY for messages-backend on local builds
        let mut cleanup_paths = Vec::new();
        let actual_dockerfile;
        if !env.is_prod && *image_name == "messages-backend" {
            let (patched, cleanup) =
                patch_dockerfile_uv(&dockerfile, context_dir, &env.platform).await?;
            actual_dockerfile = patched;
            cleanup_paths = cleanup;
        } else {
            actual_dockerfile = dockerfile.clone();
        }
        build_image(
            &env,
            &image,
            &actual_dockerfile,
            context_dir,
            *target,
            None,
            push,
            no_cache,
            &cleanup_paths,
        )
        .await?;
        built_images.push(image);
    }
    if deploy && !built_images.is_empty() {
        deploy_rollout(
            &env,
            &[
                "messages-backend",
                "messages-worker",
                "messages-frontend",
                "messages-mta-in",
                "messages-mta-out",
                "messages-mpa",
                "messages-socks-proxy",
            ],
            "lasuite",
            180,
            None,
        )
        .await?;
    }
    Ok(())
}
/// Build a La Suite frontend image from source and push to the Gitea registry.
///
/// Generic driver shared by La Suite apps: refreshes the yarn workspace,
/// regenerates the app theme via `yarn build-theme`, builds the
/// `frontend-production` Dockerfile stage with `DOCKER_USER=101`, and
/// optionally rolls out `deployment` in `namespace`.
#[allow(clippy::too_many_arguments)]
pub async fn build_la_suite_frontend(
    app: &str,
    repo_dir: &Path,
    workspace_rel: &str,
    app_rel: &str,
    dockerfile_rel: &str,
    image_name: &str,
    deployment: &str,
    namespace: &str,
    push: bool,
    deploy: bool,
    no_cache: bool,
) -> Result<()> {
    let env = get_build_env().await?;
    let workspace = repo_dir.join(workspace_rel);
    let app_path = repo_dir.join(app_rel);
    let dockerfile = repo_dir.join(dockerfile_rel);
    if !repo_dir.is_dir() {
        return Err(SunbeamError::build(format!("{app} source not found at {}", repo_dir.display())));
    }
    if !dockerfile.exists() {
        return Err(SunbeamError::build(format!("Dockerfile not found at {}", dockerfile.display())));
    }
    let image = format!("{}/studio/{image_name}:latest", env.registry);
    step(&format!("Building {app} -> {image} ..."));

    // Refresh the workspace lockfile before the Docker build.
    ok("Updating yarn.lock (yarn install in workspace)...");
    let status = tokio::process::Command::new("yarn")
        .args(["install", "--ignore-engines"])
        .current_dir(&workspace)
        .status()
        .await
        .ctx("Failed to run yarn install")?;
    if !status.success() {
        return Err(SunbeamError::tool("yarn", "install failed"));
    }

    // Rebuild the app theme from the app directory.
    ok("Regenerating cunningham design tokens (yarn build-theme)...");
    let status = tokio::process::Command::new("yarn")
        .args(["build-theme"])
        .current_dir(&app_path)
        .status()
        .await
        .ctx("Failed to run yarn build-theme")?;
    if !status.success() {
        return Err(SunbeamError::tool("yarn", "build-theme failed"));
    }

    let mut args = HashMap::new();
    args.insert("DOCKER_USER".to_string(), "101".to_string());
    build_image(
        &env,
        &image,
        &dockerfile,
        repo_dir,
        Some("frontend-production"),
        Some(&args),
        push,
        no_cache,
        &[],
    )
    .await?;
    if deploy {
        deploy_rollout(&env, &[deployment], namespace, 180, None).await?;
    }
    Ok(())
}
/// Download uv from GitHub releases and return a patched Dockerfile path.
///
/// Workaround for local builds that cannot pull `ghcr.io/astral-sh/uv`:
/// finds the `COPY --from=ghcr.io/astral-sh/uv@sha256:... /uv /uvx /bin/`
/// line, downloads the matching uv release tarball into a staging directory
/// inside the build context, and writes a patched Dockerfile that COPYs the
/// staged binaries instead.
///
/// Returns `(dockerfile_to_use, cleanup_paths)`. When no patchable COPY line
/// or no version comment is found, the original Dockerfile path is returned
/// unchanged. The caller is responsible for removing `cleanup_paths` after
/// the build.
///
/// NOTE(review): if a `?` error fires after the staging dir is created (e.g.
/// download failure), the error propagates without returning the cleanup
/// list, so `_sunbeam_uv_stage` can be left behind — confirm acceptable.
pub async fn patch_dockerfile_uv(
    dockerfile_path: &Path,
    context_dir: &Path,
    platform: &str,
) -> Result<(PathBuf, Vec<PathBuf>)> {
    let content = std::fs::read_to_string(dockerfile_path)
        .ctx("Failed to read Dockerfile for uv patching")?;
    // Match COPY --from=ghcr.io/astral-sh/uv@sha256:... /uv /uvx /bin/
    let original_copy = content
        .lines()
        .find(|line| {
            line.contains("COPY")
                && line.contains("--from=ghcr.io/astral-sh/uv@sha256:")
                && line.contains("/uv")
                && line.contains("/bin/")
        })
        .map(|line| line.trim().to_string());
    let original_copy = match original_copy {
        // Nothing to patch: hand back the original Dockerfile untouched.
        Some(c) => c,
        None => return Ok((dockerfile_path.to_path_buf(), vec![])),
    };
    // Find uv version from comment like: oci://ghcr.io/astral-sh/uv:0.x.y
    let version = content
        .lines()
        .find_map(|line| {
            let marker = "oci://ghcr.io/astral-sh/uv:";
            if let Some(idx) = line.find(marker) {
                let rest = &line[idx + marker.len()..];
                let ver = rest.split_whitespace().next().unwrap_or("");
                if !ver.is_empty() {
                    Some(ver.to_string())
                } else {
                    None
                }
            } else {
                None
            }
        });
    let version = match version {
        Some(v) => v,
        None => {
            // Without a version we cannot construct the release URL; fall
            // back to the original Dockerfile and let the pull attempt fail.
            warn("Could not find uv version comment in Dockerfile; ghcr.io pull may fail.");
            return Ok((dockerfile_path.to_path_buf(), vec![]));
        }
    };
    // Map the Docker platform string to the release artifact architecture.
    let arch = if platform.contains("amd64") {
        "x86_64"
    } else {
        "aarch64"
    };
    let url = format!(
        "https://github.com/astral-sh/uv/releases/download/{version}/uv-{arch}-unknown-linux-gnu.tar.gz"
    );
    // Staging dir lives inside the build context so COPY can reach it.
    let stage_dir = context_dir.join("_sunbeam_uv_stage");
    let patched_df = dockerfile_path
        .parent()
        .unwrap_or(dockerfile_path)
        .join("Dockerfile._sunbeam_patched");
    let cleanup = vec![stage_dir.clone(), patched_df.clone()];
    ok(&format!(
        "Downloading uv {version} ({arch}) from GitHub releases to bypass ghcr.io..."
    ));
    std::fs::create_dir_all(&stage_dir)?;
    // Download tarball
    let response = reqwest::get(&url)
        .await
        .ctx("Failed to download uv release")?;
    let tarball_bytes = response.bytes().await?;
    // Extract uv and uvx from tarball
    let decoder = flate2::read::GzDecoder::new(&tarball_bytes[..]);
    let mut archive = tar::Archive::new(decoder);
    for entry in archive.entries()? {
        let mut entry = entry?;
        let path = entry.path()?.to_path_buf();
        let file_name = path
            .file_name()
            .unwrap_or_default()
            .to_string_lossy()
            .to_string();
        // Only the two binaries are extracted; archive layout is otherwise ignored.
        if (file_name == "uv" || file_name == "uvx") && entry.header().entry_type().is_file() {
            let dest = stage_dir.join(&file_name);
            let mut outfile = std::fs::File::create(&dest)?;
            std::io::copy(&mut entry, &mut outfile)?;
            #[cfg(unix)]
            {
                use std::os::unix::fs::PermissionsExt;
                // Binaries must be executable once COPY'd into the image.
                std::fs::set_permissions(&dest, std::fs::Permissions::from_mode(0o755))?;
            }
        }
    }
    if !stage_dir.join("uv").exists() {
        // Extraction produced nothing usable; return cleanup so the caller
        // still removes the (empty) staging dir.
        warn("uv binary not found in release tarball; build may fail.");
        return Ok((dockerfile_path.to_path_buf(), cleanup));
    }
    // Swap the ghcr.io COPY for one that reads from the staged binaries.
    let patched = content.replace(
        &original_copy,
        "COPY _sunbeam_uv_stage/uv _sunbeam_uv_stage/uvx /bin/",
    );
    std::fs::write(&patched_df, patched)?;
    ok(&format!(" uv {version} staged; using patched Dockerfile."));
    Ok((patched_df, cleanup))
}
/// Build the projects image and, when `deploy` is set, restart the
/// `projects` deployment in the `lasuite` namespace with the new image.
pub async fn build_projects(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("projects");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!("projects source not found at {}", src.display())));
    }
    let image = format!("{}/studio/projects:latest", env.registry);
    step(&format!("Building projects -> {image} ..."));
    let dockerfile = src.join("Dockerfile");
    build_image(&env, &image, &dockerfile, &src, None, None, push, no_cache, &[]).await?;
    if deploy {
        deploy_rollout(&env, &["projects"], "lasuite", 180, Some(&[image])).await?;
    }
    Ok(())
}
// TODO: first deploy requires registration enabled on tuwunel to create
// the @sol:sunbeam.pt bot account. Flow:
// 1. Set allow_registration = true in tuwunel-config.yaml
// 2. Apply + restart tuwunel
// 3. Register bot via POST /_matrix/client/v3/register with registration token
// 4. Store access_token + device_id in OpenBao at secret/sol
// 5. Set allow_registration = false, re-apply
// 6. Then build + deploy sol
// This should be automated as `sunbeam user create-bot <name>`.
/// Build the sol (Matrix bot) image and, when `deploy` is set, restart the
/// `sol` deployment in the `matrix` namespace.
pub async fn build_sol(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let src = crate::config::get_repo_root().join("sol");
    if !src.is_dir() {
        return Err(SunbeamError::build(format!("Sol source not found at {}", src.display())));
    }
    let image = format!("{}/studio/sol:latest", env.registry);
    step(&format!("Building sol -> {image} ..."));
    let dockerfile = src.join("Dockerfile");
    build_image(&env, &image, &dockerfile, &src, None, None, push, no_cache, &[]).await?;
    if deploy {
        deploy_rollout(&env, &["sol"], "matrix", 120, None).await?;
    }
    Ok(())
}
/// Build the calendars backend, caldav, and frontend images; when `deploy`
/// is set, roll out all four calendars deployments in `lasuite`.
///
/// The backend build optionally bakes the frontend's `translations.json`
/// into the image via an appended COPY instruction in a patched Dockerfile;
/// the staged file and patched Dockerfile are passed to `build_image` as
/// cleanup paths.
pub async fn build_calendars(push: bool, deploy: bool, no_cache: bool) -> Result<()> {
    let env = get_build_env().await?;
    let cal_dir = crate::config::get_repo_root().join("calendars");
    if !cal_dir.is_dir() {
        return Err(SunbeamError::build(format!("calendars source not found at {}", cal_dir.display())));
    }
    let backend_dir = cal_dir.join("src").join("backend");
    let backend_image = format!("{}/studio/calendars-backend:latest", env.registry);
    step(&format!("Building calendars-backend -> {backend_image} ..."));
    // Stage translations.json into the build context
    let translations_src = cal_dir
        .join("src")
        .join("frontend")
        .join("apps")
        .join("calendars")
        .join("src")
        .join("features")
        .join("i18n")
        .join("translations.json");
    let translations_dst = backend_dir.join("_translations.json");
    let mut cleanup: Vec<PathBuf> = Vec::new();
    let mut dockerfile = backend_dir.join("Dockerfile");
    if translations_src.exists() {
        std::fs::copy(&translations_src, &translations_dst)?;
        cleanup.push(translations_dst);
        // Patch Dockerfile to COPY translations into production image
        let mut content = std::fs::read_to_string(&dockerfile)?;
        content.push_str(
            "\n# Sunbeam: bake translations.json for default calendar names\n\
COPY _translations.json /data/translations.json\n",
        );
        let patched_df = backend_dir.join("Dockerfile._sunbeam_patched");
        std::fs::write(&patched_df, content)?;
        cleanup.push(patched_df.clone());
        // Build from the patched Dockerfile instead of the original.
        dockerfile = patched_df;
    }
    build_image(
        &env,
        &backend_image,
        &dockerfile,
        &backend_dir,
        Some("backend-production"),
        None,
        push,
        no_cache,
        &cleanup,
    )
    .await?;
    // caldav
    let caldav_image = format!("{}/studio/calendars-caldav:latest", env.registry);
    step(&format!("Building calendars-caldav -> {caldav_image} ..."));
    let caldav_dir = cal_dir.join("src").join("caldav");
    build_image(
        &env,
        &caldav_image,
        &caldav_dir.join("Dockerfile"),
        &caldav_dir,
        None,
        None,
        push,
        no_cache,
        &[],
    )
    .await?;
    // frontend
    let frontend_image = format!("{}/studio/calendars-frontend:latest", env.registry);
    step(&format!(
        "Building calendars-frontend -> {frontend_image} ..."
    ));
    // Build-time URLs all derive from the deployment domain.
    let integration_base = format!("https://integration.{}", env.domain);
    let mut build_args = HashMap::new();
    build_args.insert(
        "VISIO_BASE_URL".to_string(),
        format!("https://meet.{}", env.domain),
    );
    build_args.insert(
        "GAUFRE_WIDGET_PATH".to_string(),
        format!("{integration_base}/api/v2/lagaufre.js"),
    );
    build_args.insert(
        "GAUFRE_API_URL".to_string(),
        format!("{integration_base}/api/v2/services.json"),
    );
    build_args.insert(
        "THEME_CSS_URL".to_string(),
        format!("{integration_base}/api/v2/theme.css"),
    );
    let frontend_dir = cal_dir.join("src").join("frontend");
    build_image(
        &env,
        &frontend_image,
        &frontend_dir.join("Dockerfile"),
        &frontend_dir,
        Some("frontend-production"),
        Some(&build_args),
        push,
        no_cache,
        &[],
    )
    .await?;
    if deploy {
        deploy_rollout(
            &env,
            &[
                "calendars-backend",
                "calendars-worker",
                "calendars-caldav",
                "calendars-frontend",
            ],
            "lasuite",
            180,
            Some(&[backend_image, caldav_image, frontend_image]),
        )
        .await?;
    }
    Ok(())
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,107 @@
//! OpenBao database secrets engine configuration.
use std::collections::HashMap;
use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, ListParams};
use crate::error::{Result, ResultExt};
use crate::kube as k;
use crate::openbao::BaoClient;
use crate::output::ok;
use super::{rand_token, PG_USERS};
/// Enable OpenBao database secrets engine and create PostgreSQL static roles.
///
/// Steps:
/// 1. Mount the `database` secrets engine (errors ignored — it may already
///    be mounted).
/// 2. Ensure a `vault` PostgreSQL login role exists on the CNPG primary pod,
///    with a password generated once and persisted in KV at `secret/vault`.
/// 3. GRANT every role in `PG_USERS` to `vault` WITH ADMIN OPTION so OpenBao
///    can rotate those users' passwords.
/// 4. Register the connection and one static role per user with a 86400s
///    (24h) rotation period.
pub async fn configure_db_engine(bao: &BaoClient) -> Result<()> {
    ok("Configuring OpenBao database secrets engine...");
    let pg_rw = "postgres-rw.data.svc.cluster.local:5432";
    // Mount may already exist; ignore the error.
    let _ = bao.enable_secrets_engine("database", "database").await;
    // ── vault PG user setup ─────────────────────────────────────────────
    let client = k::get_client().await?;
    let pods: Api<Pod> = Api::namespaced(client.clone(), "data");
    let lp = ListParams::default().labels("cnpg.io/cluster=postgres,role=primary");
    let pod_list = pods.list(&lp).await?;
    let cnpg_pod = pod_list
        .items
        .first()
        .and_then(|p| p.metadata.name.as_deref())
        .ctx("Could not find CNPG primary pod for vault user setup.")?
        .to_string();
    // Reuse the stored password if present; otherwise generate and persist one.
    let existing_vault_pass = bao.kv_get_field("secret", "vault", "pg-password").await?;
    let vault_pg_pass = if existing_vault_pass.is_empty() {
        let new_pass = rand_token();
        let mut vault_data = HashMap::new();
        vault_data.insert("pg-password".to_string(), new_pass.clone());
        bao.kv_put("secret", "vault", &vault_data).await?;
        ok("vault KV entry written.");
        new_pass
    } else {
        ok("vault KV entry already present -- skipping write.");
        existing_vault_pass
    };
    // Idempotent CREATE USER guarded by a pg_roles existence check.
    let create_vault_sql = concat!(
        "DO $$ BEGIN ",
        "IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'vault') THEN ",
        "CREATE USER vault WITH LOGIN CREATEROLE; ",
        "END IF; ",
        "END $$;"
    );
    psql_exec(&cnpg_pod, create_vault_sql).await?;
    // NOTE(review): password is interpolated into SQL — assumes rand_token()
    // never emits single quotes; confirm against its implementation.
    psql_exec(
        &cnpg_pod,
        &format!("ALTER USER vault WITH PASSWORD '{vault_pg_pass}';"),
    )
    .await?;
    for user in PG_USERS {
        psql_exec(
            &cnpg_pod,
            &format!("GRANT {user} TO vault WITH ADMIN OPTION;"),
        )
        .await?;
    }
    ok("vault PG user configured with ADMIN OPTION on all service roles.");
    // {{username}}/{{password}} are OpenBao template placeholders, not Rust
    // format args (hence the doubled braces).
    let conn_url = format!(
        "postgresql://{{{{username}}}}:{{{{password}}}}@{pg_rw}/postgres?sslmode=disable"
    );
    bao.write_db_config(
        "cnpg-postgres",
        "postgresql-database-plugin",
        &conn_url,
        "vault",
        &vault_pg_pass,
        "*",
    )
    .await?;
    ok("DB engine connection configured (vault user).");
    let rotation_stmt = r#"ALTER USER "{{name}}" WITH PASSWORD '{{password}}';"#;
    for user in PG_USERS {
        // One static role per service user, rotated every 24h.
        bao.write_db_static_role(user, "cnpg-postgres", user, 86400, &[rotation_stmt])
            .await?;
        ok(&format!(" static-role/{user}"));
    }
    ok("Database secrets engine configured.");
    Ok(())
}
/// Execute a psql command on the CNPG primary pod.
///
/// Runs `psql -U postgres -c <sql>` in the `postgres` container of the
/// given pod (namespace `data`) and returns the exit code and output pair
/// produced by `kube_exec` (presumably captured stdout — confirm against
/// `kube_exec`'s contract).
async fn psql_exec(cnpg_pod: &str, sql: &str) -> Result<(i32, String)> {
    k::kube_exec(
        "data",
        cnpg_pod,
        &["psql", "-U", "postgres", "-c", sql],
        Some("postgres"),
    )
    .await
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,542 @@
//! OpenBao KV seeding — init/unseal, idempotent credential generation, VSO auth.
use std::collections::{HashMap, HashSet};
use k8s_openapi::api::core::v1::Pod;
use kube::api::{Api, ListParams};
use crate::error::Result;
use crate::kube as k;
use crate::openbao::BaoClient;
use crate::output::{ok, warn};
use super::{
gen_dkim_key_pair, gen_fernet_key, port_forward, rand_token, rand_token_n, scw_config,
wait_pod_running, delete_resource, GITEA_ADMIN_USER, SMTP_URI,
};
/// Internal result from seed_openbao, used by cmd_seed.
pub struct SeedResult {
    // Flat credential-name -> value map extracted from the seeded KV paths.
    pub creds: HashMap<String, String>,
    // Name of the OpenBao server pod in the `data` namespace.
    pub ob_pod: String,
    // Root token, from init or the `openbao-keys` Kubernetes secret.
    pub root_token: String,
}
/// Read-or-create pattern: reads existing KV values, only generates missing ones.
///
/// For each `(key, default_fn)` pair, a non-empty existing value at
/// `secret/<path>` is kept as-is; a missing or empty value is produced by
/// `default_fn` and `path` is recorded in `dirty_paths` so the caller knows
/// it must be written back.
pub async fn get_or_create(
    bao: &BaoClient,
    path: &str,
    fields: &[(&str, &dyn Fn() -> String)],
    dirty_paths: &mut HashSet<String>,
) -> Result<HashMap<String, String>> {
    let current = bao.kv_get("secret", path).await?.unwrap_or_default();
    let mut out = HashMap::new();
    for (key, generate) in fields {
        match current.get(*key) {
            // Existing non-empty value wins: idempotent across re-runs.
            Some(v) if !v.is_empty() => {
                out.insert(key.to_string(), v.clone());
            }
            // Missing or empty: generate a fresh value and mark path dirty.
            _ => {
                out.insert(key.to_string(), generate());
                dirty_paths.insert(path.to_string());
            }
        }
    }
    Ok(out)
}
/// Initialize/unseal OpenBao, generate/read credentials idempotently, configure VSO auth.
///
/// Flow:
/// 1. Locate the OpenBao server pod in `data` and port-forward to it.
/// 2. Initialize (1 key share / threshold 1) if needed, storing the unseal
///    key and root token in the `openbao-keys` Kubernetes secret; unseal if
///    sealed. On an init failure the PVC and pod are deleted for a local-dev
///    reset and the function returns `Ok(None)`.
/// 3. Seed the KV v2 `secret/` mount per-service via [`get_or_create`]
///    (existing values are preserved; only dirty paths are patched back).
/// 4. Enable Kubernetes auth and write the `vso-reader` policy/role for the
///    Vault Secrets Operator.
/// 5. Return a flat credentials map plus the pod name and root token.
pub async fn seed_openbao() -> Result<Option<SeedResult>> {
    let client = k::get_client().await?;
    let pods: Api<Pod> = Api::namespaced(client.clone(), "data");
    let lp = ListParams::default().labels("app.kubernetes.io/name=openbao,component=server");
    let pod_list = pods.list(&lp).await?;
    let ob_pod = match pod_list
        .items
        .first()
        .and_then(|p| p.metadata.name.as_deref())
    {
        Some(name) => name.to_string(),
        // No pod yet (e.g. chart not installed): nothing to seed.
        None => {
            ok("OpenBao pod not found -- skipping.");
            return Ok(None);
        }
    };
    ok(&format!("OpenBao ({ob_pod})..."));
    // Best effort: proceed even if the pod never reports Running.
    let _ = wait_pod_running("data", &ob_pod, 120).await;
    let pf = port_forward("data", &ob_pod, 8200).await?;
    let bao_url = format!("http://127.0.0.1:{}", pf.local_port);
    let bao = BaoClient::new(&bao_url);
    // ── Init / Unseal ───────────────────────────────────────────────────
    let mut unseal_key = String::new();
    let mut root_token = String::new();
    // If the status call fails, assume an uninitialized sealed server.
    let status = bao.seal_status().await.unwrap_or_else(|_| {
        crate::openbao::SealStatusResponse {
            initialized: false,
            sealed: true,
            progress: 0,
            t: 0,
            n: 0,
        }
    });
    let mut already_initialized = status.initialized;
    if !already_initialized {
        // The keys secret existing implies a previous successful init.
        if let Ok(Some(_)) = k::kube_get_secret("data", "openbao-keys").await {
            already_initialized = true;
        }
    }
    if !already_initialized {
        ok("Initializing OpenBao...");
        // 1 share, threshold 1 — single-key setup for this environment.
        match bao.init(1, 1).await {
            Ok(init) => {
                unseal_key = init.unseal_keys_b64[0].clone();
                root_token = init.root_token.clone();
                let mut data = HashMap::new();
                data.insert("key".to_string(), unseal_key.clone());
                data.insert("root-token".to_string(), root_token.clone());
                k::create_secret("data", "openbao-keys", data).await?;
                ok("Initialized -- keys stored in secret/openbao-keys.");
            }
            Err(e) => {
                // Local-dev recovery: wipe storage so the next run can init.
                warn(&format!(
                    "Init failed -- resetting OpenBao storage for local dev... ({e})"
                ));
                let _ = delete_resource("data", "pvc", "data-openbao-0").await;
                let _ = delete_resource("data", "pod", &ob_pod).await;
                warn("OpenBao storage reset. Run --seed again after the pod restarts.");
                return Ok(None);
            }
        }
    } else {
        ok("Already initialized.");
        // Recover key material from the Kubernetes secret.
        if let Ok(key) = k::kube_get_secret_field("data", "openbao-keys", "key").await {
            unseal_key = key;
        }
        if let Ok(token) = k::kube_get_secret_field("data", "openbao-keys", "root-token").await {
            root_token = token;
        }
    }
    // Unseal if needed
    let status = bao.seal_status().await.unwrap_or_else(|_| {
        crate::openbao::SealStatusResponse {
            initialized: true,
            sealed: true,
            progress: 0,
            t: 0,
            n: 0,
        }
    });
    if status.sealed && !unseal_key.is_empty() {
        ok("Unsealing...");
        bao.unseal(&unseal_key).await?;
    }
    if root_token.is_empty() {
        warn("No root token available -- skipping KV seeding.");
        return Ok(None);
    }
    // Re-create the client authenticated with the root token.
    let bao = BaoClient::with_token(&bao_url, &root_token);
    // ── KV seeding ──────────────────────────────────────────────────────
    ok("Seeding KV (idempotent -- existing values preserved)...");
    // Mount may already exist; upgrade the mount to KV v2 either way.
    let _ = bao.enable_secrets_engine("secret", "kv").await;
    let _ = bao
        .write(
            "sys/mounts/secret/tune",
            &serde_json::json!({"options": {"version": "2"}}),
        )
        .await;
    // Paths that received at least one generated value and need writing back.
    let mut dirty_paths: HashSet<String> = HashSet::new();
    let hydra = get_or_create(
        &bao,
        "hydra",
        &[
            ("system-secret", &rand_token as &dyn Fn() -> String),
            ("cookie-secret", &rand_token),
            ("pairwise-salt", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let smtp_uri_fn = || SMTP_URI.to_string();
    let kratos = get_or_create(
        &bao,
        "kratos",
        &[
            ("secrets-default", &rand_token as &dyn Fn() -> String),
            ("secrets-cookie", &rand_token),
            ("smtp-connection-uri", &smtp_uri_fn),
        ],
        &mut dirty_paths,
    )
    .await?;
    let seaweedfs = get_or_create(
        &bao,
        "seaweedfs",
        &[
            ("access-key", &rand_token as &dyn Fn() -> String),
            ("secret-key", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let gitea_admin_user_fn = || GITEA_ADMIN_USER.to_string();
    let gitea = get_or_create(
        &bao,
        "gitea",
        &[
            (
                "admin-username",
                &gitea_admin_user_fn as &dyn Fn() -> String,
            ),
            ("admin-password", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let hive_local_fn = || "hive-local".to_string();
    let hive = get_or_create(
        &bao,
        "hive",
        &[
            ("oidc-client-id", &hive_local_fn as &dyn Fn() -> String),
            ("oidc-client-secret", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let devkey_fn = || "devkey".to_string();
    let livekit = get_or_create(
        &bao,
        "livekit",
        &[
            ("api-key", &devkey_fn as &dyn Fn() -> String),
            ("api-secret", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let people = get_or_create(
        &bao,
        "people",
        &[("django-secret-key", &rand_token as &dyn Fn() -> String)],
        &mut dirty_paths,
    )
    .await?;
    let login_ui = get_or_create(
        &bao,
        "login-ui",
        &[
            ("cookie-secret", &rand_token as &dyn Fn() -> String),
            ("csrf-cookie-secret", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    // kratos-admin reuses the seaweedfs S3 keys seeded above.
    let sw_access = seaweedfs.get("access-key").cloned().unwrap_or_default();
    let sw_secret = seaweedfs.get("secret-key").cloned().unwrap_or_default();
    let empty_fn = || String::new();
    let sw_access_fn = {
        let v = sw_access.clone();
        move || v.clone()
    };
    let sw_secret_fn = {
        let v = sw_secret.clone();
        move || v.clone()
    };
    let kratos_admin = get_or_create(
        &bao,
        "kratos-admin",
        &[
            ("cookie-secret", &rand_token as &dyn Fn() -> String),
            ("csrf-cookie-secret", &rand_token),
            ("admin-identity-ids", &empty_fn),
            ("s3-access-key", &sw_access_fn),
            ("s3-secret-key", &sw_secret_fn),
        ],
        &mut dirty_paths,
    )
    .await?;
    let docs = get_or_create(
        &bao,
        "docs",
        &[
            ("django-secret-key", &rand_token as &dyn Fn() -> String),
            ("collaboration-secret", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let meet = get_or_create(
        &bao,
        "meet",
        &[
            ("django-secret-key", &rand_token as &dyn Fn() -> String),
            ("application-jwt-secret-key", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let drive = get_or_create(
        &bao,
        "drive",
        &[("django-secret-key", &rand_token as &dyn Fn() -> String)],
        &mut dirty_paths,
    )
    .await?;
    let projects = get_or_create(
        &bao,
        "projects",
        &[("secret-key", &rand_token as &dyn Fn() -> String)],
        &mut dirty_paths,
    )
    .await?;
    let cal_django_fn = || rand_token_n(50);
    let calendars = get_or_create(
        &bao,
        "calendars",
        &[
            ("django-secret-key", &cal_django_fn as &dyn Fn() -> String),
            ("salt-key", &rand_token),
            ("caldav-inbound-api-key", &rand_token),
            ("caldav-outbound-api-key", &rand_token),
            ("caldav-internal-api-key", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    // DKIM key pair — generated together since keys are coupled.
    let existing_messages = bao.kv_get("secret", "messages").await?.unwrap_or_default();
    let (dkim_private, dkim_public) = if existing_messages
        .get("dkim-private-key")
        .filter(|v| !v.is_empty())
        .is_some()
    {
        (
            existing_messages
                .get("dkim-private-key")
                .cloned()
                .unwrap_or_default(),
            existing_messages
                .get("dkim-public-key")
                .cloned()
                .unwrap_or_default(),
        )
    } else {
        gen_dkim_key_pair()
    };
    let dkim_priv_fn = {
        let v = dkim_private.clone();
        move || v.clone()
    };
    let dkim_pub_fn = {
        let v = dkim_public.clone();
        move || v.clone()
    };
    let socks_proxy_fn = || format!("sunbeam:{}", rand_token());
    let sunbeam_fn = || "sunbeam".to_string();
    let messages = get_or_create(
        &bao,
        "messages",
        &[
            ("django-secret-key", &rand_token as &dyn Fn() -> String),
            ("salt-key", &rand_token),
            ("mda-api-secret", &rand_token),
            (
                "oidc-refresh-token-key",
                &gen_fernet_key as &dyn Fn() -> String,
            ),
            ("dkim-private-key", &dkim_priv_fn),
            ("dkim-public-key", &dkim_pub_fn),
            ("rspamd-password", &rand_token),
            ("socks-proxy-users", &socks_proxy_fn),
            ("mta-out-smtp-username", &sunbeam_fn),
            ("mta-out-smtp-password", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let admin_fn = || "admin".to_string();
    let collabora = get_or_create(
        &bao,
        "collabora",
        &[
            ("username", &admin_fn as &dyn Fn() -> String),
            ("password", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    // Tuwunel OIDC/TURN values are filled in elsewhere; seed them empty.
    let tuwunel = get_or_create(
        &bao,
        "tuwunel",
        &[
            ("oidc-client-id", &empty_fn as &dyn Fn() -> String),
            ("oidc-client-secret", &empty_fn),
            ("turn-secret", &empty_fn),
            ("registration-token", &rand_token),
        ],
        &mut dirty_paths,
    )
    .await?;
    let grafana = get_or_create(
        &bao,
        "grafana",
        &[("admin-password", &rand_token as &dyn Fn() -> String)],
        &mut dirty_paths,
    )
    .await?;
    // Scaleway keys come from local config rather than being generated.
    let scw_access_fn = || scw_config("access-key");
    let scw_secret_fn = || scw_config("secret-key");
    let scaleway_s3 = get_or_create(
        &bao,
        "scaleway-s3",
        &[
            ("access-key-id", &scw_access_fn as &dyn Fn() -> String),
            ("secret-access-key", &scw_secret_fn),
        ],
        &mut dirty_paths,
    )
    .await?;
    // ── Write dirty paths ───────────────────────────────────────────────
    if dirty_paths.is_empty() {
        ok("All OpenBao KV secrets already present -- skipping writes.");
    } else {
        let mut sorted_paths: Vec<&String> = dirty_paths.iter().collect();
        sorted_paths.sort();
        ok(&format!(
            "Writing new secrets to OpenBao KV ({})...",
            sorted_paths
                .iter()
                .map(|s| s.as_str())
                .collect::<Vec<_>>()
                .join(", ")
        ));
        let all_paths: &[(&str, &HashMap<String, String>)] = &[
            ("hydra", &hydra),
            ("kratos", &kratos),
            ("seaweedfs", &seaweedfs),
            ("gitea", &gitea),
            ("hive", &hive),
            ("livekit", &livekit),
            ("people", &people),
            ("login-ui", &login_ui),
            ("kratos-admin", &kratos_admin),
            ("docs", &docs),
            ("meet", &meet),
            ("drive", &drive),
            ("projects", &projects),
            ("calendars", &calendars),
            ("messages", &messages),
            ("collabora", &collabora),
            ("tuwunel", &tuwunel),
            ("grafana", &grafana),
            ("scaleway-s3", &scaleway_s3),
        ];
        // Patch (not put) so unrelated pre-existing fields are preserved.
        for (path, data) in all_paths {
            if dirty_paths.contains(*path) {
                bao.kv_patch("secret", path, data).await?;
            }
        }
    }
    // ── Kubernetes auth for VSO ─────────────────────────────────────────
    ok("Configuring Kubernetes auth for VSO...");
    let _ = bao.auth_enable("kubernetes", "kubernetes").await;
    bao.write(
        "auth/kubernetes/config",
        &serde_json::json!({
            "kubernetes_host": "https://kubernetes.default.svc.cluster.local"
        }),
    )
    .await?;
    // Read-only policy over KV data/metadata and DB static creds.
    let policy_hcl = concat!(
        "path \"secret/data/*\" { capabilities = [\"read\"] }\n",
        "path \"secret/metadata/*\" { capabilities = [\"read\", \"list\"] }\n",
        "path \"database/static-creds/*\" { capabilities = [\"read\"] }\n",
    );
    bao.write_policy("vso-reader", policy_hcl).await?;
    bao.write(
        "auth/kubernetes/role/vso",
        &serde_json::json!({
            "bound_service_account_names": "default",
            "bound_service_account_namespaces": "ory,devtools,storage,lasuite,matrix,media,data,monitoring",
            "policies": "vso-reader",
            "ttl": "1h"
        }),
    )
    .await?;
    // Build credentials map
    let mut creds = HashMap::new();
    let field_map: &[(&str, &str, &HashMap<String, String>)] = &[
        ("hydra-system-secret", "system-secret", &hydra),
        ("hydra-cookie-secret", "cookie-secret", &hydra),
        ("hydra-pairwise-salt", "pairwise-salt", &hydra),
        ("kratos-secrets-default", "secrets-default", &kratos),
        ("kratos-secrets-cookie", "secrets-cookie", &kratos),
        ("s3-access-key", "access-key", &seaweedfs),
        ("s3-secret-key", "secret-key", &seaweedfs),
        ("gitea-admin-password", "admin-password", &gitea),
        ("hive-oidc-client-id", "oidc-client-id", &hive),
        ("hive-oidc-client-secret", "oidc-client-secret", &hive),
        ("people-django-secret", "django-secret-key", &people),
        ("livekit-api-key", "api-key", &livekit),
        ("livekit-api-secret", "api-secret", &livekit),
        (
            "kratos-admin-cookie-secret",
            "cookie-secret",
            &kratos_admin,
        ),
        ("messages-dkim-public-key", "dkim-public-key", &messages),
    ];
    for (cred_key, field_key, source) in field_map {
        creds.insert(
            cred_key.to_string(),
            source.get(*field_key).cloned().unwrap_or_default(),
        );
    }
    Ok(Some(SeedResult {
        creds,
        ob_pod,
        root_token,
    }))
}