Compare commits
11 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
4d9659a8bb
|
|||
|
cd80a57a40
|
|||
|
de5c807374
|
|||
|
2ab2fd5b8f
|
|||
|
27536b4695
|
|||
|
2f2f4585f1
|
|||
|
477006ede2
|
|||
|
ca0748b109
|
|||
|
13e3f5d42e
|
|||
|
faf525522c
|
|||
|
34647e6bcb
|
1
.gitignore
vendored
1
.gitignore
vendored
@@ -11,3 +11,4 @@ build/
|
||||
|
||||
# Environment
|
||||
.envrc
|
||||
.DS_Store
|
||||
|
||||
101
CHANGELOG.md
Normal file
101
CHANGELOG.md
Normal file
@@ -0,0 +1,101 @@
|
||||
# Changelog
|
||||
|
||||
## v1.1.1
|
||||
|
||||
- cd80a57 fix: DynamicBearer auth, retry on 500/429, upload resilience
|
||||
- de5c807 fix: progress bar tracks files not bytes, retry on 502, dedup folders
|
||||
- 2ab2fd5 fix: polish Drive upload progress UI
|
||||
- 27536b4 feat: parallel Drive upload with indicatif progress UI
|
||||
|
||||
## v1.1.0
|
||||
|
||||
- 477006e chore: bump to v1.1.0, update package description
|
||||
- ca0748b feat: encrypted vault keystore, JWT auth, Drive upload
|
||||
- 13e3f5d fix opensearch pod resolution + sol-agent vault policy
|
||||
- faf5255 feat: async SunbeamClient factory with unified auth resolution
|
||||
|
||||
## v1.0.1
|
||||
|
||||
- 34647e6 feat: seed Sol agent vault policy + gitea creds, bump v1.0.1
|
||||
|
||||
## v1.0.0
|
||||
|
||||
- 051e17d chore: bump to v1.0.0, drop native-tls for pure rustls
|
||||
- 7ebf900 feat: wire 15 service subcommands into CLI, remove old user command
|
||||
- f867805 feat: CLI modules for all 25+ service clients
|
||||
- 3d7a2d5 feat: OutputFormat enum + render/render_list/read_json_input helpers
|
||||
- 756fbc5 chore: update Cargo.lock
|
||||
- 97976e0 fix: include build module (was gitignored)
|
||||
- f06a167 feat: BuildKit client + integration test suite (651 tests)
|
||||
- b60e22e feat: La Suite clients — 7 DRF services (75 endpoints)
|
||||
- 915f0b2 feat: monitoring clients — Prometheus, Loki, Grafana (57 endpoints)
|
||||
- 21f9e18 feat: LiveKitClient — real-time media API (15 endpoints + JWT)
|
||||
- a33697c feat: S3Client — object storage API (21 endpoints)
|
||||
- 329c18b feat: OpenSearchClient — search and analytics API (60 endpoints)
|
||||
- 2888d59 feat: MatrixClient — chat and collaboration API (80 endpoints)
|
||||
- 890d7b8 feat: GiteaClient — unified git forge API (50+ endpoints)
|
||||
- c597234 feat: HydraClient — OAuth2/OIDC admin API (35 endpoints)
|
||||
- f0bc363 feat: KratosClient — identity management (30 endpoints)
|
||||
- 6823772 feat: ServiceClient trait, HttpTransport, and SunbeamClient factory
|
||||
- 31fde1a fix: forge URL derivation for bare IP hosts, add Cargo registry config
|
||||
- 46d2133 docs: update README for Rust workspace layout
|
||||
- 3ef3fc0 feat: Python upstream — Sol bot registration TODO
|
||||
- e0961cc refactor: binary crate — thin main.rs + cli.rs dispatch
|
||||
- 8e5d295 refactor: SDK small command modules — services, cluster, manifests, gitea, update, auth
|
||||
- 6c7e1cd refactor: SDK users, pm, and checks modules with submodule splits
|
||||
- bc65b91 refactor: SDK images and secrets modules with submodule splits
|
||||
- 8e51e0b refactor: SDK kube, openbao, and tools modules
|
||||
- b92700d refactor: SDK core modules — error, config, output, constants
|
||||
- 2ffedb9 refactor: workspace scaffolding — sunbeam-sdk + sunbeam binary crate
|
||||
- b6daf60 chore: suppress dead_code warning on exit code constants
|
||||
- b92c6ad feat: Python upstream — onboard/offboard, mailbox, Projects, --no-cache
|
||||
- 8d6e815 feat: --no-cache build flag and Sol build target
|
||||
- f75f61f feat: user provisioning — mailbox, Projects, welcome email
|
||||
- c6aa1bd feat: complete pm subcommands with board discovery and user resolution
|
||||
- ffc0fe9 feat: split auth into sso/git, Planka token exchange, board discovery
|
||||
- ded0ab4 refactor: remove --env flag, use --context like kubectl
|
||||
- 88b02ac feat: kubectl-style contexts with per-domain auth tokens
|
||||
- 3a5e1c6 fix: use predictable client_id via pre-seeded K8s secret
|
||||
- 1029ff0 fix: auth login UX — timeout, Ctrl+C, suppress K8s error, center HTML
|
||||
- 43b5a4e fix: URL-encode scope parameter with %20 instead of +
|
||||
- 7fab2a7 fix: auth login domain resolution with --domain flag
|
||||
- 184ad85 fix: install rustls ring crypto provider at startup
|
||||
- 5bdb789 feat: unified project management across Planka and Gitea
|
||||
- d4421d3 feat: OAuth2 CLI authentication with PKCE and token caching
|
||||
- aad469e fix: stdin password, port-forward retry, seed advisory lock
|
||||
- dff4588 fix: employee ID pagination, add async tests
|
||||
- 019c73e fix: S3 auth signature tested against AWS reference vector
|
||||
- e95ee4f fix: rewrite users.rs to fully async (was blocking tokio runtime)
|
||||
- 24e98b4 fix: CNPG readiness, DKIM SPKI format, kv_patch, container name
|
||||
- 6ec0666 fix: SSH tunnel leak, cmd_bao injection, discovery cache, DNS async
|
||||
- bcfb443 refactor: deduplicate constants, fix secret key mismatch, add VSS pruning
|
||||
- 503e407 feat: implement OpenSearch ML setup and model_id injection
|
||||
- bc5eeaa feat: implement secrets.rs with OpenBao HTTP API
|
||||
- 7fd8874 refactor: migrate all modules from anyhow to SunbeamError
|
||||
- cc0b6a8 refactor: add thiserror error tree and tracing logging
|
||||
- ec23568 feat: Phase 2 feature modules + comprehensive test suite (142 tests)
|
||||
- 42c2a74 feat: Phase 1 foundations — kube-rs client, OpenBao HTTP client, self-update
|
||||
- 80c67d3 feat: Rust rewrite scaffolding with embedded kustomize+helm
|
||||
- d5b9632 refactor: cross-platform tool downloads, configurable infra dir and ACME email
|
||||
- c82f15b feat: add tuwunel/matrix support with OpenSearch ML post-apply hooks
|
||||
- 928323e fix(cli): unify proxy build path, fix Gitea password sync
|
||||
- 956a883 chore: added AGENTS.md file for various models.
|
||||
- 507b4d3 feat(config): add production host and infrastructure directory configuration
|
||||
- cbf5c12 docs: update repository URLs to use HTTPS remotes for src.sunbeam.pt
|
||||
- 133fc98 docs: add comprehensive README with professional documentation
|
||||
- 33d7774 chore: added license
|
||||
- 1a97781 docs: add comprehensive documentation for sunbeam CLI
|
||||
- 28c266e feat(cli): partial apply with namespace filter
|
||||
- 2569978 feat(cli): meet build/seed support, production kube tunnel, gitea OIDC bootstrap
|
||||
- c759f2c feat(users): add disable/enable lockout commands; fix table output
|
||||
- cb5a290 feat: auto-restart deployments on ConfigMap change after sunbeam apply
|
||||
- 1a3df1f feat: add sunbeam build integration target
|
||||
- de12847 feat: add impress image mirroring and docs secret seeding
|
||||
- 14dd685 feat: add kratos-admin-ui build target and user management commands
|
||||
- b917aa3 fix: specify -c openbao container in cmd_bao kubectl exec
|
||||
- 352f0b6 feat: add sunbeam k8s kubectl passthrough; fix kube_exec container arg
|
||||
- fb3fd93 fix: sunbeam apply and bootstrap reliability
|
||||
- 0acbf66 check: rewrite seaweedfs probe with S3 SigV4 auth
|
||||
- 6bd59ab sunbeam check: parallel execution, 5s timeout, external S3 check
|
||||
- 39a2f70 Fix sunbeam check: group by namespace, never crash on network errors
|
||||
- 1573faa Add sunbeam check verb with service-level health probes
|
||||
90
Cargo.lock
generated
90
Cargo.lock
generated
@@ -146,6 +146,18 @@ dependencies = [
|
||||
"object",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "argon2"
|
||||
version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3c3610892ee6e0cbce8ae2700349fcf8f98adb0dbfbee85aec3c9179d29cc072"
|
||||
dependencies = [
|
||||
"base64ct",
|
||||
"blake2",
|
||||
"cpufeatures",
|
||||
"password-hash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "asn1-rs"
|
||||
version = "0.7.1"
|
||||
@@ -323,6 +335,15 @@ dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "blake2"
|
||||
version = "0.10.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe"
|
||||
dependencies = [
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "block-buffer"
|
||||
version = "0.10.4"
|
||||
@@ -511,6 +532,19 @@ dependencies = [
|
||||
"crossbeam-utils",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "console"
|
||||
version = "0.15.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "054ccb5b10f9f2cbf51eb355ca1d05c2d279ce1804688d0db74b4733a5aeafd8"
|
||||
dependencies = [
|
||||
"encode_unicode",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"unicode-width",
|
||||
"windows-sys 0.59.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "const-oid"
|
||||
version = "0.9.6"
|
||||
@@ -915,6 +949,12 @@ version = "0.2.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e079f19b08ca6239f47f8ba8509c11cf3ea30095831f7fed61441475edd8c449"
|
||||
|
||||
[[package]]
|
||||
name = "encode_unicode"
|
||||
version = "1.0.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "34aa73646ffb006b8f5147f3dc182bd4bcb190227ce861fc4a4844bf8e3cb2c0"
|
||||
|
||||
[[package]]
|
||||
name = "enum-ordinalize"
|
||||
version = "4.3.2"
|
||||
@@ -1651,6 +1691,20 @@ dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "indicatif"
|
||||
version = "0.17.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "183b3088984b400f4cfac3620d5e076c84da5364016b4f49473de574b2586235"
|
||||
dependencies = [
|
||||
"console",
|
||||
"number_prefix",
|
||||
"portable-atomic",
|
||||
"tokio",
|
||||
"unicode-width",
|
||||
"web-time",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "inout"
|
||||
version = "0.1.4"
|
||||
@@ -2124,6 +2178,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "number_prefix"
|
||||
version = "0.4.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
|
||||
|
||||
[[package]]
|
||||
name = "object"
|
||||
version = "0.37.3"
|
||||
@@ -2269,6 +2329,17 @@ dependencies = [
|
||||
"windows-link",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "password-hash"
|
||||
version = "0.5.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166"
|
||||
dependencies = [
|
||||
"base64ct",
|
||||
"rand_core 0.6.4",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "pbkdf2"
|
||||
version = "0.12.2"
|
||||
@@ -2446,6 +2517,12 @@ dependencies = [
|
||||
"universal-hash",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "portable-atomic"
|
||||
version = "1.13.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
|
||||
|
||||
[[package]]
|
||||
name = "potential_utf"
|
||||
version = "0.1.4"
|
||||
@@ -3469,7 +3546,7 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
||||
|
||||
[[package]]
|
||||
name = "sunbeam"
|
||||
version = "1.0.0"
|
||||
version = "1.1.1"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"clap",
|
||||
@@ -3482,8 +3559,10 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "sunbeam-sdk"
|
||||
version = "1.0.0"
|
||||
version = "1.1.1"
|
||||
dependencies = [
|
||||
"aes-gcm",
|
||||
"argon2",
|
||||
"base64",
|
||||
"bytes",
|
||||
"chrono",
|
||||
@@ -3492,6 +3571,7 @@ dependencies = [
|
||||
"flate2",
|
||||
"futures",
|
||||
"hmac",
|
||||
"indicatif",
|
||||
"k8s-openapi",
|
||||
"kube",
|
||||
"lettre",
|
||||
@@ -3914,6 +3994,12 @@ version = "1.0.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-xid"
|
||||
version = "0.2.6"
|
||||
|
||||
19
src/kube.rs
19
src/kube.rs
@@ -305,6 +305,25 @@ pub async fn create_secret(ns: &str, name: &str, data: HashMap<String, String>)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Find the first Running pod matching a label selector in a namespace.
|
||||
pub async fn find_pod_by_label(ns: &str, label: &str) -> Option<String> {
|
||||
let client = get_client().await.ok()?;
|
||||
let pods: kube::Api<k8s_openapi::api::core::v1::Pod> =
|
||||
kube::Api::namespaced(client, ns);
|
||||
let lp = kube::api::ListParams::default().labels(label);
|
||||
let pod_list = pods.list(&lp).await.ok()?;
|
||||
pod_list
|
||||
.items
|
||||
.iter()
|
||||
.find(|p| {
|
||||
p.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.phase.as_deref())
|
||||
== Some("Running")
|
||||
})
|
||||
.and_then(|p| p.metadata.name.clone())
|
||||
}
|
||||
|
||||
/// Execute a command in a pod and return (exit_code, stdout).
|
||||
#[allow(dead_code)]
|
||||
pub async fn kube_exec(
|
||||
|
||||
@@ -475,10 +475,16 @@ async fn os_api(path: &str, method: &str, body: Option<&str>) -> Option<String>
|
||||
curl_args.extend_from_slice(&["-H", "Content-Type: application/json", "-d", &body_string]);
|
||||
}
|
||||
|
||||
// Build the full exec command: exec deploy/opensearch -n data -c opensearch -- curl ...
|
||||
let exec_cmd = curl_args;
|
||||
// Resolve the actual pod name from the app=opensearch label
|
||||
let pod_name = match crate::kube::find_pod_by_label("data", "app=opensearch").await {
|
||||
Some(name) => name,
|
||||
None => {
|
||||
crate::output::warn("No OpenSearch pod found in data namespace");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
match crate::kube::kube_exec("data", "opensearch-0", &exec_cmd, Some("opensearch")).await {
|
||||
match crate::kube::kube_exec("data", &pod_name, &curl_args, Some("opensearch")).await {
|
||||
Ok((0, out)) if !out.is_empty() => Some(out),
|
||||
_ => None,
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
[package]
|
||||
name = "sunbeam-sdk"
|
||||
version = "1.0.0"
|
||||
version = "1.1.1"
|
||||
edition = "2024"
|
||||
description = "Sunbeam SDK — reusable library for cluster management"
|
||||
description = "Sunbeam Studios SDK, CLI, and ecosystem integrations"
|
||||
repository = "https://src.sunbeam.pt/studio/cli"
|
||||
license = "MIT"
|
||||
publish = ["sunbeam"]
|
||||
@@ -53,6 +53,9 @@ sha2 = "0.10"
|
||||
hmac = "0.12"
|
||||
base64 = "0.22"
|
||||
rand = "0.8"
|
||||
aes-gcm = "0.10"
|
||||
argon2 = "0.5"
|
||||
indicatif = { version = "0.17", features = ["tokio"] }
|
||||
|
||||
# Certificate generation
|
||||
rcgen = "0.14"
|
||||
|
||||
@@ -29,9 +29,9 @@ impl ServiceClient for HydraClient {
|
||||
}
|
||||
|
||||
impl HydraClient {
|
||||
/// Build a HydraClient from domain (e.g. `https://auth.{domain}`).
|
||||
/// Build a HydraClient from domain (e.g. `https://hydra.{domain}`).
|
||||
pub fn connect(domain: &str) -> Self {
|
||||
let base_url = format!("https://auth.{domain}");
|
||||
let base_url = format!("https://hydra.{domain}");
|
||||
Self::from_parts(base_url, AuthMethod::None)
|
||||
}
|
||||
|
||||
@@ -467,7 +467,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_connect_url() {
|
||||
let c = HydraClient::connect("sunbeam.pt");
|
||||
assert_eq!(c.base_url(), "https://auth.sunbeam.pt");
|
||||
assert_eq!(c.base_url(), "https://hydra.sunbeam.pt");
|
||||
assert_eq!(c.service_name(), "hydra");
|
||||
}
|
||||
|
||||
|
||||
@@ -674,6 +674,28 @@ pub fn get_gitea_token() -> Result<String> {
|
||||
})
|
||||
}
|
||||
|
||||
/// Get cached SSO access token synchronously (reads from cache file).
|
||||
/// If the token was recently refreshed by the async `get_token()`, this
|
||||
/// returns the fresh one. Used by DynamicBearer for per-request auth.
|
||||
pub fn get_token_sync() -> Result<String> {
|
||||
let cached = read_cache().map_err(|_| {
|
||||
SunbeamError::identity("Not logged in. Run `sunbeam auth login` first.")
|
||||
})?;
|
||||
Ok(cached.access_token)
|
||||
}
|
||||
|
||||
/// Get cached OIDC id_token (JWT).
|
||||
pub fn get_id_token() -> Result<String> {
|
||||
let tokens = read_cache().map_err(|_| {
|
||||
SunbeamError::identity("Not logged in. Run `sunbeam auth login` first.")
|
||||
})?;
|
||||
tokens.id_token.ok_or_else(|| {
|
||||
SunbeamError::identity(
|
||||
"No id_token cached. Run `sunbeam auth sso` to get one.",
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Remove cached auth tokens.
|
||||
pub async fn cmd_auth_logout() -> Result<()> {
|
||||
let path = cache_path();
|
||||
|
||||
@@ -7,7 +7,7 @@
|
||||
use crate::error::{Result, ResultExt, SunbeamError};
|
||||
use reqwest::Method;
|
||||
use serde::{de::DeserializeOwned, Serialize};
|
||||
use std::sync::OnceLock;
|
||||
use tokio::sync::OnceCell;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// AuthMethod
|
||||
@@ -20,6 +20,8 @@ pub enum AuthMethod {
|
||||
None,
|
||||
/// Bearer token (`Authorization: Bearer <token>`).
|
||||
Bearer(String),
|
||||
/// Dynamic bearer — resolves token fresh on each request (survives expiry).
|
||||
DynamicBearer,
|
||||
/// Custom header (e.g. `X-Vault-Token`).
|
||||
Header { name: &'static str, value: String },
|
||||
/// Gitea-style PAT (`Authorization: token <pat>`).
|
||||
@@ -84,6 +86,12 @@ impl HttpTransport {
|
||||
AuthMethod::Bearer(token) => {
|
||||
req = req.bearer_auth(token);
|
||||
}
|
||||
AuthMethod::DynamicBearer => {
|
||||
// Resolve token fresh on each request — survives token expiry/refresh.
|
||||
if let Ok(token) = crate::auth::get_token_sync() {
|
||||
req = req.bearer_auth(token);
|
||||
}
|
||||
}
|
||||
AuthMethod::Header { name, value } => {
|
||||
req = req.header(*name, value);
|
||||
}
|
||||
@@ -222,51 +230,51 @@ impl HttpTransport {
|
||||
/// Unified entry point for all service clients.
|
||||
///
|
||||
/// Lazily constructs and caches per-service clients from the active config
|
||||
/// context. Each accessor returns a `&Client` reference, constructing on
|
||||
/// first call via [`OnceLock`].
|
||||
/// context. Each accessor resolves auth and returns a `&Client` reference,
|
||||
/// constructing on first call via [`OnceCell`] (async-aware).
|
||||
///
|
||||
/// Auth is resolved per-client:
|
||||
/// - SSO bearer (`get_token()`) — admin APIs, Matrix, La Suite, OpenSearch
|
||||
/// - Gitea PAT (`get_gitea_token()`) — Gitea
|
||||
/// - None — Prometheus, Loki, S3, LiveKit
|
||||
pub struct SunbeamClient {
|
||||
ctx: crate::config::Context,
|
||||
domain: String,
|
||||
// Phase 1
|
||||
#[cfg(feature = "identity")]
|
||||
kratos: OnceLock<crate::identity::KratosClient>,
|
||||
kratos: OnceCell<crate::identity::KratosClient>,
|
||||
#[cfg(feature = "identity")]
|
||||
hydra: OnceLock<crate::auth::hydra::HydraClient>,
|
||||
// Phase 2
|
||||
hydra: OnceCell<crate::auth::hydra::HydraClient>,
|
||||
#[cfg(feature = "gitea")]
|
||||
gitea: OnceLock<crate::gitea::GiteaClient>,
|
||||
// Phase 3
|
||||
gitea: OnceCell<crate::gitea::GiteaClient>,
|
||||
#[cfg(feature = "matrix")]
|
||||
matrix: OnceLock<crate::matrix::MatrixClient>,
|
||||
matrix: OnceCell<crate::matrix::MatrixClient>,
|
||||
#[cfg(feature = "opensearch")]
|
||||
opensearch: OnceLock<crate::search::OpenSearchClient>,
|
||||
opensearch: OnceCell<crate::search::OpenSearchClient>,
|
||||
#[cfg(feature = "s3")]
|
||||
s3: OnceLock<crate::storage::S3Client>,
|
||||
s3: OnceCell<crate::storage::S3Client>,
|
||||
#[cfg(feature = "livekit")]
|
||||
livekit: OnceLock<crate::media::LiveKitClient>,
|
||||
livekit: OnceCell<crate::media::LiveKitClient>,
|
||||
#[cfg(feature = "monitoring")]
|
||||
prometheus: OnceLock<crate::monitoring::PrometheusClient>,
|
||||
prometheus: OnceCell<crate::monitoring::PrometheusClient>,
|
||||
#[cfg(feature = "monitoring")]
|
||||
loki: OnceLock<crate::monitoring::LokiClient>,
|
||||
loki: OnceCell<crate::monitoring::LokiClient>,
|
||||
#[cfg(feature = "monitoring")]
|
||||
grafana: OnceLock<crate::monitoring::GrafanaClient>,
|
||||
// Phase 4
|
||||
grafana: OnceCell<crate::monitoring::GrafanaClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
people: OnceLock<crate::lasuite::PeopleClient>,
|
||||
people: OnceCell<crate::lasuite::PeopleClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
docs: OnceLock<crate::lasuite::DocsClient>,
|
||||
docs: OnceCell<crate::lasuite::DocsClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
meet: OnceLock<crate::lasuite::MeetClient>,
|
||||
meet: OnceCell<crate::lasuite::MeetClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
drive: OnceLock<crate::lasuite::DriveClient>,
|
||||
drive: OnceCell<crate::lasuite::DriveClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
messages: OnceLock<crate::lasuite::MessagesClient>,
|
||||
messages: OnceCell<crate::lasuite::MessagesClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
calendars: OnceLock<crate::lasuite::CalendarsClient>,
|
||||
calendars: OnceCell<crate::lasuite::CalendarsClient>,
|
||||
#[cfg(feature = "lasuite")]
|
||||
find: OnceLock<crate::lasuite::FindClient>,
|
||||
// Bao/Planka stay in their existing modules
|
||||
bao: OnceLock<crate::openbao::BaoClient>,
|
||||
find: OnceCell<crate::lasuite::FindClient>,
|
||||
bao: OnceCell<crate::openbao::BaoClient>,
|
||||
}
|
||||
|
||||
impl SunbeamClient {
|
||||
@@ -276,40 +284,40 @@ impl SunbeamClient {
|
||||
domain: ctx.domain.clone(),
|
||||
ctx: ctx.clone(),
|
||||
#[cfg(feature = "identity")]
|
||||
kratos: OnceLock::new(),
|
||||
kratos: OnceCell::new(),
|
||||
#[cfg(feature = "identity")]
|
||||
hydra: OnceLock::new(),
|
||||
hydra: OnceCell::new(),
|
||||
#[cfg(feature = "gitea")]
|
||||
gitea: OnceLock::new(),
|
||||
gitea: OnceCell::new(),
|
||||
#[cfg(feature = "matrix")]
|
||||
matrix: OnceLock::new(),
|
||||
matrix: OnceCell::new(),
|
||||
#[cfg(feature = "opensearch")]
|
||||
opensearch: OnceLock::new(),
|
||||
opensearch: OnceCell::new(),
|
||||
#[cfg(feature = "s3")]
|
||||
s3: OnceLock::new(),
|
||||
s3: OnceCell::new(),
|
||||
#[cfg(feature = "livekit")]
|
||||
livekit: OnceLock::new(),
|
||||
livekit: OnceCell::new(),
|
||||
#[cfg(feature = "monitoring")]
|
||||
prometheus: OnceLock::new(),
|
||||
prometheus: OnceCell::new(),
|
||||
#[cfg(feature = "monitoring")]
|
||||
loki: OnceLock::new(),
|
||||
loki: OnceCell::new(),
|
||||
#[cfg(feature = "monitoring")]
|
||||
grafana: OnceLock::new(),
|
||||
grafana: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
people: OnceLock::new(),
|
||||
people: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
docs: OnceLock::new(),
|
||||
docs: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
meet: OnceLock::new(),
|
||||
meet: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
drive: OnceLock::new(),
|
||||
drive: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
messages: OnceLock::new(),
|
||||
messages: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
calendars: OnceLock::new(),
|
||||
calendars: OnceCell::new(),
|
||||
#[cfg(feature = "lasuite")]
|
||||
find: OnceLock::new(),
|
||||
bao: OnceLock::new(),
|
||||
find: OnceCell::new(),
|
||||
bao: OnceCell::new(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -323,131 +331,227 @@ impl SunbeamClient {
|
||||
&self.ctx
|
||||
}
|
||||
|
||||
// -- Lazy accessors (each feature-gated) --------------------------------
|
||||
// -- Auth helpers --------------------------------------------------------
|
||||
|
||||
/// Get cached SSO bearer token (from `sunbeam auth sso`).
|
||||
async fn sso_token(&self) -> Result<String> {
|
||||
crate::auth::get_token().await
|
||||
}
|
||||
|
||||
/// Get cached Gitea PAT (from `sunbeam auth git`).
|
||||
fn gitea_token(&self) -> Result<String> {
|
||||
crate::auth::get_gitea_token()
|
||||
}
|
||||
|
||||
/// Get cached OIDC id_token (JWT with claims including admin flag).
|
||||
fn id_token(&self) -> Result<String> {
|
||||
crate::auth::get_id_token()
|
||||
}
|
||||
|
||||
// -- Lazy async accessors (each feature-gated) ---------------------------
|
||||
//
|
||||
// Each accessor resolves the appropriate auth and constructs the client
|
||||
// with from_parts(url, auth). Cached after first call.
|
||||
|
||||
#[cfg(feature = "identity")]
|
||||
pub fn kratos(&self) -> &crate::identity::KratosClient {
|
||||
self.kratos.get_or_init(|| {
|
||||
crate::identity::KratosClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn kratos(&self) -> Result<&crate::identity::KratosClient> {
|
||||
self.kratos.get_or_try_init(|| async {
|
||||
let token = self.sso_token().await?;
|
||||
let url = format!("https://id.{}", self.domain);
|
||||
Ok(crate::identity::KratosClient::from_parts(url, AuthMethod::Bearer(token)))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "identity")]
|
||||
pub fn hydra(&self) -> &crate::auth::hydra::HydraClient {
|
||||
self.hydra.get_or_init(|| {
|
||||
crate::auth::hydra::HydraClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn hydra(&self) -> Result<&crate::auth::hydra::HydraClient> {
|
||||
self.hydra.get_or_try_init(|| async {
|
||||
let token = self.sso_token().await?;
|
||||
let url = format!("https://hydra.{}", self.domain);
|
||||
Ok(crate::auth::hydra::HydraClient::from_parts(url, AuthMethod::Bearer(token)))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "gitea")]
|
||||
pub fn gitea(&self) -> &crate::gitea::GiteaClient {
|
||||
self.gitea.get_or_init(|| {
|
||||
crate::gitea::GiteaClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn gitea(&self) -> Result<&crate::gitea::GiteaClient> {
|
||||
self.gitea.get_or_try_init(|| async {
|
||||
let token = self.gitea_token()?;
|
||||
let url = format!("https://src.{}/api/v1", self.domain);
|
||||
Ok(crate::gitea::GiteaClient::from_parts(url, AuthMethod::Token(token)))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "matrix")]
|
||||
pub fn matrix(&self) -> &crate::matrix::MatrixClient {
|
||||
self.matrix.get_or_init(|| {
|
||||
crate::matrix::MatrixClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn matrix(&self) -> Result<&crate::matrix::MatrixClient> {
|
||||
self.matrix.get_or_try_init(|| async {
|
||||
let token = self.sso_token().await?;
|
||||
let url = format!("https://messages.{}/_matrix", self.domain);
|
||||
Ok(crate::matrix::MatrixClient::from_parts(url, AuthMethod::Bearer(token)))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "opensearch")]
|
||||
pub fn opensearch(&self) -> &crate::search::OpenSearchClient {
|
||||
self.opensearch.get_or_init(|| {
|
||||
crate::search::OpenSearchClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn opensearch(&self) -> Result<&crate::search::OpenSearchClient> {
|
||||
self.opensearch.get_or_try_init(|| async {
|
||||
let token = self.sso_token().await?;
|
||||
let url = format!("https://search.{}", self.domain);
|
||||
Ok(crate::search::OpenSearchClient::from_parts(url, AuthMethod::Bearer(token)))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "s3")]
|
||||
pub fn s3(&self) -> &crate::storage::S3Client {
|
||||
self.s3.get_or_init(|| {
|
||||
crate::storage::S3Client::connect(&self.domain)
|
||||
})
|
||||
pub async fn s3(&self) -> Result<&crate::storage::S3Client> {
|
||||
self.s3.get_or_try_init(|| async {
|
||||
Ok(crate::storage::S3Client::connect(&self.domain))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "livekit")]
|
||||
pub fn livekit(&self) -> &crate::media::LiveKitClient {
|
||||
self.livekit.get_or_init(|| {
|
||||
crate::media::LiveKitClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn livekit(&self) -> Result<&crate::media::LiveKitClient> {
|
||||
self.livekit.get_or_try_init(|| async {
|
||||
Ok(crate::media::LiveKitClient::connect(&self.domain))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "monitoring")]
|
||||
pub fn prometheus(&self) -> &crate::monitoring::PrometheusClient {
|
||||
self.prometheus.get_or_init(|| {
|
||||
crate::monitoring::PrometheusClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn prometheus(&self) -> Result<&crate::monitoring::PrometheusClient> {
|
||||
self.prometheus.get_or_try_init(|| async {
|
||||
Ok(crate::monitoring::PrometheusClient::connect(&self.domain))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "monitoring")]
|
||||
pub fn loki(&self) -> &crate::monitoring::LokiClient {
|
||||
self.loki.get_or_init(|| {
|
||||
crate::monitoring::LokiClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn loki(&self) -> Result<&crate::monitoring::LokiClient> {
|
||||
self.loki.get_or_try_init(|| async {
|
||||
Ok(crate::monitoring::LokiClient::connect(&self.domain))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "monitoring")]
|
||||
pub fn grafana(&self) -> &crate::monitoring::GrafanaClient {
|
||||
self.grafana.get_or_init(|| {
|
||||
crate::monitoring::GrafanaClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn grafana(&self) -> Result<&crate::monitoring::GrafanaClient> {
|
||||
self.grafana.get_or_try_init(|| async {
|
||||
Ok(crate::monitoring::GrafanaClient::connect(&self.domain))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn people(&self) -> &crate::lasuite::PeopleClient {
|
||||
self.people.get_or_init(|| {
|
||||
crate::lasuite::PeopleClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn people(&self) -> Result<&crate::lasuite::PeopleClient> {
|
||||
// Ensure we have a valid token (triggers refresh if expired).
|
||||
self.sso_token().await?;
|
||||
self.people.get_or_try_init(|| async {
|
||||
let url = format!("https://people.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::PeopleClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn docs(&self) -> &crate::lasuite::DocsClient {
|
||||
self.docs.get_or_init(|| {
|
||||
crate::lasuite::DocsClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn docs(&self) -> Result<&crate::lasuite::DocsClient> {
|
||||
self.sso_token().await?;
|
||||
self.docs.get_or_try_init(|| async {
|
||||
let url = format!("https://docs.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::DocsClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn meet(&self) -> &crate::lasuite::MeetClient {
|
||||
self.meet.get_or_init(|| {
|
||||
crate::lasuite::MeetClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn meet(&self) -> Result<&crate::lasuite::MeetClient> {
|
||||
self.sso_token().await?;
|
||||
self.meet.get_or_try_init(|| async {
|
||||
let url = format!("https://meet.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::MeetClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn drive(&self) -> &crate::lasuite::DriveClient {
|
||||
self.drive.get_or_init(|| {
|
||||
crate::lasuite::DriveClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn drive(&self) -> Result<&crate::lasuite::DriveClient> {
|
||||
self.sso_token().await?;
|
||||
self.drive.get_or_try_init(|| async {
|
||||
let url = format!("https://drive.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::DriveClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn messages(&self) -> &crate::lasuite::MessagesClient {
|
||||
self.messages.get_or_init(|| {
|
||||
crate::lasuite::MessagesClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn messages(&self) -> Result<&crate::lasuite::MessagesClient> {
|
||||
self.sso_token().await?;
|
||||
self.messages.get_or_try_init(|| async {
|
||||
let url = format!("https://mail.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::MessagesClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn calendars(&self) -> &crate::lasuite::CalendarsClient {
|
||||
self.calendars.get_or_init(|| {
|
||||
crate::lasuite::CalendarsClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn calendars(&self) -> Result<&crate::lasuite::CalendarsClient> {
|
||||
self.sso_token().await?;
|
||||
self.calendars.get_or_try_init(|| async {
|
||||
let url = format!("https://calendar.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::CalendarsClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
#[cfg(feature = "lasuite")]
|
||||
pub fn find(&self) -> &crate::lasuite::FindClient {
|
||||
self.find.get_or_init(|| {
|
||||
crate::lasuite::FindClient::connect(&self.domain)
|
||||
})
|
||||
pub async fn find(&self) -> Result<&crate::lasuite::FindClient> {
|
||||
self.sso_token().await?;
|
||||
self.find.get_or_try_init(|| async {
|
||||
let url = format!("https://find.{}/external_api/v1.0", self.domain);
|
||||
Ok(crate::lasuite::FindClient::from_parts(url, AuthMethod::DynamicBearer))
|
||||
}).await
|
||||
}
|
||||
|
||||
pub fn bao(&self, base_url: &str) -> &crate::openbao::BaoClient {
|
||||
self.bao.get_or_init(|| {
|
||||
crate::openbao::BaoClient::new(base_url)
|
||||
})
|
||||
pub async fn bao(&self) -> Result<&crate::openbao::BaoClient> {
|
||||
self.bao.get_or_try_init(|| async {
|
||||
let url = format!("https://vault.{}", self.domain);
|
||||
let id_token = self.id_token()?;
|
||||
let bearer = self.sso_token().await?;
|
||||
|
||||
// Authenticate to OpenBao via JWT auth method using the OIDC id_token.
|
||||
// Try admin role first (for users with admin: true), fall back to reader.
|
||||
let http = reqwest::Client::new();
|
||||
let vault_token = {
|
||||
let mut token = None;
|
||||
for role in &["cli-admin", "cli-reader"] {
|
||||
let resp = http
|
||||
.post(format!("{url}/v1/auth/jwt/login"))
|
||||
.bearer_auth(&bearer)
|
||||
.json(&serde_json::json!({ "jwt": id_token, "role": role }))
|
||||
.send()
|
||||
.await;
|
||||
match resp {
|
||||
Ok(r) => {
|
||||
let status = r.status();
|
||||
if status.is_success() {
|
||||
if let Ok(body) = r.json::<serde_json::Value>().await {
|
||||
if let Some(t) = body["auth"]["client_token"].as_str() {
|
||||
tracing::debug!("vault JWT login ok (role={role})");
|
||||
token = Some(t.to_string());
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
let body = r.text().await.unwrap_or_default();
|
||||
tracing::debug!("vault JWT login {status} (role={role}): {body}");
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::debug!("vault JWT login request failed (role={role}): {e}");
|
||||
}
|
||||
}
|
||||
}
|
||||
match token {
|
||||
Some(t) => t,
|
||||
None => {
|
||||
tracing::debug!("vault JWT auth failed, falling back to local keystore");
|
||||
match crate::vault_keystore::load_keystore(&self.domain) {
|
||||
Ok(ks) => ks.root_token,
|
||||
Err(_) => return Err(SunbeamError::secrets(
|
||||
"Vault auth failed: no valid JWT role and no local keystore. Run `sunbeam auth sso` and retry."
|
||||
)),
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(crate::openbao::BaoClient::with_proxy_auth(&url, &vault_token, &bearer))
|
||||
}).await
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
use clap::Subcommand;
|
||||
|
||||
use crate::client::SunbeamClient;
|
||||
use crate::error::{Result, SunbeamError};
|
||||
use crate::gitea::types::*;
|
||||
use crate::gitea::GiteaClient;
|
||||
use crate::output::{render, render_list, read_json_input, OutputFormat};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -435,7 +435,8 @@ fn notification_row(n: &Notification) -> Vec<String> {
|
||||
// Dispatch
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
pub async fn dispatch(cmd: VcsCommand, client: &GiteaClient, fmt: OutputFormat) -> Result<()> {
|
||||
pub async fn dispatch(cmd: VcsCommand, client: &SunbeamClient, fmt: OutputFormat) -> Result<()> {
|
||||
let client = client.gitea().await?;
|
||||
match cmd {
|
||||
// -- Repo -----------------------------------------------------------
|
||||
VcsCommand::Repo { action } => match action {
|
||||
|
||||
@@ -349,7 +349,7 @@ pub async fn dispatch(
|
||||
AuthCommand::Courier { action } => dispatch_courier(action, client, output).await,
|
||||
// -- Kratos: Health -----------------------------------------------------
|
||||
AuthCommand::Health => {
|
||||
let status = client.kratos().alive().await?;
|
||||
let status = client.kratos().await?.alive().await?;
|
||||
output::render(&status, output)
|
||||
}
|
||||
// -- Hydra: Client ------------------------------------------------------
|
||||
@@ -384,7 +384,7 @@ async fn dispatch_identity(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let kratos = client.kratos();
|
||||
let kratos = client.kratos().await?;
|
||||
match action {
|
||||
IdentityAction::List { page, page_size } => {
|
||||
let items = kratos.list_identities(page, page_size).await?;
|
||||
@@ -437,7 +437,7 @@ async fn dispatch_session(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let kratos = client.kratos();
|
||||
let kratos = client.kratos().await?;
|
||||
match action {
|
||||
SessionAction::List {
|
||||
page_size,
|
||||
@@ -486,7 +486,7 @@ async fn dispatch_recovery(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let kratos = client.kratos();
|
||||
let kratos = client.kratos().await?;
|
||||
match action {
|
||||
RecoveryAction::CreateCode { id, expires_in } => {
|
||||
let item = kratos
|
||||
@@ -512,7 +512,7 @@ async fn dispatch_schema(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let kratos = client.kratos();
|
||||
let kratos = client.kratos().await?;
|
||||
match action {
|
||||
SchemaAction::List => {
|
||||
let items = kratos.list_schemas().await?;
|
||||
@@ -539,7 +539,7 @@ async fn dispatch_courier(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let kratos = client.kratos();
|
||||
let kratos = client.kratos().await?;
|
||||
match action {
|
||||
CourierAction::List {
|
||||
page_size,
|
||||
@@ -579,7 +579,7 @@ async fn dispatch_client(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let hydra = client.hydra();
|
||||
let hydra = client.hydra().await?;
|
||||
match action {
|
||||
ClientAction::List { limit, offset } => {
|
||||
let items = hydra.list_clients(limit, offset).await?;
|
||||
@@ -631,7 +631,7 @@ async fn dispatch_jwk(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let hydra = client.hydra();
|
||||
let hydra = client.hydra().await?;
|
||||
match action {
|
||||
JwkAction::List { set_name } => {
|
||||
let item = hydra.get_jwk_set(&set_name).await?;
|
||||
@@ -665,7 +665,7 @@ async fn dispatch_issuer(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let hydra = client.hydra();
|
||||
let hydra = client.hydra().await?;
|
||||
match action {
|
||||
IssuerAction::List => {
|
||||
let items = hydra.list_trusted_issuers().await?;
|
||||
@@ -711,7 +711,7 @@ async fn dispatch_token(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let hydra = client.hydra();
|
||||
let hydra = client.hydra().await?;
|
||||
match action {
|
||||
TokenAction::Introspect { token } => {
|
||||
let item = hydra.introspect_token(&token).await?;
|
||||
|
||||
@@ -296,6 +296,9 @@ pub async fn create_secret(ns: &str, name: &str, data: HashMap<String, String>)
|
||||
"metadata": {
|
||||
"name": name,
|
||||
"namespace": ns,
|
||||
"labels": {
|
||||
"sunbeam.dev/managed-by": "sunbeam"
|
||||
},
|
||||
},
|
||||
"type": "Opaque",
|
||||
"data": encoded,
|
||||
@@ -308,6 +311,25 @@ pub async fn create_secret(ns: &str, name: &str, data: HashMap<String, String>)
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Find the first Running pod matching a label selector in a namespace.
|
||||
pub async fn find_pod_by_label(ns: &str, label: &str) -> Option<String> {
|
||||
let client = get_client().await.ok()?;
|
||||
let pods: kube::Api<k8s_openapi::api::core::v1::Pod> =
|
||||
kube::Api::namespaced(client.clone(), ns);
|
||||
let lp = kube::api::ListParams::default().labels(label);
|
||||
let pod_list = pods.list(&lp).await.ok()?;
|
||||
pod_list
|
||||
.items
|
||||
.iter()
|
||||
.find(|p| {
|
||||
p.status
|
||||
.as_ref()
|
||||
.and_then(|s| s.phase.as_deref())
|
||||
== Some("Running")
|
||||
})
|
||||
.and_then(|p| p.metadata.name.clone())
|
||||
}
|
||||
|
||||
/// Execute a command in a pod and return (exit_code, stdout).
|
||||
#[allow(dead_code)]
|
||||
pub async fn kube_exec(
|
||||
|
||||
@@ -6,44 +6,6 @@ use crate::client::SunbeamClient;
|
||||
use crate::error::Result;
|
||||
use crate::output::{self, OutputFormat};
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Helper: build an authenticated La Suite client
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
async fn people_client(domain: &str) -> Result<super::PeopleClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::PeopleClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
async fn docs_client(domain: &str) -> Result<super::DocsClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::DocsClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
async fn meet_client(domain: &str) -> Result<super::MeetClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::MeetClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
async fn drive_client(domain: &str) -> Result<super::DriveClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::DriveClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
async fn messages_client(domain: &str) -> Result<super::MessagesClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::MessagesClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
async fn calendars_client(domain: &str) -> Result<super::CalendarsClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::CalendarsClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
async fn find_client(domain: &str) -> Result<super::FindClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
Ok(super::FindClient::connect(domain).with_token(&token))
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// People
|
||||
@@ -143,7 +105,7 @@ pub async fn dispatch_people(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let people = people_client(client.domain()).await?;
|
||||
let people = client.people().await?;
|
||||
match cmd {
|
||||
PeopleCommand::Contact { action } => match action {
|
||||
ContactAction::List { page } => {
|
||||
@@ -346,7 +308,7 @@ pub async fn dispatch_docs(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let docs = docs_client(client.domain()).await?;
|
||||
let docs = client.docs().await?;
|
||||
match cmd {
|
||||
DocsCommand::Document { action } => match action {
|
||||
DocumentAction::List { page } => {
|
||||
@@ -498,7 +460,7 @@ pub async fn dispatch_meet(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let meet = meet_client(client.domain()).await?;
|
||||
let meet = client.meet().await?;
|
||||
match cmd {
|
||||
MeetCommand::Room { action } => match action {
|
||||
RoomAction::List { page } => {
|
||||
@@ -588,6 +550,18 @@ pub enum DriveCommand {
|
||||
#[command(subcommand)]
|
||||
action: PermissionAction,
|
||||
},
|
||||
/// Upload a local file or directory to a Drive folder.
|
||||
Upload {
|
||||
/// Local path to upload (file or directory).
|
||||
#[arg(short, long)]
|
||||
path: String,
|
||||
/// Target Drive folder ID.
|
||||
#[arg(short = 't', long)]
|
||||
folder_id: String,
|
||||
/// Number of concurrent uploads.
|
||||
#[arg(long, default_value = "3")]
|
||||
parallel: usize,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
@@ -645,20 +619,21 @@ pub async fn dispatch_drive(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let drive = drive_client(client.domain()).await?;
|
||||
let drive = client.drive().await?;
|
||||
match cmd {
|
||||
DriveCommand::File { action } => match action {
|
||||
FileAction::List { page } => {
|
||||
let page_data = drive.list_files(page).await?;
|
||||
output::render_list(
|
||||
&page_data.results,
|
||||
&["ID", "NAME", "SIZE", "MIME_TYPE"],
|
||||
&["ID", "TITLE", "TYPE", "SIZE", "MIMETYPE"],
|
||||
|f| {
|
||||
vec![
|
||||
f.id.clone(),
|
||||
f.name.clone().unwrap_or_default(),
|
||||
f.title.clone().unwrap_or_default(),
|
||||
f.item_type.clone().unwrap_or_default(),
|
||||
f.size.map_or("-".into(), |s| s.to_string()),
|
||||
f.mime_type.clone().unwrap_or_default(),
|
||||
f.mimetype.clone().unwrap_or_default(),
|
||||
]
|
||||
},
|
||||
fmt,
|
||||
@@ -684,12 +659,13 @@ pub async fn dispatch_drive(
|
||||
let page_data = drive.list_folders(page).await?;
|
||||
output::render_list(
|
||||
&page_data.results,
|
||||
&["ID", "NAME", "PARENT_ID"],
|
||||
&["ID", "TITLE", "CHILDREN", "CREATED"],
|
||||
|f| {
|
||||
vec![
|
||||
f.id.clone(),
|
||||
f.name.clone().unwrap_or_default(),
|
||||
f.parent_id.clone().unwrap_or_default(),
|
||||
f.title.clone().unwrap_or_default(),
|
||||
f.numchild.map_or("-".into(), |n| n.to_string()),
|
||||
f.created_at.clone().unwrap_or_default(),
|
||||
]
|
||||
},
|
||||
fmt,
|
||||
@@ -725,9 +701,398 @@ pub async fn dispatch_drive(
|
||||
)
|
||||
}
|
||||
},
|
||||
DriveCommand::Upload { path, folder_id, parallel } => {
|
||||
upload_recursive(drive, &path, &folder_id, parallel).await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A file that needs uploading, collected during the directory-walk phase.
|
||||
struct UploadJob {
|
||||
local_path: std::path::PathBuf,
|
||||
parent_id: String,
|
||||
file_size: u64,
|
||||
relative_path: String,
|
||||
}
|
||||
|
||||
/// Recursively upload a local file or directory to a Drive folder.
|
||||
async fn upload_recursive(
|
||||
drive: &super::DriveClient,
|
||||
local_path: &str,
|
||||
parent_id: &str,
|
||||
parallel: usize,
|
||||
) -> Result<()> {
|
||||
use indicatif::{HumanBytes, MultiProgress, ProgressBar, ProgressStyle};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Semaphore;
|
||||
|
||||
let path = std::path::Path::new(local_path);
|
||||
if !path.exists() {
|
||||
return Err(crate::error::SunbeamError::Other(format!(
|
||||
"Path does not exist: {local_path}"
|
||||
)));
|
||||
}
|
||||
|
||||
// Phase 1 — Walk and collect: create folders sequentially, gather file jobs.
|
||||
let mut jobs = Vec::new();
|
||||
if path.is_file() {
|
||||
let file_size = std::fs::metadata(path)
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("stat: {e}")))?
|
||||
.len();
|
||||
let filename = path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("unnamed");
|
||||
if !filename.starts_with('.') {
|
||||
jobs.push(UploadJob {
|
||||
local_path: path.to_path_buf(),
|
||||
parent_id: parent_id.to_string(),
|
||||
file_size,
|
||||
relative_path: filename.to_string(),
|
||||
});
|
||||
}
|
||||
} else if path.is_dir() {
|
||||
collect_upload_jobs(drive, path, parent_id, "", &mut jobs).await?;
|
||||
} else {
|
||||
return Err(crate::error::SunbeamError::Other(format!(
|
||||
"Not a file or directory: {local_path}"
|
||||
)));
|
||||
}
|
||||
|
||||
if jobs.is_empty() {
|
||||
output::ok("Nothing to upload.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let total_files = jobs.len() as u64;
|
||||
let total_bytes: u64 = jobs.iter().map(|j| j.file_size).sum();
|
||||
|
||||
// Clear the folder creation line
|
||||
eprint!("\r\x1b[K");
|
||||
|
||||
// Phase 2 — Parallel upload with progress bars.
|
||||
let multi = MultiProgress::new();
|
||||
|
||||
// Overall bar tracks file count. Bandwidth is computed manually in the message.
|
||||
let overall_style = ProgressStyle::with_template(
|
||||
" {spinner:.green} [{elapsed_precise}] {bar:40.cyan/blue} {pos}/{len} files {msg}",
|
||||
)
|
||||
.unwrap()
|
||||
.progress_chars("█▓░");
|
||||
let overall = multi.add(ProgressBar::new(total_files));
|
||||
overall.set_style(overall_style);
|
||||
overall.enable_steady_tick(std::time::Duration::from_millis(100));
|
||||
let completed_bytes = std::sync::Arc::new(std::sync::atomic::AtomicU64::new(0));
|
||||
|
||||
let file_style = ProgressStyle::with_template(
|
||||
" {spinner:.cyan} {wide_msg}",
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let sem = Arc::new(Semaphore::new(parallel));
|
||||
let drive = Arc::new(drive.clone());
|
||||
let mut handles = Vec::new();
|
||||
let start = std::time::Instant::now();
|
||||
|
||||
for job in jobs {
|
||||
let permit = sem.clone().acquire_owned().await.unwrap();
|
||||
let drive = Arc::clone(&drive);
|
||||
let multi = multi.clone();
|
||||
let overall = overall.clone();
|
||||
let file_style = file_style.clone();
|
||||
let job_size = job.file_size;
|
||||
let completed_bytes = Arc::clone(&completed_bytes);
|
||||
let total_bytes = total_bytes;
|
||||
let start = start.clone();
|
||||
|
||||
let handle = tokio::spawn(async move {
|
||||
let pb = multi.add(ProgressBar::new_spinner());
|
||||
pb.set_style(file_style);
|
||||
pb.set_message(job.relative_path.clone());
|
||||
pb.enable_steady_tick(std::time::Duration::from_millis(80));
|
||||
|
||||
let result = upload_single_file_with_progress(&drive, &job, &pb).await;
|
||||
|
||||
pb.finish_and_clear();
|
||||
multi.remove(&pb);
|
||||
|
||||
// Update overall — increment file count, compute bandwidth from bytes
|
||||
overall.inc(1);
|
||||
let done_bytes = completed_bytes.fetch_add(job_size, std::sync::atomic::Ordering::Relaxed) + job_size;
|
||||
let elapsed = start.elapsed().as_secs_f64();
|
||||
let speed = if elapsed > 1.0 { done_bytes as f64 / elapsed } else { 0.0 };
|
||||
let remaining = total_bytes.saturating_sub(done_bytes);
|
||||
let eta = if speed > 0.0 { remaining as f64 / speed } else { 0.0 };
|
||||
let eta_m = eta as u64 / 60;
|
||||
let eta_s = eta as u64 % 60;
|
||||
overall.set_message(format!(
|
||||
"{}/{} {}/s ETA: {}m {:02}s",
|
||||
indicatif::HumanBytes(done_bytes),
|
||||
indicatif::HumanBytes(total_bytes),
|
||||
indicatif::HumanBytes(speed as u64),
|
||||
eta_m, eta_s,
|
||||
));
|
||||
|
||||
drop(permit);
|
||||
result
|
||||
});
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
let mut errors = 0u64;
|
||||
for handle in handles {
|
||||
match handle.await {
|
||||
Ok(Ok(())) => {}
|
||||
Ok(Err(e)) => {
|
||||
errors += 1;
|
||||
multi.suspend(|| eprintln!(" ERROR: {e}"));
|
||||
}
|
||||
Err(e) => {
|
||||
errors += 1;
|
||||
multi.suspend(|| eprintln!(" ERROR: task panic: {e}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
overall.finish_and_clear();
|
||||
multi.clear().ok();
|
||||
|
||||
let elapsed = start.elapsed();
|
||||
let secs = elapsed.as_secs_f64();
|
||||
let speed = if secs > 0.0 {
|
||||
total_bytes as f64 / secs
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
let mins = elapsed.as_secs() / 60;
|
||||
let secs_rem = elapsed.as_secs() % 60;
|
||||
let uploaded = total_files - errors;
|
||||
if errors > 0 {
|
||||
println!(
|
||||
"✓ Uploaded {uploaded}/{total_files} files ({}) in {mins}m {secs_rem}s ({}/s) — {errors} failed",
|
||||
HumanBytes(total_bytes),
|
||||
HumanBytes(speed as u64),
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"✓ Uploaded {total_files} files ({}) in {mins}m {secs_rem}s ({}/s)",
|
||||
HumanBytes(total_bytes),
|
||||
HumanBytes(speed as u64),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Phase 1: Walk a directory recursively, create folders in Drive sequentially,
|
||||
/// and collect [`UploadJob`]s for every regular file.
|
||||
async fn collect_upload_jobs(
|
||||
drive: &super::DriveClient,
|
||||
dir: &std::path::Path,
|
||||
parent_id: &str,
|
||||
prefix: &str,
|
||||
jobs: &mut Vec<UploadJob>,
|
||||
) -> Result<()> {
|
||||
let dir_name = dir
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("unnamed");
|
||||
|
||||
// Skip hidden directories
|
||||
if dir_name.starts_with('.') {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Build the display prefix for children
|
||||
let display_prefix = if prefix.is_empty() {
|
||||
dir_name.to_string()
|
||||
} else {
|
||||
format!("{prefix}/{dir_name}")
|
||||
};
|
||||
|
||||
eprint!("\r\x1b[K Scanning: {display_prefix} ");
|
||||
|
||||
// Check if folder already exists under the parent.
|
||||
let existing = drive.list_children(parent_id, None).await.ok();
|
||||
let existing_folder_id = existing.and_then(|page| {
|
||||
page.results.iter().find_map(|item| {
|
||||
let is_folder = item.get("type").and_then(|v| v.as_str()) == Some("folder");
|
||||
let title_matches = item.get("title").and_then(|v| v.as_str()) == Some(dir_name);
|
||||
if is_folder && title_matches {
|
||||
item.get("id").and_then(|v| v.as_str()).map(String::from)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
let folder_id = if let Some(id) = existing_folder_id {
|
||||
id
|
||||
} else {
|
||||
let folder = drive
|
||||
.create_child(
|
||||
parent_id,
|
||||
&serde_json::json!({
|
||||
"title": dir_name,
|
||||
"type": "folder",
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
folder["id"]
|
||||
.as_str()
|
||||
.ok_or_else(|| crate::error::SunbeamError::Other("No folder ID in response".into()))?
|
||||
.to_string()
|
||||
};
|
||||
|
||||
// Build a set of existing file titles in this folder to skip duplicates.
|
||||
let existing_file_titles: std::collections::HashSet<String> = {
|
||||
let mut titles = std::collections::HashSet::new();
|
||||
if let Ok(page) = drive.list_children(&folder_id, None).await {
|
||||
for item in &page.results {
|
||||
if item.get("type").and_then(|v| v.as_str()) == Some("file") {
|
||||
if let Some(title) = item.get("title").and_then(|v| v.as_str()) {
|
||||
titles.insert(title.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
titles
|
||||
};
|
||||
|
||||
let mut entries: Vec<_> = std::fs::read_dir(dir)
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("reading dir: {e}")))?
|
||||
.filter_map(|e| e.ok())
|
||||
.collect();
|
||||
entries.sort_by_key(|e| e.file_name());
|
||||
|
||||
for entry in entries {
|
||||
let entry_path = entry.path();
|
||||
let name = entry
|
||||
.file_name()
|
||||
.to_str()
|
||||
.unwrap_or_default()
|
||||
.to_string();
|
||||
|
||||
// Skip hidden entries
|
||||
if name.starts_with('.') {
|
||||
continue;
|
||||
}
|
||||
|
||||
if entry_path.is_dir() {
|
||||
Box::pin(collect_upload_jobs(
|
||||
drive,
|
||||
&entry_path,
|
||||
&folder_id,
|
||||
&display_prefix,
|
||||
jobs,
|
||||
))
|
||||
.await?;
|
||||
} else if entry_path.is_file() {
|
||||
// Skip if a file with this title already exists in the folder.
|
||||
if existing_file_titles.contains(&name) {
|
||||
continue;
|
||||
}
|
||||
let file_size = std::fs::metadata(&entry_path)
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("stat: {e}")))?
|
||||
.len();
|
||||
jobs.push(UploadJob {
|
||||
local_path: entry_path,
|
||||
parent_id: folder_id.clone(),
|
||||
file_size,
|
||||
relative_path: format!("{display_prefix}/{name}"),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Upload a single file to Drive, updating the progress bar.
|
||||
/// Retries on 429/500/502/503 up to 5 times with exponential backoff.
|
||||
async fn upload_single_file_with_progress(
|
||||
drive: &super::DriveClient,
|
||||
job: &UploadJob,
|
||||
pb: &indicatif::ProgressBar,
|
||||
) -> Result<()> {
|
||||
let filename = job
|
||||
.local_path
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("unnamed");
|
||||
|
||||
// Create the file item in Drive (with retry)
|
||||
let body = serde_json::json!({
|
||||
"title": filename,
|
||||
"filename": filename,
|
||||
"type": "file",
|
||||
});
|
||||
let item = retry_drive_call(|| drive.create_child(&job.parent_id, &body), 5).await?;
|
||||
|
||||
let item_id = item["id"]
|
||||
.as_str()
|
||||
.ok_or_else(|| crate::error::SunbeamError::Other("No item ID in response".into()))?;
|
||||
|
||||
let upload_url = item["policy"]
|
||||
.as_str()
|
||||
.ok_or_else(|| {
|
||||
crate::error::SunbeamError::Other(
|
||||
"No upload policy URL in response \u{2014} is the item a file?".into(),
|
||||
)
|
||||
})?;
|
||||
|
||||
tracing::debug!("S3 presigned URL: {upload_url}");
|
||||
|
||||
// Read the file and upload to S3
|
||||
let data = std::fs::read(&job.local_path)
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("reading file: {e}")))?;
|
||||
let len = data.len() as u64;
|
||||
drive
|
||||
.upload_to_s3(upload_url, bytes::Bytes::from(data))
|
||||
.await?;
|
||||
pb.set_position(len);
|
||||
|
||||
// Notify Drive the upload is complete (with retry)
|
||||
retry_drive_call(|| drive.upload_ended(item_id), 5).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Retry a Drive API call on 429/500/502/503 with exponential backoff.
|
||||
async fn retry_drive_call<F, Fut, T>(f: F, max_retries: u32) -> Result<T>
|
||||
where
|
||||
F: Fn() -> Fut,
|
||||
Fut: std::future::Future<Output = Result<T>>,
|
||||
{
|
||||
let mut last_err = None;
|
||||
for attempt in 0..=max_retries {
|
||||
match f().await {
|
||||
Ok(v) => return Ok(v),
|
||||
Err(e) => {
|
||||
let msg = e.to_string();
|
||||
let retryable = msg.contains("429")
|
||||
|| msg.contains("500")
|
||||
|| msg.contains("502")
|
||||
|| msg.contains("503")
|
||||
|| msg.contains("request failed");
|
||||
if retryable && attempt < max_retries {
|
||||
// On 500, try refreshing the SSO token (may have expired)
|
||||
if msg.contains("500") {
|
||||
let _ = crate::auth::get_token().await;
|
||||
}
|
||||
let delay = std::time::Duration::from_millis(
|
||||
500 * 2u64.pow(attempt.min(4)),
|
||||
);
|
||||
tokio::time::sleep(delay).await;
|
||||
last_err = Some(e);
|
||||
continue;
|
||||
}
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(last_err.unwrap())
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Mail (Messages)
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
@@ -823,7 +1188,7 @@ pub async fn dispatch_mail(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let mail = messages_client(client.domain()).await?;
|
||||
let mail = client.messages().await?;
|
||||
match cmd {
|
||||
MailCommand::Mailbox { action } => match action {
|
||||
MailboxAction::List => {
|
||||
@@ -1013,7 +1378,7 @@ pub async fn dispatch_cal(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let cal = calendars_client(client.domain()).await?;
|
||||
let cal = client.calendars().await?;
|
||||
match cmd {
|
||||
CalCommand::Calendar { action } => match action {
|
||||
CalendarAction::List => {
|
||||
@@ -1124,7 +1489,7 @@ pub async fn dispatch_find(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let find = find_client(client.domain()).await?;
|
||||
let find = client.find().await?;
|
||||
match cmd {
|
||||
FindCommand::Search { query, page } => {
|
||||
let page_data = find.search(&query, page).await?;
|
||||
|
||||
@@ -6,6 +6,7 @@ use reqwest::Method;
|
||||
use super::types::*;
|
||||
|
||||
/// Client for the La Suite Drive API.
|
||||
#[derive(Clone)]
|
||||
pub struct DriveClient {
|
||||
pub(crate) transport: HttpTransport,
|
||||
}
|
||||
@@ -39,70 +40,164 @@ impl DriveClient {
|
||||
self
|
||||
}
|
||||
|
||||
// -- Files --------------------------------------------------------------
|
||||
// -- Items --------------------------------------------------------------
|
||||
|
||||
/// List files with optional pagination.
|
||||
/// List items with optional pagination and type filter.
|
||||
pub async fn list_items(
|
||||
&self,
|
||||
page: Option<u32>,
|
||||
item_type: Option<&str>,
|
||||
) -> Result<DRFPage<DriveFile>> {
|
||||
let mut path = String::from("items/?");
|
||||
if let Some(p) = page {
|
||||
path.push_str(&format!("page={p}&"));
|
||||
}
|
||||
if let Some(t) = item_type {
|
||||
path.push_str(&format!("type={t}&"));
|
||||
}
|
||||
self.transport
|
||||
.json(Method::GET, &path, Option::<&()>::None, "drive list items")
|
||||
.await
|
||||
}
|
||||
|
||||
/// List files (items with type=file).
|
||||
pub async fn list_files(&self, page: Option<u32>) -> Result<DRFPage<DriveFile>> {
|
||||
let path = match page {
|
||||
Some(p) => format!("files/?page={p}"),
|
||||
None => "files/".to_string(),
|
||||
};
|
||||
self.transport
|
||||
.json(Method::GET, &path, Option::<&()>::None, "drive list files")
|
||||
.await
|
||||
self.list_items(page, Some("file")).await
|
||||
}
|
||||
|
||||
/// Get a single file by ID.
|
||||
pub async fn get_file(&self, id: &str) -> Result<DriveFile> {
|
||||
self.transport
|
||||
.json(
|
||||
Method::GET,
|
||||
&format!("files/{id}/"),
|
||||
Option::<&()>::None,
|
||||
"drive get file",
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Upload a new file.
|
||||
pub async fn upload_file(&self, body: &serde_json::Value) -> Result<DriveFile> {
|
||||
self.transport
|
||||
.json(Method::POST, "files/", Some(body), "drive upload file")
|
||||
.await
|
||||
}
|
||||
|
||||
/// Delete a file.
|
||||
pub async fn delete_file(&self, id: &str) -> Result<()> {
|
||||
self.transport
|
||||
.send(
|
||||
Method::DELETE,
|
||||
&format!("files/{id}/"),
|
||||
Option::<&()>::None,
|
||||
"drive delete file",
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
// -- Folders ------------------------------------------------------------
|
||||
|
||||
/// List folders with optional pagination.
|
||||
/// List folders (items with type=folder).
|
||||
pub async fn list_folders(&self, page: Option<u32>) -> Result<DRFPage<DriveFolder>> {
|
||||
let path = match page {
|
||||
Some(p) => format!("folders/?page={p}"),
|
||||
None => "folders/".to_string(),
|
||||
};
|
||||
let mut path = String::from("items/?type=folder&");
|
||||
if let Some(p) = page {
|
||||
path.push_str(&format!("page={p}&"));
|
||||
}
|
||||
self.transport
|
||||
.json(Method::GET, &path, Option::<&()>::None, "drive list folders")
|
||||
.await
|
||||
}
|
||||
|
||||
/// Create a new folder.
|
||||
/// Get a single item by ID.
|
||||
pub async fn get_file(&self, id: &str) -> Result<DriveFile> {
|
||||
self.transport
|
||||
.json(
|
||||
Method::GET,
|
||||
&format!("items/{id}/"),
|
||||
Option::<&()>::None,
|
||||
"drive get item",
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Create a new item (file or folder) at the root level.
|
||||
pub async fn upload_file(&self, body: &serde_json::Value) -> Result<DriveFile> {
|
||||
self.transport
|
||||
.json(Method::POST, "items/", Some(body), "drive create item")
|
||||
.await
|
||||
}
|
||||
|
||||
/// Delete an item.
|
||||
pub async fn delete_file(&self, id: &str) -> Result<()> {
|
||||
self.transport
|
||||
.send(
|
||||
Method::DELETE,
|
||||
&format!("items/{id}/"),
|
||||
Option::<&()>::None,
|
||||
"drive delete item",
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Create a new folder at the root level.
|
||||
pub async fn create_folder(&self, body: &serde_json::Value) -> Result<DriveFolder> {
|
||||
self.transport
|
||||
.json(Method::POST, "folders/", Some(body), "drive create folder")
|
||||
.json(Method::POST, "items/", Some(body), "drive create folder")
|
||||
.await
|
||||
}
|
||||
|
||||
// -- Items (children API) ------------------------------------------------
|
||||
|
||||
/// Create a child item under a parent folder.
|
||||
/// Returns the created item including its upload_url for files.
|
||||
pub async fn create_child(
|
||||
&self,
|
||||
parent_id: &str,
|
||||
body: &serde_json::Value,
|
||||
) -> Result<serde_json::Value> {
|
||||
self.transport
|
||||
.json(
|
||||
Method::POST,
|
||||
&format!("items/{parent_id}/children/"),
|
||||
Some(body),
|
||||
"drive create child",
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// List children of an item (folder).
|
||||
pub async fn list_children(
|
||||
&self,
|
||||
parent_id: &str,
|
||||
page: Option<u32>,
|
||||
) -> Result<DRFPage<serde_json::Value>> {
|
||||
let path = match page {
|
||||
Some(p) => format!("items/{parent_id}/children/?page={p}"),
|
||||
None => format!("items/{parent_id}/children/"),
|
||||
};
|
||||
self.transport
|
||||
.json(Method::GET, &path, Option::<&()>::None, "drive list children")
|
||||
.await
|
||||
}
|
||||
|
||||
/// Notify Drive that a file upload to S3 is complete.
|
||||
pub async fn upload_ended(&self, item_id: &str) -> Result<serde_json::Value> {
|
||||
self.transport
|
||||
.json(
|
||||
Method::POST,
|
||||
&format!("items/{item_id}/upload-ended/"),
|
||||
Option::<&()>::None,
|
||||
"drive upload ended",
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Upload file bytes directly to a presigned S3 URL.
|
||||
/// The presigned URL's SigV4 signature covers host + x-amz-acl headers.
|
||||
/// Retries up to 3 times on 502/503/connection errors.
|
||||
pub async fn upload_to_s3(&self, presigned_url: &str, data: bytes::Bytes) -> Result<()> {
|
||||
let max_retries = 3;
|
||||
for attempt in 0..=max_retries {
|
||||
let resp = self.transport.http
|
||||
.put(presigned_url)
|
||||
.header("x-amz-acl", "private")
|
||||
.body(data.clone())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
match resp {
|
||||
Ok(r) if r.status().is_success() => return Ok(()),
|
||||
Ok(r) if (r.status() == 502 || r.status() == 503) && attempt < max_retries => {
|
||||
tokio::time::sleep(std::time::Duration::from_millis(500 * (attempt as u64 + 1))).await;
|
||||
continue;
|
||||
}
|
||||
Ok(r) => {
|
||||
let status = r.status();
|
||||
let body = r.text().await.unwrap_or_default();
|
||||
return Err(crate::error::SunbeamError::network(format!(
|
||||
"S3 upload: HTTP {status}: {body}"
|
||||
)));
|
||||
}
|
||||
Err(_) if attempt < max_retries => {
|
||||
tokio::time::sleep(std::time::Duration::from_millis(500 * (attempt as u64 + 1))).await;
|
||||
continue;
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(crate::error::SunbeamError::network(format!("S3 upload: {e}")));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// -- Shares -------------------------------------------------------------
|
||||
|
||||
/// Share a file with a user.
|
||||
|
||||
@@ -219,13 +219,17 @@ pub struct DriveFile {
|
||||
#[serde(default)]
|
||||
pub id: String,
|
||||
#[serde(default)]
|
||||
pub name: Option<String>,
|
||||
pub title: Option<String>,
|
||||
#[serde(default)]
|
||||
pub filename: Option<String>,
|
||||
#[serde(default, rename = "type")]
|
||||
pub item_type: Option<String>,
|
||||
#[serde(default)]
|
||||
pub size: Option<u64>,
|
||||
#[serde(default)]
|
||||
pub mime_type: Option<String>,
|
||||
pub mimetype: Option<String>,
|
||||
#[serde(default)]
|
||||
pub folder_id: Option<String>,
|
||||
pub upload_state: Option<String>,
|
||||
#[serde(default)]
|
||||
pub url: Option<String>,
|
||||
#[serde(default)]
|
||||
@@ -234,15 +238,17 @@ pub struct DriveFile {
|
||||
pub updated_at: Option<String>,
|
||||
}
|
||||
|
||||
/// A folder in the Drive service.
|
||||
/// A folder in the Drive service (same API, type=folder).
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct DriveFolder {
|
||||
#[serde(default)]
|
||||
pub id: String,
|
||||
#[serde(default)]
|
||||
pub name: Option<String>,
|
||||
pub title: Option<String>,
|
||||
#[serde(default, rename = "type")]
|
||||
pub item_type: Option<String>,
|
||||
#[serde(default)]
|
||||
pub parent_id: Option<String>,
|
||||
pub numchild: Option<u32>,
|
||||
#[serde(default)]
|
||||
pub created_at: Option<String>,
|
||||
#[serde(default)]
|
||||
|
||||
@@ -19,6 +19,7 @@ pub mod secrets;
|
||||
pub mod services;
|
||||
pub mod update;
|
||||
pub mod users;
|
||||
pub mod vault_keystore;
|
||||
|
||||
// Feature-gated service client modules
|
||||
#[cfg(feature = "identity")]
|
||||
|
||||
@@ -475,10 +475,15 @@ async fn os_api(path: &str, method: &str, body: Option<&str>) -> Option<String>
|
||||
curl_args.extend_from_slice(&["-H", "Content-Type: application/json", "-d", &body_string]);
|
||||
}
|
||||
|
||||
// Build the full exec command: exec deploy/opensearch -n data -c opensearch -- curl ...
|
||||
let exec_cmd = curl_args;
|
||||
let pod_name = match crate::kube::find_pod_by_label("data", "app=opensearch").await {
|
||||
Some(name) => name,
|
||||
None => {
|
||||
crate::output::warn("No OpenSearch pod found in data namespace");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
match crate::kube::kube_exec("data", "opensearch-0", &exec_cmd, Some("opensearch")).await {
|
||||
match crate::kube::kube_exec("data", &pod_name, &curl_args, Some("opensearch")).await {
|
||||
Ok((0, out)) if !out.is_empty() => Some(out),
|
||||
_ => None,
|
||||
}
|
||||
|
||||
@@ -1,22 +1,10 @@
|
||||
//! CLI dispatch for Matrix chat commands.
|
||||
|
||||
use crate::client::SunbeamClient;
|
||||
use crate::error::Result;
|
||||
use crate::output::{self, OutputFormat};
|
||||
use clap::Subcommand;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Auth helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Construct a [`MatrixClient`] with a valid access token from the credential
|
||||
/// cache. Fails if the user is not logged in.
|
||||
async fn matrix_with_token(domain: &str) -> Result<super::MatrixClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
let mut m = super::MatrixClient::connect(domain);
|
||||
m.set_token(&token);
|
||||
Ok(m)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Command tree
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -343,8 +331,8 @@ pub enum UserAction {
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Dispatch a parsed [`ChatCommand`] against the Matrix homeserver.
|
||||
pub async fn dispatch(domain: &str, format: OutputFormat, cmd: ChatCommand) -> Result<()> {
|
||||
let m = matrix_with_token(domain).await?;
|
||||
pub async fn dispatch(client: &SunbeamClient, format: OutputFormat, cmd: ChatCommand) -> Result<()> {
|
||||
let m = client.matrix().await?;
|
||||
|
||||
match cmd {
|
||||
// -- Whoami ---------------------------------------------------------
|
||||
|
||||
@@ -32,9 +32,9 @@ impl ServiceClient for MatrixClient {
|
||||
}
|
||||
|
||||
impl MatrixClient {
|
||||
/// Build a MatrixClient from domain (e.g. `https://matrix.{domain}/_matrix`).
|
||||
/// Build a MatrixClient from domain (e.g. `https://messages.{domain}/_matrix`).
|
||||
pub fn connect(domain: &str) -> Self {
|
||||
let base_url = format!("https://matrix.{domain}/_matrix");
|
||||
let base_url = format!("https://messages.{domain}/_matrix");
|
||||
Self::from_parts(base_url, AuthMethod::Bearer(String::new()))
|
||||
}
|
||||
|
||||
@@ -1204,7 +1204,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_connect_url() {
|
||||
let c = MatrixClient::connect("sunbeam.pt");
|
||||
assert_eq!(c.base_url(), "https://matrix.sunbeam.pt/_matrix");
|
||||
assert_eq!(c.base_url(), "https://messages.sunbeam.pt/_matrix");
|
||||
assert_eq!(c.service_name(), "matrix");
|
||||
}
|
||||
|
||||
|
||||
@@ -177,7 +177,7 @@ async fn dispatch_room(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let lk = client.livekit();
|
||||
let lk = client.livekit().await?;
|
||||
match action {
|
||||
RoomAction::List => {
|
||||
let resp = lk.list_rooms().await?;
|
||||
@@ -227,7 +227,7 @@ async fn dispatch_participant(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let lk = client.livekit();
|
||||
let lk = client.livekit().await?;
|
||||
match action {
|
||||
ParticipantAction::List { room } => {
|
||||
let resp = lk
|
||||
@@ -278,7 +278,7 @@ async fn dispatch_egress(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let lk = client.livekit();
|
||||
let lk = client.livekit().await?;
|
||||
match action {
|
||||
EgressAction::List { room } => {
|
||||
let resp = lk
|
||||
|
||||
@@ -425,7 +425,7 @@ async fn dispatch_prometheus(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let prom = client.prometheus();
|
||||
let prom = client.prometheus().await?;
|
||||
match action {
|
||||
PrometheusAction::Query { query, time } => {
|
||||
let res = prom.query(&query, time.as_deref()).await?;
|
||||
@@ -511,7 +511,7 @@ async fn dispatch_loki(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let loki = client.loki();
|
||||
let loki = client.loki().await?;
|
||||
match action {
|
||||
LokiAction::Query { query, limit, time } => {
|
||||
let res = loki.query(&query, limit, time.as_deref()).await?;
|
||||
@@ -631,7 +631,7 @@ async fn dispatch_grafana_dashboard(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let grafana = client.grafana();
|
||||
let grafana = client.grafana().await?;
|
||||
match action {
|
||||
GrafanaDashboardAction::List => {
|
||||
let items = grafana.list_dashboards().await?;
|
||||
@@ -696,7 +696,7 @@ async fn dispatch_grafana_datasource(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let grafana = client.grafana();
|
||||
let grafana = client.grafana().await?;
|
||||
match action {
|
||||
GrafanaDatasourceAction::List => {
|
||||
let items = grafana.list_datasources().await?;
|
||||
@@ -746,7 +746,7 @@ async fn dispatch_grafana_folder(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let grafana = client.grafana();
|
||||
let grafana = client.grafana().await?;
|
||||
match action {
|
||||
GrafanaFolderAction::List => {
|
||||
let items = grafana.list_folders().await?;
|
||||
@@ -794,7 +794,7 @@ async fn dispatch_grafana_annotation(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let grafana = client.grafana();
|
||||
let grafana = client.grafana().await?;
|
||||
match action {
|
||||
GrafanaAnnotationAction::List { params } => {
|
||||
let items = grafana.list_annotations(params.as_deref()).await?;
|
||||
@@ -833,7 +833,7 @@ async fn dispatch_grafana_alert(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let grafana = client.grafana();
|
||||
let grafana = client.grafana().await?;
|
||||
match action {
|
||||
GrafanaAlertAction::List => {
|
||||
let items = grafana.get_alert_rules().await?;
|
||||
@@ -879,7 +879,7 @@ async fn dispatch_grafana_org(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let grafana = client.grafana();
|
||||
let grafana = client.grafana().await?;
|
||||
match action {
|
||||
GrafanaOrgAction::Get => {
|
||||
let item = grafana.get_current_org().await?;
|
||||
|
||||
@@ -27,9 +27,9 @@ impl ServiceClient for GrafanaClient {
|
||||
}
|
||||
|
||||
impl GrafanaClient {
|
||||
/// Build a GrafanaClient from domain (e.g. `https://grafana.{domain}/api`).
|
||||
/// Build a GrafanaClient from domain (e.g. `https://metrics.{domain}/api`).
|
||||
pub fn connect(domain: &str) -> Self {
|
||||
let base_url = format!("https://grafana.{domain}/api");
|
||||
let base_url = format!("https://metrics.{domain}/api");
|
||||
Self::from_parts(base_url, AuthMethod::None)
|
||||
}
|
||||
|
||||
@@ -410,7 +410,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_connect_url() {
|
||||
let c = GrafanaClient::connect("sunbeam.pt");
|
||||
assert_eq!(c.base_url(), "https://grafana.sunbeam.pt/api");
|
||||
assert_eq!(c.base_url(), "https://metrics.sunbeam.pt/api");
|
||||
assert_eq!(c.service_name(), "grafana");
|
||||
}
|
||||
|
||||
|
||||
@@ -27,9 +27,9 @@ impl ServiceClient for LokiClient {
|
||||
}
|
||||
|
||||
impl LokiClient {
|
||||
/// Build a LokiClient from domain (e.g. `https://loki.{domain}/loki/api/v1`).
|
||||
/// Build a LokiClient from domain (e.g. `https://systemlogs.{domain}/loki/api/v1`).
|
||||
pub fn connect(domain: &str) -> Self {
|
||||
let base_url = format!("https://loki.{domain}/loki/api/v1");
|
||||
let base_url = format!("https://systemlogs.{domain}/loki/api/v1");
|
||||
Self::from_parts(base_url, AuthMethod::None)
|
||||
}
|
||||
|
||||
@@ -254,7 +254,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_connect_url() {
|
||||
let c = LokiClient::connect("sunbeam.pt");
|
||||
assert_eq!(c.base_url(), "https://loki.sunbeam.pt/loki/api/v1");
|
||||
assert_eq!(c.base_url(), "https://systemlogs.sunbeam.pt/loki/api/v1");
|
||||
assert_eq!(c.service_name(), "loki");
|
||||
}
|
||||
|
||||
|
||||
@@ -27,9 +27,9 @@ impl ServiceClient for PrometheusClient {
|
||||
}
|
||||
|
||||
impl PrometheusClient {
|
||||
/// Build a PrometheusClient from domain (e.g. `https://prometheus.{domain}/api/v1`).
|
||||
/// Build a PrometheusClient from domain (e.g. `https://systemmetrics.{domain}/api/v1`).
|
||||
pub fn connect(domain: &str) -> Self {
|
||||
let base_url = format!("https://prometheus.{domain}/api/v1");
|
||||
let base_url = format!("https://systemmetrics.{domain}/api/v1");
|
||||
Self::from_parts(base_url, AuthMethod::None)
|
||||
}
|
||||
|
||||
@@ -253,7 +253,7 @@ mod tests {
|
||||
#[test]
|
||||
fn test_connect_url() {
|
||||
let c = PrometheusClient::connect("sunbeam.pt");
|
||||
assert_eq!(c.base_url(), "https://prometheus.sunbeam.pt/api/v1");
|
||||
assert_eq!(c.base_url(), "https://systemmetrics.sunbeam.pt/api/v1");
|
||||
assert_eq!(c.service_name(), "prometheus");
|
||||
}
|
||||
|
||||
|
||||
@@ -4,6 +4,7 @@ use std::collections::HashMap;
|
||||
|
||||
use clap::Subcommand;
|
||||
|
||||
use crate::client::SunbeamClient;
|
||||
use crate::error::Result;
|
||||
use crate::output::{self, OutputFormat};
|
||||
|
||||
@@ -65,6 +66,12 @@ pub enum VaultCommand {
|
||||
#[arg(short, long)]
|
||||
data: Option<String>,
|
||||
},
|
||||
/// Re-initialize the vault (destructive — wipes all secrets).
|
||||
Reinit,
|
||||
/// Show local keystore status.
|
||||
Keys,
|
||||
/// Export vault keys as plaintext (for machine migration).
|
||||
ExportKeys,
|
||||
}
|
||||
|
||||
#[derive(Subcommand, Debug)]
|
||||
@@ -226,14 +233,92 @@ fn read_text_input(flag: Option<&str>) -> Result<String> {
|
||||
|
||||
pub async fn dispatch(
|
||||
cmd: VaultCommand,
|
||||
bao: &super::BaoClient,
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
// -- Commands that don't need a BaoClient -------------------------------
|
||||
match cmd {
|
||||
VaultCommand::Keys => {
|
||||
let domain = crate::config::domain();
|
||||
let path = crate::vault_keystore::keystore_path(domain);
|
||||
|
||||
if !crate::vault_keystore::keystore_exists(domain) {
|
||||
output::warn(&format!("No local keystore found at {}", path.display()));
|
||||
output::warn("Run `sunbeam seed` to create one, or `sunbeam vault reinit` to start fresh.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match crate::vault_keystore::verify_vault_keys(domain) {
|
||||
Ok(ks) => {
|
||||
output::ok(&format!("Domain: {}", ks.domain));
|
||||
output::ok(&format!("Created: {}", ks.created_at.format("%Y-%m-%d %H:%M:%S UTC")));
|
||||
output::ok(&format!("Updated: {}", ks.updated_at.format("%Y-%m-%d %H:%M:%S UTC")));
|
||||
output::ok(&format!("Shares: {}/{}", ks.key_threshold, ks.key_shares));
|
||||
output::ok(&format!(
|
||||
"Token: {}...{}",
|
||||
&ks.root_token[..8.min(ks.root_token.len())],
|
||||
&ks.root_token[ks.root_token.len().saturating_sub(4)..]
|
||||
));
|
||||
output::ok(&format!("Unseal keys: {}", ks.unseal_keys_b64.len()));
|
||||
output::ok(&format!("Path: {}", path.display()));
|
||||
}
|
||||
Err(e) => {
|
||||
output::warn(&format!("Keystore at {} is invalid: {e}", path.display()));
|
||||
}
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
VaultCommand::ExportKeys => {
|
||||
let domain = crate::config::domain();
|
||||
output::warn("WARNING: This prints vault root token and unseal keys in PLAINTEXT.");
|
||||
output::warn("Only use this for machine migration. Do not share or log this output.");
|
||||
eprint!(" Type 'export' to confirm: ");
|
||||
let mut answer = String::new();
|
||||
std::io::stdin()
|
||||
.read_line(&mut answer)
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("stdin: {e}")))?;
|
||||
if answer.trim() != "export" {
|
||||
output::ok("Aborted.");
|
||||
return Ok(());
|
||||
}
|
||||
let json = crate::vault_keystore::export_plaintext(domain)?;
|
||||
println!("{json}");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
VaultCommand::Reinit => {
|
||||
return dispatch_reinit().await;
|
||||
}
|
||||
|
||||
// All other commands need a BaoClient — fall through.
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let bao = client.bao().await?;
|
||||
match cmd {
|
||||
// -- Status ---------------------------------------------------------
|
||||
VaultCommand::Status => {
|
||||
let status = bao.seal_status().await?;
|
||||
output::render(&status, fmt)
|
||||
output::render(&status, fmt)?;
|
||||
// Show local keystore status
|
||||
let domain = crate::config::domain();
|
||||
if crate::vault_keystore::keystore_exists(domain) {
|
||||
match crate::vault_keystore::load_keystore(domain) {
|
||||
Ok(ks) => {
|
||||
output::ok(&format!(
|
||||
"Local keystore: valid (updated {})",
|
||||
ks.updated_at.format("%Y-%m-%d %H:%M:%S UTC")
|
||||
));
|
||||
}
|
||||
Err(e) => {
|
||||
output::warn(&format!("Local keystore: corrupt ({e})"));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
output::warn("Local keystore: not found");
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// -- Init -----------------------------------------------------------
|
||||
@@ -333,5 +418,194 @@ pub async fn dispatch(
|
||||
output::render(&resp, fmt)
|
||||
}
|
||||
}
|
||||
|
||||
// Already handled above; unreachable.
|
||||
VaultCommand::Keys | VaultCommand::ExportKeys | VaultCommand::Reinit => unreachable!(),
|
||||
}
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Reinit
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/// Run a kubectl command, returning Ok(()) on success.
|
||||
async fn kubectl(args: &[&str]) -> Result<()> {
|
||||
crate::kube::ensure_tunnel().await?;
|
||||
let ctx = format!("--context={}", crate::kube::context());
|
||||
let status = tokio::process::Command::new("kubectl")
|
||||
.arg(&ctx)
|
||||
.args(args)
|
||||
.stdin(std::process::Stdio::null())
|
||||
.stdout(std::process::Stdio::inherit())
|
||||
.stderr(std::process::Stdio::inherit())
|
||||
.status()
|
||||
.await
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("kubectl: {e}")))?;
|
||||
if !status.success() {
|
||||
return Err(crate::error::SunbeamError::Other(format!(
|
||||
"kubectl {} exited with {}",
|
||||
args.join(" "),
|
||||
status.code().unwrap_or(-1)
|
||||
)));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Port-forward guard — cancels the background forwarder on drop.
|
||||
struct PortForwardGuard {
|
||||
_abort_handle: tokio::task::AbortHandle,
|
||||
pub local_port: u16,
|
||||
}
|
||||
|
||||
impl Drop for PortForwardGuard {
|
||||
fn drop(&mut self) {
|
||||
self._abort_handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
/// Open a kube-rs port-forward to `pod_name` in `namespace` on `remote_port`.
|
||||
async fn port_forward(namespace: &str, pod_name: &str, remote_port: u16) -> Result<PortForwardGuard> {
|
||||
use k8s_openapi::api::core::v1::Pod;
|
||||
use kube::api::{Api, ListParams};
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
let client = crate::kube::get_client().await?;
|
||||
let pods: Api<Pod> = Api::namespaced(client.clone(), namespace);
|
||||
|
||||
let listener = TcpListener::bind("127.0.0.1:0")
|
||||
.await
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("bind: {e}")))?;
|
||||
let local_port = listener
|
||||
.local_addr()
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("local_addr: {e}")))?
|
||||
.port();
|
||||
|
||||
let pod_name = pod_name.to_string();
|
||||
let ns = namespace.to_string();
|
||||
let task = tokio::spawn(async move {
|
||||
let mut current_pod = pod_name;
|
||||
loop {
|
||||
let (mut client_stream, _) = match listener.accept().await {
|
||||
Ok(s) => s,
|
||||
Err(_) => break,
|
||||
};
|
||||
|
||||
let pf_result = pods.portforward(¤t_pod, &[remote_port]).await;
|
||||
let mut pf = match pf_result {
|
||||
Ok(pf) => pf,
|
||||
Err(e) => {
|
||||
tracing::warn!("Port-forward failed, re-resolving pod: {e}");
|
||||
if let Ok(new_client) = crate::kube::get_client().await {
|
||||
let new_pods: Api<Pod> = Api::namespaced(new_client.clone(), &ns);
|
||||
let lp = ListParams::default();
|
||||
if let Ok(pod_list) = new_pods.list(&lp).await {
|
||||
if let Some(name) = pod_list
|
||||
.items
|
||||
.iter()
|
||||
.find(|p| {
|
||||
p.metadata
|
||||
.name
|
||||
.as_deref()
|
||||
.map(|n| n.starts_with(current_pod.split('-').next().unwrap_or("")))
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.and_then(|p| p.metadata.name.clone())
|
||||
{
|
||||
current_pod = name;
|
||||
}
|
||||
}
|
||||
}
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let mut upstream = match pf.take_stream(remote_port) {
|
||||
Some(s) => s,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _ = tokio::io::copy_bidirectional(&mut client_stream, &mut upstream).await;
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
let abort_handle = task.abort_handle();
|
||||
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
|
||||
|
||||
Ok(PortForwardGuard {
|
||||
_abort_handle: abort_handle,
|
||||
local_port,
|
||||
})
|
||||
}
|
||||
|
||||
/// Destructive vault re-initialization workflow.
|
||||
async fn dispatch_reinit() -> Result<()> {
|
||||
output::warn("This will DESTROY all vault secrets. You must re-run `sunbeam seed` after.");
|
||||
eprint!(" Type 'reinit' to confirm: ");
|
||||
let mut answer = String::new();
|
||||
std::io::stdin()
|
||||
.read_line(&mut answer)
|
||||
.map_err(|e| crate::error::SunbeamError::Other(format!("stdin: {e}")))?;
|
||||
if answer.trim() != "reinit" {
|
||||
output::ok("Aborted.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
output::step("Re-initializing vault...");
|
||||
|
||||
// Delete PVC and pod
|
||||
output::ok("Deleting vault storage...");
|
||||
let _ = kubectl(&["-n", "data", "delete", "pvc", "data-openbao-0", "--ignore-not-found"]).await;
|
||||
let _ = kubectl(&["-n", "data", "delete", "pod", "openbao-0", "--ignore-not-found"]).await;
|
||||
|
||||
// Wait for pod to come back
|
||||
output::ok("Waiting for vault pod to restart...");
|
||||
tokio::time::sleep(std::time::Duration::from_secs(15)).await;
|
||||
let _ = kubectl(&[
|
||||
"-n", "data", "wait", "--for=condition=Ready", "pod/openbao-0",
|
||||
"--timeout=120s",
|
||||
])
|
||||
.await;
|
||||
|
||||
// Port-forward and init
|
||||
let pf = port_forward("data", "openbao-0", 8200).await?;
|
||||
let bao_url = format!("http://127.0.0.1:{}", pf.local_port);
|
||||
let fresh_bao = crate::openbao::BaoClient::new(&bao_url);
|
||||
|
||||
let init = fresh_bao.init(1, 1).await?;
|
||||
let unseal_key = init.unseal_keys_b64[0].clone();
|
||||
let root_token = init.root_token.clone();
|
||||
|
||||
// Save to local keystore
|
||||
let domain = crate::config::domain();
|
||||
let ks = crate::vault_keystore::VaultKeystore {
|
||||
version: 1,
|
||||
domain: domain.to_string(),
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
root_token: root_token.clone(),
|
||||
unseal_keys_b64: vec![unseal_key.clone()],
|
||||
key_shares: 1,
|
||||
key_threshold: 1,
|
||||
};
|
||||
crate::vault_keystore::save_keystore(&ks)?;
|
||||
output::ok(&format!(
|
||||
"Keys saved to local keystore at {}",
|
||||
crate::vault_keystore::keystore_path(domain).display()
|
||||
));
|
||||
|
||||
// Save to K8s Secret
|
||||
let mut data = HashMap::new();
|
||||
data.insert("key".to_string(), unseal_key.clone());
|
||||
data.insert("root-token".to_string(), root_token.clone());
|
||||
crate::kube::create_secret("data", "openbao-keys", data).await?;
|
||||
output::ok("Keys stored in K8s Secret openbao-keys.");
|
||||
|
||||
// Unseal
|
||||
fresh_bao.unseal(&unseal_key).await?;
|
||||
output::ok("Vault unsealed.");
|
||||
|
||||
output::step("Vault re-initialized. Run `sunbeam seed` now to restore all secrets.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -15,6 +15,8 @@ use std::collections::HashMap;
|
||||
pub struct BaoClient {
|
||||
pub base_url: String,
|
||||
pub token: Option<String>,
|
||||
/// Optional bearer token for proxy auth_request (separate from vault token).
|
||||
pub bearer_token: Option<String>,
|
||||
http: reqwest::Client,
|
||||
}
|
||||
|
||||
@@ -67,17 +69,26 @@ impl BaoClient {
|
||||
Self {
|
||||
base_url: base_url.trim_end_matches('/').to_string(),
|
||||
token: None,
|
||||
bearer_token: None,
|
||||
http: reqwest::Client::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a client with an authentication token.
|
||||
/// Create a client with a vault authentication token.
|
||||
pub fn with_token(base_url: &str, token: &str) -> Self {
|
||||
let mut client = Self::new(base_url);
|
||||
client.token = Some(token.to_string());
|
||||
client
|
||||
}
|
||||
|
||||
/// Create a client with both a vault token and a bearer token for proxy auth.
|
||||
pub fn with_proxy_auth(base_url: &str, vault_token: &str, bearer_token: &str) -> Self {
|
||||
let mut client = Self::new(base_url);
|
||||
client.token = Some(vault_token.to_string());
|
||||
client.bearer_token = Some(bearer_token.to_string());
|
||||
client
|
||||
}
|
||||
|
||||
fn url(&self, path: &str) -> String {
|
||||
format!("{}/v1/{}", self.base_url, path.trim_start_matches('/'))
|
||||
}
|
||||
@@ -87,6 +98,9 @@ impl BaoClient {
|
||||
if let Some(ref token) = self.token {
|
||||
req = req.header("X-Vault-Token", token);
|
||||
}
|
||||
if let Some(ref bearer) = self.bearer_token {
|
||||
req = req.header("Authorization", format!("Bearer {bearer}"));
|
||||
}
|
||||
req
|
||||
}
|
||||
|
||||
@@ -95,8 +109,7 @@ impl BaoClient {
|
||||
/// Get the seal status of the OpenBao instance.
|
||||
pub async fn seal_status(&self) -> Result<SealStatusResponse> {
|
||||
let resp = self
|
||||
.http
|
||||
.get(format!("{}/v1/sys/seal-status", self.base_url))
|
||||
.request(reqwest::Method::GET, "sys/seal-status")
|
||||
.send()
|
||||
.await
|
||||
.ctx("Failed to connect to OpenBao")?;
|
||||
|
||||
@@ -6,17 +6,6 @@ use serde_json::json;
|
||||
use crate::error::Result;
|
||||
use crate::output::{self, OutputFormat};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Client helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
async fn os_client(domain: &str) -> Result<super::OpenSearchClient> {
|
||||
let token = crate::auth::get_token().await?;
|
||||
let mut c = super::OpenSearchClient::connect(domain);
|
||||
c.set_token(token);
|
||||
Ok(c)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Top-level command enum
|
||||
// ---------------------------------------------------------------------------
|
||||
@@ -413,7 +402,7 @@ pub async fn dispatch(
|
||||
client: &crate::client::SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let c = os_client(client.domain()).await?;
|
||||
let c = client.opensearch().await?;
|
||||
|
||||
match cmd {
|
||||
// -----------------------------------------------------------------
|
||||
|
||||
@@ -1103,4 +1103,50 @@ mod tests {
|
||||
];
|
||||
assert_eq!(PG_USERS, &expected[..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sol_gitea_credential_mapping() {
|
||||
let mut gitea = HashMap::new();
|
||||
gitea.insert("admin-username".to_string(), "gitea_admin".to_string());
|
||||
gitea.insert("admin-password".to_string(), "s3cret".to_string());
|
||||
|
||||
let mut sol_gitea = HashMap::new();
|
||||
if let Some(u) = gitea.get("admin-username") {
|
||||
sol_gitea.insert("gitea-admin-username".to_string(), u.clone());
|
||||
}
|
||||
if let Some(p) = gitea.get("admin-password") {
|
||||
sol_gitea.insert("gitea-admin-password".to_string(), p.clone());
|
||||
}
|
||||
|
||||
assert_eq!(sol_gitea.len(), 2);
|
||||
assert_eq!(sol_gitea["gitea-admin-username"], "gitea_admin");
|
||||
assert_eq!(sol_gitea["gitea-admin-password"], "s3cret");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sol_gitea_credential_mapping_partial() {
|
||||
let gitea: HashMap<String, String> = HashMap::new();
|
||||
let mut sol_gitea = HashMap::new();
|
||||
if let Some(u) = gitea.get("admin-username") {
|
||||
sol_gitea.insert("gitea-admin-username".to_string(), u.clone());
|
||||
}
|
||||
if let Some(p) = gitea.get("admin-password") {
|
||||
sol_gitea.insert("gitea-admin-password".to_string(), p.clone());
|
||||
}
|
||||
assert!(sol_gitea.is_empty(), "No creds should be mapped when gitea map is empty");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sol_agent_policy_hcl() {
|
||||
let sol_policy_hcl = concat!(
|
||||
"path \"secret/data/sol-tokens/*\" { capabilities = [\"create\", \"read\", \"update\", \"delete\"] }\n",
|
||||
"path \"secret/metadata/sol-tokens/*\" { capabilities = [\"read\", \"delete\", \"list\"] }\n",
|
||||
);
|
||||
assert!(sol_policy_hcl.contains("secret/data/sol-tokens/*"));
|
||||
assert!(sol_policy_hcl.contains("secret/metadata/sol-tokens/*"));
|
||||
assert!(sol_policy_hcl.contains("create"));
|
||||
assert!(sol_policy_hcl.contains("delete"));
|
||||
assert!(sol_policy_hcl.contains("list"));
|
||||
assert_eq!(sol_policy_hcl.lines().count(), 2);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,6 +101,21 @@ pub async fn seed_openbao() -> Result<Option<SeedResult>> {
|
||||
data.insert("root-token".to_string(), root_token.clone());
|
||||
k::create_secret("data", "openbao-keys", data).await?;
|
||||
ok("Initialized -- keys stored in secret/openbao-keys.");
|
||||
|
||||
// Save to local keystore
|
||||
let domain = crate::config::domain();
|
||||
let ks = crate::vault_keystore::VaultKeystore {
|
||||
version: 1,
|
||||
domain: domain.to_string(),
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
root_token: root_token.clone(),
|
||||
unseal_keys_b64: vec![unseal_key.clone()],
|
||||
key_shares: 1,
|
||||
key_threshold: 1,
|
||||
};
|
||||
crate::vault_keystore::save_keystore(&ks)?;
|
||||
ok(&format!("Keys backed up to local keystore at {}", crate::vault_keystore::keystore_path(domain).display()));
|
||||
}
|
||||
Err(e) => {
|
||||
warn(&format!(
|
||||
@@ -114,11 +129,65 @@ pub async fn seed_openbao() -> Result<Option<SeedResult>> {
|
||||
}
|
||||
} else {
|
||||
ok("Already initialized.");
|
||||
if let Ok(key) = k::kube_get_secret_field("data", "openbao-keys", "key").await {
|
||||
unseal_key = key;
|
||||
}
|
||||
if let Ok(token) = k::kube_get_secret_field("data", "openbao-keys", "root-token").await {
|
||||
root_token = token;
|
||||
let domain = crate::config::domain();
|
||||
|
||||
// Try local keystore first (survives K8s Secret overwrites)
|
||||
if crate::vault_keystore::keystore_exists(domain) {
|
||||
match crate::vault_keystore::load_keystore(domain) {
|
||||
Ok(ks) => {
|
||||
unseal_key = ks.unseal_keys_b64.first().cloned().unwrap_or_default();
|
||||
root_token = ks.root_token.clone();
|
||||
ok("Loaded keys from local keystore.");
|
||||
|
||||
// Restore K8s Secret if it was wiped
|
||||
let k8s_token = k::kube_get_secret_field("data", "openbao-keys", "root-token").await.unwrap_or_default();
|
||||
if k8s_token.is_empty() && !root_token.is_empty() {
|
||||
warn("K8s Secret openbao-keys is empty — restoring from local keystore.");
|
||||
let mut data = HashMap::new();
|
||||
data.insert("key".to_string(), unseal_key.clone());
|
||||
data.insert("root-token".to_string(), root_token.clone());
|
||||
k::create_secret("data", "openbao-keys", data).await?;
|
||||
ok("Restored openbao-keys from local keystore.");
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn(&format!("Failed to load local keystore: {e}"));
|
||||
// Fall back to K8s Secret
|
||||
if let Ok(key) = k::kube_get_secret_field("data", "openbao-keys", "key").await {
|
||||
unseal_key = key;
|
||||
}
|
||||
if let Ok(token) = k::kube_get_secret_field("data", "openbao-keys", "root-token").await {
|
||||
root_token = token;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// No local keystore — read from K8s Secret and backfill
|
||||
if let Ok(key) = k::kube_get_secret_field("data", "openbao-keys", "key").await {
|
||||
unseal_key = key;
|
||||
}
|
||||
if let Ok(token) = k::kube_get_secret_field("data", "openbao-keys", "root-token").await {
|
||||
root_token = token;
|
||||
}
|
||||
|
||||
// Backfill local keystore if we got keys from the cluster
|
||||
if !root_token.is_empty() && !unseal_key.is_empty() {
|
||||
let ks = crate::vault_keystore::VaultKeystore {
|
||||
version: 1,
|
||||
domain: domain.to_string(),
|
||||
created_at: chrono::Utc::now(),
|
||||
updated_at: chrono::Utc::now(),
|
||||
root_token: root_token.clone(),
|
||||
unseal_keys_b64: vec![unseal_key.clone()],
|
||||
key_shares: 1,
|
||||
key_threshold: 1,
|
||||
};
|
||||
if let Err(e) = crate::vault_keystore::save_keystore(&ks) {
|
||||
warn(&format!("Failed to backfill local keystore: {e}"));
|
||||
} else {
|
||||
ok(&format!("Backfilled local keystore at {}", crate::vault_keystore::keystore_path(domain).display()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -468,7 +537,46 @@ pub async fn seed_openbao() -> Result<Option<SeedResult>> {
|
||||
|
||||
for (path, data) in all_paths {
|
||||
if dirty_paths.contains(*path) {
|
||||
bao.kv_patch("secret", path, data).await?;
|
||||
// Use kv_put for new paths (patch fails with 404 on nonexistent keys).
|
||||
// Try patch first (preserves manually-set fields), fall back to put.
|
||||
if bao.kv_patch("secret", path, data).await.is_err() {
|
||||
bao.kv_put("secret", path, data).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Seed resource server allowed audiences for La Suite external APIs.
|
||||
// Combines the static sunbeam-cli client ID with dynamic service client IDs.
|
||||
ok("Configuring La Suite resource server audiences...");
|
||||
{
|
||||
let mut rs_audiences = HashMap::new();
|
||||
// sunbeam-cli is always static (OAuth2Client CRD name)
|
||||
let mut audiences = vec!["sunbeam-cli".to_string()];
|
||||
// Read the messages client ID from the oidc-messages secret if available
|
||||
if let Ok(client_id) = crate::kube::kube_get_secret_field("lasuite", "oidc-messages", "CLIENT_ID").await {
|
||||
audiences.push(client_id);
|
||||
}
|
||||
rs_audiences.insert(
|
||||
"OIDC_RS_ALLOWED_AUDIENCES".to_string(),
|
||||
audiences.join(","),
|
||||
);
|
||||
bao.kv_put("secret", "drive-rs-audiences", &rs_audiences).await?;
|
||||
}
|
||||
|
||||
// Patch gitea admin credentials into secret/sol for Sol's Gitea integration.
|
||||
// Uses kv_patch to preserve manually-set keys (matrix-access-token etc.).
|
||||
{
|
||||
let mut sol_gitea = HashMap::new();
|
||||
if let Some(u) = gitea.get("admin-username") {
|
||||
sol_gitea.insert("gitea-admin-username".to_string(), u.clone());
|
||||
}
|
||||
if let Some(p) = gitea.get("admin-password") {
|
||||
sol_gitea.insert("gitea-admin-password".to_string(), p.clone());
|
||||
}
|
||||
if !sol_gitea.is_empty() {
|
||||
if bao.kv_patch("secret", "sol", &sol_gitea).await.is_err() {
|
||||
bao.kv_put("secret", "sol", &sol_gitea).await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -503,6 +611,82 @@ pub async fn seed_openbao() -> Result<Option<SeedResult>> {
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Sol agent policy — read/write access to sol-tokens/* for user impersonation PATs
|
||||
ok("Configuring Kubernetes auth for Sol agent...");
|
||||
let sol_policy_hcl = concat!(
|
||||
"path \"secret/data/sol-tokens/*\" { capabilities = [\"create\", \"read\", \"update\", \"delete\"] }\n",
|
||||
"path \"secret/metadata/sol-tokens/*\" { capabilities = [\"read\", \"delete\", \"list\"] }\n",
|
||||
);
|
||||
bao.write_policy("sol-agent", sol_policy_hcl).await?;
|
||||
|
||||
bao.write(
|
||||
"auth/kubernetes/role/sol-agent",
|
||||
&serde_json::json!({
|
||||
"bound_service_account_names": "default",
|
||||
"bound_service_account_namespaces": "matrix",
|
||||
"policies": "sol-agent",
|
||||
"ttl": "1h"
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// ── JWT auth for CLI (OIDC via Hydra) ─────────────────────────────
|
||||
// Enables `sunbeam vault` commands to authenticate with SSO tokens
|
||||
// instead of the root token. Users with `admin: true` in their
|
||||
// Kratos metadata_admin get full vault access.
|
||||
ok("Configuring JWT/OIDC auth for CLI...");
|
||||
let _ = bao.auth_enable("jwt", "jwt").await;
|
||||
|
||||
let domain = crate::config::domain();
|
||||
bao.write(
|
||||
"auth/jwt/config",
|
||||
&serde_json::json!({
|
||||
"oidc_discovery_url": format!("https://auth.{domain}/"),
|
||||
"default_role": "cli-reader"
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Admin role — full access for users with admin: true in JWT
|
||||
let admin_policy_hcl = concat!(
|
||||
"path \"*\" { capabilities = [\"create\", \"read\", \"update\", \"delete\", \"list\", \"sudo\"] }\n",
|
||||
);
|
||||
bao.write_policy("cli-admin", admin_policy_hcl).await?;
|
||||
|
||||
bao.write(
|
||||
"auth/jwt/role/cli-admin",
|
||||
&serde_json::json!({
|
||||
"role_type": "jwt",
|
||||
"bound_audiences": ["sunbeam-cli"],
|
||||
"user_claim": "sub",
|
||||
"bound_claims": { "admin": true },
|
||||
"policies": ["cli-admin"],
|
||||
"ttl": "1h"
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Reader role — read-only access for non-admin SSO users
|
||||
let cli_reader_hcl = concat!(
|
||||
"path \"secret/data/*\" { capabilities = [\"read\"] }\n",
|
||||
"path \"secret/metadata/*\" { capabilities = [\"read\", \"list\"] }\n",
|
||||
"path \"sys/health\" { capabilities = [\"read\", \"sudo\"] }\n",
|
||||
"path \"sys/seal-status\" { capabilities = [\"read\"] }\n",
|
||||
);
|
||||
bao.write_policy("cli-reader", cli_reader_hcl).await?;
|
||||
|
||||
bao.write(
|
||||
"auth/jwt/role/cli-reader",
|
||||
&serde_json::json!({
|
||||
"role_type": "jwt",
|
||||
"bound_audiences": ["sunbeam-cli"],
|
||||
"user_claim": "sub",
|
||||
"policies": ["cli-reader"],
|
||||
"ttl": "1h"
|
||||
}),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Build credentials map
|
||||
let mut creds = HashMap::new();
|
||||
let field_map: &[(&str, &str, &HashMap<String, String>)] = &[
|
||||
|
||||
@@ -152,7 +152,7 @@ async fn dispatch_bucket(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let s3 = client.s3();
|
||||
let s3 = client.s3().await?;
|
||||
match action {
|
||||
BucketAction::List => {
|
||||
let resp = s3.list_buckets().await?;
|
||||
@@ -194,7 +194,7 @@ async fn dispatch_object(
|
||||
client: &SunbeamClient,
|
||||
fmt: OutputFormat,
|
||||
) -> Result<()> {
|
||||
let s3 = client.s3();
|
||||
let s3 = client.s3().await?;
|
||||
match action {
|
||||
ObjectAction::List {
|
||||
bucket,
|
||||
|
||||
644
sunbeam-sdk/src/vault_keystore.rs
Normal file
644
sunbeam-sdk/src/vault_keystore.rs
Normal file
@@ -0,0 +1,644 @@
|
||||
//! Encrypted local keystore for OpenBao vault keys.
|
||||
//!
|
||||
//! Stores root tokens and unseal keys locally, encrypted with AES-256-GCM.
|
||||
//! Key derivation uses Argon2id with a machine-specific salt. This ensures
|
||||
//! vault keys survive K8s Secret overwrites and are never lost.
|
||||
|
||||
use crate::error::{Result, SunbeamError};
|
||||
use aes_gcm::aead::{Aead, KeyInit, OsRng};
|
||||
use aes_gcm::{Aes256Gcm, Nonce};
|
||||
use chrono::{DateTime, Utc};
|
||||
use rand::RngCore;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
/// AES-256-GCM nonce size.
|
||||
const NONCE_LEN: usize = 12;
|
||||
/// Argon2 salt size.
|
||||
const SALT_LEN: usize = 16;
|
||||
/// Machine salt size (stored in .machine-salt file).
|
||||
const MACHINE_SALT_LEN: usize = 32;
|
||||
|
||||
/// Vault keys stored in the encrypted keystore.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VaultKeystore {
    /// Keystore format version (currently 1); loaders reject anything newer.
    pub version: u32,
    /// Domain this keystore belongs to — also mixed into key derivation,
    /// so a keystore cannot be decrypted under a different domain.
    pub domain: String,
    /// When the keystore was first written.
    pub created_at: DateTime<Utc>,
    /// When the keystore was last rewritten.
    pub updated_at: DateTime<Utc>,
    /// OpenBao root token.
    pub root_token: String,
    /// Base64-encoded unseal key shares.
    pub unseal_keys_b64: Vec<String>,
    /// Total number of unseal key shares generated at init.
    pub key_shares: u32,
    /// Number of shares required to unseal (must be <= key_shares).
    pub key_threshold: u32,
}
|
||||
|
||||
/// Result of comparing local keystore with cluster state.
// NOTE(review): no producer of this enum is visible in this chunk —
// presumably a sync-check routine elsewhere constructs it; verify at the
// call site before relying on variant semantics beyond the docs below.
#[derive(Debug, Clone, PartialEq)]
pub enum SyncStatus {
    /// Local and cluster keys match.
    InSync,
    /// Local keystore exists but cluster secret is missing/empty.
    ClusterMissing,
    /// Cluster secret exists but no local keystore.
    LocalMissing,
    /// Both exist but differ.
    Mismatch,
    /// Neither exists.
    NoKeys,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Path helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Base directory for vault keystore files.
|
||||
fn base_dir(override_dir: Option<&Path>) -> PathBuf {
|
||||
if let Some(d) = override_dir {
|
||||
return d.to_path_buf();
|
||||
}
|
||||
dirs::data_dir()
|
||||
.unwrap_or_else(|| {
|
||||
dirs::home_dir()
|
||||
.unwrap_or_else(|| PathBuf::from("."))
|
||||
.join(".local/share")
|
||||
})
|
||||
.join("sunbeam")
|
||||
.join("vault")
|
||||
}
|
||||
|
||||
/// Path to the encrypted keystore file for a domain.
|
||||
pub fn keystore_path(domain: &str) -> PathBuf {
|
||||
keystore_path_in(domain, None)
|
||||
}
|
||||
|
||||
fn keystore_path_in(domain: &str, override_dir: Option<&Path>) -> PathBuf {
|
||||
let dir = base_dir(override_dir);
|
||||
let safe = domain.replace(['/', '\\', ':'], "_");
|
||||
let name = if safe.is_empty() { "default" } else { &safe };
|
||||
dir.join(format!("{name}.enc"))
|
||||
}
|
||||
|
||||
/// Whether a local keystore exists for this domain.
|
||||
pub fn keystore_exists(domain: &str) -> bool {
|
||||
keystore_path(domain).exists()
|
||||
}
|
||||
|
||||
fn keystore_exists_in(domain: &str, dir: Option<&Path>) -> bool {
|
||||
keystore_path_in(domain, dir).exists()
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Machine salt
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
fn machine_salt_path(override_dir: Option<&Path>) -> PathBuf {
|
||||
base_dir(override_dir).join(".machine-salt")
|
||||
}
|
||||
|
||||
fn load_or_create_machine_salt(override_dir: Option<&Path>) -> Result<Vec<u8>> {
|
||||
let path = machine_salt_path(override_dir);
|
||||
if path.exists() {
|
||||
let data = std::fs::read(&path)
|
||||
.map_err(|e| SunbeamError::Other(format!("reading machine salt: {e}")))?;
|
||||
if data.len() == MACHINE_SALT_LEN {
|
||||
return Ok(data);
|
||||
}
|
||||
// Wrong length — regenerate
|
||||
}
|
||||
|
||||
// Create parent directories
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent)
|
||||
.map_err(|e| SunbeamError::Other(format!("creating vault dir: {e}")))?;
|
||||
}
|
||||
|
||||
// Generate new salt
|
||||
let mut salt = vec![0u8; MACHINE_SALT_LEN];
|
||||
OsRng.fill_bytes(&mut salt);
|
||||
std::fs::write(&path, &salt)
|
||||
.map_err(|e| SunbeamError::Other(format!("writing machine salt: {e}")))?;
|
||||
|
||||
// Set 0600 permissions
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
std::fs::set_permissions(&path, perms)
|
||||
.map_err(|e| SunbeamError::Other(format!("setting salt permissions: {e}")))?;
|
||||
}
|
||||
|
||||
Ok(salt)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Key derivation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Derive the 32-byte AES key for a domain via Argon2 (crate defaults —
/// Argon2id per this module's header docs).
///
/// Inputs: the per-host machine salt, a fixed context string, and the
/// domain — so a keystore copied to another machine or renamed to another
/// domain derives a different key and fails to decrypt. `argon2_salt` is
/// the random per-file salt stored in the keystore header.
fn derive_key(domain: &str, argon2_salt: &[u8], override_dir: Option<&Path>) -> Result<[u8; 32]> {
    let machine_salt = load_or_create_machine_salt(override_dir)?;
    // Password input = machine_salt || "sunbeam-vault-keystore:" || domain.
    let mut input = machine_salt;
    input.extend_from_slice(b"sunbeam-vault-keystore:");
    input.extend_from_slice(domain.as_bytes());

    let mut key = [0u8; 32];
    argon2::Argon2::default()
        .hash_password_into(&input, argon2_salt, &mut key)
        .map_err(|e| SunbeamError::Other(format!("argon2 key derivation: {e}")))?;
    Ok(key)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Encrypt / decrypt
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Encrypt `plaintext` with AES-256-GCM under a key derived for `domain`.
///
/// On-disk layout: `[nonce (12)][argon2_salt (16)][ciphertext + GCM tag]`.
/// A fresh random nonce and Argon2 salt are drawn per call, so encrypting
/// the same plaintext twice yields different bytes.
fn encrypt(plaintext: &[u8], domain: &str, override_dir: Option<&Path>) -> Result<Vec<u8>> {
    // Generate random nonce and argon2 salt
    let mut nonce_bytes = [0u8; NONCE_LEN];
    let mut argon2_salt = [0u8; SALT_LEN];
    OsRng.fill_bytes(&mut nonce_bytes);
    OsRng.fill_bytes(&mut argon2_salt);

    // Key is re-derived per call from the fresh salt (+ machine salt + domain).
    let key = derive_key(domain, &argon2_salt, override_dir)?;
    let cipher = Aes256Gcm::new_from_slice(&key)
        .map_err(|e| SunbeamError::Other(format!("AES init: {e}")))?;
    let nonce = Nonce::from_slice(&nonce_bytes);

    let ciphertext = cipher
        .encrypt(nonce, plaintext)
        .map_err(|e| SunbeamError::Other(format!("AES encrypt: {e}")))?;

    // Output: [nonce (12)][argon2_salt (16)][ciphertext+tag]
    let mut output = Vec::with_capacity(NONCE_LEN + SALT_LEN + ciphertext.len());
    output.extend_from_slice(&nonce_bytes);
    output.extend_from_slice(&argon2_salt);
    output.extend_from_slice(&ciphertext);
    Ok(output)
}
|
||||
|
||||
/// Decrypt a blob produced by `encrypt`.
///
/// Parses the `[nonce][argon2_salt][ciphertext]` header, re-derives the
/// key, and authenticates via the GCM tag. The failure message is
/// deliberately generic: GCM cannot distinguish corruption from a wrong
/// key (e.g. a file copied from another machine or domain).
fn decrypt(data: &[u8], domain: &str, override_dir: Option<&Path>) -> Result<Vec<u8>> {
    let header_len = NONCE_LEN + SALT_LEN;
    if data.len() < header_len + 16 {
        // 16 bytes minimum for AES-GCM tag
        return Err(SunbeamError::Other(
            "vault keystore file is too short or corrupt".into(),
        ));
    }

    let nonce_bytes = &data[..NONCE_LEN];
    let argon2_salt = &data[NONCE_LEN..header_len];
    let ciphertext = &data[header_len..];

    let key = derive_key(domain, argon2_salt, override_dir)?;
    let cipher = Aes256Gcm::new_from_slice(&key)
        .map_err(|e| SunbeamError::Other(format!("AES init: {e}")))?;
    let nonce = Nonce::from_slice(nonce_bytes);

    cipher
        .decrypt(nonce, ciphertext)
        .map_err(|_| SunbeamError::Other("vault keystore decryption failed — file is corrupt or was encrypted on a different machine".into()))
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public API
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Save a keystore, encrypted, to the local filesystem.
|
||||
pub fn save_keystore(ks: &VaultKeystore) -> Result<()> {
|
||||
save_keystore_in(ks, None)
|
||||
}
|
||||
|
||||
fn save_keystore_in(ks: &VaultKeystore, override_dir: Option<&Path>) -> Result<()> {
|
||||
let path = keystore_path_in(&ks.domain, override_dir);
|
||||
|
||||
// Create parent directories
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent)
|
||||
.map_err(|e| SunbeamError::Other(format!("creating vault dir: {e}")))?;
|
||||
}
|
||||
|
||||
let plaintext = serde_json::to_vec_pretty(ks)?;
|
||||
let encrypted = encrypt(&plaintext, &ks.domain, override_dir)?;
|
||||
|
||||
std::fs::write(&path, &encrypted)
|
||||
.map_err(|e| SunbeamError::Other(format!("writing keystore: {e}")))?;
|
||||
|
||||
// Set 0600 permissions
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = std::fs::Permissions::from_mode(0o600);
|
||||
std::fs::set_permissions(&path, perms)
|
||||
.map_err(|e| SunbeamError::Other(format!("setting keystore permissions: {e}")))?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Load and decrypt a keystore from the local filesystem.
|
||||
pub fn load_keystore(domain: &str) -> Result<VaultKeystore> {
|
||||
load_keystore_in(domain, None)
|
||||
}
|
||||
|
||||
fn load_keystore_in(domain: &str, override_dir: Option<&Path>) -> Result<VaultKeystore> {
|
||||
let path = keystore_path_in(domain, override_dir);
|
||||
if !path.exists() {
|
||||
return Err(SunbeamError::Other(format!(
|
||||
"no vault keystore found for domain '{domain}' at {}",
|
||||
path.display()
|
||||
)));
|
||||
}
|
||||
|
||||
let data = std::fs::read(&path)
|
||||
.map_err(|e| SunbeamError::Other(format!("reading keystore: {e}")))?;
|
||||
|
||||
if data.is_empty() {
|
||||
return Err(SunbeamError::Other("vault keystore file is empty".into()));
|
||||
}
|
||||
|
||||
let plaintext = decrypt(&data, domain, override_dir)?;
|
||||
let ks: VaultKeystore = serde_json::from_slice(&plaintext)
|
||||
.map_err(|e| SunbeamError::Other(format!("parsing keystore JSON: {e}")))?;
|
||||
|
||||
if ks.version > 1 {
|
||||
return Err(SunbeamError::Other(format!(
|
||||
"vault keystore version {} is not supported (max: 1)",
|
||||
ks.version
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(ks)
|
||||
}
|
||||
|
||||
/// Load and validate a keystore — fails if any critical fields are empty.
|
||||
pub fn verify_vault_keys(domain: &str) -> Result<VaultKeystore> {
|
||||
verify_vault_keys_in(domain, None)
|
||||
}
|
||||
|
||||
fn verify_vault_keys_in(domain: &str, override_dir: Option<&Path>) -> Result<VaultKeystore> {
|
||||
let ks = load_keystore_in(domain, override_dir)?;
|
||||
|
||||
if ks.root_token.is_empty() {
|
||||
return Err(SunbeamError::Other(
|
||||
"vault keystore has empty root_token".into(),
|
||||
));
|
||||
}
|
||||
if ks.unseal_keys_b64.is_empty() {
|
||||
return Err(SunbeamError::Other(
|
||||
"vault keystore has no unseal keys".into(),
|
||||
));
|
||||
}
|
||||
if ks.key_shares == 0 {
|
||||
return Err(SunbeamError::Other(
|
||||
"vault keystore has key_shares=0".into(),
|
||||
));
|
||||
}
|
||||
if ks.key_threshold == 0 || ks.key_threshold > ks.key_shares {
|
||||
return Err(SunbeamError::Other(format!(
|
||||
"vault keystore has invalid threshold={}/shares={}",
|
||||
ks.key_threshold, ks.key_shares
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(ks)
|
||||
}
|
||||
|
||||
/// Export keystore as plaintext JSON (for machine migration).
|
||||
pub fn export_plaintext(domain: &str) -> Result<String> {
|
||||
let ks = load_keystore(domain)?;
|
||||
serde_json::to_string_pretty(&ks)
|
||||
.map_err(|e| SunbeamError::Other(format!("serializing keystore: {e}")))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempfile::TempDir;
|
||||
|
||||
fn test_keystore(domain: &str) -> VaultKeystore {
|
||||
VaultKeystore {
|
||||
version: 1,
|
||||
domain: domain.to_string(),
|
||||
created_at: Utc::now(),
|
||||
updated_at: Utc::now(),
|
||||
root_token: "hvs.test-root-token-abc123".to_string(),
|
||||
unseal_keys_b64: vec!["dGVzdC11bnNlYWwta2V5".to_string()],
|
||||
key_shares: 1,
|
||||
key_threshold: 1,
|
||||
}
|
||||
}
|
||||
|
||||
// -- Encryption roundtrip ------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn test_encrypt_decrypt_roundtrip() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let loaded = load_keystore_in("sunbeam.pt", Some(dir.path())).unwrap();
|
||||
assert_eq!(loaded.root_token, ks.root_token);
|
||||
assert_eq!(loaded.unseal_keys_b64, ks.unseal_keys_b64);
|
||||
assert_eq!(loaded.domain, ks.domain);
|
||||
assert_eq!(loaded.key_shares, ks.key_shares);
|
||||
assert_eq!(loaded.key_threshold, ks.key_threshold);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_encrypt_decrypt_large_token() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut ks = test_keystore("sunbeam.pt");
|
||||
ks.root_token = format!("hvs.{}", "a".repeat(200));
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let loaded = load_keystore_in("sunbeam.pt", Some(dir.path())).unwrap();
|
||||
assert_eq!(loaded.root_token, ks.root_token);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_different_domains_different_ciphertext() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks_a = test_keystore("a.example.com");
|
||||
let ks_b = VaultKeystore {
|
||||
domain: "b.example.com".into(),
|
||||
..test_keystore("b.example.com")
|
||||
};
|
||||
save_keystore_in(&ks_a, Some(dir.path())).unwrap();
|
||||
save_keystore_in(&ks_b, Some(dir.path())).unwrap();
|
||||
let file_a = std::fs::read(keystore_path_in("a.example.com", Some(dir.path()))).unwrap();
|
||||
let file_b = std::fs::read(keystore_path_in("b.example.com", Some(dir.path()))).unwrap();
|
||||
// Different ciphertext (random nonce + different key derivation)
|
||||
assert_ne!(file_a, file_b);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_domain_binding() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
// Try to load with wrong domain — should fail decryption
|
||||
let path_a = keystore_path_in("sunbeam.pt", Some(dir.path()));
|
||||
let path_b = keystore_path_in("evil.com", Some(dir.path()));
|
||||
std::fs::copy(&path_a, &path_b).unwrap();
|
||||
let result = load_keystore_in("evil.com", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
// -- Machine salt --------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn test_machine_salt_created_on_first_use() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let salt_path = machine_salt_path(Some(dir.path()));
|
||||
assert!(!salt_path.exists());
|
||||
let salt = load_or_create_machine_salt(Some(dir.path())).unwrap();
|
||||
assert!(salt_path.exists());
|
||||
assert_eq!(salt.len(), MACHINE_SALT_LEN);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_machine_salt_reused_on_subsequent_calls() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let salt1 = load_or_create_machine_salt(Some(dir.path())).unwrap();
|
||||
let salt2 = load_or_create_machine_salt(Some(dir.path())).unwrap();
|
||||
assert_eq!(salt1, salt2);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[test]
|
||||
fn test_machine_salt_permissions() {
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let dir = TempDir::new().unwrap();
|
||||
load_or_create_machine_salt(Some(dir.path())).unwrap();
|
||||
let path = machine_salt_path(Some(dir.path()));
|
||||
let perms = std::fs::metadata(&path).unwrap().permissions();
|
||||
assert_eq!(perms.mode() & 0o777, 0o600);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_machine_salt_32_bytes() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let salt = load_or_create_machine_salt(Some(dir.path())).unwrap();
|
||||
assert_eq!(salt.len(), 32);
|
||||
}
|
||||
|
||||
// -- File integrity ------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn test_corrupt_nonce() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let path = keystore_path_in("sunbeam.pt", Some(dir.path()));
|
||||
let mut data = std::fs::read(&path).unwrap();
|
||||
data[0] ^= 0xFF; // flip bits in nonce
|
||||
std::fs::write(&path, &data).unwrap();
|
||||
assert!(load_keystore_in("sunbeam.pt", Some(dir.path())).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_corrupt_ciphertext() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let path = keystore_path_in("sunbeam.pt", Some(dir.path()));
|
||||
let mut data = std::fs::read(&path).unwrap();
|
||||
let last = data.len() - 1;
|
||||
data[last] ^= 0xFF; // flip bits in ciphertext
|
||||
std::fs::write(&path, &data).unwrap();
|
||||
assert!(load_keystore_in("sunbeam.pt", Some(dir.path())).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_truncated_file() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let path = keystore_path_in("sunbeam.pt", Some(dir.path()));
|
||||
std::fs::write(&path, &[0u8; 10]).unwrap(); // too short
|
||||
let result = load_keystore_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("too short"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_file() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let path = keystore_path_in("sunbeam.pt", Some(dir.path()));
|
||||
std::fs::create_dir_all(path.parent().unwrap()).unwrap();
|
||||
std::fs::write(&path, &[]).unwrap();
|
||||
let result = load_keystore_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("empty"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_wrong_version() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut ks = test_keystore("sunbeam.pt");
|
||||
ks.version = 99;
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let result = load_keystore_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("not supported"));
|
||||
}
|
||||
|
||||
// -- Concurrency / edge cases -------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn test_save_overwrites_existing() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks1 = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks1, Some(dir.path())).unwrap();
|
||||
let mut ks2 = test_keystore("sunbeam.pt");
|
||||
ks2.root_token = "hvs.new-token".into();
|
||||
save_keystore_in(&ks2, Some(dir.path())).unwrap();
|
||||
let loaded = load_keystore_in("sunbeam.pt", Some(dir.path())).unwrap();
|
||||
assert_eq!(loaded.root_token, "hvs.new-token");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_load_nonexistent_domain() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let result = load_keystore_in("nonexistent.example.com", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("no vault keystore"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_keystore_exists_true() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
assert!(keystore_exists_in("sunbeam.pt", Some(dir.path())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_keystore_exists_false() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
assert!(!keystore_exists_in("sunbeam.pt", Some(dir.path())));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_save_creates_parent_directories() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let nested = dir.path().join("deeply").join("nested").join("vault");
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(&nested)).unwrap();
|
||||
assert!(keystore_path_in("sunbeam.pt", Some(&nested)).exists());
|
||||
}
|
||||
|
||||
// -- Field validation ---------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn test_verify_rejects_empty_root_token() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut ks = test_keystore("sunbeam.pt");
|
||||
ks.root_token = String::new();
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let result = verify_vault_keys_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("empty root_token"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_rejects_empty_unseal_keys() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut ks = test_keystore("sunbeam.pt");
|
||||
ks.unseal_keys_b64 = vec![];
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let result = verify_vault_keys_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("no unseal keys"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_rejects_zero_shares() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut ks = test_keystore("sunbeam.pt");
|
||||
ks.key_shares = 0;
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let result = verify_vault_keys_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("key_shares=0"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_verify_rejects_invalid_threshold() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
let mut ks = test_keystore("sunbeam.pt");
|
||||
ks.key_shares = 3;
|
||||
ks.key_threshold = 5; // threshold > shares
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
let result = verify_vault_keys_in("sunbeam.pt", Some(dir.path()));
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("invalid threshold"));
|
||||
}
|
||||
|
||||
// -- Integration-style ---------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn test_full_lifecycle() {
|
||||
let dir = TempDir::new().unwrap();
|
||||
// Create
|
||||
let ks = test_keystore("sunbeam.pt");
|
||||
save_keystore_in(&ks, Some(dir.path())).unwrap();
|
||||
// Verify
|
||||
let verified = verify_vault_keys_in("sunbeam.pt", Some(dir.path())).unwrap();
|
||||
assert_eq!(verified.root_token, ks.root_token);
|
||||
// Modify
|
||||
let mut ks2 = verified;
|
||||
ks2.root_token = "hvs.rotated-token".into();
|
||||
ks2.updated_at = Utc::now();
|
||||
save_keystore_in(&ks2, Some(dir.path())).unwrap();
|
||||
// Reload
|
||||
let reloaded = load_keystore_in("sunbeam.pt", Some(dir.path())).unwrap();
|
||||
assert_eq!(reloaded.root_token, "hvs.rotated-token");
|
||||
}
|
||||
|
||||
/// Exporting a keystore as pretty-printed JSON must surface the root token,
/// the domain, and the schema version field (mirrors the public export path:
/// load then serialize).
#[test]
fn test_export_plaintext_format() {
    let tmp = TempDir::new().unwrap();
    save_keystore_in(&test_keystore("sunbeam.pt"), Some(tmp.path())).unwrap();

    // Round-trip through disk, then serialize exactly as the exporter does.
    let exported = serde_json::to_string_pretty(
        &load_keystore_in("sunbeam.pt", Some(tmp.path())).unwrap(),
    )
    .unwrap();

    for needle in ["hvs.test-root-token-abc123", "sunbeam.pt", "\"version\": 1"] {
        assert!(exported.contains(needle), "export missing {needle:?}");
    }
}
|
||||
|
||||
/// Re-initialization flow: a local keystore survives a simulated cluster key
/// loss, then is overwritten wholesale when the vault is re-initialized.
#[test]
fn test_reinit_flow() {
    let tmp = TempDir::new().unwrap();

    // Initial save of the original keystore.
    let first = test_keystore("sunbeam.pt");
    save_keystore_in(&first, Some(tmp.path())).unwrap();

    // Cluster keys are lost — the local copy still holds token and shares.
    let recovered = load_keystore_in("sunbeam.pt", Some(tmp.path())).unwrap();
    assert_eq!(recovered.root_token, first.root_token);
    assert_eq!(recovered.unseal_keys_b64, first.unseal_keys_b64);

    // Re-init produces a brand-new token and unseal key set.
    let replacement = {
        let mut k = test_keystore("sunbeam.pt");
        k.root_token = "hvs.new-after-reinit".into();
        k.unseal_keys_b64 = vec!["bmV3LXVuc2VhbC1rZXk=".into()];
        k
    };
    save_keystore_in(&replacement, Some(tmp.path())).unwrap();

    // The store now reflects the post-reinit credentials.
    let current = load_keystore_in("sunbeam.pt", Some(tmp.path())).unwrap();
    assert_eq!(current.root_token, "hvs.new-after-reinit");
}
|
||||
|
||||
/// On Unix, a freshly written keystore file must be owner-read/write only
/// (mode 0600) — it holds the root token and unseal keys in plaintext-ish form.
#[cfg(unix)]
#[test]
fn test_keystore_file_permissions() {
    use std::os::unix::fs::PermissionsExt;

    let tmp = TempDir::new().unwrap();
    save_keystore_in(&test_keystore("sunbeam.pt"), Some(tmp.path())).unwrap();

    let file = keystore_path_in("sunbeam.pt", Some(tmp.path()));
    let mode = std::fs::metadata(&file).unwrap().permissions().mode();
    // Mask off the file-type bits; only the permission bits matter here.
    assert_eq!(mode & 0o777, 0o600);
}
|
||||
}
|
||||
@@ -1,8 +1,8 @@
|
||||
[package]
|
||||
name = "sunbeam"
|
||||
version = "1.0.0"
|
||||
version = "1.1.1"
|
||||
edition = "2024"
|
||||
description = "Sunbeam local dev stack manager"
|
||||
description = "Sunbeam Studios SDK, CLI, and ecosystem integrations"
|
||||
|
||||
[[bin]]
|
||||
name = "sunbeam"
|
||||
|
||||
@@ -383,6 +383,14 @@ def _seed_openbao() -> dict:
|
||||
"turn-secret": tuwunel["turn-secret"],
|
||||
"registration-token": tuwunel["registration-token"]})
|
||||
|
||||
# Patch gitea admin credentials into secret/sol for Sol's Gitea integration.
|
||||
# Uses kv patch (not put) to preserve manually-set keys (matrix-access-token etc.).
|
||||
ok("Patching Gitea admin credentials into secret/sol...")
|
||||
bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
|
||||
f"bao kv patch secret/sol "
|
||||
f"gitea-admin-username='{gitea['admin-username']}' "
|
||||
f"gitea-admin-password='{gitea['admin-password']}'")
|
||||
|
||||
# Configure Kubernetes auth method so VSO can authenticate with OpenBao
|
||||
ok("Configuring Kubernetes auth for VSO...")
|
||||
bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
|
||||
@@ -407,6 +415,23 @@ def _seed_openbao() -> dict:
|
||||
f"policies=vso-reader "
|
||||
f"ttl=1h")
|
||||
|
||||
# Sol agent policy — read/write access to sol-tokens/* for user impersonation PATs
|
||||
ok("Configuring Kubernetes auth for Sol agent...")
|
||||
sol_policy_hcl = (
|
||||
'path "secret/data/sol-tokens/*" { capabilities = ["create", "read", "update", "delete"] }\n'
|
||||
'path "secret/metadata/sol-tokens/*" { capabilities = ["read", "delete", "list"] }\n'
|
||||
)
|
||||
sol_policy_b64 = base64.b64encode(sol_policy_hcl.encode()).decode()
|
||||
bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
|
||||
f"sh -c 'echo {sol_policy_b64} | base64 -d | bao policy write sol-agent -'")
|
||||
|
||||
bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
|
||||
f"bao write auth/kubernetes/role/sol-agent "
|
||||
f"bound_service_account_names=default "
|
||||
f"bound_service_account_namespaces=matrix "
|
||||
f"policies=sol-agent "
|
||||
f"ttl=1h")
|
||||
|
||||
return {
|
||||
"hydra-system-secret": hydra["system-secret"],
|
||||
"hydra-cookie-secret": hydra["cookie-secret"],
|
||||
|
||||
@@ -927,12 +927,14 @@ pub async fn dispatch() -> Result<()> {
|
||||
let sc = sunbeam_sdk::client::SunbeamClient::from_context(
|
||||
&sunbeam_sdk::config::active_context(),
|
||||
);
|
||||
sunbeam_sdk::gitea::cli::dispatch(action, sc.gitea(), cli.output_format).await
|
||||
sunbeam_sdk::gitea::cli::dispatch(action, &sc, cli.output_format).await
|
||||
}
|
||||
|
||||
Some(Verb::Chat { action }) => {
|
||||
let domain = sunbeam_sdk::config::active_context().domain.clone();
|
||||
sunbeam_sdk::matrix::cli::dispatch(&domain, cli.output_format, action).await
|
||||
let sc = sunbeam_sdk::client::SunbeamClient::from_context(
|
||||
&sunbeam_sdk::config::active_context(),
|
||||
);
|
||||
sunbeam_sdk::matrix::cli::dispatch(&sc, cli.output_format, action).await
|
||||
}
|
||||
|
||||
Some(Verb::Search { action }) => {
|
||||
@@ -964,8 +966,10 @@ pub async fn dispatch() -> Result<()> {
|
||||
}
|
||||
|
||||
Some(Verb::Vault { action }) => {
|
||||
let bao = sunbeam_sdk::openbao::BaoClient::new("http://127.0.0.1:8200");
|
||||
sunbeam_sdk::openbao::cli::dispatch(action, &bao, cli.output_format).await
|
||||
let sc = sunbeam_sdk::client::SunbeamClient::from_context(
|
||||
&sunbeam_sdk::config::active_context(),
|
||||
);
|
||||
sunbeam_sdk::openbao::cli::dispatch(action, &sc, cli.output_format).await
|
||||
}
|
||||
|
||||
Some(Verb::People { action }) => {
|
||||
|
||||
Reference in New Issue
Block a user