chore: remove Python code and pyproject.toml
This commit is contained in:
@@ -1,12 +0,0 @@
|
|||||||
[project]
name = "sunbeam"
version = "0.1.0"
requires-python = ">=3.11"
dependencies = ["setuptools"]

[project.scripts]
sunbeam = "sunbeam.__main__:main"

[build-system]
requires = ["setuptools>=68"]
build-backend = "setuptools.build_meta"
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
[package]
name = "sunbeam"
version = "1.1.2"
edition = "2024"
description = "Sunbeam Studios SDK, CLI, and ecosystem integrations"

[[bin]]
name = "sunbeam"
path = "src/main.rs"

[dependencies]
sunbeam-sdk = { path = "../sunbeam-sdk", features = ["all", "cli"] }
tokio = { version = "1", features = ["full"] }
clap = { version = "4", features = ["derive"] }
chrono = "0.4"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
rustls = { version = "0.23", features = ["ring"] }
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
# sunbeam CLI package
|
|
||||||
@@ -1,4 +0,0 @@
|
|||||||
from sunbeam.cli import main

# Entry point for `python -m sunbeam`: delegate straight to the CLI dispatcher.
if __name__ == "__main__":
    main()
|
|
||||||
@@ -1,336 +0,0 @@
|
|||||||
"""Service-level health checks — functional probes beyond pod readiness."""
|
|
||||||
import base64
|
|
||||||
import hashlib
|
|
||||||
import hmac
|
|
||||||
import json
|
|
||||||
import ssl
|
|
||||||
import subprocess
|
|
||||||
import urllib.error
|
|
||||||
import urllib.request
|
|
||||||
from concurrent.futures import ThreadPoolExecutor
|
|
||||||
from dataclasses import dataclass
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Any
|
|
||||||
|
|
||||||
from sunbeam.kube import get_domain, kube_exec, kube_out, parse_target
|
|
||||||
from sunbeam.output import ok, step, warn
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
class CheckResult:
    """Outcome of one service-level health probe."""

    name: str  # short check identifier, e.g. "gitea-version"
    ns: str  # kubernetes namespace the probed service lives in
    svc: str  # service name (used for target filtering and grouping)
    passed: bool  # True when the probe succeeded
    detail: str = ""  # human-readable detail shown next to the result row
|
|
||||||
|
|
||||||
|
|
||||||
def _ssl_ctx() -> ssl.SSLContext:
|
|
||||||
"""Return an SSL context that trusts the mkcert local CA if available."""
|
|
||||||
ctx = ssl.create_default_context()
|
|
||||||
try:
|
|
||||||
r = subprocess.run(["mkcert", "-CAROOT"], capture_output=True, text=True)
|
|
||||||
if r.returncode == 0:
|
|
||||||
ca_file = Path(r.stdout.strip()) / "rootCA.pem"
|
|
||||||
if ca_file.exists():
|
|
||||||
ctx.load_verify_locations(cafile=str(ca_file))
|
|
||||||
except FileNotFoundError:
|
|
||||||
pass
|
|
||||||
return ctx
|
|
||||||
|
|
||||||
|
|
||||||
def _kube_secret(ns: str, name: str, key: str) -> str:
    """Fetch one key of a K8s secret, base64-decoded; '' on any failure."""
    encoded = kube_out("get", "secret", name, "-n", ns, f"-o=jsonpath={{.data.{key}}}")
    if not encoded:
        return ""
    try:
        # The extra "==" tolerates base64 values with stripped padding.
        return base64.b64decode(encoded + "==").decode()
    except Exception:
        # Malformed secret data is treated the same as a missing secret.
        return ""
|
|
||||||
|
|
||||||
|
|
||||||
class _NoRedirect(urllib.request.HTTPRedirectHandler):
|
|
||||||
"""Prevent urllib from following redirects so we can inspect the status code."""
|
|
||||||
def redirect_request(self, req, fp, code, msg, headers, newurl):
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def _opener(ssl_ctx: ssl.SSLContext) -> urllib.request.OpenerDirector:
    """Create an opener that uses *ssl_ctx* for TLS and never follows redirects."""
    handlers = [
        _NoRedirect(),
        urllib.request.HTTPSHandler(context=ssl_ctx),
    ]
    return urllib.request.build_opener(*handlers)
|
|
||||||
|
|
||||||
|
|
||||||
def _http_get(url: str, opener: urllib.request.OpenerDirector, *,
|
|
||||||
headers: dict | None = None, timeout: int = 5) -> tuple[int, bytes]:
|
|
||||||
"""Return (status_code, body). Redirects are not followed.
|
|
||||||
|
|
||||||
Any network/SSL error (including TimeoutError) is re-raised as URLError
|
|
||||||
so callers only need to catch urllib.error.URLError.
|
|
||||||
"""
|
|
||||||
req = urllib.request.Request(url, headers=headers or {})
|
|
||||||
try:
|
|
||||||
with opener.open(req, timeout=timeout) as resp:
|
|
||||||
return resp.status, resp.read()
|
|
||||||
except urllib.error.HTTPError as e:
|
|
||||||
return e.code, b""
|
|
||||||
except urllib.error.URLError:
|
|
||||||
raise
|
|
||||||
except OSError as e:
|
|
||||||
# TimeoutError and other socket/SSL errors don't always get wrapped
|
|
||||||
# in URLError by Python's urllib — normalize them here.
|
|
||||||
raise urllib.error.URLError(e) from e
|
|
||||||
|
|
||||||
|
|
||||||
# ── Individual checks ─────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
def check_gitea_version(domain: str, opener) -> CheckResult:
    """Probe Gitea's unauthenticated version endpoint (GET /api/v1/version)."""
    endpoint = f"https://src.{domain}/api/v1/version"
    try:
        status, body = _http_get(endpoint, opener)
    except urllib.error.URLError as e:
        return CheckResult("gitea-version", "devtools", "gitea", False, str(e.reason))
    if status != 200:
        return CheckResult("gitea-version", "devtools", "gitea", False, f"HTTP {status}")
    ver = json.loads(body).get("version", "?")
    return CheckResult("gitea-version", "devtools", "gitea", True, f"v{ver}")
|
|
||||||
|
|
||||||
|
|
||||||
def check_gitea_auth(domain: str, opener) -> CheckResult:
    """Authenticate against Gitea as the admin user via HTTP basic auth."""
    username = _kube_secret("devtools", "gitea-admin-credentials", "admin-username") or "gitea_admin"
    password = _kube_secret("devtools", "gitea-admin-credentials", "admin-password")
    if not password:
        return CheckResult("gitea-auth", "devtools", "gitea", False,
                           "admin-password not found in secret")
    token = base64.b64encode(f"{username}:{password}".encode()).decode()
    endpoint = f"https://src.{domain}/api/v1/user"
    try:
        status, body = _http_get(endpoint, opener,
                                 headers={"Authorization": f"Basic {token}"})
    except urllib.error.URLError as e:
        return CheckResult("gitea-auth", "devtools", "gitea", False, str(e.reason))
    if status != 200:
        return CheckResult("gitea-auth", "devtools", "gitea", False, f"HTTP {status}")
    login = json.loads(body).get("login", "?")
    return CheckResult("gitea-auth", "devtools", "gitea", True, f"user={login}")
|
|
||||||
|
|
||||||
|
|
||||||
def check_postgres(domain: str, opener) -> CheckResult:
    """Verify the CNPG cluster reports every instance ready.

    *domain*/*opener* are unused; the signature matches the check registry.
    """
    ready = kube_out("get", "cluster", "postgres", "-n", "data",
                     "-o=jsonpath={.status.readyInstances}")
    total = kube_out("get", "cluster", "postgres", "-n", "data",
                     "-o=jsonpath={.status.instances}")
    if ready and total and ready == total:
        return CheckResult("postgres", "data", "postgres", True, f"{ready}/{total} ready")
    if ready or total:
        detail = f"{ready or '?'}/{total or '?'} ready"
    else:
        detail = "cluster not found"
    return CheckResult("postgres", "data", "postgres", False, detail)
|
|
||||||
|
|
||||||
|
|
||||||
def check_valkey(domain: str, opener) -> CheckResult:
    """Ping Valkey through the first pod matching app=valkey."""
    listing = kube_out("get", "pods", "-n", "data", "-l", "app=valkey",
                       "--no-headers", "-o=custom-columns=NAME:.metadata.name")
    pod = listing.splitlines()[0].strip() if listing else ""
    if not pod:
        return CheckResult("valkey", "data", "valkey", False, "no valkey pod")
    _, reply = kube_exec("data", pod, "valkey-cli", "ping", container="valkey")
    return CheckResult("valkey", "data", "valkey", reply == "PONG", reply or "no response")
|
|
||||||
|
|
||||||
|
|
||||||
def check_openbao(domain: str, opener) -> CheckResult:
    """Ask openbao-0 for its status; pass only when initialized AND unsealed."""
    _, out = kube_exec("data", "openbao-0", "bao", "status", "-format=json", container="openbao")
    if not out:
        return CheckResult("openbao", "data", "openbao", False, "no response")
    try:
        status = json.loads(out)
    except json.JSONDecodeError:
        # Not JSON — surface a truncated slice of whatever bao printed.
        return CheckResult("openbao", "data", "openbao", False, out[:80])
    init = status.get("initialized", False)
    sealed = status.get("sealed", True)
    return CheckResult("openbao", "data", "openbao", init and not sealed,
                       f"init={init}, sealed={sealed}")
|
|
||||||
|
|
||||||
|
|
||||||
def _s3_auth_headers(access_key: str, secret_key: str, host: str) -> dict:
|
|
||||||
"""Return Authorization + x-amz-date headers for an unsigned GET / S3 request."""
|
|
||||||
t = datetime.now(tz=timezone.utc)
|
|
||||||
amzdate = t.strftime("%Y%m%dT%H%M%SZ")
|
|
||||||
datestamp = t.strftime("%Y%m%d")
|
|
||||||
|
|
||||||
payload_hash = hashlib.sha256(b"").hexdigest()
|
|
||||||
canonical = f"GET\n/\n\nhost:{host}\nx-amz-date:{amzdate}\n\nhost;x-amz-date\n{payload_hash}"
|
|
||||||
credential_scope = f"{datestamp}/us-east-1/s3/aws4_request"
|
|
||||||
string_to_sign = (
|
|
||||||
f"AWS4-HMAC-SHA256\n{amzdate}\n{credential_scope}\n"
|
|
||||||
f"{hashlib.sha256(canonical.encode()).hexdigest()}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def _sign(key: bytes, msg: str) -> bytes:
|
|
||||||
return hmac.new(key, msg.encode(), hashlib.sha256).digest()
|
|
||||||
|
|
||||||
k = _sign(f"AWS4{secret_key}".encode(), datestamp)
|
|
||||||
k = _sign(k, "us-east-1")
|
|
||||||
k = _sign(k, "s3")
|
|
||||||
k = _sign(k, "aws4_request")
|
|
||||||
sig = hmac.new(k, string_to_sign.encode(), hashlib.sha256).hexdigest()
|
|
||||||
|
|
||||||
auth = (
|
|
||||||
f"AWS4-HMAC-SHA256 Credential={access_key}/{credential_scope},"
|
|
||||||
f" SignedHeaders=host;x-amz-date, Signature={sig}"
|
|
||||||
)
|
|
||||||
return {"Authorization": auth, "x-amz-date": amzdate}
|
|
||||||
|
|
||||||
|
|
||||||
def check_seaweedfs(domain: str, opener) -> CheckResult:
    """List buckets at the S3 endpoint using SigV4-signed credentials."""
    access_key = _kube_secret("storage", "seaweedfs-s3-credentials", "S3_ACCESS_KEY")
    secret_key = _kube_secret("storage", "seaweedfs-s3-credentials", "S3_SECRET_KEY")
    if not access_key or not secret_key:
        return CheckResult("seaweedfs", "storage", "seaweedfs", False,
                           "credentials not found in seaweedfs-s3-credentials secret")

    host = f"s3.{domain}"
    signed = _s3_auth_headers(access_key, secret_key, host)
    try:
        status, _ = _http_get(f"https://{host}/", opener, headers=signed)
    except urllib.error.URLError as e:
        return CheckResult("seaweedfs", "storage", "seaweedfs", False, str(e.reason))
    if status == 200:
        return CheckResult("seaweedfs", "storage", "seaweedfs", True, "S3 authenticated")
    return CheckResult("seaweedfs", "storage", "seaweedfs", False, f"HTTP {status}")
|
|
||||||
|
|
||||||
|
|
||||||
def check_kratos(domain: str, opener) -> CheckResult:
    """Hit Kratos' readiness endpoint; anything other than HTTP 200 fails."""
    url = f"https://auth.{domain}/kratos/health/ready"
    try:
        status, body = _http_get(url, opener)
    except urllib.error.URLError as e:
        return CheckResult("kratos", "ory", "kratos", False, str(e.reason))
    healthy = status == 200
    detail = f"HTTP {status}"
    if not healthy and body:
        # Append a truncated response body to aid debugging failures.
        detail += f": {body.decode(errors='replace')[:80]}"
    return CheckResult("kratos", "ory", "kratos", healthy, detail)
|
|
||||||
|
|
||||||
|
|
||||||
def check_hydra_oidc(domain: str, opener) -> CheckResult:
    """Fetch the OIDC discovery document and report its issuer."""
    discovery = f"https://auth.{domain}/.well-known/openid-configuration"
    try:
        status, body = _http_get(discovery, opener)
    except urllib.error.URLError as e:
        return CheckResult("hydra-oidc", "ory", "hydra", False, str(e.reason))
    if status != 200:
        return CheckResult("hydra-oidc", "ory", "hydra", False, f"HTTP {status}")
    issuer = json.loads(body).get("issuer", "?")
    return CheckResult("hydra-oidc", "ory", "hydra", True, f"issuer={issuer}")
|
|
||||||
|
|
||||||
|
|
||||||
def check_people(domain: str, opener) -> CheckResult:
    """Probe the people frontend.

    Any status below 500 counts as healthy — a 302 redirect to the OIDC
    provider is the expected response for unauthenticated requests.
    """
    try:
        status, _ = _http_get(f"https://people.{domain}/", opener)
    except urllib.error.URLError as e:
        return CheckResult("people", "lasuite", "people", False, str(e.reason))
    return CheckResult("people", "lasuite", "people", status < 500, f"HTTP {status}")
|
|
||||||
|
|
||||||
|
|
||||||
def check_people_api(domain: str, opener) -> CheckResult:
    """Probe the people API config endpoint.

    Any status below 500 counts as healthy — a 401 auth-required response
    is expected without credentials.
    """
    try:
        status, _ = _http_get(f"https://people.{domain}/api/v1.0/config/", opener)
    except urllib.error.URLError as e:
        return CheckResult("people-api", "lasuite", "people", False, str(e.reason))
    return CheckResult("people-api", "lasuite", "people", status < 500, f"HTTP {status}")
|
|
||||||
|
|
||||||
|
|
||||||
def check_livekit(domain: str, opener) -> CheckResult:
    """Confirm the LiveKit server answers on its in-pod HTTP port (7880)."""
    listing = kube_out("get", "pods", "-n", "media", "-l", "app.kubernetes.io/name=livekit-server",
                       "--no-headers", "-o=custom-columns=NAME:.metadata.name")
    pod = listing.splitlines()[0].strip() if listing else ""
    if not pod:
        return CheckResult("livekit", "media", "livekit", False, "no livekit pod")
    rc, _ = kube_exec("media", pod, "wget", "-qO-", "http://localhost:7880/")
    detail = "server responding" if rc == 0 else "server not responding"
    return CheckResult("livekit", "media", "livekit", rc == 0, detail)
|
|
||||||
|
|
||||||
|
|
||||||
# ── Check registry ────────────────────────────────────────────────────────────
|
|
||||||
|
|
||||||
# Registry of all checks as (check function, namespace, service) rows.
# The ns/svc columns drive `sunbeam check <ns[/svc]>` filtering in cmd_check
# and label the failure when a check function itself raises (_run_one).
# Rows are grouped by namespace; the printed report relies on that ordering.
CHECKS: list[tuple[Any, str, str]] = [
    (check_gitea_version, "devtools", "gitea"),
    (check_gitea_auth, "devtools", "gitea"),
    (check_postgres, "data", "postgres"),
    (check_valkey, "data", "valkey"),
    (check_openbao, "data", "openbao"),
    (check_seaweedfs, "storage", "seaweedfs"),
    (check_kratos, "ory", "kratos"),
    (check_hydra_oidc, "ory", "hydra"),
    (check_people, "lasuite", "people"),
    (check_people_api, "lasuite", "people"),
    (check_livekit, "media", "livekit"),
]
|
|
||||||
|
|
||||||
|
|
||||||
def _run_one(fn, domain: str, op, ns: str, svc: str) -> CheckResult:
|
|
||||||
try:
|
|
||||||
return fn(domain, op)
|
|
||||||
except Exception as e:
|
|
||||||
return CheckResult(fn.__name__.replace("check_", ""), ns, svc, False, str(e)[:80])
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_check(target: str | None) -> None:
    """Run service-level health checks, optionally scoped to a namespace or service.

    target: None for all checks, "ns" or "ns/svc" to filter (parse_target
    splits it). Prints a report grouped by namespace and a summary line;
    returns nothing.
    """
    step("Service health checks...")

    domain = get_domain()
    ssl_ctx = _ssl_ctx()
    op = _opener(ssl_ctx)

    # A None filter component means "match everything" for that column.
    ns_filter, svc_filter = parse_target(target) if target else (None, None)
    selected = [
        (fn, ns, svc) for fn, ns, svc in CHECKS
        if (ns_filter is None or ns == ns_filter)
        and (svc_filter is None or svc == svc_filter)
    ]

    if not selected:
        warn(f"No checks match target: {target}")
        return

    # Run all checks concurrently — total time ≈ slowest single check.
    # One worker per check; _run_one converts exceptions to failed results,
    # so f.result() below never raises.
    with ThreadPoolExecutor(max_workers=len(selected)) as pool:
        futures = [pool.submit(_run_one, fn, domain, op, ns, svc)
                   for fn, ns, svc in selected]
        results = [f.result() for f in futures]

    # Print grouped by namespace (mirrors sunbeam status layout).
    # Results keep submission order, and CHECKS is grouped by namespace,
    # so a simple "ns changed" test is enough to emit group headers.
    name_w = max(len(r.name) for r in results)
    cur_ns = None
    for r in results:
        if r.ns != cur_ns:
            print(f"  {r.ns}:")
            cur_ns = r.ns
        icon = "\u2713" if r.passed else "\u2717"  # ✓ / ✗
        detail = f"  {r.detail}" if r.detail else ""
        print(f"    {icon} {r.name:<{name_w}}{detail}")

    print()
    failed = [r for r in results if not r.passed]
    if failed:
        warn(f"{len(failed)} check(s) failed.")
    else:
        ok(f"All {len(results)} check(s) passed.")
|
|
||||||
375
sunbeam/cli.py
375
sunbeam/cli.py
@@ -1,375 +0,0 @@
|
|||||||
"""CLI entry point — argparse dispatch table for all sunbeam verbs."""
|
|
||||||
import argparse
|
|
||||||
import datetime
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
def _date_type(value):
|
|
||||||
"""Validate YYYY-MM-DD date format for argparse."""
|
|
||||||
if not value:
|
|
||||||
return value
|
|
||||||
try:
|
|
||||||
datetime.date.fromisoformat(value)
|
|
||||||
except ValueError:
|
|
||||||
raise argparse.ArgumentTypeError(f"Invalid date: {value!r} (expected YYYY-MM-DD)")
|
|
||||||
return value
|
|
||||||
|
|
||||||
|
|
||||||
# Default kubectl context per --env choice; --context overrides this mapping.
ENV_CONTEXTS = {
    "local": "sunbeam",
    "production": "production",
}
|
|
||||||
|
|
||||||
|
|
||||||
def main() -> None:
|
|
||||||
parser = argparse.ArgumentParser(
|
|
||||||
prog="sunbeam",
|
|
||||||
description="Sunbeam local dev stack manager",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--env", choices=["local", "production"], default="local",
|
|
||||||
help="Target environment (default: local)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--context", default=None,
|
|
||||||
help="kubectl context override (default: sunbeam for local, default for production)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--domain", default="",
|
|
||||||
help="Domain suffix for production deploys (e.g. sunbeam.pt)",
|
|
||||||
)
|
|
||||||
parser.add_argument(
|
|
||||||
"--email", default="",
|
|
||||||
help="ACME email for cert-manager (e.g. ops@sunbeam.pt)",
|
|
||||||
)
|
|
||||||
|
|
||||||
sub = parser.add_subparsers(dest="verb", metavar="verb")
|
|
||||||
|
|
||||||
# sunbeam up
|
|
||||||
sub.add_parser("up", help="Full cluster bring-up")
|
|
||||||
|
|
||||||
# sunbeam down
|
|
||||||
sub.add_parser("down", help="Tear down Lima VM")
|
|
||||||
|
|
||||||
# sunbeam status [ns[/name]]
|
|
||||||
p_status = sub.add_parser("status", help="Pod health (optionally scoped)")
|
|
||||||
p_status.add_argument("target", nargs="?", default=None,
|
|
||||||
help="namespace or namespace/name")
|
|
||||||
|
|
||||||
# sunbeam apply [namespace]
|
|
||||||
p_apply = sub.add_parser("apply", help="kustomize build + domain subst + kubectl apply")
|
|
||||||
p_apply.add_argument("namespace", nargs="?", default="",
|
|
||||||
help="Limit apply to one namespace (e.g. lasuite, ingress, ory)")
|
|
||||||
p_apply.add_argument("--all", action="store_true", dest="apply_all",
|
|
||||||
help="Apply all namespaces without confirmation")
|
|
||||||
p_apply.add_argument("--domain", default="", help="Domain suffix (e.g. sunbeam.pt)")
|
|
||||||
p_apply.add_argument("--email", default="", help="ACME email for cert-manager")
|
|
||||||
|
|
||||||
# sunbeam seed
|
|
||||||
sub.add_parser("seed", help="Generate/store all credentials in OpenBao")
|
|
||||||
|
|
||||||
# sunbeam verify
|
|
||||||
sub.add_parser("verify", help="E2E VSO + OpenBao integration test")
|
|
||||||
|
|
||||||
# sunbeam logs <ns/name> [-f]
|
|
||||||
p_logs = sub.add_parser("logs", help="kubectl logs for a service")
|
|
||||||
p_logs.add_argument("target", help="namespace/name")
|
|
||||||
p_logs.add_argument("-f", "--follow", action="store_true",
|
|
||||||
help="Stream logs (--follow)")
|
|
||||||
|
|
||||||
# sunbeam get <ns/name> [-o yaml|json|wide]
|
|
||||||
p_get = sub.add_parser("get", help="Raw kubectl get for a pod (ns/name)")
|
|
||||||
p_get.add_argument("target", help="namespace/name")
|
|
||||||
p_get.add_argument("-o", "--output", default="yaml",
|
|
||||||
choices=["yaml", "json", "wide"],
|
|
||||||
help="Output format (default: yaml)")
|
|
||||||
|
|
||||||
# sunbeam restart [ns[/name]]
|
|
||||||
p_restart = sub.add_parser("restart", help="Rolling restart of services")
|
|
||||||
p_restart.add_argument("target", nargs="?", default=None,
|
|
||||||
help="namespace or namespace/name")
|
|
||||||
|
|
||||||
# sunbeam build <what> [--push] [--deploy]
|
|
||||||
p_build = sub.add_parser("build", help="Build an artifact (add --push to push, --deploy to apply+rollout)")
|
|
||||||
p_build.add_argument("what",
|
|
||||||
choices=["proxy", "integration", "kratos-admin", "meet",
|
|
||||||
"docs-frontend", "people-frontend", "people",
|
|
||||||
"messages", "messages-backend", "messages-frontend",
|
|
||||||
"messages-mta-in", "messages-mta-out",
|
|
||||||
"messages-mpa", "messages-socks-proxy",
|
|
||||||
"tuwunel", "calendars", "projects", "sol"],
|
|
||||||
help="What to build")
|
|
||||||
p_build.add_argument("--push", action="store_true",
|
|
||||||
help="Push image to registry after building")
|
|
||||||
p_build.add_argument("--deploy", action="store_true",
|
|
||||||
help="Apply manifests and rollout restart after pushing (implies --push)")
|
|
||||||
p_build.add_argument("--no-cache", action="store_true",
|
|
||||||
help="Disable buildkitd layer cache")
|
|
||||||
|
|
||||||
# sunbeam check [ns[/name]]
|
|
||||||
p_check = sub.add_parser("check", help="Functional service health checks")
|
|
||||||
p_check.add_argument("target", nargs="?", default=None,
|
|
||||||
help="namespace or namespace/name")
|
|
||||||
|
|
||||||
# sunbeam mirror
|
|
||||||
sub.add_parser("mirror", help="Mirror amd64-only La Suite images")
|
|
||||||
|
|
||||||
# sunbeam bootstrap
|
|
||||||
sub.add_parser("bootstrap", help="Create Gitea orgs/repos; set up Lima registry")
|
|
||||||
|
|
||||||
# sunbeam config <action> [args]
|
|
||||||
p_config = sub.add_parser("config", help="Manage sunbeam configuration")
|
|
||||||
config_sub = p_config.add_subparsers(dest="config_action", metavar="action")
|
|
||||||
|
|
||||||
# sunbeam config set --host HOST --infra-dir DIR --acme-email EMAIL
|
|
||||||
p_config_set = config_sub.add_parser("set", help="Set configuration values")
|
|
||||||
p_config_set.add_argument("--host", default="",
|
|
||||||
help="Production SSH host (e.g. user@server.example.com)")
|
|
||||||
p_config_set.add_argument("--infra-dir", default="",
|
|
||||||
help="Infrastructure directory root")
|
|
||||||
p_config_set.add_argument("--acme-email", default="",
|
|
||||||
help="ACME email for Let's Encrypt certificates (e.g. ops@sunbeam.pt)")
|
|
||||||
|
|
||||||
# sunbeam config get
|
|
||||||
config_sub.add_parser("get", help="Get current configuration")
|
|
||||||
|
|
||||||
# sunbeam config clear
|
|
||||||
config_sub.add_parser("clear", help="Clear configuration")
|
|
||||||
|
|
||||||
# sunbeam k8s [kubectl args...] — transparent kubectl --context=sunbeam wrapper
|
|
||||||
p_k8s = sub.add_parser("k8s", help="kubectl --context=sunbeam passthrough")
|
|
||||||
p_k8s.add_argument("kubectl_args", nargs=argparse.REMAINDER,
|
|
||||||
help="arguments forwarded verbatim to kubectl")
|
|
||||||
|
|
||||||
# sunbeam bao [bao args...] — bao CLI inside OpenBao pod with root token injected
|
|
||||||
p_bao = sub.add_parser("bao", help="bao CLI passthrough (runs inside OpenBao pod with root token)")
|
|
||||||
p_bao.add_argument("bao_args", nargs=argparse.REMAINDER,
|
|
||||||
help="arguments forwarded verbatim to bao")
|
|
||||||
|
|
||||||
# sunbeam user <action> [args]
|
|
||||||
p_user = sub.add_parser("user", help="User/identity management")
|
|
||||||
user_sub = p_user.add_subparsers(dest="user_action", metavar="action")
|
|
||||||
|
|
||||||
p_user_list = user_sub.add_parser("list", help="List identities")
|
|
||||||
p_user_list.add_argument("--search", default="", help="Filter by email")
|
|
||||||
|
|
||||||
p_user_get = user_sub.add_parser("get", help="Get identity by email or ID")
|
|
||||||
p_user_get.add_argument("target", help="Email or identity ID")
|
|
||||||
|
|
||||||
p_user_create = user_sub.add_parser("create", help="Create identity")
|
|
||||||
p_user_create.add_argument("email", help="Email address")
|
|
||||||
p_user_create.add_argument("--name", default="", help="Display name")
|
|
||||||
p_user_create.add_argument("--schema", default="default", help="Schema ID")
|
|
||||||
|
|
||||||
p_user_delete = user_sub.add_parser("delete", help="Delete identity")
|
|
||||||
p_user_delete.add_argument("target", help="Email or identity ID")
|
|
||||||
|
|
||||||
p_user_recover = user_sub.add_parser("recover", help="Generate recovery link")
|
|
||||||
p_user_recover.add_argument("target", help="Email or identity ID")
|
|
||||||
|
|
||||||
p_user_disable = user_sub.add_parser("disable", help="Disable identity + revoke sessions (lockout)")
|
|
||||||
p_user_disable.add_argument("target", help="Email or identity ID")
|
|
||||||
|
|
||||||
p_user_enable = user_sub.add_parser("enable", help="Re-enable a disabled identity")
|
|
||||||
p_user_enable.add_argument("target", help="Email or identity ID")
|
|
||||||
|
|
||||||
p_user_set_pw = user_sub.add_parser("set-password", help="Set password for an identity")
|
|
||||||
p_user_set_pw.add_argument("target", help="Email or identity ID")
|
|
||||||
p_user_set_pw.add_argument("password", help="New password")
|
|
||||||
|
|
||||||
p_user_onboard = user_sub.add_parser("onboard", help="Onboard new user (create + welcome email)")
|
|
||||||
p_user_onboard.add_argument("email", help="Email address")
|
|
||||||
p_user_onboard.add_argument("--name", default="", help="Display name (First Last)")
|
|
||||||
p_user_onboard.add_argument("--schema", default="employee", help="Schema ID (default: employee)")
|
|
||||||
p_user_onboard.add_argument("--no-email", action="store_true", help="Skip sending welcome email")
|
|
||||||
p_user_onboard.add_argument("--notify", default="", help="Send welcome email to this address instead of identity email")
|
|
||||||
p_user_onboard.add_argument("--job-title", default="", help="Job title")
|
|
||||||
p_user_onboard.add_argument("--department", default="", help="Department")
|
|
||||||
p_user_onboard.add_argument("--office-location", default="", help="Office location")
|
|
||||||
p_user_onboard.add_argument("--hire-date", default="", type=_date_type, help="Hire date (YYYY-MM-DD)")
|
|
||||||
p_user_onboard.add_argument("--manager", default="", help="Manager name or email")
|
|
||||||
|
|
||||||
p_user_offboard = user_sub.add_parser("offboard", help="Offboard user (disable + revoke all)")
|
|
||||||
p_user_offboard.add_argument("target", help="Email or identity ID")
|
|
||||||
|
|
||||||
args = parser.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
# Set kubectl context before any kube calls.
|
|
||||||
# For production, also register the SSH host so the tunnel is opened on demand.
|
|
||||||
# SUNBEAM_SSH_HOST env var: e.g. "user@server.example.com" or just "server.example.com"
|
|
||||||
import os
|
|
||||||
from sunbeam.kube import set_context
|
|
||||||
from sunbeam.config import get_production_host
|
|
||||||
|
|
||||||
ctx = args.context or ENV_CONTEXTS.get(args.env, "sunbeam")
|
|
||||||
ssh_host = ""
|
|
||||||
if args.env == "production":
|
|
||||||
ssh_host = get_production_host()
|
|
||||||
if not ssh_host:
|
|
||||||
from sunbeam.output import die
|
|
||||||
die("Production host not configured. Use --host to set it or set SUNBEAM_SSH_HOST environment variable.")
|
|
||||||
set_context(ctx, ssh_host=ssh_host)
|
|
||||||
|
|
||||||
if args.verb is None:
|
|
||||||
parser.print_help()
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
# Lazy imports to keep startup fast
|
|
||||||
if args.verb == "up":
|
|
||||||
from sunbeam.cluster import cmd_up
|
|
||||||
cmd_up()
|
|
||||||
|
|
||||||
elif args.verb == "down":
|
|
||||||
from sunbeam.cluster import cmd_down
|
|
||||||
cmd_down()
|
|
||||||
|
|
||||||
elif args.verb == "status":
|
|
||||||
from sunbeam.services import cmd_status
|
|
||||||
cmd_status(args.target)
|
|
||||||
|
|
||||||
elif args.verb == "apply":
|
|
||||||
from sunbeam.manifests import cmd_apply, MANAGED_NS
|
|
||||||
# --domain/--email can appear before OR after the verb; subparser wins if both set.
|
|
||||||
domain = getattr(args, "domain", "") or ""
|
|
||||||
email = getattr(args, "email", "") or ""
|
|
||||||
namespace = getattr(args, "namespace", "") or ""
|
|
||||||
apply_all = getattr(args, "apply_all", False)
|
|
||||||
|
|
||||||
# Full apply on production requires --all or interactive confirmation
|
|
||||||
if args.env == "production" and not namespace and not apply_all:
|
|
||||||
from sunbeam.output import warn
|
|
||||||
warn(f"This will apply ALL namespaces ({', '.join(MANAGED_NS)}) to production.")
|
|
||||||
try:
|
|
||||||
answer = input(" Continue? [y/N] ").strip().lower()
|
|
||||||
except (EOFError, KeyboardInterrupt):
|
|
||||||
answer = ""
|
|
||||||
if answer not in ("y", "yes"):
|
|
||||||
print("Aborted.")
|
|
||||||
sys.exit(0)
|
|
||||||
|
|
||||||
cmd_apply(env=args.env, domain=domain, email=email, namespace=namespace)
|
|
||||||
|
|
||||||
elif args.verb == "seed":
|
|
||||||
from sunbeam.secrets import cmd_seed
|
|
||||||
cmd_seed()
|
|
||||||
|
|
||||||
elif args.verb == "verify":
|
|
||||||
from sunbeam.secrets import cmd_verify
|
|
||||||
cmd_verify()
|
|
||||||
|
|
||||||
elif args.verb == "logs":
|
|
||||||
from sunbeam.services import cmd_logs
|
|
||||||
cmd_logs(args.target, follow=args.follow)
|
|
||||||
|
|
||||||
elif args.verb == "get":
|
|
||||||
from sunbeam.services import cmd_get
|
|
||||||
cmd_get(args.target, output=args.output)
|
|
||||||
|
|
||||||
elif args.verb == "restart":
|
|
||||||
from sunbeam.services import cmd_restart
|
|
||||||
cmd_restart(args.target)
|
|
||||||
|
|
||||||
elif args.verb == "build":
|
|
||||||
from sunbeam.images import cmd_build
|
|
||||||
push = args.push or args.deploy
|
|
||||||
cmd_build(args.what, push=push, deploy=args.deploy, no_cache=args.no_cache)
|
|
||||||
|
|
||||||
elif args.verb == "check":
|
|
||||||
from sunbeam.checks import cmd_check
|
|
||||||
cmd_check(args.target)
|
|
||||||
|
|
||||||
elif args.verb == "mirror":
|
|
||||||
from sunbeam.images import cmd_mirror
|
|
||||||
cmd_mirror()
|
|
||||||
|
|
||||||
elif args.verb == "bootstrap":
|
|
||||||
from sunbeam.gitea import cmd_bootstrap
|
|
||||||
cmd_bootstrap()
|
|
||||||
|
|
||||||
elif args.verb == "config":
|
|
||||||
from sunbeam.config import (
|
|
||||||
SunbeamConfig, load_config, save_config, get_production_host, get_infra_directory
|
|
||||||
)
|
|
||||||
action = getattr(args, "config_action", None)
|
|
||||||
if action is None:
|
|
||||||
p_config.print_help()
|
|
||||||
sys.exit(0)
|
|
||||||
elif action == "set":
|
|
||||||
config = load_config()
|
|
||||||
if args.host:
|
|
||||||
config.production_host = args.host
|
|
||||||
if args.infra_dir:
|
|
||||||
config.infra_directory = args.infra_dir
|
|
||||||
if args.acme_email:
|
|
||||||
config.acme_email = args.acme_email
|
|
||||||
save_config(config)
|
|
||||||
elif action == "get":
|
|
||||||
from sunbeam.output import ok
|
|
||||||
config = load_config()
|
|
||||||
ok(f"Production host: {config.production_host or '(not set)'}")
|
|
||||||
ok(f"Infrastructure directory: {config.infra_directory or '(not set)'}")
|
|
||||||
ok(f"ACME email: {config.acme_email or '(not set)'}")
|
|
||||||
|
|
||||||
# Also show effective production host (from config or env)
|
|
||||||
effective_host = get_production_host()
|
|
||||||
if effective_host:
|
|
||||||
ok(f"Effective production host: {effective_host}")
|
|
||||||
elif action == "clear":
|
|
||||||
import os
|
|
||||||
config_path = os.path.expanduser("~/.sunbeam.json")
|
|
||||||
if os.path.exists(config_path):
|
|
||||||
os.remove(config_path)
|
|
||||||
from sunbeam.output import ok
|
|
||||||
ok(f"Configuration cleared from {config_path}")
|
|
||||||
else:
|
|
||||||
from sunbeam.output import warn
|
|
||||||
warn("No configuration file found to clear")
|
|
||||||
|
|
||||||
elif args.verb == "k8s":
|
|
||||||
from sunbeam.kube import cmd_k8s
|
|
||||||
sys.exit(cmd_k8s(args.kubectl_args))
|
|
||||||
|
|
||||||
elif args.verb == "bao":
|
|
||||||
from sunbeam.kube import cmd_bao
|
|
||||||
sys.exit(cmd_bao(args.bao_args))
|
|
||||||
|
|
||||||
elif args.verb == "user":
|
|
||||||
from sunbeam.users import (cmd_user_list, cmd_user_get, cmd_user_create,
|
|
||||||
cmd_user_delete, cmd_user_recover,
|
|
||||||
cmd_user_disable, cmd_user_enable,
|
|
||||||
cmd_user_set_password,
|
|
||||||
cmd_user_onboard, cmd_user_offboard)
|
|
||||||
action = getattr(args, "user_action", None)
|
|
||||||
if action is None:
|
|
||||||
p_user.print_help()
|
|
||||||
sys.exit(0)
|
|
||||||
elif action == "list":
|
|
||||||
cmd_user_list(search=args.search)
|
|
||||||
elif action == "get":
|
|
||||||
cmd_user_get(args.target)
|
|
||||||
elif action == "create":
|
|
||||||
cmd_user_create(args.email, name=args.name, schema_id=args.schema)
|
|
||||||
elif action == "delete":
|
|
||||||
cmd_user_delete(args.target)
|
|
||||||
elif action == "recover":
|
|
||||||
cmd_user_recover(args.target)
|
|
||||||
elif action == "disable":
|
|
||||||
cmd_user_disable(args.target)
|
|
||||||
elif action == "enable":
|
|
||||||
cmd_user_enable(args.target)
|
|
||||||
elif action == "set-password":
|
|
||||||
cmd_user_set_password(args.target, args.password)
|
|
||||||
elif action == "onboard":
|
|
||||||
cmd_user_onboard(args.email, name=args.name, schema_id=args.schema,
|
|
||||||
send_email=not args.no_email, notify=args.notify,
|
|
||||||
job_title=args.job_title, department=args.department,
|
|
||||||
office_location=args.office_location,
|
|
||||||
hire_date=args.hire_date, manager=args.manager)
|
|
||||||
elif action == "offboard":
|
|
||||||
cmd_user_offboard(args.target)
|
|
||||||
|
|
||||||
else:
|
|
||||||
parser.print_help()
|
|
||||||
sys.exit(1)
|
|
||||||
@@ -1,301 +0,0 @@
|
|||||||
"""Cluster lifecycle — Lima VM, kubeconfig, Linkerd, TLS, core service readiness."""
|
|
||||||
import base64
|
|
||||||
import json
|
|
||||||
import shutil
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from sunbeam.kube import (kube, kube_out, kube_ok, kube_apply,
|
|
||||||
kustomize_build, get_lima_ip, ensure_ns, create_secret, ns_exists)
|
|
||||||
from sunbeam.tools import run_tool, CACHE_DIR
|
|
||||||
from sunbeam.output import step, ok, warn, die
|
|
||||||
|
|
||||||
LIMA_VM = "sunbeam"
|
|
||||||
from sunbeam.config import get_infra_dir as _get_infra_dir
|
|
||||||
SECRETS_DIR = _get_infra_dir() / "secrets" / "local"
|
|
||||||
|
|
||||||
GITEA_ADMIN_USER = "gitea_admin"
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Lima VM
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def _lima_status() -> str:
    """Return the status of the sunbeam Lima VM, or "none" if it is absent.

    Handles both output shapes of `limactl list --json`: a single JSON array
    and NDJSON (one JSON object per line, emitted by newer limactl versions).
    """
    proc = subprocess.run(["limactl", "list", "--json"],
                          capture_output=True, text=True)
    output = proc.stdout.strip() if proc.returncode == 0 else ""
    if not output:
        return "none"

    entries: list[dict] = []
    try:
        decoded = json.loads(output)
        entries = decoded if isinstance(decoded, list) else [decoded]
    except json.JSONDecodeError:
        # NDJSON fallback: parse line by line, ignoring blanks and junk rows.
        for chunk in output.splitlines():
            chunk = chunk.strip()
            if not chunk:
                continue
            try:
                entries.append(json.loads(chunk))
            except json.JSONDecodeError:
                continue

    for entry in entries:
        if entry.get("name") == LIMA_VM:
            return entry.get("status", "unknown")
    return "none"
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_lima_vm():
    """Create the 'sunbeam' Lima VM if missing, or start it if not running."""
    step("Lima VM...")
    status = _lima_status()
    if status == "none":
        # No VM yet: create from the k3s template with fixed resources
        # (vz + virtiofs + rosetta are the macOS virtualization options).
        ok("Creating 'sunbeam' (k3s 6 CPU / 12 GB / 60 GB)...")
        subprocess.run(
            ["limactl", "start",
             "--name=sunbeam", "template:k3s",
             "--memory=12", "--cpus=6", "--disk=60",
             "--vm-type=vz", "--mount-type=virtiofs",
             "--rosetta"],
            check=True,
        )
    elif status == "Running":
        ok("Already running.")
    else:
        # VM exists but is stopped (or in some other state) -- just start it.
        ok(f"Starting (current status: {status})...")
        subprocess.run(["limactl", "start", LIMA_VM], check=True)
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Kubeconfig
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def merge_kubeconfig():
    """Merge the Lima VM's kubeconfig into the user's default kubeconfig.

    Extracts the CA/client cert/key from the guest kubeconfig with `yq`,
    then registers a 'sunbeam' cluster/user/context via `kubectl config`
    with certs embedded, so no temp files are referenced afterwards.
    Dies if the guest kubeconfig has not been copied out of the VM yet.
    """
    step("Merging kubeconfig...")
    lima_kube = Path.home() / f".lima/{LIMA_VM}/copied-from-guest/kubeconfig.yaml"
    if not lima_kube.exists():
        die(f"Lima kubeconfig not found: {lima_kube}")

    tmp = Path("/tmp/sunbeam-kube")
    tmp.mkdir(exist_ok=True)
    try:
        for query, filename in [
            (".clusters[0].cluster.certificate-authority-data", "ca.crt"),
            (".users[0].user.client-certificate-data", "client.crt"),
            (".users[0].user.client-key-data", "client.key"),
        ]:
            r = subprocess.run(["yq", query, str(lima_kube)],
                               capture_output=True, text=True)
            # NOTE(review): if yq fails, b64 is "" and an empty cert file is
            # written silently; kubectl below would then fail -- confirm this
            # failure mode is acceptable.
            b64 = r.stdout.strip() if r.returncode == 0 else ""
            (tmp / filename).write_bytes(base64.b64decode(b64))

        subprocess.run(
            ["kubectl", "config", "set-cluster", LIMA_VM,
             "--server=https://127.0.0.1:6443",
             f"--certificate-authority={tmp}/ca.crt", "--embed-certs=true"],
            check=True,
        )
        subprocess.run(
            ["kubectl", "config", "set-credentials", f"{LIMA_VM}-admin",
             f"--client-certificate={tmp}/client.crt",
             f"--client-key={tmp}/client.key", "--embed-certs=true"],
            check=True,
        )
        subprocess.run(
            ["kubectl", "config", "set-context", LIMA_VM,
             f"--cluster={LIMA_VM}", f"--user={LIMA_VM}-admin"],
            check=True,
        )
    finally:
        # Certs are embedded above, so the temp copies can always be removed.
        shutil.rmtree(tmp, ignore_errors=True)
    ok("Context 'sunbeam' ready.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Traefik
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def disable_traefik():
    """Remove k3s's bundled Traefik (replaced by Pingora) and pin it off.

    Deletes the HelmChart objects, removes the auto-deploy manifest in the VM,
    and writes /etc/rancher/k3s/config.yaml so Traefik stays disabled across
    k3s restarts.
    """
    step("Traefik...")
    if kube_ok("get", "helmchart", "traefik", "-n", "kube-system"):
        ok("Removing (replaced by Pingora)...")
        kube("delete", "helmchart", "traefik", "traefik-crd",
             "-n", "kube-system", check=False)
        # Remove the auto-deploy manifest so k3s doesn't re-create the chart.
        subprocess.run(
            ["limactl", "shell", LIMA_VM,
             "sudo", "rm", "-f",
             "/var/lib/rancher/k3s/server/manifests/traefik.yaml"],
            capture_output=True,
        )
        # Write k3s config so Traefik can never return after a k3s restart.
        subprocess.run(
            ["limactl", "shell", LIMA_VM, "sudo", "tee",
             "/etc/rancher/k3s/config.yaml"],
            input="disable:\n  - traefik\n",
            text=True,
            capture_output=True,
        )
    ok("Done.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# cert-manager
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def ensure_cert_manager():
    """Install cert-manager v1.17.0 unless its namespace already exists."""
    step("cert-manager...")
    if ns_exists("cert-manager"):
        ok("Already installed.")
        return
    ok("Installing...")
    kube("apply", "-f",
         "https://github.com/cert-manager/cert-manager/releases/download/v1.17.0/cert-manager.yaml")
    # Block until all three control-plane deployments have rolled out.
    for dep in ["cert-manager", "cert-manager-webhook", "cert-manager-cainjector"]:
        kube("rollout", "status", f"deployment/{dep}",
             "-n", "cert-manager", "--timeout=120s")
    ok("Installed.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Linkerd
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def ensure_linkerd():
    """Install Linkerd (Gateway API CRDs, Linkerd CRDs, control plane) if absent.

    Idempotence is keyed on the existence of the 'linkerd' namespace; a partial
    prior install therefore skips the remaining steps.
    """
    step("Linkerd...")
    if ns_exists("linkerd"):
        ok("Already installed.")
        return
    ok("Installing Gateway API CRDs...")
    kube("apply", "--server-side", "-f",
         "https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml")
    ok("Installing Linkerd CRDs...")
    r = subprocess.run(["linkerd", "install", "--crds"],
                       capture_output=True, text=True)
    # NOTE(review): if the linkerd CLI fails, an empty manifest is applied --
    # confirm kube_apply("") is a harmless no-op rather than an error.
    crds = r.stdout.strip() if r.returncode == 0 else ""
    kube_apply(crds)
    ok("Installing Linkerd control plane...")
    r = subprocess.run(["linkerd", "install"],
                       capture_output=True, text=True)
    cp = r.stdout.strip() if r.returncode == 0 else ""
    kube_apply(cp)
    # Wait for the control-plane deployments before callers mesh anything.
    for dep in ["linkerd-identity", "linkerd-destination", "linkerd-proxy-injector"]:
        kube("rollout", "status", f"deployment/{dep}",
             "-n", "linkerd", "--timeout=120s")
    ok("Installed.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# TLS certificate
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def ensure_tls_cert(domain: str | None = None) -> str:
    """Ensure a local wildcard TLS certificate exists; return its domain.

    When *domain* is None it is derived from the Lima VM IP via sslip.io.
    The mkcert-generated pair is stored as tls.crt / tls.key in SECRETS_DIR;
    an existing tls.crt is reused as-is.
    """
    step("TLS certificate...")
    ip = get_lima_ip()
    if domain is None:
        domain = f"{ip}.sslip.io"
    cert = SECRETS_DIR / "tls.crt"
    if cert.exists():
        ok(f"Cert exists. Domain: {domain}")
        return domain
    ok(f"Generating wildcard cert for *.{domain}...")
    SECRETS_DIR.mkdir(parents=True, exist_ok=True)
    subprocess.run(["mkcert", f"*.{domain}"], cwd=SECRETS_DIR, check=True)
    # mkcert names output after the wildcard; normalize to tls.crt / tls.key.
    for src, dst in [
        (f"_wildcard.{domain}.pem", "tls.crt"),
        (f"_wildcard.{domain}-key.pem", "tls.key"),
    ]:
        (SECRETS_DIR / src).rename(SECRETS_DIR / dst)
    ok(f"Cert generated. Domain: {domain}")
    return domain
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# TLS secret
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def ensure_tls_secret(domain: str):
    """Create or update the 'pingora-tls' TLS secret in the ingress namespace.

    The *domain* parameter is currently unused here; the secret is built from
    the tls.crt / tls.key files in SECRETS_DIR.
    """
    step("TLS secret...")
    ensure_ns("ingress")
    # Render with --dry-run=client, then apply, so the operation is idempotent
    # (a plain `create` would fail if the secret already exists).
    manifest = kube_out(
        "create", "secret", "tls", "pingora-tls",
        f"--cert={SECRETS_DIR}/tls.crt",
        f"--key={SECRETS_DIR}/tls.key",
        "-n", "ingress",
        "--dry-run=client", "-o=yaml",
    )
    if manifest:
        kube_apply(manifest)
    ok("Done.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Wait for core
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def wait_for_core():
    """Block until the core deployments (valkey, kratos, hydra) have rolled out.

    Each wait is best-effort (check=False) so one slow service does not abort
    the whole bring-up.
    """
    step("Waiting for core services...")
    core = (("data", "valkey"), ("ory", "kratos"), ("ory", "hydra"))
    for namespace, deployment in core:
        kube("rollout", "status", f"deployment/{deployment}",
             "-n", namespace, "--timeout=120s", check=False)
    ok("Core services ready.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Print URLs
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def print_urls(domain: str, gitea_admin_pass: str = ""):
    """Print the post-bring-up summary: service URLs plus OpenBao access hints.

    The Gitea admin password is shown inline so the operator can log in
    immediately after bring-up.
    """
    print(f"\n{'─' * 60}")
    print(f"  Stack is up. Domain: {domain}")
    print(f"{'─' * 60}")
    for name, url in [
        ("Auth", f"https://auth.{domain}/"),
        ("Docs", f"https://docs.{domain}/"),
        ("Meet", f"https://meet.{domain}/"),
        ("Drive", f"https://drive.{domain}/"),
        ("Chat", f"https://chat.{domain}/"),
        ("Mail", f"https://mail.{domain}/"),
        ("People", f"https://people.{domain}/"),
        ("Gitea", f"https://src.{domain}/ ({GITEA_ADMIN_USER} / {gitea_admin_pass})"),
    ]:
        print(f"  {name:<10} {url}")
    print()
    # OpenBao has no ingress route; show the port-forward + token recipe.
    print("  OpenBao UI:")
    print(f"    kubectl --context=sunbeam -n data port-forward svc/openbao 8200:8200")
    print(f"    http://localhost:8200")
    token_cmd = "kubectl --context=sunbeam -n data get secret openbao-keys -o jsonpath='{.data.root-token}' | base64 -d"
    print(f"    token: {token_cmd}")
    print(f"{'─' * 60}\n")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Commands
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def cmd_up():
    """Bring up the full local stack end to end.

    Order matters: VM and kubeconfig first, then cluster addons (Traefik off,
    cert-manager, Linkerd), TLS material, manifests, secret seeding, registry
    trust, Gitea bootstrap, image mirroring, and finally the readiness wait.
    """
    # Imported lazily to keep `sunbeam` startup fast for unrelated verbs.
    from sunbeam.manifests import cmd_apply
    from sunbeam.secrets import cmd_seed
    from sunbeam.gitea import cmd_bootstrap, setup_lima_vm_registry
    from sunbeam.images import cmd_mirror

    ensure_lima_vm()
    merge_kubeconfig()
    disable_traefik()
    ensure_cert_manager()
    ensure_linkerd()
    domain = ensure_tls_cert()
    ensure_tls_secret(domain)
    cmd_apply()
    creds = cmd_seed()
    # cmd_seed's return shape is defensive-checked; non-dict means no password.
    admin_pass = creds.get("gitea-admin-password", "") if isinstance(creds, dict) else ""
    setup_lima_vm_registry(domain, admin_pass)
    # NOTE(review): cmd_bootstrap() re-derives domain and re-reads the admin
    # password from the cluster secret; passing (domain, admin_pass) here
    # would avoid that -- confirm they are guaranteed to match before changing.
    cmd_bootstrap()
    cmd_mirror()
    wait_for_core()
    print_urls(domain, admin_pass)
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_down():
    """Stop the Lima VM (best-effort: exit status is deliberately ignored)."""
    subprocess.run(["limactl", "stop", LIMA_VM])
|
|
||||||
@@ -1,96 +0,0 @@
|
|||||||
"""Configuration management — load/save ~/.sunbeam.json for production host and infra directory."""
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
from pathlib import Path
|
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
|
|
||||||
CONFIG_PATH = Path.home() / ".sunbeam.json"
|
|
||||||
|
|
||||||
|
|
||||||
class SunbeamConfig:
    """Sunbeam configuration: production host, infra directory, ACME email."""

    # Serialized keys, in the canonical JSON order.
    _FIELDS = ("production_host", "infra_directory", "acme_email")

    def __init__(self, production_host: str = "", infra_directory: str = "",
                 acme_email: str = ""):
        self.production_host = production_host
        self.infra_directory = infra_directory
        self.acme_email = acme_email

    def to_dict(self) -> dict:
        """Return a JSON-serializable dictionary of the configuration."""
        return {name: getattr(self, name) for name in self._FIELDS}

    @classmethod
    def from_dict(cls, data: dict) -> 'SunbeamConfig':
        """Build a configuration from *data*, defaulting missing keys to ""."""
        return cls(**{name: data.get(name, "") for name in cls._FIELDS})
|
|
||||||
|
|
||||||
|
|
||||||
def load_config() -> SunbeamConfig:
    """Load configuration from ~/.sunbeam.json; return defaults when missing.

    A corrupt or unreadable file is downgraded to a warning plus an empty
    config rather than a crash.
    """
    if not CONFIG_PATH.exists():
        return SunbeamConfig()

    try:
        raw = CONFIG_PATH.read_text()
        return SunbeamConfig.from_dict(json.loads(raw))
    except (json.JSONDecodeError, IOError, KeyError) as e:
        from sunbeam.output import warn
        warn(f"Failed to load config from {CONFIG_PATH}: {e}")
        return SunbeamConfig()
|
|
||||||
|
|
||||||
|
|
||||||
def save_config(config: SunbeamConfig) -> None:
    """Persist *config* to ~/.sunbeam.json, creating parent dirs as needed.

    Dies (via sunbeam.output.die) when the file cannot be written.
    """
    try:
        CONFIG_PATH.parent.mkdir(parents=True, exist_ok=True)
        CONFIG_PATH.write_text(json.dumps(config.to_dict(), indent=2))
        from sunbeam.output import ok
        ok(f"Configuration saved to {CONFIG_PATH}")
    except IOError as e:
        from sunbeam.output import die
        die(f"Failed to save config to {CONFIG_PATH}: {e}")
|
|
||||||
|
|
||||||
|
|
||||||
def get_production_host() -> str:
    """Production host: config value first, then SUNBEAM_SSH_HOST, else ""."""
    return load_config().production_host or os.environ.get("SUNBEAM_SSH_HOST", "")
|
|
||||||
|
|
||||||
|
|
||||||
def get_infra_directory() -> str:
    """Infrastructure directory string from the saved config ("" if unset)."""
    cfg = load_config()
    return cfg.infra_directory
|
|
||||||
|
|
||||||
|
|
||||||
def get_infra_dir() -> Path:
    """Infrastructure manifests directory as a Path.

    Prefers the configured infra_directory; falls back to the package-relative
    path (works when running from the development checkout).
    """
    # Path is already imported at module level; the original re-imported it
    # locally, which was redundant.
    configured = load_config().infra_directory
    if configured:
        return Path(configured)
    # Dev fallback: cli/sunbeam/config.py → parents[0]=cli/sunbeam, [1]=cli, [2]=monorepo root
    return Path(__file__).resolve().parents[2] / "infrastructure"
|
|
||||||
|
|
||||||
|
|
||||||
def get_repo_root() -> "Path":
    """Monorepo root: the parent directory of the infrastructure directory."""
    infra = get_infra_dir()
    return infra.parent
|
|
||||||
259
sunbeam/gitea.py
259
sunbeam/gitea.py
@@ -1,259 +0,0 @@
|
|||||||
"""Gitea bootstrap — registry trust, admin setup, org creation."""
|
|
||||||
import base64
|
|
||||||
import json
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
|
|
||||||
from sunbeam.kube import kube, kube_out, context_arg
|
|
||||||
from sunbeam.output import step, ok, warn
|
|
||||||
|
|
||||||
LIMA_VM = "sunbeam"
|
|
||||||
GITEA_ADMIN_USER = "gitea_admin"
|
|
||||||
GITEA_ADMIN_EMAIL = "gitea@local.domain"
|
|
||||||
|
|
||||||
|
|
||||||
def _capture_out(cmd, *, default=""):
|
|
||||||
r = subprocess.run(cmd, capture_output=True, text=True)
|
|
||||||
return r.stdout.strip() if r.returncode == 0 else default
|
|
||||||
|
|
||||||
|
|
||||||
def _run(cmd, *, check=True, input=None, capture=False, cwd=None):
|
|
||||||
text = not isinstance(input, bytes)
|
|
||||||
return subprocess.run(cmd, check=check, text=text, input=input,
|
|
||||||
capture_output=capture, cwd=cwd)
|
|
||||||
|
|
||||||
|
|
||||||
def _kube_ok(*args):
    """True when `kubectl <args>` exits 0 against the active context."""
    proc = subprocess.run(
        ["kubectl", context_arg(), *args], capture_output=True
    )
    return proc.returncode == 0
|
|
||||||
|
|
||||||
|
|
||||||
def setup_lima_vm_registry(domain: str, gitea_admin_pass: str = ""):
    """Install mkcert root CA in the Lima VM and configure k3s to auth with Gitea.

    Restarts k3s if either configuration changes so pods don't fight TLS errors
    or get unauthenticated pulls on the first deploy.
    """
    step("Configuring Lima VM registry trust...")
    changed = False

    # Install mkcert root CA so containerd trusts our wildcard TLS cert
    caroot = _capture_out(["mkcert", "-CAROOT"])
    if caroot:
        from pathlib import Path
        ca_pem = Path(caroot) / "rootCA.pem"
        if ca_pem.exists():
            # Presence of the installed cert file is the idempotence marker.
            already = subprocess.run(
                ["limactl", "shell", LIMA_VM, "test", "-f",
                 "/usr/local/share/ca-certificates/mkcert-root.crt"],
                capture_output=True,
            ).returncode == 0
            if not already:
                _run(["limactl", "copy", str(ca_pem),
                      f"{LIMA_VM}:/tmp/mkcert-root.pem"])
                _run(["limactl", "shell", LIMA_VM, "sudo", "cp",
                      "/tmp/mkcert-root.pem",
                      "/usr/local/share/ca-certificates/mkcert-root.crt"])
                _run(["limactl", "shell", LIMA_VM, "sudo",
                      "update-ca-certificates"])
                ok("mkcert CA installed in VM.")
                changed = True
            else:
                ok("mkcert CA already installed.")

    # Write k3s registries.yaml (auth for Gitea container registry)
    registry_host = f"src.{domain}"
    want = (
        f'configs:\n'
        f'  "{registry_host}":\n'
        f'    auth:\n'
        f'      username: "{GITEA_ADMIN_USER}"\n'
        f'      password: "{gitea_admin_pass}"\n'
    )
    existing = _capture_out(["limactl", "shell", LIMA_VM,
                             "sudo", "cat",
                             "/etc/rancher/k3s/registries.yaml"])
    # Only rewrite (and later restart k3s) when the content actually differs.
    if existing.strip() != want.strip():
        subprocess.run(
            ["limactl", "shell", LIMA_VM, "sudo", "tee",
             "/etc/rancher/k3s/registries.yaml"],
            input=want, text=True, capture_output=True,
        )
        ok(f"Registry config written for {registry_host}.")
        changed = True
    else:
        ok("Registry config up to date.")

    if changed:
        ok("Restarting k3s to apply changes...")
        subprocess.run(
            ["limactl", "shell", LIMA_VM, "sudo", "systemctl", "restart",
             "k3s"],
            capture_output=True,
        )
        # Wait for API server to come back (up to 40 * 3s = 2 minutes)
        for _ in range(40):
            if _kube_ok("get", "nodes"):
                break
            time.sleep(3)
        # Extra settle time -- pods take a moment to start terminating/restarting
        time.sleep(15)
        ok("k3s restarted.")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_bootstrap(domain: str = "", gitea_admin_pass: str = ""):
    """Ensure Gitea admin has a known password and create the studio/internal orgs.

    *domain* defaults to the Lima-IP sslip.io domain; *gitea_admin_pass*
    defaults to the password stored in the gitea-admin-credentials secret.
    Also marks the admin account private and wires Hydra up as the 'Sunbeam'
    OIDC auth source. All steps are best-effort: failures warn, not die.
    """
    if not domain:
        from sunbeam.kube import get_lima_ip
        ip = get_lima_ip()
        domain = f"{ip}.sslip.io"
    if not gitea_admin_pass:
        b64 = kube_out("-n", "devtools", "get", "secret",
                       "gitea-admin-credentials",
                       "-o=jsonpath={.data.password}")
        if b64:
            gitea_admin_pass = base64.b64decode(b64).decode()

    step("Bootstrapping Gitea...")

    # Wait for a Running + Ready Gitea pod (up to 60 * 3s = 3 minutes)
    pod = ""
    for _ in range(60):
        candidate = kube_out(
            "-n", "devtools", "get", "pods",
            "-l=app.kubernetes.io/name=gitea",
            "--field-selector=status.phase=Running",
            "-o=jsonpath={.items[0].metadata.name}",
        )
        if candidate:
            # Phase "Running" is not enough; require the container readiness flag.
            ready = kube_out("-n", "devtools", "get", "pod", candidate,
                             "-o=jsonpath={.status.containerStatuses[0].ready}")
            if ready == "true":
                pod = candidate
                break
        time.sleep(3)

    if not pod:
        warn("Gitea pod not ready after 3 min -- skipping bootstrap.")
        return

    def gitea_exec(*args):
        # Run a command inside the ready Gitea container, capturing output.
        return subprocess.run(
            ["kubectl", context_arg(), "-n", "devtools", "exec", pod, "-c",
             "gitea", "--"] + list(args),
            capture_output=True, text=True,
        )

    # Ensure admin has the generated password and no forced-change flag.
    r = gitea_exec("gitea", "admin", "user", "change-password",
                   "--username", GITEA_ADMIN_USER, "--password",
                   gitea_admin_pass, "--must-change-password=false")
    if r.returncode == 0 or "password" in (r.stdout + r.stderr).lower():
        ok(f"Admin '{GITEA_ADMIN_USER}' password set.")
    else:
        warn(f"change-password: {r.stderr.strip()}")

    def api(method, path, data=None):
        # Call the Gitea REST API from inside the pod via curl with basic auth;
        # returns the parsed JSON body, or {} when it is not valid JSON.
        args = [
            "curl", "-s", "-X", method,
            f"http://localhost:3000/api/v1{path}",
            "-H", "Content-Type: application/json",
            "-u", f"{GITEA_ADMIN_USER}:{gitea_admin_pass}",
        ]
        if data:
            args += ["-d", json.dumps(data)]
        r = gitea_exec(*args)
        try:
            return json.loads(r.stdout)
        except json.JSONDecodeError:
            return {}

    # Mark admin account as private so it doesn't appear in public listings.
    r = api("PATCH", f"/admin/users/{GITEA_ADMIN_USER}", {
        "source_id": 0,
        "login_name": GITEA_ADMIN_USER,
        "email": GITEA_ADMIN_EMAIL,
        "visibility": "private",
    })
    if r.get("login") == GITEA_ADMIN_USER:
        ok(f"Admin '{GITEA_ADMIN_USER}' marked as private.")
    else:
        warn(f"Could not set admin visibility: {r}")

    # Create the two standard orgs; tolerate their prior existence.
    for org_name, visibility, desc in [
        ("studio", "public", "Public source code"),
        ("internal", "private", "Internal tools and services"),
    ]:
        result = api("POST", "/orgs", {
            "username": org_name,
            "visibility": visibility,
            "description": desc,
        })
        if "id" in result:
            ok(f"Created org '{org_name}'.")
        elif "already" in result.get("message", "").lower():
            ok(f"Org '{org_name}' already exists.")
        else:
            warn(f"Org '{org_name}': {result.get('message', result)}")

    # Configure Hydra as the OIDC authentication source.
    # Source name "Sunbeam" determines the callback URL:
    #   /user/oauth2/Sunbeam/callback (must match oidc-clients.yaml redirectUri)
    auth_list = gitea_exec("gitea", "admin", "auth", "list")
    # Parse tab-separated rows: ID\tName\tType\tEnabled
    existing_id = None
    exact_ok = False
    for line in auth_list.stdout.splitlines()[1:]:  # skip header
        parts = line.split("\t")
        if len(parts) < 2:
            continue
        src_id, src_name = parts[0].strip(), parts[1].strip()
        if src_name == "Sunbeam":
            exact_ok = True
            break
        # Remember a near-miss source (legacy name) so it can be renamed below.
        if src_name in ("Sunbeam Auth",) or (src_name.startswith("Sunbeam") and parts[2].strip() == "OAuth2"):
            existing_id = src_id

    if exact_ok:
        ok("OIDC auth source 'Sunbeam' already present.")
    elif existing_id:
        # Wrong name (e.g. "Sunbeam Auth") — rename in-place to fix callback URL
        r = gitea_exec("gitea", "admin", "auth", "update-oauth",
                       "--id", existing_id, "--name", "Sunbeam")
        if r.returncode == 0:
            ok(f"Renamed OIDC auth source (id={existing_id}) to 'Sunbeam'.")
        else:
            warn(f"Rename failed: {r.stderr.strip()}")
    else:
        # Fresh install: read the client credentials seeded in the cluster.
        oidc_id_b64 = kube_out("-n", "lasuite", "get", "secret", "oidc-gitea",
                               "-o=jsonpath={.data.CLIENT_ID}")
        oidc_secret_b64 = kube_out("-n", "lasuite", "get", "secret", "oidc-gitea",
                                   "-o=jsonpath={.data.CLIENT_SECRET}")
        if oidc_id_b64 and oidc_secret_b64:
            oidc_id = base64.b64decode(oidc_id_b64).decode()
            oidc_sec = base64.b64decode(oidc_secret_b64).decode()
            discover_url = (
                "http://hydra-public.ory.svc.cluster.local:4444"
                "/.well-known/openid-configuration"
            )
            r = gitea_exec(
                "gitea", "admin", "auth", "add-oauth",
                "--name", "Sunbeam",
                "--provider", "openidConnect",
                "--key", oidc_id,
                "--secret", oidc_sec,
                "--auto-discover-url", discover_url,
                "--scopes", "openid",
                "--scopes", "email",
                "--scopes", "profile",
            )
            if r.returncode == 0:
                ok("OIDC auth source 'Sunbeam' configured.")
            else:
                warn(f"OIDC auth source config failed: {r.stderr.strip()}")
        else:
            warn("oidc-gitea secret not found -- OIDC auth source not configured.")

    ok(f"Gitea ready -- https://src.{domain} ({GITEA_ADMIN_USER} / <from "
       f"openbao>)")
|
|
||||||
1049
sunbeam/images.py
1049
sunbeam/images.py
File diff suppressed because it is too large
Load Diff
257
sunbeam/kube.py
257
sunbeam/kube.py
@@ -1,257 +0,0 @@
|
|||||||
"""Kubernetes interface — kubectl/kustomize wrappers, domain substitution, target parsing."""
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from sunbeam.tools import run_tool, CACHE_DIR
|
|
||||||
from sunbeam.output import die, ok
|
|
||||||
|
|
||||||
# Active kubectl context. Set once at startup via set_context().
|
|
||||||
# Defaults to "sunbeam" (Lima VM) for local dev.
|
|
||||||
_context: str = "sunbeam"
|
|
||||||
|
|
||||||
# SSH host for production tunnel. Set alongside context for production env.
|
|
||||||
_ssh_host: str = ""
|
|
||||||
_tunnel_proc: subprocess.Popen | None = None
|
|
||||||
|
|
||||||
|
|
||||||
def set_context(ctx: str, ssh_host: str = "") -> None:
    """Select the active kubectl context and, for production, its SSH host."""
    global _context, _ssh_host
    _context, _ssh_host = ctx, ssh_host
|
|
||||||
|
|
||||||
|
|
||||||
def context_arg() -> str:
    """The '--context=<active>' flag, ready to splice into kubectl argv."""
    return "--context=" + _context
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_tunnel() -> None:
    """Open SSH tunnel to localhost:16443 → remote:6443 for production if needed.

    No-op for the local context (empty _ssh_host) or when port 16443 already
    accepts connections. Otherwise spawns a background `ssh -N -L` and polls
    for up to ~5s, dying if the port never opens.
    """
    global _tunnel_proc
    if not _ssh_host:
        return
    import socket
    # Cheap liveness probe: if something answers on 16443, assume the tunnel
    # (or an equivalent forward) is already up.
    try:
        with socket.create_connection(("127.0.0.1", 16443), timeout=0.5):
            return  # already open
    except (ConnectionRefusedError, TimeoutError, OSError):
        pass
    ok(f"Opening SSH tunnel to {_ssh_host}...")
    # Kept on the module so the process lives as long as the CLI invocation.
    _tunnel_proc = subprocess.Popen(
        ["ssh", "-p", "2222", "-L", "16443:127.0.0.1:6443", "-N", "-o", "ExitOnForwardFailure=yes",
         "-o", "StrictHostKeyChecking=no", _ssh_host],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
    )
    # Poll for the forwarded port: 10 attempts, 0.5s apart.
    for _ in range(10):
        try:
            with socket.create_connection(("127.0.0.1", 16443), timeout=0.5):
                return
        except (ConnectionRefusedError, TimeoutError, OSError):
            time.sleep(0.5)
    die(f"SSH tunnel to {_ssh_host} did not open in time")
|
|
||||||
|
|
||||||
|
|
||||||
def parse_target(s: str | None) -> tuple[str | None, str | None]:
|
|
||||||
"""Parse 'ns/name' -> ('ns', 'name'), 'ns' -> ('ns', None), None -> (None, None)."""
|
|
||||||
if s is None:
|
|
||||||
return (None, None)
|
|
||||||
parts = s.split("/")
|
|
||||||
if len(parts) == 1:
|
|
||||||
return (parts[0], None)
|
|
||||||
if len(parts) == 2:
|
|
||||||
return (parts[0], parts[1])
|
|
||||||
raise ValueError(f"Invalid target {s!r}: expected 'namespace' or 'namespace/name'")
|
|
||||||
|
|
||||||
|
|
||||||
def domain_replace(text: str, domain: str) -> str:
    """Substitute every DOMAIN_SUFFIX placeholder in text with the real domain."""
    placeholder = "DOMAIN_SUFFIX"
    return text.replace(placeholder, domain)
|
|
||||||
|
|
||||||
|
|
||||||
def get_lima_ip() -> str:
    """Get the socket_vmnet IP of the Lima sunbeam VM (192.168.105.x).

    Falls back to the last address from `hostname -I` when eth1 has no
    inet line; returns "" when no address can be found at all.
    """
    show = subprocess.run(
        ["limactl", "shell", "sunbeam", "ip", "-4", "addr", "show", "eth1"],
        capture_output=True, text=True,
    )
    for report_line in show.stdout.splitlines():
        if "inet " not in report_line:
            continue
        # "inet 192.168.105.4/24 ..." -> "192.168.105.4"
        return report_line.strip().split()[1].split("/")[0]
    # fallback: second IP from hostname -I
    fallback = subprocess.run(
        ["limactl", "shell", "sunbeam", "hostname", "-I"],
        capture_output=True, text=True,
    )
    addrs = fallback.stdout.strip().split()
    if len(addrs) >= 2:
        return addrs[-1]
    return addrs[0] if addrs else ""
|
|
||||||
|
|
||||||
|
|
||||||
def kube(*args, input=None, check=True) -> subprocess.CompletedProcess:
    """Run kubectl against the active context, opening SSH tunnel if needed.

    Args:
        *args: kubectl arguments (e.g. "get", "pods").
        input: optional stdin payload, str or bytes.
        check: passed through to run_tool; raise on non-zero exit when True.

    Output is NOT captured — kubectl streams straight to the terminal.
    """
    ensure_tunnel()
    # Only switch to binary mode when the caller handed us raw bytes.
    text = not isinstance(input, bytes)
    return run_tool("kubectl", context_arg(), *args,
                    input=input, text=text, check=check,
                    capture_output=False)
|
|
||||||
|
|
||||||
|
|
||||||
def kube_out(*args) -> str:
    """Run kubectl against the active context and return its stripped stdout.

    Failures are swallowed: any non-zero exit yields an empty string.
    """
    ensure_tunnel()
    proc = run_tool("kubectl", context_arg(), *args,
                    capture_output=True, text=True, check=False)
    if proc.returncode != 0:
        return ""
    return proc.stdout.strip()
|
|
||||||
|
|
||||||
|
|
||||||
def kube_ok(*args) -> bool:
    """Return True if kubectl command exits 0."""
    ensure_tunnel()
    proc = run_tool("kubectl", context_arg(), *args,
                    capture_output=True, check=False)
    return proc.returncode == 0
|
|
||||||
|
|
||||||
|
|
||||||
def kube_apply(manifest: str, *, server_side: bool = True) -> None:
    """Pipe manifest YAML to kubectl apply (server-side by default)."""
    cmd = ["apply", "-f", "-"]
    if server_side:
        cmd.extend(["--server-side", "--force-conflicts"])
    kube(*cmd, input=manifest)
|
|
||||||
|
|
||||||
|
|
||||||
def ns_exists(ns: str) -> bool:
    """Return True when namespace `ns` already exists in the cluster."""
    return kube_ok("get", "namespace", ns)
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_ns(ns: str) -> None:
    """Idempotently create namespace `ns` via dry-run render + apply."""
    rendered = kube_out("create", "namespace", ns, "--dry-run=client", "-o=yaml")
    if rendered:
        kube_apply(rendered)
|
|
||||||
|
|
||||||
|
|
||||||
def create_secret(ns: str, name: str, **literals) -> None:
    """Create or update a K8s generic secret idempotently via server-side apply."""
    cmd = ["create", "secret", "generic", name, f"-n={ns}"]
    cmd.extend(f"--from-literal={key}={val}" for key, val in literals.items())
    cmd.extend(["--dry-run=client", "-o=yaml"])
    rendered = kube_out(*cmd)
    if not rendered:
        return
    kube("apply", "--server-side", "--force-conflicts",
         "--field-manager=sunbeam", "-f", "-", input=rendered)
|
|
||||||
|
|
||||||
|
|
||||||
def kube_exec(ns: str, pod: str, *cmd: str, container: str | None = None) -> tuple[int, str]:
    """Run a command inside a pod. Returns (returncode, stdout)."""
    exec_args = ["kubectl", context_arg(), "exec", "-n", ns, pod]
    if container:
        exec_args.extend(["-c", container])
    exec_args.append("--")
    exec_args.extend(cmd)
    result = run_tool(*exec_args, capture_output=True, text=True, check=False)
    return result.returncode, result.stdout.strip()
|
|
||||||
|
|
||||||
|
|
||||||
def get_domain() -> str:
    """Discover the active domain from cluster state.

    Tries multiple reliable anchors; falls back to the Lima VM IP for local dev.
    Returns a bare domain like "sunbeam.pt" or "<ip>.sslip.io".
    """
    import base64

    # 1. Gitea inline-config secret: server section contains DOMAIN=src.<domain>
    # Works in both local and production because DOMAIN_SUFFIX is substituted
    # into gitea-values.yaml at apply time.
    raw = kube_out("get", "secret", "gitea-inline-config", "-n", "devtools",
                   "-o=jsonpath={.data.server}", "--ignore-not-found")
    if raw:
        try:
            server_ini = base64.b64decode(raw).decode()
            for line in server_ini.splitlines():
                if line.startswith("DOMAIN=src."):
                    # e.g. "DOMAIN=src.sunbeam.pt"
                    return line.split("DOMAIN=src.", 1)[1].strip()
        except Exception:
            # Best-effort: malformed/undecodable secret falls through to the
            # next anchor rather than aborting discovery.
            pass

    # 2. Fallback: lasuite-oidc-provider configmap (works if La Suite is deployed)
    raw2 = kube_out("get", "configmap", "lasuite-oidc-provider", "-n", "lasuite",
                    "-o=jsonpath={.data.OIDC_OP_JWKS_ENDPOINT}", "--ignore-not-found")
    if raw2 and "https://auth." in raw2:
        # Extract host part after "auth.": https://auth.<domain>/...
        return raw2.split("https://auth.")[1].split("/")[0]

    # 3. Local dev fallback
    ip = get_lima_ip()
    return f"{ip}.sslip.io"
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_k8s(kubectl_args: list[str]) -> int:
    """Transparent kubectl passthrough for the active context.

    Resolves the kubectl binary via ensure_tool, injects the --context flag,
    and runs with inherited stdio. Returns kubectl's exit code.
    """
    ensure_tunnel()
    from sunbeam.tools import ensure_tool
    bin_path = ensure_tool("kubectl")
    # No capture: interactive commands (logs -f, exec -it) must stream.
    r = subprocess.run([str(bin_path), context_arg(), *kubectl_args])
    return r.returncode
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_bao(bao_args: list[str]) -> int:
    """Run bao CLI inside the OpenBao pod with the root token. Returns exit code.

    Automatically resolves the pod name and root token from the cluster, then
    runs ``kubectl exec openbao-0 -- sh -c "VAULT_TOKEN=<tok> bao <args>"``
    so callers never need to handle raw kubectl exec or token management.
    """
    import base64
    import shlex

    ob_pod = kube_out("-n", "data", "get", "pod",
                      "-l", "app.kubernetes.io/name=openbao",
                      "-o", "jsonpath={.items[0].metadata.name}")
    if not ob_pod:
        from sunbeam.output import die
        die("OpenBao pod not found — is the cluster running?")

    token_b64 = kube_out("-n", "data", "get", "secret", "openbao-keys",
                         "-o", "jsonpath={.data.root-token}")
    root_token = base64.b64decode(token_b64).decode() if token_b64 else ""
    if not root_token:
        from sunbeam.output import die
        die("root-token not found in openbao-keys secret")

    # Quote the token and every argument so shell metacharacters cannot break
    # out of the `sh -c` command string (previously interpolated unquoted).
    cmd_str = ("VAULT_TOKEN=" + shlex.quote(root_token) + " bao "
               + " ".join(shlex.quote(a) for a in bao_args))
    r = subprocess.run(
        ["kubectl", context_arg(), "-n", "data", "exec", ob_pod,
         "-c", "openbao", "--", "sh", "-c", cmd_str]
    )
    return r.returncode
|
|
||||||
|
|
||||||
|
|
||||||
def kustomize_build(overlay: Path, domain: str, email: str = "") -> str:
    """Run kustomize build --enable-helm and apply domain/email substitution.

    Args:
        overlay: kustomize overlay directory to build.
        domain: substituted for every DOMAIN_SUFFIX placeholder.
        email: if non-empty, substituted for ACME_EMAIL.

    Returns the fully substituted manifest YAML; raises (check=True) if
    kustomize itself fails.
    """
    import socket as _socket
    r = run_tool(
        "kustomize", "build", "--enable-helm", str(overlay),
        capture_output=True, text=True, check=True,
    )
    text = r.stdout
    text = domain_replace(text, domain)
    if email:
        text = text.replace("ACME_EMAIL", email)
    if "REGISTRY_HOST_IP" in text:
        # Resolve src.<domain> to a literal IP for the registry placeholder.
        registry_ip = ""
        try:
            registry_ip = _socket.gethostbyname(f"src.{domain}")
        except _socket.gaierror:
            pass
        if not registry_ip:
            # DNS not resolvable locally (VPN, split-horizon, etc.) — derive IP from SSH host config
            from sunbeam.config import get_production_host as _get_host
            ssh_host = _get_host()
            # ssh_host may be "user@host" or just "host"
            raw = ssh_host.split("@")[-1].split(":")[0]
            try:
                registry_ip = _socket.gethostbyname(raw)
            except _socket.gaierror:
                registry_ip = raw  # raw is already an IP in typical config
        text = text.replace("REGISTRY_HOST_IP", registry_ip)
    # Strip rendered "annotations: null" lines.  NOTE(review): presumably a
    # Helm-chart artifact that upsets apply — confirm against chart output.
    text = text.replace("\n    annotations: null", "")
    return text
|
|
||||||
@@ -1,437 +0,0 @@
|
|||||||
"""Manifest build + apply — kustomize overlay with domain substitution."""
|
|
||||||
import time
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from sunbeam.kube import kube, kube_out, kube_ok, kube_apply, kustomize_build, get_lima_ip, get_domain
|
|
||||||
from sunbeam.output import step, ok, warn
|
|
||||||
|
|
||||||
from sunbeam.config import get_infra_dir as _get_infra_dir
|
|
||||||
# Root of the infrastructure repo (location of the kustomize overlays/bases).
REPO_ROOT = _get_infra_dir()
# Namespaces owned by `sunbeam apply` — cleanup passes and ConfigMap
# snapshots iterate exactly this list.
MANAGED_NS = ["data", "devtools", "ingress", "lasuite", "matrix", "media", "monitoring",
              "ory", "storage", "vault-secrets-operator"]
|
|
||||||
|
|
||||||
|
|
||||||
def pre_apply_cleanup(namespaces=None):
    """Delete immutable resources that must be re-created on each apply.

    Also prunes VaultStaticSecrets that share a name with a VaultDynamicSecret --
    kubectl apply doesn't delete the old resource when a manifest switches kinds,
    and VSO refuses to overwrite a secret owned by a different resource type.

    namespaces: if given, only clean those namespaces; otherwise clean all MANAGED_NS.
    """
    ns_list = namespaces if namespaces is not None else MANAGED_NS
    ok("Cleaning up immutable Jobs and test Pods...")
    for ns in ns_list:
        # Jobs are immutable once created; delete so re-apply can recreate.
        kube("delete", "jobs", "--all", "-n", ns, "--ignore-not-found", check=False)
        # Query all pods (no phase filter) — CrashLoopBackOff pods report phase=Running
        # so filtering on phase!=Running would silently skip them.
        pods_out = kube_out("get", "pods", "-n", ns,
                            "-o=jsonpath={.items[*].metadata.name}")
        for pod in pods_out.split():
            # These suffixes identify Helm test-hook pods left behind.
            if pod.endswith(("-test-connection", "-server-test", "-test")):
                kube("delete", "pod", pod, "-n", ns, "--ignore-not-found", check=False)

    # Prune VaultStaticSecrets that were replaced by VaultDynamicSecrets.
    # When a manifest transitions a resource from VSS -> VDS, apply won't delete
    # the old VSS; it just creates the new VDS alongside it. VSO then errors
    # "not the owner" because the K8s secret's ownerRef still points to the VSS.
    ok("Pruning stale VaultStaticSecrets superseded by VaultDynamicSecrets...")
    for ns in ns_list:
        vss_names = set(kube_out(
            "get", "vaultstaticsecret", "-n", ns,
            "-o=jsonpath={.items[*].metadata.name}", "--ignore-not-found",
        ).split())
        vds_names = set(kube_out(
            "get", "vaultdynamicsecret", "-n", ns,
            "-o=jsonpath={.items[*].metadata.name}", "--ignore-not-found",
        ).split())
        # A name present under both kinds means the VSS is the stale leftover.
        for stale in vss_names & vds_names:
            ok(f"  deleting stale VaultStaticSecret {ns}/{stale}")
            kube("delete", "vaultstaticsecret", stale, "-n", ns,
                 "--ignore-not-found", check=False)
|
|
||||||
|
|
||||||
|
|
||||||
def _snapshot_configmaps() -> dict:
    """Return {ns/name: resourceVersion} for all ConfigMaps in managed namespaces."""
    snapshot: dict = {}
    for ns in MANAGED_NS:
        listing = kube_out(
            "get", "configmaps", "-n", ns, "--ignore-not-found",
            "-o=jsonpath={range .items[*]}{.metadata.name}={.metadata.resourceVersion}\\n{end}",
        )
        for entry in listing.splitlines():
            if "=" not in entry:
                continue
            name, _, rv = entry.partition("=")
            snapshot[f"{ns}/{name}"] = rv
    return snapshot
|
|
||||||
|
|
||||||
|
|
||||||
def _restart_for_changed_configmaps(before: dict, after: dict):
    """Restart deployments that mount any ConfigMap whose resourceVersion changed.

    before/after: snapshots from _snapshot_configmaps(), keyed "ns/name".
    """
    # Group the names of changed ConfigMaps by namespace.
    changed_by_ns: dict = {}
    for key, rv in after.items():
        if before.get(key) != rv:
            ns, name = key.split("/", 1)
            changed_by_ns.setdefault(ns, set()).add(name)

    for ns, cm_names in changed_by_ns.items():
        # One jsonpath query per namespace, rendered as
        # "deploy1:cm1,cm2,;deploy2:cmA,;..." for cheap parsing below.
        out = kube_out(
            "get", "deployments", "-n", ns, "--ignore-not-found",
            "-o=jsonpath={range .items[*]}{.metadata.name}:"
            "{range .spec.template.spec.volumes[*]}{.configMap.name},{end};{end}",
        )
        for entry in out.split(";"):
            entry = entry.strip()
            if not entry or ":" not in entry:
                continue
            dep, vols = entry.split(":", 1)
            # Non-ConfigMap volumes render as empty names — filter them out.
            mounted = {v.strip() for v in vols.split(",") if v.strip()}
            if mounted & cm_names:
                ok(f"Restarting {ns}/{dep} (ConfigMap updated)...")
                kube("rollout", "restart", f"deployment/{dep}", "-n", ns, check=False)
|
|
||||||
|
|
||||||
|
|
||||||
def _wait_for_webhook(ns: str, svc: str, timeout: int = 120) -> bool:
    """Poll until a webhook service endpoint exists (signals webhook is ready).

    Returns True if the webhook is ready within timeout seconds.
    """
    ok(f"Waiting for {ns}/{svc} webhook (up to {timeout}s)...")
    give_up_at = time.time() + timeout
    while time.time() < give_up_at:
        first_addr = kube_out("get", "endpoints", svc, "-n", ns,
                              "-o=jsonpath={.subsets[0].addresses[0].ip}", "--ignore-not-found")
        if first_addr:
            ok(f"  {ns}/{svc} ready.")
            return True
        time.sleep(3)
    warn(f"  {ns}/{svc} not ready after {timeout}s — continuing anyway.")
    return False
|
|
||||||
|
|
||||||
|
|
||||||
def _apply_mkcert_ca_configmap():
    """Create/update gitea-mkcert-ca ConfigMap from the local mkcert root CA.

    Only called in local env. The ConfigMap is mounted into Gitea so Go's TLS
    stack trusts the mkcert wildcard cert when making server-side OIDC calls.
    """
    import subprocess, json
    from pathlib import Path
    try:
        proc = subprocess.run(["mkcert", "-CAROOT"], capture_output=True, text=True)
    except FileNotFoundError:
        # subprocess.run raises when the binary is absent; the previous code
        # would crash here instead of the graceful skip its warning implies.
        warn("mkcert not found — skipping gitea-mkcert-ca ConfigMap.")
        return
    caroot = proc.stdout.strip()
    if not caroot:
        warn("mkcert not found — skipping gitea-mkcert-ca ConfigMap.")
        return
    ca_pem = Path(caroot) / "rootCA.pem"
    if not ca_pem.exists():
        warn(f"mkcert root CA not found at {ca_pem} — skipping.")
        return
    cm = json.dumps({
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {"name": "gitea-mkcert-ca", "namespace": "devtools"},
        "data": {"ca.crt": ca_pem.read_text()},
    })
    kube("apply", "--server-side", "-f", "-", input=cm)
    ok("gitea-mkcert-ca ConfigMap applied.")
|
|
||||||
|
|
||||||
|
|
||||||
def _filter_by_namespace(manifests: str, namespace: str) -> str:
    """Return only the YAML documents that belong to the given namespace.

    Also includes the Namespace resource itself (safe to re-apply).
    Matches whole lines ('namespace: <name>' / 'name: <name>') rather than
    substrings — a substring test would wrongly keep documents whose
    namespace merely starts with the target (e.g. filtering for "data"
    matching "namespace: database"). No full YAML parser is needed: these
    fields always appear on their own line.
    """
    ns_line = f"namespace: {namespace}"
    name_line = f"name: {namespace}"
    kept = []
    for doc in manifests.split("\n---"):
        doc = doc.strip()
        if not doc:
            continue
        lines = {line.strip() for line in doc.splitlines()}
        if ns_line in lines:
            kept.append(doc)
        elif "kind: Namespace" in lines and name_line in lines:
            kept.append(doc)
    if not kept:
        return ""
    return "---\n" + "\n---\n".join(kept) + "\n"
|
|
||||||
|
|
||||||
|
|
||||||
def _patch_tuwunel_oauth2_redirect(domain: str):
    """Patch the tuwunel OAuth2Client redirect URI with the actual client_id.

    Hydra-maester generates the client_id when it first reconciles the
    OAuth2Client CRD, storing it in the oidc-tuwunel Secret. We read that
    secret and patch the CRD's redirectUris to include the correct callback
    path that tuwunel will use.
    """
    import base64, json

    client_id_b64 = kube_out("get", "secret", "oidc-tuwunel", "-n", "matrix",
                             "-o=jsonpath={.data.CLIENT_ID}", "--ignore-not-found")
    if not client_id_b64:
        warn("oidc-tuwunel secret not yet available — skipping redirect URI patch. "
             "Re-run 'sunbeam apply matrix' after hydra-maester has reconciled.")
        return

    client_id = base64.b64decode(client_id_b64).decode()
    redirect_uri = f"https://messages.{domain}/_matrix/client/unstable/login/sso/callback/{client_id}"

    # Check current redirect URIs to avoid unnecessary patches.
    current = kube_out("get", "oauth2client", "tuwunel", "-n", "matrix",
                       "-o=jsonpath={.spec.redirectUris[*]}", "--ignore-not-found")
    if redirect_uri in current.split():
        return

    patch = json.dumps({"spec": {"redirectUris": [redirect_uri]}})
    kube("patch", "oauth2client", "tuwunel", "-n", "matrix",
         "--type=merge", f"-p={patch}", check=False)
    # Plain string literal — the previous f-string had no placeholders (F541).
    ok("Patched tuwunel OAuth2Client redirect URI.")
|
|
||||||
|
|
||||||
|
|
||||||
def _os_api(path: str, method: str = "GET", data: str | None = None) -> str:
    """Call OpenSearch API via kubectl exec. Returns response body."""
    exec_prefix = ["exec", "deploy/opensearch", "-n", "data", "-c", "opensearch", "--"]
    request = ["curl", "-sf", f"http://localhost:9200{path}"]
    if method != "GET":
        request.extend(["-X", method])
    if data is not None:
        request.extend(["-H", "Content-Type: application/json", "-d", data])
    return kube_out(*exec_prefix, *request)
|
|
||||||
|
|
||||||
|
|
||||||
def _ensure_opensearch_ml():
    """Idempotently configure OpenSearch ML Commons for neural search.

    1. Sets cluster settings to allow ML on data nodes.
    2. Registers and deploys all-mpnet-base-v2 (pre-trained, 384-dim).
    3. Creates ingest + search pipelines for hybrid BM25+neural scoring.

    Returns the model_id on full success, None on any early exit.
    NOTE(review): all-mpnet-base-v2 is commonly documented as 768-dim —
    the "384-dim" above may be inaccurate; confirm.
    """
    import json, time

    # Check OpenSearch is reachable.
    if not _os_api("/_cluster/health"):
        warn("OpenSearch not reachable — skipping ML setup.")
        return

    # 1. Ensure ML Commons cluster settings (idempotent PUT).
    _os_api("/_cluster/settings", "PUT", json.dumps({"persistent": {
        "plugins.ml_commons.only_run_on_ml_node": False,
        "plugins.ml_commons.native_memory_threshold": 90,
        "plugins.ml_commons.model_access_control_enabled": False,
        "plugins.ml_commons.allow_registering_model_via_url": True,
    }}))

    # 2. Check if model already registered and deployed.
    search_resp = _os_api("/_plugins/_ml/models/_search", "POST",
                          '{"query":{"match":{"name":"huggingface/sentence-transformers/all-mpnet-base-v2"}}}')
    if not search_resp:
        warn("OpenSearch ML search API failed — skipping ML setup.")
        return

    resp = json.loads(search_resp)
    hits = resp.get("hits", {}).get("hits", [])
    model_id = None

    # Prefer a DEPLOYED model; otherwise remember any registered/deploying one.
    for hit in hits:
        state = hit.get("_source", {}).get("model_state", "")
        if state == "DEPLOYED":
            model_id = hit["_id"]
            break
        elif state in ("REGISTERED", "DEPLOYING"):
            model_id = hit["_id"]

    if model_id and any(h["_source"].get("model_state") == "DEPLOYED" for h in hits):
        pass  # Already deployed, skip to pipelines.
    elif model_id:
        # Registered but not deployed — deploy it.
        ok("Deploying OpenSearch ML model...")
        _os_api(f"/_plugins/_ml/models/{model_id}/_deploy", "POST")
        # Poll up to 30 x 5s for the deploy to settle.
        for _ in range(30):
            time.sleep(5)
            r = _os_api(f"/_plugins/_ml/models/{model_id}")
            if r and '"DEPLOYED"' in r:
                break
    else:
        # Register from pre-trained hub.
        ok("Registering OpenSearch ML model (all-mpnet-base-v2)...")
        reg_resp = _os_api("/_plugins/_ml/models/_register", "POST", json.dumps({
            "name": "huggingface/sentence-transformers/all-mpnet-base-v2",
            "version": "1.0.1",
            "model_format": "TORCH_SCRIPT",
        }))
        if not reg_resp:
            warn("Failed to register ML model — skipping.")
            return
        task_id = json.loads(reg_resp).get("task_id", "")
        if not task_id:
            warn("No task_id from model registration — skipping.")
            return

        # Wait for registration (up to 60 x 10s — model download is slow).
        ok("Waiting for model registration...")
        for _ in range(60):
            time.sleep(10)
            task_resp = _os_api(f"/_plugins/_ml/tasks/{task_id}")
            if not task_resp:
                continue
            task = json.loads(task_resp)
            state = task.get("state", "")
            if state == "COMPLETED":
                model_id = task.get("model_id", "")
                break
            if state == "FAILED":
                warn(f"ML model registration failed: {task_resp}")
                return

        if not model_id:
            warn("ML model registration timed out.")
            return

        # Deploy.
        ok("Deploying ML model...")
        _os_api(f"/_plugins/_ml/models/{model_id}/_deploy", "POST")
        for _ in range(30):
            time.sleep(5)
            r = _os_api(f"/_plugins/_ml/models/{model_id}")
            if r and '"DEPLOYED"' in r:
                break

    if not model_id:
        warn("No ML model available — skipping pipeline setup.")
        return

    # 3. Create/update ingest pipeline (PUT is idempotent).
    _os_api("/_ingest/pipeline/tuwunel_embedding_pipeline", "PUT", json.dumps({
        "description": "Tuwunel message embedding pipeline",
        "processors": [{"text_embedding": {
            "model_id": model_id,
            "field_map": {"body": "embedding"},
        }}],
    }))

    # 4. Create/update search pipeline (PUT is idempotent).
    _os_api("/_search/pipeline/tuwunel_hybrid_pipeline", "PUT", json.dumps({
        "description": "Tuwunel hybrid BM25+neural search pipeline",
        "phase_results_processors": [{"normalization-processor": {
            "normalization": {"technique": "min_max"},
            "combination": {"technique": "arithmetic_mean", "parameters": {"weights": [0.3, 0.7]}},
        }}],
    }))

    ok(f"OpenSearch ML ready (model: {model_id}).")
    return model_id
|
|
||||||
|
|
||||||
|
|
||||||
def _inject_opensearch_model_id():
    """Read deployed ML model_id from OpenSearch, write to ConfigMap in matrix ns.

    The tuwunel deployment reads TUWUNEL_SEARCH_OPENSEARCH_MODEL_ID from this
    ConfigMap. Creates or updates the ConfigMap idempotently.

    Reads the model_id from the ingest pipeline (which _ensure_opensearch_ml
    already configured with the correct model_id).
    """
    import json

    # Read model_id from the ingest pipeline that _ensure_opensearch_ml created.
    pipe_resp = _os_api("/_ingest/pipeline/tuwunel_embedding_pipeline")
    if not pipe_resp:
        warn("OpenSearch ingest pipeline not found — skipping model_id injection. "
             "Run 'sunbeam apply data' first.")
        return

    pipe = json.loads(pipe_resp)
    processors = (pipe.get("tuwunel_embedding_pipeline", {})
                  .get("processors", []))
    model_id = None
    # Take the first processor that carries a text_embedding.model_id.
    for proc in processors:
        model_id = proc.get("text_embedding", {}).get("model_id")
        if model_id:
            break

    if not model_id:
        warn("No model_id in ingest pipeline — tuwunel hybrid search will be unavailable.")
        return

    # Check if ConfigMap already has this value.
    current = kube_out("get", "configmap", "opensearch-ml-config", "-n", "matrix",
                       "-o=jsonpath={.data.model_id}", "--ignore-not-found")
    if current == model_id:
        return

    cm = json.dumps({
        "apiVersion": "v1",
        "kind": "ConfigMap",
        "metadata": {"name": "opensearch-ml-config", "namespace": "matrix"},
        "data": {"model_id": model_id},
    })
    kube("apply", "--server-side", "-f", "-", input=cm)
    ok(f"Injected OpenSearch model_id ({model_id}) into matrix/opensearch-ml-config.")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_apply(env: str = "local", domain: str = "", email: str = "", namespace: str = ""):
    """Build kustomize overlay for env, substitute domain/email, kubectl apply.

    Args:
        env: "local" (Lima VM, default) or "production".
        domain: target domain; discovered/derived when empty.
        email: ACME email; falls back to config when empty.
        namespace: if set, apply only that namespace's resources.

    Runs a second convergence pass if cert-manager is present in the overlay —
    cert-manager registers a ValidatingWebhook that must be running before
    ClusterIssuer / Certificate resources can be created.
    """
    # Fall back to config for ACME email if not provided via CLI flag.
    if not email:
        from sunbeam.config import load_config
        email = load_config().acme_email

    if env == "production":
        if not domain:
            # Try to discover domain from running cluster
            domain = get_domain()
        if not domain:
            from sunbeam.output import die
            die("--domain is required for production apply on first deploy")
        overlay = REPO_ROOT / "overlays" / "production"
    else:
        # Local env: domain is always derived from the VM IP via sslip.io.
        ip = get_lima_ip()
        domain = f"{ip}.sslip.io"
        overlay = REPO_ROOT / "overlays" / "local"

    scope = f" [{namespace}]" if namespace else ""
    step(f"Applying manifests (env: {env}, domain: {domain}){scope}...")
    if env == "local":
        _apply_mkcert_ca_configmap()
    ns_list = [namespace] if namespace else None
    pre_apply_cleanup(namespaces=ns_list)
    # Snapshot ConfigMap versions so we can restart mounters afterwards.
    before = _snapshot_configmaps()
    manifests = kustomize_build(overlay, domain, email=email)

    if namespace:
        manifests = _filter_by_namespace(manifests, namespace)
        if not manifests.strip():
            warn(f"No resources found for namespace '{namespace}' — check the name and try again.")
            return

    # First pass: may emit errors for resources that depend on webhooks not yet running
    # (e.g. cert-manager ClusterIssuer/Certificate), which is expected on first deploy.
    kube("apply", "--server-side", "--force-conflicts", "-f", "-",
         input=manifests, check=False)

    # If cert-manager is in the overlay, wait for its webhook then re-apply
    # so that ClusterIssuer and Certificate resources converge.
    # Skip for partial applies unless the target IS cert-manager.
    cert_manager_present = (overlay / "../../base/cert-manager").resolve().exists()
    if cert_manager_present and not namespace:
        if _wait_for_webhook("cert-manager", "cert-manager-webhook", timeout=120):
            ok("Running convergence pass for cert-manager resources...")
            manifests2 = kustomize_build(overlay, domain, email=email)
            kube("apply", "--server-side", "--force-conflicts", "-f", "-", input=manifests2)

    _restart_for_changed_configmaps(before, _snapshot_configmaps())

    # Post-apply hooks for namespaces that need runtime patching.
    if not namespace or namespace == "matrix":
        _patch_tuwunel_oauth2_redirect(domain)
        _inject_opensearch_model_id()
    if not namespace or namespace == "data":
        _ensure_opensearch_ml()

    ok("Applied.")
|
|
||||||
@@ -1,47 +0,0 @@
|
|||||||
"""Output helpers — step/ok/warn/die + aligned text table."""
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
def step(msg: str) -> None:
    """Print a step header (leading blank line, arrow prefix), flushed."""
    banner = f"\n==> {msg}"
    print(banner, flush=True)
|
|
||||||
|
|
||||||
|
|
||||||
def ok(msg: str) -> None:
    """Print an indented success/info line, flushed immediately."""
    line = f"  {msg}"
    print(line, flush=True)
|
|
||||||
|
|
||||||
|
|
||||||
def warn(msg: str) -> None:
    """Emit a warning line on stderr so it survives stdout redirection."""
    sys.stderr.write(f"  WARN: {msg}\n")
    sys.stderr.flush()
|
|
||||||
|
|
||||||
|
|
||||||
def die(msg: str) -> None:
    """Print an error to stderr and terminate the process with exit code 1."""
    sys.stderr.write(f"\nERROR: {msg}\n")
    sys.stderr.flush()
    sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def table(rows: list[list[str]], headers: list[str]) -> str:
    """Return an aligned text table. Columns padded to max width.

    Extra cells beyond the header count are ignored; short rows are
    padded with empty cells. Empty headers yield an empty string.
    """
    if not headers:
        return ""
    # Column widths: at least the header width, grown by the widest cell.
    widths = [len(h) for h in headers]
    for row in rows:
        for idx, cell in enumerate(row[:len(widths)]):
            widths[idx] = max(widths[idx], len(cell))
    rendered = [
        " ".join(h.ljust(widths[i]) for i, h in enumerate(headers)),
        " ".join("-" * w for w in widths),
    ]
    for row in rows:
        padded = [
            (row[i] if i < len(row) else "").ljust(widths[i])
            for i in range(len(headers))
        ]
        rendered.append(" ".join(padded))
    return "\n".join(rendered)
|
|
||||||
@@ -1,978 +0,0 @@
|
|||||||
"""Secrets management — OpenBao KV seeding, DB engine config, VSO verification."""
|
|
||||||
import base64
|
|
||||||
import json
|
|
||||||
import secrets as _secrets
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
import urllib.error
|
|
||||||
import urllib.request
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
from sunbeam.kube import kube, kube_out, kube_ok, kube_apply, ensure_ns, create_secret, get_domain, context_arg
|
|
||||||
from sunbeam.output import step, ok, warn, die
|
|
||||||
|
|
||||||
# Admin account name provisioned across services during secret seeding.
ADMIN_USERNAME = "estudio-admin"
|
|
||||||
|
|
||||||
|
|
||||||
def _gen_fernet_key() -> str:
    """Generate a Fernet-compatible key (32 random bytes, URL-safe base64)."""
    raw = _secrets.token_bytes(32)
    return base64.urlsafe_b64encode(raw).decode()
|
|
||||||
|
|
||||||
|
|
||||||
def _gen_dkim_key_pair() -> tuple[str, str]:
    """Generate an RSA 2048-bit DKIM key pair using openssl.

    Returns (private_pem_pkcs8, public_pem). Returns ("", "") on failure.
    """
    # A single try wraps all three openssl invocations so a missing
    # binary is handled identically wherever it is first noticed.
    try:
        genrsa = subprocess.run(["openssl", "genrsa", "2048"],
                                capture_output=True, text=True)
        if genrsa.returncode != 0:
            warn(f"openssl genrsa failed: {genrsa.stderr.strip()}")
            return ("", "")
        # Convert to PKCS8 (format expected by rspamd); fall back to the
        # raw PKCS1 output if the conversion step fails.
        pkcs8 = subprocess.run(["openssl", "pkcs8", "-topk8", "-nocrypt"],
                               input=genrsa.stdout, capture_output=True, text=True)
        private_pem = pkcs8.stdout.strip() if pkcs8.returncode == 0 else genrsa.stdout.strip()
        # Extract public key from the original RSA key
        pubout = subprocess.run(["openssl", "rsa", "-pubout"],
                                input=genrsa.stdout, capture_output=True, text=True)
        if pubout.returncode != 0:
            warn(f"openssl rsa -pubout failed: {pubout.stderr.strip()}")
            return (private_pem, "")
        return (private_pem, pubout.stdout.strip())
    except FileNotFoundError:
        warn("openssl not found -- skipping DKIM key generation.")
        return ("", "")
|
|
||||||
|
|
||||||
# Name of the local Lima VM used for the dev cluster.
LIMA_VM = "sunbeam"
# Bootstrap admin account name for the Gitea instance.
GITEA_ADMIN_USER = "gitea_admin"
# PostgreSQL service users whose passwords are managed by the OpenBao
# database secrets engine (one static role per user; see
# _configure_db_engine, which also grants them to the `vault` PG user).
PG_USERS = [
    "kratos", "hydra", "gitea", "hive",
    "docs", "meet", "drive", "messages", "conversations",
    "people", "find", "calendars", "projects",
]
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# OpenBao KV seeding
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def _seed_openbao() -> dict:
    """Initialize/unseal OpenBao, generate/read credentials idempotently, configure VSO auth.

    Returns a dict of all generated credentials. Values are read from existing
    OpenBao KV entries when present -- re-running never rotates credentials.
    The returned dict also carries two private bookkeeping entries
    ("_ob_pod", "_root_token") consumed by cmd_seed; returns {} when the
    OpenBao pod is missing, init fails, or no root token is available.
    """
    ob_pod = kube_out(
        "-n", "data", "get", "pods",
        "-l=app.kubernetes.io/name=openbao,component=server",
        "-o=jsonpath={.items[0].metadata.name}",
    )
    if not ob_pod:
        ok("OpenBao pod not found -- skipping.")
        return {}

    ok(f"OpenBao ({ob_pod})...")
    kube("wait", "-n", "data", f"pod/{ob_pod}",
         "--for=jsonpath={.status.phase}=Running", "--timeout=120s", check=False)

    def bao(cmd):
        # Run a shell command inside the OpenBao container; returns stdout
        # stripped. Exit status is intentionally ignored -- callers append
        # `|| true` / `|| echo '{}'` when they need a fallback.
        r = subprocess.run(
            ["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
             "--", "sh", "-c", cmd],
            capture_output=True, text=True,
        )
        return r.stdout.strip()

    def bao_status():
        # Parse `bao status` JSON; {} when the server is unreachable or
        # emits non-JSON output.
        out = bao("bao status -format=json 2>/dev/null || echo '{}'")
        try:
            return json.loads(out)
        except json.JSONDecodeError:
            return {}

    unseal_key = ""
    root_token = ""

    status = bao_status()
    already_initialized = status.get("initialized", False)
    if not already_initialized:
        # Fall back to the stored keys secret -- the server may simply be
        # sealed/unreachable rather than uninitialized.
        existing_key = kube_out("-n", "data", "get", "secret", "openbao-keys",
                                "-o=jsonpath={.data.key}")
        already_initialized = bool(existing_key)

    if not already_initialized:
        ok("Initializing OpenBao...")
        init_json = bao("bao operator init -key-shares=1 -key-threshold=1 -format=json 2>/dev/null || echo '{}'")
        try:
            init = json.loads(init_json)
            unseal_key = init["unseal_keys_b64"][0]
            root_token = init["root_token"]
            # Persist unseal key + root token in a K8s secret for reruns.
            create_secret("data", "openbao-keys",
                          key=unseal_key, **{"root-token": root_token})
            ok("Initialized -- keys stored in secret/openbao-keys.")
        except (json.JSONDecodeError, KeyError):
            warn("Init failed -- resetting OpenBao storage for local dev...")
            kube("delete", "pvc", "data-openbao-0", "-n", "data", "--ignore-not-found", check=False)
            kube("delete", "pod", ob_pod, "-n", "data", "--ignore-not-found", check=False)
            warn("OpenBao storage reset. Run --seed again after the pod restarts.")
            return {}
    else:
        ok("Already initialized.")
        # Recover the unseal key and root token from the stored secret.
        existing_key = kube_out("-n", "data", "get", "secret", "openbao-keys",
                                "-o=jsonpath={.data.key}")
        if existing_key:
            unseal_key = base64.b64decode(existing_key).decode()
        root_token_enc = kube_out("-n", "data", "get", "secret", "openbao-keys",
                                  "-o=jsonpath={.data.root-token}")
        if root_token_enc:
            root_token = base64.b64decode(root_token_enc).decode()

    if bao_status().get("sealed", False) and unseal_key:
        ok("Unsealing...")
        bao(f"bao operator unseal '{unseal_key}' 2>/dev/null")

    if not root_token:
        warn("No root token available -- skipping KV seeding.")
        return {}

    # Read-or-generate helper: preserves existing KV values; only generates missing ones.
    # Tracks which paths had new values so we only write back when necessary.
    _dirty_paths: set = set()

    def get_or_create(path, **fields):
        # For each field name -> generator callable: reuse the value stored
        # at secret/<path> when present, otherwise call the generator and
        # mark the path dirty so it gets written back below.
        raw = bao(
            f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
            f"bao kv get -format=json secret/{path} 2>/dev/null || echo '{{}}'"
        )
        existing = {}
        try:
            existing = json.loads(raw).get("data", {}).get("data", {})
        except (json.JSONDecodeError, AttributeError):
            pass
        result = {}
        for key, default_fn in fields.items():
            val = existing.get(key)
            if val:
                result[key] = val
            else:
                result[key] = default_fn()
                _dirty_paths.add(path)
        return result

    def rand():
        # Fresh crypto-random URL-safe token (default generator).
        return _secrets.token_urlsafe(32)

    ok("Seeding KV (idempotent -- existing values preserved)...")

    # Enable the KV v2 engine at secret/; tolerate "already enabled".
    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao secrets enable -path=secret -version=2 kv 2>/dev/null || true")

    # DB passwords removed -- OpenBao database secrets engine manages them via static roles.
    hydra = get_or_create("hydra",
                          **{"system-secret": rand,
                             "cookie-secret": rand,
                             "pairwise-salt": rand})

    SMTP_URI = "smtp://postfix.lasuite.svc.cluster.local:25/?skip_ssl_verify=true"
    kratos = get_or_create("kratos",
                           **{"secrets-default": rand,
                              "secrets-cookie": rand,
                              "smtp-connection-uri": lambda: SMTP_URI})

    seaweedfs = get_or_create("seaweedfs",
                              **{"access-key": rand, "secret-key": rand})

    gitea = get_or_create("gitea",
                          **{"admin-username": lambda: GITEA_ADMIN_USER,
                             "admin-password": rand})

    hive = get_or_create("hive",
                         **{"oidc-client-id": lambda: "hive-local",
                            "oidc-client-secret": rand})

    livekit = get_or_create("livekit",
                            **{"api-key": lambda: "devkey",
                               "api-secret": rand})

    people = get_or_create("people",
                           **{"django-secret-key": rand})

    login_ui = get_or_create("login-ui",
                             **{"cookie-secret": rand,
                                "csrf-cookie-secret": rand})

    # kratos-admin mirrors the seaweedfs S3 credentials generated above.
    kratos_admin = get_or_create("kratos-admin",
                                 **{"cookie-secret": rand,
                                    "csrf-cookie-secret": rand,
                                    "admin-identity-ids": lambda: "",
                                    "s3-access-key": lambda: seaweedfs["access-key"],
                                    "s3-secret-key": lambda: seaweedfs["secret-key"]})

    docs = get_or_create("docs",
                         **{"django-secret-key": rand,
                            "collaboration-secret": rand})

    meet = get_or_create("meet",
                         **{"django-secret-key": rand,
                            "application-jwt-secret-key": rand})

    drive = get_or_create("drive",
                          **{"django-secret-key": rand})

    projects = get_or_create("projects",
                             **{"secret-key": rand})

    calendars = get_or_create("calendars",
                              **{"django-secret-key": lambda: _secrets.token_urlsafe(50),
                                 "salt-key": rand,
                                 "caldav-inbound-api-key": rand,
                                 "caldav-outbound-api-key": rand,
                                 "caldav-internal-api-key": rand})

    # DKIM key pair -- generated together since private and public keys are coupled.
    # Read existing keys first; only generate a new pair when absent.
    existing_messages_raw = bao(
        f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao kv get -format=json secret/messages 2>/dev/null || echo '{{}}'"
    )
    existing_messages = {}
    try:
        existing_messages = json.loads(existing_messages_raw).get("data", {}).get("data", {})
    except (json.JSONDecodeError, AttributeError):
        pass

    if existing_messages.get("dkim-private-key"):
        _dkim_private = existing_messages["dkim-private-key"]
        _dkim_public = existing_messages.get("dkim-public-key", "")
    else:
        _dkim_private, _dkim_public = _gen_dkim_key_pair()

    messages = get_or_create("messages",
                             **{"django-secret-key": rand,
                                "salt-key": rand,
                                "mda-api-secret": rand,
                                "oidc-refresh-token-key": _gen_fernet_key,
                                "dkim-private-key": lambda: _dkim_private,
                                "dkim-public-key": lambda: _dkim_public,
                                "rspamd-password": rand,
                                "socks-proxy-users": lambda: f"sunbeam:{rand()}",
                                "mta-out-smtp-username": lambda: "sunbeam",
                                "mta-out-smtp-password": rand})

    collabora = get_or_create("collabora",
                              **{"username": lambda: "admin",
                                 "password": rand})

    tuwunel = get_or_create("tuwunel",
                            **{"oidc-client-id": lambda: "",
                               "oidc-client-secret": lambda: "",
                               "turn-secret": lambda: "",
                               "registration-token": rand})

    # Scaleway S3 credentials for CNPG barman backups.
    # Read from `scw config` at seed time; falls back to empty string (operator must fill in).
    def _scw_config(key):
        try:
            r = subprocess.run(["scw", "config", "get", key],
                               capture_output=True, text=True, timeout=5)
            return r.stdout.strip() if r.returncode == 0 else ""
        except (FileNotFoundError, subprocess.TimeoutExpired):
            return ""

    grafana = get_or_create("grafana",
                            **{"admin-password": rand})

    scaleway_s3 = get_or_create("scaleway-s3",
                                **{"access-key-id": lambda: _scw_config("access-key"),
                                   "secret-access-key": lambda: _scw_config("secret-key")})

    # Only write secrets to OpenBao KV for paths that have new/missing values.
    # This avoids unnecessary KV version bumps which trigger VSO re-syncs and
    # rollout restarts across the cluster.
    if not _dirty_paths:
        ok("All OpenBao KV secrets already present -- skipping writes.")
    else:
        ok(f"Writing new secrets to OpenBao KV ({', '.join(sorted(_dirty_paths))})...")

    def _kv_put(path, **kv):
        # Write all fields of one KV path in a single `bao kv put`.
        pairs = " ".join(f'{k}="{v}"' for k, v in kv.items())
        bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
            f"bao kv put secret/{path} {pairs}")

    if "messages" in _dirty_paths:
        _kv_put("messages",
                **{"django-secret-key": messages["django-secret-key"],
                   "salt-key": messages["salt-key"],
                   "mda-api-secret": messages["mda-api-secret"],
                   "oidc-refresh-token-key": messages["oidc-refresh-token-key"],
                   "rspamd-password": messages["rspamd-password"],
                   "socks-proxy-users": messages["socks-proxy-users"],
                   "mta-out-smtp-username": messages["mta-out-smtp-username"],
                   "mta-out-smtp-password": messages["mta-out-smtp-password"]})
        # DKIM keys stored separately (large PEM values)
        dkim_priv_b64 = base64.b64encode(messages['dkim-private-key'].encode()).decode()
        dkim_pub_b64 = base64.b64encode(messages['dkim-public-key'].encode()).decode()
        # Transport the PEMs base64-encoded to dodge shell quoting/newlines,
        # decode to temp files in-container, then `kv patch` them in.
        bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' sh -c '"
            f"echo {dkim_priv_b64} | base64 -d > /tmp/dkim_priv.pem && "
            f"echo {dkim_pub_b64} | base64 -d > /tmp/dkim_pub.pem && "
            f"bao kv patch secret/messages"
            f" dkim-private-key=\"$(cat /tmp/dkim_priv.pem)\""
            f" dkim-public-key=\"$(cat /tmp/dkim_pub.pem)\" && "
            f"rm /tmp/dkim_priv.pem /tmp/dkim_pub.pem"
            f"'")
    if "hydra" in _dirty_paths:
        _kv_put("hydra", **{"system-secret": hydra["system-secret"],
                            "cookie-secret": hydra["cookie-secret"],
                            "pairwise-salt": hydra["pairwise-salt"]})
    if "kratos" in _dirty_paths:
        _kv_put("kratos", **{"secrets-default": kratos["secrets-default"],
                             "secrets-cookie": kratos["secrets-cookie"],
                             "smtp-connection-uri": kratos["smtp-connection-uri"]})
    if "gitea" in _dirty_paths:
        _kv_put("gitea", **{"admin-username": gitea["admin-username"],
                            "admin-password": gitea["admin-password"]})
    if "seaweedfs" in _dirty_paths:
        _kv_put("seaweedfs", **{"access-key": seaweedfs["access-key"],
                                "secret-key": seaweedfs["secret-key"]})
    if "hive" in _dirty_paths:
        _kv_put("hive", **{"oidc-client-id": hive["oidc-client-id"],
                           "oidc-client-secret": hive["oidc-client-secret"]})
    if "livekit" in _dirty_paths:
        _kv_put("livekit", **{"api-key": livekit["api-key"],
                              "api-secret": livekit["api-secret"]})
    if "people" in _dirty_paths:
        _kv_put("people", **{"django-secret-key": people["django-secret-key"]})
    if "login-ui" in _dirty_paths:
        _kv_put("login-ui", **{"cookie-secret": login_ui["cookie-secret"],
                               "csrf-cookie-secret": login_ui["csrf-cookie-secret"]})
    if "kratos-admin" in _dirty_paths:
        _kv_put("kratos-admin", **{"cookie-secret": kratos_admin["cookie-secret"],
                                   "csrf-cookie-secret": kratos_admin["csrf-cookie-secret"],
                                   "admin-identity-ids": kratos_admin["admin-identity-ids"],
                                   "s3-access-key": kratos_admin["s3-access-key"],
                                   "s3-secret-key": kratos_admin["s3-secret-key"]})
    if "docs" in _dirty_paths:
        _kv_put("docs", **{"django-secret-key": docs["django-secret-key"],
                           "collaboration-secret": docs["collaboration-secret"]})
    if "meet" in _dirty_paths:
        _kv_put("meet", **{"django-secret-key": meet["django-secret-key"],
                           "application-jwt-secret-key": meet["application-jwt-secret-key"]})
    if "drive" in _dirty_paths:
        _kv_put("drive", **{"django-secret-key": drive["django-secret-key"]})
    if "projects" in _dirty_paths:
        _kv_put("projects", **{"secret-key": projects["secret-key"]})
    if "calendars" in _dirty_paths:
        _kv_put("calendars", **{"django-secret-key": calendars["django-secret-key"],
                                "salt-key": calendars["salt-key"],
                                "caldav-inbound-api-key": calendars["caldav-inbound-api-key"],
                                "caldav-outbound-api-key": calendars["caldav-outbound-api-key"],
                                "caldav-internal-api-key": calendars["caldav-internal-api-key"]})
    if "collabora" in _dirty_paths:
        _kv_put("collabora", **{"username": collabora["username"],
                                "password": collabora["password"]})
    if "grafana" in _dirty_paths:
        _kv_put("grafana", **{"admin-password": grafana["admin-password"]})
    if "scaleway-s3" in _dirty_paths:
        _kv_put("scaleway-s3", **{"access-key-id": scaleway_s3["access-key-id"],
                                  "secret-access-key": scaleway_s3["secret-access-key"]})
    if "tuwunel" in _dirty_paths:
        _kv_put("tuwunel", **{"oidc-client-id": tuwunel["oidc-client-id"],
                              "oidc-client-secret": tuwunel["oidc-client-secret"],
                              "turn-secret": tuwunel["turn-secret"],
                              "registration-token": tuwunel["registration-token"]})

    # Patch gitea admin credentials into secret/sol for Sol's Gitea integration.
    # Uses kv patch (not put) to preserve manually-set keys (matrix-access-token etc.).
    ok("Patching Gitea admin credentials into secret/sol...")
    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao kv patch secret/sol "
        f"gitea-admin-username='{gitea['admin-username']}' "
        f"gitea-admin-password='{gitea['admin-password']}'")

    # Configure Kubernetes auth method so VSO can authenticate with OpenBao
    ok("Configuring Kubernetes auth for VSO...")
    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao auth enable kubernetes 2>/dev/null; true")
    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao write auth/kubernetes/config "
        f"kubernetes_host=https://kubernetes.default.svc.cluster.local")

    # Read-only policy for the Vault Secrets Operator.
    policy_hcl = (
        'path "secret/data/*" { capabilities = ["read"] }\n'
        'path "secret/metadata/*" { capabilities = ["read", "list"] }\n'
        'path "database/static-creds/*" { capabilities = ["read"] }\n'
    )
    # Base64 round-trip avoids quoting issues when piping HCL through sh -c.
    policy_b64 = base64.b64encode(policy_hcl.encode()).decode()
    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"sh -c 'echo {policy_b64} | base64 -d | bao policy write vso-reader -'")

    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao write auth/kubernetes/role/vso "
        f"bound_service_account_names=default "
        f"bound_service_account_namespaces=ory,devtools,storage,lasuite,matrix,media,data,monitoring "
        f"policies=vso-reader "
        f"ttl=1h")

    # Sol agent policy — read/write access to sol-tokens/* for user impersonation PATs
    ok("Configuring Kubernetes auth for Sol agent...")
    sol_policy_hcl = (
        'path "secret/data/sol-tokens/*" { capabilities = ["create", "read", "update", "delete"] }\n'
        'path "secret/metadata/sol-tokens/*" { capabilities = ["read", "delete", "list"] }\n'
    )
    sol_policy_b64 = base64.b64encode(sol_policy_hcl.encode()).decode()
    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"sh -c 'echo {sol_policy_b64} | base64 -d | bao policy write sol-agent -'")

    bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
        f"bao write auth/kubernetes/role/sol-agent "
        f"bound_service_account_names=default "
        f"bound_service_account_namespaces=matrix "
        f"policies=sol-agent "
        f"ttl=1h")

    # Credentials surfaced to cmd_seed; "_"-prefixed entries are popped
    # by the caller and never mirrored into K8s secrets.
    return {
        "hydra-system-secret": hydra["system-secret"],
        "hydra-cookie-secret": hydra["cookie-secret"],
        "hydra-pairwise-salt": hydra["pairwise-salt"],
        "kratos-secrets-default": kratos["secrets-default"],
        "kratos-secrets-cookie": kratos["secrets-cookie"],
        "s3-access-key": seaweedfs["access-key"],
        "s3-secret-key": seaweedfs["secret-key"],
        "gitea-admin-password": gitea["admin-password"],
        "hive-oidc-client-id": hive["oidc-client-id"],
        "hive-oidc-client-secret": hive["oidc-client-secret"],
        "people-django-secret": people["django-secret-key"],
        "livekit-api-key": livekit["api-key"],
        "livekit-api-secret": livekit["api-secret"],
        "kratos-admin-cookie-secret": kratos_admin["cookie-secret"],
        "messages-dkim-public-key": messages.get("dkim-public-key", ""),
        "_ob_pod": ob_pod,
        "_root_token": root_token,
    }
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# Database secrets engine
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def _configure_db_engine(ob_pod, root_token, pg_user, pg_pass):
    """Enable OpenBao database secrets engine and create PostgreSQL static roles.

    Static roles cause OpenBao to immediately set (and later rotate) each service
    user's password via ALTER USER, eliminating hardcoded DB passwords.
    Idempotent: bao write overwrites existing config/roles safely.

    The `vault` PG user is created here (if absent) and used as the DB engine
    connection user. pg_user/pg_pass (the CNPG superuser) are kept for potential
    future use but are no longer used for the connection URL.

    Args:
        ob_pod: Name of the OpenBao server pod in namespace "data".
        root_token: OpenBao root token used for all bao CLI calls.
        pg_user: CNPG superuser name (currently unused; see above).
        pg_pass: CNPG superuser password (currently unused; see above).

    Raises:
        RuntimeError: When a checked bao command or any psql command fails,
            or when no CNPG primary pod can be found.
    """
    ok("Configuring OpenBao database secrets engine...")
    pg_rw = "postgres-rw.data.svc.cluster.local:5432"
    bao_env = f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}'"

    def bao(cmd, check=True):
        # Exec a shell command inside the OpenBao container; raise on
        # non-zero exit unless check=False.
        r = subprocess.run(
            ["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
             "--", "sh", "-c", cmd],
            capture_output=True, text=True,
        )
        if check and r.returncode != 0:
            raise RuntimeError(f"bao command failed (exit {r.returncode}):\n{r.stderr.strip()}")
        return r.stdout.strip()

    # Enable database secrets engine -- tolerate "already enabled" error via || true.
    bao(f"{bao_env} bao secrets enable database 2>/dev/null || true", check=False)

    # -- vault PG user setup ---------------------------------------------------
    # Locate the CNPG primary pod for psql exec (peer auth -- no password needed).
    cnpg_pod = kube_out(
        "-n", "data", "get", "pods",
        "-l=cnpg.io/cluster=postgres,role=primary",
        "-o=jsonpath={.items[0].metadata.name}",
    )
    if not cnpg_pod:
        raise RuntimeError("Could not find CNPG primary pod for vault user setup.")

    def psql(sql):
        # Run one SQL statement as the postgres superuser on the primary.
        r = subprocess.run(
            ["kubectl", context_arg(), "-n", "data", "exec", cnpg_pod, "-c", "postgres",
             "--", "psql", "-U", "postgres", "-c", sql],
            capture_output=True, text=True,
        )
        if r.returncode != 0:
            raise RuntimeError(f"psql failed: {r.stderr.strip()}")
        return r.stdout.strip()

    # Read existing vault pg-password from OpenBao KV, or generate a new one.
    existing_vault_pass = bao(
        f"{bao_env} bao kv get -field=pg-password secret/vault 2>/dev/null || true",
        check=False,
    )
    vault_pg_pass = existing_vault_pass.strip() if existing_vault_pass.strip() else _secrets.token_urlsafe(32)

    # Store vault pg-password in OpenBao KV (idempotent).
    bao(f"{bao_env} bao kv put secret/vault pg-password=\"{vault_pg_pass}\"")
    ok("vault KV entry written.")

    # Create vault PG user if absent, set its password, grant ADMIN OPTION on all service users.
    create_vault_sql = (
        f"DO $$ BEGIN "
        f"IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'vault') THEN "
        f"CREATE USER vault WITH LOGIN CREATEROLE; "
        f"END IF; "
        f"END $$;"
    )
    psql(create_vault_sql)
    psql(f"ALTER USER vault WITH PASSWORD '{vault_pg_pass}';")
    for user in PG_USERS:
        # ADMIN OPTION lets the vault user ALTER each role's password.
        psql(f"GRANT {user} TO vault WITH ADMIN OPTION;")
    ok("vault PG user configured with ADMIN OPTION on all service roles.")

    # -- DB engine connection config (uses vault user) -------------------------
    # First segment is deliberately NOT an f-string: {{username}}/{{password}}
    # are OpenBao connection-URL template placeholders, not Python formatting.
    conn_url = (
        "postgresql://{{username}}:{{password}}"
        f"@{pg_rw}/postgres?sslmode=disable"
    )
    bao(
        f"{bao_env} bao write database/config/cnpg-postgres"
        f" plugin_name=postgresql-database-plugin"
        f" allowed_roles='*'"
        f" connection_url='{conn_url}'"
        f" username='vault'"
        f" password='{vault_pg_pass}'"
    )
    ok("DB engine connection configured (vault user).")

    # Encode the rotation statement to avoid shell quoting issues with inner quotes.
    rotation_b64 = base64.b64encode(
        b"ALTER USER \"{{name}}\" WITH PASSWORD '{{password}}';"
    ).decode()

    for user in PG_USERS:
        bao(
            f"{bao_env} sh -c '"
            f"bao write database/static-roles/{user}"
            f" db_name=cnpg-postgres"
            f" username={user}"
            f" rotation_period=86400"
            f" \"rotation_statements=$(echo {rotation_b64} | base64 -d)\"'"
        )
        ok(f" static-role/{user}")

    ok("Database secrets engine configured.")
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# cmd_seed — main entry point
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
@contextmanager
def _kratos_admin_pf(local_port=14434):
    """Port-forward directly to the Kratos admin API.

    Yields the local base URL of the forwarded admin service; the
    kubectl port-forward process is terminated when the context exits.
    """
    forward_cmd = [
        "kubectl", context_arg(), "-n", "ory", "port-forward",
        "svc/kratos-admin", f"{local_port}:80",
    ]
    pf_proc = subprocess.Popen(
        forward_cmd,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    # Give kubectl a moment to establish the tunnel before yielding.
    time.sleep(1.5)
    try:
        yield f"http://localhost:{local_port}"
    finally:
        pf_proc.terminate()
        pf_proc.wait()
|
|
||||||
|
|
||||||
|
|
||||||
def _kratos_api(base, path, method="GET", body=None):
    """Call the Kratos admin API and return its decoded JSON response.

    Args:
        base: Base URL of the port-forwarded admin service.
        path: Request path, appended after the ``/admin`` prefix.
        method: HTTP verb to use.
        body: Optional JSON-serializable request payload.

    Returns:
        The parsed JSON response, or None when the response body is empty.

    Raises:
        RuntimeError: On any HTTP error status, including the server's text.
    """
    url = f"{base}/admin{path}"
    if body is None:
        payload = None
    else:
        payload = json.dumps(body).encode()
    request = urllib.request.Request(
        url,
        data=payload,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
        method=method,
    )
    try:
        with urllib.request.urlopen(request) as resp:
            raw = resp.read()
            if not raw:
                return None
            return json.loads(raw)
    except urllib.error.HTTPError as e:
        raise RuntimeError(f"Kratos API {method} {url} → {e.code}: {e.read().decode()}")
|
|
||||||
|
|
||||||
|
|
||||||
def _seed_kratos_admin_identity(ob_pod: str, root_token: str) -> tuple[str, str]:
    """Ensure estudio-admin@<domain> exists in Kratos and is the only admin identity.

    Returns (recovery_link, recovery_code), or ("", "") if Kratos is unreachable.
    Idempotent: if the identity already exists, skips creation and just returns
    a fresh recovery link+code.

    Args:
        ob_pod: OpenBao server pod name (for the KV patch below).
        root_token: OpenBao root token used for the KV patch.
    """
    domain = get_domain()
    admin_email = f"{ADMIN_USERNAME}@{domain}"

    ok(f"Ensuring Kratos admin identity ({admin_email})...")
    try:
        with _kratos_admin_pf() as base:
            # Check if the identity already exists by searching by email
            result = _kratos_api(base, f"/identities?credentials_identifier={admin_email}&page_size=1")
            existing = result[0] if isinstance(result, list) and result else None

            if existing:
                identity_id = existing["id"]
                ok(f" admin identity exists ({identity_id[:8]}...)")
            else:
                identity = _kratos_api(base, "/identities", method="POST", body={
                    "schema_id": "employee",
                    "traits": {"email": admin_email},
                    "state": "active",
                })
                identity_id = identity["id"]
                ok(f" created admin identity ({identity_id[:8]}...)")

            # Generate fresh recovery code + link
            recovery = _kratos_api(base, "/recovery/code", method="POST", body={
                "identity_id": identity_id,
                "expires_in": "24h",
            })
            recovery_link = recovery.get("recovery_link", "") if recovery else ""
            recovery_code = recovery.get("recovery_code", "") if recovery else ""
    except Exception as exc:
        # Broad catch is deliberate: Kratos may simply not be up yet and
        # seeding is best-effort; the caller can re-run later.
        warn(f"Could not seed Kratos admin identity (Kratos may not be ready): {exc}")
        return ("", "")

    # Update admin-identity-ids in OpenBao KV so kratos-admin-ui enforces access
    bao_env = f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}'"

    def _bao(cmd):
        # Exec a shell command inside the OpenBao container (result ignored).
        return subprocess.run(
            ["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
             "--", "sh", "-c", cmd],
            capture_output=True, text=True,
        )

    _bao(f"{bao_env} bao kv patch secret/kratos-admin admin-identity-ids=\"{admin_email}\"")
    ok(f" ADMIN_IDENTITY_IDS set to {admin_email}")
    return (recovery_link, recovery_code)
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_seed() -> dict:
    """Seed OpenBao KV with crypto-random credentials, then mirror to K8s Secrets.

    Returns a dict of credentials for use by callers (gitea admin pass, etc.).
    Idempotent: reads existing OpenBao values before generating; never rotates.

    Sequence (order matters — later steps depend on earlier resources):
      1. Seed/read credentials in OpenBao via _seed_openbao().
      2. Wait for the CNPG postgres cluster, then ensure roles + databases.
      3. Configure the OpenBao database secrets engine.
      4. Mirror credentials into K8s Secrets per namespace.
      5. Sync the Gitea admin password into Gitea's own DB.
      6. Seed the Kratos admin identity and print recovery link/code.
      7. Print the DKIM DNS record if a public key was generated.
    """
    step("Seeding secrets...")

    creds = _seed_openbao()

    # Internal plumbing values are popped so they never leak to callers.
    ob_pod = creds.pop("_ob_pod", "")
    root_token = creds.pop("_root_token", "")

    # Pull out the individual credentials mirrored into K8s Secrets below.
    s3_access_key = creds.get("s3-access-key", "")
    s3_secret_key = creds.get("s3-secret-key", "")
    hydra_system = creds.get("hydra-system-secret", "")
    hydra_cookie = creds.get("hydra-cookie-secret", "")
    hydra_pairwise = creds.get("hydra-pairwise-salt", "")
    kratos_secrets_default = creds.get("kratos-secrets-default", "")
    kratos_secrets_cookie = creds.get("kratos-secrets-cookie", "")
    hive_oidc_id = creds.get("hive-oidc-client-id", "hive-local")
    hive_oidc_sec = creds.get("hive-oidc-client-secret", "")
    django_secret = creds.get("people-django-secret", "")
    gitea_admin_pass = creds.get("gitea-admin-password", "")

    # Poll the CNPG cluster phase for up to 5 minutes (60 * 5s).
    ok("Waiting for postgres cluster...")
    pg_pod = ""
    for _ in range(60):
        phase = kube_out("-n", "data", "get", "cluster", "postgres",
                         "-o=jsonpath={.status.phase}")
        if phase == "Cluster in healthy state":
            pg_pod = kube_out("-n", "data", "get", "pods",
                              "-l=cnpg.io/cluster=postgres,role=primary",
                              "-o=jsonpath={.items[0].metadata.name}")
            ok(f"Postgres ready ({pg_pod}).")
            break
        time.sleep(5)
    else:
        # for/else: only reached when the loop never broke (never healthy).
        warn("Postgres not ready after 5 min -- continuing anyway.")

    if pg_pod:
        ok("Ensuring postgres roles and databases exist...")
        # Map role name -> database name; anything missing falls back to
        # "{user}_db" below.
        db_map = {
            "kratos": "kratos_db", "hydra": "hydra_db", "gitea": "gitea_db",
            "hive": "hive_db", "docs": "docs_db", "meet": "meet_db",
            "drive": "drive_db", "messages": "messages_db",
            "conversations": "conversations_db",
            "people": "people_db", "find": "find_db",
            "calendars": "calendars_db", "projects": "projects_db",
        }
        for user in PG_USERS:
            # Only CREATE if missing -- passwords are managed by OpenBao static roles.
            ensure_sql = (
                f"DO $$ BEGIN "
                f"IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname='{user}') "
                f"THEN EXECUTE 'CREATE USER {user}'; END IF; END $$;"
            )
            kube("exec", "-n", "data", pg_pod, "-c", "postgres", "--",
                 "psql", "-U", "postgres", "-c", ensure_sql, check=False)
            db = db_map.get(user, f"{user}_db")
            # check=False: CREATE DATABASE fails harmlessly if it exists.
            kube("exec", "-n", "data", pg_pod, "-c", "postgres", "--",
                 "psql", "-U", "postgres", "-c",
                 f"CREATE DATABASE {db} OWNER {user};", check=False)

    # Read CNPG superuser credentials and configure database secrets engine.
    # CNPG creates secret named "{cluster}-app" (not "{cluster}-superuser")
    # when owner is specified without an explicit secret field.
    pg_user_b64 = kube_out("-n", "data", "get", "secret", "postgres-app",
                           "-o=jsonpath={.data.username}")
    pg_pass_b64 = kube_out("-n", "data", "get", "secret", "postgres-app",
                           "-o=jsonpath={.data.password}")
    pg_user = base64.b64decode(pg_user_b64).decode() if pg_user_b64 else "postgres"
    pg_pass = base64.b64decode(pg_pass_b64).decode() if pg_pass_b64 else ""

    if ob_pod and root_token and pg_pass:
        try:
            _configure_db_engine(ob_pod, root_token, pg_user, pg_pass)
        except Exception as exc:
            # Best-effort: seeding continues even if the engine config fails.
            warn(f"DB engine config failed: {exc}")
    else:
        warn("Skipping DB engine config -- missing ob_pod, root_token, or pg_pass.")

    ok("Creating K8s secrets (VSO will overwrite on next sync)...")

    ensure_ns("ory")
    # Hydra app secrets -- DSN comes from VaultDynamicSecret hydra-db-creds.
    create_secret("ory", "hydra",
                  secretsSystem=hydra_system,
                  secretsCookie=hydra_cookie,
                  **{"pairwise-salt": hydra_pairwise},
                  )
    # Kratos non-rotating encryption keys -- DSN comes from VaultDynamicSecret kratos-db-creds.
    create_secret("ory", "kratos-app-secrets",
                  secretsDefault=kratos_secrets_default,
                  secretsCookie=kratos_secrets_cookie,
                  )

    ensure_ns("devtools")
    # gitea-db-credentials comes from VaultDynamicSecret (static-creds/gitea).
    create_secret("devtools", "gitea-s3-credentials",
                  **{"access-key": s3_access_key, "secret-key": s3_secret_key})
    create_secret("devtools", "gitea-admin-credentials",
                  username=GITEA_ADMIN_USER, password=gitea_admin_pass)

    # Sync Gitea admin password to Gitea's own DB (Gitea's existingSecret only
    # applies on first run — subsequent K8s secret updates are not picked up
    # automatically by Gitea).
    if gitea_admin_pass:
        gitea_pod = kube_out(
            "-n", "devtools", "get", "pods",
            "-l=app.kubernetes.io/name=gitea",
            "-o=jsonpath={.items[0].metadata.name}",
        )
        if gitea_pod:
            r = subprocess.run(
                ["kubectl", context_arg(), "-n", "devtools", "exec", gitea_pod,
                 "--", "gitea", "admin", "user", "change-password",
                 "--username", GITEA_ADMIN_USER, "--password", gitea_admin_pass,
                 "--must-change-password=false"],
                capture_output=True, text=True,
            )
            if r.returncode == 0:
                ok(f"Gitea admin password synced to Gitea DB.")
            else:
                warn(f"Could not sync Gitea admin password: {r.stderr.strip()}")
        else:
            warn("Gitea pod not found — admin password NOT synced to Gitea DB. Run seed again after Gitea is deployed.")

    ensure_ns("storage")
    # SeaweedFS expects its identities as a literal JSON config blob.
    s3_json = (
        '{"identities":[{"name":"seaweed","credentials":[{"accessKey":"'
        + s3_access_key + '","secretKey":"' + s3_secret_key
        + '"}],"actions":["Admin","Read","Write","List","Tagging"]}]}'
    )
    create_secret("storage", "seaweedfs-s3-credentials",
                  S3_ACCESS_KEY=s3_access_key, S3_SECRET_KEY=s3_secret_key)
    create_secret("storage", "seaweedfs-s3-json", **{"s3.json": s3_json})

    ensure_ns("lasuite")
    create_secret("lasuite", "seaweedfs-s3-credentials",
                  S3_ACCESS_KEY=s3_access_key, S3_SECRET_KEY=s3_secret_key)
    # hive-db-url and people-db-credentials come from VaultDynamicSecrets.
    create_secret("lasuite", "hive-oidc",
                  **{"client-id": hive_oidc_id, "client-secret": hive_oidc_sec})
    create_secret("lasuite", "people-django-secret",
                  DJANGO_SECRET_KEY=django_secret)

    # Namespaces created even when no secrets are mirrored into them yet.
    ensure_ns("matrix")

    ensure_ns("media")
    ensure_ns("monitoring")

    # Ensure the Kratos admin identity exists and ADMIN_IDENTITY_IDS is set.
    # This runs after all other secrets are in place (Kratos must be up).
    recovery_link, recovery_code = _seed_kratos_admin_identity(ob_pod, root_token)
    if recovery_link:
        ok("Admin recovery link (valid 24h):")
        print(f"    {recovery_link}")
    if recovery_code:
        ok("Admin recovery code (enter on the page above):")
        print(f"    {recovery_code}")

    dkim_pub = creds.get("messages-dkim-public-key", "")
    if dkim_pub:
        # Strip the PEM armor and whitespace to get the bare base64 key for DNS.
        b64_key = "".join(
            dkim_pub.replace("-----BEGIN PUBLIC KEY-----", "")
            .replace("-----END PUBLIC KEY-----", "")
            .split()
        )
        domain = get_domain()
        ok("DKIM DNS record (add to DNS at your registrar):")
        print(f"  default._domainkey.{domain} TXT \"v=DKIM1; k=rsa; p={b64_key}\"")

    ok("All secrets seeded.")
    return creds
|
|
||||||
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# cmd_verify — VSO E2E verification
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
def cmd_verify() -> None:
    """End-to-end test of VSO -> OpenBao integration.

    1. Writes a random value to OpenBao KV at secret/vso-test.
    2. Creates a VaultAuth + VaultStaticSecret in the 'ory' namespace
       (already bound to the 'vso' Kubernetes auth role).
    3. Polls until VSO syncs the K8s Secret (up to 60s).
    4. Reads and base64-decodes the K8s Secret; compares to the expected value.
    5. Cleans up all test resources in a finally block.
    """
    step("Verifying VSO -> OpenBao integration (E2E)...")

    # Locate the OpenBao server pod; everything below shells into it.
    ob_pod = kube_out(
        "-n", "data", "get", "pods",
        "-l=app.kubernetes.io/name=openbao,component=server",
        "-o=jsonpath={.items[0].metadata.name}",
    )
    if not ob_pod:
        die("OpenBao pod not found -- run full bring-up first.")

    # Root token lives base64-encoded in the openbao-keys K8s secret.
    root_token_enc = kube_out(
        "-n", "data", "get", "secret", "openbao-keys",
        "-o=jsonpath={.data.root-token}",
    )
    if not root_token_enc:
        die("Could not read openbao-keys secret.")
    root_token = base64.b64decode(root_token_enc).decode()

    # Env prefix for every `bao` invocation inside the pod.
    bao_env = f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}'"

    def bao(cmd: str, *, check: bool = True) -> str:
        """Run a shell command inside the OpenBao container; return stdout.

        Raises RuntimeError on non-zero exit unless check=False.
        """
        r = subprocess.run(
            ["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
             "--", "sh", "-c", cmd],
            capture_output=True, text=True,
        )
        if check and r.returncode != 0:
            raise RuntimeError(f"bao failed (exit {r.returncode}): {r.stderr.strip()}")
        return r.stdout.strip()

    # Random sentinel proves the round-trip is live, not a stale secret.
    test_value = _secrets.token_urlsafe(16)
    test_ns = "ory"
    test_name = "vso-verify"

    def cleanup():
        """Delete all test resources; every step is best-effort (check=False)."""
        ok("Cleaning up test resources...")
        kube("delete", "vaultstaticsecret", test_name, f"-n={test_ns}",
             "--ignore-not-found", check=False)
        kube("delete", "vaultauth", test_name, f"-n={test_ns}",
             "--ignore-not-found", check=False)
        kube("delete", "secret", test_name, f"-n={test_ns}",
             "--ignore-not-found", check=False)
        bao(f"{bao_env} bao kv delete secret/vso-test 2>/dev/null || true", check=False)

    try:
        # 1. Write test value to OpenBao KV
        ok(f"Writing test sentinel to OpenBao secret/vso-test ...")
        bao(f"{bao_env} bao kv put secret/vso-test test-key='{test_value}'")

        # 2. Create VaultAuth in ory (already in vso role's bound namespaces)
        ok(f"Creating VaultAuth {test_ns}/{test_name} ...")
        kube_apply(f"""
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultAuth
metadata:
  name: {test_name}
  namespace: {test_ns}
spec:
  method: kubernetes
  mount: kubernetes
  kubernetes:
    role: vso
    serviceAccount: default
""")

        # 3. Create VaultStaticSecret pointing at our test KV path
        ok(f"Creating VaultStaticSecret {test_ns}/{test_name} ...")
        kube_apply(f"""
apiVersion: secrets.hashicorp.com/v1beta1
kind: VaultStaticSecret
metadata:
  name: {test_name}
  namespace: {test_ns}
spec:
  vaultAuthRef: {test_name}
  mount: secret
  type: kv-v2
  path: vso-test
  refreshAfter: 10s
  destination:
    name: {test_name}
    create: true
    overwrite: true
""")

        # 4. Poll until VSO sets secretMAC (= synced)
        ok("Waiting for VSO to sync (up to 60s) ...")
        deadline = time.time() + 60
        synced = False
        while time.time() < deadline:
            mac = kube_out(
                "get", "vaultstaticsecret", test_name, f"-n={test_ns}",
                "-o=jsonpath={.status.secretMAC}", "--ignore-not-found",
            )
            if mac and mac not in ("<none>", ""):
                synced = True
                break
            time.sleep(3)

        if not synced:
            # Surface VSO's own condition message in the failure report.
            msg = kube_out(
                "get", "vaultstaticsecret", test_name, f"-n={test_ns}",
                "-o=jsonpath={.status.conditions[0].message}", "--ignore-not-found",
            )
            raise RuntimeError(f"VSO did not sync within 60s. Last status: {msg or 'unknown'}")

        # 5. Read and verify the K8s Secret value
        ok("Verifying K8s Secret contents ...")
        raw = kube_out(
            "get", "secret", test_name, f"-n={test_ns}",
            "-o=jsonpath={.data.test-key}", "--ignore-not-found",
        )
        if not raw:
            raise RuntimeError(
                f"K8s Secret {test_ns}/{test_name} not found or missing key 'test-key'."
            )
        actual = base64.b64decode(raw).decode()
        if actual != test_value:
            raise RuntimeError(
                f"Value mismatch!\n  expected: {test_value!r}\n  got:      {actual!r}"
            )

        ok(f"Sentinel value matches -- VSO -> OpenBao integration is working.")

    except Exception as exc:
        # Clean up even on failure, then abort with a diagnostic.
        cleanup()
        die(f"VSO verification FAILED: {exc}")

    cleanup()
    ok("VSO E2E verification passed.")
|
|
||||||
@@ -1,237 +0,0 @@
|
|||||||
"""Service management — status, logs, restart."""
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
import sunbeam.kube as _kube_mod
|
|
||||||
from sunbeam.kube import kube, kube_out, parse_target
|
|
||||||
from sunbeam.tools import ensure_tool
|
|
||||||
from sunbeam.output import step, ok, warn, die
|
|
||||||
|
|
||||||
# Namespaces whose pods are inspected by `sunbeam status`; pods elsewhere
# (e.g. kube-system) are ignored.
MANAGED_NS = ["data", "devtools", "ingress", "lasuite", "matrix", "media", "ory",
              "storage", "vault-secrets-operator"]

# (namespace, deployment) pairs restarted by `sunbeam restart`, in this order.
# These are the services that consume VSO-synced secrets and need a rollout
# to pick up new values.
SERVICES_TO_RESTART = [
    ("ory", "hydra"),
    ("ory", "kratos"),
    ("ory", "login-ui"),
    ("devtools", "gitea"),
    ("storage", "seaweedfs-filer"),
    ("lasuite", "hive"),
    ("lasuite", "people-backend"),
    ("lasuite", "people-frontend"),
    ("lasuite", "people-celery-worker"),
    ("lasuite", "people-celery-beat"),
    ("lasuite", "projects"),
    ("matrix", "tuwunel"),
    ("media", "livekit-server"),
]
|
|
||||||
|
|
||||||
|
|
||||||
def _k8s_ctx():
    """Return the kubectl --context flag matching the active environment."""
    context_flag = _kube_mod.context_arg()
    return [context_flag]
|
|
||||||
|
|
||||||
|
|
||||||
def _capture_out(cmd, *, default=""):
|
|
||||||
r = subprocess.run(cmd, capture_output=True, text=True)
|
|
||||||
return r.stdout.strip() if r.returncode == 0 else default
|
|
||||||
|
|
||||||
|
|
||||||
def _vso_sync_status():
    """Print VSO VaultStaticSecret and VaultDynamicSecret sync health.

    VSS synced = status.secretMAC is non-empty.
    VDS synced = status.lastRenewalTime is non-zero.
    """
    def _report(raw, label, missing, is_synced):
        """Print one per-namespace table of sync icons; return True iff all synced.

        raw:       kubectl custom-columns output (NS NAME STATUS-FIELD).
        label:     "VSS" or "VDS", shown in the namespace header.
        missing:   value assumed when the status column is absent.
        is_synced: predicate applied to the status column.
        """
        healthy = True
        cur_ns = None
        for line in sorted(raw.splitlines()):
            cols = line.split()
            if len(cols) < 2:
                continue
            ns, name = cols[0], cols[1]
            status = cols[2] if len(cols) > 2 else missing
            synced = is_synced(status)
            if not synced:
                healthy = False
            icon = "\u2713" if synced else "\u2717"
            if ns != cur_ns:
                print(f"  {ns} ({label}):")
                cur_ns = ns
            print(f"    {icon} {name}")
        return healthy

    step("VSO secret sync status...")

    # VaultStaticSecrets: synced when secretMAC is populated
    vss_raw = _capture_out([
        "kubectl", *_k8s_ctx(), "get", "vaultstaticsecret", "-A", "--no-headers",
        "-o=custom-columns="
        "NS:.metadata.namespace,NAME:.metadata.name,MAC:.status.secretMAC",
    ])
    all_ok = _report(vss_raw, "VSS", "",
                     lambda mac: bool(mac and mac != "<none>"))

    # VaultDynamicSecrets: synced when lastRenewalTime is non-zero
    vds_raw = _capture_out([
        "kubectl", *_k8s_ctx(), "get", "vaultdynamicsecret", "-A", "--no-headers",
        "-o=custom-columns="
        "NS:.metadata.namespace,NAME:.metadata.name,RENEWED:.status.lastRenewalTime",
    ])
    vds_ok = _report(vds_raw, "VDS", "0",
                     lambda t: t not in ("", "0", "<none>"))
    all_ok = all_ok and vds_ok

    print()
    if all_ok:
        ok("All VSO secrets synced.")
    else:
        warn("Some VSO secrets are not synced.")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_status(target: str | None):
    """Show pod health, optionally filtered by namespace or namespace/service.

    target: None for all managed namespaces, "ns" for one namespace, or
    "ns/service" for pods labelled app=<service>. Ends by printing the VSO
    secret sync status.
    """
    step("Pod health across all namespaces...")

    def _parse(raw, *, min_cols, prepend_ns=None):
        """Split kubectl --no-headers output into column lists.

        Lines with fewer than *min_cols* columns are skipped. *prepend_ns*
        is inserted as column 0 for plain `-n <ns>` output, which omits the
        namespace column.
        """
        rows = []
        for line in raw.splitlines():
            cols = line.split()
            if len(cols) < min_cols:
                continue
            rows.append(cols if prepend_ns is None else [prepend_ns] + cols)
        return rows

    ns_set = set(MANAGED_NS)

    if target is None:
        # All pods across managed namespaces. Bug fix: the field selector
        # previously contained a stray space ("!= kube-system"), which
        # corrupts the selector value passed to kubectl.
        raw = _capture_out([
            "kubectl", *_k8s_ctx(),
            "get", "pods",
            "--field-selector=metadata.namespace!=kube-system",
            "-A", "--no-headers",
        ])
        pods = [cols for cols in _parse(raw, min_cols=4) if cols[0] in ns_set]
    else:
        ns, name = parse_target(target)
        if name:
            # Specific service: namespace/service
            raw = _capture_out([
                "kubectl", *_k8s_ctx(),
                "get", "pods", "-n", ns, "-l", f"app={name}", "--no-headers",
            ])
        else:
            # Namespace only
            raw = _capture_out([
                "kubectl", *_k8s_ctx(),
                "get", "pods", "-n", ns, "--no-headers",
            ])
        pods = _parse(raw, min_cols=3, prepend_ns=ns)

    if not pods:
        warn("No pods found in managed namespaces.")
        return

    all_ok = True
    cur_ns = None
    icon_map = {"Running": "\u2713", "Completed": "\u2713", "Succeeded": "\u2713",
                "Pending": "\u25cb", "Failed": "\u2717", "Unknown": "?"}
    for cols in sorted(pods, key=lambda c: (c[0], c[1])):
        ns, name, ready, status = cols[0], cols[1], cols[2], cols[3]
        if ns != cur_ns:
            print(f"  {ns}:")
            cur_ns = ns
        icon = icon_map.get(status, "?")
        unhealthy = status not in ("Running", "Completed", "Succeeded")
        # Only check ready ratio for Running pods — Completed/Succeeded pods
        # legitimately report 0/N containers ready.
        if not unhealthy and status == "Running" and "/" in ready:
            r, t = ready.split("/")
            unhealthy = r != t
        if unhealthy:
            all_ok = False
        print(f"    {icon} {name:<50} {ready:<6} {status}")

    print()
    if all_ok:
        ok("All pods healthy.")
    else:
        warn("Some pods are not ready.")

    _vso_sync_status()
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_logs(target: str, follow: bool):
    """Stream logs for a service. Target must include service name (e.g. ory/kratos)."""
    ns, name = parse_target(target)
    if not name:
        die("Logs require a service name, e.g. 'ory/kratos'.")

    _kube_mod.ensure_tunnel()
    kubectl = str(ensure_tool("kubectl"))

    # Select pods by the app=<service> label; show the last 100 lines.
    args = [kubectl, _kube_mod.context_arg(), "-n", ns, "logs",
            "-l", f"app={name}", "--tail=100"]
    if follow:
        args.append("--follow")

    # Hand the terminal to kubectl and block until it exits.
    subprocess.Popen(args).wait()
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_get(target: str, output: str = "yaml"):
    """Print raw kubectl get output for a pod identified as ns/name.

    Usage: sunbeam get vault-secrets-operator/vault-secrets-operator-test
           sunbeam get ory/kratos-abc -o json
    """
    ns, name = parse_target(target)
    if not (ns and name):
        die("get requires namespace/name, e.g. 'sunbeam get ory/kratos-abc'")
    # Only pod lookups are supported; other resource kinds are not resolved.
    result = kube_out("get", "pod", name, "-n", ns, f"-o={output}")
    if not result:
        die(f"Pod {ns}/{name} not found.")
    print(result)
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_restart(target: str | None):
    """Restart deployments. None=all, 'ory'=namespace, 'ory/kratos'=specific."""
    step("Restarting services...")

    if target is None:
        matched = SERVICES_TO_RESTART
    else:
        ns, name = parse_target(target)
        # With a service name, match both parts; otherwise the namespace alone.
        matched = [
            (svc_ns, svc) for svc_ns, svc in SERVICES_TO_RESTART
            if svc_ns == ns and (not name or svc == name)
        ]

    if not matched:
        warn(f"No matching services for target: {target}")
        return

    for svc_ns, deployment in matched:
        kube("-n", svc_ns, "rollout", "restart", f"deployment/{deployment}",
             check=False)
    ok("Done.")
|
|
||||||
1011
sunbeam/src/cli.rs
1011
sunbeam/src/cli.rs
File diff suppressed because it is too large
Load Diff
@@ -1,39 +0,0 @@
|
|||||||
mod cli;
|
|
||||||
|
|
||||||
#[tokio::main]
|
|
||||||
async fn main() {
|
|
||||||
// Install rustls crypto provider (ring) before any TLS operations.
|
|
||||||
rustls::crypto::ring::default_provider()
|
|
||||||
.install_default()
|
|
||||||
.expect("Failed to install rustls crypto provider");
|
|
||||||
|
|
||||||
// Initialize tracing subscriber.
|
|
||||||
// Respects RUST_LOG env var (e.g. RUST_LOG=debug, RUST_LOG=sunbeam=trace).
|
|
||||||
// Default: warn for dependencies, info for sunbeam + sunbeam_sdk.
|
|
||||||
tracing_subscriber::fmt()
|
|
||||||
.with_env_filter(
|
|
||||||
tracing_subscriber::EnvFilter::try_from_default_env().unwrap_or_else(|_| {
|
|
||||||
tracing_subscriber::EnvFilter::new("sunbeam=info,sunbeam_sdk=info,warn")
|
|
||||||
}),
|
|
||||||
)
|
|
||||||
.with_target(false)
|
|
||||||
.with_writer(std::io::stderr)
|
|
||||||
.init();
|
|
||||||
|
|
||||||
match cli::dispatch().await {
|
|
||||||
Ok(()) => {}
|
|
||||||
Err(e) => {
|
|
||||||
let code = e.exit_code();
|
|
||||||
tracing::error!("{e}");
|
|
||||||
|
|
||||||
// Print source chain for non-trivial errors
|
|
||||||
let mut source = std::error::Error::source(&e);
|
|
||||||
while let Some(cause) = source {
|
|
||||||
tracing::debug!("caused by: {cause}");
|
|
||||||
source = std::error::Error::source(cause);
|
|
||||||
}
|
|
||||||
|
|
||||||
std::process::exit(code);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,317 +0,0 @@
|
|||||||
"""Tests for checks.py — service-level health probes."""
|
|
||||||
import json
|
|
||||||
import unittest
|
|
||||||
from unittest.mock import MagicMock, patch
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckGiteaVersion(unittest.TestCase):
    """check_gitea_version: healthy response, HTTP failure, network failure."""

    def test_ok_returns_version(self):
        payload = json.dumps({"version": "1.21.0"}).encode()
        with patch("sunbeam.checks._http_get", return_value=(200, payload)):
            from sunbeam import checks
            result = checks.check_gitea_version("testdomain", None)
            self.assertTrue(result.passed)
            self.assertIn("1.21.0", result.detail)

    def test_non_200_fails(self):
        with patch("sunbeam.checks._http_get", return_value=(502, b"")):
            from sunbeam import checks
            result = checks.check_gitea_version("testdomain", None)
            self.assertFalse(result.passed)
            self.assertIn("502", result.detail)

    def test_connection_error_fails(self):
        import urllib.error
        refused = urllib.error.URLError("refused")
        with patch("sunbeam.checks._http_get", side_effect=refused):
            from sunbeam import checks
            result = checks.check_gitea_version("testdomain", None)
            self.assertFalse(result.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckGiteaAuth(unittest.TestCase):
    """check_gitea_auth: mocked K8s secret lookup + authenticated HTTP probe."""

    def _secret(self, key, val):
        # Build a _kube_secret side-effect: *val* for *key*, admin login otherwise.
        def side_effect(ns, name, k):
            return val if k == key else "gitea_admin"
        return side_effect

    def test_ok_returns_login(self):
        payload = json.dumps({"login": "gitea_admin"}).encode()
        with patch("sunbeam.checks._kube_secret",
                   side_effect=self._secret("admin-password", "hunter2")), \
             patch("sunbeam.checks._http_get", return_value=(200, payload)):
            from sunbeam import checks
            result = checks.check_gitea_auth("testdomain", None)
            self.assertTrue(result.passed)
            self.assertIn("gitea_admin", result.detail)

    def test_missing_password_fails(self):
        with patch("sunbeam.checks._kube_secret", return_value=""):
            from sunbeam import checks
            result = checks.check_gitea_auth("testdomain", None)
            self.assertFalse(result.passed)
            self.assertIn("secret", result.detail)

    def test_non_200_fails(self):
        with patch("sunbeam.checks._kube_secret",
                   side_effect=self._secret("admin-password", "hunter2")), \
             patch("sunbeam.checks._http_get", return_value=(401, b"")):
            from sunbeam import checks
            result = checks.check_gitea_auth("testdomain", None)
            self.assertFalse(result.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckPostgres(unittest.TestCase):
    """check_postgres: mocked kube_out returning ready/total instance counts."""

    def test_ready_passes(self):
        with patch("sunbeam.checks.kube_out", side_effect=["1", "1"]):
            from sunbeam import checks
            result = checks.check_postgres("testdomain", None)
            self.assertTrue(result.passed)
            self.assertIn("1/1", result.detail)

    def test_not_ready_fails(self):
        with patch("sunbeam.checks.kube_out", side_effect=["0", "1"]):
            from sunbeam import checks
            result = checks.check_postgres("testdomain", None)
            self.assertFalse(result.passed)

    def test_cluster_not_found_fails(self):
        with patch("sunbeam.checks.kube_out", return_value=""):
            from sunbeam import checks
            result = checks.check_postgres("testdomain", None)
            self.assertFalse(result.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckValkey(unittest.TestCase):
    """check_valkey: pod discovery via kube_out, PING probe via kube_exec."""

    def test_pong_passes(self):
        with patch("sunbeam.checks.kube_out", return_value="valkey-abc"), \
             patch("sunbeam.checks.kube_exec", return_value=(0, "PONG")):
            from sunbeam import checks
            result = checks.check_valkey("testdomain", None)
            self.assertTrue(result.passed)

    def test_no_pod_fails(self):
        with patch("sunbeam.checks.kube_out", return_value=""):
            from sunbeam import checks
            result = checks.check_valkey("testdomain", None)
            self.assertFalse(result.passed)

    def test_no_pong_fails(self):
        with patch("sunbeam.checks.kube_out", return_value="valkey-abc"), \
             patch("sunbeam.checks.kube_exec", return_value=(1, "")):
            from sunbeam import checks
            result = checks.check_valkey("testdomain", None)
            self.assertFalse(result.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckOpenbao(unittest.TestCase):
    """check_openbao: seal status JSON fetched via kube_exec."""

    def test_unsealed_passes(self):
        status = json.dumps({"initialized": True, "sealed": False})
        with patch("sunbeam.checks.kube_exec", return_value=(0, status)):
            from sunbeam import checks
            result = checks.check_openbao("testdomain", None)
            self.assertTrue(result.passed)

    def test_sealed_fails(self):
        status = json.dumps({"initialized": True, "sealed": True})
        with patch("sunbeam.checks.kube_exec", return_value=(2, status)):
            from sunbeam import checks
            result = checks.check_openbao("testdomain", None)
            self.assertFalse(result.passed)

    def test_no_output_fails(self):
        with patch("sunbeam.checks.kube_exec", return_value=(1, "")):
            from sunbeam import checks
            result = checks.check_openbao("testdomain", None)
            self.assertFalse(result.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckSeaweedfs(unittest.TestCase):
    """check_seaweedfs: S3 probe behavior across credential and HTTP outcomes."""

    def _with_creds(self, http_result=None, http_error=None):
        """Helper: patch both _kube_secret (returns creds) and _http_get."""
        # NOTE(review): this helper builds a patch list but none of the tests
        # below use it — they construct their patches inline. Candidate for
        # removal or adoption; confirm nothing outside this class calls it.
        def secret_side_effect(ns, name, key):
            # Distinguish the access key from every other secret key.
            return "testkey" if key == "S3_ACCESS_KEY" else "testsecret"

        patches = [
            patch("sunbeam.checks._kube_secret", side_effect=secret_side_effect),
        ]
        if http_error:
            patches.append(patch("sunbeam.checks._http_get", side_effect=http_error))
        else:
            patches.append(patch("sunbeam.checks._http_get", return_value=http_result))
        return patches

    def test_200_authenticated_passes(self):
        # Credentials present and endpoint returns 200 => probe passes.
        with patch("sunbeam.checks._kube_secret", return_value="val"), \
             patch("sunbeam.checks._http_get", return_value=(200, b"")):
            from sunbeam import checks
            r = checks.check_seaweedfs("testdomain", None)
            self.assertTrue(r.passed)
            self.assertIn("authenticated", r.detail)

    def test_missing_credentials_fails(self):
        # Empty secret lookup => fails before any HTTP request.
        with patch("sunbeam.checks._kube_secret", return_value=""):
            from sunbeam import checks
            r = checks.check_seaweedfs("testdomain", None)
            self.assertFalse(r.passed)
            self.assertIn("secret", r.detail)

    def test_403_bad_credentials_fails(self):
        # 403 => the endpoint rejected the supplied credentials.
        with patch("sunbeam.checks._kube_secret", return_value="val"), \
             patch("sunbeam.checks._http_get", return_value=(403, b"")):
            from sunbeam import checks
            r = checks.check_seaweedfs("testdomain", None)
            self.assertFalse(r.passed)
            self.assertIn("403", r.detail)

    def test_502_fails(self):
        # Gateway error => service unreachable behind the ingress.
        with patch("sunbeam.checks._kube_secret", return_value="val"), \
             patch("sunbeam.checks._http_get", return_value=(502, b"")):
            from sunbeam import checks
            r = checks.check_seaweedfs("testdomain", None)
            self.assertFalse(r.passed)

    def test_connection_error_fails(self):
        # Network-level failure surfaces as a failed probe, not an exception.
        import urllib.error
        with patch("sunbeam.checks._kube_secret", return_value="val"), \
             patch("sunbeam.checks._http_get",
                   side_effect=urllib.error.URLError("refused")):
            from sunbeam import checks
            r = checks.check_seaweedfs("testdomain", None)
            self.assertFalse(r.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckKratos(unittest.TestCase):
|
|
||||||
def test_200_passes(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(200, b"")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_kratos("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
|
|
||||||
def test_503_fails(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(503, b"not ready")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_kratos("testdomain", None)
|
|
||||||
self.assertFalse(r.passed)
|
|
||||||
self.assertIn("503", r.detail)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckHydraOidc(unittest.TestCase):
|
|
||||||
def test_200_with_issuer_passes(self):
|
|
||||||
body = json.dumps({"issuer": "https://auth.testdomain/"}).encode()
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(200, body)):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_hydra_oidc("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
self.assertIn("testdomain", r.detail)
|
|
||||||
|
|
||||||
def test_502_fails(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(502, b"")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_hydra_oidc("testdomain", None)
|
|
||||||
self.assertFalse(r.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckPeople(unittest.TestCase):
|
|
||||||
def test_200_passes(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(200, b"<html>")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_people("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
|
|
||||||
def test_302_redirect_passes(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(302, b"")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_people("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
self.assertIn("302", r.detail)
|
|
||||||
|
|
||||||
def test_502_fails(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(502, b"")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_people("testdomain", None)
|
|
||||||
self.assertFalse(r.passed)
|
|
||||||
self.assertIn("502", r.detail)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckPeopleApi(unittest.TestCase):
|
|
||||||
def test_200_passes(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(200, b"{}")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_people_api("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
|
|
||||||
def test_401_auth_required_passes(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(401, b"")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_people_api("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
|
|
||||||
def test_502_fails(self):
|
|
||||||
with patch("sunbeam.checks._http_get", return_value=(502, b"")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_people_api("testdomain", None)
|
|
||||||
self.assertFalse(r.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCheckLivekit(unittest.TestCase):
|
|
||||||
def test_responding_passes(self):
|
|
||||||
with patch("sunbeam.checks.kube_out", return_value="livekit-server-abc"):
|
|
||||||
with patch("sunbeam.checks.kube_exec", return_value=(0, "")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_livekit("testdomain", None)
|
|
||||||
self.assertTrue(r.passed)
|
|
||||||
|
|
||||||
def test_no_pod_fails(self):
|
|
||||||
with patch("sunbeam.checks.kube_out", return_value=""):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_livekit("testdomain", None)
|
|
||||||
self.assertFalse(r.passed)
|
|
||||||
|
|
||||||
def test_exec_fails(self):
|
|
||||||
with patch("sunbeam.checks.kube_out", return_value="livekit-server-abc"):
|
|
||||||
with patch("sunbeam.checks.kube_exec", return_value=(1, "")):
|
|
||||||
from sunbeam import checks
|
|
||||||
r = checks.check_livekit("testdomain", None)
|
|
||||||
self.assertFalse(r.passed)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCmdCheck(unittest.TestCase):
|
|
||||||
def _run(self, target, mock_list):
|
|
||||||
from sunbeam import checks
|
|
||||||
result = checks.CheckResult("x", "ns", "svc", True, "ok")
|
|
||||||
fns = [MagicMock(return_value=result) for _ in mock_list]
|
|
||||||
patched = list(zip(fns, [ns for _, ns, _ in mock_list], [s for _, _, s in mock_list]))
|
|
||||||
with patch("sunbeam.checks.get_domain", return_value="td"), \
|
|
||||||
patch("sunbeam.checks._ssl_ctx", return_value=None), \
|
|
||||||
patch("sunbeam.checks._opener", return_value=None), \
|
|
||||||
patch.object(checks, "CHECKS", patched):
|
|
||||||
checks.cmd_check(target)
|
|
||||||
return fns
|
|
||||||
|
|
||||||
def test_no_target_runs_all(self):
|
|
||||||
mock_list = [("unused", "devtools", "gitea"), ("unused", "data", "postgres")]
|
|
||||||
fns = self._run(None, mock_list)
|
|
||||||
fns[0].assert_called_once_with("td", None)
|
|
||||||
fns[1].assert_called_once_with("td", None)
|
|
||||||
|
|
||||||
def test_ns_filter_skips_other_namespaces(self):
|
|
||||||
mock_list = [("unused", "devtools", "gitea"), ("unused", "data", "postgres")]
|
|
||||||
fns = self._run("devtools", mock_list)
|
|
||||||
fns[0].assert_called_once()
|
|
||||||
fns[1].assert_not_called()
|
|
||||||
|
|
||||||
def test_svc_filter(self):
|
|
||||||
mock_list = [("unused", "ory", "kratos"), ("unused", "ory", "hydra")]
|
|
||||||
fns = self._run("ory/kratos", mock_list)
|
|
||||||
fns[0].assert_called_once()
|
|
||||||
fns[1].assert_not_called()
|
|
||||||
|
|
||||||
def test_no_match_warns(self):
|
|
||||||
from sunbeam import checks
|
|
||||||
with patch("sunbeam.checks.get_domain", return_value="td"), \
|
|
||||||
patch("sunbeam.checks._ssl_ctx", return_value=None), \
|
|
||||||
patch("sunbeam.checks._opener", return_value=None), \
|
|
||||||
patch.object(checks, "CHECKS", []), \
|
|
||||||
patch("sunbeam.checks.warn") as mock_warn:
|
|
||||||
checks.cmd_check("nonexistent")
|
|
||||||
mock_warn.assert_called_once()
|
|
||||||
@@ -1,850 +0,0 @@
|
|||||||
"""Tests for CLI routing and argument validation."""
|
|
||||||
import sys
|
|
||||||
import unittest
|
|
||||||
from unittest.mock import MagicMock, patch
|
|
||||||
import argparse
|
|
||||||
|
|
||||||
|
|
||||||
class TestArgParsing(unittest.TestCase):
|
|
||||||
"""Test that argparse parses arguments correctly."""
|
|
||||||
|
|
||||||
def _parse(self, argv):
|
|
||||||
"""Parse argv using the same parser as main(), return args namespace."""
|
|
||||||
parser = argparse.ArgumentParser(prog="sunbeam")
|
|
||||||
sub = parser.add_subparsers(dest="verb", metavar="verb")
|
|
||||||
sub.add_parser("up")
|
|
||||||
sub.add_parser("down")
|
|
||||||
p_status = sub.add_parser("status")
|
|
||||||
p_status.add_argument("target", nargs="?", default=None)
|
|
||||||
p_apply = sub.add_parser("apply")
|
|
||||||
p_apply.add_argument("namespace", nargs="?", default="")
|
|
||||||
p_apply.add_argument("--domain", default="")
|
|
||||||
p_apply.add_argument("--email", default="")
|
|
||||||
sub.add_parser("seed")
|
|
||||||
sub.add_parser("verify")
|
|
||||||
p_logs = sub.add_parser("logs")
|
|
||||||
p_logs.add_argument("target")
|
|
||||||
p_logs.add_argument("-f", "--follow", action="store_true")
|
|
||||||
p_get = sub.add_parser("get")
|
|
||||||
p_get.add_argument("target")
|
|
||||||
p_get.add_argument("-o", "--output", default="yaml", choices=["yaml", "json", "wide"])
|
|
||||||
p_restart = sub.add_parser("restart")
|
|
||||||
p_restart.add_argument("target", nargs="?", default=None)
|
|
||||||
p_build = sub.add_parser("build")
|
|
||||||
p_build.add_argument("what", choices=["proxy", "integration", "kratos-admin", "meet",
|
|
||||||
"docs-frontend", "people-frontend", "people",
|
|
||||||
"messages", "messages-backend", "messages-frontend",
|
|
||||||
"messages-mta-in", "messages-mta-out",
|
|
||||||
"messages-mpa", "messages-socks-proxy"])
|
|
||||||
p_build.add_argument("--push", action="store_true")
|
|
||||||
p_build.add_argument("--deploy", action="store_true")
|
|
||||||
sub.add_parser("mirror")
|
|
||||||
sub.add_parser("bootstrap")
|
|
||||||
p_check = sub.add_parser("check")
|
|
||||||
p_check.add_argument("target", nargs="?", default=None)
|
|
||||||
p_user = sub.add_parser("user")
|
|
||||||
user_sub = p_user.add_subparsers(dest="user_action")
|
|
||||||
p_user_list = user_sub.add_parser("list")
|
|
||||||
p_user_list.add_argument("--search", default="")
|
|
||||||
p_user_get = user_sub.add_parser("get")
|
|
||||||
p_user_get.add_argument("target")
|
|
||||||
p_user_create = user_sub.add_parser("create")
|
|
||||||
p_user_create.add_argument("email")
|
|
||||||
p_user_create.add_argument("--name", default="")
|
|
||||||
p_user_create.add_argument("--schema", default="default")
|
|
||||||
p_user_delete = user_sub.add_parser("delete")
|
|
||||||
p_user_delete.add_argument("target")
|
|
||||||
p_user_recover = user_sub.add_parser("recover")
|
|
||||||
p_user_recover.add_argument("target")
|
|
||||||
p_user_disable = user_sub.add_parser("disable")
|
|
||||||
p_user_disable.add_argument("target")
|
|
||||||
p_user_enable = user_sub.add_parser("enable")
|
|
||||||
p_user_enable.add_argument("target")
|
|
||||||
p_user_set_pw = user_sub.add_parser("set-password")
|
|
||||||
p_user_set_pw.add_argument("target")
|
|
||||||
p_user_set_pw.add_argument("password")
|
|
||||||
p_user_onboard = user_sub.add_parser("onboard")
|
|
||||||
p_user_onboard.add_argument("email")
|
|
||||||
p_user_onboard.add_argument("--name", default="")
|
|
||||||
p_user_onboard.add_argument("--schema", default="employee")
|
|
||||||
p_user_onboard.add_argument("--no-email", action="store_true")
|
|
||||||
p_user_onboard.add_argument("--notify", default="")
|
|
||||||
p_user_onboard.add_argument("--job-title", default="")
|
|
||||||
p_user_onboard.add_argument("--department", default="")
|
|
||||||
p_user_onboard.add_argument("--office-location", default="")
|
|
||||||
p_user_onboard.add_argument("--hire-date", default="")
|
|
||||||
p_user_onboard.add_argument("--manager", default="")
|
|
||||||
p_user_offboard = user_sub.add_parser("offboard")
|
|
||||||
p_user_offboard.add_argument("target")
|
|
||||||
|
|
||||||
# Add config subcommand for testing
|
|
||||||
p_config = sub.add_parser("config")
|
|
||||||
config_sub = p_config.add_subparsers(dest="config_action")
|
|
||||||
p_config_set = config_sub.add_parser("set")
|
|
||||||
p_config_set.add_argument("--host", default="")
|
|
||||||
p_config_set.add_argument("--infra-dir", default="")
|
|
||||||
config_sub.add_parser("get")
|
|
||||||
config_sub.add_parser("clear")
|
|
||||||
|
|
||||||
return parser.parse_args(argv)
|
|
||||||
|
|
||||||
def test_up(self):
|
|
||||||
args = self._parse(["up"])
|
|
||||||
self.assertEqual(args.verb, "up")
|
|
||||||
|
|
||||||
def test_status_no_target(self):
|
|
||||||
args = self._parse(["status"])
|
|
||||||
self.assertEqual(args.verb, "status")
|
|
||||||
self.assertIsNone(args.target)
|
|
||||||
|
|
||||||
def test_status_with_namespace(self):
|
|
||||||
args = self._parse(["status", "ory"])
|
|
||||||
self.assertEqual(args.verb, "status")
|
|
||||||
self.assertEqual(args.target, "ory")
|
|
||||||
|
|
||||||
def test_logs_no_follow(self):
|
|
||||||
args = self._parse(["logs", "ory/kratos"])
|
|
||||||
self.assertEqual(args.verb, "logs")
|
|
||||||
self.assertEqual(args.target, "ory/kratos")
|
|
||||||
self.assertFalse(args.follow)
|
|
||||||
|
|
||||||
def test_logs_follow_short(self):
|
|
||||||
args = self._parse(["logs", "ory/kratos", "-f"])
|
|
||||||
self.assertTrue(args.follow)
|
|
||||||
|
|
||||||
def test_logs_follow_long(self):
|
|
||||||
args = self._parse(["logs", "ory/kratos", "--follow"])
|
|
||||||
self.assertTrue(args.follow)
|
|
||||||
|
|
||||||
def test_build_proxy(self):
|
|
||||||
args = self._parse(["build", "proxy"])
|
|
||||||
self.assertEqual(args.what, "proxy")
|
|
||||||
self.assertFalse(args.push)
|
|
||||||
self.assertFalse(args.deploy)
|
|
||||||
|
|
||||||
def test_build_integration(self):
|
|
||||||
args = self._parse(["build", "integration"])
|
|
||||||
self.assertEqual(args.what, "integration")
|
|
||||||
|
|
||||||
def test_build_push_flag(self):
|
|
||||||
args = self._parse(["build", "proxy", "--push"])
|
|
||||||
self.assertTrue(args.push)
|
|
||||||
self.assertFalse(args.deploy)
|
|
||||||
|
|
||||||
def test_build_deploy_flag(self):
|
|
||||||
args = self._parse(["build", "proxy", "--deploy"])
|
|
||||||
self.assertFalse(args.push)
|
|
||||||
self.assertTrue(args.deploy)
|
|
||||||
|
|
||||||
def test_build_invalid_target(self):
|
|
||||||
with self.assertRaises(SystemExit):
|
|
||||||
self._parse(["build", "notavalidtarget"])
|
|
||||||
|
|
||||||
def test_user_set_password(self):
|
|
||||||
args = self._parse(["user", "set-password", "admin@example.com", "hunter2"])
|
|
||||||
self.assertEqual(args.verb, "user")
|
|
||||||
self.assertEqual(args.user_action, "set-password")
|
|
||||||
self.assertEqual(args.target, "admin@example.com")
|
|
||||||
self.assertEqual(args.password, "hunter2")
|
|
||||||
|
|
||||||
def test_user_disable(self):
|
|
||||||
args = self._parse(["user", "disable", "admin@example.com"])
|
|
||||||
self.assertEqual(args.user_action, "disable")
|
|
||||||
self.assertEqual(args.target, "admin@example.com")
|
|
||||||
|
|
||||||
def test_user_enable(self):
|
|
||||||
args = self._parse(["user", "enable", "admin@example.com"])
|
|
||||||
self.assertEqual(args.user_action, "enable")
|
|
||||||
self.assertEqual(args.target, "admin@example.com")
|
|
||||||
|
|
||||||
def test_user_list_search(self):
|
|
||||||
args = self._parse(["user", "list", "--search", "sienna"])
|
|
||||||
self.assertEqual(args.user_action, "list")
|
|
||||||
self.assertEqual(args.search, "sienna")
|
|
||||||
|
|
||||||
def test_user_create(self):
|
|
||||||
args = self._parse(["user", "create", "x@example.com", "--name", "X Y"])
|
|
||||||
self.assertEqual(args.user_action, "create")
|
|
||||||
self.assertEqual(args.email, "x@example.com")
|
|
||||||
self.assertEqual(args.name, "X Y")
|
|
||||||
|
|
||||||
def test_user_onboard_basic(self):
|
|
||||||
args = self._parse(["user", "onboard", "a@b.com"])
|
|
||||||
self.assertEqual(args.user_action, "onboard")
|
|
||||||
self.assertEqual(args.email, "a@b.com")
|
|
||||||
self.assertEqual(args.name, "")
|
|
||||||
self.assertEqual(args.schema, "employee")
|
|
||||||
self.assertFalse(args.no_email)
|
|
||||||
self.assertEqual(args.notify, "")
|
|
||||||
|
|
||||||
def test_user_onboard_full(self):
|
|
||||||
args = self._parse(["user", "onboard", "a@b.com", "--name", "A B", "--schema", "default",
|
|
||||||
"--no-email", "--job-title", "Engineer", "--department", "Dev",
|
|
||||||
"--office-location", "Paris", "--hire-date", "2026-01-15",
|
|
||||||
"--manager", "boss@b.com"])
|
|
||||||
self.assertEqual(args.user_action, "onboard")
|
|
||||||
self.assertEqual(args.email, "a@b.com")
|
|
||||||
self.assertEqual(args.name, "A B")
|
|
||||||
self.assertEqual(args.schema, "default")
|
|
||||||
self.assertTrue(args.no_email)
|
|
||||||
self.assertEqual(args.job_title, "Engineer")
|
|
||||||
self.assertEqual(args.department, "Dev")
|
|
||||||
self.assertEqual(args.office_location, "Paris")
|
|
||||||
self.assertEqual(args.hire_date, "2026-01-15")
|
|
||||||
self.assertEqual(args.manager, "boss@b.com")
|
|
||||||
|
|
||||||
def test_user_onboard_notify(self):
|
|
||||||
args = self._parse(["user", "onboard", "a@work.com", "--notify", "a@personal.com"])
|
|
||||||
self.assertEqual(args.email, "a@work.com")
|
|
||||||
self.assertEqual(args.notify, "a@personal.com")
|
|
||||||
self.assertFalse(args.no_email)
|
|
||||||
|
|
||||||
def test_user_offboard(self):
|
|
||||||
args = self._parse(["user", "offboard", "a@b.com"])
|
|
||||||
self.assertEqual(args.user_action, "offboard")
|
|
||||||
self.assertEqual(args.target, "a@b.com")
|
|
||||||
|
|
||||||
def test_get_with_target(self):
|
|
||||||
args = self._parse(["get", "ory/kratos-abc"])
|
|
||||||
self.assertEqual(args.verb, "get")
|
|
||||||
self.assertEqual(args.target, "ory/kratos-abc")
|
|
||||||
self.assertEqual(args.output, "yaml")
|
|
||||||
|
|
||||||
def test_get_json_output(self):
|
|
||||||
args = self._parse(["get", "ory/kratos-abc", "-o", "json"])
|
|
||||||
self.assertEqual(args.output, "json")
|
|
||||||
|
|
||||||
def test_get_invalid_output_format(self):
|
|
||||||
with self.assertRaises(SystemExit):
|
|
||||||
self._parse(["get", "ory/kratos-abc", "-o", "toml"])
|
|
||||||
|
|
||||||
def test_check_no_target(self):
|
|
||||||
args = self._parse(["check"])
|
|
||||||
self.assertEqual(args.verb, "check")
|
|
||||||
self.assertIsNone(args.target)
|
|
||||||
|
|
||||||
def test_check_with_namespace(self):
|
|
||||||
args = self._parse(["check", "devtools"])
|
|
||||||
self.assertEqual(args.verb, "check")
|
|
||||||
self.assertEqual(args.target, "devtools")
|
|
||||||
|
|
||||||
def test_check_with_service(self):
|
|
||||||
args = self._parse(["check", "lasuite/people"])
|
|
||||||
self.assertEqual(args.verb, "check")
|
|
||||||
self.assertEqual(args.target, "lasuite/people")
|
|
||||||
|
|
||||||
def test_apply_no_namespace(self):
|
|
||||||
args = self._parse(["apply"])
|
|
||||||
self.assertEqual(args.verb, "apply")
|
|
||||||
self.assertEqual(args.namespace, "")
|
|
||||||
|
|
||||||
def test_apply_with_namespace(self):
|
|
||||||
args = self._parse(["apply", "lasuite"])
|
|
||||||
self.assertEqual(args.verb, "apply")
|
|
||||||
self.assertEqual(args.namespace, "lasuite")
|
|
||||||
|
|
||||||
def test_apply_ingress_namespace(self):
|
|
||||||
args = self._parse(["apply", "ingress"])
|
|
||||||
self.assertEqual(args.namespace, "ingress")
|
|
||||||
|
|
||||||
def test_build_meet(self):
|
|
||||||
args = self._parse(["build", "meet"])
|
|
||||||
self.assertEqual(args.what, "meet")
|
|
||||||
|
|
||||||
def test_config_set_with_host_and_infra_dir(self):
|
|
||||||
args = self._parse(["config", "set", "--host", "user@example.com", "--infra-dir", "/path/to/infra"])
|
|
||||||
self.assertEqual(args.verb, "config")
|
|
||||||
self.assertEqual(args.config_action, "set")
|
|
||||||
self.assertEqual(args.host, "user@example.com")
|
|
||||||
self.assertEqual(args.infra_dir, "/path/to/infra")
|
|
||||||
|
|
||||||
def test_config_set_with_only_host(self):
|
|
||||||
args = self._parse(["config", "set", "--host", "user@example.com"])
|
|
||||||
self.assertEqual(args.verb, "config")
|
|
||||||
self.assertEqual(args.config_action, "set")
|
|
||||||
self.assertEqual(args.host, "user@example.com")
|
|
||||||
self.assertEqual(args.infra_dir, "")
|
|
||||||
|
|
||||||
def test_config_set_with_only_infra_dir(self):
|
|
||||||
args = self._parse(["config", "set", "--infra-dir", "/path/to/infra"])
|
|
||||||
self.assertEqual(args.verb, "config")
|
|
||||||
self.assertEqual(args.config_action, "set")
|
|
||||||
self.assertEqual(args.host, "")
|
|
||||||
self.assertEqual(args.infra_dir, "/path/to/infra")
|
|
||||||
|
|
||||||
def test_config_get(self):
|
|
||||||
args = self._parse(["config", "get"])
|
|
||||||
self.assertEqual(args.verb, "config")
|
|
||||||
self.assertEqual(args.config_action, "get")
|
|
||||||
|
|
||||||
def test_config_clear(self):
|
|
||||||
args = self._parse(["config", "clear"])
|
|
||||||
self.assertEqual(args.verb, "config")
|
|
||||||
self.assertEqual(args.config_action, "clear")
|
|
||||||
|
|
||||||
def test_build_people(self):
|
|
||||||
args = self._parse(["build", "people"])
|
|
||||||
self.assertEqual(args.what, "people")
|
|
||||||
self.assertFalse(args.push)
|
|
||||||
self.assertFalse(args.deploy)
|
|
||||||
|
|
||||||
def test_build_people_push(self):
|
|
||||||
args = self._parse(["build", "people", "--push"])
|
|
||||||
self.assertEqual(args.what, "people")
|
|
||||||
self.assertTrue(args.push)
|
|
||||||
self.assertFalse(args.deploy)
|
|
||||||
|
|
||||||
def test_build_people_push_deploy(self):
|
|
||||||
args = self._parse(["build", "people", "--push", "--deploy"])
|
|
||||||
self.assertEqual(args.what, "people")
|
|
||||||
self.assertTrue(args.push)
|
|
||||||
self.assertTrue(args.deploy)
|
|
||||||
|
|
||||||
def test_no_args_verb_is_none(self):
|
|
||||||
args = self._parse([])
|
|
||||||
self.assertIsNone(args.verb)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCliDispatch(unittest.TestCase):
|
|
||||||
"""Test that main() dispatches to the correct command function."""
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def _mock_users(**overrides):
|
|
||||||
defaults = {f: MagicMock() for f in [
|
|
||||||
"cmd_user_list", "cmd_user_get", "cmd_user_create", "cmd_user_delete",
|
|
||||||
"cmd_user_recover", "cmd_user_disable", "cmd_user_enable",
|
|
||||||
"cmd_user_set_password", "cmd_user_onboard", "cmd_user_offboard",
|
|
||||||
]}
|
|
||||||
defaults.update(overrides)
|
|
||||||
return MagicMock(**defaults)
|
|
||||||
|
|
||||||
def test_no_verb_exits_0(self):
|
|
||||||
with patch.object(sys, "argv", ["sunbeam"]):
|
|
||||||
from sunbeam import cli
|
|
||||||
with self.assertRaises(SystemExit) as ctx:
|
|
||||||
cli.main()
|
|
||||||
self.assertEqual(ctx.exception.code, 0)
|
|
||||||
|
|
||||||
def test_unknown_verb_exits_nonzero(self):
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "unknown-verb"]):
|
|
||||||
from sunbeam import cli
|
|
||||||
with self.assertRaises(SystemExit) as ctx:
|
|
||||||
cli.main()
|
|
||||||
self.assertNotEqual(ctx.exception.code, 0)
|
|
||||||
|
|
||||||
def test_up_calls_cmd_up(self):
|
|
||||||
mock_up = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "up"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.cluster": MagicMock(cmd_up=mock_up)}):
|
|
||||||
import importlib
|
|
||||||
import sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_up.assert_called_once()
|
|
||||||
|
|
||||||
def test_status_no_target(self):
|
|
||||||
mock_status = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "status"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.services": MagicMock(cmd_status=mock_status)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_status.assert_called_once_with(None)
|
|
||||||
|
|
||||||
def test_status_with_namespace(self):
|
|
||||||
mock_status = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "status", "ory"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.services": MagicMock(cmd_status=mock_status)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_status.assert_called_once_with("ory")
|
|
||||||
|
|
||||||
def test_logs_with_target(self):
|
|
||||||
mock_logs = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "logs", "ory/kratos"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.services": MagicMock(cmd_logs=mock_logs)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_logs.assert_called_once_with("ory/kratos", follow=False)
|
|
||||||
|
|
||||||
def test_logs_follow_flag(self):
|
|
||||||
mock_logs = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "logs", "ory/kratos", "-f"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.services": MagicMock(cmd_logs=mock_logs)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_logs.assert_called_once_with("ory/kratos", follow=True)
|
|
||||||
|
|
||||||
def test_get_dispatches_with_target_and_output(self):
|
|
||||||
mock_get = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "get", "ory/kratos-abc"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.services": MagicMock(cmd_get=mock_get)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_get.assert_called_once_with("ory/kratos-abc", output="yaml")
|
|
||||||
|
|
||||||
def test_build_proxy(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "proxy"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("proxy", push=False, deploy=False, no_cache=False)
|
|
||||||
|
|
||||||
def test_build_with_push_flag(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "integration", "--push"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("integration", push=True, deploy=False, no_cache=False)
|
|
||||||
|
|
||||||
def test_build_with_deploy_flag_implies_push(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "proxy", "--deploy"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("proxy", push=True, deploy=True, no_cache=False)
|
|
||||||
|
|
||||||
def test_user_set_password_dispatches(self):
|
|
||||||
mock_set_pw = MagicMock()
|
|
||||||
mock_users = self._mock_users(cmd_user_set_password=mock_set_pw)
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "user", "set-password",
|
|
||||||
"admin@sunbeam.pt", "s3cr3t"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.users": mock_users}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_set_pw.assert_called_once_with("admin@sunbeam.pt", "s3cr3t")
|
|
||||||
|
|
||||||
def test_user_disable_dispatches(self):
|
|
||||||
mock_disable = MagicMock()
|
|
||||||
mock_users = self._mock_users(cmd_user_disable=mock_disable)
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "user", "disable", "x@sunbeam.pt"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.users": mock_users}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_disable.assert_called_once_with("x@sunbeam.pt")
|
|
||||||
|
|
||||||
def test_user_enable_dispatches(self):
|
|
||||||
mock_enable = MagicMock()
|
|
||||||
mock_users = self._mock_users(cmd_user_enable=mock_enable)
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "user", "enable", "x@sunbeam.pt"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.users": mock_users}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_enable.assert_called_once_with("x@sunbeam.pt")
|
|
||||||
|
|
||||||
def test_apply_full_dispatches_without_namespace(self):
|
|
||||||
mock_apply = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "apply"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.manifests": MagicMock(cmd_apply=mock_apply)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_apply.assert_called_once_with(env="local", domain="", email="", namespace="")
|
|
||||||
|
|
||||||
def test_apply_partial_passes_namespace(self):
|
|
||||||
mock_apply = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "apply", "lasuite"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.manifests": MagicMock(cmd_apply=mock_apply)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_apply.assert_called_once_with(env="local", domain="", email="", namespace="lasuite")
|
|
||||||
|
|
||||||
def test_build_people_dispatches(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "people"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("people", push=False, deploy=False, no_cache=False)
|
|
||||||
|
|
||||||
def test_build_people_push_dispatches(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "people", "--push"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("people", push=True, deploy=False, no_cache=False)
|
|
||||||
|
|
||||||
def test_build_people_deploy_implies_push(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "people", "--push", "--deploy"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("people", push=True, deploy=True, no_cache=False)
|
|
||||||
|
|
||||||
def test_build_meet_dispatches(self):
|
|
||||||
mock_build = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "build", "meet"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.images": MagicMock(cmd_build=mock_build)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_build.assert_called_once_with("meet", push=False, deploy=False, no_cache=False)
|
|
||||||
|
|
||||||
def test_check_no_target(self):
|
|
||||||
mock_check = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "check"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.checks": MagicMock(cmd_check=mock_check)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_check.assert_called_once_with(None)
|
|
||||||
|
|
||||||
def test_check_with_target(self):
|
|
||||||
mock_check = MagicMock()
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "check", "lasuite/people"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.checks": MagicMock(cmd_check=mock_check)}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_check.assert_called_once_with("lasuite/people")
|
|
||||||
|
|
||||||
|
|
||||||
def test_user_onboard_dispatches(self):
|
|
||||||
mock_onboard = MagicMock()
|
|
||||||
mock_users = self._mock_users(cmd_user_onboard=mock_onboard)
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "user", "onboard",
|
|
||||||
"new@sunbeam.pt", "--name", "New User"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.users": mock_users}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_onboard.assert_called_once_with("new@sunbeam.pt", name="New User",
|
|
||||||
schema_id="employee", send_email=True,
|
|
||||||
notify="", job_title="", department="",
|
|
||||||
office_location="", hire_date="",
|
|
||||||
manager="")
|
|
||||||
|
|
||||||
def test_user_onboard_no_email_dispatches(self):
|
|
||||||
mock_onboard = MagicMock()
|
|
||||||
mock_users = self._mock_users(cmd_user_onboard=mock_onboard)
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "user", "onboard",
|
|
||||||
"new@sunbeam.pt", "--no-email"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.users": mock_users}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_onboard.assert_called_once_with("new@sunbeam.pt", name="",
|
|
||||||
schema_id="employee", send_email=False,
|
|
||||||
notify="", job_title="", department="",
|
|
||||||
office_location="", hire_date="",
|
|
||||||
manager="")
|
|
||||||
|
|
||||||
def test_user_offboard_dispatches(self):
|
|
||||||
mock_offboard = MagicMock()
|
|
||||||
mock_users = self._mock_users(cmd_user_offboard=mock_offboard)
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "user", "offboard", "x@sunbeam.pt"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.users": mock_users}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
mock_offboard.assert_called_once_with("x@sunbeam.pt")
|
|
||||||
|
|
||||||
|
|
||||||
class TestConfigCli(unittest.TestCase):
|
|
||||||
"""Test config subcommand functionality."""
|
|
||||||
|
|
||||||
def setUp(self):
|
|
||||||
"""Set up test fixtures."""
|
|
||||||
import tempfile
|
|
||||||
import os
|
|
||||||
self.temp_dir = tempfile.mkdtemp()
|
|
||||||
self.original_home = os.environ.get('HOME')
|
|
||||||
os.environ['HOME'] = self.temp_dir
|
|
||||||
|
|
||||||
# Import and mock config path
|
|
||||||
from pathlib import Path
|
|
||||||
import sunbeam.config
|
|
||||||
self.original_config_path = sunbeam.config.CONFIG_PATH
|
|
||||||
sunbeam.config.CONFIG_PATH = Path(self.temp_dir) / ".sunbeam.json"
|
|
||||||
|
|
||||||
def tearDown(self):
|
|
||||||
"""Clean up test fixtures."""
|
|
||||||
import shutil
|
|
||||||
import os
|
|
||||||
import sunbeam.config
|
|
||||||
|
|
||||||
# Restore original config path
|
|
||||||
sunbeam.config.CONFIG_PATH = self.original_config_path
|
|
||||||
|
|
||||||
# Clean up temp directory
|
|
||||||
shutil.rmtree(self.temp_dir)
|
|
||||||
|
|
||||||
# Restore original HOME
|
|
||||||
if self.original_home:
|
|
||||||
os.environ['HOME'] = self.original_home
|
|
||||||
else:
|
|
||||||
del os.environ['HOME']
|
|
||||||
|
|
||||||
def test_config_set_and_get(self):
|
|
||||||
"""Test config set and get functionality."""
|
|
||||||
from sunbeam.config import SunbeamConfig, load_config, save_config
|
|
||||||
|
|
||||||
# Test initial state
|
|
||||||
config = load_config()
|
|
||||||
self.assertEqual(config.production_host, "")
|
|
||||||
self.assertEqual(config.infra_directory, "")
|
|
||||||
|
|
||||||
# Test setting config
|
|
||||||
test_config = SunbeamConfig(
|
|
||||||
production_host="user@example.com",
|
|
||||||
infra_directory="/path/to/infra"
|
|
||||||
)
|
|
||||||
save_config(test_config)
|
|
||||||
|
|
||||||
# Test loading config
|
|
||||||
loaded_config = load_config()
|
|
||||||
self.assertEqual(loaded_config.production_host, "user@example.com")
|
|
||||||
self.assertEqual(loaded_config.infra_directory, "/path/to/infra")
|
|
||||||
|
|
||||||
def test_config_clear(self):
|
|
||||||
"""Test config clear functionality."""
|
|
||||||
from sunbeam.config import SunbeamConfig, load_config, save_config
|
|
||||||
from pathlib import Path
|
|
||||||
import os
|
|
||||||
|
|
||||||
# Set a config first
|
|
||||||
test_config = SunbeamConfig(
|
|
||||||
production_host="user@example.com",
|
|
||||||
infra_directory="/path/to/infra"
|
|
||||||
)
|
|
||||||
save_config(test_config)
|
|
||||||
|
|
||||||
# Verify it exists
|
|
||||||
config_path = Path(self.temp_dir) / ".sunbeam.json"
|
|
||||||
self.assertTrue(config_path.exists())
|
|
||||||
|
|
||||||
# Clear config
|
|
||||||
os.remove(config_path)
|
|
||||||
|
|
||||||
# Verify cleared state
|
|
||||||
cleared_config = load_config()
|
|
||||||
self.assertEqual(cleared_config.production_host, "")
|
|
||||||
self.assertEqual(cleared_config.infra_directory, "")
|
|
||||||
|
|
||||||
def test_config_get_production_host_priority(self):
|
|
||||||
"""Test that config file takes priority over environment variable."""
|
|
||||||
from sunbeam.config import SunbeamConfig, save_config, get_production_host
|
|
||||||
import os
|
|
||||||
|
|
||||||
# Set environment variable
|
|
||||||
os.environ['SUNBEAM_SSH_HOST'] = "env@example.com"
|
|
||||||
|
|
||||||
# Get production host without config - should use env var
|
|
||||||
host_no_config = get_production_host()
|
|
||||||
self.assertEqual(host_no_config, "env@example.com")
|
|
||||||
|
|
||||||
# Set config
|
|
||||||
test_config = SunbeamConfig(
|
|
||||||
production_host="config@example.com",
|
|
||||||
infra_directory=""
|
|
||||||
)
|
|
||||||
save_config(test_config)
|
|
||||||
|
|
||||||
# Get production host with config - should use config
|
|
||||||
host_with_config = get_production_host()
|
|
||||||
self.assertEqual(host_with_config, "config@example.com")
|
|
||||||
|
|
||||||
# Clean up env var
|
|
||||||
del os.environ['SUNBEAM_SSH_HOST']
|
|
||||||
|
|
||||||
def test_config_cli_set_dispatch(self):
|
|
||||||
"""Test that config set CLI dispatches correctly."""
|
|
||||||
mock_existing = MagicMock()
|
|
||||||
mock_existing.production_host = "old@example.com"
|
|
||||||
mock_existing.infra_directory = "/old/infra"
|
|
||||||
mock_existing.acme_email = ""
|
|
||||||
mock_save = MagicMock()
|
|
||||||
mock_config = MagicMock(
|
|
||||||
load_config=MagicMock(return_value=mock_existing),
|
|
||||||
save_config=mock_save
|
|
||||||
)
|
|
||||||
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "config", "set",
|
|
||||||
"--host", "cli@example.com",
|
|
||||||
"--infra-dir", "/cli/infra"]):
|
|
||||||
with patch.dict("sys.modules", {"sunbeam.config": mock_config}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Verify existing config was loaded and updated
|
|
||||||
self.assertEqual(mock_existing.production_host, "cli@example.com")
|
|
||||||
self.assertEqual(mock_existing.infra_directory, "/cli/infra")
|
|
||||||
# Verify save_config was called with the updated config
|
|
||||||
mock_save.assert_called_once_with(mock_existing)
|
|
||||||
|
|
||||||
def test_config_cli_get_dispatch(self):
|
|
||||||
"""Test that config get CLI dispatches correctly."""
|
|
||||||
mock_load = MagicMock()
|
|
||||||
mock_ok = MagicMock()
|
|
||||||
mock_config = MagicMock(
|
|
||||||
load_config=mock_load,
|
|
||||||
get_production_host=MagicMock(return_value="effective@example.com")
|
|
||||||
)
|
|
||||||
mock_output = MagicMock(ok=mock_ok)
|
|
||||||
|
|
||||||
# Mock config with some values
|
|
||||||
mock_config_instance = MagicMock()
|
|
||||||
mock_config_instance.production_host = "loaded@example.com"
|
|
||||||
mock_config_instance.infra_directory = "/loaded/infra"
|
|
||||||
mock_load.return_value = mock_config_instance
|
|
||||||
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "config", "get"]):
|
|
||||||
with patch.dict("sys.modules", {
|
|
||||||
"sunbeam.config": mock_config,
|
|
||||||
"sunbeam.output": mock_output
|
|
||||||
}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Verify load_config was called
|
|
||||||
mock_load.assert_called_once()
|
|
||||||
# Verify ok was called with expected messages
|
|
||||||
mock_ok.assert_any_call("Production host: loaded@example.com")
|
|
||||||
mock_ok.assert_any_call("Infrastructure directory: /loaded/infra")
|
|
||||||
mock_ok.assert_any_call("Effective production host: effective@example.com")
|
|
||||||
|
|
||||||
def test_config_cli_clear_dispatch(self):
|
|
||||||
"""Test that config clear CLI dispatches correctly."""
|
|
||||||
mock_ok = MagicMock()
|
|
||||||
mock_warn = MagicMock()
|
|
||||||
mock_output = MagicMock(ok=mock_ok, warn=mock_warn)
|
|
||||||
mock_os = MagicMock()
|
|
||||||
mock_os.path.exists.return_value = True
|
|
||||||
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "config", "clear"]):
|
|
||||||
with patch.dict("sys.modules", {
|
|
||||||
"sunbeam.output": mock_output,
|
|
||||||
"os": mock_os
|
|
||||||
}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Verify os.remove was called
|
|
||||||
mock_os.remove.assert_called_once()
|
|
||||||
# Verify ok was called
|
|
||||||
mock_ok.assert_called_once()
|
|
||||||
|
|
||||||
def test_config_cli_clear_no_file(self):
|
|
||||||
"""Test that config clear handles missing file gracefully."""
|
|
||||||
mock_ok = MagicMock()
|
|
||||||
mock_warn = MagicMock()
|
|
||||||
mock_output = MagicMock(ok=mock_ok, warn=mock_warn)
|
|
||||||
mock_os = MagicMock()
|
|
||||||
mock_os.path.exists.return_value = False
|
|
||||||
|
|
||||||
with patch.object(sys, "argv", ["sunbeam", "config", "clear"]):
|
|
||||||
with patch.dict("sys.modules", {
|
|
||||||
"sunbeam.output": mock_output,
|
|
||||||
"os": mock_os
|
|
||||||
}):
|
|
||||||
import importlib, sunbeam.cli as cli_mod
|
|
||||||
importlib.reload(cli_mod)
|
|
||||||
try:
|
|
||||||
cli_mod.main()
|
|
||||||
except SystemExit:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Verify os.remove was not called
|
|
||||||
mock_os.remove.assert_not_called()
|
|
||||||
# Verify warn was called
|
|
||||||
mock_warn.assert_called_once_with("No configuration file found to clear")
|
|
||||||
@@ -1,108 +0,0 @@
|
|||||||
"""Tests for kube.py — domain substitution, target parsing, kubectl wrappers."""
|
|
||||||
import unittest
|
|
||||||
from unittest.mock import MagicMock, patch
|
|
||||||
|
|
||||||
|
|
||||||
class TestParseTarget(unittest.TestCase):
|
|
||||||
def setUp(self):
|
|
||||||
from sunbeam.kube import parse_target
|
|
||||||
self.parse = parse_target
|
|
||||||
|
|
||||||
def test_none(self):
|
|
||||||
self.assertEqual(self.parse(None), (None, None))
|
|
||||||
|
|
||||||
def test_namespace_only(self):
|
|
||||||
self.assertEqual(self.parse("ory"), ("ory", None))
|
|
||||||
|
|
||||||
def test_namespace_and_name(self):
|
|
||||||
self.assertEqual(self.parse("ory/kratos"), ("ory", "kratos"))
|
|
||||||
|
|
||||||
def test_too_many_parts_raises(self):
|
|
||||||
with self.assertRaises(ValueError):
|
|
||||||
self.parse("too/many/parts")
|
|
||||||
|
|
||||||
def test_empty_string(self):
|
|
||||||
result = self.parse("")
|
|
||||||
self.assertEqual(result, ("", None))
|
|
||||||
|
|
||||||
|
|
||||||
class TestDomainReplace(unittest.TestCase):
|
|
||||||
def setUp(self):
|
|
||||||
from sunbeam.kube import domain_replace
|
|
||||||
self.replace = domain_replace
|
|
||||||
|
|
||||||
def test_single_occurrence(self):
|
|
||||||
result = self.replace("src.DOMAIN_SUFFIX/foo", "192.168.1.1.sslip.io")
|
|
||||||
self.assertEqual(result, "src.192.168.1.1.sslip.io/foo")
|
|
||||||
|
|
||||||
def test_multiple_occurrences(self):
|
|
||||||
text = "DOMAIN_SUFFIX and DOMAIN_SUFFIX"
|
|
||||||
result = self.replace(text, "x.sslip.io")
|
|
||||||
self.assertEqual(result, "x.sslip.io and x.sslip.io")
|
|
||||||
|
|
||||||
def test_no_occurrence(self):
|
|
||||||
result = self.replace("no match here", "x.sslip.io")
|
|
||||||
self.assertEqual(result, "no match here")
|
|
||||||
|
|
||||||
|
|
||||||
class TestKustomizeBuild(unittest.TestCase):
|
|
||||||
def test_calls_run_tool_and_applies_domain_replace(self):
|
|
||||||
from pathlib import Path
|
|
||||||
mock_result = MagicMock()
|
|
||||||
mock_result.stdout = "image: src.DOMAIN_SUFFIX/foo\nimage: src.DOMAIN_SUFFIX/bar"
|
|
||||||
with patch("sunbeam.kube.run_tool", return_value=mock_result) as mock_rt:
|
|
||||||
from sunbeam.kube import kustomize_build
|
|
||||||
result = kustomize_build(Path("/some/overlay"), "192.168.1.1.sslip.io")
|
|
||||||
mock_rt.assert_called_once()
|
|
||||||
call_args = mock_rt.call_args[0]
|
|
||||||
self.assertEqual(call_args[0], "kustomize")
|
|
||||||
self.assertIn("build", call_args)
|
|
||||||
self.assertIn("--enable-helm", call_args)
|
|
||||||
self.assertIn("192.168.1.1.sslip.io", result)
|
|
||||||
self.assertNotIn("DOMAIN_SUFFIX", result)
|
|
||||||
|
|
||||||
def test_strips_null_annotations(self):
|
|
||||||
from pathlib import Path
|
|
||||||
mock_result = MagicMock()
|
|
||||||
mock_result.stdout = "metadata:\n annotations: null\n name: test"
|
|
||||||
with patch("sunbeam.kube.run_tool", return_value=mock_result):
|
|
||||||
from sunbeam.kube import kustomize_build
|
|
||||||
result = kustomize_build(Path("/overlay"), "x.sslip.io")
|
|
||||||
self.assertNotIn("annotations: null", result)
|
|
||||||
|
|
||||||
|
|
||||||
class TestKubeWrappers(unittest.TestCase):
|
|
||||||
def test_kube_passes_context(self):
|
|
||||||
with patch("sunbeam.kube.run_tool") as mock_rt:
|
|
||||||
mock_rt.return_value = MagicMock(returncode=0)
|
|
||||||
from sunbeam.kube import kube
|
|
||||||
kube("get", "pods")
|
|
||||||
call_args = mock_rt.call_args[0]
|
|
||||||
self.assertEqual(call_args[0], "kubectl")
|
|
||||||
self.assertIn("--context=sunbeam", call_args)
|
|
||||||
|
|
||||||
def test_kube_out_returns_stdout_on_success(self):
|
|
||||||
with patch("sunbeam.kube.run_tool") as mock_rt:
|
|
||||||
mock_rt.return_value = MagicMock(returncode=0, stdout=" output ")
|
|
||||||
from sunbeam.kube import kube_out
|
|
||||||
result = kube_out("get", "pods")
|
|
||||||
self.assertEqual(result, "output")
|
|
||||||
|
|
||||||
def test_kube_out_returns_empty_on_failure(self):
|
|
||||||
with patch("sunbeam.kube.run_tool") as mock_rt:
|
|
||||||
mock_rt.return_value = MagicMock(returncode=1, stdout="error text")
|
|
||||||
from sunbeam.kube import kube_out
|
|
||||||
result = kube_out("get", "pods")
|
|
||||||
self.assertEqual(result, "")
|
|
||||||
|
|
||||||
def test_kube_ok_returns_true_on_zero(self):
|
|
||||||
with patch("sunbeam.kube.run_tool") as mock_rt:
|
|
||||||
mock_rt.return_value = MagicMock(returncode=0)
|
|
||||||
from sunbeam.kube import kube_ok
|
|
||||||
self.assertTrue(kube_ok("get", "ns", "default"))
|
|
||||||
|
|
||||||
def test_kube_ok_returns_false_on_nonzero(self):
|
|
||||||
with patch("sunbeam.kube.run_tool") as mock_rt:
|
|
||||||
mock_rt.return_value = MagicMock(returncode=1)
|
|
||||||
from sunbeam.kube import kube_ok
|
|
||||||
self.assertFalse(kube_ok("get", "ns", "missing"))
|
|
||||||
@@ -1,99 +0,0 @@
|
|||||||
"""Tests for manifests.py — primarily _filter_by_namespace."""
|
|
||||||
import unittest
|
|
||||||
|
|
||||||
from sunbeam.manifests import _filter_by_namespace
|
|
||||||
|
|
||||||
|
|
||||||
MULTI_DOC = """\
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: meet-config
|
|
||||||
namespace: lasuite
|
|
||||||
data:
|
|
||||||
FOO: bar
|
|
||||||
---
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: meet-backend
|
|
||||||
namespace: lasuite
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: Namespace
|
|
||||||
metadata:
|
|
||||||
name: lasuite
|
|
||||||
---
|
|
||||||
apiVersion: v1
|
|
||||||
kind: ConfigMap
|
|
||||||
metadata:
|
|
||||||
name: pingora-config
|
|
||||||
namespace: ingress
|
|
||||||
data:
|
|
||||||
config.toml: |
|
|
||||||
hello
|
|
||||||
---
|
|
||||||
apiVersion: apps/v1
|
|
||||||
kind: Deployment
|
|
||||||
metadata:
|
|
||||||
name: pingora
|
|
||||||
namespace: ingress
|
|
||||||
spec:
|
|
||||||
replicas: 1
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
class TestFilterByNamespace(unittest.TestCase):
|
|
||||||
|
|
||||||
def test_keeps_matching_namespace(self):
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "lasuite")
|
|
||||||
self.assertIn("name: meet-config", result)
|
|
||||||
self.assertIn("name: meet-backend", result)
|
|
||||||
|
|
||||||
def test_excludes_other_namespaces(self):
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "lasuite")
|
|
||||||
self.assertNotIn("namespace: ingress", result)
|
|
||||||
self.assertNotIn("name: pingora-config", result)
|
|
||||||
self.assertNotIn("name: pingora", result)
|
|
||||||
|
|
||||||
def test_includes_namespace_resource_itself(self):
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "lasuite")
|
|
||||||
self.assertIn("kind: Namespace", result)
|
|
||||||
|
|
||||||
def test_ingress_filter(self):
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "ingress")
|
|
||||||
self.assertIn("name: pingora-config", result)
|
|
||||||
self.assertIn("name: pingora", result)
|
|
||||||
self.assertNotIn("namespace: lasuite", result)
|
|
||||||
|
|
||||||
def test_unknown_namespace_returns_empty(self):
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "nonexistent")
|
|
||||||
self.assertEqual(result.strip(), "")
|
|
||||||
|
|
||||||
def test_empty_input_returns_empty(self):
|
|
||||||
result = _filter_by_namespace("", "lasuite")
|
|
||||||
self.assertEqual(result.strip(), "")
|
|
||||||
|
|
||||||
def test_result_is_valid_multidoc_yaml(self):
|
|
||||||
# Each non-empty doc in the result should start with '---'
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "lasuite")
|
|
||||||
self.assertTrue(result.startswith("---"))
|
|
||||||
|
|
||||||
def test_does_not_include_namespace_resource_for_wrong_ns(self):
|
|
||||||
# The lasuite Namespace CR should NOT appear in an ingress-filtered result
|
|
||||||
result = _filter_by_namespace(MULTI_DOC, "ingress")
|
|
||||||
# There's no ingress Namespace CR in the fixture, so kind: Namespace should be absent
|
|
||||||
self.assertNotIn("kind: Namespace", result)
|
|
||||||
|
|
||||||
def test_single_doc_matching(self):
|
|
||||||
doc = "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: x\n namespace: ory\n"
|
|
||||||
result = _filter_by_namespace(doc, "ory")
|
|
||||||
self.assertIn("name: x", result)
|
|
||||||
|
|
||||||
def test_single_doc_not_matching(self):
|
|
||||||
doc = "apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: x\n namespace: ory\n"
|
|
||||||
result = _filter_by_namespace(doc, "lasuite")
|
|
||||||
self.assertEqual(result.strip(), "")
|
|
||||||
@@ -1,93 +0,0 @@
|
|||||||
"""Tests for secrets.py — seed idempotency, verify flow."""
|
|
||||||
import base64
|
|
||||||
import unittest
|
|
||||||
from unittest.mock import MagicMock, patch, call
|
|
||||||
|
|
||||||
|
|
||||||
class TestSeedIdempotency(unittest.TestCase):
|
|
||||||
"""_seed_openbao() must read existing values before writing (never rotates)."""
|
|
||||||
|
|
||||||
def test_get_or_create_skips_existing(self):
|
|
||||||
"""If OpenBao already has a value, it's reused not regenerated."""
|
|
||||||
with patch("sunbeam.secrets._seed_openbao") as mock_seed:
|
|
||||||
mock_seed.return_value = {
|
|
||||||
"hydra-system-secret": "existingvalue",
|
|
||||||
"_ob_pod": "openbao-0",
|
|
||||||
"_root_token": "token123",
|
|
||||||
}
|
|
||||||
from sunbeam import secrets
|
|
||||||
result = secrets._seed_openbao()
|
|
||||||
self.assertIn("hydra-system-secret", result)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCmdVerify(unittest.TestCase):
|
|
||||||
def _mock_kube_out(self, ob_pod="openbao-0", root_token="testtoken", mac=""):
|
|
||||||
"""Create a side_effect function for kube_out that simulates verify flow."""
|
|
||||||
encoded_token = base64.b64encode(root_token.encode()).decode()
|
|
||||||
def side_effect(*args, **kwargs):
|
|
||||||
args_str = " ".join(str(a) for a in args)
|
|
||||||
if "app.kubernetes.io/name=openbao" in args_str:
|
|
||||||
return ob_pod
|
|
||||||
if "root-token" in args_str:
|
|
||||||
return encoded_token
|
|
||||||
if "secretMAC" in args_str:
|
|
||||||
return mac
|
|
||||||
if "conditions" in args_str:
|
|
||||||
return "unknown"
|
|
||||||
if ".data.test-key" in args_str:
|
|
||||||
return ""
|
|
||||||
return ""
|
|
||||||
return side_effect
|
|
||||||
|
|
||||||
def test_verify_cleans_up_on_timeout(self):
|
|
||||||
"""cmd_verify() must clean up test resources even when VSO doesn't sync."""
|
|
||||||
kube_out_fn = self._mock_kube_out(mac="") # MAC never set -> timeout
|
|
||||||
with patch("sunbeam.secrets.kube_out", side_effect=kube_out_fn):
|
|
||||||
with patch("sunbeam.secrets.kube") as mock_kube:
|
|
||||||
with patch("sunbeam.secrets.kube_apply"):
|
|
||||||
with patch("subprocess.run") as mock_run:
|
|
||||||
mock_run.return_value = MagicMock(returncode=0, stdout="", stderr="")
|
|
||||||
with patch("time.time") as mock_time:
|
|
||||||
# start=0, first check=0, second check past deadline
|
|
||||||
mock_time.side_effect = [0, 0, 100]
|
|
||||||
with patch("time.sleep"):
|
|
||||||
from sunbeam import secrets
|
|
||||||
with self.assertRaises(SystemExit):
|
|
||||||
secrets.cmd_verify()
|
|
||||||
# Cleanup should have been called (delete calls)
|
|
||||||
delete_calls = [c for c in mock_kube.call_args_list
|
|
||||||
if "delete" in str(c)]
|
|
||||||
self.assertGreater(len(delete_calls), 0)
|
|
||||||
|
|
||||||
def test_verify_succeeds_when_synced(self):
|
|
||||||
"""cmd_verify() succeeds when VSO syncs the secret and value matches."""
|
|
||||||
# We need a fixed test_value. Patch _secrets.token_urlsafe to return known value.
|
|
||||||
test_val = "fixed-test-value"
|
|
||||||
encoded_val = base64.b64encode(test_val.encode()).decode()
|
|
||||||
encoded_token = base64.b64encode(b"testtoken").decode()
|
|
||||||
|
|
||||||
call_count = [0]
|
|
||||||
def kube_out_fn(*args, **kwargs):
|
|
||||||
args_str = " ".join(str(a) for a in args)
|
|
||||||
if "app.kubernetes.io/name=openbao" in args_str:
|
|
||||||
return "openbao-0"
|
|
||||||
if "root-token" in args_str:
|
|
||||||
return encoded_token
|
|
||||||
if "secretMAC" in args_str:
|
|
||||||
call_count[0] += 1
|
|
||||||
return "somemac" if call_count[0] >= 1 else ""
|
|
||||||
if ".data.test-key" in args_str:
|
|
||||||
return encoded_val
|
|
||||||
return ""
|
|
||||||
|
|
||||||
with patch("sunbeam.secrets.kube_out", side_effect=kube_out_fn):
|
|
||||||
with patch("sunbeam.secrets.kube") as mock_kube:
|
|
||||||
with patch("sunbeam.secrets.kube_apply"):
|
|
||||||
with patch("subprocess.run") as mock_run:
|
|
||||||
mock_run.return_value = MagicMock(returncode=0, stdout="", stderr="")
|
|
||||||
with patch("sunbeam.secrets._secrets.token_urlsafe", return_value=test_val):
|
|
||||||
with patch("time.time", return_value=0):
|
|
||||||
with patch("time.sleep"):
|
|
||||||
from sunbeam import secrets
|
|
||||||
# Should not raise
|
|
||||||
secrets.cmd_verify()
|
|
||||||
@@ -1,128 +0,0 @@
|
|||||||
"""Tests for services.py — status scoping, log command construction, restart."""
|
|
||||||
import unittest
|
|
||||||
from unittest.mock import MagicMock, patch, call
|
|
||||||
|
|
||||||
|
|
||||||
class TestCmdStatus(unittest.TestCase):
|
|
||||||
def test_all_namespaces_when_no_target(self):
|
|
||||||
fake_output = (
|
|
||||||
"ory hydra-abc 1/1 Running 0 1d\n"
|
|
||||||
"data valkey-xyz 1/1 Running 0 1d\n"
|
|
||||||
)
|
|
||||||
with patch("sunbeam.services._capture_out", return_value=fake_output):
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_status(None)
|
|
||||||
|
|
||||||
def test_namespace_scoped(self):
|
|
||||||
fake_output = "ory kratos-abc 1/1 Running 0 1d\n"
|
|
||||||
with patch("sunbeam.services._capture_out", return_value=fake_output) as mock_co:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_status("ory")
|
|
||||||
# Should have called _capture_out with -n ory
|
|
||||||
calls_str = str(mock_co.call_args_list)
|
|
||||||
self.assertIn("ory", calls_str)
|
|
||||||
|
|
||||||
def test_pod_scoped(self):
|
|
||||||
fake_output = "kratos-abc 1/1 Running 0 1d\n"
|
|
||||||
with patch("sunbeam.services._capture_out", return_value=fake_output) as mock_co:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_status("ory/kratos")
|
|
||||||
calls_str = str(mock_co.call_args_list)
|
|
||||||
self.assertIn("ory", calls_str)
|
|
||||||
self.assertIn("kratos", calls_str)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCmdLogs(unittest.TestCase):
|
|
||||||
def test_logs_no_follow(self):
|
|
||||||
with patch("subprocess.Popen") as mock_popen:
|
|
||||||
mock_proc = MagicMock()
|
|
||||||
mock_proc.wait.return_value = 0
|
|
||||||
mock_popen.return_value = mock_proc
|
|
||||||
with patch("sunbeam.tools.ensure_tool", return_value="/fake/kubectl"):
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_logs("ory/kratos", follow=False)
|
|
||||||
args = mock_popen.call_args[0][0]
|
|
||||||
self.assertIn("-n", args)
|
|
||||||
self.assertIn("ory", args)
|
|
||||||
self.assertNotIn("--follow", args)
|
|
||||||
|
|
||||||
def test_logs_follow(self):
|
|
||||||
with patch("subprocess.Popen") as mock_popen:
|
|
||||||
mock_proc = MagicMock()
|
|
||||||
mock_proc.wait.return_value = 0
|
|
||||||
mock_popen.return_value = mock_proc
|
|
||||||
with patch("sunbeam.tools.ensure_tool", return_value="/fake/kubectl"):
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_logs("ory/kratos", follow=True)
|
|
||||||
args = mock_popen.call_args[0][0]
|
|
||||||
self.assertIn("--follow", args)
|
|
||||||
|
|
||||||
def test_logs_requires_service_name(self):
|
|
||||||
"""Passing just a namespace (no service) should die()."""
|
|
||||||
with self.assertRaises(SystemExit):
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_logs("ory", follow=False)
|
|
||||||
|
|
||||||
|
|
||||||
class TestCmdGet(unittest.TestCase):
|
|
||||||
def test_prints_yaml_for_pod(self):
|
|
||||||
with patch("sunbeam.services.kube_out", return_value="apiVersion: v1\nkind: Pod") as mock_ko:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_get("ory/kratos-abc")
|
|
||||||
mock_ko.assert_called_once_with("get", "pod", "kratos-abc", "-n", "ory", "-o=yaml")
|
|
||||||
|
|
||||||
def test_default_output_is_yaml(self):
|
|
||||||
with patch("sunbeam.services.kube_out", return_value="kind: Pod"):
|
|
||||||
from sunbeam import services
|
|
||||||
# no output kwarg → defaults to yaml
|
|
||||||
services.cmd_get("ory/kratos-abc")
|
|
||||||
|
|
||||||
def test_json_output_format(self):
|
|
||||||
with patch("sunbeam.services.kube_out", return_value='{"kind":"Pod"}') as mock_ko:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_get("ory/kratos-abc", output="json")
|
|
||||||
mock_ko.assert_called_once_with("get", "pod", "kratos-abc", "-n", "ory", "-o=json")
|
|
||||||
|
|
||||||
def test_missing_name_exits(self):
|
|
||||||
with self.assertRaises(SystemExit):
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_get("ory") # namespace-only, no pod name
|
|
||||||
|
|
||||||
def test_not_found_exits(self):
|
|
||||||
with patch("sunbeam.services.kube_out", return_value=""):
|
|
||||||
with self.assertRaises(SystemExit):
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_get("ory/nonexistent")
|
|
||||||
|
|
||||||
|
|
||||||
class TestCmdRestart(unittest.TestCase):
|
|
||||||
def test_restart_all(self):
|
|
||||||
with patch("sunbeam.services.kube") as mock_kube:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_restart(None)
|
|
||||||
# Should restart all SERVICES_TO_RESTART
|
|
||||||
self.assertGreater(mock_kube.call_count, 0)
|
|
||||||
|
|
||||||
def test_restart_namespace_scoped(self):
|
|
||||||
with patch("sunbeam.services.kube") as mock_kube:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_restart("ory")
|
|
||||||
calls_str = str(mock_kube.call_args_list)
|
|
||||||
# Should only restart ory/* services
|
|
||||||
self.assertIn("ory", calls_str)
|
|
||||||
self.assertNotIn("devtools", calls_str)
|
|
||||||
|
|
||||||
def test_restart_specific_service(self):
|
|
||||||
with patch("sunbeam.services.kube") as mock_kube:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_restart("ory/kratos")
|
|
||||||
# Should restart exactly deployment/kratos in ory
|
|
||||||
calls_str = str(mock_kube.call_args_list)
|
|
||||||
self.assertIn("kratos", calls_str)
|
|
||||||
|
|
||||||
def test_restart_unknown_service_warns(self):
|
|
||||||
with patch("sunbeam.services.kube") as mock_kube:
|
|
||||||
from sunbeam import services
|
|
||||||
services.cmd_restart("nonexistent/nosuch")
|
|
||||||
# kube should not be called since no match
|
|
||||||
mock_kube.assert_not_called()
|
|
||||||
@@ -1,162 +0,0 @@
|
|||||||
"""Tests for tools.py binary bundler."""
|
|
||||||
import hashlib
|
|
||||||
import stat
|
|
||||||
import unittest
|
|
||||||
from pathlib import Path
|
|
||||||
from unittest.mock import MagicMock, patch
|
|
||||||
import tempfile
|
|
||||||
import shutil
|
|
||||||
|
|
||||||
|
|
||||||
class TestSha256(unittest.TestCase):
|
|
||||||
def test_computes_correct_hash(self):
|
|
||||||
from sunbeam.tools import _sha256
|
|
||||||
with tempfile.NamedTemporaryFile(delete=False) as f:
|
|
||||||
f.write(b"hello world")
|
|
||||||
f.flush()
|
|
||||||
path = Path(f.name)
|
|
||||||
try:
|
|
||||||
expected = hashlib.sha256(b"hello world").hexdigest()
|
|
||||||
self.assertEqual(_sha256(path), expected)
|
|
||||||
finally:
|
|
||||||
path.unlink()
|
|
||||||
|
|
||||||
|
|
||||||
class TestEnsureTool(unittest.TestCase):
    """Tests for ensure_tool(): cache hits, downloads, and SHA256 verification."""

    def setUp(self):
        # Redirect the tool cache into a throwaway directory so tests never
        # touch the real ~/.local/share/sunbeam/bin.
        self.tmpdir = tempfile.mkdtemp()
        self.cache_patcher = patch("sunbeam.tools.CACHE_DIR", Path(self.tmpdir))
        self.cache_patcher.start()

    def tearDown(self):
        self.cache_patcher.stop()
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def test_returns_cached_if_sha_matches(self):
        # A cached executable whose hash matches the pinned sha256 is reused.
        binary_data = b"#!/bin/sh\necho kubectl"
        dest = Path(self.tmpdir) / "kubectl"
        dest.write_bytes(binary_data)
        dest.chmod(dest.stat().st_mode | stat.S_IXUSR)
        expected_sha = hashlib.sha256(binary_data).hexdigest()
        tools_spec = {"kubectl": {"url": "http://x", "sha256": expected_sha}}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            from sunbeam import tools
            result = tools.ensure_tool("kubectl")
            self.assertEqual(result, dest)

    def test_returns_cached_if_sha_empty(self):
        # An empty sha256 in the spec disables verification entirely.
        binary_data = b"#!/bin/sh\necho kubectl"
        dest = Path(self.tmpdir) / "kubectl"
        dest.write_bytes(binary_data)
        dest.chmod(dest.stat().st_mode | stat.S_IXUSR)
        tools_spec = {"kubectl": {"url": "http://x", "sha256": ""}}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            from sunbeam import tools
            result = tools.ensure_tool("kubectl")
            self.assertEqual(result, dest)

    def test_downloads_on_cache_miss(self):
        # No cached binary: the URL is fetched and the result made executable.
        binary_data = b"#!/bin/sh\necho kubectl"
        tools_spec = {"kubectl": {"url": "http://example.com/kubectl", "sha256": ""}}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            with patch("urllib.request.urlopen") as mock_url:
                # Fake a context-manager HTTP response returning binary_data.
                mock_resp = MagicMock()
                mock_resp.read.return_value = binary_data
                mock_resp.__enter__ = lambda s: s
                mock_resp.__exit__ = MagicMock(return_value=False)
                mock_url.return_value = mock_resp
                from sunbeam import tools
                result = tools.ensure_tool("kubectl")
                dest = Path(self.tmpdir) / "kubectl"
                self.assertTrue(dest.exists())
                self.assertEqual(dest.read_bytes(), binary_data)
                # Should be executable
                self.assertTrue(dest.stat().st_mode & stat.S_IXUSR)

    def test_raises_on_sha256_mismatch(self):
        # A downloaded binary failing verification must raise and leave no
        # file behind in the cache.
        binary_data = b"#!/bin/sh\necho fake"
        tools_spec = {"kubectl": {
            "url": "http://example.com/kubectl",
            "sha256": "a" * 64,  # wrong hash
        }}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            with patch("urllib.request.urlopen") as mock_url:
                mock_resp = MagicMock()
                mock_resp.read.return_value = binary_data
                mock_resp.__enter__ = lambda s: s
                mock_resp.__exit__ = MagicMock(return_value=False)
                mock_url.return_value = mock_resp
                from sunbeam import tools
                with self.assertRaises(RuntimeError) as ctx:
                    tools.ensure_tool("kubectl")
                self.assertIn("SHA256 mismatch", str(ctx.exception))
                # Binary should be cleaned up
                self.assertFalse((Path(self.tmpdir) / "kubectl").exists())

    def test_redownloads_on_sha_mismatch_cached(self):
        """If cached binary has wrong hash, it's deleted and re-downloaded."""
        old_data = b"old binary"
        new_data = b"new binary"
        dest = Path(self.tmpdir) / "kubectl"
        dest.write_bytes(old_data)
        new_sha = hashlib.sha256(new_data).hexdigest()
        tools_spec = {"kubectl": {"url": "http://x/kubectl", "sha256": new_sha}}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            with patch("urllib.request.urlopen") as mock_url:
                mock_resp = MagicMock()
                mock_resp.read.return_value = new_data
                mock_resp.__enter__ = lambda s: s
                mock_resp.__exit__ = MagicMock(return_value=False)
                mock_url.return_value = mock_resp
                from sunbeam import tools
                result = tools.ensure_tool("kubectl")
                self.assertEqual(dest.read_bytes(), new_data)

    def test_unknown_tool_raises_value_error(self):
        from sunbeam import tools
        with self.assertRaises(ValueError):
            tools.ensure_tool("notarealtool")
|
|
||||||
|
|
||||||
|
|
||||||
class TestRunTool(unittest.TestCase):
    """Tests for run_tool() PATH handling around the tool cache."""

    def setUp(self):
        # Point the tool cache at a temp dir so no real download happens.
        self.tmpdir = tempfile.mkdtemp()
        self.cache_patcher = patch("sunbeam.tools.CACHE_DIR", Path(self.tmpdir))
        self.cache_patcher.start()

    def tearDown(self):
        self.cache_patcher.stop()
        shutil.rmtree(self.tmpdir, ignore_errors=True)

    def test_kustomize_prepends_cache_dir_to_path(self):
        # kustomize shells out to helm, so the cache dir must lead PATH.
        binary_data = b"#!/bin/sh"
        dest = Path(self.tmpdir) / "kustomize"
        dest.write_bytes(binary_data)
        dest.chmod(dest.stat().st_mode | stat.S_IXUSR)
        tools_spec = {"kustomize": {"url": "http://x", "sha256": ""}}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            with patch("subprocess.run") as mock_run:
                mock_run.return_value = MagicMock(returncode=0)
                from sunbeam import tools
                tools.run_tool("kustomize", "build", ".")
                call_kwargs = mock_run.call_args[1]
                env = call_kwargs.get("env", {})
                self.assertTrue(env.get("PATH", "").startswith(str(self.tmpdir)))

    def test_non_kustomize_does_not_modify_path(self):
        # Any other tool runs with the caller's PATH untouched.
        binary_data = b"#!/bin/sh"
        dest = Path(self.tmpdir) / "kubectl"
        dest.write_bytes(binary_data)
        dest.chmod(dest.stat().st_mode | stat.S_IXUSR)
        tools_spec = {"kubectl": {"url": "http://x", "sha256": ""}}
        with patch("sunbeam.tools.TOOLS", tools_spec):
            with patch("subprocess.run") as mock_run:
                mock_run.return_value = MagicMock(returncode=0)
                from sunbeam import tools
                import os
                # NOTE(review): original_path is captured but never asserted on.
                original_path = os.environ.get("PATH", "")
                tools.run_tool("kubectl", "get", "pods")
                call_kwargs = mock_run.call_args[1]
                env = call_kwargs.get("env", {})
                # PATH should not be modified (starts same as original)
                self.assertFalse(env.get("PATH", "").startswith(str(self.tmpdir)))
|
|
||||||
171
sunbeam/tools.py
171
sunbeam/tools.py
@@ -1,171 +0,0 @@
|
|||||||
"""Binary bundler — downloads kubectl, kustomize, helm, buildctl at pinned versions.
|
|
||||||
|
|
||||||
Binaries are cached in ~/.local/share/sunbeam/bin/ and SHA256-verified.
|
|
||||||
Platform (OS + arch) is detected at runtime so the same package works on
|
|
||||||
darwin/arm64 (development Mac), darwin/amd64, linux/arm64, and linux/amd64.
|
|
||||||
"""
|
|
||||||
import hashlib
|
|
||||||
import io
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import stat
|
|
||||||
import subprocess
|
|
||||||
import tarfile
|
|
||||||
import urllib.request
|
|
||||||
from pathlib import Path
|
|
||||||
|
|
||||||
# Where downloaded tool binaries are cached (one file per tool, plus a
# <name>.version sidecar written by ensure_tool).
CACHE_DIR = Path.home() / ".local/share/sunbeam/bin"

# Tool specs — URL and extract templates use {version}, {os}, {arch}.
# {os}   : darwin | linux
# {arch} : arm64 | amd64
# Optional keys:
#   extract : path of the binary inside the downloaded tar.gz
#             (omit for a plain, un-archived binary)
#   sha256  : per-platform map keyed "{os}_{arch}" (or a plain string);
#             an empty/missing value skips verification
_TOOL_SPECS: dict[str, dict] = {
    "kubectl": {
        "version": "v1.32.2",
        "url": "https://dl.k8s.io/release/{version}/bin/{os}/{arch}/kubectl",
        # plain binary, no archive
    },
    "kustomize": {
        "version": "v5.8.1",
        "url": (
            "https://github.com/kubernetes-sigs/kustomize/releases/download/"
            "kustomize%2F{version}/kustomize_{version}_{os}_{arch}.tar.gz"
        ),
        "extract": "kustomize",
    },
    "helm": {
        "version": "v4.1.0",
        "url": "https://get.helm.sh/helm-{version}-{os}-{arch}.tar.gz",
        "extract": "{os}-{arch}/helm",
        "sha256": {
            # NOTE(review): only darwin/arm64 is pinned here, so helm
            # downloads on other platforms go unverified — consider adding
            # the remaining platform hashes.
            "darwin_arm64": "82f7065bf4e08d4c8d7881b85c0a080581ef4968a4ae6df4e7b432f8f7a88d0c",
        },
    },
    "buildctl": {
        "version": "v0.28.0",
        # BuildKit releases: buildkit-v0.28.0.linux.amd64.tar.gz
        "url": (
            "https://github.com/moby/buildkit/releases/download/{version}/"
            "buildkit-{version}.{os}-{arch}.tar.gz"
        ),
        "extract": "bin/buildctl",
    },
}

# Expose as TOOLS for callers that do `if "helm" in TOOLS`.
TOOLS = _TOOL_SPECS
|
|
||||||
|
|
||||||
|
|
||||||
def _detect_platform() -> tuple[str, str]:
|
|
||||||
"""Return (os_name, arch) for the current host."""
|
|
||||||
sys_os = platform.system().lower()
|
|
||||||
machine = platform.machine().lower()
|
|
||||||
os_name = {"darwin": "darwin", "linux": "linux"}.get(sys_os)
|
|
||||||
if not os_name:
|
|
||||||
raise RuntimeError(f"Unsupported OS: {sys_os}")
|
|
||||||
arch = "arm64" if machine in ("arm64", "aarch64") else "amd64"
|
|
||||||
return os_name, arch
|
|
||||||
|
|
||||||
|
|
||||||
def _resolve_spec(name: str) -> dict:
    """Return a copy of a tool spec with {os}/{arch}/{version} substituted.

    Reads the module-level TOOLS dict so that tests can patch it.

    Raises:
        ValueError: if *name* is not a known tool.
    """
    if name not in TOOLS:
        raise ValueError(f"Unknown tool: {name}")
    os_name, arch = _detect_platform()
    raw = TOOLS[name]
    placeholders = {"version": raw.get("version", ""), "os": os_name, "arch": arch}

    resolved = dict(raw)
    resolved["version"] = placeholders["version"]
    resolved["url"] = raw["url"].format(**placeholders)
    if "extract" in raw:
        resolved["extract"] = raw["extract"].format(**placeholders)

    # sha256 may be a per-platform dict {"darwin_arm64": "..."} or a plain string.
    sha256_val = raw.get("sha256", {})
    if isinstance(sha256_val, dict):
        resolved["sha256"] = sha256_val.get(f"{os_name}_{arch}", "")
    return resolved
|
|
||||||
|
|
||||||
|
|
||||||
def _sha256(path: Path) -> str:
|
|
||||||
h = hashlib.sha256()
|
|
||||||
with open(path, "rb") as f:
|
|
||||||
for chunk in iter(lambda: f.read(65536), b""):
|
|
||||||
h.update(chunk)
|
|
||||||
return h.hexdigest()
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_tool(name: str) -> Path:
    """Return path to cached binary, downloading + verifying if needed.

    Re-downloads automatically when the pinned version in _TOOL_SPECS changes.
    A <name>.version sidecar file records the version of the cached binary.

    Raises:
        ValueError: if *name* is not a known tool (via _resolve_spec).
        RuntimeError: if the download fails SHA256 verification, or the
            expected member cannot be extracted from a downloaded archive.
    """
    spec = _resolve_spec(name)
    CACHE_DIR.mkdir(parents=True, exist_ok=True)
    dest = CACHE_DIR / name
    version_file = CACHE_DIR / f"{name}.version"

    expected_sha = spec.get("sha256", "")
    expected_version = spec.get("version", "")

    # Reuse the cache only when both the recorded version and the (optional)
    # pinned hash check out.
    if dest.exists():
        version_ok = (
            not expected_version
            or (version_file.exists() and version_file.read_text().strip() == expected_version)
        )
        sha_ok = not expected_sha or _sha256(dest) == expected_sha
        if version_ok and sha_ok:
            return dest

    # Version mismatch or SHA mismatch — drop the stale entry and re-download.
    if dest.exists():
        dest.unlink()
    if version_file.exists():
        version_file.unlink()

    url = spec["url"]
    with urllib.request.urlopen(url) as resp:  # noqa: S310
        data = resp.read()

    extract_path = spec.get("extract")
    if extract_path:
        with tarfile.open(fileobj=io.BytesIO(data)) as tf:
            member = tf.getmember(extract_path)
            fobj = tf.extractfile(member)
            if fobj is None:
                # Directory / link entry: the archive layout changed upstream.
                raise RuntimeError(f"Cannot extract {extract_path} from {name} archive")
            binary_data = fobj.read()
    else:
        binary_data = data

    # Verify BEFORE writing so a corrupted download never lands in the cache
    # (the old code wrote the bad binary to disk first, then deleted it).
    if expected_sha:
        actual = hashlib.sha256(binary_data).hexdigest()
        if actual != expected_sha:
            raise RuntimeError(
                f"SHA256 mismatch for {name}: expected {expected_sha}, got {actual}"
            )

    dest.write_bytes(binary_data)
    dest.chmod(dest.stat().st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    version_file.write_text(expected_version)
    return dest
|
|
||||||
|
|
||||||
|
|
||||||
def run_tool(name: str, *args, **kwargs) -> subprocess.CompletedProcess:
    """Run a bundled tool, ensuring it is downloaded first.

    For kustomize: prepends CACHE_DIR to PATH so helm is found.
    """
    bin_path = ensure_tool(name)

    env = kwargs.pop("env", None)
    if env is None:
        env = os.environ.copy()

    if name == "kustomize":
        # kustomize may invoke helm; make sure it exists and resolves first.
        if "helm" in TOOLS:
            ensure_tool("helm")
        env["PATH"] = str(CACHE_DIR) + os.pathsep + env.get("PATH", "")

    command = [str(bin_path), *args]
    return subprocess.run(command, env=env, **kwargs)
|
|
||||||
528
sunbeam/users.py
528
sunbeam/users.py
@@ -1,528 +0,0 @@
|
|||||||
"""User management — Kratos identity operations via port-forwarded admin API."""
|
|
||||||
import json
|
|
||||||
import smtplib
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import urllib.request
|
|
||||||
import urllib.error
|
|
||||||
from contextlib import contextmanager
|
|
||||||
from email.message import EmailMessage
|
|
||||||
|
|
||||||
import sunbeam.kube as _kube_mod
|
|
||||||
from sunbeam.output import step, ok, warn, die, table
|
|
||||||
|
|
||||||
# Local port used when port-forwarding to the in-cluster SMTP service.
_SMTP_LOCAL_PORT = 10025
|
|
||||||
|
|
||||||
|
|
||||||
@contextmanager
def _port_forward(ns="ory", svc="kratos-admin", local_port=4434, remote_port=80):
    """Port-forward to a cluster service and yield the local base URL.

    Spawns `kubectl port-forward` as a child process and always tears it
    down on exit, even if the body raises.
    """
    proc = subprocess.Popen(
        ["kubectl", _kube_mod.context_arg(), "-n", ns, "port-forward",
         f"svc/{svc}", f"{local_port}:{remote_port}"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    # Wait for port-forward to be ready.
    # NOTE(review): a fixed sleep is racy on slow clusters — the first
    # request may still land before the tunnel is up.
    time.sleep(1.5)
    try:
        yield f"http://localhost:{local_port}"
    finally:
        proc.terminate()
        try:
            proc.wait(timeout=10)
        except subprocess.TimeoutExpired:
            # kubectl ignored SIGTERM — force-kill so we never hang here
            # or leak the tunnel process.
            proc.kill()
            proc.wait()
|
|
||||||
|
|
||||||
|
|
||||||
def _api(base_url, path, method="GET", body=None, prefix="/admin", ok_statuses=()):
    """Make a request to an admin API via port-forward.

    Returns the decoded JSON response, or None for an empty body or when
    the server responds with a status listed in *ok_statuses*. Any other
    HTTP error aborts via die().
    """
    payload = json.dumps(body).encode() if body is not None else None
    request = urllib.request.Request(
        f"{base_url}{prefix}{path}",
        data=payload,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
        method=method,
    )
    try:
        with urllib.request.urlopen(request) as resp:
            raw = resp.read()
    except urllib.error.HTTPError as exc:
        if exc.code in ok_statuses:
            return None
        die(f"API error {exc.code}: {exc.read().decode()}")
    else:
        return json.loads(raw) if raw else None
|
|
||||||
|
|
||||||
|
|
||||||
def _find_identity(base_url, target, required=True):
    """Find identity by email or ID. Returns identity dict or None if not required."""
    # A 36-char string with four dashes looks like a UUID — treat as an ID.
    looks_like_uuid = len(target) == 36 and target.count("-") == 4
    if looks_like_uuid:
        return _api(base_url, f"/identities/{target}")

    # Otherwise search by credentials identifier (email).
    matches = _api(base_url, f"/identities?credentials_identifier={target}&page_size=1")
    if isinstance(matches, list) and matches:
        return matches[0]
    if required:
        die(f"Identity not found: {target}")
    return None
|
|
||||||
|
|
||||||
|
|
||||||
def _identity_put_body(identity, state=None, **extra):
|
|
||||||
"""Build the PUT body for updating an identity, preserving all required fields."""
|
|
||||||
body = {
|
|
||||||
"schema_id": identity["schema_id"],
|
|
||||||
"traits": identity["traits"],
|
|
||||||
"state": state or identity.get("state", "active"),
|
|
||||||
"metadata_public": identity.get("metadata_public"),
|
|
||||||
"metadata_admin": identity.get("metadata_admin"),
|
|
||||||
}
|
|
||||||
body.update(extra)
|
|
||||||
return body
|
|
||||||
|
|
||||||
|
|
||||||
def _generate_recovery(base_url, identity_id):
    """Generate a 24h recovery code. Returns (link, code)."""
    payload = {"identity_id": identity_id, "expires_in": "24h"}
    response = _api(base_url, "/recovery/code", method="POST", body=payload)
    link = response.get("recovery_link", "")
    code = response.get("recovery_code", "")
    return link, code
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_list(search=""):
    """List identities, optionally filtered by *search* (credentials identifier)."""
    step("Listing identities...")
    with _port_forward() as base:
        # Fix: was an f-string with no placeholders (ruff F541).
        path = "/identities?page_size=20"
        if search:
            path += f"&credentials_identifier={search}"
        identities = _api(base, path)

        rows = []
        for i in identities or []:
            traits = i.get("traits", {})
            email = traits.get("email", "")
            # Support both employee (given_name/family_name) and default (name.first/last) schemas
            given = traits.get("given_name", "")
            family = traits.get("family_name", "")
            if given or family:
                display_name = f"{given} {family}".strip()
            else:
                name = traits.get("name", {})
                if isinstance(name, dict):
                    display_name = f"{name.get('first', '')} {name.get('last', '')}".strip()
                else:
                    display_name = str(name) if name else ""
            rows.append([i["id"][:8] + "...", email, display_name, i.get("state", "active")])

        print(table(rows, ["ID", "Email", "Name", "State"]))
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_get(target):
    """Print the full identity record for *target* (email or ID) as JSON."""
    step(f"Getting identity: {target}")
    with _port_forward() as base:
        record = _find_identity(base, target)
        print(json.dumps(record, indent=2))
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_create(email, name="", schema_id="default"):
    """Create an identity and print a 24h recovery link + code."""
    step(f"Creating identity: {email}")
    traits = {"email": email}
    if name:
        # Everything before the first space is the first name; the rest
        # (possibly empty) becomes the last name.
        first, _, last = name.partition(" ")
        traits["name"] = {"first": first, "last": last}

    payload = {
        "schema_id": schema_id,
        "traits": traits,
        "state": "active",
    }

    with _port_forward() as base:
        identity = _api(base, "/identities", method="POST", body=payload)
        ok(f"Created identity: {identity['id']}")
        link, code = _generate_recovery(base, identity["id"])

        ok("Recovery link (valid 24h):")
        print(link)
        ok("Recovery code (enter on the page above):")
        print(code)
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_delete(target):
    """Delete an identity after an interactive y/N confirmation."""
    step(f"Deleting identity: {target}")

    confirm = input(f"Delete identity '{target}'? This cannot be undone. [y/N] ").strip().lower()
    if confirm != "y":
        ok("Cancelled.")
        return

    with _port_forward() as base:
        identity = _find_identity(base, target)
        _api(base, f"/identities/{identity['id']}", method="DELETE")
        # Fix: was ok(f"Deleted.") — an f-string with no placeholders (F541).
        ok("Deleted.")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_recover(target):
    """Generate and print a fresh 24h recovery link + code for *target*."""
    step(f"Generating recovery link for: {target}")
    with _port_forward() as base:
        record = _find_identity(base, target)
        link, code = _generate_recovery(base, record["id"])
        ok("Recovery link (valid 24h):")
        print(link)
        ok("Recovery code (enter on the page above):")
        print(code)
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_disable(target):
    """Disable identity + revoke all Kratos sessions (emergency lockout).

    After this:
    - No new logins possible.
    - Existing Hydra OAuth2 tokens are revoked.
    - Django app sessions expire within SESSION_COOKIE_AGE (1h).
    """
    step(f"Disabling identity: {target}")
    with _port_forward() as base:
        record = _find_identity(base, target)
        identity_id = record["id"]
        # Mark the identity inactive, then drop every active Kratos session.
        update_body = _identity_put_body(record, state="inactive")
        _api(base, f"/identities/{identity_id}", method="PUT", body=update_body)
        _api(base, f"/identities/{identity_id}/sessions", method="DELETE")
        ok(f"Identity {identity_id[:8]}... disabled and all Kratos sessions revoked.")
        warn("App sessions (docs/people) expire within SESSION_COOKIE_AGE — currently 1h.")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_set_password(target, password):
    """Set (or reset) the password credential for an identity."""
    step(f"Setting password for: {target}")
    with _port_forward() as base:
        record = _find_identity(base, target)
        identity_id = record["id"]
        credentials = {"password": {"config": {"password": password}}}
        _api(base, f"/identities/{identity_id}", method="PUT",
             body=_identity_put_body(record, credentials=credentials))
        ok(f"Password set for {identity_id[:8]}...")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_enable(target):
    """Re-enable a previously disabled identity."""
    step(f"Enabling identity: {target}")
    with _port_forward() as base:
        record = _find_identity(base, target)
        identity_id = record["id"]
        _api(base, f"/identities/{identity_id}", method="PUT",
             body=_identity_put_body(record, state="active"))
        ok(f"Identity {identity_id[:8]}... re-enabled.")
|
|
||||||
|
|
||||||
|
|
||||||
def _send_welcome_email(domain, email, name, recovery_link, recovery_code,
                        job_title="", department=""):
    """Send a welcome email via cluster Postfix (port-forward to svc/postfix in lasuite).

    Args:
        domain: base domain used for the From address and all service URLs.
        email: recipient address.
        name: recipient display name ("" falls back to a generic greeting).
        recovery_link: Kratos recovery URL (valid 24h).
        recovery_code: code to enter on the recovery page.
        job_title: optional — mentioned in the intro only when *department*
            is also set.
        department: optional — see job_title.
    """
    greeting = f"Hi {name}" if name else "Hi"
    body_text = f"""{greeting},

Welcome to Sunbeam Studios!{f" You're joining as {job_title} in the {department} department." if job_title and department else ""} Your account has been created.

To set your password, open this link and enter the recovery code below:

Link: {recovery_link}
Code: {recovery_code}

This link expires in 24 hours.

Once signed in you will be prompted to set up 2FA (mandatory).

After that, head to https://auth.{domain}/settings to set up your
profile — add your name, profile picture, and any other details.

Your services:
Calendar: https://cal.{domain}
Drive: https://drive.{domain}
Mail: https://mail.{domain}
Meet: https://meet.{domain}
Projects: https://projects.{domain}
Source Code: https://src.{domain}

Messages (Matrix):
Download Element for your platform:
Desktop: https://element.io/download
iOS: https://apps.apple.com/app/element-messenger/id1083446067
Android: https://play.google.com/store/apps/details?id=im.vector.app

Setup:
1. Open Element and tap "Sign in"
2. Tap "Edit" next to the homeserver field (matrix.org)
3. Enter: https://messages.{domain}
4. Tap "Continue" — you'll be redirected to Sunbeam Studios SSO
5. Sign in with your {domain} email and password

\u2014 With Love & Warmth, Sunbeam Studios
"""
    msg = EmailMessage()
    msg["Subject"] = "Welcome to Sunbeam Studios — Set Your Password"
    msg["From"] = f"Sunbeam Studios <noreply@{domain}>"
    msg["To"] = email
    msg.set_content(body_text)

    # Deliver through the in-cluster Postfix rather than an external relay.
    with _port_forward(ns="lasuite", svc="postfix", local_port=_SMTP_LOCAL_PORT, remote_port=25):
        with smtplib.SMTP("localhost", _SMTP_LOCAL_PORT) as smtp:
            smtp.send_message(msg)
    ok(f"Welcome email sent to {email}")
|
|
||||||
|
|
||||||
|
|
||||||
def _next_employee_id(base_url):
    """Find the next sequential employee ID by scanning all employee identities."""
    identities = _api(base_url, "/identities?page_size=200") or []
    # Collect every numeric employee_id trait; non-numeric/missing are ignored.
    numbers = [
        int(eid)
        for eid in (i.get("traits", {}).get("employee_id", "") for i in identities)
        if eid and eid.isdigit()
    ]
    return str(max(numbers, default=0) + 1)
|
|
||||||
|
|
||||||
|
|
||||||
def _create_mailbox(email, name=""):
    """Create a mailbox in Messages via kubectl exec into the backend.

    NOTE(review): *email* is interpolated directly into the Django shell
    snippet below — assumed to be operator-supplied/trusted input; confirm
    before exposing this to untrusted callers.
    """
    local_part, domain_part = email.split("@", 1)
    # NOTE(review): display_name is computed but never used below.
    display_name = name or local_part
    step(f"Creating mailbox: {email}")
    result = _kube_mod.kube_out(
        "exec", "deployment/messages-backend", "-n", "lasuite",
        "-c", "messages-backend", "--",
        "python", "manage.py", "shell", "-c",
        f"""
mb, created = Mailbox.objects.get_or_create(
    local_part="{local_part}",
    domain=MailDomain.objects.get(name="{domain_part}"),
)
print("created" if created else "exists")
""",
    )
    # The shell snippet prints a marker word; parse it out of kubectl output.
    if "created" in (result or ""):
        ok(f"Mailbox {email} created.")
    elif "exists" in (result or ""):
        ok(f"Mailbox {email} already exists.")
    else:
        warn(f"Could not create mailbox (Messages backend may not be running): {result}")
|
|
||||||
|
|
||||||
|
|
||||||
def _delete_mailbox(email):
    """Delete a mailbox and associated Django user in Messages.

    NOTE(review): *email* is interpolated directly into the Django shell
    snippet — assumed operator-supplied/trusted; confirm before wider use.
    """
    local_part, domain_part = email.split("@", 1)
    step(f"Cleaning up mailbox: {email}")
    result = _kube_mod.kube_out(
        "exec", "deployment/messages-backend", "-n", "lasuite",
        "-c", "messages-backend", "--",
        "python", "manage.py", "shell", "-c",
        f"""
from django.contrib.auth import get_user_model
User = get_user_model()
# Delete mailbox + access + contacts
deleted = 0
for mb in Mailbox.objects.filter(local_part="{local_part}", domain__name="{domain_part}"):
    mb.delete()
    deleted += 1
# Delete Django user
try:
    u = User.objects.get(email="{email}")
    u.delete()
    deleted += 1
except User.DoesNotExist:
    pass
print(f"deleted {{deleted}}")
""",
    )
    # Marker word printed by the remote snippet signals success.
    if "deleted" in (result or ""):
        ok(f"Mailbox and user cleaned up.")
    else:
        warn(f"Could not clean up mailbox: {result}")
|
|
||||||
|
|
||||||
|
|
||||||
def _setup_projects_user(email, name=""):
    """Create a Projects (Planka) user and add them as manager of the Default project.

    Runs a Node snippet inside the projects deployment; the snippet talks to
    Planka's Postgres via knex using the pod's DATABASE_URL.
    NOTE(review): *email*/*name* are interpolated into the JS source —
    assumed operator-supplied/trusted input.
    """
    step(f"Setting up Projects user: {email}")
    js = f"""
const knex = require('knex')({{client: 'pg', connection: process.env.DATABASE_URL}});
async function go() {{
  // Create or find user
  let user = await knex('user_account').where({{email: '{email}'}}).first();
  if (!user) {{
    const id = Date.now().toString();
    await knex('user_account').insert({{
      id, email: '{email}', name: '{name}', password: '',
      is_admin: true, is_sso: true, language: 'en-US',
      created_at: new Date(), updated_at: new Date()
    }});
    user = {{id}};
    console.log('user_created');
  }} else {{
    console.log('user_exists');
  }}
  // Add to Default project
  const project = await knex('project').where({{name: 'Default'}}).first();
  if (project) {{
    const exists = await knex('project_manager').where({{project_id: project.id, user_id: user.id}}).first();
    if (!exists) {{
      await knex('project_manager').insert({{
        id: (Date.now()+1).toString(), project_id: project.id,
        user_id: user.id, created_at: new Date()
      }});
      console.log('manager_added');
    }} else {{
      console.log('manager_exists');
    }}
  }} else {{
    console.log('no_default_project');
  }}
}}
go().then(() => process.exit(0)).catch(e => {{ console.error(e.message); process.exit(1); }});
"""
    result = _kube_mod.kube_out(
        "exec", "deployment/projects", "-n", "lasuite",
        "-c", "projects", "--", "node", "-e", js,
    )
    # The JS snippet prints one of several marker words; branch on them.
    if "manager_added" in (result or "") or "manager_exists" in (result or ""):
        ok(f"Projects user ready.")
    elif "no_default_project" in (result or ""):
        warn("No Default project found in Projects — skip.")
    else:
        warn(f"Could not set up Projects user: {result}")
|
|
||||||
|
|
||||||
|
|
||||||
def _cleanup_projects_user(email):
    """Remove a user from Projects (Planka) — delete memberships and user record.

    Soft-deletes the user row (sets deleted_at) after removing board and
    project-manager memberships.
    NOTE(review): *email* is interpolated into the JS source — assumed
    operator-supplied/trusted input.
    """
    step(f"Cleaning up Projects user: {email}")
    js = f"""
const knex = require('knex')({{client: 'pg', connection: process.env.DATABASE_URL}});
async function go() {{
  const user = await knex('user_account').where({{email: '{email}'}}).first();
  if (!user) {{ console.log('not_found'); return; }}
  await knex('board_membership').where({{user_id: user.id}}).del();
  await knex('project_manager').where({{user_id: user.id}}).del();
  await knex('user_account').where({{id: user.id}}).update({{deleted_at: new Date()}});
  console.log('cleaned');
}}
go().then(() => process.exit(0)).catch(e => {{ console.error(e.message); process.exit(1); }});
"""
    result = _kube_mod.kube_out(
        "exec", "deployment/projects", "-n", "lasuite",
        "-c", "projects", "--", "node", "-e", js,
    )
    if "cleaned" in (result or ""):
        ok("Projects user cleaned up.")
    else:
        warn(f"Could not clean up Projects user: {result}")
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_onboard(email, name="", schema_id="employee", send_email=True,
                     notify="", job_title="", department="", office_location="",
                     hire_date="", manager=""):
    """Onboard a new user: create identity, generate recovery link, optionally send welcome email.

    Args:
        email: Primary (and login) email address for the identity.
        name: Optional full name; split on first space into given/family name.
        schema_id: Kratos identity schema; "employee" additionally auto-assigns
            an employee ID and accepts the HR-style trait arguments below.
        send_email: When True, send the welcome email with the recovery link.
        notify: Alternate recipient for the welcome email (defaults to *email*).
        job_title, department, office_location, hire_date, manager: Optional
            employee traits, stored only when non-empty.

    Idempotent-ish: if the identity already exists, only a fresh recovery link
    is generated — traits are not updated and app accounts are not re-created.
    """
    step(f"Onboarding: {email}")

    with _port_forward() as base:
        # required=False: returns a falsy value instead of failing when the
        # identity does not exist yet.
        existing = _find_identity(base, email, required=False)

        if existing:
            warn(f"Identity already exists: {existing['id'][:8]}...")
            step("Generating fresh recovery link...")
            iid = existing["id"]
            recovery_link, recovery_code = _generate_recovery(base, iid)
        else:
            traits = {"email": email}
            if name:
                # "Ada Lovelace" -> given "Ada", family "Lovelace";
                # single-word names get an empty family_name.
                parts = name.split(" ", 1)
                traits["given_name"] = parts[0]
                traits["family_name"] = parts[1] if len(parts) > 1 else ""

            # Auto-assign employee ID if not provided and using employee schema
            employee_id = ""
            if schema_id == "employee":
                employee_id = _next_employee_id(base)
                traits["employee_id"] = employee_id
            # Optional HR traits — only stored when supplied.
            if job_title:
                traits["job_title"] = job_title
            if department:
                traits["department"] = department
            if office_location:
                traits["office_location"] = office_location
            if hire_date:
                traits["hire_date"] = hire_date
            if manager:
                traits["manager"] = manager

            # Create the identity pre-verified so the user never receives a
            # separate verification email.
            identity = _api(base, "/identities", method="POST", body={
                "schema_id": schema_id,
                "traits": traits,
                "state": "active",
                "verifiable_addresses": [{
                    "value": email,
                    "verified": True,
                    "via": "email",
                }],
            })
            iid = identity["id"]
            ok(f"Created identity: {iid}")
            if employee_id:
                ok(f"Employee #{employee_id}")

            # Kratos ignores verifiable_addresses on POST — PATCH is required
            _api(base, f"/identities/{iid}", method="PATCH", body=[
                {"op": "replace", "path": "/verifiable_addresses/0/verified", "value": True},
                {"op": "replace", "path": "/verifiable_addresses/0/status", "value": "completed"},
            ])

            recovery_link, recovery_code = _generate_recovery(base, iid)

        # Provision app-level accounts
        # (only for brand-new identities — existing users keep theirs)
        if not existing:
            _create_mailbox(email, name)
            _setup_projects_user(email, name)

        if send_email:
            domain = _kube_mod.get_domain()
            # notify lets an admin receive the credentials when the new
            # mailbox is not reachable yet.
            recipient = notify or email
            _send_welcome_email(domain, recipient, name, recovery_link, recovery_code,
                                job_title=job_title, department=department)

        # Always echo the credentials to the operator as well.
        ok(f"Identity ID: {iid}")
        ok("Recovery link (valid 24h):")
        print(recovery_link)
        ok("Recovery code:")
        print(recovery_code)
|
|
||||||
|
|
||||||
|
|
||||||
def cmd_user_offboard(target):
    """Offboard a user: disable identity, revoke all Kratos + Hydra sessions.

    Args:
        target: Identifier for the identity (resolved by ``_find_identity``).

    Interactive: prompts for confirmation before making any change.
    Note: existing tokens/app sessions keep working until their TTLs expire
    (see the trailing warnings).
    """
    step(f"Offboarding: {target}")

    # Destructive operation — require an explicit "y" before proceeding.
    confirm = input(f"Offboard '{target}'? This will disable the account and revoke all sessions. [y/N] ").strip().lower()
    if confirm != "y":
        ok("Cancelled.")
        return

    with _port_forward() as base:
        identity = _find_identity(base, target)
        iid = identity["id"]

        # Step 1: flip the identity to inactive so no new logins succeed.
        step("Disabling identity...")
        _api(base, f"/identities/{iid}", method="PUT",
             body=_identity_put_body(identity, state="inactive"))
        ok(f"Identity {iid[:8]}... disabled.")

        # Step 2: kill live Kratos sessions. 404 = no sessions, which is fine.
        step("Revoking Kratos sessions...")
        _api(base, f"/identities/{iid}/sessions", method="DELETE", ok_statuses=(404,))
        ok("Kratos sessions revoked.")

        # Step 3: revoke OAuth2 consent (and associated tokens) in Hydra via
        # a second port-forward to the hydra-admin service.
        step("Revoking Hydra consent sessions...")
        with _port_forward(svc="hydra-admin", local_port=14445, remote_port=4445) as hydra_base:
            _api(hydra_base, f"/oauth2/auth/sessions/consent?subject={iid}&all=true",
                 method="DELETE", prefix="/admin", ok_statuses=(404,))
        ok("Hydra consent sessions revoked.")

        # Clean up Messages Django user and mailbox
        email = identity.get("traits", {}).get("email", "")
        if email:
            _delete_mailbox(email)
            _cleanup_projects_user(email)

        ok(f"Offboarding complete for {iid[:8]}...")
        warn("Existing access tokens expire within ~1h (Hydra TTL).")
        warn("App sessions (docs/people) expire within SESSION_COOKIE_AGE (~1h).")
|
|
||||||
16
vendor/chumsky/examples/sample.py
vendored
16
vendor/chumsky/examples/sample.py
vendored
@@ -1,16 +0,0 @@
|
|||||||
"""Draw a simple star-like figure with the turtle graphics module."""
import turtle

# Fix: the original passed three undefined names (foo, bar, baz) to
# turtle.Turtle(), raising NameError before drawing anything.
board = turtle.Turtle()

# Six segments, alternating sharp right and shallow left turns.
for i in range(6):
    board.forward(50)
    if i % 2 == 0:
        board.right(144)
    else:
        board.left(72)

turtle.done()
|
|
||||||
2250
vendor/unicode-width/scripts/unicode.py
vendored
2250
vendor/unicode-width/scripts/unicode.py
vendored
File diff suppressed because it is too large
Load Diff
142
vendor/zerocopy/ci/validate_auto_approvers.py
vendored
142
vendor/zerocopy/ci/validate_auto_approvers.py
vendored
@@ -1,142 +0,0 @@
|
|||||||
#!/usr/bin/env python3
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import posixpath
|
|
||||||
import os
|
|
||||||
|
|
||||||
# Exit codes
SUCCESS = 0  # every changed file and contributor passed the rules
NOT_APPROVED = 1  # a file is uncovered or a contributor is unauthorized
TECHNICAL_ERROR = 255  # bad config/payload JSON, invalid paths, or missing args
|
|
||||||
|
|
||||||
|
|
||||||
def main():
    """Validate a PR's changed files against per-directory auto-approver rules.

    Exits with SUCCESS, NOT_APPROVED, or TECHNICAL_ERROR (see module
    constants); all failures print GitHub Actions ``::error::`` annotations.
    """
    parser = argparse.ArgumentParser(
        description="Validate PR changes against auto-approver rules."
    )
    parser.add_argument(
        "--config",
        default=".github/auto-approvers.json",
        help="Path to the rules JSON.",
    )
    parser.add_argument(
        "--changed-files", help="Path to the fetched changed files JSON."
    )
    parser.add_argument(
        "--expected-count", type=int, help="Total number of files expected in the PR."
    )
    parser.add_argument(
        "--contributors", nargs="+", help="List of GitHub usernames to validate."
    )
    parser.add_argument(
        "--check-config",
        action="store_true",
        help="Only validate the configuration file and exit.",
    )
    args = parser.parse_args()

    # REGEX: Strict path structure, prevents absolute paths and weird characters
    # Requires one or more "segment/" groups, so every valid key ends with "/".
    VALID_PATH = re.compile(r"^([a-zA-Z0-9_.-]+/)+$")

    # Load and validate config
    try:
        with open(args.config) as f:
            rules = json.load(f)
    except FileNotFoundError:
        print(f"::error::❌ Config file not found at {args.config}")
        sys.exit(TECHNICAL_ERROR)
    except json.JSONDecodeError as e:
        print(f"::error::❌ Failed to parse config JSON: {e}")
        sys.exit(TECHNICAL_ERROR)

    # Normalize rules: every key must be a safe directory path, every user
    # list becomes lowercase strings for case-insensitive comparison.
    safe_rules = {}
    for directory, users in rules.items():
        if not isinstance(users, list):
            print(
                f"::error::❌ Users for '{directory}' must be a JSON array (list), not a string."
            )
            sys.exit(TECHNICAL_ERROR)

        # ".." is rejected explicitly since the regex's "." would allow it.
        if not VALID_PATH.match(directory) or ".." in directory.split("/"):
            print(f"::error::❌ Invalid config path: {directory}")
            sys.exit(TECHNICAL_ERROR)

        safe_rules[directory] = [str(u).lower() for u in users]

    if not args.check_config:
        # Validate that required arguments are present if not in --check-config mode
        if not (
            args.changed_files and args.expected_count is not None and args.contributors
        ):
            print(
                "::error::❌ Missing required arguments: --changed-files, --expected-count, and --contributors are required unless --check-config is used."
            )
            sys.exit(TECHNICAL_ERROR)

        # Load and flatten changed files
        try:
            with open(args.changed_files) as f:
                file_objects = json.load(f)
        except FileNotFoundError:
            print(f"::error::❌ Changed files JSON not found at {args.changed_files}")
            sys.exit(TECHNICAL_ERROR)
        except json.JSONDecodeError as e:
            print(f"::error::❌ Failed to parse changed files JSON: {e}")
            sys.exit(TECHNICAL_ERROR)

        # NOTE(review): this compares the number of top-level entries (not the
        # flattened path count) against --expected-count — confirm the fetcher
        # emits one list entry per file.
        if not file_objects or len(file_objects) != args.expected_count:
            print(
                f"::error::❌ File truncation mismatch or empty PR. Expected {args.expected_count}, got {len(file_objects) if file_objects else 0}."
            )
            sys.exit(TECHNICAL_ERROR)

        if not all(isinstance(obj, list) for obj in file_objects):
            print("::error::❌ Invalid payload format. Expected a list of lists.")
            sys.exit(TECHNICAL_ERROR)

        changed_files = [path for obj in file_objects for path in obj]

        # Validate every file against every contributor
        contributors = set(str(c).lower() for c in args.contributors)
        print(f"👥 Validating contributors: {', '.join(contributors)}")

        for raw_file_path in changed_files:
            # normpath collapses "a/./b" and "a/x/../b" so prefix matching
            # below cannot be tricked by path tricks in the payload.
            file_path = posixpath.normpath(raw_file_path)

            # Find the most specific (longest) matching directory rule.
            # Rule keys end with "/" (enforced above), so startswith matches
            # whole directory components.
            longest_match_dir = None
            for directory in safe_rules.keys():
                if file_path.startswith(directory):
                    if longest_match_dir is None or len(directory) > len(
                        longest_match_dir
                    ):
                        longest_match_dir = directory

            # First, explicitly fail if the file isn't covered by ANY rule.
            if not longest_match_dir:
                print(
                    f"::error::❌ File '{file_path}' does not fall under any configured auto-approve directory."
                )
                sys.exit(NOT_APPROVED)

            # Then, verify every contributor has access to that specific rule.
            for user in contributors:
                if user not in safe_rules[longest_match_dir]:
                    print(
                        f"::error::❌ Contributor @{user} not authorized for '{file_path}'."
                    )
                    sys.exit(NOT_APPROVED)

    if args.check_config:
        print("✅ Configuration is structurally valid")
    else:
        print("✅ Validation passed")

    sys.exit(SUCCESS)
|
|
||||||
|
|
||||||
|
|
||||||
# Script entry point — main() always terminates the process via sys.exit().
if __name__ == "__main__":
    main()
|
|
||||||
Reference in New Issue
Block a user