feat(cli): meet build/seed support, production kube tunnel, gitea OIDC bootstrap
- secrets.py: seed secret/meet (django-secret-key, application-jwt-secret-key) - images.py: add sunbeam build meet (meet-backend + meet-frontend from source) - kube.py: production SSH tunnel support, domain discovery from cluster, cmd_bao - gitea.py: configure Hydra as OIDC auth source; mark admin account as private - services.py: minor VSO sync status and services list fixes - users.py: add cmd_user_enable
This commit is contained in:
@@ -4,13 +4,12 @@ import json
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
from sunbeam.kube import kube, kube_out
|
||||
from sunbeam.kube import kube, kube_out, context_arg
|
||||
from sunbeam.output import step, ok, warn
|
||||
|
||||
LIMA_VM = "sunbeam"
|
||||
GITEA_ADMIN_USER = "gitea_admin"
|
||||
GITEA_ADMIN_EMAIL = "gitea@local.domain"
|
||||
K8S_CTX = ["--context=sunbeam"]
|
||||
|
||||
|
||||
def _capture_out(cmd, *, default=""):
|
||||
@@ -26,7 +25,7 @@ def _run(cmd, *, check=True, input=None, capture=False, cwd=None):
|
||||
|
||||
def _kube_ok(*args):
|
||||
return subprocess.run(
|
||||
["kubectl", *K8S_CTX, *args], capture_output=True
|
||||
["kubectl", context_arg(), *args], capture_output=True
|
||||
).returncode == 0
|
||||
|
||||
|
||||
@@ -141,7 +140,7 @@ def cmd_bootstrap(domain: str = "", gitea_admin_pass: str = ""):
|
||||
|
||||
def gitea_exec(*args):
|
||||
return subprocess.run(
|
||||
["kubectl", *K8S_CTX, "-n", "devtools", "exec", pod, "-c",
|
||||
["kubectl", context_arg(), "-n", "devtools", "exec", pod, "-c",
|
||||
"gitea", "--"] + list(args),
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
@@ -170,6 +169,18 @@ def cmd_bootstrap(domain: str = "", gitea_admin_pass: str = ""):
|
||||
except json.JSONDecodeError:
|
||||
return {}
|
||||
|
||||
# Mark admin account as private so it doesn't appear in public listings.
|
||||
r = api("PATCH", f"/admin/users/{GITEA_ADMIN_USER}", {
|
||||
"source_id": 0,
|
||||
"login_name": GITEA_ADMIN_USER,
|
||||
"email": GITEA_ADMIN_EMAIL,
|
||||
"visibility": "private",
|
||||
})
|
||||
if r.get("login") == GITEA_ADMIN_USER:
|
||||
ok(f"Admin '{GITEA_ADMIN_USER}' marked as private.")
|
||||
else:
|
||||
warn(f"Could not set admin visibility: {r}")
|
||||
|
||||
for org_name, visibility, desc in [
|
||||
("studio", "public", "Public source code"),
|
||||
("internal", "private", "Internal tools and services"),
|
||||
@@ -186,5 +197,63 @@ def cmd_bootstrap(domain: str = "", gitea_admin_pass: str = ""):
|
||||
else:
|
||||
warn(f"Org '{org_name}': {result.get('message', result)}")
|
||||
|
||||
# Configure Hydra as the OIDC authentication source.
|
||||
# Source name "Sunbeam" determines the callback URL:
|
||||
# /user/oauth2/Sunbeam/callback (must match oidc-clients.yaml redirectUri)
|
||||
auth_list = gitea_exec("gitea", "admin", "auth", "list")
|
||||
# Parse tab-separated rows: ID\tName\tType\tEnabled
|
||||
existing_id = None
|
||||
exact_ok = False
|
||||
for line in auth_list.stdout.splitlines()[1:]: # skip header
|
||||
parts = line.split("\t")
|
||||
if len(parts) < 2:
|
||||
continue
|
||||
src_id, src_name = parts[0].strip(), parts[1].strip()
|
||||
if src_name == "Sunbeam":
|
||||
exact_ok = True
|
||||
break
|
||||
if src_name in ("Sunbeam Auth",) or (src_name.startswith("Sunbeam") and parts[2].strip() == "OAuth2"):
|
||||
existing_id = src_id
|
||||
|
||||
if exact_ok:
|
||||
ok("OIDC auth source 'Sunbeam' already present.")
|
||||
elif existing_id:
|
||||
# Wrong name (e.g. "Sunbeam Auth") — rename in-place to fix callback URL
|
||||
r = gitea_exec("gitea", "admin", "auth", "update-oauth",
|
||||
"--id", existing_id, "--name", "Sunbeam")
|
||||
if r.returncode == 0:
|
||||
ok(f"Renamed OIDC auth source (id={existing_id}) to 'Sunbeam'.")
|
||||
else:
|
||||
warn(f"Rename failed: {r.stderr.strip()}")
|
||||
else:
|
||||
oidc_id_b64 = kube_out("-n", "lasuite", "get", "secret", "oidc-gitea",
|
||||
"-o=jsonpath={.data.CLIENT_ID}")
|
||||
oidc_secret_b64 = kube_out("-n", "lasuite", "get", "secret", "oidc-gitea",
|
||||
"-o=jsonpath={.data.CLIENT_SECRET}")
|
||||
if oidc_id_b64 and oidc_secret_b64:
|
||||
oidc_id = base64.b64decode(oidc_id_b64).decode()
|
||||
oidc_sec = base64.b64decode(oidc_secret_b64).decode()
|
||||
discover_url = (
|
||||
"http://hydra-public.ory.svc.cluster.local:4444"
|
||||
"/.well-known/openid-configuration"
|
||||
)
|
||||
r = gitea_exec(
|
||||
"gitea", "admin", "auth", "add-oauth",
|
||||
"--name", "Sunbeam",
|
||||
"--provider", "openidConnect",
|
||||
"--key", oidc_id,
|
||||
"--secret", oidc_sec,
|
||||
"--auto-discover-url", discover_url,
|
||||
"--scopes", "openid",
|
||||
"--scopes", "email",
|
||||
"--scopes", "profile",
|
||||
)
|
||||
if r.returncode == 0:
|
||||
ok("OIDC auth source 'Sunbeam' configured.")
|
||||
else:
|
||||
warn(f"OIDC auth source config failed: {r.stderr.strip()}")
|
||||
else:
|
||||
warn("oidc-gitea secret not found -- OIDC auth source not configured.")
|
||||
|
||||
ok(f"Gitea ready -- https://src.{domain} ({GITEA_ADMIN_USER} / <from "
|
||||
f"openbao>)")
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
"""Image mirroring — patch amd64-only images + push to Gitea registry."""
|
||||
import base64
|
||||
import os
|
||||
import shutil
|
||||
import subprocess
|
||||
import sys
|
||||
@@ -200,6 +201,43 @@ def _run(cmd, *, check=True, input=None, capture=False, cwd=None):
|
||||
capture_output=capture, cwd=cwd)
|
||||
|
||||
|
||||
def _seed_and_push(image: str, admin_pass: str):
|
||||
"""Pre-seed a locally-built Docker image into k3s containerd, then push
|
||||
to the Gitea registry via 'ctr images push' inside the Lima VM.
|
||||
|
||||
This avoids 'docker push' entirely — the Lima k3s VM's containerd already
|
||||
trusts the mkcert CA (used for image pulls from Gitea), so ctr push works
|
||||
where docker push would hit a TLS cert verification error on the Mac.
|
||||
"""
|
||||
ok("Pre-seeding image into k3s containerd...")
|
||||
save = subprocess.Popen(["docker", "save", image], stdout=subprocess.PIPE)
|
||||
ctr = subprocess.run(
|
||||
["limactl", "shell", LIMA_VM, "--",
|
||||
"sudo", "ctr", "-n", "k8s.io", "images", "import", "-"],
|
||||
stdin=save.stdout,
|
||||
capture_output=True,
|
||||
)
|
||||
save.stdout.close()
|
||||
save.wait()
|
||||
if ctr.returncode != 0:
|
||||
warn(f"containerd import failed:\n{ctr.stderr.decode().strip()}")
|
||||
else:
|
||||
ok("Image pre-seeded.")
|
||||
|
||||
ok("Pushing to Gitea registry (via ctr in Lima VM)...")
|
||||
push = subprocess.run(
|
||||
["limactl", "shell", LIMA_VM, "--",
|
||||
"sudo", "ctr", "-n", "k8s.io", "images", "push",
|
||||
"--user", f"{GITEA_ADMIN_USER}:{admin_pass}", image],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
if push.returncode != 0:
|
||||
warn(f"ctr push failed (image is pre-seeded; cluster will work without push):\n"
|
||||
f"{push.stderr.strip()}")
|
||||
else:
|
||||
ok(f"Pushed {image}")
|
||||
|
||||
|
||||
def cmd_mirror(domain: str = "", gitea_admin_pass: str = ""):
|
||||
"""Patch amd64-only images with an arm64 alias and push to Gitea registry."""
|
||||
if not domain:
|
||||
@@ -271,19 +309,84 @@ def _trust_registry_in_docker_vm(registry: str):
|
||||
ok(f"mkcert CA installed in Docker VM for {registry}.")
|
||||
|
||||
|
||||
def cmd_build(what: str):
|
||||
"""Build and push an image. Supports 'proxy', 'integration', and 'kratos-admin'."""
|
||||
def cmd_build(what: str, push: bool = False, deploy: bool = False):
|
||||
"""Build an image. Pass push=True to push, deploy=True to also apply + rollout."""
|
||||
if what == "proxy":
|
||||
_build_proxy()
|
||||
_build_proxy(push=push, deploy=deploy)
|
||||
elif what == "integration":
|
||||
_build_integration()
|
||||
_build_integration(push=push, deploy=deploy)
|
||||
elif what == "kratos-admin":
|
||||
_build_kratos_admin()
|
||||
_build_kratos_admin(push=push, deploy=deploy)
|
||||
elif what == "meet":
|
||||
_build_meet(push=push, deploy=deploy)
|
||||
elif what == "docs-frontend":
|
||||
_build_la_suite_frontend(
|
||||
app="docs-frontend",
|
||||
repo_dir=Path(__file__).resolve().parents[2] / "docs",
|
||||
workspace_rel="src/frontend",
|
||||
app_rel="src/frontend/apps/impress",
|
||||
dockerfile_rel="src/frontend/Dockerfile",
|
||||
image_name="impress-frontend",
|
||||
deployment="docs-frontend",
|
||||
namespace="lasuite",
|
||||
push=push,
|
||||
deploy=deploy,
|
||||
)
|
||||
elif what == "people-frontend":
|
||||
_build_la_suite_frontend(
|
||||
app="people-frontend",
|
||||
repo_dir=Path(__file__).resolve().parents[2] / "people",
|
||||
workspace_rel="src/frontend",
|
||||
app_rel="src/frontend/apps/desk",
|
||||
dockerfile_rel="src/frontend/Dockerfile",
|
||||
image_name="people-frontend",
|
||||
deployment="people-frontend",
|
||||
namespace="lasuite",
|
||||
push=push,
|
||||
deploy=deploy,
|
||||
)
|
||||
else:
|
||||
die(f"Unknown build target: {what}")
|
||||
|
||||
|
||||
def _build_proxy():
|
||||
|
||||
def _seed_image_production(image: str, ssh_host: str, admin_pass: str):
|
||||
"""Build linux/amd64 image, pipe into production containerd via SSH, then push to Gitea."""
|
||||
ok("Importing image into production containerd via SSH pipe...")
|
||||
save = subprocess.Popen(["docker", "save", image], stdout=subprocess.PIPE)
|
||||
import_cmd = f"sudo ctr -n k8s.io images import -"
|
||||
ctr = subprocess.run(
|
||||
["ssh", "-p", "2222", "-o", "StrictHostKeyChecking=no", ssh_host, import_cmd],
|
||||
stdin=save.stdout,
|
||||
capture_output=True,
|
||||
)
|
||||
save.stdout.close()
|
||||
save.wait()
|
||||
if ctr.returncode != 0:
|
||||
warn(f"containerd import failed:\n{ctr.stderr.decode().strip()}")
|
||||
return False
|
||||
ok("Image imported into production containerd.")
|
||||
|
||||
ok("Pushing image to Gitea registry (via ctr on production server)...")
|
||||
push = subprocess.run(
|
||||
["ssh", "-p", "2222", "-o", "StrictHostKeyChecking=no", ssh_host,
|
||||
f"sudo ctr -n k8s.io images push --user {GITEA_ADMIN_USER}:{admin_pass} {image}"],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
if push.returncode != 0:
|
||||
warn(f"ctr push failed (image is pre-seeded; cluster will start):\n{push.stderr.strip()}")
|
||||
else:
|
||||
ok(f"Pushed {image} to Gitea registry.")
|
||||
return True
|
||||
|
||||
|
||||
def _build_proxy(push: bool = False, deploy: bool = False):
|
||||
from sunbeam import kube as _kube
|
||||
is_prod = bool(_kube._ssh_host)
|
||||
|
||||
if is_prod:
|
||||
domain = os.environ.get("SUNBEAM_DOMAIN", "sunbeam.pt")
|
||||
else:
|
||||
ip = get_lima_ip()
|
||||
domain = f"{ip}.sslip.io"
|
||||
|
||||
@@ -302,63 +405,94 @@ def _build_proxy():
|
||||
die(f"Proxy source not found at {proxy_dir}")
|
||||
|
||||
registry = f"src.{domain}"
|
||||
image = f"{registry}/studio/sunbeam-proxy:latest"
|
||||
image = f"{registry}/studio/proxy:latest"
|
||||
|
||||
step(f"Building sunbeam-proxy -> {image} ...")
|
||||
|
||||
# Ensure the Lima Docker VM trusts our mkcert CA for this registry.
|
||||
if is_prod:
|
||||
# Production (x86_64 server): cross-compile on the Mac arm64 host using
|
||||
# x86_64-linux-musl-gcc (brew install filosottile/musl-cross/musl-cross),
|
||||
# then package the pre-built static binary into a minimal Docker image.
|
||||
# This avoids QEMU x86_64 emulation which crashes rustc (SIGSEGV).
|
||||
musl_gcc = shutil.which("x86_64-linux-musl-gcc")
|
||||
if not musl_gcc:
|
||||
die(
|
||||
"x86_64-linux-musl-gcc not found.\n"
|
||||
"Install: brew install filosottile/musl-cross/musl-cross"
|
||||
)
|
||||
ok("Cross-compiling sunbeam-proxy for x86_64-musl (native, no QEMU)...")
|
||||
import os as _os
|
||||
env = dict(_os.environ)
|
||||
env["CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER"] = musl_gcc
|
||||
env["CC_x86_64_unknown_linux_musl"] = musl_gcc
|
||||
env["RUSTFLAGS"] = "-C target-feature=+crt-static"
|
||||
r = subprocess.run(
|
||||
["cargo", "build", "--release", "--target", "x86_64-unknown-linux-musl"],
|
||||
cwd=str(proxy_dir),
|
||||
env=env,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die("cargo build failed.")
|
||||
binary = proxy_dir / "target" / "x86_64-unknown-linux-musl" / "release" / "sunbeam-proxy"
|
||||
|
||||
# Download tini static binary for amd64 if not cached
|
||||
import tempfile, urllib.request
|
||||
tmpdir = Path(tempfile.mkdtemp(prefix="proxy-pkg-"))
|
||||
tini_path = tmpdir / "tini"
|
||||
ok("Downloading tini-static-amd64...")
|
||||
urllib.request.urlretrieve(
|
||||
"https://github.com/krallin/tini/releases/download/v0.19.0/tini-static-amd64",
|
||||
str(tini_path),
|
||||
)
|
||||
tini_path.chmod(0o755)
|
||||
shutil.copy(str(binary), str(tmpdir / "sunbeam-proxy"))
|
||||
(tmpdir / "Dockerfile").write_text(
|
||||
"FROM cgr.dev/chainguard/static:latest\n"
|
||||
"COPY tini /tini\n"
|
||||
"COPY sunbeam-proxy /usr/local/bin/sunbeam-proxy\n"
|
||||
"EXPOSE 80 443\n"
|
||||
'ENTRYPOINT ["/tini", "--", "/usr/local/bin/sunbeam-proxy"]\n'
|
||||
)
|
||||
ok("Packaging into Docker image (linux/amd64, pre-built binary)...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", "linux/amd64",
|
||||
"--provenance=false",
|
||||
"--load",
|
||||
"-t", image,
|
||||
str(tmpdir)])
|
||||
shutil.rmtree(str(tmpdir), ignore_errors=True)
|
||||
if push:
|
||||
_seed_image_production(image, _kube._ssh_host, admin_pass)
|
||||
else:
|
||||
# Local Lima dev: build linux/arm64 natively.
|
||||
_trust_registry_in_docker_vm(registry)
|
||||
|
||||
# Authenticate Docker with Gitea before the build so --push succeeds.
|
||||
ok("Logging in to Gitea registry...")
|
||||
r = subprocess.run(
|
||||
["docker", "login", registry,
|
||||
["limactl", "shell", LIMA_DOCKER_VM, "--",
|
||||
"docker", "login", registry,
|
||||
"--username", GITEA_ADMIN_USER, "--password-stdin"],
|
||||
input=admin_pass, text=True, capture_output=True,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die(f"docker login failed:\n{r.stderr.strip()}")
|
||||
|
||||
ok("Building image (linux/arm64, push)...")
|
||||
ok("Building image (linux/arm64)...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", "linux/arm64",
|
||||
"--push",
|
||||
"--provenance=false",
|
||||
"--load",
|
||||
"-t", image,
|
||||
str(proxy_dir)])
|
||||
|
||||
ok(f"Pushed {image}")
|
||||
if push:
|
||||
ok("Pushing image...")
|
||||
_run(["docker", "push", image])
|
||||
_seed_and_push(image, admin_pass)
|
||||
|
||||
# On single-node clusters, pre-seed the image directly into k3s containerd.
|
||||
# This breaks the circular dependency: when the proxy restarts, Pingora goes
|
||||
# down before the new pod starts, making the Gitea registry (behind Pingora)
|
||||
# unreachable for the image pull. By importing into containerd first,
|
||||
# imagePullPolicy: IfNotPresent means k8s never needs to contact the registry.
|
||||
nodes = kube_out("get", "nodes", "-o=jsonpath={.items[*].metadata.name}").split()
|
||||
if len(nodes) == 1:
|
||||
ok("Single-node cluster: pre-seeding image into k3s containerd...")
|
||||
save = subprocess.Popen(
|
||||
["docker", "save", image],
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
ctr = subprocess.run(
|
||||
["limactl", "shell", LIMA_VM, "--",
|
||||
"sudo", "ctr", "-n", "k8s.io", "images", "import", "-"],
|
||||
stdin=save.stdout,
|
||||
capture_output=True,
|
||||
)
|
||||
save.stdout.close()
|
||||
save.wait()
|
||||
if ctr.returncode != 0:
|
||||
warn(f"containerd import failed (will fall back to registry pull):\n"
|
||||
f"{ctr.stderr.decode().strip()}")
|
||||
else:
|
||||
ok("Image pre-seeded.")
|
||||
|
||||
# Apply manifests so the Deployment spec reflects the Gitea image ref.
|
||||
if deploy:
|
||||
from sunbeam.manifests import cmd_apply
|
||||
cmd_apply()
|
||||
|
||||
# Roll the pingora pod.
|
||||
cmd_apply(env="production" if is_prod else "local", domain=domain)
|
||||
ok("Rolling pingora deployment...")
|
||||
kube("rollout", "restart", "deployment/pingora", "-n", "ingress")
|
||||
kube("rollout", "status", "deployment/pingora", "-n", "ingress",
|
||||
@@ -366,7 +500,13 @@ def _build_proxy():
|
||||
ok("Pingora redeployed.")
|
||||
|
||||
|
||||
def _build_integration():
|
||||
def _build_integration(push: bool = False, deploy: bool = False):
|
||||
from sunbeam import kube as _kube
|
||||
is_prod = bool(_kube._ssh_host)
|
||||
|
||||
if is_prod:
|
||||
domain = os.environ.get("SUNBEAM_DOMAIN", "sunbeam.pt")
|
||||
else:
|
||||
ip = get_lima_ip()
|
||||
domain = f"{ip}.sslip.io"
|
||||
|
||||
@@ -397,31 +537,21 @@ def _build_integration():
|
||||
|
||||
step(f"Building integration -> {image} ...")
|
||||
|
||||
_trust_registry_in_docker_vm(registry)
|
||||
platform = "linux/amd64" if is_prod else "linux/arm64"
|
||||
|
||||
ok("Logging in to Gitea registry...")
|
||||
r = subprocess.run(
|
||||
["docker", "login", registry,
|
||||
"--username", GITEA_ADMIN_USER, "--password-stdin"],
|
||||
input=admin_pass, text=True, capture_output=True,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die(f"docker login failed:\n{r.stderr.strip()}")
|
||||
|
||||
ok("Building image (linux/arm64, push)...")
|
||||
# --file points to integration-service/Dockerfile; context is sunbeam/ root.
|
||||
# Docker resolves .dockerignore relative to the build context root, but since
|
||||
# --file is outside the context root we provide it explicitly via env or flag.
|
||||
# Workaround: copy .dockerignore to sunbeam/ root temporarily, then remove.
|
||||
# Copy .dockerignore to context root temporarily if needed.
|
||||
root_ignore = sunbeam_dir / ".dockerignore"
|
||||
copied_ignore = False
|
||||
if not root_ignore.exists():
|
||||
if not root_ignore.exists() and dockerignore.exists():
|
||||
shutil.copy(str(dockerignore), str(root_ignore))
|
||||
copied_ignore = True
|
||||
try:
|
||||
ok(f"Building image ({platform})...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", "linux/arm64",
|
||||
"--push",
|
||||
"--platform", platform,
|
||||
"--provenance=false",
|
||||
"--load",
|
||||
"-f", str(dockerfile),
|
||||
"-t", image,
|
||||
str(sunbeam_dir)])
|
||||
@@ -429,33 +559,25 @@ def _build_integration():
|
||||
if copied_ignore and root_ignore.exists():
|
||||
root_ignore.unlink()
|
||||
|
||||
ok(f"Pushed {image}")
|
||||
|
||||
# Pre-seed into k3s containerd (same pattern as other custom images).
|
||||
nodes = kube_out("get", "nodes", "-o=jsonpath={.items[*].metadata.name}").split()
|
||||
if len(nodes) == 1:
|
||||
ok("Single-node cluster: pre-seeding image into k3s containerd...")
|
||||
save = subprocess.Popen(
|
||||
["docker", "save", image],
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
ctr = subprocess.run(
|
||||
["limactl", "shell", LIMA_VM, "--",
|
||||
"sudo", "ctr", "-n", "k8s.io", "images", "import", "-"],
|
||||
stdin=save.stdout,
|
||||
capture_output=True,
|
||||
)
|
||||
save.stdout.close()
|
||||
save.wait()
|
||||
if ctr.returncode != 0:
|
||||
warn(f"containerd import failed (will fall back to registry pull):\n"
|
||||
f"{ctr.stderr.decode().strip()}")
|
||||
if push:
|
||||
if is_prod:
|
||||
_seed_image_production(image, _kube._ssh_host, admin_pass)
|
||||
else:
|
||||
ok("Image pre-seeded.")
|
||||
_trust_registry_in_docker_vm(registry)
|
||||
ok("Logging in to Gitea registry...")
|
||||
r = subprocess.run(
|
||||
["limactl", "shell", LIMA_DOCKER_VM, "--",
|
||||
"docker", "login", registry,
|
||||
"--username", GITEA_ADMIN_USER, "--password-stdin"],
|
||||
input=admin_pass, text=True, capture_output=True,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die(f"docker login failed:\n{r.stderr.strip()}")
|
||||
_seed_and_push(image, admin_pass)
|
||||
|
||||
if deploy:
|
||||
from sunbeam.manifests import cmd_apply
|
||||
cmd_apply()
|
||||
|
||||
cmd_apply(env="production" if is_prod else "local", domain=domain)
|
||||
ok("Rolling integration deployment...")
|
||||
kube("rollout", "restart", "deployment/integration", "-n", "lasuite")
|
||||
kube("rollout", "status", "deployment/integration", "-n", "lasuite",
|
||||
@@ -463,7 +585,138 @@ def _build_integration():
|
||||
ok("Integration redeployed.")
|
||||
|
||||
|
||||
def _build_kratos_admin():
|
||||
def _build_kratos_admin(push: bool = False, deploy: bool = False):
|
||||
from sunbeam import kube as _kube
|
||||
|
||||
is_prod = bool(_kube._ssh_host)
|
||||
|
||||
b64 = kube_out("-n", "devtools", "get", "secret",
|
||||
"gitea-admin-credentials", "-o=jsonpath={.data.password}")
|
||||
if not b64:
|
||||
die("gitea-admin-credentials secret not found -- run seed first.")
|
||||
admin_pass = base64.b64decode(b64).decode()
|
||||
|
||||
# kratos-admin source
|
||||
kratos_admin_dir = Path(__file__).resolve().parents[2] / "kratos-admin"
|
||||
if not kratos_admin_dir.is_dir():
|
||||
die(f"kratos-admin source not found at {kratos_admin_dir}")
|
||||
|
||||
if is_prod:
|
||||
domain = os.environ.get("SUNBEAM_DOMAIN", "sunbeam.pt")
|
||||
registry = f"src.{domain}"
|
||||
image = f"{registry}/studio/kratos-admin-ui:latest"
|
||||
ssh_host = _kube._ssh_host
|
||||
|
||||
step(f"Building kratos-admin-ui (linux/amd64, native cross-compile) -> {image} ...")
|
||||
|
||||
if not shutil.which("deno"):
|
||||
die("deno not found — install Deno: https://deno.land/")
|
||||
if not shutil.which("npm"):
|
||||
die("npm not found — install Node.js")
|
||||
|
||||
ok("Building UI assets (npm run build)...")
|
||||
_run(["npm", "run", "build"], cwd=str(kratos_admin_dir / "ui"))
|
||||
|
||||
ok("Cross-compiling Deno binary for x86_64-linux-gnu...")
|
||||
_run([
|
||||
"deno", "compile",
|
||||
"--target", "x86_64-unknown-linux-gnu",
|
||||
"--allow-net", "--allow-read", "--allow-env",
|
||||
"--include", "ui/dist",
|
||||
"-o", "kratos-admin-x86_64",
|
||||
"main.ts",
|
||||
], cwd=str(kratos_admin_dir))
|
||||
|
||||
bin_path = kratos_admin_dir / "kratos-admin-x86_64"
|
||||
if not bin_path.exists():
|
||||
die("Deno cross-compilation produced no binary")
|
||||
|
||||
# Build minimal Docker image
|
||||
pkg_dir = Path("/tmp/kratos-admin-pkg")
|
||||
pkg_dir.mkdir(exist_ok=True)
|
||||
import shutil as _sh
|
||||
_sh.copy2(str(bin_path), str(pkg_dir / "kratos-admin"))
|
||||
# Copy ui/dist for serveStatic (binary has it embedded but keep external copy for fallback)
|
||||
(pkg_dir / "dockerfile").write_text(
|
||||
"FROM gcr.io/distroless/cc-debian12:nonroot\n"
|
||||
"WORKDIR /app\n"
|
||||
"COPY kratos-admin ./\n"
|
||||
"EXPOSE 3000\n"
|
||||
'ENTRYPOINT ["/app/kratos-admin"]\n'
|
||||
)
|
||||
|
||||
ok("Building Docker image...")
|
||||
_run([
|
||||
"docker", "buildx", "build",
|
||||
"--platform", "linux/amd64",
|
||||
"--provenance=false",
|
||||
"--load",
|
||||
"-f", str(pkg_dir / "dockerfile"),
|
||||
"-t", image,
|
||||
str(pkg_dir),
|
||||
])
|
||||
|
||||
if push:
|
||||
_seed_image_production(image, ssh_host, admin_pass)
|
||||
|
||||
if deploy:
|
||||
from sunbeam.manifests import cmd_apply
|
||||
cmd_apply(env="production", domain=domain)
|
||||
|
||||
else:
|
||||
ip = get_lima_ip()
|
||||
domain = f"{ip}.sslip.io"
|
||||
registry = f"src.{domain}"
|
||||
image = f"{registry}/studio/kratos-admin-ui:latest"
|
||||
|
||||
if not shutil.which("docker"):
|
||||
die("docker not found -- is the Lima docker VM running?")
|
||||
|
||||
step(f"Building kratos-admin-ui -> {image} ...")
|
||||
|
||||
_trust_registry_in_docker_vm(registry)
|
||||
|
||||
ok("Logging in to Gitea registry...")
|
||||
r = subprocess.run(
|
||||
["limactl", "shell", LIMA_DOCKER_VM, "--",
|
||||
"docker", "login", registry,
|
||||
"--username", GITEA_ADMIN_USER, "--password-stdin"],
|
||||
input=admin_pass, text=True, capture_output=True,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die(f"docker login failed:\n{r.stderr.strip()}")
|
||||
|
||||
ok("Building image (linux/arm64)...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", "linux/arm64",
|
||||
"--provenance=false",
|
||||
"--load",
|
||||
"-t", image,
|
||||
str(kratos_admin_dir)])
|
||||
|
||||
if push:
|
||||
_seed_and_push(image, admin_pass)
|
||||
|
||||
if deploy:
|
||||
from sunbeam.manifests import cmd_apply
|
||||
cmd_apply()
|
||||
|
||||
if deploy:
|
||||
ok("Rolling kratos-admin-ui deployment...")
|
||||
kube("rollout", "restart", "deployment/kratos-admin-ui", "-n", "ory")
|
||||
kube("rollout", "status", "deployment/kratos-admin-ui", "-n", "ory",
|
||||
"--timeout=120s")
|
||||
ok("kratos-admin-ui redeployed.")
|
||||
|
||||
|
||||
def _build_meet(push: bool = False, deploy: bool = False):
|
||||
"""Build meet-backend and meet-frontend images from source."""
|
||||
from sunbeam import kube as _kube
|
||||
is_prod = bool(_kube._ssh_host)
|
||||
|
||||
if is_prod:
|
||||
domain = os.environ.get("SUNBEAM_DOMAIN", "sunbeam.pt")
|
||||
else:
|
||||
ip = get_lima_ip()
|
||||
domain = f"{ip}.sslip.io"
|
||||
|
||||
@@ -476,62 +729,162 @@ def _build_kratos_admin():
|
||||
if not shutil.which("docker"):
|
||||
die("docker not found -- is the Lima docker VM running?")
|
||||
|
||||
# kratos-admin source
|
||||
kratos_admin_dir = Path(__file__).resolve().parents[2] / "kratos-admin"
|
||||
if not kratos_admin_dir.is_dir():
|
||||
die(f"kratos-admin source not found at {kratos_admin_dir}")
|
||||
meet_dir = Path(__file__).resolve().parents[2] / "meet"
|
||||
if not meet_dir.is_dir():
|
||||
die(f"meet source not found at {meet_dir}")
|
||||
|
||||
registry = f"src.{domain}"
|
||||
image = f"{registry}/studio/kratos-admin-ui:latest"
|
||||
|
||||
step(f"Building kratos-admin-ui -> {image} ...")
|
||||
backend_image = f"{registry}/studio/meet-backend:latest"
|
||||
frontend_image = f"{registry}/studio/meet-frontend:latest"
|
||||
platform = "linux/amd64" if is_prod else "linux/arm64"
|
||||
|
||||
if not is_prod:
|
||||
_trust_registry_in_docker_vm(registry)
|
||||
|
||||
ok("Logging in to Gitea registry...")
|
||||
r = subprocess.run(
|
||||
["docker", "login", registry,
|
||||
["limactl", "shell", LIMA_DOCKER_VM, "--",
|
||||
"docker", "login", registry,
|
||||
"--username", GITEA_ADMIN_USER, "--password-stdin"],
|
||||
input=admin_pass, text=True, capture_output=True,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die(f"docker login failed:\n{r.stderr.strip()}")
|
||||
|
||||
ok("Building image (linux/arm64, push)...")
|
||||
step(f"Building meet-backend -> {backend_image} ...")
|
||||
ok(f"Building image ({platform}, backend-production target)...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", platform,
|
||||
"--provenance=false",
|
||||
"--target", "backend-production",
|
||||
"--load",
|
||||
"-t", backend_image,
|
||||
str(meet_dir)])
|
||||
|
||||
if push:
|
||||
if is_prod:
|
||||
_seed_image_production(backend_image, _kube._ssh_host, admin_pass)
|
||||
else:
|
||||
_seed_and_push(backend_image, admin_pass)
|
||||
|
||||
step(f"Building meet-frontend -> {frontend_image} ...")
|
||||
frontend_dockerfile = meet_dir / "src" / "frontend" / "Dockerfile"
|
||||
if not frontend_dockerfile.exists():
|
||||
die(f"meet frontend Dockerfile not found at {frontend_dockerfile}")
|
||||
|
||||
ok(f"Building image ({platform}, frontend-production target)...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", platform,
|
||||
"--provenance=false",
|
||||
"--target", "frontend-production",
|
||||
"--build-arg", "VITE_API_BASE_URL=",
|
||||
"--load",
|
||||
"-f", str(frontend_dockerfile),
|
||||
"-t", frontend_image,
|
||||
str(meet_dir)])
|
||||
|
||||
if push:
|
||||
if is_prod:
|
||||
_seed_image_production(frontend_image, _kube._ssh_host, admin_pass)
|
||||
else:
|
||||
_seed_and_push(frontend_image, admin_pass)
|
||||
|
||||
if deploy:
|
||||
from sunbeam.manifests import cmd_apply
|
||||
cmd_apply(env="production" if is_prod else "local", domain=domain)
|
||||
for deployment in ("meet-backend", "meet-celery-worker", "meet-frontend"):
|
||||
ok(f"Rolling {deployment} deployment...")
|
||||
kube("rollout", "restart", f"deployment/{deployment}", "-n", "lasuite")
|
||||
for deployment in ("meet-backend", "meet-celery-worker", "meet-frontend"):
|
||||
kube("rollout", "status", f"deployment/{deployment}", "-n", "lasuite",
|
||||
"--timeout=180s")
|
||||
ok("Meet redeployed.")
|
||||
|
||||
|
||||
def _build_la_suite_frontend(
|
||||
app: str,
|
||||
repo_dir: Path,
|
||||
workspace_rel: str,
|
||||
app_rel: str,
|
||||
dockerfile_rel: str,
|
||||
image_name: str,
|
||||
deployment: str,
|
||||
namespace: str,
|
||||
push: bool = False,
|
||||
deploy: bool = False,
|
||||
):
|
||||
"""Build a La Suite frontend image from source and push to the Gitea registry.
|
||||
|
||||
Steps:
|
||||
1. yarn install in the workspace root — updates yarn.lock for new packages.
|
||||
2. yarn build-theme in the app dir — regenerates cunningham token CSS/TS.
|
||||
3. docker buildx build --target frontend-production → push.
|
||||
4. Pre-seed into k3s containerd.
|
||||
5. sunbeam apply + rollout restart.
|
||||
"""
|
||||
if not shutil.which("yarn"):
|
||||
die("yarn not found on PATH — install Node.js + yarn first (nvm use 22).")
|
||||
if not shutil.which("docker"):
|
||||
die("docker not found — is the Lima docker VM running?")
|
||||
|
||||
ip = get_lima_ip()
|
||||
domain = f"{ip}.sslip.io"
|
||||
|
||||
b64 = kube_out("-n", "devtools", "get", "secret",
|
||||
"gitea-admin-credentials", "-o=jsonpath={.data.password}")
|
||||
if not b64:
|
||||
die("gitea-admin-credentials secret not found — run seed first.")
|
||||
admin_pass = base64.b64decode(b64).decode()
|
||||
|
||||
workspace_dir = repo_dir / workspace_rel
|
||||
app_dir = repo_dir / app_rel
|
||||
dockerfile = repo_dir / dockerfile_rel
|
||||
|
||||
if not repo_dir.is_dir():
|
||||
die(f"{app} source not found at {repo_dir}")
|
||||
if not dockerfile.exists():
|
||||
die(f"Dockerfile not found at {dockerfile}")
|
||||
|
||||
registry = f"src.{domain}"
|
||||
image = f"{registry}/studio/{image_name}:latest"
|
||||
|
||||
step(f"Building {app} -> {image} ...")
|
||||
|
||||
ok("Updating yarn.lock (yarn install in workspace)...")
|
||||
_run(["yarn", "install"], cwd=str(workspace_dir))
|
||||
|
||||
ok("Regenerating cunningham design tokens (yarn build-theme)...")
|
||||
_run(["yarn", "build-theme"], cwd=str(app_dir))
|
||||
|
||||
if push:
|
||||
_trust_registry_in_docker_vm(registry)
|
||||
ok("Logging in to Gitea registry...")
|
||||
r = subprocess.run(
|
||||
["limactl", "shell", LIMA_DOCKER_VM, "--",
|
||||
"docker", "login", registry,
|
||||
"--username", GITEA_ADMIN_USER, "--password-stdin"],
|
||||
input=admin_pass, text=True, capture_output=True,
|
||||
)
|
||||
if r.returncode != 0:
|
||||
die(f"docker login failed:\n{r.stderr.strip()}")
|
||||
|
||||
ok("Building image (linux/arm64, frontend-production target)...")
|
||||
_run(["docker", "buildx", "build",
|
||||
"--platform", "linux/arm64",
|
||||
"--push",
|
||||
"--provenance=false",
|
||||
"--target", "frontend-production",
|
||||
"--load",
|
||||
"-f", str(dockerfile),
|
||||
"-t", image,
|
||||
str(kratos_admin_dir)])
|
||||
str(repo_dir)])
|
||||
|
||||
ok(f"Pushed {image}")
|
||||
|
||||
# Pre-seed into k3s containerd (same pattern as proxy)
|
||||
nodes = kube_out("get", "nodes", "-o=jsonpath={.items[*].metadata.name}").split()
|
||||
if len(nodes) == 1:
|
||||
ok("Single-node cluster: pre-seeding image into k3s containerd...")
|
||||
save = subprocess.Popen(
|
||||
["docker", "save", image],
|
||||
stdout=subprocess.PIPE,
|
||||
)
|
||||
ctr = subprocess.run(
|
||||
["limactl", "shell", LIMA_VM, "--",
|
||||
"sudo", "ctr", "-n", "k8s.io", "images", "import", "-"],
|
||||
stdin=save.stdout,
|
||||
capture_output=True,
|
||||
)
|
||||
save.stdout.close()
|
||||
save.wait()
|
||||
if ctr.returncode != 0:
|
||||
warn(f"containerd import failed:\n{ctr.stderr.decode().strip()}")
|
||||
else:
|
||||
ok("Image pre-seeded.")
|
||||
if push:
|
||||
_seed_and_push(image, admin_pass)
|
||||
|
||||
if deploy:
|
||||
from sunbeam.manifests import cmd_apply
|
||||
cmd_apply()
|
||||
|
||||
ok("Rolling kratos-admin-ui deployment...")
|
||||
kube("rollout", "restart", "deployment/kratos-admin-ui", "-n", "ory")
|
||||
kube("rollout", "status", "deployment/kratos-admin-ui", "-n", "ory",
|
||||
"--timeout=120s")
|
||||
ok("kratos-admin-ui redeployed.")
|
||||
ok(f"Rolling {deployment} deployment...")
|
||||
kube("rollout", "restart", f"deployment/{deployment}", "-n", namespace)
|
||||
kube("rollout", "status", f"deployment/{deployment}", "-n", namespace,
|
||||
"--timeout=180s")
|
||||
ok(f"{deployment} redeployed.")
|
||||
|
||||
106
sunbeam/kube.py
106
sunbeam/kube.py
@@ -1,9 +1,56 @@
|
||||
"""Kubernetes interface — kubectl/kustomize wrappers, domain substitution, target parsing."""
|
||||
import subprocess
|
||||
import time
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from sunbeam.tools import run_tool, CACHE_DIR
|
||||
from sunbeam.output import die
|
||||
from sunbeam.output import die, ok
|
||||
|
||||
# Active kubectl context. Set once at startup via set_context().
|
||||
# Defaults to "sunbeam" (Lima VM) for local dev.
|
||||
_context: str = "sunbeam"
|
||||
|
||||
# SSH host for production tunnel. Set alongside context for production env.
|
||||
_ssh_host: str = ""
|
||||
_tunnel_proc: subprocess.Popen | None = None
|
||||
|
||||
|
||||
def set_context(ctx: str, ssh_host: str = "") -> None:
|
||||
global _context, _ssh_host
|
||||
_context = ctx
|
||||
_ssh_host = ssh_host
|
||||
|
||||
|
||||
def context_arg() -> str:
|
||||
"""Return '--context=<active>' for use in subprocess command lists."""
|
||||
return f"--context={_context}"
|
||||
|
||||
|
||||
def ensure_tunnel() -> None:
    """Open an SSH tunnel localhost:16443 -> remote:6443 for production if needed.

    No-op unless set_context() configured an SSH host (local dev talks to the
    Lima VM directly). Idempotent: returns immediately when the local port
    already accepts connections. Dies if the tunnel does not come up within
    ~5 seconds (10 probes, 0.5s apart).
    """
    global _tunnel_proc
    if not _ssh_host:
        return
    import socket

    def _port_open() -> bool:
        # ConnectionRefusedError and TimeoutError are OSError subclasses, so a
        # single `except OSError` covers every connection-failure mode here.
        try:
            with socket.create_connection(("127.0.0.1", 16443), timeout=0.5):
                return True
        except OSError:
            return False

    if _port_open():
        return  # tunnel (or another listener) already up
    ok(f"Opening SSH tunnel to {_ssh_host}...")
    _tunnel_proc = subprocess.Popen(
        ["ssh", "-p", "2222", "-L", "16443:127.0.0.1:6443", "-N",
         "-o", "ExitOnForwardFailure=yes",
         "-o", "StrictHostKeyChecking=no", _ssh_host],
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
    )
    for _ in range(10):
        if _port_open():
            return
        time.sleep(0.5)
    die(f"SSH tunnel to {_ssh_host} did not open in time")
|
||||
|
||||
|
||||
def parse_target(s: str | None) -> tuple[str | None, str | None]:
|
||||
@@ -42,23 +89,26 @@ def get_lima_ip() -> str:
|
||||
|
||||
|
||||
def kube(*args, input=None, check=True) -> subprocess.CompletedProcess:
    """Run kubectl against the active context, opening the SSH tunnel if needed."""
    ensure_tunnel()
    is_binary = isinstance(input, bytes)
    return run_tool(
        "kubectl", context_arg(), *args,
        input=input, text=not is_binary, check=check, capture_output=False,
    )
|
||||
|
||||
|
||||
def kube_out(*args) -> str:
    """Run kubectl and return stripped stdout (empty string on failure)."""
    ensure_tunnel()
    proc = run_tool("kubectl", context_arg(), *args,
                    capture_output=True, text=True, check=False)
    if proc.returncode != 0:
        return ""
    return proc.stdout.strip()
|
||||
|
||||
|
||||
def kube_ok(*args) -> bool:
    """Return True when the kubectl command exits with status 0."""
    ensure_tunnel()
    proc = run_tool("kubectl", context_arg(), *args,
                    capture_output=True, check=False)
    return proc.returncode == 0
|
||||
|
||||
@@ -95,7 +145,7 @@ def create_secret(ns: str, name: str, **literals) -> None:
|
||||
|
||||
def kube_exec(ns: str, pod: str, *cmd: str, container: str | None = None) -> tuple[int, str]:
|
||||
"""Run a command inside a pod. Returns (returncode, stdout)."""
|
||||
args = ["kubectl", "--context=sunbeam", "exec", "-n", ns, pod]
|
||||
args = ["kubectl", context_arg(), "exec", "-n", ns, pod]
|
||||
if container:
|
||||
args += ["-c", container]
|
||||
args += ["--", *cmd]
|
||||
@@ -106,22 +156,42 @@ def kube_exec(ns: str, pod: str, *cmd: str, container: str | None = None) -> tup
|
||||
def get_domain() -> str:
    """Discover the active domain from cluster state.

    Tries multiple reliable anchors; falls back to the Lima VM IP for local dev.
    """
    import base64

    # 1. Gitea inline-config secret: the server section contains
    #    DOMAIN=src.<domain>. Works in both local and production because
    #    DOMAIN_SUFFIX is substituted into gitea-values.yaml at apply time.
    server_b64 = kube_out("get", "secret", "gitea-inline-config", "-n", "devtools",
                          "-o=jsonpath={.data.server}", "--ignore-not-found")
    if server_b64:
        try:
            for ini_line in base64.b64decode(server_b64).decode().splitlines():
                # e.g. "DOMAIN=src.sunbeam.pt"
                if ini_line.startswith("DOMAIN=src."):
                    return ini_line.split("DOMAIN=src.", 1)[1].strip()
        except Exception:
            pass  # best-effort anchor: fall through to the next one

    # 2. Fallback: lasuite-oidc-provider configmap (works if La Suite is deployed).
    jwks = kube_out("get", "configmap", "lasuite-oidc-provider", "-n", "lasuite",
                    "-o=jsonpath={.data.OIDC_OP_JWKS_ENDPOINT}", "--ignore-not-found")
    if jwks and "https://auth." in jwks:
        # e.g. "https://auth.192.168.105.2.sslip.io/.well-known/jwks.json"
        return jwks.split("https://auth.")[1].split("/")[0]

    # 3. Local dev fallback.
    return f"{get_lima_ip()}.sslip.io"
|
||||
|
||||
|
||||
def cmd_k8s(kubectl_args: list[str]) -> int:
    """Transparent kubectl passthrough for the active context."""
    ensure_tunnel()
    from sunbeam.tools import ensure_tool
    kubectl_bin = ensure_tool("kubectl")
    result = subprocess.run([str(kubectl_bin), context_arg(), *kubectl_args])
    return result.returncode
|
||||
|
||||
|
||||
@@ -149,19 +219,21 @@ def cmd_bao(bao_args: list[str]) -> int:
|
||||
|
||||
cmd_str = "VAULT_TOKEN=" + root_token + " bao " + " ".join(bao_args)
|
||||
r = subprocess.run(
|
||||
["kubectl", "--context=sunbeam", "-n", "data", "exec", ob_pod,
|
||||
["kubectl", context_arg(), "-n", "data", "exec", ob_pod,
|
||||
"-c", "openbao", "--", "sh", "-c", cmd_str]
|
||||
)
|
||||
return r.returncode
|
||||
|
||||
|
||||
def kustomize_build(overlay: Path, domain: str, email: str = "") -> str:
    """Run kustomize build --enable-helm and apply domain/email substitution."""
    built = run_tool(
        "kustomize", "build", "--enable-helm", str(overlay),
        capture_output=True, text=True, check=True,
    )
    text = domain_replace(built.stdout, domain)
    if email:
        text = text.replace("ACME_EMAIL", email)
    # Strip helm's empty annotations lines so the rendered manifests stay clean.
    return text.replace("\n annotations: null", "")
|
||||
|
||||
@@ -9,7 +9,7 @@ import urllib.request
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from sunbeam.kube import kube, kube_out, kube_ok, kube_apply, ensure_ns, create_secret, get_domain
|
||||
from sunbeam.kube import kube, kube_out, kube_ok, kube_apply, ensure_ns, create_secret, get_domain, context_arg
|
||||
from sunbeam.output import step, ok, warn, die
|
||||
|
||||
ADMIN_USERNAME = "estudio-admin"
|
||||
@@ -21,7 +21,6 @@ PG_USERS = [
|
||||
"docs", "meet", "drive", "messages", "conversations",
|
||||
"people", "find",
|
||||
]
|
||||
K8S_CTX = ["--context=sunbeam"]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -49,7 +48,7 @@ def _seed_openbao() -> dict:
|
||||
|
||||
def bao(cmd):
|
||||
r = subprocess.run(
|
||||
["kubectl", *K8S_CTX, "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
"--", "sh", "-c", cmd],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
@@ -174,6 +173,27 @@ def _seed_openbao() -> dict:
|
||||
**{"django-secret-key": rand,
|
||||
"collaboration-secret": rand})
|
||||
|
||||
meet = get_or_create("meet",
|
||||
**{"django-secret-key": rand,
|
||||
"application-jwt-secret-key": rand})
|
||||
|
||||
# Scaleway S3 credentials for CNPG barman backups.
|
||||
# Read from `scw config` at seed time; falls back to empty string (operator must fill in).
|
||||
def _scw_config(key):
    """Read one value from the local `scw` CLI config; '' when unavailable."""
    try:
        proc = subprocess.run(["scw", "config", "get", key],
                              capture_output=True, text=True, timeout=5)
    except (FileNotFoundError, subprocess.TimeoutExpired):
        # scw not installed, or hung: operator must fill the secret in later.
        return ""
    return proc.stdout.strip() if proc.returncode == 0 else ""
|
||||
|
||||
grafana = get_or_create("grafana",
|
||||
**{"admin-password": rand})
|
||||
|
||||
scaleway_s3 = get_or_create("scaleway-s3",
|
||||
**{"access-key-id": lambda: _scw_config("access-key"),
|
||||
"secret-access-key": lambda: _scw_config("secret-key")})
|
||||
|
||||
# Write all secrets to KV (idempotent -- puts same values back)
|
||||
bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' sh -c '"
|
||||
f"bao kv put secret/hydra system-secret=\"{hydra['system-secret']}\" cookie-secret=\"{hydra['cookie-secret']}\" pairwise-salt=\"{hydra['pairwise-salt']}\" && "
|
||||
@@ -185,7 +205,10 @@ def _seed_openbao() -> dict:
|
||||
f"bao kv put secret/people django-secret-key=\"{people['django-secret-key']}\" && "
|
||||
f"bao kv put secret/login-ui cookie-secret=\"{login_ui['cookie-secret']}\" csrf-cookie-secret=\"{login_ui['csrf-cookie-secret']}\" && "
|
||||
f"bao kv put secret/kratos-admin cookie-secret=\"{kratos_admin['cookie-secret']}\" csrf-cookie-secret=\"{kratos_admin['csrf-cookie-secret']}\" admin-identity-ids=\"{kratos_admin['admin-identity-ids']}\" && "
|
||||
f"bao kv put secret/docs django-secret-key=\"{docs['django-secret-key']}\" collaboration-secret=\"{docs['collaboration-secret']}\""
|
||||
f"bao kv put secret/docs django-secret-key=\"{docs['django-secret-key']}\" collaboration-secret=\"{docs['collaboration-secret']}\" && "
|
||||
f"bao kv put secret/meet django-secret-key=\"{meet['django-secret-key']}\" application-jwt-secret-key=\"{meet['application-jwt-secret-key']}\" && "
|
||||
f"bao kv put secret/grafana admin-password=\"{grafana['admin-password']}\" && "
|
||||
f"bao kv put secret/scaleway-s3 access-key-id=\"{scaleway_s3['access-key-id']}\" secret-access-key=\"{scaleway_s3['secret-access-key']}\""
|
||||
f"'")
|
||||
|
||||
# Configure Kubernetes auth method so VSO can authenticate with OpenBao
|
||||
@@ -208,7 +231,7 @@ def _seed_openbao() -> dict:
|
||||
bao(f"BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' "
|
||||
f"bao write auth/kubernetes/role/vso "
|
||||
f"bound_service_account_names=default "
|
||||
f"bound_service_account_namespaces=ory,devtools,storage,lasuite,media "
|
||||
f"bound_service_account_namespaces=ory,devtools,storage,lasuite,media,data,monitoring "
|
||||
f"policies=vso-reader "
|
||||
f"ttl=1h")
|
||||
|
||||
@@ -253,7 +276,7 @@ def _configure_db_engine(ob_pod, root_token, pg_user, pg_pass):
|
||||
|
||||
def bao(cmd, check=True):
|
||||
r = subprocess.run(
|
||||
["kubectl", *K8S_CTX, "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
"--", "sh", "-c", cmd],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
@@ -276,7 +299,7 @@ def _configure_db_engine(ob_pod, root_token, pg_user, pg_pass):
|
||||
|
||||
def psql(sql):
|
||||
r = subprocess.run(
|
||||
["kubectl", *K8S_CTX, "-n", "data", "exec", cnpg_pod, "-c", "postgres",
|
||||
["kubectl", context_arg(), "-n", "data", "exec", cnpg_pod, "-c", "postgres",
|
||||
"--", "psql", "-U", "postgres", "-c", sql],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
@@ -351,7 +374,7 @@ def _configure_db_engine(ob_pod, root_token, pg_user, pg_pass):
|
||||
def _kratos_admin_pf(local_port=14434):
|
||||
"""Port-forward directly to the Kratos admin API."""
|
||||
proc = subprocess.Popen(
|
||||
["kubectl", *K8S_CTX, "-n", "ory", "port-forward",
|
||||
["kubectl", context_arg(), "-n", "ory", "port-forward",
|
||||
"svc/kratos-admin", f"{local_port}:80"],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
)
|
||||
@@ -424,7 +447,7 @@ def _seed_kratos_admin_identity(ob_pod: str, root_token: str) -> tuple[str, str]
|
||||
|
||||
def _bao(cmd):
|
||||
return subprocess.run(
|
||||
["kubectl", *K8S_CTX, "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
"--", "sh", "-c", cmd],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
@@ -498,9 +521,11 @@ def cmd_seed() -> dict:
|
||||
f"CREATE DATABASE {db} OWNER {user};", check=False)
|
||||
|
||||
# Read CNPG superuser credentials and configure database secrets engine.
|
||||
pg_user_b64 = kube_out("-n", "data", "get", "secret", "postgres-superuser",
|
||||
# CNPG creates secret named "{cluster}-app" (not "{cluster}-superuser")
|
||||
# when owner is specified without an explicit secret field.
|
||||
pg_user_b64 = kube_out("-n", "data", "get", "secret", "postgres-app",
|
||||
"-o=jsonpath={.data.username}")
|
||||
pg_pass_b64 = kube_out("-n", "data", "get", "secret", "postgres-superuser",
|
||||
pg_pass_b64 = kube_out("-n", "data", "get", "secret", "postgres-app",
|
||||
"-o=jsonpath={.data.password}")
|
||||
pg_user = base64.b64decode(pg_user_b64).decode() if pg_user_b64 else "postgres"
|
||||
pg_pass = base64.b64decode(pg_pass_b64).decode() if pg_pass_b64 else ""
|
||||
@@ -555,6 +580,7 @@ def cmd_seed() -> dict:
|
||||
DJANGO_SECRET_KEY=django_secret)
|
||||
|
||||
ensure_ns("media")
|
||||
ensure_ns("monitoring")
|
||||
|
||||
# Ensure the Kratos admin identity exists and ADMIN_IDENTITY_IDS is set.
|
||||
# This runs after all other secrets are in place (Kratos must be up).
|
||||
@@ -606,7 +632,7 @@ def cmd_verify():
|
||||
|
||||
def bao(cmd, *, check=True):
|
||||
r = subprocess.run(
|
||||
["kubectl", *K8S_CTX, "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
["kubectl", context_arg(), "-n", "data", "exec", ob_pod, "-c", "openbao",
|
||||
"--", "sh", "-c", cmd],
|
||||
capture_output=True, text=True,
|
||||
)
|
||||
|
||||
@@ -3,6 +3,7 @@ import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import sunbeam.kube as _kube_mod
|
||||
from sunbeam.kube import kube, kube_out, parse_target
|
||||
from sunbeam.tools import ensure_tool
|
||||
from sunbeam.output import step, ok, warn, die
|
||||
@@ -24,7 +25,10 @@ SERVICES_TO_RESTART = [
|
||||
("media", "livekit-server"),
|
||||
]
|
||||
|
||||
K8S_CTX = ["--context=sunbeam"]
|
||||
|
||||
def _k8s_ctx():
    """Return the kubectl --context flag matching the active environment."""
    ctx_flag = _kube_mod.context_arg()
    return [ctx_flag]
|
||||
|
||||
|
||||
def _capture_out(cmd, *, default=""):
|
||||
@@ -43,7 +47,7 @@ def _vso_sync_status():
|
||||
|
||||
# VaultStaticSecrets: synced when secretMAC is populated
|
||||
vss_raw = _capture_out([
|
||||
"kubectl", *K8S_CTX, "get", "vaultstaticsecret", "-A", "--no-headers",
|
||||
"kubectl", *_k8s_ctx(), "get", "vaultstaticsecret", "-A", "--no-headers",
|
||||
"-o=custom-columns="
|
||||
"NS:.metadata.namespace,NAME:.metadata.name,MAC:.status.secretMAC",
|
||||
])
|
||||
@@ -65,7 +69,7 @@ def _vso_sync_status():
|
||||
|
||||
# VaultDynamicSecrets: synced when lastRenewalTime is non-zero
|
||||
vds_raw = _capture_out([
|
||||
"kubectl", *K8S_CTX, "get", "vaultdynamicsecret", "-A", "--no-headers",
|
||||
"kubectl", *_k8s_ctx(), "get", "vaultdynamicsecret", "-A", "--no-headers",
|
||||
"-o=custom-columns="
|
||||
"NS:.metadata.namespace,NAME:.metadata.name,RENEWED:.status.lastRenewalTime",
|
||||
])
|
||||
@@ -101,7 +105,7 @@ def cmd_status(target: str | None):
|
||||
if target is None:
|
||||
# All pods across managed namespaces
|
||||
raw = _capture_out([
|
||||
"kubectl", *K8S_CTX,
|
||||
"kubectl", *_k8s_ctx(),
|
||||
"get", "pods",
|
||||
"--field-selector=metadata.namespace!= kube-system",
|
||||
"-A", "--no-headers",
|
||||
@@ -120,7 +124,7 @@ def cmd_status(target: str | None):
|
||||
if name:
|
||||
# Specific service: namespace/service
|
||||
raw = _capture_out([
|
||||
"kubectl", *K8S_CTX,
|
||||
"kubectl", *_k8s_ctx(),
|
||||
"get", "pods", "-n", ns, "-l", f"app={name}", "--no-headers",
|
||||
])
|
||||
pods = []
|
||||
@@ -133,7 +137,7 @@ def cmd_status(target: str | None):
|
||||
else:
|
||||
# Namespace only
|
||||
raw = _capture_out([
|
||||
"kubectl", *K8S_CTX,
|
||||
"kubectl", *_k8s_ctx(),
|
||||
"get", "pods", "-n", ns, "--no-headers",
|
||||
])
|
||||
pods = []
|
||||
|
||||
@@ -7,16 +7,15 @@ import urllib.request
|
||||
import urllib.error
|
||||
from contextlib import contextmanager
|
||||
|
||||
import sunbeam.kube as _kube_mod
|
||||
from sunbeam.output import step, ok, warn, die, table
|
||||
|
||||
K8S_CTX = ["--context=sunbeam"]
|
||||
|
||||
|
||||
@contextmanager
|
||||
def _port_forward(ns="ory", svc="kratos-admin", local_port=4434, remote_port=80):
|
||||
"""Port-forward directly to the Kratos admin HTTP API and yield the local URL."""
|
||||
proc = subprocess.Popen(
|
||||
["kubectl", *K8S_CTX, "-n", ns, "port-forward",
|
||||
["kubectl", _kube_mod.context_arg(), "-n", ns, "port-forward",
|
||||
f"svc/{svc}", f"{local_port}:{remote_port}"],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
|
||||
)
|
||||
@@ -166,6 +165,27 @@ def cmd_user_disable(target):
|
||||
warn("App sessions (docs/people) expire within SESSION_COOKIE_AGE — currently 1h.")
|
||||
|
||||
|
||||
def cmd_user_set_password(target, password):
    """Set (or reset) the password credential for an identity."""
    step(f"Setting password for: {target}")
    with _port_forward() as base:
        identity = _find_identity(base, target)
        iid = identity["id"]
        # Kratos admin PUT replaces the whole identity, so carry over every
        # existing field and only swap in the new password credential.
        payload = {
            "schema_id": identity["schema_id"],
            "traits": identity["traits"],
            "state": identity.get("state", "active"),
            "metadata_public": identity.get("metadata_public"),
            "metadata_admin": identity.get("metadata_admin"),
            "credentials": {
                "password": {"config": {"password": password}},
            },
        }
        _api(base, f"/identities/{iid}", method="PUT", body=payload)
        ok(f"Password set for {iid[:8]}...")
|
||||
|
||||
|
||||
def cmd_user_enable(target):
|
||||
"""Re-enable a previously disabled identity."""
|
||||
step(f"Enabling identity: {target}")
|
||||
|
||||
Reference in New Issue
Block a user