scripts: replace local-up.sh with idempotent Python lifecycle script

local-up.py is a stdlib-only Python rewrite of local-up.sh +
local-seed-secrets.sh. Key improvements:

- Correctly parses limactl list --json NDJSON output (json.load()
  choked on NDJSON, causing spurious VM creation attempts)
- Handles all Lima VM states: none, Running, Stopped, Broken, etc.
- Inlines seed secrets (no separate local-seed-secrets.sh subprocess)
- Partial runs: --seed, --apply, --restart flags
- Consistent idempotency: every step checks state before acting
- Adds the people-backend and people celery worker/beat deployments to the
  restart list, and the `find` user to the Postgres users list

local-up.sh is patched in parallel: yq added to the prerequisite check,
NDJSON-safe Lima VM state detection, --server-side for the Linkerd
applies, the people deployments added to the restart list, and the
Mail/Mailpit URLs added to the summary output.
This commit is contained in:
2026-03-01 18:22:54 +00:00
parent 5e36322a3b
commit 5c119e2b26
3 changed files with 576 additions and 11 deletions

View File

@@ -58,7 +58,7 @@ done
echo "==> Setting postgres user passwords..."
PG_POD=$(kubectl $CTX -n data get pods -l cnpg.io/cluster=postgres,role=primary -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [[ -n "$PG_POD" ]]; then
for user in kratos hydra gitea hive docs meet drive messages conversations people; do
for user in kratos hydra gitea hive docs meet drive messages conversations people find; do
kubectl $CTX -n data exec "$PG_POD" -c postgres -- \
psql -U postgres -c "ALTER USER $user WITH PASSWORD '$DB_PASSWORD';" 2>/dev/null || true
done
@@ -113,6 +113,13 @@ create_secret lasuite hive-oidc \
--from-literal=client-id="hive-local" \
--from-literal=client-secret="hive-local-secret"
# People (desk)
create_secret lasuite people-db-credentials \
--from-literal=password="$DB_PASSWORD"
create_secret lasuite people-django-secret \
--from-literal=DJANGO_SECRET_KEY="local-dev-people-django-secret-key-not-for-production"
# Media namespace
ensure_ns media
@@ -173,6 +180,7 @@ else
bao kv put secret/gitea db-password='$DB_PASSWORD' s3-access-key='$S3_ACCESS_KEY' s3-secret-key='$S3_SECRET_KEY'
bao kv put secret/seaweedfs access-key='$S3_ACCESS_KEY' secret-key='$S3_SECRET_KEY'
bao kv put secret/hive db-url='postgresql://hive:${DB_PASSWORD}@postgres-rw.data.svc.cluster.local:5432/hive_db' oidc-client-id='hive-local' oidc-client-secret='hive-local-secret'
bao kv put secret/people db-password='$DB_PASSWORD' django-secret-key='local-dev-people-django-secret-key-not-for-production'
bao kv put secret/livekit api-key='$LIVEKIT_API_KEY' api-secret='$LIVEKIT_API_SECRET'
" 2>/dev/null
echo " Done."

551
scripts/local-up.py Executable file
View File

@@ -0,0 +1,551 @@
#!/usr/bin/env python3
"""
local-up.py — Sunbeam local dev stack lifecycle manager.
Idempotent: safe to run from any state (fresh Mac, existing VM, partial deploy).
Consolidates local-up.sh + local-seed-secrets.sh into one place.
Usage:
./scripts/local-up.py # full stack bring-up
./scripts/local-up.py --seed # re-seed secrets only (e.g. after adding a service)
./scripts/local-up.py --apply # re-apply manifests only (e.g. after a config change)
./scripts/local-up.py --restart # restart services only
Requires: limactl mkcert kubectl kustomize linkerd jq yq
"""
import argparse
import base64
import json
import os
import shutil
import subprocess
import sys
import tempfile
import time
from pathlib import Path
# ── Paths ─────────────────────────────────────────────────────────────────────
SCRIPT_DIR = Path(__file__).parent.resolve()
REPO_ROOT = SCRIPT_DIR.parent
# Git-ignored home of the mkcert-generated tls.crt / tls.key pair.
SECRETS_DIR = REPO_ROOT / "secrets" / "local"
# ── Config ────────────────────────────────────────────────────────────────────
# Lima VM name; also used as the kubectl cluster/context name below.
LIMA_VM = "sunbeam"
# Prepended to every kubectl call so the user's current context is untouched.
K8S_CTX = ["--context=sunbeam"]
# Deterministic local-dev credentials (not for production)
DB_PASSWORD = "localdev"
S3_ACCESS_KEY = "minioadmin"
S3_SECRET_KEY = "minioadmin"
HYDRA_SYSTEM_SECRET = "local-hydra-system-secret-at-least-16"
HYDRA_COOKIE_SECRET = "local-hydra-cookie-secret-at-least-16"
HYDRA_PAIRWISE_SALT = "local-hydra-pairwise-salt-value-1"
LIVEKIT_API_KEY = "devkey"
LIVEKIT_API_SECRET = "secret-placeholder"
PEOPLE_DJANGO_SECRET = "local-dev-people-django-secret-key-not-for-production"
# CLI tools that must be on PATH before anything else runs.
REQUIRED_TOOLS = ["limactl", "mkcert", "kubectl", "kustomize", "linkerd", "jq", "yq"]
# Postgres roles whose passwords are reset to DB_PASSWORD on every seed run.
PG_USERS = [
    "kratos", "hydra", "gitea", "hive",
    "docs", "meet", "drive", "messages", "conversations",
    "people", "find",
]
# (namespace, deployment) pairs restarted after seeding so each picks up
# freshly created secrets.
SERVICES_TO_RESTART = [
    ("ory", "hydra"),
    ("ory", "kratos"),
    ("ory", "login-ui"),
    ("devtools", "gitea"),
    ("storage", "seaweedfs-filer"),
    ("lasuite", "hive"),
    ("lasuite", "people-backend"),
    ("lasuite", "people-celery-worker"),
    ("lasuite", "people-celery-beat"),
    ("media", "livekit-server"),
]
# ── Output ────────────────────────────────────────────────────────────────────
def step(msg: str) -> None:
    """Print a top-level progress header: blank line plus '==> ' prefix."""
    sys.stdout.write(f"\n==> {msg}\n")
    sys.stdout.flush()
def ok(msg: str) -> None:
    """Print an indented sub-step status line."""
    sys.stdout.write(f" {msg}\n")
    sys.stdout.flush()
def warn(msg: str) -> None:
    """Print a non-fatal warning to stderr; execution continues."""
    sys.stderr.write(f" WARN: {msg}\n")
    sys.stderr.flush()
def die(msg: str) -> None:
    """Print an error to stderr and abort the whole script with exit code 1."""
    sys.stderr.write(f"\nERROR: {msg}\n")
    sys.exit(1)
# ── Subprocess helpers ────────────────────────────────────────────────────────
def run(cmd: list, *, check: bool = True, input: str | None = None,
capture: bool = False, cwd: Path | None = None) -> subprocess.CompletedProcess:
return subprocess.run(
cmd, check=check, text=True, input=input,
capture_output=capture, cwd=cwd,
)
def capture(cmd: list, *, default: str = "") -> str:
r = subprocess.run(cmd, capture_output=True, text=True)
return r.stdout.strip() if r.returncode == 0 else default
def succeeds(cmd: list) -> bool:
return subprocess.run(cmd, capture_output=True).returncode == 0
# ── kubectl wrappers ──────────────────────────────────────────────────────────
def kube(*args, input: str | None = None, check: bool = True) -> subprocess.CompletedProcess:
    """kubectl against the sunbeam context; raises on failure unless check=False."""
    return run(["kubectl", *K8S_CTX, *args], input=input, check=check)
def kube_out(*args) -> str:
    """kubectl stdout (stripped), or "" on any failure — never raises."""
    return capture(["kubectl", *K8S_CTX, *args])
def kube_ok(*args) -> bool:
    """True when the kubectl invocation exits 0 (resource-exists probes)."""
    return succeeds(["kubectl", *K8S_CTX, *args])
def kube_apply(manifest: str, *, server_side: bool = True) -> None:
    """Apply a YAML manifest from stdin. Server-side apply with
    --force-conflicts by default so repeated runs stay idempotent even after
    manual kubectl edits changed the field manager."""
    args = ["apply", "-f", "-"]
    if server_side:
        args += ["--server-side", "--force-conflicts"]
    kube(*args, input=manifest)
def ns_exists(ns: str) -> bool:
    """True when the namespace already exists in the cluster."""
    return kube_ok("get", "namespace", ns)
def ensure_ns(ns: str) -> None:
    """Create the namespace if missing (dry-run render + apply = idempotent)."""
    manifest = kube_out("create", "namespace", ns, "--dry-run=client", "-o=yaml")
    if manifest:
        kube_apply(manifest)
def create_secret(ns: str, name: str, **literals) -> None:
    """Create or update a generic secret idempotently.

    Literal keys containing dashes cannot be spelled as Python identifiers;
    callers pass them as **{"dash-key": value}.

    Renders via --dry-run=client, then delegates to kube_apply() so the
    apply uses --server-side --force-conflicts consistently with every
    other apply in this script (the previous inline apply omitted
    --force-conflicts and could fail on field-manager conflicts after a
    manual `kubectl apply`).
    """
    args = ["create", "secret", "generic", name, f"-n={ns}"]
    for key, value in literals.items():
        args.append(f"--from-literal={key}={value}")
    args += ["--dry-run=client", "-o=yaml"]
    manifest = kube_out(*args)
    if manifest:
        kube_apply(manifest)
# ── 1. Prerequisites ──────────────────────────────────────────────────────────
def check_prerequisites() -> None:
    """Abort early with a brew-install hint when any required CLI is absent."""
    step("Checking prerequisites...")
    missing = []
    for tool in REQUIRED_TOOLS:
        if shutil.which(tool) is None:
            missing.append(tool)
    if missing:
        die(f"missing tools: {', '.join(missing)}\nInstall: brew install {' '.join(missing)}")
    ok("All tools present.")
# ── 2. Lima VM ────────────────────────────────────────────────────────────────
def ensure_lima_vm() -> None:
    """Bring the Lima VM to Running from any starting state.

    Three cases:
      - "none"    → create a fresh k3s VM (6 CPU / 12 GB RAM / 60 GB disk)
      - "Running" → nothing to do
      - anything else (Stopped, Broken, Starting, ...) → `limactl start`
    """
    step("Lima VM...")
    status = _lima_status()
    if status == "none":
        # Consistency fix: use the LIMA_VM constant (was hard-coded
        # "sunbeam") so renaming the VM only requires changing the constant.
        ok(f"Creating '{LIMA_VM}' (k3s 6 CPU / 12 GB / 60 GB)...")
        run(["limactl", "start",
             f"--name={LIMA_VM}", "template:k3s",
             "--memory=12", "--cpus=6", "--disk=60",
             "--vm-type=vz", "--mount-type=virtiofs"])
    elif status == "Running":
        ok("Already running.")
    else:
        ok(f"Starting (current status: {status})...")
        run(["limactl", "start", LIMA_VM])
def _lima_status() -> str:
    """Return the Lima VM status, handling both JSON-array and NDJSON output.

    `limactl list --json` emits a JSON array in some versions and NDJSON
    (one object per line) in others; a plain json.load() on NDJSON raises,
    which previously caused spurious VM-creation attempts. Returns "none"
    when the VM does not exist or limactl produced no output.
    """
    raw = capture(["limactl", "list", "--json"])
    if not raw:
        return "none"
    try:
        decoded = json.loads(raw)
        records = decoded if isinstance(decoded, list) else [decoded]
    except json.JSONDecodeError:
        # Fall back to NDJSON: parse each non-empty line independently,
        # ignoring any line that is not valid JSON.
        records = []
        for chunk in raw.splitlines():
            chunk = chunk.strip()
            if not chunk:
                continue
            try:
                records.append(json.loads(chunk))
            except json.JSONDecodeError:
                continue
    for record in records:
        if record.get("name") == LIMA_VM:
            return record.get("status", "unknown")
    return "none"
# ── 3. Kubeconfig ─────────────────────────────────────────────────────────────
def merge_kubeconfig() -> None:
    """Extract the Lima cluster credentials and register a 'sunbeam' context.

    Decodes the CA, client cert, and client key out of the guest kubeconfig
    (via yq) into a private temp dir, then embeds them into the user's
    kubeconfig with `kubectl config set-*`.

    Fixes vs. the original:
      - uses tempfile.mkdtemp() instead of the fixed, world-guessable
        /tmp/sunbeam-kube path (predictable /tmp names are a clobbering /
        symlink hazard, and two concurrent runs would collide);
      - dies with a clear message when yq returns nothing (or "null") for a
        field — previously base64.b64decode("") silently wrote empty cert
        files and kubectl failed later with a confusing error.
    """
    step("Merging kubeconfig...")
    lima_kube = Path.home() / f".lima/{LIMA_VM}/copied-from-guest/kubeconfig.yaml"
    if not lima_kube.exists():
        die(f"Lima kubeconfig not found: {lima_kube}")
    tmp = Path(tempfile.mkdtemp(prefix="sunbeam-kube-"))
    try:
        for query, filename in [
            (".clusters[0].cluster.certificate-authority-data", "ca.crt"),
            (".users[0].user.client-certificate-data", "client.crt"),
            (".users[0].user.client-key-data", "client.key"),
        ]:
            b64 = capture(["yq", query, str(lima_kube)])
            # yq prints "null" for a missing path; treat both as fatal.
            if not b64 or b64 == "null":
                die(f"could not extract {query} from {lima_kube}")
            (tmp / filename).write_bytes(base64.b64decode(b64))
        run(["kubectl", "config", "set-cluster", LIMA_VM,
             "--server=https://127.0.0.1:6443",
             f"--certificate-authority={tmp}/ca.crt", "--embed-certs=true"])
        run(["kubectl", "config", "set-credentials", f"{LIMA_VM}-admin",
             f"--client-certificate={tmp}/client.crt",
             f"--client-key={tmp}/client.key", "--embed-certs=true"])
        run(["kubectl", "config", "set-context", LIMA_VM,
             f"--cluster={LIMA_VM}", f"--user={LIMA_VM}-admin"])
    finally:
        # Never leave key material on disk, even on failure.
        shutil.rmtree(tmp, ignore_errors=True)
    ok("Context 'sunbeam' ready.")
# ── 4. Traefik ────────────────────────────────────────────────────────────────
def disable_traefik() -> None:
    """Remove k3s's bundled Traefik helm chart (ingress is Pingora instead).

    Also deletes the k3s manifest inside the guest so k3s does not
    re-install the chart on its next restart. No-op when the chart is
    already gone.
    """
    step("Traefik...")
    if kube_ok("get", "helmchart", "traefik", "-n", "kube-system"):
        ok("Removing (replaced by Pingora)...")
        kube("delete", "helmchart", "traefik", "traefik-crd",
             "-n", "kube-system", check=False)
        # Best-effort: remove the auto-deploy manifest in the guest; output
        # and failures are swallowed on purpose.
        subprocess.run(
            ["limactl", "shell", LIMA_VM,
             "sudo", "rm", "-f",
             "/var/lib/rancher/k3s/server/manifests/traefik.yaml"],
            capture_output=True,
        )
    ok("Done.")
# ── 5. cert-manager ───────────────────────────────────────────────────────────
def ensure_cert_manager() -> None:
    """Install cert-manager v1.17.0 unless its namespace already exists,
    then block until its three deployments finish rolling out."""
    step("cert-manager...")
    if ns_exists("cert-manager"):
        ok("Already installed.")
        return
    ok("Installing...")
    manifest_url = ("https://github.com/cert-manager/cert-manager/releases/"
                    "download/v1.17.0/cert-manager.yaml")
    kube("apply", "-f", manifest_url)
    deployments = ("cert-manager", "cert-manager-webhook", "cert-manager-cainjector")
    for name in deployments:
        kube("rollout", "status", f"deployment/{name}",
             "-n", "cert-manager", "--timeout=120s")
    ok("Installed.")
# ── 6. Linkerd ────────────────────────────────────────────────────────────────
def ensure_linkerd() -> None:
    """Install Gateway API CRDs, Linkerd CRDs, and the Linkerd control plane
    unless the linkerd namespace already exists; waits for the core
    control-plane deployments to roll out."""
    step("Linkerd...")
    if ns_exists("linkerd"):
        ok("Already installed.")
        return
    ok("Installing Gateway API CRDs...")
    gateway_url = ("https://github.com/kubernetes-sigs/gateway-api/releases/"
                   "download/v1.4.0/standard-install.yaml")
    kube("apply", "--server-side", "-f", gateway_url)
    ok("Installing Linkerd CRDs...")
    kube_apply(capture(["linkerd", "install", "--crds"]))
    ok("Installing Linkerd control plane...")
    kube_apply(capture(["linkerd", "install"]))
    for name in ("linkerd-identity", "linkerd-destination", "linkerd-proxy-injector"):
        kube("rollout", "status", f"deployment/{name}",
             "-n", "linkerd", "--timeout=120s")
    ok("Installed.")
# ── 7. TLS certificate ────────────────────────────────────────────────────────
def get_lima_ip() -> str:
    """Return the VM's IPv4 address on eth1 (the externally reachable NIC).

    Falls back to the first address reported by `hostname -I`. Fix: the
    fallback previously did `.split()[0]` on a possibly-empty string,
    raising an opaque IndexError when the VM was stopped or unreachable;
    it now dies with an actionable message instead.
    """
    raw = capture(["limactl", "shell", LIMA_VM,
                   "ip", "-4", "addr", "show", "eth1"])
    for line in raw.splitlines():
        if "inet " in line:
            # "inet 192.168.x.y/24 ..." → second field, strip the CIDR suffix.
            return line.strip().split()[1].split("/")[0]
    # Fallback: first non-loopback IP
    addrs = capture(["limactl", "shell", LIMA_VM, "hostname", "-I"]).split()
    if not addrs:
        die(f"could not determine IP of Lima VM '{LIMA_VM}' — is it running?")
    return addrs[0]
def ensure_tls_cert() -> str:
    """Ensure a mkcert wildcard cert for <vm-ip>.sslip.io; return the domain.

    Fix: previously only tls.crt was checked, so a deleted/missing tls.key
    next to an existing tls.crt silently skipped regeneration and the TLS
    secret creation failed later. Both files must now exist to short-circuit.

    NOTE(review): the cert is keyed to the current VM IP; if the IP changed
    since generation, delete secrets/local/tls.* and re-run.
    """
    step("TLS certificate...")
    ip = get_lima_ip()
    domain = f"{ip}.sslip.io"
    cert = SECRETS_DIR / "tls.crt"
    key = SECRETS_DIR / "tls.key"
    if cert.exists() and key.exists():
        ok(f"Cert exists. Domain: {domain}")
        return domain
    ok(f"Generating wildcard cert for *.{domain}...")
    SECRETS_DIR.mkdir(parents=True, exist_ok=True)
    run(["mkcert", f"*.{domain}"], cwd=SECRETS_DIR)
    # mkcert writes _wildcard.<domain>[-key].pem; normalize to tls.crt/tls.key.
    for src, dst in [
        (f"_wildcard.{domain}.pem", "tls.crt"),
        (f"_wildcard.{domain}-key.pem", "tls.key"),
    ]:
        (SECRETS_DIR / src).rename(SECRETS_DIR / dst)
    ok(f"Cert generated. Domain: {domain}")
    return domain
# ── 8. TLS secret ─────────────────────────────────────────────────────────────
def ensure_tls_secret(domain: str) -> None:
    """Publish the mkcert cert/key as the 'pingora-tls' TLS secret in the
    ingress namespace (dry-run render + server-side apply = idempotent).
    `domain` is accepted for signature symmetry but not used here."""
    step("TLS secret...")
    ensure_ns("ingress")
    rendered = kube_out(
        "create", "secret", "tls", "pingora-tls",
        f"--cert={SECRETS_DIR}/tls.crt",
        f"--key={SECRETS_DIR}/tls.key",
        "-n", "ingress",
        "--dry-run=client", "-o=yaml",
    )
    if rendered:
        kube_apply(rendered)
    ok("Done.")
# ── 9. Apply manifests ────────────────────────────────────────────────────────
def apply_manifests(domain: str) -> None:
    """kustomize-build the local overlay, substitute the real domain for the
    DOMAIN_SUFFIX placeholder, and server-side apply the whole stack."""
    step(f"Applying manifests (domain: {domain})...")
    build = run(
        ["kustomize", "build", "--enable-helm", "overlays/local/"],
        capture=True, cwd=REPO_ROOT,
    )
    rendered = build.stdout.replace("DOMAIN_SUFFIX", domain)
    kube("apply", "--server-side", "--force-conflicts", "-f", "-", input=rendered)
    ok("Applied.")
# ── 10. Seed secrets ──────────────────────────────────────────────────────────
def seed_secrets() -> None:
    """Seed everything the stack needs: postgres role passwords, K8s Secret
    objects, and the OpenBao KV store. Safe to re-run at any time."""
    step("Seeding secrets...")
    # ── Wait for postgres ─────────────────────────────────────────────────────
    ok("Waiting for postgres cluster...")
    pg_pod = ""
    # Poll the CNPG cluster phase: 60 tries x 5 s = 5-minute budget.
    for _ in range(60):
        phase = kube_out("-n", "data", "get", "cluster", "postgres",
                         "-o=jsonpath={.status.phase}")
        if phase == "Cluster in healthy state":
            pg_pod = kube_out("-n", "data", "get", "pods",
                              "-l=cnpg.io/cluster=postgres,role=primary",
                              "-o=jsonpath={.items[0].metadata.name}")
            ok(f"Postgres ready ({pg_pod}).")
            break
        time.sleep(5)
    else:
        # for/else: only reached when the loop never hit `break`.
        warn("Postgres not ready after 5 min — continuing anyway.")
    # ── Set postgres passwords ────────────────────────────────────────────────
    if pg_pod:
        ok("Setting postgres user passwords...")
        for user in PG_USERS:
            # check=False: a not-yet-created role is tolerated here.
            kube("exec", "-n", "data", pg_pod, "-c", "postgres", "--",
                 "psql", "-U", "postgres", "-c",
                 f"ALTER USER {user} WITH PASSWORD '{DB_PASSWORD}';",
                 check=False)
    # ── K8s secrets ───────────────────────────────────────────────────────────
    ok("Creating K8s secrets...")
    # Ory
    ensure_ns("ory")
    create_secret("ory", "hydra",
                  dsn=(f"postgresql://hydra:{DB_PASSWORD}@"
                       "postgres-rw.data.svc.cluster.local:5432/hydra_db?sslmode=disable"),
                  secretsSystem=HYDRA_SYSTEM_SECRET,
                  secretsCookie=HYDRA_COOKIE_SECRET,
                  # dash in key: must be passed via ** expansion
                  **{"pairwise-salt": HYDRA_PAIRWISE_SALT},
                  )
    # Devtools
    ensure_ns("devtools")
    create_secret("devtools", "gitea-db-credentials", password=DB_PASSWORD)
    create_secret("devtools", "gitea-s3-credentials",
                  **{"access-key": S3_ACCESS_KEY, "secret-key": S3_SECRET_KEY})
    # Storage
    ensure_ns("storage")
    create_secret("storage", "seaweedfs-s3-credentials",
                  S3_ACCESS_KEY=S3_ACCESS_KEY, S3_SECRET_KEY=S3_SECRET_KEY)
    # La Suite
    ensure_ns("lasuite")
    create_secret("lasuite", "seaweedfs-s3-credentials",
                  S3_ACCESS_KEY=S3_ACCESS_KEY, S3_SECRET_KEY=S3_SECRET_KEY)
    create_secret("lasuite", "hive-db-url",
                  url=(f"postgresql://hive:{DB_PASSWORD}@"
                       "postgres-rw.data.svc.cluster.local:5432/hive_db"))
    create_secret("lasuite", "hive-oidc",
                  **{"client-id": "hive-local", "client-secret": "hive-local-secret"})
    create_secret("lasuite", "people-db-credentials", password=DB_PASSWORD)
    create_secret("lasuite", "people-django-secret",
                  DJANGO_SECRET_KEY=PEOPLE_DJANGO_SECRET)
    # Media
    # namespace only — livekit credentials are seeded via OpenBao below
    ensure_ns("media")
    # ── OpenBao ───────────────────────────────────────────────────────────────
    _seed_openbao()
    ok("All secrets seeded.")
def _seed_openbao() -> None:
    """Initialize, unseal, and seed the OpenBao KV store. Idempotent.

    Flow: find the server pod → wait until Running (it will not be Ready
    until unsealed) → init if needed (keys persisted in the openbao-keys
    K8s secret) → unseal if sealed → push the local-dev KV entries.
    """
    ob_pod = kube_out(
        "-n", "data", "get", "pods",
        "-l=app.kubernetes.io/name=openbao,component=server",
        "-o=jsonpath={.items[0].metadata.name}",
    )
    if not ob_pod:
        ok("OpenBao pod not found — skipping.")
        return
    ok(f"OpenBao ({ob_pod})...")
    # Wait for pod to be Running (won't be Ready until unsealed)
    kube("wait", "-n", "data", f"pod/{ob_pod}",
         "--for=jsonpath={.status.phase}=Running", "--timeout=120s", check=False)

    def bao(cmd: str) -> str:
        """Run a shell command inside the openbao container; '' on failure."""
        return kube_out("-n", "data", "exec", ob_pod, "-c", "openbao",
                        "--", "sh", "-c", cmd)

    def bao_status() -> dict:
        # FIX: `bao status` follows the Vault CLI convention of exiting 2
        # when the server is sealed, while still printing its JSON to
        # stdout. The old `... || echo '{}'` therefore produced TWO
        # concatenated JSON documents for a sealed server, json.loads()
        # failed, and an initialized-but-sealed OpenBao looked
        # uninitialized — re-runs then tried (and failed) to re-init and
        # never unsealed it. Forcing exit 0 with `; true` keeps the real
        # JSON intact. (Exit-code convention: verify against bao docs.)
        raw = bao("bao status -format=json 2>/dev/null; true")
        try:
            return json.loads(raw) if raw else {}
        except json.JSONDecodeError:
            return {}

    status = bao_status()
    unseal_key = ""
    root_token = ""
    if not status.get("initialized"):
        ok("Initializing OpenBao...")
        init_json = bao("bao operator init -key-shares=1 -key-threshold=1 -format=json 2>/dev/null")
        try:
            init = json.loads(init_json)
            unseal_key = init["unseal_keys_b64"][0]
            root_token = init["root_token"]
            # Persist the material in-cluster so later runs can unseal/seed.
            create_secret("data", "openbao-keys",
                          key=unseal_key, **{"root-token": root_token})
            ok("Initialized — keys stored in secret/openbao-keys.")
        except (json.JSONDecodeError, KeyError) as e:
            warn(f"OpenBao init parse failed: {e}")
            return
    else:
        ok("Already initialized.")
        unseal_key = kube_out("-n", "data", "get", "secret", "openbao-keys",
                              "-o=jsonpath={.data.key}")
        if unseal_key:
            unseal_key = base64.b64decode(unseal_key).decode()
        root_token = kube_out("-n", "data", "get", "secret", "openbao-keys",
                              "-o=jsonpath={.data.root-token}")
        if root_token:
            root_token = base64.b64decode(root_token).decode()
    # Unseal if sealed
    if bao_status().get("sealed", False) and unseal_key:
        ok("Unsealing...")
        bao(f"bao operator unseal '{unseal_key}' 2>/dev/null")
    if root_token:
        ok("Seeding KV...")
        pg_rw = "postgres-rw.data.svc.cluster.local:5432"
        # Single exec: enable KV v2 (tolerating already-enabled), then upsert
        # all deterministic local-dev entries.
        bao(f"""
BAO_ADDR=http://127.0.0.1:8200 BAO_TOKEN='{root_token}' sh -c '
bao secrets enable -path=secret -version=2 kv 2>/dev/null || true
bao kv put secret/postgres password="{DB_PASSWORD}"
bao kv put secret/hydra db-password="{DB_PASSWORD}" system-secret="{HYDRA_SYSTEM_SECRET}" cookie-secret="{HYDRA_COOKIE_SECRET}" pairwise-salt="{HYDRA_PAIRWISE_SALT}"
bao kv put secret/kratos db-password="{DB_PASSWORD}"
bao kv put secret/gitea db-password="{DB_PASSWORD}" s3-access-key="{S3_ACCESS_KEY}" s3-secret-key="{S3_SECRET_KEY}"
bao kv put secret/seaweedfs access-key="{S3_ACCESS_KEY}" secret-key="{S3_SECRET_KEY}"
bao kv put secret/hive db-url="postgresql://hive:{DB_PASSWORD}@{pg_rw}/hive_db" oidc-client-id="hive-local" oidc-client-secret="hive-local-secret"
bao kv put secret/livekit api-key="{LIVEKIT_API_KEY}" api-secret="{LIVEKIT_API_SECRET}"
bao kv put secret/people db-password="{DB_PASSWORD}" django-secret-key="{PEOPLE_DJANGO_SECRET}"
'
""")
# ── 11. Restart services ──────────────────────────────────────────────────────
def restart_services() -> None:
    """Rollout-restart deployments that read secrets at startup so they pick
    up freshly seeded values; missing deployments are ignored."""
    step("Restarting services waiting for secrets...")
    for namespace, deployment in SERVICES_TO_RESTART:
        kube("-n", namespace, "rollout", "restart",
             f"deployment/{deployment}", check=False)
    ok("Done.")
# ── 12. Wait for core ─────────────────────────────────────────────────────────
def wait_for_core() -> None:
    """Best-effort wait (120 s each) for valkey, kratos, and hydra rollouts;
    never fails the run."""
    step("Waiting for core services...")
    core = [("data", "valkey"), ("ory", "kratos"), ("ory", "hydra")]
    for namespace, deployment in core:
        kube("rollout", "status", f"deployment/{deployment}",
             "-n", namespace, "--timeout=120s", check=False)
    ok("Core services ready.")
# ── 13. Print URLs ────────────────────────────────────────────────────────────
def print_urls(domain: str) -> None:
    """Print the final summary: service URLs plus OpenBao access instructions.

    Fix: the separator lines were written as f"{''*60}" — the empty string
    repeated 60 times, i.e. nothing at all. Almost certainly a box-drawing
    character lost in transit (the file's section headers use '─');
    restored as '─' * 60.
    """
    rule = "─" * 60
    print(f"\n{rule}")
    print(f" Stack is up. Domain: {domain}")
    print(rule)
    services = [
        ("Auth", f"https://auth.{domain}/"),
        ("Docs", f"https://docs.{domain}/"),
        ("Meet", f"https://meet.{domain}/"),
        ("Drive", f"https://drive.{domain}/"),
        ("Chat", f"https://chat.{domain}/"),
        ("Mail", f"https://mail.{domain}/"),
        ("People", f"https://people.{domain}/"),
        ("Gitea", f"https://src.{domain}/"),
    ]
    for name, url in services:
        print(f" {name:<10} {url}")
    print()
    print(" OpenBao UI:")
    print(" kubectl --context=sunbeam -n data port-forward svc/openbao 8200:8200")
    print(" http://localhost:8200")
    token_cmd = ("kubectl --context=sunbeam -n data get secret openbao-keys "
                 "-o jsonpath='{.data.root-token}' | base64 -d")
    print(f" token: {token_cmd}")
    print(f"{rule}\n")
# ── Main ──────────────────────────────────────────────────────────────────────
def main() -> None:
    """CLI entry point.

    Flag semantics (checked in this order; first match wins):
      --seed    → re-seed secrets, then restart dependent services
      --restart → restart dependent services only
      --apply   → re-apply manifests, then restart
      (none)    → full bring-up from any starting state

    Fix: the domain was previously derived via get_lima_ip() BEFORE the
    full bring-up path ensured the VM exists, so a run on a fresh machine
    crashed querying a non-existent VM even though the value was discarded
    and recomputed later. The lookup now happens only in the --apply branch,
    which legitimately assumes a running VM.
    """
    parser = argparse.ArgumentParser(description="Sunbeam local dev stack manager")
    parser.add_argument("--seed", action="store_true", help="Re-seed secrets only")
    parser.add_argument("--apply", action="store_true", help="Re-apply manifests only")
    parser.add_argument("--restart", action="store_true", help="Restart services only")
    args = parser.parse_args()
    check_prerequisites()
    if args.seed:
        seed_secrets()
        restart_services()
        return
    if args.restart:
        restart_services()
        return
    if args.apply:
        # Partial run: assumes the VM is already up; derive the domain from
        # the VM's current IP.
        domain = f"{get_lima_ip()}.sslip.io"
        apply_manifests(domain)
        restart_services()
        return
    # Full bring-up
    ensure_lima_vm()
    merge_kubeconfig()
    disable_traefik()
    ensure_cert_manager()
    ensure_linkerd()
    domain = ensure_tls_cert()  # also computes domain from current IP
    ensure_tls_secret(domain)
    apply_manifests(domain)
    seed_secrets()
    restart_services()
    wait_for_core()
    print_urls(domain)
if __name__ == "__main__":
    main()

View File

@@ -11,7 +11,7 @@ CTX="--context=sunbeam"
# 1. Check prerequisites
# ---------------------------------------------------------------------------
echo "==> Checking prerequisites..."
for tool in limactl mkcert kubectl kustomize linkerd jq; do
for tool in limactl mkcert kubectl kustomize linkerd jq yq; do
if ! command -v "$tool" &>/dev/null; then
echo "ERROR: '$tool' not found. Install with: brew install $tool" >&2
exit 1
@@ -22,15 +22,12 @@ echo " OK"
# ---------------------------------------------------------------------------
# 2. Start Lima VM (skip if already running)
# ---------------------------------------------------------------------------
# Separate existence check from status — avoids falling through to "create"
# when VM exists but has an unexpected status (Broken, Starting, etc.)
LIMA_STATUS=$(limactl list --json 2>/dev/null | \
python3 -c "import sys,json; vms=[v for v in json.load(sys.stdin) if v['name']=='sunbeam']; print(vms[0]['status'] if vms else 'none')" 2>/dev/null || echo "none")
if [[ "$LIMA_STATUS" == "Running" ]]; then
echo "==> Lima VM 'sunbeam' already running."
elif [[ "$LIMA_STATUS" == "Stopped" ]]; then
echo "==> Starting existing Lima VM 'sunbeam'..."
limactl start sunbeam
else
if [[ "$LIMA_STATUS" == "none" ]]; then
echo "==> Creating Lima VM 'sunbeam' (k3s, 6 CPU / 12 GB / 60 GB)..."
limactl start \
--name=sunbeam \
@@ -40,6 +37,12 @@ else
--disk=60 \
--vm-type=vz \
--mount-type=virtiofs
elif [[ "$LIMA_STATUS" == "Running" ]]; then
echo "==> Lima VM 'sunbeam' already running."
else
# Covers Stopped, Broken, Starting, or any other state
echo "==> Starting Lima VM 'sunbeam' (status: $LIMA_STATUS)..."
limactl start sunbeam
fi
# ---------------------------------------------------------------------------
@@ -96,10 +99,10 @@ if ! kubectl $CTX get ns linkerd &>/dev/null; then
kubectl $CTX apply --server-side -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
echo "==> Installing Linkerd CRDs..."
linkerd install --crds | kubectl $CTX apply -f -
linkerd install --crds | kubectl $CTX apply --server-side -f -
echo "==> Installing Linkerd control plane..."
linkerd install | kubectl $CTX apply -f -
linkerd install | kubectl $CTX apply --server-side -f -
kubectl $CTX -n linkerd rollout status deployment/linkerd-identity --timeout=120s
kubectl $CTX -n linkerd rollout status deployment/linkerd-destination --timeout=120s
kubectl $CTX -n linkerd rollout status deployment/linkerd-proxy-injector --timeout=120s
@@ -164,6 +167,9 @@ for ns_deploy in \
"devtools/gitea" \
"storage/seaweedfs-filer" \
"lasuite/hive" \
"lasuite/people-backend" \
"lasuite/people-celery-worker" \
"lasuite/people-celery-beat" \
"media/livekit-server"; do
ns="${ns_deploy%%/*}"
dep="${ns_deploy##*/}"
@@ -192,9 +198,9 @@ echo " Docs: https://docs.${DOMAIN}/"
echo " Meet: https://meet.${DOMAIN}/"
echo " Drive: https://drive.${DOMAIN}/"
echo " Chat: https://chat.${DOMAIN}/"
echo " Mail: https://mail.${DOMAIN}/"
echo " People: https://people.${DOMAIN}/"
echo " Gitea: https://src.${DOMAIN}/"
echo " Mailpit: https://mailpit.${DOMAIN}/ (captured outbound email)"
echo ""
echo "OpenBao UI: kubectl $CTX -n data port-forward svc/openbao 8200:8200"
echo " http://localhost:8200 (token from: kubectl $CTX -n data get secret openbao-keys -o jsonpath='{.data.root-token}' | base64 -d)"