feat: bring up local dev stack — all services running

- Ory Hydra + Kratos: fixed secret management, DSN config, DB migrations,
  OAuth2Client CRD (helm template skips crds/ dir), login-ui env vars
- SeaweedFS: added s3.json credentials file via -s3.config CLI flag
- OpenBao: standalone mode with auto-unseal sidecar, keys in K8s secret
- OpenSearch: increased memory to 1.5Gi / JVM 1g heap
- Gitea: SSL_MODE disable, S3 bucket creation fixed
- Hive: automountServiceAccountToken: false (Lima virtiofs read-only rootfs quirk)
- LiveKit: API keys in values, hostPort conflict resolved
- Linkerd: native sidecar (proxy.nativeSidecar=true) to avoid blocking Jobs
- All placeholder images replaced: pingora→nginx:alpine, login-ui→oryd/kratos-selfservice-ui-node

Full stack running: postgres, valkey, openbao, opensearch, seaweedfs,
kratos, hydra, gitea, livekit, hive (placeholder), login-ui
This commit is contained in:
2026-02-28 22:08:38 +00:00
parent 92e80a761c
commit a589e6280d
19 changed files with 852 additions and 103 deletions

View File

@@ -10,12 +10,20 @@ resources:
- valkey-service.yaml
- opensearch-deployment.yaml
- opensearch-service.yaml
- openbao-keys-placeholder.yaml
helmCharts:
# helm repo add cnpg https://cloudnative-pg.github.io/charts
# releaseName=cloudnative-pg matches chart name → operator Deployment is named `cloudnative-pg`
- name: cloudnative-pg
repo: https://cloudnative-pg.github.io/charts
version: "0.27.1"
releaseName: cloudnative-pg
namespace: data
# helm repo add openbao https://openbao.github.io/openbao-helm
- name: openbao
repo: https://openbao.github.io/openbao-helm
version: "0.25.6"
releaseName: openbao
namespace: data
valuesFile: openbao-values.yaml

View File

@@ -0,0 +1,9 @@
# Placeholder secret — replaced by the init script after `bao operator init`.
# Exists so the auto-unseal sidecar's volume mount doesn't block pod startup.
apiVersion: v1
kind: Secret
metadata:
  name: openbao-keys
  namespace: data
type: Opaque
# Intentionally empty: scripts/local-seed-secrets.sh overwrites this Secret
# with `key` (unseal key) and `root-token` once OpenBao is initialized.
data: {}

View File

@@ -0,0 +1,78 @@
# OpenBao Helm values — standalone single-instance mode.
# Root token + unseal key stored in K8s secret `openbao-keys` (created by init script).
global:
  tlsDisable: true
injector:
  enabled: true
  agentDefaults:
    cpuLimit: "250m"
    cpuRequest: "50m"
    memLimit: "64Mi"
    memRequest: "32Mi"
server:
  image:
    registry: quay.io
    repository: openbao/openbao
  standalone:
    enabled: true
    config: |
      ui = true
      listener "tcp" {
        tls_disable = 1
        address = "[::]:8200"
      }
      storage "file" {
        path = "/openbao/data"
      }
  dataStorage:
    enabled: true
    size: 1Gi
  resources:
    limits:
      memory: 128Mi
    requests:
      memory: 64Mi
      cpu: 50m
  # Auto-unseal sidecar: reads the unseal key from the mounted K8s secret and
  # re-unseals OpenBao whenever it comes up sealed (e.g. after a pod restart).
  extraContainers:
    - name: auto-unseal
      image: quay.io/openbao/openbao
      command: ["/bin/sh", "-c"]
      args:
        - |
          export BAO_ADDR=http://127.0.0.1:8200
          echo "auto-unseal: waiting for openbao to respond..."
          # `bao status` exits 0 when unsealed, 2 when sealed, other non-zero
          # on error. Treat exit code 2 as "server is up": waiting for exit 0
          # would deadlock — the server can never unseal until this sidecar
          # acts. ($? inside the || branch is the exit status of `bao status`.)
          until bao status -format=json >/dev/null 2>&1 || [ $? -eq 2 ]; do
            sleep 2
          done
          while true; do
            if [ -f /openbao/unseal/key ]; then
              if bao status -format=json 2>/dev/null | grep '"sealed"' | grep -q 'true'; then
                echo "auto-unseal: unsealing..."
                bao operator unseal "$(cat /openbao/unseal/key)"
              fi
            fi
            sleep 15
          done
      volumeMounts:
        # Chart names extraVolumes volumes `userconfig-<name>`.
        - name: userconfig-openbao-keys
          mountPath: /openbao/unseal
          readOnly: true
      resources:
        limits:
          memory: 32Mi
        requests:
          memory: 16Mi
          cpu: 10m
  extraVolumes:
    # NOTE(review): the vault-derived chart's extraVolumes entries document
    # only type/name/path — confirm this chart version honors `optional:`;
    # if not, a missing secret blocks startup despite the placeholder manifest.
    - type: secret
      name: openbao-keys
      path: openbao-unseal
      optional: true

View File

@@ -33,12 +33,12 @@ spec:
- name: discovery.type
value: single-node
- name: OPENSEARCH_JAVA_OPTS
value: "-Xms256m -Xmx512m"
value: "-Xms512m -Xmx1g"
- name: DISABLE_SECURITY_PLUGIN
value: "true"
resources:
limits:
memory: 512Mi
memory: 1500Mi
requests:
memory: 256Mi
memory: 768Mi
cpu: 100m

View File

@@ -19,26 +19,27 @@ spec:
secret:
name: postgres-superuser
postInitSQL:
# Create all 10 logical databases and their owners
- CREATE USER kratos;
# Create all 10 logical databases and their owners.
# Passwords are set by the seed-secrets script via ALTER USER after init.
- CREATE USER kratos WITH LOGIN;
- CREATE DATABASE kratos_db OWNER kratos;
- CREATE USER hydra;
- CREATE USER hydra WITH LOGIN;
- CREATE DATABASE hydra_db OWNER hydra;
- CREATE USER docs;
- CREATE USER docs WITH LOGIN;
- CREATE DATABASE docs_db OWNER docs;
- CREATE USER meet;
- CREATE USER meet WITH LOGIN;
- CREATE DATABASE meet_db OWNER meet;
- CREATE USER drive;
- CREATE USER drive WITH LOGIN;
- CREATE DATABASE drive_db OWNER drive;
- CREATE USER messages;
- CREATE USER messages WITH LOGIN;
- CREATE DATABASE messages_db OWNER messages;
- CREATE USER conversations;
- CREATE USER conversations WITH LOGIN;
- CREATE DATABASE conversations_db OWNER conversations;
- CREATE USER people;
- CREATE USER people WITH LOGIN;
- CREATE DATABASE people_db OWNER people;
- CREATE USER gitea;
- CREATE USER gitea WITH LOGIN;
- CREATE DATABASE gitea_db OWNER gitea;
- CREATE USER hive;
- CREATE USER hive WITH LOGIN;
- CREATE DATABASE hive_db OWNER hive;
storage:

View File

@@ -21,10 +21,11 @@ gitea:
LFS_START_SERVER: "true"
database:
DB_TYPE: postgres
HOST: postgres-rw.data.svc.cluster.local:5432
NAME: gitea_db
USER: gitea
DB_TYPE: postgres
HOST: postgres-rw.data.svc.cluster.local:5432
NAME: gitea_db
USER: gitea
SSL_MODE: disable
# PASSWD injected via additionalConfigFromEnvs below
cache:

View File

@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: pingora
image: ghcr.io/sunbeam-studio/pingora:latest
image: nginx:alpine # placeholder until custom Pingora image is built
ports:
- name: http
containerPort: 80

View File

@@ -13,9 +13,10 @@ spec:
labels:
app: hive
spec:
automountServiceAccountToken: false
containers:
- name: hive
image: ghcr.io/sunbeam-studio/hive:latest
image: nginx:alpine # placeholder until La Suite Hive image is built
volumeMounts:
- name: config
mountPath: /etc/hive

View File

@@ -24,9 +24,10 @@ livekit:
# Valkey is protocol-compatible with Redis; LiveKit sees this as a Redis endpoint
address: valkey.data.svc.cluster.local:6379
# API keys are loaded from a Kubernetes Secret and mounted as env vars.
# keys:
# <key>: <secret> # set in overlay Secret, not here
# API keys — overridden per-environment via secrets.
# At least one key must be present for the server to start.
keys:
devkey: secret-placeholder
deployment:
resources:

View File

@@ -0,0 +1,376 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.19.0
name: oauth2clients.hydra.ory.sh
spec:
group: hydra.ory.sh
names:
kind: OAuth2Client
listKind: OAuth2ClientList
plural: oauth2clients
singular: oauth2client
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: OAuth2Client is the Schema for the oauth2clients API
properties:
apiVersion:
description: |-
APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
type: string
kind:
description: |-
Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
type: string
metadata:
type: object
spec:
description:
OAuth2ClientSpec defines the desired state of OAuth2Client
properties:
allowedCorsOrigins:
description:
AllowedCorsOrigins is an array of allowed CORS origins
items:
description:
RedirectURI represents a redirect URI for the client
pattern: \w+:/?/?[^\s]+
type: string
type: array
audience:
description:
Audience is a whitelist defining the audiences this client
is allowed to request tokens for
items:
type: string
type: array
backChannelLogoutSessionRequired:
default: false
description:
BackChannelLogoutSessionRequired Boolean value specifying
whether the RP requires that a sid (session ID) Claim be
included in the Logout Token to identify the RP session with
the OP when the backchannel_logout_uri is used. If omitted,
the default value is false.
type: boolean
backChannelLogoutURI:
description:
BackChannelLogoutURI RP URL that will cause the RP to log
itself out when sent a Logout Token by the OP
pattern: (^$|^https?://.*)
type: string
clientName:
description:
ClientName is the human-readable string name of the client
to be presented to the end-user during authorization.
type: string
deletionPolicy:
description: |-
Indicates if a deleted OAuth2Client custom resource should delete the database row or not.
Values can be 'delete' to delete the OAuth2 client, value 'orphan' to keep an orphan oauth2 client.
enum:
- delete
- orphan
type: string
frontChannelLogoutSessionRequired:
default: false
description:
FrontChannelLogoutSessionRequired Boolean value specifying
whether the RP requires that iss (issuer) and sid (session
ID) query parameters be included to identify the RP session
with the OP when the frontchannel_logout_uri is used
type: boolean
frontChannelLogoutURI:
description:
FrontChannelLogoutURI RP URL that will cause the RP to log
itself out when rendered in an iframe by the OP. An iss
(issuer) query parameter and a sid (session ID) query
parameter MAY be included by the OP to enable the RP to
validate the request and to determine which of the
potentially multiple sessions is to be logged out; if either
is included, both MUST be
pattern: (^$|^https?://.*)
type: string
grantTypes:
description:
GrantTypes is an array of grant types the client is allowed
to use.
items:
description: GrantType represents an OAuth 2.0 grant type
enum:
- client_credentials
- authorization_code
- implicit
- refresh_token
type: string
maxItems: 4
minItems: 1
type: array
hydraAdmin:
description: |-
HydraAdmin is the optional configuration to use for managing
this client
properties:
endpoint:
description: |-
Endpoint is the endpoint for the hydra instance on which
to set up the client. This value will override the value
provided to `--endpoint` (defaults to `"/clients"` in the
application)
pattern: (^$|^/.*)
type: string
forwardedProto:
description: |-
ForwardedProto overrides the `--forwarded-proto` flag. The
value "off" will force this to be off even if
`--forwarded-proto` is specified
pattern: (^$|https?|off)
type: string
port:
description: |-
Port is the port for the hydra instance on
which to set up the client. This value will override the value
provided to `--hydra-port`
maximum: 65535
type: integer
url:
description: |-
URL is the URL for the hydra instance on
which to set up the client. This value will override the value
provided to `--hydra-url`
maxLength: 256
pattern: (^$|^https?://.*)
type: string
type: object
jwksUri:
description:
JwksUri Define the URL where the JSON Web Key Set should be
fetched from when performing the private_key_jwt client
authentication method.
pattern: (^$|^https?://.*)
type: string
metadata:
description: Metadata is arbitrary data
nullable: true
type: object
x-kubernetes-preserve-unknown-fields: true
postLogoutRedirectUris:
description:
PostLogoutRedirectURIs is an array of the post logout
redirect URIs allowed for the application
items:
description:
RedirectURI represents a redirect URI for the client
pattern: \w+:/?/?[^\s]+
type: string
type: array
redirectUris:
description:
RedirectURIs is an array of the redirect URIs allowed for
the application
items:
description:
RedirectURI represents a redirect URI for the client
pattern: \w+:/?/?[^\s]+
type: string
type: array
responseTypes:
description: |-
ResponseTypes is an array of the OAuth 2.0 response type strings that the client can
use at the authorization endpoint.
items:
description:
ResponseType represents an OAuth 2.0 response type strings
enum:
- id_token
- code
- token
- code token
- code id_token
- id_token token
- code id_token token
type: string
maxItems: 3
minItems: 1
type: array
scope:
description: |-
Scope is a string containing a space-separated list of scope values (as
described in Section 3.3 of OAuth 2.0 [RFC6749]) that the client
can use when requesting access tokens.
Use scopeArray instead.
pattern: ([a-zA-Z0-9\.\*]+\s?)*
type: string
scopeArray:
description: |-
Scope is an array of scope values (as described in Section 3.3 of OAuth 2.0 [RFC6749])
that the client can use when requesting access tokens.
items:
type: string
type: array
secretName:
description:
SecretName points to the K8s secret that contains this
client's ID and password
maxLength: 253
minLength: 1
pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*'
type: string
skipConsent:
default: false
description:
SkipConsent skips the consent screen for this client.
type: boolean
tokenEndpointAuthMethod:
allOf:
- enum:
- client_secret_basic
- client_secret_post
- private_key_jwt
- none
- enum:
- client_secret_basic
- client_secret_post
- private_key_jwt
- none
description:
Indication which authentication method should be used for
the token endpoint
type: string
tokenLifespans:
description: |-
TokenLifespans is the configuration to use for managing different token lifespans
depending on the used grant type.
properties:
authorization_code_grant_access_token_lifespan:
description: |-
AuthorizationCodeGrantAccessTokenLifespan is the access token lifespan
issued on an authorization_code grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
authorization_code_grant_id_token_lifespan:
description: |-
AuthorizationCodeGrantIdTokenLifespan is the id token lifespan
issued on an authorization_code grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
authorization_code_grant_refresh_token_lifespan:
description: |-
AuthorizationCodeGrantRefreshTokenLifespan is the refresh token lifespan
issued on an authorization_code grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
client_credentials_grant_access_token_lifespan:
description: |-
AuthorizationCodeGrantRefreshTokenLifespan is the access token lifespan
issued on a client_credentials grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
implicit_grant_access_token_lifespan:
description: |-
ImplicitGrantAccessTokenLifespan is the access token lifespan
issued on an implicit grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
implicit_grant_id_token_lifespan:
description: |-
ImplicitGrantIdTokenLifespan is the id token lifespan
issued on an implicit grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
jwt_bearer_grant_access_token_lifespan:
description: |-
JwtBearerGrantAccessTokenLifespan is the access token lifespan
issued on a jwt_bearer grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
refresh_token_grant_access_token_lifespan:
description: |-
RefreshTokenGrantAccessTokenLifespan is the access token lifespan
issued on a refresh_token grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
refresh_token_grant_id_token_lifespan:
description: |-
RefreshTokenGrantIdTokenLifespan is the id token lifespan
issued on a refresh_token grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
refresh_token_grant_refresh_token_lifespan:
description: |-
RefreshTokenGrantRefreshTokenLifespan is the refresh token lifespan
issued on a refresh_token grant.
pattern: "[0-9]+(ns|us|ms|s|m|h)"
type: string
type: object
logoUri:
type: string
description: LogoURI is the URL for the client's logo
pattern: (^$|^https?://.*)
required:
- grantTypes
- secretName
type: object
status:
description:
OAuth2ClientStatus defines the observed state of OAuth2Client
properties:
conditions:
items:
description:
OAuth2ClientCondition contains condition information for
an OAuth2Client
properties:
status:
enum:
- "True"
- "False"
- Unknown
type: string
type:
type: string
required:
- status
- type
type: object
type: array
observedGeneration:
description:
ObservedGeneration represents the most recent generation
observed by the daemon set controller.
format: int64
type: integer
reconciliationError:
description:
ReconciliationError represents an error that occurred during
the reconciliation process
properties:
description:
description:
Description is the description of the reconciliation
error
type: string
statusCode:
description:
Code is the status code of the reconciliation error
type: string
type: object
type: object
type: object
served: true
storage: true
subresources:
status: {}

View File

@@ -1,11 +1,13 @@
# Base Ory Hydra Helm values.
# DOMAIN_SUFFIX is replaced by overlay patches.
# DSN and system secrets come from the overlay-specific Secret.
# DOMAIN_SUFFIX is replaced at apply time via sed.
# secret.enabled: false — we create the "hydra" K8s Secret via seed script.
# DSN is set in config (chart strips it from env, so must be in values).
hydra:
automigration:
enabled: true
config:
dsn: "postgresql://hydra:$(HYDRA_DB_PASSWORD)@postgres-rw.data.svc.cluster.local:5432/hydra_db"
dsn: "postgresql://hydra:localdev@postgres-rw.data.svc.cluster.local:5432/hydra_db?sslmode=disable"
urls:
self:
issuer: https://auth.DOMAIN_SUFFIX/
@@ -14,19 +16,6 @@ hydra:
logout: https://auth.DOMAIN_SUFFIX/logout
error: https://auth.DOMAIN_SUFFIX/error
secrets:
system:
- $(HYDRA_SYSTEM_SECRET)
cookie:
- $(HYDRA_COOKIE_SECRET)
oidc:
subject_identifiers:
supported_types:
- public
pairwise:
salt: $(HYDRA_PAIRWISE_SALT)
serve:
cookies:
same_site_mode: Lax
@@ -36,6 +25,11 @@ hydra:
allowed_origins:
- https://*.DOMAIN_SUFFIX
# Disable chart's secret generation — we create the "hydra" secret via seed script
# with keys: secretsSystem, secretsCookie, pairwise-salt.
secret:
enabled: false
deployment:
resources:
limits:

View File

@@ -1,12 +1,13 @@
# Base Ory Kratos Helm values.
# DOMAIN_SUFFIX is replaced by overlay patches (sunbeam.pt / <LIMA_IP>.sslip.io).
# DSN and SMTP credentials come from the overlay-specific Secret.
# DOMAIN_SUFFIX is replaced at apply time via sed.
# DSN is set in config (chart renders it into kratos-secrets Secret automatically).
kratos:
automigration:
enabled: true
config:
version: v0.13.0
dsn: "postgresql://kratos:$(KRATOS_DB_PASSWORD)@postgres-rw.data.svc.cluster.local:5432/kratos_db"
dsn: "postgresql://kratos:localdev@postgres-rw.data.svc.cluster.local:5432/kratos_db?sslmode=disable"
selfservice:
default_browser_return_url: https://auth.DOMAIN_SUFFIX/
@@ -24,6 +25,7 @@ kratos:
ui_url: https://auth.DOMAIN_SUFFIX/login
registration:
ui_url: https://auth.DOMAIN_SUFFIX/registration
enabled: true
recovery:
ui_url: https://auth.DOMAIN_SUFFIX/recovery
settings:
@@ -33,11 +35,11 @@ kratos:
default_schema_id: default
schemas:
- id: default
url: file:///etc/config/kratos/identity.schema.json
url: base64://ewogICIkaWQiOiAiaHR0cHM6Ly9zY2hlbWFzLnN1bmJlYW0uc3R1ZGlvL2lkZW50aXR5Lmpzb24iLAogICIkc2NoZW1hIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDcvc2NoZW1hIyIsCiAgInR5cGUiOiAib2JqZWN0IiwKICAidGl0bGUiOiAiUGVyc29uIiwKICAicHJvcGVydGllcyI6IHsKICAgICJ0cmFpdHMiOiB7CiAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICJwcm9wZXJ0aWVzIjogewogICAgICAgICJlbWFpbCI6IHsKICAgICAgICAgICJ0eXBlIjogInN0cmluZyIsCiAgICAgICAgICAiZm9ybWF0IjogImVtYWlsIiwKICAgICAgICAgICJ0aXRsZSI6ICJFbWFpbCIsCiAgICAgICAgICAib3J5LnNoL2tyYXRvcyI6IHsKICAgICAgICAgICAgImNyZWRlbnRpYWxzIjogewogICAgICAgICAgICAgICJwYXNzd29yZCI6IHsKICAgICAgICAgICAgICAgICJpZGVudGlmaWVyIjogdHJ1ZQogICAgICAgICAgICAgIH0KICAgICAgICAgICAgfSwKICAgICAgICAgICAgInJlY292ZXJ5IjogewogICAgICAgICAgICAgICJ2aWEiOiAiZW1haWwiCiAgICAgICAgICAgIH0sCiAgICAgICAgICAgICJ2ZXJpZmljYXRpb24iOiB7CiAgICAgICAgICAgICAgInZpYSI6ICJlbWFpbCIKICAgICAgICAgICAgfQogICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgIm5hbWUiOiB7CiAgICAgICAgICAidHlwZSI6ICJvYmplY3QiLAogICAgICAgICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgICAgICJmaXJzdCI6IHsgInR5cGUiOiAic3RyaW5nIiwgInRpdGxlIjogIkZpcnN0IG5hbWUiIH0sCiAgICAgICAgICAgICJsYXN0IjogeyAidHlwZSI6ICJzdHJpbmciLCAidGl0bGUiOiAiTGFzdCBuYW1lIiB9CiAgICAgICAgICB9CiAgICAgICAgfQogICAgICB9LAogICAgICAicmVxdWlyZWQiOiBbImVtYWlsIl0KICAgIH0KICB9Cn0K
courier:
smtp:
connection_uri: "smtp://$(SMTP_USER):$(SMTP_PASSWORD)@localhost:25/"
connection_uri: "smtp://local:local@localhost:25/"
from_address: no-reply@DOMAIN_SUFFIX
from_name: Sunbeam
@@ -51,6 +53,11 @@ kratos:
admin:
base_url: http://kratos-admin.ory.svc.cluster.local:4434/
# Chart creates kratos-secrets from Helm values (dsn + generated random secrets).
secret:
enabled: true
nameOverride: kratos-secrets
deployment:
resources:
limits:

View File

@@ -6,6 +6,8 @@ namespace: ory
resources:
- namespace.yaml
- login-ui-deployment.yaml
# Hydra chart CRDs are not rendered by helm template; apply manually.
- hydra-oauth2client-crd.yaml
# The hydra-maester sub-chart does not set .Release.Namespace in its Deployment template.
patches:

View File

@@ -15,7 +15,7 @@ spec:
spec:
containers:
- name: login-ui
image: ghcr.io/sunbeam-studio/login-ui:latest
image: oryd/kratos-selfservice-ui-node:v1.3.0
ports:
- name: http
containerPort: 3000
@@ -23,15 +23,23 @@ spec:
env:
- name: KRATOS_PUBLIC_URL
value: "http://kratos-public.ory.svc.cluster.local:4433"
- name: KRATOS_BROWSER_URL
value: "https://auth.DOMAIN_SUFFIX/kratos"
- name: HYDRA_ADMIN_URL
value: "http://hydra-admin.ory.svc.cluster.local:4445"
- name: PORT
value: "3000"
- name: COOKIE_SECRET
value: "localdev-cookie-secret"
- name: CSRF_COOKIE_NAME
value: "csrf"
- name: CSRF_COOKIE_SECRET
value: "localdev-csrf-secret"
resources:
limits:
memory: 64Mi
memory: 256Mi
requests:
memory: 32Mi
memory: 128Mi
cpu: 25m
---
apiVersion: v1

View File

@@ -15,8 +15,22 @@ data:
[s3]
enabled = true
port = 8333
# Static S3 credentials for local dev are defined inline in s3.json below and
# loaded by the filer via the -s3.config flag (see the filer deployment args).
s3.json: |
{
"identities": [
{
"name": "minioadmin",
"credentials": [
{
"accessKey": "minioadmin",
"secretKey": "minioadmin"
}
],
"actions": ["Admin", "Read", "Write", "List", "Tagging"]
}
]
}
master.toml: |
[master.maintenance]

View File

@@ -21,6 +21,7 @@ spec:
- -port=8888
- -s3
- -s3.port=8333
- -s3.config=/etc/seaweedfs/s3.json
- -master=seaweedfs-master.storage.svc.cluster.local:9333
ports:
- name: http

View File

@@ -18,11 +18,11 @@ spec:
value: "false"
ports:
# Expose full TURN relay range as hostPort so the Lima VM forwards UDP
- name: turn-relay-start
- name: turn-start
containerPort: 49152
hostPort: 49152
protocol: UDP
- name: turn-relay-end
- name: turn-end
containerPort: 49252
hostPort: 49252
protocol: UDP

183
scripts/local-seed-secrets.sh Executable file
View File

@@ -0,0 +1,183 @@
#!/usr/bin/env bash
# Seed all secrets for the local dev stack.
# - Initializes OpenBao (if needed) and stores root token + unseal key
# - Sets postgres user passwords
# - Creates K8s secrets consumed by each service
# - Stores all secrets in OpenBao as source of truth
#
# Idempotent: safe to run multiple times.
set -euo pipefail
CTX="--context=sunbeam"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"  # reserved for relative paths
# Deterministic local-dev passwords (simple, memorable, not for production)
DB_PASSWORD="localdev"
S3_ACCESS_KEY="minioadmin"
S3_SECRET_KEY="minioadmin"
HYDRA_SYSTEM_SECRET="local-hydra-system-secret-at-least-16"
HYDRA_COOKIE_SECRET="local-hydra-cookie-secret-at-least-16"
HYDRA_PAIRWISE_SALT="local-hydra-pairwise-salt-value-1"
LIVEKIT_API_KEY="devkey"
LIVEKIT_API_SECRET="secret-placeholder"
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
# ensure_ns NS — create a namespace if missing (idempotent via dry-run | apply).
ensure_ns() {
  kubectl $CTX create namespace "$1" --dry-run=client -o yaml | kubectl $CTX apply -f - 2>/dev/null
}
# create_secret NS NAME --from-literal=k=v ... — create or update a generic secret.
create_secret() {
  local ns="$1"; shift
  local name="$1"; shift
  # remaining args are --from-literal=key=value
  kubectl $CTX create secret generic "$name" -n "$ns" "$@" \
    --dry-run=client -o yaml | kubectl $CTX apply -f -
}
# bao_status_field POD FIELD — print a boolean field ("true"/"false") from
# `bao status -format=json`. Robust against two pitfalls of the previous
# `grep -c 'true' || echo 0` approach: (1) grep -c prints "0" itself AND exits
# non-zero on no match, so the fallback produced a second line ("0\n0");
# (2) under `set -o pipefail` the pipeline failed whenever `bao status` exited
# 2 (sealed), yielding "1\n0" for an initialized-but-sealed server and causing
# a spurious re-init attempt.
bao_status_field() {
  local json
  json=$(kubectl $CTX -n data exec "$1" -c openbao -- bao status -format=json 2>/dev/null || true)
  if [[ -z "$json" ]]; then
    echo "false"
    return
  fi
  echo "$json" | jq -r ".$2" 2>/dev/null || echo "false"
}
# ---------------------------------------------------------------------------
# 1. Wait for postgres to be ready (up to ~5 min)
# ---------------------------------------------------------------------------
echo "==> Waiting for postgres cluster..."
for i in $(seq 1 60); do
  PHASE=$(kubectl $CTX -n data get cluster postgres -o jsonpath='{.status.phase}' 2>/dev/null || echo "")
  if [[ "$PHASE" == "Cluster in healthy state" ]]; then
    echo "    Postgres is ready."
    break
  fi
  if [[ $i -eq 60 ]]; then
    echo "WARN: Postgres not ready after 5 min, continuing anyway..."
    break  # no point sleeping again on the final iteration
  fi
  sleep 5
done
# ---------------------------------------------------------------------------
# 2. Set postgres user passwords
# ---------------------------------------------------------------------------
echo "==> Setting postgres user passwords..."
PG_POD=$(kubectl $CTX -n data get pods -l cnpg.io/cluster=postgres,role=primary -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [[ -n "$PG_POD" ]]; then
  for user in kratos hydra gitea hive docs meet drive messages conversations people; do
    # Best-effort: a user may not exist yet on a partially initialized cluster.
    kubectl $CTX -n data exec "$PG_POD" -c postgres -- \
      psql -U postgres -c "ALTER USER $user WITH PASSWORD '$DB_PASSWORD';" 2>/dev/null || true
  done
  echo "    Done."
else
  echo "WARN: No postgres primary pod found, skipping password setup."
fi
# ---------------------------------------------------------------------------
# 3. Create K8s secrets for each service
# ---------------------------------------------------------------------------
echo "==> Creating K8s secrets..."
# Ory namespace
ensure_ns ory
# Secret name must match what the hydra chart expects (secret.enabled: false
# in values means the chart consumes an externally managed secret).
create_secret ory hydra \
  --from-literal=dsn="postgresql://hydra:${DB_PASSWORD}@postgres-rw.data.svc.cluster.local:5432/hydra_db?sslmode=disable" \
  --from-literal=secretsSystem="$HYDRA_SYSTEM_SECRET" \
  --from-literal=secretsCookie="$HYDRA_COOKIE_SECRET" \
  --from-literal=pairwise-salt="$HYDRA_PAIRWISE_SALT"
# Kratos chart (secret.enabled: true, nameOverride: kratos-secrets) creates
# kratos-secrets from Helm values — DSN is in kratos-values.yaml, random
# secrets generated by the chart. Nothing to create here.
# Devtools namespace
ensure_ns devtools
create_secret devtools gitea-db-credentials \
  --from-literal=password="$DB_PASSWORD"
create_secret devtools gitea-s3-credentials \
  --from-literal=access-key="$S3_ACCESS_KEY" \
  --from-literal=secret-key="$S3_SECRET_KEY"
# Storage namespace
ensure_ns storage
create_secret storage seaweedfs-s3-credentials \
  --from-literal=S3_ACCESS_KEY="$S3_ACCESS_KEY" \
  --from-literal=S3_SECRET_KEY="$S3_SECRET_KEY"
# La Suite namespace
ensure_ns lasuite
create_secret lasuite seaweedfs-s3-credentials \
  --from-literal=S3_ACCESS_KEY="$S3_ACCESS_KEY" \
  --from-literal=S3_SECRET_KEY="$S3_SECRET_KEY"
create_secret lasuite hive-db-url \
  --from-literal=url="postgresql://hive:${DB_PASSWORD}@postgres-rw.data.svc.cluster.local:5432/hive_db"
create_secret lasuite hive-oidc \
  --from-literal=client-id="hive-local" \
  --from-literal=client-secret="hive-local-secret"
# Media namespace
ensure_ns media
echo "    Done."
# ---------------------------------------------------------------------------
# 4. Initialize and unseal OpenBao (if deployed)
# ---------------------------------------------------------------------------
echo "==> Checking OpenBao..."
OB_POD=$(kubectl $CTX -n data get pods -l app.kubernetes.io/name=openbao,component=server -o jsonpath='{.items[0].metadata.name}' 2>/dev/null || echo "")
if [[ -z "$OB_POD" ]]; then
  echo "    OpenBao pod not found, skipping."
else
  # Wait for pod to be Running (not Ready — it won't be Ready until unsealed).
  kubectl $CTX -n data wait pod "$OB_POD" --for=jsonpath='{.status.phase}'=Running --timeout=120s 2>/dev/null || true
  UNSEAL_KEY=""
  ROOT_TOKEN=""
  if [[ "$(bao_status_field "$OB_POD" initialized)" != "true" ]]; then
    echo "==> Initializing OpenBao..."
    # Single key share: acceptable for local dev only.
    INIT_OUTPUT=$(kubectl $CTX -n data exec "$OB_POD" -c openbao -- bao operator init -key-shares=1 -key-threshold=1 -format=json 2>/dev/null)
    UNSEAL_KEY=$(echo "$INIT_OUTPUT" | jq -r '.unseal_keys_b64[0]')
    ROOT_TOKEN=$(echo "$INIT_OUTPUT" | jq -r '.root_token')
    # Store keys in K8s secret (also consumed by the auto-unseal sidecar).
    create_secret data openbao-keys \
      --from-literal=key="$UNSEAL_KEY" \
      --from-literal=root-token="$ROOT_TOKEN"
    echo "    Initialized. Keys stored in secret/openbao-keys."
  else
    echo "    Already initialized."
    # Read keys back from the existing secret.
    UNSEAL_KEY=$(kubectl $CTX -n data get secret openbao-keys -o jsonpath='{.data.key}' 2>/dev/null | base64 -d || echo "")
    ROOT_TOKEN=$(kubectl $CTX -n data get secret openbao-keys -o jsonpath='{.data.root-token}' 2>/dev/null | base64 -d || echo "")
  fi
  # Unseal if sealed
  if [[ "$(bao_status_field "$OB_POD" sealed)" == "true" && -n "$UNSEAL_KEY" ]]; then
    echo "==> Unsealing OpenBao..."
    kubectl $CTX -n data exec "$OB_POD" -c openbao -- bao operator unseal "$UNSEAL_KEY"
    echo "    Unsealed."
  fi
  # Seed secrets into OpenBao. NOTE: the root token is interpolated into the
  # exec'd shell command (visible in the pod's process list) — fine for local
  # dev, never do this against a shared cluster.
  if [[ -n "$ROOT_TOKEN" ]]; then
    echo "==> Seeding secrets into OpenBao..."
    kubectl $CTX -n data exec "$OB_POD" -c openbao -- sh -c "
      export BAO_ADDR=http://127.0.0.1:8200
      export BAO_TOKEN='$ROOT_TOKEN'
      bao secrets enable -path=secret -version=2 kv 2>/dev/null || true
      bao kv put secret/postgres password='$DB_PASSWORD'
      bao kv put secret/hydra db-password='$DB_PASSWORD' system-secret='$HYDRA_SYSTEM_SECRET' cookie-secret='$HYDRA_COOKIE_SECRET' pairwise-salt='$HYDRA_PAIRWISE_SALT'
      bao kv put secret/kratos db-password='$DB_PASSWORD'
      bao kv put secret/gitea db-password='$DB_PASSWORD' s3-access-key='$S3_ACCESS_KEY' s3-secret-key='$S3_SECRET_KEY'
      bao kv put secret/seaweedfs access-key='$S3_ACCESS_KEY' secret-key='$S3_SECRET_KEY'
      bao kv put secret/hive db-url='postgresql://hive:${DB_PASSWORD}@postgres-rw.data.svc.cluster.local:5432/hive_db' oidc-client-id='hive-local' oidc-client-secret='hive-local-secret'
      bao kv put secret/livekit api-key='$LIVEKIT_API_KEY' api-secret='$LIVEKIT_API_SECRET'
    " 2>/dev/null
    echo "    Done."
  fi
fi
echo ""
echo "==> All secrets seeded."

View File

@@ -5,12 +5,13 @@ set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
CTX="--context=sunbeam"
# ---------------------------------------------------------------------------
# 1. Check prerequisites
# ---------------------------------------------------------------------------
echo "==> Checking prerequisites..."
for tool in limactl mkcert kubectl helm linkerd; do
for tool in limactl mkcert kubectl kustomize linkerd jq; do
if ! command -v "$tool" &>/dev/null; then
echo "ERROR: '$tool' not found. Install with: brew install $tool" >&2
exit 1
@@ -33,7 +34,7 @@ else
echo "==> Creating Lima VM 'sunbeam' (k3s, 6 CPU / 12 GB / 60 GB)..."
limactl start \
--name=sunbeam \
template://k3s \
template:k3s \
--memory=12 \
--cpus=6 \
--disk=60 \
@@ -42,81 +43,145 @@ else
fi
# ---------------------------------------------------------------------------
# 3. Export kubeconfig
# 3. Merge kubeconfig into ~/.kube/config as context "sunbeam"
# ---------------------------------------------------------------------------
echo "==> Exporting kubeconfig..."
mkdir -p ~/.kube
limactl shell sunbeam kubectl config view --raw > ~/.kube/sunbeam.yaml
export KUBECONFIG=~/.kube/sunbeam.yaml
echo " KUBECONFIG=$KUBECONFIG"
echo "==> Merging kubeconfig..."
LIMA_KUBECONFIG="/Users/$USER/.lima/sunbeam/copied-from-guest/kubeconfig.yaml"
if [[ ! -f "$LIMA_KUBECONFIG" ]]; then
echo "ERROR: Lima kubeconfig not found at $LIMA_KUBECONFIG" >&2
exit 1
fi
# Extract cert data and set context
mkdir -p ~/.kube /tmp/sunbeam-kube
yq '.clusters[0].cluster.certificate-authority-data' "$LIMA_KUBECONFIG" | base64 -d > /tmp/sunbeam-kube/ca.crt
yq '.users[0].user.client-certificate-data' "$LIMA_KUBECONFIG" | base64 -d > /tmp/sunbeam-kube/client.crt
yq '.users[0].user.client-key-data' "$LIMA_KUBECONFIG" | base64 -d > /tmp/sunbeam-kube/client.key
kubectl config set-cluster sunbeam --server=https://127.0.0.1:6443 --certificate-authority=/tmp/sunbeam-kube/ca.crt --embed-certs=true
kubectl config set-credentials sunbeam-admin --client-certificate=/tmp/sunbeam-kube/client.crt --client-key=/tmp/sunbeam-kube/client.key --embed-certs=true
kubectl config set-context sunbeam --cluster=sunbeam --user=sunbeam-admin
rm -rf /tmp/sunbeam-kube
echo " Context 'sunbeam' ready."
# ---------------------------------------------------------------------------
# 4. Install Linkerd CRDs + control plane
# 4. Disable Traefik (k3s default) if still present
# ---------------------------------------------------------------------------
echo "==> Adding Linkerd Helm repo..."
helm repo add linkerd https://helm.linkerd.io/stable --force-update
helm repo update linkerd
echo "==> Installing Linkerd CRDs..."
helm upgrade --install linkerd-crds linkerd/linkerd-crds \
-n mesh --create-namespace --wait
echo "==> Installing Linkerd control plane..."
helm upgrade --install linkerd-control-plane linkerd/linkerd-control-plane \
-n mesh \
--set-file identityTrustAnchorsPEM="$(linkerd identity trust-anchors 2>/dev/null || echo '')" \
--wait || {
echo "==> Bootstrapping Linkerd identity (first install)..."
linkerd install --crds | kubectl apply -f -
linkerd install | kubectl apply -f -
linkerd check
}
if kubectl $CTX get helmchart traefik -n kube-system &>/dev/null; then
echo "==> Removing Traefik (replaced by Pingora)..."
kubectl $CTX delete helmchart traefik traefik-crd -n kube-system 2>/dev/null || true
fi
# Remove startup manifest so k3s doesn't re-create it
limactl shell sunbeam sudo rm -f /var/lib/rancher/k3s/server/manifests/traefik.yaml 2>/dev/null || true
# ---------------------------------------------------------------------------
# 5. Generate mkcert wildcard cert
# 5. Install Gateway API CRDs + Linkerd via CLI
# ---------------------------------------------------------------------------
echo "==> Generating TLS cert..."
bash "$SCRIPT_DIR/local-certs.sh"
if ! kubectl $CTX get ns linkerd &>/dev/null; then
echo "==> Installing Gateway API CRDs..."
kubectl $CTX apply --server-side -f https://github.com/kubernetes-sigs/gateway-api/releases/download/v1.4.0/standard-install.yaml
echo "==> Installing Linkerd CRDs..."
linkerd install --crds | kubectl $CTX apply -f -
echo "==> Installing Linkerd control plane..."
linkerd install | kubectl $CTX apply -f -
kubectl $CTX -n linkerd rollout status deployment/linkerd-identity --timeout=120s
kubectl $CTX -n linkerd rollout status deployment/linkerd-destination --timeout=120s
kubectl $CTX -n linkerd rollout status deployment/linkerd-proxy-injector --timeout=120s
echo " Linkerd installed."
else
echo "==> Linkerd already installed."
fi
# ---------------------------------------------------------------------------
# 6. Generate mkcert wildcard cert
# ---------------------------------------------------------------------------
LIMA_IP=$(limactl shell sunbeam hostname -I | awk '{print $1}')
DOMAIN="${LIMA_IP}.sslip.io"
SECRETS_DIR="$REPO_ROOT/secrets/local"
if [[ ! -f "$SECRETS_DIR/tls.crt" ]]; then
echo "==> Generating TLS cert for *.$DOMAIN..."
mkdir -p "$SECRETS_DIR"
cd "$SECRETS_DIR"
mkcert "*.$DOMAIN"
mv "_wildcard.${DOMAIN}.pem" tls.crt
mv "_wildcard.${DOMAIN}-key.pem" tls.key
cd "$REPO_ROOT"
else
echo "==> TLS cert already exists."
fi
# ---------------------------------------------------------------------------
# 6. Create TLS Secret in ingress namespace
# 7. Create TLS Secret in ingress namespace
# ---------------------------------------------------------------------------
echo "==> Applying TLS Secret to ingress namespace..."
kubectl create namespace ingress --dry-run=client -o yaml | kubectl apply -f -
kubectl create secret tls pingora-tls \
--cert="$REPO_ROOT/secrets/local/tls.crt" \
--key="$REPO_ROOT/secrets/local/tls.key" \
kubectl $CTX create namespace ingress --dry-run=client -o yaml | kubectl $CTX apply -f -
kubectl $CTX create secret tls pingora-tls \
--cert="$SECRETS_DIR/tls.crt" \
--key="$SECRETS_DIR/tls.key" \
-n ingress \
--dry-run=client -o yaml | kubectl apply -f -
--dry-run=client -o yaml | kubectl $CTX apply -f -
# ---------------------------------------------------------------------------
# 7. Substitute domain and apply manifests
# 8. Apply manifests (server-side apply handles large CRDs)
# ---------------------------------------------------------------------------
echo "==> Applying manifests (domain: $DOMAIN)..."
# Substitute DOMAIN_SUFFIX placeholder before piping to kubectl
kubectl kustomize "$REPO_ROOT/overlays/local" --enable-helm | \
cd "$REPO_ROOT"
kustomize build overlays/local --enable-helm | \
sed "s/DOMAIN_SUFFIX/${DOMAIN}/g" | \
kubectl apply -f -
kubectl $CTX apply --server-side --force-conflicts -f -
# ---------------------------------------------------------------------------
# 8. Wait for core components
# 9. Seed secrets (waits for postgres, creates K8s secrets, inits OpenBao)
# ---------------------------------------------------------------------------
echo "==> Waiting for PostgreSQL cluster..."
kubectl wait --for=condition=Ready cluster/postgres -n data --timeout=180s || true
echo "==> Seeding secrets..."
bash "$SCRIPT_DIR/local-seed-secrets.sh"
echo "==> Waiting for Redis..."
kubectl rollout status deployment/redis -n data --timeout=120s || true
# ---------------------------------------------------------------------------
# 10. Restart deployments that were waiting for secrets
# ---------------------------------------------------------------------------
echo "==> Restarting services that were waiting for secrets..."
for ns_deploy in \
"ory/hydra" \
"ory/kratos" \
"ory/login-ui" \
"devtools/gitea" \
"storage/seaweedfs-filer" \
"lasuite/hive" \
"media/livekit-server"; do
ns="${ns_deploy%%/*}"
dep="${ns_deploy##*/}"
kubectl $CTX -n "$ns" rollout restart deployment/"$dep" 2>/dev/null || true
done
# ---------------------------------------------------------------------------
# 11. Wait for core components
# ---------------------------------------------------------------------------
echo "==> Waiting for Valkey..."
kubectl $CTX rollout status deployment/valkey -n data --timeout=120s || true
echo "==> Waiting for Kratos..."
kubectl rollout status deployment/kratos -n ory --timeout=120s || true
kubectl $CTX rollout status deployment/kratos -n ory --timeout=120s || true
echo "==> Waiting for Hydra..."
kubectl rollout status deployment/hydra -n ory --timeout=120s || true
kubectl $CTX rollout status deployment/hydra -n ory --timeout=120s || true
# ---------------------------------------------------------------------------
# 9. Print URLs
# 12. Print URLs
# ---------------------------------------------------------------------------
bash "$SCRIPT_DIR/local-urls.sh"
echo ""
echo "==> Stack is up. Domain: $DOMAIN"
echo ""
echo "Services:"
echo " Auth: https://auth.${DOMAIN}/"
echo " Docs: https://docs.${DOMAIN}/"
echo " Meet: https://meet.${DOMAIN}/"
echo " Drive: https://drive.${DOMAIN}/"
echo " Chat: https://chat.${DOMAIN}/"
echo " People: https://people.${DOMAIN}/"
echo " Gitea: https://src.${DOMAIN}/"
echo ""
echo "OpenBao UI: kubectl $CTX -n data port-forward svc/openbao 8200:8200"
echo " http://localhost:8200 (token from: kubectl $CTX -n data get secret openbao-keys -o jsonpath='{.data.root-token}' | base64 -d)"