chore: initial infrastructure scaffold

Kustomize base + overlays for the full Sunbeam k3s stack:
- base/mesh      — Linkerd edge (crds + control-plane + viz)
- base/ingress   — custom Pingora edge proxy
- base/ory       — Kratos 0.60.1 + Hydra 0.60.1 + login-ui
- base/data      — CloudNativePG 0.27.1, Valkey 8, OpenSearch 2
- base/storage   — SeaweedFS master + volume + filer (S3 on :8333)
- base/lasuite   — Hive sync daemon + La Suite app placeholders
- base/media     — LiveKit livekit-server 1.9.0
- base/devtools  — Gitea 12.5.0 (external PG + Valkey)
overlays/local   — sslip.io domain, mkcert TLS, Lima hostPort
overlays/production — stub (TODOs for sunbeam.pt values)
scripts/         — local-up/down/certs/urls helpers
justfile         — up / down / certs / urls targets
This commit is contained in:
2026-02-28 13:42:27 +00:00
commit 5d9bd7b067
51 changed files with 2647 additions and 0 deletions

View File

@@ -0,0 +1,21 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: data
resources:
  - namespace.yaml
  - postgres-cluster.yaml
  - valkey-deployment.yaml
  - valkey-service.yaml
  - opensearch-deployment.yaml
  - opensearch-service.yaml
helmCharts:
  # CloudNativePG operator — chart name: cloudnative-pg
  # helm repo add cnpg https://cloudnative-pg.github.io/charts
  - name: cloudnative-pg
    repo: https://cloudnative-pg.github.io/charts
    version: "0.27.1"
    releaseName: cnpg
    namespace: data
    # Without includeCRDs the rendered output omits the CNPG CRDs, so the
    # Cluster resource in postgres-cluster.yaml cannot be applied.
    includeCRDs: true

6
base/data/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
# Namespace for shared data services (CloudNativePG, Valkey, OpenSearch).
apiVersion: v1
kind: Namespace
metadata:
  name: data
  annotations:
    # Opt all pods in this namespace into Linkerd sidecar injection.
    linkerd.io/inject: enabled

View File

@@ -0,0 +1,44 @@
# Single-node OpenSearch for full-text search (security plugin disabled —
# only reachable inside the cluster).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: opensearch
  namespace: data
spec:
  replicas: 1
  selector:
    matchLabels:
      app: opensearch
  template:
    metadata:
      labels:
        app: opensearch
    spec:
      initContainers:
        # Lucene mmap requirement: vm.max_map_count >= 262144 on the node.
        - name: sysctl
          # Pinned tag instead of the mutable implicit :latest.
          image: busybox:1.36
          command: ["sysctl", "-w", "vm.max_map_count=262144"]
          securityContext:
            privileged: true
      containers:
        - name: opensearch
          image: opensearchproject/opensearch:2
          ports:
            - name: http
              containerPort: 9200
              protocol: TCP
            - name: transport
              containerPort: 9300
              protocol: TCP
          env:
            - name: discovery.type
              value: single-node
            - name: OPENSEARCH_JAVA_OPTS
              value: "-Xms256m -Xmx512m"
            - name: DISABLE_SECURITY_PLUGIN
              value: "true"
          resources:
            limits:
              # Must exceed -Xmx (512m): the JVM also needs metaspace and
              # off-heap memory, so a 512Mi limit equal to the heap cap gets
              # the container OOM-killed under load.
              memory: 1Gi
            requests:
              memory: 256Mi
              cpu: 100m

View File

@@ -0,0 +1,17 @@
# ClusterIP service in front of the single-node OpenSearch deployment.
apiVersion: v1
kind: Service
metadata:
  name: opensearch
  namespace: data
spec:
  selector:
    app: opensearch
  ports:
    # REST API
    - name: http
      port: 9200
      targetPort: 9200
      protocol: TCP
    # Node-to-node transport (unused with a single node, exposed for parity)
    - name: transport
      port: 9300
      targetPort: 9300
      protocol: TCP

View File

@@ -0,0 +1,52 @@
# Single-instance CloudNativePG cluster hosting every logical database of
# the stack. Superuser credentials come from the postgres-superuser Secret.
apiVersion: postgresql.cnpg.io/v1
kind: Cluster
metadata:
  name: postgres
  namespace: data
spec:
  instances: 1
  postgresql:
    parameters:
      max_connections: "100"
      shared_buffers: "128MB"
      work_mem: "4MB"
  bootstrap:
    initdb:
      database: postgres
      owner: postgres
      secret:
        name: postgres-superuser
      # postInitSQL runs once, as superuser, right after initdb.
      postInitSQL:
        # Create all 10 logical databases and their owners
        # NOTE(review): these roles are created WITHOUT passwords, while app
        # manifests reference password Secrets (e.g. gitea-db-credentials).
        # Confirm how passwords get set, or add ALTER USER ... PASSWORD here.
        - CREATE USER kratos;
        - CREATE DATABASE kratos_db OWNER kratos;
        - CREATE USER hydra;
        - CREATE DATABASE hydra_db OWNER hydra;
        - CREATE USER docs;
        - CREATE DATABASE docs_db OWNER docs;
        - CREATE USER meet;
        - CREATE DATABASE meet_db OWNER meet;
        - CREATE USER drive;
        - CREATE DATABASE drive_db OWNER drive;
        - CREATE USER messages;
        - CREATE DATABASE messages_db OWNER messages;
        - CREATE USER conversations;
        - CREATE DATABASE conversations_db OWNER conversations;
        - CREATE USER people;
        - CREATE DATABASE people_db OWNER people;
        - CREATE USER gitea;
        - CREATE DATABASE gitea_db OWNER gitea;
        - CREATE USER hive;
        - CREATE DATABASE hive_db OWNER hive;
  storage:
    size: 10Gi
  resources:
    requests:
      memory: 256Mi
      cpu: 250m
    limits:
      memory: 512Mi

View File

@@ -0,0 +1,34 @@
# Single-node Valkey (Redis-protocol-compatible) cache shared by the stack.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: valkey
  namespace: data
spec:
  replicas: 1
  selector:
    matchLabels:
      app: valkey
  template:
    metadata:
      labels:
        app: valkey
    spec:
      containers:
        - name: valkey
          image: valkey/valkey:8-alpine
          ports:
            - name: valkey
              containerPort: 6379
              protocol: TCP
          args:
            # Keep the dataset below the 64Mi container limit and evict
            # least-recently-used keys instead of erroring when full.
            - valkey-server
            - --maxmemory
            - 56mb
            - --maxmemory-policy
            - allkeys-lru
          resources:
            limits:
              memory: 64Mi
            requests:
              memory: 32Mi
              cpu: 25m

View File

@@ -0,0 +1,13 @@
# ClusterIP service for the shared Valkey cache.
apiVersion: v1
kind: Service
metadata:
  name: valkey
  namespace: data
spec:
  selector:
    app: valkey
  ports:
    - name: valkey
      port: 6379
      targetPort: 6379
      protocol: TCP

View File

@@ -0,0 +1,76 @@
# Base Gitea Helm values (chart: gitea/gitea, v12.5.0).
# DOMAIN_SUFFIX is replaced by overlay patches.
# Reference: https://gitea.com/gitea/helm-gitea/src/branch/main/values.yaml
# Disable bundled DB and cache — we use shared CloudNativePG + Valkey
postgresql:
  enabled: false
postgresql-ha:
  enabled: false
valkey-cluster:
  enabled: false
valkey:
  enabled: false
gitea:
  config:
    server:
      DOMAIN: src.DOMAIN_SUFFIX
      ROOT_URL: https://src.DOMAIN_SUFFIX/
      SSH_DOMAIN: src.DOMAIN_SUFFIX
      LFS_START_SERVER: "true"
    database:
      DB_TYPE: postgres
      HOST: postgres-rw.data.svc.cluster.local:5432
      NAME: gitea_db
      USER: gitea
      # PASSWD injected via additionalConfigFromEnvs below
    cache:
      ADAPTER: redis
      # Valkey is Redis protocol-compatible; Gitea's redis adapter works against Valkey
      HOST: redis://valkey.data.svc.cluster.local:6379/0?pool_size=100&idle_timeout=180s
    session:
      PROVIDER: redis
      PROVIDER_CONFIG: redis://valkey.data.svc.cluster.local:6379/1?pool_size=100&idle_timeout=180s
    queue:
      TYPE: redis
      CONN_STR: redis://valkey.data.svc.cluster.local:6379/2?pool_size=100&idle_timeout=180s
    storage:
      # SeaweedFS filer speaks the S3 API, so Gitea's minio backend is used.
      STORAGE_TYPE: minio
      MINIO_ENDPOINT: seaweedfs-filer.storage.svc.cluster.local:8333
      MINIO_BUCKET: sunbeam-git-lfs
      MINIO_USE_SSL: "false"
      # MINIO_ACCESS_KEY_ID / MINIO_SECRET_ACCESS_KEY from gitea-s3-credentials Secret
  # GITEA__SECTION__KEY env vars override app.ini — used to keep secrets
  # out of these committed values.
  additionalConfigFromEnvs:
    - name: GITEA__DATABASE__PASSWD
      valueFrom:
        secretKeyRef:
          name: gitea-db-credentials
          key: password
    - name: GITEA__STORAGE__MINIO_ACCESS_KEY_ID
      valueFrom:
        secretKeyRef:
          name: gitea-s3-credentials
          key: access-key
    - name: GITEA__STORAGE__MINIO_SECRET_ACCESS_KEY
      valueFrom:
        secretKeyRef:
          name: gitea-s3-credentials
          key: secret-key
resources:
  limits:
    memory: 256Mi
  requests:
    memory: 128Mi
    cpu: 100m
persistence:
  enabled: true
  size: 5Gi

View File

@@ -0,0 +1,18 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: devtools
resources:
  - namespace.yaml
helmCharts:
  # helm repo add gitea-charts https://dl.gitea.com/charts/
  # Note: Gitea chart v10+ replaced Redis with Valkey-cluster by default.
  # We disable bundled DB/cache (external CloudNativePG + Valkey — see gitea-values.yaml).
  - name: gitea
    repo: https://dl.gitea.com/charts/
    version: "12.5.0"
    releaseName: gitea
    namespace: devtools
    valuesFile: gitea-values.yaml

View File

@@ -0,0 +1,6 @@
# Namespace for developer tooling (Gitea).
apiVersion: v1
kind: Namespace
metadata:
  name: devtools
  annotations:
    # Opt all pods in this namespace into Linkerd sidecar injection.
    linkerd.io/inject: enabled

View File

@@ -0,0 +1,10 @@
# Custom Pingora edge proxy: deployment + service + routing/TLS ConfigMap.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ingress
resources:
  - namespace.yaml
  - pingora-deployment.yaml
  - pingora-service.yaml
  - pingora-config.yaml

View File

@@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
  name: ingress
  # Linkerd annotation intentionally omitted — Pingora is the mesh ingress gateway

View File

@@ -0,0 +1,70 @@
# Routing + TLS configuration consumed by the Pingora edge proxy.
# NOTE(review): the "auth" route sends ALL auth.DOMAIN traffic to hydra:4444,
# but Kratos (public base_url auth.DOMAIN/kratos/) and the login-ui flows
# (/login, /consent, ...) also live under that host — confirm Pingora does
# path-level dispatch for this host, otherwise those endpoints are unreachable.
apiVersion: v1
kind: ConfigMap
metadata:
  name: pingora-config
  namespace: ingress
data:
  config.toml: |
    # Pingora hostname routing table
    # The domain suffix (sunbeam.pt / <LIMA_IP>.sslip.io) is patched per overlay.
    # TLS cert source (rustls-acme / mkcert) is patched per overlay.
    [tls]
    cert_path = "/etc/tls/tls.crt"
    key_path = "/etc/tls/tls.key"
    # acme = true # Uncommented in production overlay (rustls-acme + Let's Encrypt)
    acme = false
    [listen]
    http = "0.0.0.0:80"
    https = "0.0.0.0:443"
    [turn]
    backend = "livekit.media.svc.cluster.local:7880"
    udp_listen = "0.0.0.0:3478"
    relay_port_start = 49152
    relay_port_end = 49252
    # Host-prefix → backend mapping.
    # Pingora matches on the subdomain prefix regardless of domain suffix,
    # so these routes work identically for sunbeam.pt and *.sslip.io.
    [[routes]]
    host_prefix = "docs"
    backend = "http://docs.lasuite.svc.cluster.local:8000"
    websocket = true # Y.js CRDT sync
    [[routes]]
    host_prefix = "meet"
    backend = "http://meet.lasuite.svc.cluster.local:8000"
    websocket = true # LiveKit signaling
    [[routes]]
    host_prefix = "drive"
    backend = "http://drive.lasuite.svc.cluster.local:8000"
    [[routes]]
    host_prefix = "mail"
    backend = "http://messages.lasuite.svc.cluster.local:8000"
    [[routes]]
    host_prefix = "chat"
    backend = "http://conversations.lasuite.svc.cluster.local:8000"
    websocket = true # Vercel AI SDK streaming
    [[routes]]
    host_prefix = "people"
    backend = "http://people.lasuite.svc.cluster.local:8000"
    [[routes]]
    host_prefix = "src"
    backend = "http://gitea.devtools.svc.cluster.local:3000"
    websocket = true # Gitea Actions runner
    [[routes]]
    host_prefix = "auth"
    backend = "http://hydra.ory.svc.cluster.local:4444"
    [[routes]]
    host_prefix = "s3"
    backend = "http://seaweedfs-filer.storage.svc.cluster.local:8333"

View File

@@ -0,0 +1,52 @@
# Pingora edge proxy — terminates TLS and routes by hostname (see pingora-config).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pingora
  namespace: ingress
spec:
  replicas: 1
  selector:
    matchLabels:
      app: pingora
  template:
    metadata:
      labels:
        app: pingora
      annotations:
        # Pingora terminates TLS at the mesh boundary; sidecar injection is disabled here
        linkerd.io/inject: disabled
    spec:
      containers:
        - name: pingora
          image: ghcr.io/sunbeam-studio/pingora:latest
          ports:
            - name: http
              containerPort: 80
              protocol: TCP
            - name: https
              containerPort: 443
              protocol: TCP
            - name: turn-udp
              containerPort: 3478
              protocol: UDP
          # TURN relay range 49152-49252 exposed via hostPort in local overlay
          volumeMounts:
            - name: config
              mountPath: /etc/pingora
              readOnly: true
            - name: tls
              mountPath: /etc/tls
              readOnly: true
          resources:
            limits:
              memory: 64Mi
            requests:
              memory: 32Mi
              cpu: 50m
      volumes:
        - name: config
          configMap:
            name: pingora-config
        - name: tls
          secret:
            secretName: pingora-tls

View File

@@ -0,0 +1,24 @@
# Cluster-internal service for Pingora; external exposure is handled per
# overlay (Lima hostPort locally, real LB/ports in production).
apiVersion: v1
kind: Service
metadata:
  name: pingora
  namespace: ingress
spec:
  selector:
    app: pingora
  type: ClusterIP
  ports:
    - name: http
      port: 80
      targetPort: 80
      protocol: TCP
    - name: https
      port: 443
      targetPort: 443
      protocol: TCP
    - name: turn-udp
      port: 3478
      targetPort: 3478
      protocol: UDP
  # TURN relay ports 49152-49252 are forwarded via hostPort on the pod (see deployment).
  # Kubernetes Services do not support port ranges; UDP relay is handled at the node level.

View File

@@ -0,0 +1,28 @@
# Configuration for the Hive sync daemon (Drive <-> S3 reconciliation).
# All credentials are file-mounted from Secrets, never inlined here.
apiVersion: v1
kind: ConfigMap
metadata:
  name: hive-config
  namespace: lasuite
data:
  config.toml: |
    [drive]
    base_url = "http://drive.lasuite.svc.cluster.local:8000"
    workspace = "Game Assets"
    oidc_client_id = "hive"
    oidc_token_url = "http://hydra.ory.svc.cluster.local:4444/oauth2/token"
    # oidc_client_secret_file = "/run/secrets/hive-oidc" # mounted from Secret
    [s3]
    endpoint = "http://seaweedfs-filer.storage.svc.cluster.local:8333"
    bucket = "sunbeam-game-assets"
    region = "us-east-1"
    # access_key_file = "/run/secrets/seaweedfs-key" # mounted from Secret
    # secret_key_file = "/run/secrets/seaweedfs-secret" # mounted from Secret
    [postgres]
    # url_file = "/run/secrets/hive-db-url" # mounted from Secret
    [sync]
    interval_seconds = 30
    temp_dir = "/tmp/hive"
    large_file_threshold_mb = 50

View File

@@ -0,0 +1,44 @@
# Hive reconciliation daemon. Config from the hive-config ConfigMap; all
# credentials projected into /run/secrets from three Secrets.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hive
  namespace: lasuite
spec:
  replicas: 1
  selector:
    matchLabels:
      app: hive
  template:
    metadata:
      labels:
        app: hive
    spec:
      containers:
        - name: hive
          image: ghcr.io/sunbeam-studio/hive:latest
          volumeMounts:
            - name: config
              mountPath: /etc/hive
              readOnly: true
            - name: secrets
              mountPath: /run/secrets
              readOnly: true
          resources:
            limits:
              memory: 64Mi
            requests:
              memory: 32Mi
              cpu: 25m
      volumes:
        - name: config
          configMap:
            name: hive-config
        - name: secrets
          # Merge all credential Secrets into one mount point.
          projected:
            sources:
              - secret:
                  name: hive-oidc
              - secret:
                  name: seaweedfs-s3-credentials
              - secret:
                  name: hive-db-url

View File

@@ -0,0 +1,15 @@
# Hive has no inbound HTTP API — it is a reconciliation daemon only.
# This Service exists for Linkerd observability (metrics scraping).
apiVersion: v1
kind: Service
metadata:
  name: hive
  namespace: lasuite
spec:
  selector:
    app: hive
  ports:
    - name: metrics
      port: 9090
      targetPort: 9090
      protocol: TCP

View File

@@ -0,0 +1,69 @@
# La Suite namespace: Hive daemon + bucket-init Job; app charts pending (below).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: lasuite
resources:
  - namespace.yaml
  - hive-config.yaml
  - hive-deployment.yaml
  - hive-service.yaml
  - seaweedfs-buckets.yaml
# La Suite Numérique Helm charts:
# Each component's chart lives in-tree inside its own GitHub repo (under helm/ or charts/).
# There is NO published Helm repo index at a suitenumerique.github.io URL — charts must be
# pulled from each component's repo individually.
#
# Options:
# a) Use Flux HelmRepository with type=git pointing at each suitenumerique/<app> repo.
# b) Package each chart locally (`helm package`) and commit to this repo under charts/.
# c) Use OCI if/when they start publishing to GHCR (check each repo's CI for ghcr.io pushes).
#
# Recommended starting points:
# - https://github.com/suitenumerique/docs (helm/ directory)
# - https://github.com/suitenumerique/meet (helm/ directory)
# - https://github.com/suitenumerique/drive (helm/ directory)
# - https://github.com/suitenumerique/people (helm/ directory)
# - https://github.com/suitenumerique/messages (check for helm/ directory)
# - https://github.com/suitenumerique/conversations (check for helm/ directory)
#
# TODO: Once each app's chart path is confirmed, add helmCharts entries here.
# Placeholder entries (commented out) — verify chart name and repo format first:
# helmCharts:
#   - name: docs
#     repo: oci://ghcr.io/suitenumerique/docs # hypothetical; verify on ghcr.io first
#     version: "1.0.0"
#     releaseName: docs
#     namespace: lasuite
#
#   - name: meet
#     repo: oci://ghcr.io/suitenumerique/meet
#     version: "1.0.0"
#     releaseName: meet
#     namespace: lasuite
#
#   - name: drive
#     repo: oci://ghcr.io/suitenumerique/drive
#     version: "1.0.0"
#     releaseName: drive
#     namespace: lasuite
#
#   - name: messages
#     repo: oci://ghcr.io/suitenumerique/messages
#     version: "1.0.0"
#     releaseName: messages
#     namespace: lasuite
#
#   - name: conversations
#     repo: oci://ghcr.io/suitenumerique/conversations
#     version: "1.0.0"
#     releaseName: conversations
#     namespace: lasuite
#
#   - name: people
#     repo: oci://ghcr.io/suitenumerique/people
#     version: "1.0.0"
#     releaseName: people
#     namespace: lasuite

View File

@@ -0,0 +1,6 @@
# Namespace for the La Suite applications and the Hive sync daemon.
apiVersion: v1
kind: Namespace
metadata:
  name: lasuite
  annotations:
    # Opt all pods in this namespace into Linkerd sidecar injection.
    linkerd.io/inject: enabled

View File

@@ -0,0 +1,37 @@
# One-shot Job that pre-creates every S3 bucket the stack expects on the
# SeaweedFS filer, using the MinIO client against its S3 API.
#
# This manifest is applied via kustomize/kubectl, where Helm hook annotations
# (helm.sh/hook) are ignored — the previous post-install hook annotation was a
# no-op and has been removed. The Job runs once on first apply; delete it
# manually to re-run (`mc mb --ignore-existing` makes re-runs idempotent).
apiVersion: batch/v1
kind: Job
metadata:
  name: seaweedfs-bucket-init
  namespace: lasuite
spec:
  template:
    spec:
      restartPolicy: OnFailure
      containers:
        - name: mc
          image: minio/mc:latest
          command:
            - /bin/sh
            - -c
            - |
              set -e
              ENDPOINT=http://seaweedfs-filer.storage.svc.cluster.local:8333
              mc alias set weed "$ENDPOINT" "$S3_ACCESS_KEY" "$S3_SECRET_KEY"
              for bucket in \
                sunbeam-docs \
                sunbeam-meet \
                sunbeam-drive \
                sunbeam-messages \
                sunbeam-conversations \
                sunbeam-git-lfs \
                sunbeam-game-assets; do
                mc mb --ignore-existing "weed/$bucket"
                echo "Ensured bucket: $bucket"
              done
          envFrom:
            # Provides S3_ACCESS_KEY / S3_SECRET_KEY.
            - secretRef:
                name: seaweedfs-s3-credentials

View File

@@ -0,0 +1,17 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: media
resources:
  - namespace.yaml
helmCharts:
  # chart name is `livekit-server`, not `livekit-helm`
  # helm repo add livekit https://helm.livekit.io
  - name: livekit-server
    repo: https://helm.livekit.io
    version: "1.9.0"
    releaseName: livekit
    namespace: media
    valuesFile: livekit-values.yaml

View File

@@ -0,0 +1,37 @@
# Base LiveKit Helm values (chart: livekit/livekit-server).
# DOMAIN_SUFFIX is replaced by overlay patches.
# API keys/secrets come from the livekit-keys Secret (loaded via extraEnv or config file).
# Reference: https://github.com/livekit/livekit-helm/blob/master/server-sample.yaml
livekit:
  # LiveKit server config injected as config.yaml
  port: 7880
  log_level: info
  rtc:
    port_range_start: 49152
    port_range_end: 49252
    use_external_ip: true
  turn:
    enabled: true
    domain: meet.DOMAIN_SUFFIX
    # NOTE(review): the Pingora ingress only exposes TURN on UDP 3478
    # (see pingora-config); confirm TLS TURN on 5349 is reachable or unused.
    tls_port: 5349
    udp_port: 3478
    external_tls: true
  redis:
    # Valkey is protocol-compatible with Redis; LiveKit sees this as a Redis endpoint
    address: valkey.data.svc.cluster.local:6379
  # API keys are loaded from a Kubernetes Secret and mounted as env vars.
  # keys:
  #   <key>: <secret> # set in overlay Secret, not here
deployment:
  resources:
    limits:
      memory: 128Mi
    requests:
      memory: 64Mi
      cpu: 100m

View File

@@ -0,0 +1,6 @@
# Namespace for realtime media (LiveKit).
apiVersion: v1
kind: Namespace
metadata:
  name: media
  annotations:
    # Opt all pods in this namespace into Linkerd sidecar injection.
    linkerd.io/inject: enabled

View File

@@ -0,0 +1,37 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - namespace.yaml
# NOTE: Linkerd stable releases moved behind a commercial paywall in Feb 2024.
# As of 2.15, stable artifacts are Buoyant Enterprise for Linkerd (BEL) only.
# The free channel is "edge" (weekly date-versioned builds).
#
# For local dev: local-up.sh installs Linkerd via the CLI directly:
#   linkerd install --crds | kubectl apply -f -
#   linkerd install | kubectl apply -f -
# which is simpler and uses whatever edge version the CLI was built against.
#
# The blocks below are the production Helm path (edge channel, pinned dates).
# To use stable BEL, change repo to https://helm.linkerd.io/stable with a
# valid BEL entitlement secret and use versions 1.8.0 / 1.16.11 / 30.12.11.
#
# NOTE(review): the three pinned edge versions below come from different
# weekly releases (2026.1.2 / 2025.12.3 / 2026.1.4) — confirm this mix is a
# tested combination, or align them to one release date.
helmCharts:
  - name: linkerd-crds
    repo: https://helm.linkerd.io/edge
    version: "2026.1.2"
    releaseName: linkerd-crds
    namespace: mesh
  - name: linkerd-control-plane
    repo: https://helm.linkerd.io/edge
    version: "2025.12.3"
    releaseName: linkerd-control-plane
    namespace: mesh
  - name: linkerd-viz
    repo: https://helm.linkerd.io/edge
    version: "2026.1.4"
    releaseName: linkerd-viz
    namespace: mesh

5
base/mesh/namespace.yaml Normal file
View File

@@ -0,0 +1,5 @@
apiVersion: v1
kind: Namespace
metadata:
  name: mesh
  # Linkerd annotation intentionally omitted — the control plane is not self-injected

View File

@@ -0,0 +1,45 @@
# Base Ory Hydra Helm values.
# DOMAIN_SUFFIX is replaced by overlay patches.
# DSN and system secrets come from the overlay-specific Secret.
# NOTE(review): the $(VAR) placeholders below are Kubernetes env-var
# substitution syntax — it only expands inside container env/args, not in a
# rendered config file. Confirm the Ory chart wires these through env
# (e.g. SECRETS_SYSTEM / DSN env vars); otherwise the literal "$(...)" string
# ends up in the config.
hydra:
  config:
    dsn: "postgresql://hydra:$(HYDRA_DB_PASSWORD)@postgres-rw.data.svc.cluster.local:5432/hydra_db"
    urls:
      self:
        issuer: https://auth.DOMAIN_SUFFIX/
      consent: https://auth.DOMAIN_SUFFIX/consent
      login: https://auth.DOMAIN_SUFFIX/login
      logout: https://auth.DOMAIN_SUFFIX/logout
      error: https://auth.DOMAIN_SUFFIX/error
    secrets:
      system:
        - $(HYDRA_SYSTEM_SECRET)
      cookie:
        - $(HYDRA_COOKIE_SECRET)
    oidc:
      subject_identifiers:
        supported_types:
          - public
        pairwise:
          salt: $(HYDRA_PAIRWISE_SALT)
    serve:
      cookies:
        same_site_mode: Lax
      public:
        cors:
          enabled: true
          allowed_origins:
            - https://*.DOMAIN_SUFFIX
deployment:
  resources:
    limits:
      memory: 64Mi
    requests:
      memory: 32Mi
      cpu: 25m

View File

@@ -0,0 +1,60 @@
# Base Ory Kratos Helm values.
# DOMAIN_SUFFIX is replaced by overlay patches (sunbeam.pt / <LIMA_IP>.sslip.io).
# DSN and SMTP credentials come from the overlay-specific Secret.
# NOTE(review): the $(VAR) placeholders are Kubernetes env-var substitution
# syntax, which only expands in container env/args — confirm the Ory chart
# passes these via env rather than writing them literally into the config.
kratos:
  config:
    version: v0.13.0
    dsn: "postgresql://kratos:$(KRATOS_DB_PASSWORD)@postgres-rw.data.svc.cluster.local:5432/kratos_db"
    selfservice:
      default_browser_return_url: https://auth.DOMAIN_SUFFIX/
      allowed_return_urls:
        - https://auth.DOMAIN_SUFFIX/
        - https://docs.DOMAIN_SUFFIX/
        - https://meet.DOMAIN_SUFFIX/
        - https://drive.DOMAIN_SUFFIX/
        - https://mail.DOMAIN_SUFFIX/
        - https://chat.DOMAIN_SUFFIX/
        - https://people.DOMAIN_SUFFIX/
        - https://src.DOMAIN_SUFFIX/
      flows:
        login:
          ui_url: https://auth.DOMAIN_SUFFIX/login
        registration:
          ui_url: https://auth.DOMAIN_SUFFIX/registration
        recovery:
          ui_url: https://auth.DOMAIN_SUFFIX/recovery
        settings:
          ui_url: https://auth.DOMAIN_SUFFIX/settings
    identity:
      default_schema_id: default
      schemas:
        - id: default
          url: file:///etc/config/kratos/identity.schema.json
    courier:
      smtp:
        connection_uri: "smtp://$(SMTP_USER):$(SMTP_PASSWORD)@localhost:25/"
        from_address: no-reply@DOMAIN_SUFFIX
        from_name: Sunbeam
    serve:
      public:
        base_url: https://auth.DOMAIN_SUFFIX/kratos/
        cors:
          enabled: true
          allowed_origins:
            - https://*.DOMAIN_SUFFIX
      admin:
        base_url: http://kratos-admin.ory.svc.cluster.local:4434/
deployment:
  resources:
    limits:
      memory: 64Mi
    requests:
      memory: 32Mi
      cpu: 25m

View File

@@ -0,0 +1,24 @@
# Ory identity stack: Kratos (identities) + Hydra (OAuth2/OIDC) + login UI.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ory
resources:
  - namespace.yaml
  - login-ui-deployment.yaml
helmCharts:
  # helm repo add ory https://k8s.ory.sh/helm/charts
  - name: kratos
    repo: https://k8s.ory.sh/helm/charts
    version: "0.60.1"
    releaseName: kratos
    namespace: ory
    valuesFile: kratos-values.yaml
  - name: hydra
    repo: https://k8s.ory.sh/helm/charts
    version: "0.60.1"
    releaseName: hydra
    namespace: ory
    valuesFile: hydra-values.yaml

View File

@@ -0,0 +1,49 @@
# Custom login/consent UI that fronts Kratos self-service flows and the
# Hydra consent endpoint.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: login-ui
  namespace: ory
spec:
  replicas: 1
  selector:
    matchLabels:
      app: login-ui
  template:
    metadata:
      labels:
        app: login-ui
    spec:
      containers:
        - name: login-ui
          image: ghcr.io/sunbeam-studio/login-ui:latest
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          env:
            - name: KRATOS_PUBLIC_URL
              value: "http://kratos-public.ory.svc.cluster.local:4433"
            - name: HYDRA_ADMIN_URL
              value: "http://hydra-admin.ory.svc.cluster.local:4445"
            - name: PORT
              value: "3000"
          resources:
            limits:
              memory: 64Mi
            requests:
              memory: 32Mi
              cpu: 25m
---
apiVersion: v1
kind: Service
metadata:
  name: login-ui
  namespace: ory
spec:
  selector:
    app: login-ui
  ports:
    - name: http
      port: 3000
      targetPort: 3000
      protocol: TCP

6
base/ory/namespace.yaml Normal file
View File

@@ -0,0 +1,6 @@
# Namespace for the Ory identity stack (Kratos, Hydra, login-ui).
apiVersion: v1
kind: Namespace
metadata:
  name: ory
  annotations:
    # Opt all pods in this namespace into Linkerd sidecar injection.
    linkerd.io/inject: enabled

View File

@@ -0,0 +1,11 @@
# SeaweedFS object storage: master + volume + filer (S3 API on :8333).
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: storage
resources:
  - namespace.yaml
  - seaweedfs-config.yaml
  - seaweedfs-master.yaml
  - seaweedfs-volume.yaml
  - seaweedfs-filer.yaml

View File

@@ -0,0 +1,6 @@
# Namespace for the SeaweedFS object storage stack.
apiVersion: v1
kind: Namespace
metadata:
  name: storage
  annotations:
    # Opt all pods in this namespace into Linkerd sidecar injection.
    linkerd.io/inject: enabled

View File

@@ -0,0 +1,24 @@
# SeaweedFS configuration files. Only the filer Deployment mounts this
# ConfigMap (at /etc/seaweedfs).
# NOTE(review): master.toml below is NOT mounted by the master StatefulSet,
# so its maintenance settings currently have no effect — confirm intent or
# mount this ConfigMap into the master pod too.
apiVersion: v1
kind: ConfigMap
metadata:
  name: seaweedfs-filer-config
  namespace: storage
data:
  filer.toml: |
    # SeaweedFS filer configuration
    # S3 API enabled on port 8333
    [leveldb2]
    enabled = true
    dir = "/data/filer"
    [s3]
    enabled = true
    port = 8333
    # Credentials are loaded from the seaweedfs-s3-credentials Secret
    # and passed as env vars (S3_ACCESS_KEY, S3_SECRET_KEY) to the filer.
  master.toml: |
    [master.maintenance]
    sleep_minutes = 17
    garbage_threshold = 0.3

View File

@@ -0,0 +1,74 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: seaweedfs-filer
namespace: storage
spec:
replicas: 1
selector:
matchLabels:
app: seaweedfs-filer
template:
metadata:
labels:
app: seaweedfs-filer
spec:
containers:
- name: filer
image: chrislusf/seaweedfs:latest
args:
- filer
- -port=8888
- -s3
- -s3.port=8333
- -master=seaweedfs-master.storage.svc.cluster.local:9333
ports:
- name: http
containerPort: 8888
protocol: TCP
- name: s3
containerPort: 8333
protocol: TCP
- name: grpc
containerPort: 18888
protocol: TCP
envFrom:
- secretRef:
name: seaweedfs-s3-credentials
volumeMounts:
- name: config
mountPath: /etc/seaweedfs
readOnly: true
- name: filer-data
mountPath: /data/filer
resources:
limits:
memory: 256Mi
requests:
memory: 128Mi
cpu: 50m
volumes:
- name: config
configMap:
name: seaweedfs-filer-config
- name: filer-data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: seaweedfs-filer
namespace: storage
spec:
selector:
app: seaweedfs-filer
ports:
- name: http
port: 8888
targetPort: 8888
- name: s3
port: 8333
targetPort: 8333
- name: grpc
port: 18888
targetPort: 18888

View File

@@ -0,0 +1,66 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: seaweedfs-master
namespace: storage
spec:
serviceName: seaweedfs-master
replicas: 1
selector:
matchLabels:
app: seaweedfs-master
template:
metadata:
labels:
app: seaweedfs-master
spec:
containers:
- name: master
image: chrislusf/seaweedfs:latest
args:
- master
- -port=9333
- -mdir=/data
- -defaultReplication=000
- -volumeSizeLimitMB=1000
ports:
- name: http
containerPort: 9333
protocol: TCP
- name: grpc
containerPort: 19333
protocol: TCP
volumeMounts:
- name: data
mountPath: /data
resources:
limits:
memory: 64Mi
requests:
memory: 32Mi
cpu: 25m
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: seaweedfs-master
namespace: storage
spec:
selector:
app: seaweedfs-master
clusterIP: None
ports:
- name: http
port: 9333
targetPort: 9333
- name: grpc
port: 19333
targetPort: 19333

View File

@@ -0,0 +1,66 @@
# SeaweedFS volume server: stores the actual blob data on a 20Gi PVC.
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: seaweedfs-volume
  namespace: storage
spec:
  serviceName: seaweedfs-volume
  replicas: 1
  selector:
    matchLabels:
      app: seaweedfs-volume
  template:
    metadata:
      labels:
        app: seaweedfs-volume
    spec:
      containers:
        - name: volume
          image: chrislusf/seaweedfs:latest
          args:
            - volume
            - -port=8080
            - -mserver=seaweedfs-master.storage.svc.cluster.local:9333
            - -dir=/data
            # Max number of 1000MB volumes (see master -volumeSizeLimitMB).
            - -max=50
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
            - name: grpc
              containerPort: 18080
              protocol: TCP
          volumeMounts:
            - name: data
              mountPath: /data
          resources:
            limits:
              memory: 256Mi
            requests:
              memory: 128Mi
              cpu: 50m
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes: ["ReadWriteOnce"]
        resources:
          requests:
            storage: 20Gi
---
# Headless service (clusterIP: None) giving the StatefulSet stable DNS.
apiVersion: v1
kind: Service
metadata:
  name: seaweedfs-volume
  namespace: storage
spec:
  selector:
    app: seaweedfs-volume
  clusterIP: None
  ports:
    - name: http
      port: 8080
      targetPort: 8080
    - name: grpc
      port: 18080
      targetPort: 18080