Files
sbbb/base/ingress/pingora-config.yaml
Sienna Meridian Satterwhite e5741c4df6 feat: integrate tuwunel with Ory SSO, rename chat to messages subdomain
- Add matrix to hydra-maester enabledNamespaces for OAuth2Client CRD
- Update allowed_return_urls and selfservice URLs: chat→messages
- Add Kratos verification flow, employee/external identity schemas
- Extend session lifespan to 30 days with persistent cookies
- Route messages.* to tuwunel via Pingora with WebSocket support
- Replace login-ui with kratos-admin-ui as unified auth frontend
- Update TLS certificate SANs: chat→messages, add monitoring subdomains
- Add tuwunel + La Suite images to production overlay
- Switch DDoS/scanner detection to compiled-in ensemble models (observe_only)
2026-03-10 18:52:47 +00:00

243 lines
7.5 KiB
YAML

# Kubernetes ConfigMap carrying the Pingora proxy's TOML configuration as a
# single literal block scalar under the `config.toml` key.
# NOTE(review): YAML indentation appears lost in this view (paste artifact);
# in the real manifest `name`/`namespace` nest under `metadata:` and
# `config.toml:` under `data:` — confirm against the source manifest.
apiVersion: v1
kind: ConfigMap
metadata:
name: pingora-config
namespace: ingress
data:
config.toml: |
# Sunbeam proxy config.
#
# Substitution placeholders (replaced by sed at deploy time):
# DOMAIN_SUFFIX — e.g. <LIMA_IP>.sslip.io (local) or yourdomain.com (production)
# Listener bind addresses. Port 80 traffic is 301-redirected to HTTPS unless a
# route sets disable_secure_redirection (see the routing table below).
[listen]
http = "0.0.0.0:80"
https = "0.0.0.0:443"
[tls]
# Cert files are written here by the proxy on startup and on cert renewal
# via the K8s API. The /etc/tls directory is an emptyDir volume.
# The source Secret is named by [kubernetes].tls_secret below.
cert_path = "/etc/tls/tls.crt"
key_path = "/etc/tls/tls.key"
[telemetry]
# Empty string: presumably no OTLP exporter is configured (tracing disabled)
# until an endpoint is set — TODO confirm against the proxy's telemetry code.
otlp_endpoint = ""
# Port for the proxy's own metrics endpoint (scraped by the monitoring stack).
metrics_port = 9090
# Kubernetes resource names for cert/config watchers.
# Override these if your namespace or Secret/ConfigMap names differ.
# tls_secret feeds the [tls] cert/key paths above; config_configmap is this
# very ConfigMap, watched for live reloads.
[kubernetes]
namespace = "ingress"
tls_secret = "pingora-tls"
config_configmap = "pingora-config"
# DDoS detection — decision tree + MLP ensemble (compiled-in weights).
[ddos]
enabled = true
# observe_only: presumably log/score without blocking — TODO confirm
# enforcement semantics in the proxy before flipping this to false.
observe_only = true
# Ensemble score cutoff; assumes scores are in [0, 1] — confirm in model code.
threshold = 0.6
# Sliding-window parameters for traffic sampling (seconds / max events kept).
window_secs = 60
window_capacity = 1000
# Minimum events in the window before the model produces a verdict.
min_events = 10
# Scanner detection — decision tree + MLP ensemble (compiled-in weights).
[scanner]
enabled = true
# Log-only mode, same caveat as [ddos].observe_only.
observe_only = true
threshold = 0.5
# Verified-bot results are cached for 24h (value is in seconds).
bot_cache_ttl_secs = 86400
# Allowlist entries match on User-Agent prefix; dns_suffixes/cidrs, when
# present, appear to further verify the claimed identity (reverse-DNS / IP
# range) — confirm exact verification order in the scanner module.
[[scanner.allowlist]]
ua_prefix = "Googlebot"
reason = "Google crawler"
dns_suffixes = ["googlebot.com", "google.com"]
cidrs = ["66.249.64.0/19"]
[[scanner.allowlist]]
ua_prefix = "Bingbot"
reason = "Microsoft crawler"
dns_suffixes = ["search.msn.com"]
cidrs = ["40.77.167.0/24", "157.55.39.0/24"]
# NOTE(review): this entry has no dns_suffixes/cidrs, so it allowlists on
# UA prefix alone — any client can claim "containerd". Acceptable only while
# observe_only = true; revisit before enforcing.
[[scanner.allowlist]]
ua_prefix = "containerd"
reason = "Container registry client (buildkitd/containerd)"
# Rate limiting — leaky bucket per-identity throttling.
[rate_limit]
enabled = true
# Housekeeping: idle buckets are evicted every 5 min once stale for 10 min.
eviction_interval_secs = 300
stale_after_secs = 600
# Cluster-internal and loopback traffic is never throttled.
bypass_cidrs = ["10.0.0.0/8", "127.0.0.0/8", "::1/128"]
# burst = bucket capacity; rate presumably refills in requests/second —
# TODO confirm units against the limiter implementation.
[rate_limit.authenticated]
burst = 200
rate = 50.0
[rate_limit.unauthenticated]
burst = 50
rate = 10.0
# Host-prefix → backend routing table.
# The prefix is the subdomain before the first dot, so these routes work
# identically for yourdomain.com and *.sslip.io.
# Edit to match your own service names and namespaces.
#
# Per-route options:
# host_prefix — subdomain to match (required)
# backend — upstream URL, e.g. "http://svc.ns.svc.cluster.local:8000" (required)
# websocket — proxy WebSocket upgrades (default: false)
# disable_secure_redirection — when true, plain-HTTP requests are forwarded
# as-is instead of being 301-redirected to HTTPS.
# Default: false (all HTTP → HTTPS redirect enforced).
# docs.* — Collabora document server (service "collabora", port 9980).
[[routes]]
host_prefix = "docs"
backend = "http://collabora.lasuite.svc.cluster.local:9980"
websocket = true
# meet.* — frontend serves the SPA; API-ish prefixes are peeled off to the
# backend service. Per-route [[routes.paths]] entries take precedence over
# the route-level backend — assumed longest/first-prefix match, TODO confirm.
[[routes]]
host_prefix = "meet"
backend = "http://meet-frontend.lasuite.svc.cluster.local:80"
websocket = true
[[routes.paths]]
prefix = "/api/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/oidc/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/static/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
# "/__" has no trailing slash, so it matches any path beginning with /__.
[[routes.paths]]
prefix = "/__"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
# drive.* — same frontend/backend split as meet, without WebSocket support.
[[routes]]
host_prefix = "drive"
backend = "http://drive-frontend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/api/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/static/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/external_api/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
# /media/ falls through to frontend nginx which handles auth_request internally
# NOTE(review): naming collision risk — the "mail" host serves the La Suite
# *messages*-frontend service, while the "messages" host serves the Matrix
# homeserver (tuwunel). Keep the distinction in mind when grepping.
[[routes]]
host_prefix = "mail"
# Caddy is the unified entry point — proxies /api/, /admin/, /static/, /oidc/ internally.
backend = "http://messages-frontend.lasuite.svc.cluster.local:80"
# messages.* — Matrix homeserver; WebSocket needed for client sync transports.
[[routes]]
host_prefix = "messages"
backend = "http://tuwunel.matrix.svc.cluster.local:6167"
websocket = true
# Serve .well-known from tuwunel directly
# (no trailing slash: matches /.well-known/matrix/client, /server, etc.)
[[routes.paths]]
prefix = "/.well-known/matrix"
backend = "http://tuwunel.matrix.svc.cluster.local:6167"
# people.* — directory app; frontend SPA plus Django backend.
[[routes]]
host_prefix = "people"
backend = "http://people-frontend.lasuite.svc.cluster.local:80"
# Backend handles the API, Django admin, and OAuth2 provider.
[[routes.paths]]
prefix = "/api/"
backend = "http://people-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://people-backend.lasuite.svc.cluster.local:80"
# /o/ is the OAuth2-provider prefix served by the backend.
[[routes.paths]]
prefix = "/o/"
backend = "http://people-backend.lasuite.svc.cluster.local:80"
# find.* — search service; backend only, no separate frontend.
[[routes]]
host_prefix = "find"
backend = "http://find-backend.lasuite.svc.cluster.local:8000"
# src.* — Gitea web UI (HTTP; SSH clone traffic goes through [ssh] below).
[[routes]]
host_prefix = "src"
backend = "http://gitea-http.devtools.svc.cluster.local:3000"
websocket = true
# auth: unified IAM dashboard; Hydra handles OAuth2/OIDC; Kratos handles self-service flows.
[[routes]]
host_prefix = "auth"
backend = "http://kratos-admin-ui.ory.svc.cluster.local:3000"
# Prefixes below have no trailing slash, so e.g. /oauth2/auth and
# /.well-known/openid-configuration match — assumed plain prefix matching,
# TODO confirm in the router.
[[routes.paths]]
prefix = "/oauth2"
backend = "http://hydra-public.ory.svc.cluster.local:4444"
[[routes.paths]]
prefix = "/.well-known"
backend = "http://hydra-public.ory.svc.cluster.local:4444"
[[routes.paths]]
prefix = "/userinfo"
backend = "http://hydra-public.ory.svc.cluster.local:4444"
# /kratos prefix is stripped before forwarding so Kratos sees its native paths.
[[routes.paths]]
prefix = "/kratos"
backend = "http://kratos-public.ory.svc.cluster.local:80"
strip_prefix = true
[[routes]]
host_prefix = "integration"
backend = "http://integration.lasuite.svc.cluster.local:80"
# Monitoring stack: Grafana dashboards, Prometheus, Loki logs, Tempo traces.
[[routes]]
host_prefix = "metrics"
backend = "http://kube-prometheus-stack-grafana.monitoring.svc.cluster.local:80"
[[routes]]
host_prefix = "systemmetrics"
backend = "http://kube-prometheus-stack-prometheus.monitoring.svc.cluster.local:9090"
[[routes]]
host_prefix = "systemlogs"
backend = "http://loki-gateway.monitoring.svc.cluster.local:80"
[[routes]]
host_prefix = "systemtracing"
backend = "http://tempo.monitoring.svc.cluster.local:3200"
# livekit.* — media server signaling; WebSocket required for client sessions.
[[routes]]
host_prefix = "livekit"
backend = "http://livekit-server.media.svc.cluster.local:80"
websocket = true
# s3.* — SeaweedFS filer exposing its S3-compatible API on port 8333.
[[routes]]
host_prefix = "s3"
backend = "http://seaweedfs-filer.storage.svc.cluster.local:8333"
# SSH TCP passthrough: port 22 → Gitea SSH pod (headless service → pod:2222).
# Raw TCP (not HTTP) — routed by port, independent of the [[routes]] table.
[ssh]
listen = "0.0.0.0:22"
backend = "gitea-ssh.devtools.svc.cluster.local:2222"