Files
sbbb/base/ingress/pingora-config.yaml
Sienna Meridian Satterwhite d3943c9a84 feat(monitoring): wire up full LGTM observability stack
- Prometheus: discover ServiceMonitors/PodMonitors in all namespaces,
  enable remote write receiver for Tempo metrics generator
- Tempo: enable metrics generator (service-graphs + span-metrics)
  with remote write to Prometheus
- Loki: add Grafana Alloy DaemonSet to ship container logs
- Grafana: enable dashboard sidecar, add Pingora/Loki/Tempo/OpenBao
  dashboards, add stable UIDs and cross-linking between datasources
  (Loki↔Tempo derived fields, traces→logs, traces→metrics, service map)
- Linkerd: enable proxy tracing to Alloy OTLP collector, point
  linkerd-viz at existing Prometheus instead of deploying its own
- Pingora: add OTLP rollout plan (endpoint commented out until proxy
  telemetry panic fix is deployed and Alloy is verified healthy)
2026-03-21 17:36:54 +00:00

295 lines
9.2 KiB
YAML

apiVersion: v1
kind: ConfigMap
# NOTE(review): YAML nesting appears flattened in this copy — `name`/`namespace`
# should be indented under `metadata:`, and `config.toml` under `data:`; verify
# against the original manifest before applying.
metadata:
name: pingora-config
namespace: ingress
data:
config.toml: |
# Sunbeam proxy config.
#
# Substitution placeholders (replaced by sed at deploy time):
# DOMAIN_SUFFIX — e.g. <LIMA_IP>.sslip.io (local) or yourdomain.com (production)
# Plain-HTTP and TLS listener bind addresses. HTTP requests are redirected to
# HTTPS unless a route sets disable_secure_redirection (see [[routes]] below).
[listen]
http = "0.0.0.0:80"
https = "0.0.0.0:443"
[tls]
# Cert files are written here by the proxy on startup and on cert renewal
# via the K8s API. The /etc/tls directory is an emptyDir volume.
# Filenames mirror the standard Kubernetes TLS Secret keys (tls.crt / tls.key).
cert_path = "/etc/tls/tls.crt"
key_path = "/etc/tls/tls.key"
[telemetry]
# Rollout plan for OTLP tracing:
# 1. Deploy proxy build that includes the graceful telemetry init
# (proxy/src/telemetry.rs — no longer panics on exporter failure)
# 2. Verify Alloy is running:
# kubectl -n monitoring get pods -l app.kubernetes.io/name=alloy
# 3. Uncomment the line below:
# otlp_endpoint = "http://alloy.monitoring.svc.cluster.local:4318"
# Empty endpoint = tracing stays disabled until the rollout steps above are done.
otlp_endpoint = ""
# Port for the proxy's own metrics endpoint (presumably scraped by Prometheus —
# confirm against the ServiceMonitor config).
metrics_port = 9090
# Kubernetes resource names for cert/config watchers.
# Override these if your namespace or Secret/ConfigMap names differ.
[kubernetes]
namespace = "ingress"
# Secret watched for the TLS cert/key that gets written to /etc/tls (see [tls]).
tls_secret = "pingora-tls"
# The ConfigMap holding this very file — presumably watched for live config
# reload; confirm in the proxy's watcher code.
config_configmap = "pingora-config"
# DDoS detection — decision tree + MLP ensemble (compiled-in weights).
[ddos]
enabled = true
# Detection-only mode: presumably scores/logs without blocking — confirm the
# enforcement semantics in the proxy before flipping to false.
observe_only = true
# Ensemble score above which traffic is flagged.
threshold = 0.6
# Sliding observation window: 60 s, at most 1000 buffered events.
window_secs = 60
window_capacity = 1000
# Require at least this many events in the window before producing a verdict.
min_events = 10
# Scanner detection — decision tree + MLP ensemble (compiled-in weights).
[scanner]
enabled = true
# Detection-only mode: presumably flags without blocking — confirm in proxy code.
observe_only = true
threshold = 0.5
# Cache verified-bot verdicts for 24 h.
bot_cache_ttl_secs = 86400
# Allowlist entries exempt matching crawlers from scanner detection.
# dns_suffixes/cidrs presumably verify the claimed identity (reverse-DNS and
# source-IP checks) — confirm against the proxy implementation.
[[scanner.allowlist]]
ua_prefix = "Googlebot"
reason = "Google crawler"
dns_suffixes = ["googlebot.com", "google.com"]
cidrs = ["66.249.64.0/19"]
[[scanner.allowlist]]
ua_prefix = "Bingbot"
reason = "Microsoft crawler"
dns_suffixes = ["search.msn.com"]
cidrs = ["40.77.167.0/24", "157.55.39.0/24"]
# No dns_suffixes/cidrs: matched on UA prefix alone.
[[scanner.allowlist]]
ua_prefix = "containerd"
reason = "Container registry client (buildkitd/containerd)"
# Rate limiting — leaky bucket per-identity throttling.
[rate_limit]
enabled = true
# Sweep idle buckets every 5 min; a bucket is considered stale after 10 min.
eviction_interval_secs = 300
stale_after_secs = 600
# Cluster-internal and loopback traffic bypasses rate limiting entirely.
bypass_cidrs = ["10.0.0.0/8", "127.0.0.0/8", "::1/128"]
# burst = bucket capacity (requests), rate = sustained refill (requests/sec) —
# presumably; confirm units against the proxy's limiter implementation.
[rate_limit.authenticated]
burst = 200
rate = 50.0
[rate_limit.unauthenticated]
burst = 50
rate = 10.0
# Host-prefix → backend routing table.
# The prefix is the subdomain before the first dot, so these routes work
# identically for yourdomain.com and *.sslip.io.
# Edit to match your own service names and namespaces.
#
# Per-route options:
# host_prefix — subdomain to match (required)
# backend — upstream URL, e.g. "http://svc.ns.svc.cluster.local:8000" (required)
# websocket — proxy WebSocket upgrades (default: false)
# disable_secure_redirection — when true, plain-HTTP requests are forwarded
# as-is instead of being 301-redirected to HTTPS.
# Default: false (all HTTP → HTTPS redirect enforced).
# Collabora Online (documents) — WebSocket presumably needed for live editing
# sessions; confirm against Collabora's reverse-proxy docs.
[[routes]]
host_prefix = "docs"
backend = "http://collabora.lasuite.svc.cluster.local:9980"
websocket = true
# Meet: SPA frontend by default; API/admin/OIDC/static paths split to the backend.
[[routes]]
host_prefix = "meet"
backend = "http://meet-frontend.lasuite.svc.cluster.local:80"
websocket = true
[[routes.paths]]
prefix = "/api/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/oidc/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/static/"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
# /__ — presumably framework-internal endpoints (health/debug); confirm which
# paths the meet backend actually serves under this prefix.
[[routes.paths]]
prefix = "/__"
backend = "http://meet-backend.lasuite.svc.cluster.local:80"
# Drive: frontend by default; API/admin/static/external_api paths to the backend.
[[routes]]
host_prefix = "drive"
backend = "http://drive-frontend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/api/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/static/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/external_api/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
# /media/ falls through to frontend nginx which handles auth_request internally
[[routes]]
host_prefix = "mail"
# Caddy is the unified entry point — proxies /api/, /admin/, /static/, /oidc/ internally.
backend = "http://messages-frontend.lasuite.svc.cluster.local:80"
# Matrix homeserver (tuwunel); WebSocket for client sync connections — presumably.
[[routes]]
host_prefix = "messages"
backend = "http://tuwunel.matrix.svc.cluster.local:6167"
websocket = true
# Serve .well-known from tuwunel directly
# NOTE(review): this path's backend is identical to the route default, so the
# entry appears redundant — confirm whether it carries extra semantics.
[[routes.paths]]
prefix = "/.well-known/matrix"
backend = "http://tuwunel.matrix.svc.cluster.local:6167"
# People (directory): frontend by default; backend paths split out below.
[[routes]]
host_prefix = "people"
backend = "http://people-frontend.lasuite.svc.cluster.local:80"
# Backend handles the API, Django admin, and OAuth2 provider.
[[routes.paths]]
prefix = "/api/"
backend = "http://people-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://people-backend.lasuite.svc.cluster.local:80"
# /o/ — OAuth2 provider endpoints (see comment above).
[[routes.paths]]
prefix = "/o/"
backend = "http://people-backend.lasuite.svc.cluster.local:80"
[[routes]]
host_prefix = "find"
backend = "http://find-backend.lasuite.svc.cluster.local:8000"
# Gitea web UI; WebSocket enabled — presumably for live page updates, confirm.
[[routes]]
host_prefix = "src"
backend = "http://gitea-http.devtools.svc.cluster.local:3000"
websocket = true
# auth: unified IAM dashboard; Hydra handles OAuth2/OIDC; Kratos handles self-service flows.
# NOTE(review): the route default serves kratos-admin-ui on a public host —
# confirm the admin UI enforces authentication upstream.
[[routes]]
host_prefix = "auth"
backend = "http://kratos-admin-ui.ory.svc.cluster.local:3000"
# Hydra public endpoints: OAuth2 authorize/token, OIDC discovery, userinfo.
[[routes.paths]]
prefix = "/oauth2"
backend = "http://hydra-public.ory.svc.cluster.local:4444"
[[routes.paths]]
prefix = "/.well-known"
backend = "http://hydra-public.ory.svc.cluster.local:4444"
[[routes.paths]]
prefix = "/userinfo"
backend = "http://hydra-public.ory.svc.cluster.local:4444"
# /kratos prefix is stripped before forwarding so Kratos sees its native paths.
[[routes.paths]]
prefix = "/kratos"
backend = "http://kratos-public.ory.svc.cluster.local:80"
strip_prefix = true
[[routes]]
host_prefix = "integration"
backend = "http://integration.lasuite.svc.cluster.local:80"
# Grafana UI (kube-prometheus-stack).
[[routes]]
host_prefix = "metrics"
backend = "http://kube-prometheus-stack-grafana.monitoring.svc.cluster.local:80"
# Prometheus UI/API.
[[routes]]
host_prefix = "systemmetrics"
backend = "http://kube-prometheus-stack-prometheus.monitoring.svc.cluster.local:9090"
# Loki, via its gateway service.
[[routes]]
host_prefix = "systemlogs"
backend = "http://loki-gateway.monitoring.svc.cluster.local:80"
# Tempo HTTP API.
[[routes]]
host_prefix = "systemtracing"
backend = "http://tempo.monitoring.svc.cluster.local:3200"
# LiveKit media server — signalling runs over WebSocket.
[[routes]]
host_prefix = "livekit"
backend = "http://livekit-server.media.svc.cluster.local:80"
websocket = true
# Calendars: frontend by default; API/admin/static/CalDAV/RSVP/iCal paths to the
# backend. /.well-known/caldav is the CalDAV service-discovery path (RFC 6764).
[[routes]]
host_prefix = "cal"
backend = "http://calendars-frontend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/api/"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/static/"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/caldav"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/.well-known/caldav"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/rsvp/"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/ical/"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/external_api/"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
# /__ — presumably framework-internal endpoints; confirm against the backend.
[[routes.paths]]
prefix = "/__"
backend = "http://calendars-backend.lasuite.svc.cluster.local:80"
[[routes]]
host_prefix = "projects"
backend = "http://projects.lasuite.svc.cluster.local:80"
websocket = true
# S3-compatible object storage endpoint (SeaweedFS filer, S3 API port 8333).
[[routes]]
host_prefix = "s3"
backend = "http://seaweedfs-filer.storage.svc.cluster.local:8333"
# SSH TCP passthrough: port 22 → Gitea SSH pod (headless service → pod:2222).
[ssh]
# Binding a port < 1024 requires the proxy container to run privileged or with
# CAP_NET_BIND_SERVICE — verify the Deployment grants this.
listen = "0.0.0.0:22"
backend = "gitea-ssh.devtools.svc.cluster.local:2222"