feat(ingress): add detection pipeline config and metrics port

- Add DDoS, scanner, and rate limiter configuration to pingora-config
- Add kubernetes config section with configurable namespace/resource names
- Expose metrics port 9090 on deployment and service
This commit is contained in:
2026-03-08 20:37:49 +00:00
parent f3faf31d4b
commit 7c1676d2b9
3 changed files with 101 additions and 20 deletions

View File

@@ -23,6 +23,59 @@ data:
[telemetry]
# Empty = OTEL disabled. Set to http://otel-collector.data.svc:4318 when ready.
otlp_endpoint = ""
metrics_port = 9090
# Kubernetes resource names for cert/config watchers.
# Override these if your namespace or Secret/ConfigMap names differ.
[kubernetes]
namespace = "ingress"
tls_secret = "pingora-tls"
config_configmap = "pingora-config"
# DDoS detection — KNN-based per-IP behavioral classification.
[ddos]
enabled = true
model_path = "/models/ddos_model.bin"
k = 5
threshold = 0.6
window_secs = 60
window_capacity = 1000
min_events = 10
# Scanner detection — logistic regression per-request classification.
[scanner]
enabled = true
model_path = "/models/scanner_model.bin"
threshold = 0.5
poll_interval_secs = 30
bot_cache_ttl_secs = 86400
[[scanner.allowlist]]
ua_prefix = "Googlebot"
reason = "Google crawler"
dns_suffixes = ["googlebot.com", "google.com"]
cidrs = ["66.249.64.0/19"]
[[scanner.allowlist]]
ua_prefix = "Bingbot"
reason = "Microsoft crawler"
dns_suffixes = ["search.msn.com"]
cidrs = ["40.77.167.0/24", "157.55.39.0/24"]
# Rate limiting — leaky bucket per-identity throttling.
[rate_limit]
enabled = true
eviction_interval_secs = 300
stale_after_secs = 600
bypass_cidrs = ["10.42.0.0/16"]
[rate_limit.authenticated]
burst = 200
rate = 50.0
[rate_limit.unauthenticated]
burst = 50
rate = 10.0
# Host-prefix → backend routing table.
# The prefix is the subdomain before the first dot, so these routes work
@@ -39,22 +92,8 @@ data:
[[routes]]
host_prefix = "docs"
backend = "http://docs-frontend.lasuite.svc.cluster.local:80"
# API and admin go to the backend.
[[routes.paths]]
prefix = "/api/"
backend = "http://docs-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://docs-backend.lasuite.svc.cluster.local:80"
# Real-time collaboration WebSocket (y-provider / Hocuspocus).
[[routes.paths]]
prefix = "/collaboration/ws/"
backend = "http://docs-y-provider.lasuite.svc.cluster.local:443"
websocket = true
backend = "http://collabora.lasuite.svc.cluster.local:9980"
websocket = true
[[routes]]
host_prefix = "meet"
@@ -83,17 +122,40 @@ data:
[[routes]]
host_prefix = "drive"
backend = "http://drive.lasuite.svc.cluster.local:8000"
backend = "http://drive-frontend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/api/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/admin/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/static/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
[[routes.paths]]
prefix = "/external_api/"
backend = "http://drive-backend.lasuite.svc.cluster.local:80"
# /media/ falls through to the frontend nginx, which handles auth_request internally
[[routes]]
host_prefix = "mail"
backend = "http://messages.lasuite.svc.cluster.local:8000"
# Caddy is the unified entry point — proxies /api/, /admin/, /static/, /oidc/ internally.
backend = "http://messages-frontend.lasuite.svc.cluster.local:80"
[[routes]]
host_prefix = "chat"
backend = "http://conversations.lasuite.svc.cluster.local:8000"
backend = "http://tuwunel.matrix.svc.cluster.local:6167"
websocket = true
# Serve .well-known from tuwunel directly
[[routes.paths]]
prefix = "/.well-known/matrix"
backend = "http://tuwunel.matrix.svc.cluster.local:6167"
[[routes]]
host_prefix = "people"
backend = "http://people-frontend.lasuite.svc.cluster.local:80"
@@ -153,9 +215,21 @@ data:
[[routes]]
host_prefix = "grafana"
host_prefix = "metrics"
backend = "http://kube-prometheus-stack-grafana.monitoring.svc.cluster.local:80"
[[routes]]
host_prefix = "systemmetrics"
backend = "http://kube-prometheus-stack-prometheus.monitoring.svc.cluster.local:9090"
[[routes]]
host_prefix = "systemlogs"
backend = "http://loki-gateway.monitoring.svc.cluster.local:80"
[[routes]]
host_prefix = "systemtracing"
backend = "http://tempo.monitoring.svc.cluster.local:3200"
[[routes]]
host_prefix = "livekit"
backend = "http://livekit-server.media.svc.cluster.local:80"

View File

@@ -34,6 +34,9 @@ spec:
- name: ssh
containerPort: 22
protocol: TCP
- name: metrics
containerPort: 9090
protocol: TCP
- name: turn-udp
containerPort: 3478
protocol: UDP

View File

@@ -16,6 +16,10 @@ spec:
port: 443
targetPort: 443
protocol: TCP
- name: metrics
port: 9090
targetPort: 9090
protocol: TCP
- name: turn-udp
port: 3478
targetPort: 3478