fix: meet external-api route, drive media proxy, alertbot, misc tweaks

- Meet: add external-api backend path, CSRF trusted origins
- Drive: fix media proxy regex for preview URLs and S3 key signing
- OpenBao: enable Prometheus telemetry
- Postgres alerts: fix metric name (cnpg_backends_total)
- Gitea: bump memory limits for mirror workloads
- Alertbot: expanded deployment config
- Kratos: add find/cal/projects to allowed return URLs, settings path
- Pingora: meet external-api route fix
- Sol: config update
This commit is contained in:
2026-03-25 18:01:15 +00:00
parent eab91eb85d
commit 9f15f5099e
10 changed files with 139 additions and 31 deletions

View File

@@ -31,6 +31,10 @@ server:
storage "file" {
path = "/openbao/data"
}
telemetry {
prometheus_retention_time = "30s"
disable_hostname = true
}
dataStorage:
enabled: true

View File

@@ -28,7 +28,7 @@ spec:
description: "Database {{ $labels.datname }} is {{ $value | humanize1024 }} (PVC limit 10Gi)"
- alert: PostgresHighConnections
expr: sum by (pod) (cnpg_pg_stat_activity_count) > 80
expr: sum by (pod) (cnpg_backends_total) > 80
for: 5m
labels:
severity: warning

View File

@@ -119,9 +119,9 @@ extraContainerVolumeMounts:
resources:
limits:
memory: 256Mi
memory: 3Gi
requests:
memory: 128Mi
memory: 512Mi
cpu: 100m
service:

View File

@@ -50,9 +50,15 @@ data:
}
# Protected media: auth via Drive backend, then proxy to S3 with signed headers.
# media-auth returns S3 SigV4 Authorization/X-Amz-Date headers; nginx captures
# and forwards them so SeaweedFS can verify the request.
location /media/ {
# media-auth returns SigV4 Authorization/X-Amz-Date/X-Amz-Content-SHA256
# headers signed for the S3 key (item/UUID/file). nginx captures them and
# forwards to SeaweedFS. The regex strips /media/ and optional /preview/
# so the proxy path matches the signed S3 key exactly.
location ~ ^/media/(preview/)?(.*) {
set $original_uri $request_uri;
set $s3_key $2;
resolver kube-dns.kube-system.svc.cluster.local valid=30s;
set $s3_backend http://seaweedfs-filer.storage.svc.cluster.local:8333;
auth_request /internal/media-auth;
auth_request_set $auth_header $upstream_http_authorization;
auth_request_set $amz_date $upstream_http_x_amz_date;
@@ -60,7 +66,7 @@ data:
proxy_set_header Authorization $auth_header;
proxy_set_header X-Amz-Date $amz_date;
proxy_set_header X-Amz-Content-Sha256 $amz_content;
proxy_pass http://seaweedfs-filer.storage.svc.cluster.local:8333/sunbeam-drive/;
proxy_pass $s3_backend/sunbeam-drive/$s3_key;
}
# Internal subrequest: Django checks session and item access, returns S3 auth headers.
@@ -69,8 +75,9 @@ data:
proxy_pass http://drive-backend.lasuite.svc.cluster.local:80/api/v1.0/items/media-auth/;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
proxy_set_header Host drive.sunbeam.pt;
proxy_set_header X-Original-URL $scheme://$host$request_uri;
proxy_set_header Cookie $http_cookie;
proxy_set_header Host drive.DOMAIN_SUFFIX;
proxy_set_header X-Original-URL https://drive.DOMAIN_SUFFIX$original_uri;
}
error_page 500 502 503 504 @blank_error;

View File

@@ -28,6 +28,8 @@ spec:
name: lasuite-s3
- configMapRef:
name: lasuite-oidc-provider
- configMapRef:
name: lasuite-resource-server
env:
- name: DB_PASSWORD
valueFrom:
@@ -64,6 +66,16 @@ spec:
secretKeyRef:
name: oidc-meet
key: CLIENT_SECRET
- name: OIDC_RS_CLIENT_ID
valueFrom:
secretKeyRef:
name: oidc-meet
key: CLIENT_ID
- name: OIDC_RS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oidc-meet
key: CLIENT_SECRET
- name: AWS_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
@@ -102,6 +114,8 @@ spec:
name: lasuite-s3
- configMapRef:
name: lasuite-oidc-provider
- configMapRef:
name: lasuite-resource-server
env:
- name: DB_PASSWORD
valueFrom:
@@ -138,6 +152,16 @@ spec:
secretKeyRef:
name: oidc-meet
key: CLIENT_SECRET
- name: OIDC_RS_CLIENT_ID
valueFrom:
secretKeyRef:
name: oidc-meet
key: CLIENT_ID
- name: OIDC_RS_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oidc-meet
key: CLIENT_SECRET
- name: AWS_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:

View File

@@ -12,3 +12,10 @@ data:
DB_USER: meet
AWS_STORAGE_BUCKET_NAME: sunbeam-meet
LIVEKIT_API_URL: https://livekit.DOMAIN_SUFFIX
EXTERNAL_API_ENABLED: "True"
# Resource server settings for CLI bearer token auth.
# Override defaults (ES256 + encrypted) to match Hydra's RS256 + opaque tokens.
OIDC_RS_SIGNING_ALGO: RS256
OIDC_RS_SCOPES: openid,email,profile,offline_access
OIDC_RS_ENCRYPTION_ALGO: ""
OIDC_RS_ENCRYPTION_ENCODING: ""

View File

@@ -61,3 +61,19 @@ data:
OIDC_RP_SIGN_ALGO: RS256
OIDC_RP_SCOPES: openid email profile
OIDC_VERIFY_SSL: "true"
---
# Resource server config — shared by all La Suite services.
# Enables bearer token auth via Hydra token introspection for the external_api.
apiVersion: v1
kind: ConfigMap
metadata:
  name: lasuite-resource-server
  namespace: lasuite
data:
  OIDC_RESOURCE_SERVER_ENABLED: "True"
  OIDC_OP_URL: https://auth.DOMAIN_SUFFIX/
  OIDC_OP_INTROSPECTION_ENDPOINT: http://hydra-admin.ory.svc.cluster.local:4445/admin/oauth2/introspect
  # Audience claim value for the sunbeam CLI. All La Suite services should
  # include this in OIDC_RS_ALLOWED_AUDIENCES so the CLI can access their
  # external APIs with an SSO bearer token.
  OIDC_RS_CLI_AUDIENCE: sunbeam-cli

View File

@@ -61,6 +61,9 @@ data:
research_max_agents = 25
research_max_depth = 4
[grpc]
listen_addr = "0.0.0.0:50051"
[vault]
url = "http://openbao.data.svc.cluster.local:8200"
role = "sol-agent"

View File

@@ -1,4 +1,29 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: matrix-alertmanager-receiver-config
  namespace: monitoring
data:
  config.yaml: |
    http:
      port: 3000
      alerts-path-prefix: /alerts
    matrix:
      homeserver-url: "http://tuwunel.matrix.svc.cluster.local:6167"
      user-id: "@alertbot:sunbeam.pt"
      access-token: "ACCESS_TOKEN_PLACEHOLDER"
      room-mapping:
        alerts: "ROOM_ID_PLACEHOLDER"
    templating:
      firing-template: |
        🔥 <strong>{{ .Alert.Labels.alertname }}</strong> [{{ .Alert.Labels.severity }}]<br/>
        {{ .Alert.Annotations.summary }}<br/>
        <em>{{ .Alert.Annotations.description }}</em>
      resolved-template: |
        ✅ <strong>RESOLVED: {{ .Alert.Labels.alertname }}</strong><br/>
        {{ .Alert.Annotations.summary }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -16,37 +41,59 @@ spec:
labels:
app: matrix-alertmanager-receiver
spec:
      initContainers:
        # Inject secrets into config file — the receiver reads a YAML file,
        # not env vars. We template the placeholders with real values from
        # the matrix-bot-creds Secret.
        - name: inject-secrets
          image: busybox
          command: ["sh", "-c"]
          args:
            - |
              cp /config-template/config.yaml /config/config.yaml
              sed -i "s|ACCESS_TOKEN_PLACEHOLDER|$(cat /secrets/access_token)|" /config/config.yaml
              sed -i "s|ROOM_ID_PLACEHOLDER|$(cat /secrets/room_id)|" /config/config.yaml
          volumeMounts:
            - name: config-template
              mountPath: /config-template
              readOnly: true
            - name: config
              mountPath: /config
            - name: secrets
              mountPath: /secrets
              readOnly: true
          resources:
            limits:
              memory: 16Mi
            requests:
              memory: 8Mi
              cpu: 5m
containers:
- name: receiver
image: ghcr.io/metio/matrix-alertmanager-receiver:2024.11.27
image: metio/matrix-alertmanager-receiver:latest
args: ["--config-path", "/config/config.yaml"]
ports:
- containerPort: 3000
protocol: TCP
env:
- name: MAR_HOMESERVER_URL
value: "http://tuwunel.matrix.svc.cluster.local:6167"
- name: MAR_USER_ID
value: "@alertbot:sunbeam.pt"
- name: MAR_ACCESS_TOKEN
valueFrom:
secretKeyRef:
name: matrix-bot-creds
key: access_token
- name: MAR_ROOM_MAPPING
value: "ops=$(ROOM_ID)"
- name: ROOM_ID
valueFrom:
secretKeyRef:
name: matrix-bot-creds
key: room_id
- name: MAR_PORT
value: "3000"
volumeMounts:
- name: config
mountPath: /config
readOnly: true
resources:
requests:
cpu: 10m
memory: 32Mi
limits:
memory: 64Mi
volumes:
- name: config-template
configMap:
name: matrix-alertmanager-receiver-config
- name: config
emptyDir: {}
- name: secrets
secret:
secretName: matrix-bot-creds
---
apiVersion: v1
kind: Service

View File

@@ -158,11 +158,11 @@ alertmanager:
send_resolved: true
- name: matrix
webhook_configs:
- url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts"
- url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts/alerts"
send_resolved: true
- name: critical
webhook_configs:
- url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts"
- url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts/alerts"
send_resolved: true
email_configs:
- to: "ops@DOMAIN_SUFFIX"