diff --git a/base/data/openbao-values.yaml b/base/data/openbao-values.yaml
index 4022d4d..9bb357a 100644
--- a/base/data/openbao-values.yaml
+++ b/base/data/openbao-values.yaml
@@ -31,6 +31,10 @@ server:
storage "file" {
path = "/openbao/data"
}
+ telemetry {
+ prometheus_retention_time = "30s"
+ disable_hostname = true
+ }
dataStorage:
enabled: true
diff --git a/base/data/postgres-alertrules.yaml b/base/data/postgres-alertrules.yaml
index 97e3e1e..dd20281 100644
--- a/base/data/postgres-alertrules.yaml
+++ b/base/data/postgres-alertrules.yaml
@@ -28,7 +28,7 @@ spec:
description: "Database {{ $labels.datname }} is {{ $value | humanize1024 }} (PVC limit 10Gi)"
- alert: PostgresHighConnections
- expr: sum by (pod) (cnpg_pg_stat_activity_count) > 80
+ expr: sum by (pod) (cnpg_backends_total) > 80
for: 5m
labels:
severity: warning
diff --git a/base/devtools/gitea-values.yaml b/base/devtools/gitea-values.yaml
index ed5f1fc..5524797 100644
--- a/base/devtools/gitea-values.yaml
+++ b/base/devtools/gitea-values.yaml
@@ -119,9 +119,9 @@ extraContainerVolumeMounts:
resources:
limits:
- memory: 256Mi
+ memory: 3Gi
requests:
- memory: 128Mi
+ memory: 512Mi
cpu: 100m
service:
diff --git a/base/lasuite/drive-frontend-nginx-configmap.yaml b/base/lasuite/drive-frontend-nginx-configmap.yaml
index 4dc4f0e..3080b74 100644
--- a/base/lasuite/drive-frontend-nginx-configmap.yaml
+++ b/base/lasuite/drive-frontend-nginx-configmap.yaml
@@ -50,9 +50,15 @@ data:
}
# Protected media: auth via Drive backend, then proxy to S3 with signed headers.
- # media-auth returns S3 SigV4 Authorization/X-Amz-Date headers; nginx captures
- # and forwards them so SeaweedFS can verify the request.
- location /media/ {
+ # media-auth returns SigV4 Authorization/X-Amz-Date/X-Amz-Content-SHA256
+ # headers signed for the S3 key (item/UUID/file). nginx captures them and
+ # forwards to SeaweedFS. The regex strips /media/ and optional /preview/
+ # so the proxy path matches the signed S3 key exactly.
+ location ~ ^/media/(preview/)?(.*) {
+ set $original_uri $request_uri;
+ set $s3_key $2;
+ resolver kube-dns.kube-system.svc.cluster.local valid=30s;
+ set $s3_backend http://seaweedfs-filer.storage.svc.cluster.local:8333;
auth_request /internal/media-auth;
auth_request_set $auth_header $upstream_http_authorization;
auth_request_set $amz_date $upstream_http_x_amz_date;
@@ -60,7 +66,7 @@ data:
proxy_set_header Authorization $auth_header;
proxy_set_header X-Amz-Date $amz_date;
proxy_set_header X-Amz-Content-Sha256 $amz_content;
- proxy_pass http://seaweedfs-filer.storage.svc.cluster.local:8333/sunbeam-drive/;
+ proxy_pass $s3_backend/sunbeam-drive/$s3_key;
}
# Internal subrequest: Django checks session and item access, returns S3 auth headers.
@@ -69,8 +75,9 @@ data:
proxy_pass http://drive-backend.lasuite.svc.cluster.local:80/api/v1.0/items/media-auth/;
proxy_pass_request_body off;
proxy_set_header Content-Length "";
- proxy_set_header Host drive.sunbeam.pt;
- proxy_set_header X-Original-URL $scheme://$host$request_uri;
+ proxy_set_header Cookie $http_cookie;
+ proxy_set_header Host drive.DOMAIN_SUFFIX;
+ proxy_set_header X-Original-URL https://drive.DOMAIN_SUFFIX$original_uri;
}
error_page 500 502 503 504 @blank_error;
diff --git a/base/lasuite/meet-backend-deployment.yaml b/base/lasuite/meet-backend-deployment.yaml
index 9f600c5..a039eba 100644
--- a/base/lasuite/meet-backend-deployment.yaml
+++ b/base/lasuite/meet-backend-deployment.yaml
@@ -28,6 +28,8 @@ spec:
name: lasuite-s3
- configMapRef:
name: lasuite-oidc-provider
+ - configMapRef:
+ name: lasuite-resource-server
env:
- name: DB_PASSWORD
valueFrom:
@@ -64,6 +66,16 @@ spec:
secretKeyRef:
name: oidc-meet
key: CLIENT_SECRET
+ - name: OIDC_RS_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: oidc-meet
+ key: CLIENT_ID
+ - name: OIDC_RS_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: oidc-meet
+ key: CLIENT_SECRET
- name: AWS_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
@@ -102,6 +114,8 @@ spec:
name: lasuite-s3
- configMapRef:
name: lasuite-oidc-provider
+ - configMapRef:
+ name: lasuite-resource-server
env:
- name: DB_PASSWORD
valueFrom:
@@ -138,6 +152,16 @@ spec:
secretKeyRef:
name: oidc-meet
key: CLIENT_SECRET
+ - name: OIDC_RS_CLIENT_ID
+ valueFrom:
+ secretKeyRef:
+ name: oidc-meet
+ key: CLIENT_ID
+ - name: OIDC_RS_CLIENT_SECRET
+ valueFrom:
+ secretKeyRef:
+ name: oidc-meet
+ key: CLIENT_SECRET
- name: AWS_S3_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
diff --git a/base/lasuite/meet-config.yaml b/base/lasuite/meet-config.yaml
index 7a7a00f..82f043b 100644
--- a/base/lasuite/meet-config.yaml
+++ b/base/lasuite/meet-config.yaml
@@ -12,3 +12,10 @@ data:
DB_USER: meet
AWS_STORAGE_BUCKET_NAME: sunbeam-meet
LIVEKIT_API_URL: https://livekit.DOMAIN_SUFFIX
+ EXTERNAL_API_ENABLED: "True"
+ # Resource server settings for CLI bearer token auth.
+ # Override defaults (ES256 + encrypted) to match Hydra's RS256 + opaque tokens.
+ OIDC_RS_SIGNING_ALGO: RS256
+ OIDC_RS_SCOPES: openid,email,profile,offline_access
+ OIDC_RS_ENCRYPTION_ALGO: ""
+ OIDC_RS_ENCRYPTION_ENCODING: ""
diff --git a/base/lasuite/shared-config.yaml b/base/lasuite/shared-config.yaml
index 1a453a1..8b843ce 100644
--- a/base/lasuite/shared-config.yaml
+++ b/base/lasuite/shared-config.yaml
@@ -61,3 +61,19 @@ data:
OIDC_RP_SIGN_ALGO: RS256
OIDC_RP_SCOPES: openid email profile
OIDC_VERIFY_SSL: "true"
+---
+# Resource server config — shared by all La Suite services.
+# Enables bearer token auth via Hydra token introspection for the external_api.
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: lasuite-resource-server
+ namespace: lasuite
+data:
+ OIDC_RESOURCE_SERVER_ENABLED: "True"
+ OIDC_OP_URL: https://auth.DOMAIN_SUFFIX/
+ OIDC_OP_INTROSPECTION_ENDPOINT: http://hydra-admin.ory.svc.cluster.local:4445/admin/oauth2/introspect
+ # Audience claim value for the sunbeam CLI. All La Suite services should
+ # include this in OIDC_RS_ALLOWED_AUDIENCES so the CLI can access their
+ # external APIs with an SSO bearer token.
+ OIDC_RS_CLI_AUDIENCE: sunbeam-cli
diff --git a/base/matrix/sol-config.yaml b/base/matrix/sol-config.yaml
index b8c863c..898db31 100644
--- a/base/matrix/sol-config.yaml
+++ b/base/matrix/sol-config.yaml
@@ -61,6 +61,9 @@ data:
research_max_agents = 25
research_max_depth = 4
+ [grpc]
+ listen_addr = "0.0.0.0:50051"
+
[vault]
url = "http://openbao.data.svc.cluster.local:8200"
role = "sol-agent"
diff --git a/base/monitoring/matrix-alertmanager-receiver-deployment.yaml b/base/monitoring/matrix-alertmanager-receiver-deployment.yaml
index 196e950..fbc483c 100644
--- a/base/monitoring/matrix-alertmanager-receiver-deployment.yaml
+++ b/base/monitoring/matrix-alertmanager-receiver-deployment.yaml
@@ -1,4 +1,29 @@
---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: matrix-alertmanager-receiver-config
+ namespace: monitoring
+data:
+ config.yaml: |
+ http:
+ port: 3000
+ alerts-path-prefix: /alerts
+ matrix:
+ homeserver-url: "http://tuwunel.matrix.svc.cluster.local:6167"
+ user-id: "@alertbot:sunbeam.pt"
+ access-token: "ACCESS_TOKEN_PLACEHOLDER"
+ room-mapping:
+ alerts: "ROOM_ID_PLACEHOLDER"
+ templating:
+ firing-template: |
+ 🔥 {{ .Alert.Labels.alertname }} [{{ .Alert.Labels.severity }}]
+ {{ .Alert.Annotations.summary }}
+ {{ .Alert.Annotations.description }}
+ resolved-template: |
+ ✅ RESOLVED: {{ .Alert.Labels.alertname }}
+ {{ .Alert.Annotations.summary }}
+---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -16,37 +41,59 @@ spec:
labels:
app: matrix-alertmanager-receiver
spec:
+ initContainers:
+ # Inject secrets into config file — the receiver reads a YAML file,
+ # not env vars. We template the placeholders with real values from
+ # the matrix-bot-creds Secret.
+ - name: inject-secrets
+ image: busybox:1.36
+ command: ["sh", "-c"]
+ args:
+ - |
+ cp /config-template/config.yaml /config/config.yaml
+ sed -i "s|ACCESS_TOKEN_PLACEHOLDER|$(cat /secrets/access_token)|" /config/config.yaml
+ sed -i "s|ROOM_ID_PLACEHOLDER|$(cat /secrets/room_id)|" /config/config.yaml
+ volumeMounts:
+ - name: config-template
+ mountPath: /config-template
+ readOnly: true
+ - name: config
+ mountPath: /config
+ - name: secrets
+ mountPath: /secrets
+ readOnly: true
+ resources:
+ limits:
+ memory: 16Mi
+ requests:
+ memory: 8Mi
+ cpu: 5m
containers:
- name: receiver
- image: ghcr.io/metio/matrix-alertmanager-receiver:2024.11.27
+ image: ghcr.io/metio/matrix-alertmanager-receiver:2024.11.27
+ args: ["--config-path", "/config/config.yaml"]
ports:
- containerPort: 3000
protocol: TCP
- env:
- - name: MAR_HOMESERVER_URL
- value: "http://tuwunel.matrix.svc.cluster.local:6167"
- - name: MAR_USER_ID
- value: "@alertbot:sunbeam.pt"
- - name: MAR_ACCESS_TOKEN
- valueFrom:
- secretKeyRef:
- name: matrix-bot-creds
- key: access_token
- - name: MAR_ROOM_MAPPING
- value: "ops=$(ROOM_ID)"
- - name: ROOM_ID
- valueFrom:
- secretKeyRef:
- name: matrix-bot-creds
- key: room_id
- - name: MAR_PORT
- value: "3000"
+ volumeMounts:
+ - name: config
+ mountPath: /config
+ readOnly: true
resources:
requests:
cpu: 10m
memory: 32Mi
limits:
memory: 64Mi
+ volumes:
+ - name: config-template
+ configMap:
+ name: matrix-alertmanager-receiver-config
+ - name: config
+ emptyDir: {}
+ - name: secrets
+ secret:
+ secretName: matrix-bot-creds
---
apiVersion: v1
kind: Service
diff --git a/base/monitoring/prometheus-values.yaml b/base/monitoring/prometheus-values.yaml
index 96fbe6b..fbcd038 100644
--- a/base/monitoring/prometheus-values.yaml
+++ b/base/monitoring/prometheus-values.yaml
@@ -158,11 +158,11 @@ alertmanager:
send_resolved: true
- name: matrix
webhook_configs:
- - url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts"
+ - url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts/alerts"
send_resolved: true
- name: critical
webhook_configs:
- - url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts"
+ - url: "http://matrix-alertmanager-receiver.monitoring.svc.cluster.local:3000/alerts/alerts"
send_resolved: true
email_configs:
- to: "ops@DOMAIN_SUFFIX"