# sbbb/overlays/local/values-resources.yaml
# Patch: apply §10.7 memory limits (and single-replica counts for the lasuite apps)
# to Deployments in the local overlay. The limits are intentionally tight to stay
# within the 12 GB Lima VM budget.
#
# Applied as a strategic merge patch. Each stanza targets one Deployment by name.
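#
# For orientation: the overlay's kustomization.yaml is expected to pull this file in
# as a strategic merge patch, roughly along the lines of the sketch below (field names
# follow current Kustomize; older overlays may use patchesStrategicMerge instead):
#
#   patches:
#     - path: values-resources.yaml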
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cloudnative-pg
  namespace: data
spec:
  template:
    spec:
      containers:
        - name: manager
          resources:
            limits:
              memory: 256Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: livekit-server
  namespace: media
spec:
  template:
    spec:
      containers:
        - name: livekit-server
          resources:
            limits:
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: pingora
  namespace: ingress
spec:
  template:
    spec:
      containers:
        - name: pingora
          resources:
            limits:
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: valkey
  namespace: data
spec:
  template:
    spec:
      containers:
        - name: valkey
          resources:
            limits:
              memory: 64Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: opensearch
  namespace: data
spec:
  template:
    spec:
      containers:
        - name: opensearch
          env:
            # Reduce JVM heap so it fits within the 512Mi container limit.
            # Base sets -Xms512m -Xmx1g which immediately OOMs the container.
            - name: OPENSEARCH_JAVA_OPTS
              value: "-Xms192m -Xmx256m"
          resources:
            limits:
              memory: 512Mi
            requests:
              memory: 256Mi
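# Rough sizing behind the OPENSEARCH_JAVA_OPTS value above (an estimate, not a
# measurement): a 256m max heap plus typical JVM off-heap overhead (metaspace,
# thread stacks, direct buffers) on the order of 150-200 MB stays comfortably
# below the 512Mi limit, whereas the base chart's 1g heap alone already exceeds it.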
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: seaweedfs-filer
  namespace: storage
spec:
  template:
    spec:
      containers:
        - name: filer
          resources:
            limits:
              memory: 512Mi
            requests:
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hydra-hydra-maester
  namespace: ory
spec:
  template:
    spec:
      containers:
        - name: hydra-maester
          resources:
            limits:
              memory: 64Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: login-ui
  namespace: ory
spec:
  template:
    spec:
      containers:
        - name: login-ui
          resources:
            limits:
              memory: 192Mi
            requests:
              memory: 64Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hive
  namespace: lasuite
spec:
  template:
    spec:
      containers:
        - name: hive
          resources:
            limits:
              memory: 64Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: people-backend
  namespace: lasuite
spec:
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: people-celery-worker
  namespace: lasuite
spec:
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: people-frontend
  namespace: lasuite
spec:
  replicas: 1
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docs-celery-worker
  namespace: lasuite
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: docs
          env:
            # Celery: cap at 2 concurrent workers to fit within the local memory budget.
            - name: CELERY_WORKER_CONCURRENCY
              value: "2"
          resources:
            limits:
              memory: 384Mi
            requests:
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docs-backend
  namespace: lasuite
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: docs
          env:
            # 2 uvicorn workers instead of the default 4 to stay within the local
            # memory budget. Each worker loads the full Django+impress app (~150 MB),
            # so 4 workers pushed peak RSS above 384Mi and triggered OOMKill at startup.
            - name: WEB_CONCURRENCY
              value: "2"
          resources:
            limits:
              memory: 512Mi
            requests:
              memory: 192Mi
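# Back-of-the-envelope check for the WEB_CONCURRENCY setting above (estimate only,
# based on the ~150 MB-per-worker figure noted there): 2 workers is roughly 300 MB of
# app RSS, leaving headroom under the 512Mi limit, while 4 workers at ~150 MB each
# already lands around 600 MB before interpreter and library overhead.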
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docs-frontend
  namespace: lasuite
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: docs
          resources:
            limits:
              memory: 128Mi
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: docs-y-provider
  namespace: lasuite
spec:
  replicas: 1
  template:
    spec:
      containers:
        - name: docs
          resources:
            limits:
              memory: 256Mi
            requests:
              memory: 64Mi
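#
# One way to eyeball that the names and namespaces patched above still line up with
# what the overlay actually produces after a chart bump (the overlay path and yq v4
# syntax here are assumptions, adjust to taste):
#
#   kustomize build sbbb/overlays/local \
#     | yq 'select(.kind == "Deployment") | .metadata.namespace + "/" + .metadata.name'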