From f07b3353aa9a589e1ca5a578d263a6f3f3d49b36 Mon Sep 17 00:00:00 2001
From: Sienna Meridian Satterwhite
Date: Mon, 6 Apr 2026 13:33:10 +0100
Subject: [PATCH] fix(longhorn): upgrade to v1.11.1, fix 38GB instance-manager memory leak

v1.11.0 had a critical proxy connection leak in the instance-manager
(longhorn/longhorn#12575) that consumed 38.8GB on apollo, pushing the
server to 92% memory with swap exhausted. v1.11.1 fixes the leak.

Also adds a 2Gi per-container LimitRange in longhorn-system as a safety
net against future regressions.
---
 base/longhorn/kustomization.yaml |  2 +-
 base/longhorn/values.yaml        | 18 ++++++++++++++++++
 2 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/base/longhorn/kustomization.yaml b/base/longhorn/kustomization.yaml
index 4d864dc..a12cf0f 100644
--- a/base/longhorn/kustomization.yaml
+++ b/base/longhorn/kustomization.yaml
@@ -7,7 +7,7 @@ resources:
 helmCharts:
   - name: longhorn
     repo: https://charts.longhorn.io
-    version: "1.11.0"
+    version: "1.11.1"
     releaseName: longhorn
     namespace: longhorn-system
     valuesFile: values.yaml
diff --git a/base/longhorn/values.yaml b/base/longhorn/values.yaml
index b432d96..755e4d3 100644
--- a/base/longhorn/values.yaml
+++ b/base/longhorn/values.yaml
@@ -22,3 +22,21 @@
 persistence:
   defaultClass: true
   defaultClassReplicaCount: 1
+
+# Safety net against a future leak regression. v1.11.0 had a proxy connection
+# leak (longhorn/longhorn#12575) that let instance-manager grow to 38 GB+;
+# v1.11.1 fixes it. This LimitRange injects a default 2 Gi memory limit into
+# every container in this namespace that does not declare its own limit.
+extraObjects:
+  - apiVersion: v1
+    kind: LimitRange
+    metadata:
+      name: instance-manager-limits
+      namespace: longhorn-system
+    spec:
+      limits:
+        - type: Container
+          default:
+            memory: 2Gi
+          defaultRequest:
+            memory: 128Mi