feat(proxy): add SSH TCP passthrough and graceful HTTP-only startup

Add optional [ssh] config block that proxies port 22 → Gitea SSH pod,
running on a dedicated thread/runtime matching the cert-watcher pattern.

Also start HTTP-only on first deploy when the TLS cert file doesn't exist
yet — once ACME challenge completes and the cert watcher writes the file,
a graceful upgrade adds the TLS listener without downtime.

Fix ACME watcher to handle InitApply events (kube-runtime v3+) so
Ingresses that existed before the proxy started are picked up correctly.

Signed-off-by: Sienna Meridian Satterwhite <sienna@sunbeam.pt>
This commit is contained in:
2026-03-10 23:38:19 +00:00
parent 10de00990c
commit e5b6802107
5 changed files with 89 additions and 4 deletions

View File

@@ -43,7 +43,10 @@ pub async fn watch_ingresses(client: Client, routes: AcmeRoutes) {
while let Some(result) = stream.next().await {
match result {
Ok(watcher::Event::Apply(ing)) => {
// InitApply fires for each Ingress during the initial list (kube v3+).
// Apply fires for subsequent creates/updates.
// Both must be handled to catch Ingresses that existed before the proxy started.
Ok(watcher::Event::InitApply(ing)) | Ok(watcher::Event::Apply(ing)) => {
let mut map = routes.write().unwrap_or_else(|e| e.into_inner());
upsert_routes(&ing, &mut map);
}

View File

@@ -2,12 +2,22 @@ use anyhow::{Context, Result};
use serde::Deserialize;
use std::fs;
/// Settings for the optional SSH TCP passthrough, deserialized from the
/// `[ssh]` block of the config file. When present, every TCP connection
/// accepted on `listen` is forwarded verbatim to `backend`.
#[derive(Debug, Deserialize, Clone)]
pub struct SshConfig {
/// Address to bind the SSH listener on, e.g. "0.0.0.0:22".
pub listen: String,
/// Upstream backend address, e.g. "gitea-ssh.devtools.svc.cluster.local:2222".
pub backend: String,
}
/// Top-level proxy configuration, deserialized from the config file.
#[derive(Debug, Deserialize, Clone)]
pub struct Config {
/// HTTP and HTTPS bind addresses (`listen.http` / `listen.https`).
pub listen: ListenConfig,
/// Filesystem paths to the TLS certificate and private key.
pub tls: TlsFileConfig,
// NOTE(review): telemetry/routes schemas are defined elsewhere in this file —
// see their struct definitions for field semantics.
pub telemetry: TelemetryConfig,
pub routes: Vec<RouteConfig>,
/// Optional SSH TCP passthrough (port 22 → Gitea SSH).
pub ssh: Option<SshConfig>,
}
#[derive(Debug, Deserialize, Clone)]

View File

@@ -4,3 +4,4 @@
pub mod acme;
pub mod config;
pub mod proxy;
// Raw TCP passthrough used for the optional SSH proxy (port 22 → Gitea SSH).
pub mod ssh;

View File

@@ -86,13 +86,43 @@ fn main() -> Result<()> {
};
let mut svc = http_proxy_service(&server.configuration, proxy);
// Port 80: plain HTTP — 301 → HTTPS, except for ACME HTTP-01 challenges.
// Port 443: TLS-terminated HTTPS. Cert written to /etc/tls/ by cert::* above.
// Port 80: always serve plain HTTP (ACME challenges + redirect to HTTPS).
svc.add_tcp(&cfg.listen.http);
svc.add_tls(&cfg.listen.https, &cfg.tls.cert_path, &cfg.tls.key_path)?;
// Port 443: only add the TLS listener if BOTH the cert and the key file
// exist. On first deploy cert-manager hasn't issued the cert yet, so we
// start HTTP-only. Once the pingora-tls Secret is created (ACME challenge
// completes), the watcher in step 6 writes the cert files and triggers a
// graceful upgrade; the upgraded process finds the files and adds the TLS
// listener, inheriting the port-80 socket from the old process.
//
// Checking the key as well closes a race: if only the cert has landed,
// `add_tls(...)?` would fail on the missing key and abort startup,
// defeating the HTTP-only fallback this gate exists to provide.
let cert_exists = std::path::Path::new(&cfg.tls.cert_path).exists()
    && std::path::Path::new(&cfg.tls.key_path).exists();
if cert_exists {
    svc.add_tls(&cfg.listen.https, &cfg.tls.cert_path, &cfg.tls.key_path)?;
    tracing::info!("TLS listener added on {}", cfg.listen.https);
} else {
    tracing::warn!(
        cert_path = %cfg.tls.cert_path,
        "cert not found — starting HTTP-only; ACME challenge will complete and trigger upgrade"
    );
}
server.add_service(svc);
// 5b. SSH TCP passthrough (port 22 → Gitea SSH), if configured.
// Runs on its own OS thread + Tokio runtime — same pattern as the cert/ingress watcher.
// Optional SSH TCP passthrough: spawn the proxy on a dedicated OS thread with
// its own Tokio runtime so it never contends with Pingora's internal runtime.
if let Some(ssh_cfg) = &cfg.ssh {
// Clone the addresses so the spawned thread owns its data ('static bound).
let listen = ssh_cfg.listen.clone();
let backend = ssh_cfg.backend.clone();
tracing::info!(%listen, %backend, "SSH TCP proxy enabled");
// NOTE(review): the JoinHandle is dropped, so this thread is detached — if
// the SSH proxy exits (e.g. bind failure), main keeps serving HTTP/HTTPS
// with no restart of the SSH listener. Confirm that is the intended policy.
std::thread::spawn(move || {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("ssh proxy runtime");
// block_on keeps the thread alive for the proxy's infinite accept loop.
rt.block_on(sunbeam_proxy::ssh::run_tcp_proxy(&listen, &backend));
});
}
// 6. Background K8s watchers on their own OS thread + tokio runtime so they
// don't interfere with Pingora's internal runtime. A fresh Client is
// created here so its tower workers live on this runtime (not the

41
src/ssh.rs Normal file
View File

@@ -0,0 +1,41 @@
use tokio::io::copy_bidirectional;
use tokio::net::{TcpListener, TcpStream};
/// Listens on `listen` and proxies every TCP connection to `backend`.
/// Runs forever; intended to be spawned on a dedicated OS thread + Tokio runtime,
/// matching the pattern used for the cert/ingress watcher.
pub async fn run_tcp_proxy(listen: &str, backend: &str) {
let listener = match TcpListener::bind(listen).await {
Ok(l) => {
tracing::info!(%listen, %backend, "SSH TCP proxy listening");
l
}
Err(e) => {
tracing::error!(error = %e, %listen, "SSH TCP proxy: bind failed");
return;
}
};
loop {
match listener.accept().await {
Ok((mut socket, peer_addr)) => {
let backend = backend.to_string();
tokio::spawn(async move {
match TcpStream::connect(&backend).await {
Ok(mut upstream) => {
if let Err(e) = copy_bidirectional(&mut socket, &mut upstream).await {
tracing::debug!(error = %e, %peer_addr, "ssh: session ended");
}
}
Err(e) => {
tracing::error!(error = %e, %peer_addr, %backend, "ssh: upstream connect failed");
}
}
});
}
Err(e) => {
tracing::error!(error = %e, "ssh: accept failed");
}
}
}
}