diff --git a/src/acme.rs b/src/acme.rs
index 1c72470..3e46bfd 100644
--- a/src/acme.rs
+++ b/src/acme.rs
@@ -43,7 +43,10 @@ pub async fn watch_ingresses(client: Client, routes: AcmeRoutes) {
     while let Some(result) = stream.next().await {
         match result {
-            Ok(watcher::Event::Apply(ing)) => {
+            // InitApply fires for each Ingress during the initial list (kube v3+).
+            // Apply fires for subsequent creates/updates.
+            // Both must be handled to catch Ingresses that existed before the proxy started.
+            Ok(watcher::Event::InitApply(ing)) | Ok(watcher::Event::Apply(ing)) => {
                 let mut map = routes.write().unwrap_or_else(|e| e.into_inner());
                 upsert_routes(&ing, &mut map);
             }
diff --git a/src/config.rs b/src/config.rs
index 09fcf74..6191d06 100644
--- a/src/config.rs
+++ b/src/config.rs
@@ -2,12 +2,22 @@
 use anyhow::{Context, Result};
 use serde::Deserialize;
 use std::fs;
 
+#[derive(Debug, Deserialize, Clone)]
+pub struct SshConfig {
+    /// Address to bind the SSH listener on, e.g. "0.0.0.0:22".
+    pub listen: String,
+    /// Upstream backend address, e.g. "gitea-ssh.devtools.svc.cluster.local:2222".
+    pub backend: String,
+}
+
 #[derive(Debug, Deserialize, Clone)]
 pub struct Config {
     pub listen: ListenConfig,
     pub tls: TlsFileConfig,
     pub telemetry: TelemetryConfig,
     pub routes: Vec<RouteConfig>,
+    /// Optional SSH TCP passthrough (port 22 → Gitea SSH).
+    pub ssh: Option<SshConfig>,
 }
 #[derive(Debug, Deserialize, Clone)]
diff --git a/src/lib.rs b/src/lib.rs
index 190da03..6ca049e 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,3 +4,4 @@
 pub mod acme;
 pub mod config;
 pub mod proxy;
+pub mod ssh;
diff --git a/src/main.rs b/src/main.rs
index 263f943..ae779f0 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -86,13 +86,43 @@ fn main() -> Result<()> {
     };
     let mut svc = http_proxy_service(&server.configuration, proxy);
 
-    // Port 80: plain HTTP — 301 → HTTPS, except for ACME HTTP-01 challenges.
-    // Port 443: TLS-terminated HTTPS. Cert written to /etc/tls/ by cert::* above.
+    // Port 80: always serve plain HTTP (ACME challenges + redirect to HTTPS).
     svc.add_tcp(&cfg.listen.http);
-    svc.add_tls(&cfg.listen.https, &cfg.tls.cert_path, &cfg.tls.key_path)?;
+
+    // Port 443: only add the TLS listener if the cert files exist.
+    // On first deploy cert-manager hasn't issued the cert yet, so we start
+    // HTTP-only. Once the pingora-tls Secret is created (ACME challenge
+    // completes), the watcher in step 6 writes the cert files and triggers
+    // a graceful upgrade. The upgrade process finds the cert files and adds
+    // the TLS listener, inheriting the port-80 socket from the old process.
+    let cert_exists = std::path::Path::new(&cfg.tls.cert_path).exists();
+    if cert_exists {
+        svc.add_tls(&cfg.listen.https, &cfg.tls.cert_path, &cfg.tls.key_path)?;
+        tracing::info!("TLS listener added on {}", cfg.listen.https);
+    } else {
+        tracing::warn!(
+            cert_path = %cfg.tls.cert_path,
+            "cert not found — starting HTTP-only; ACME challenge will complete and trigger upgrade"
+        );
+    }
 
     server.add_service(svc);
 
+    // 5b. SSH TCP passthrough (port 22 → Gitea SSH), if configured.
+    // Runs on its own OS thread + Tokio runtime — same pattern as the cert/ingress watcher.
+    if let Some(ssh_cfg) = &cfg.ssh {
+        let listen = ssh_cfg.listen.clone();
+        let backend = ssh_cfg.backend.clone();
+        tracing::info!(%listen, %backend, "SSH TCP proxy enabled");
+        std::thread::spawn(move || {
+            let rt = tokio::runtime::Builder::new_multi_thread()
+                .enable_all()
+                .build()
+                .expect("ssh proxy runtime");
+            rt.block_on(sunbeam_proxy::ssh::run_tcp_proxy(&listen, &backend));
+        });
+    }
+
     // 6. Background K8s watchers on their own OS thread + tokio runtime so they
     // don't interfere with Pingora's internal runtime. A fresh Client is
     // created here so its tower workers live on this runtime (not the
diff --git a/src/ssh.rs b/src/ssh.rs
new file mode 100644
index 0000000..f131011
--- /dev/null
+++ b/src/ssh.rs
@@ -0,0 +1,41 @@
+use tokio::io::copy_bidirectional;
+use tokio::net::{TcpListener, TcpStream};
+
+/// Listens on `listen` and proxies every TCP connection to `backend`.
+/// Runs forever; intended to be spawned on a dedicated OS thread + Tokio runtime,
+/// matching the pattern used for the cert/ingress watcher.
+pub async fn run_tcp_proxy(listen: &str, backend: &str) {
+    let listener = match TcpListener::bind(listen).await {
+        Ok(l) => {
+            tracing::info!(%listen, %backend, "SSH TCP proxy listening");
+            l
+        }
+        Err(e) => {
+            tracing::error!(error = %e, %listen, "SSH TCP proxy: bind failed");
+            return;
+        }
+    };
+
+    loop {
+        match listener.accept().await {
+            Ok((mut socket, peer_addr)) => {
+                let backend = backend.to_string();
+                tokio::spawn(async move {
+                    match TcpStream::connect(&backend).await {
+                        Ok(mut upstream) => {
+                            if let Err(e) = copy_bidirectional(&mut socket, &mut upstream).await {
+                                tracing::debug!(error = %e, %peer_addr, "ssh: session ended");
+                            }
+                        }
+                        Err(e) => {
+                            tracing::error!(error = %e, %peer_addr, %backend, "ssh: upstream connect failed");
+                        }
+                    }
+                });
+            }
+            Err(e) => {
+                tracing::error!(error = %e, "ssh: accept failed");
+            }
+        }
+    }
+}