//! sunbeam-proxy binary entry point (proxy/src/main.rs).
mod acme;
mod cert;
mod config;
mod proxy;
mod telemetry;
mod watcher;
use std::{collections::HashMap, sync::Arc};
use anyhow::Result;
use kube::Client;
use pingora::server::{configuration::Opt, Server};
use pingora_proxy::http_proxy_service;
use proxy::SunbeamProxy;
use std::sync::RwLock;
/// Process entry point for sunbeam-proxy.
///
/// The statement order below is load-bearing:
/// * the rustls crypto provider is installed before anything touches TLS;
/// * the TLS cert is fetched/written to disk before `add_tls` reads it;
/// * the temporary Tokio runtime used for the one-shot cert fetch is dropped
///   before Pingora starts, and the watcher thread builds its own runtime, so
///   no `kube::Client` ever crosses a runtime boundary.
///
/// Returns `Err` only on startup failure (config load, runtime build, server
/// creation, TLS listener setup). Once `run_forever()` is reached the process
/// never returns — which is why there is no trailing `Ok(())`; `run_forever`
/// diverges (returns `!`).
fn main() -> Result<()> {
    // Install the aws-lc-rs crypto provider for rustls before any TLS init.
    // Required because rustls 0.23 no longer auto-selects a provider at compile time.
    // `install_default()` errs only if a provider was already installed, which
    // would be a startup-order bug here — hence the expect.
    rustls::crypto::aws_lc_rs::default_provider()
        .install_default()
        .expect("crypto provider already installed");
    // Config path is overridable for local/dev runs; the default matches the
    // container image layout. A missing or invalid config is fatal (`?`).
    let config_path = std::env::var("SUNBEAM_CONFIG")
        .unwrap_or_else(|_| "/etc/pingora/config.toml".to_string());
    let cfg = config::Config::load(&config_path)?;
    // 1. Init telemetry (JSON logs + optional OTEL traces). Done before the
    // tracing::warn!/info! calls below so their output is not dropped.
    telemetry::init(&cfg.telemetry.otlp_endpoint);
    // 2. Detect --upgrade flag. When present, Pingora inherits listening socket
    // FDs from the upgrade Unix socket instead of binding fresh ports, enabling
    // zero-downtime cert/config reloads triggered by the K8s watcher below.
    let upgrade = std::env::args().any(|a| a == "--upgrade");
    // 3. Fetch the TLS cert from K8s before Pingora binds the TLS port.
    // The Client is created and dropped within this temp runtime — we do NOT
    // carry it across runtime boundaries, which would kill its tower workers.
    // The watcher thread creates its own fresh Client on its own runtime.
    // `k8s_available` gates whether the watcher thread is spawned at all.
    let k8s_available = {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;
        rt.block_on(async {
            match Client::try_default().await {
                Ok(c) => {
                    // Skipped on --upgrade — presumably because the previous
                    // process instance already wrote current cert files and
                    // re-fetching would be redundant; confirm against the
                    // upgrade runbook.
                    if !upgrade {
                        if let Err(e) =
                            cert::fetch_and_write(&c, &cfg.tls.cert_path, &cfg.tls.key_path).await
                        {
                            // Non-fatal: Secret may not exist yet on first deploy (cert-manager
                            // is still issuing), or the Secret name may differ in dev.
                            tracing::warn!(error = %e, "cert fetch from K8s failed; using existing files");
                        }
                    }
                    true
                }
                Err(e) => {
                    // No in-cluster config and no kubeconfig — run as a plain
                    // proxy with whatever cert files already exist on disk.
                    tracing::warn!(error = %e, "no K8s client; cert auto-reload and ACME routing disabled");
                    false
                }
            }
        })
        // `rt` (and the Client created inside it) is dropped here, before the
        // Pingora server is constructed.
    };
    // Only `upgrade` varies at runtime; the rest pins Pingora to foreground
    // operation with no external conf file (config comes from cfg above).
    let opt = Opt {
        upgrade,
        daemon: false,
        nocapture: false,
        test: false,
        conf: None,
    };
    // 4. Create Pingora server and bootstrap (binds ports or inherits FDs).
    let mut server = Server::new(Some(opt))?;
    server.bootstrap();
    // 5. Shared ACME challenge route table. Populated by the Ingress watcher;
    // consulted by the proxy for every /.well-known/acme-challenge/ request.
    // Uses std::sync::RwLock so reads are sync and lock-guard-safe across
    // Pingora's async proxy calls without cross-runtime waker concerns.
    let acme_routes: acme::AcmeRoutes = Arc::new(RwLock::new(HashMap::new()));
    let proxy = SunbeamProxy {
        routes: cfg.routes.clone(),
        acme_routes: acme_routes.clone(),
    };
    let mut svc = http_proxy_service(&server.configuration, proxy);
    // Port 80: plain HTTP — 301 → HTTPS, except for ACME HTTP-01 challenges.
    // Port 443: TLS-terminated HTTPS. Cert written to /etc/tls/ by cert::* above.
    svc.add_tcp(&cfg.listen.http);
    svc.add_tls(&cfg.listen.https, &cfg.tls.cert_path, &cfg.tls.key_path)?;
    server.add_service(svc);
    // 6. Background K8s watchers on their own OS thread + tokio runtime so they
    // don't interfere with Pingora's internal runtime. A fresh Client is
    // created here so its tower workers live on this runtime (not the
    // now-dropped temp runtime from step 3).
    if k8s_available {
        // Clone paths for the 'static move closure; cfg itself stays on this
        // thread for the service setup above.
        let cert_path = cfg.tls.cert_path.clone();
        let key_path = cfg.tls.key_path.clone();
        // Detached thread: it lives for the life of the process, so the
        // JoinHandle is intentionally dropped.
        std::thread::spawn(move || {
            let rt = tokio::runtime::Builder::new_multi_thread()
                .enable_all()
                .build()
                .expect("watcher runtime");
            rt.block_on(async move {
                let client = match Client::try_default().await {
                    Ok(c) => c,
                    Err(e) => {
                        // K8s was reachable in step 3 but not now; degrade to
                        // static-cert operation rather than crash the proxy.
                        tracing::error!(error = %e, "watcher: failed to create K8s client; watchers disabled");
                        return;
                    }
                };
                // Both watchers run concurrently forever on this runtime;
                // join! only resolves if both return (i.e. on shutdown/error).
                tokio::join!(
                    acme::watch_ingresses(client.clone(), acme_routes),
                    watcher::run_watcher(client, cert_path, key_path),
                );
            });
        });
    }
    tracing::info!(upgrade, "sunbeam-proxy starting");
    // Diverges: hands the main thread to Pingora's runtime for good.
    server.run_forever();
}