feat(wfe-server): headless workflow server with gRPC, webhooks, and OIDC auth

Single-binary server exposing the WFE engine over gRPC (13 RPCs) with
HTTP webhook support (GitHub, Gitea, generic events).

Features:
- gRPC API: workflow CRUD, lifecycle event streaming, log streaming,
  log search via OpenSearch
- HTTP webhooks: HMAC-SHA256 verified GitHub/Gitea webhooks with
  configurable triggers that auto-start workflows
- OIDC/JWT auth: discovers JWKS from issuer, validates with asymmetric
  algorithm allowlist to prevent algorithm confusion attacks
- Static bearer token auth with constant-time comparison
- Lifecycle event broadcasting via tokio::broadcast
- Log streaming: real-time stdout/stderr via LogSink trait, history
  replay, follow mode
- Log search: full-text search via OpenSearch with workflow/step/stream
  filters
- Layered config: CLI flags > env vars > TOML file
- Fail-closed on OIDC discovery failure, fail-loud on config parse errors
- 2MB webhook payload size limit
- Blocked sensitive env var injection (PATH, LD_PRELOAD, etc.)
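
For orientation, a minimal wfe-server.toml exercising most of these
options might look like the following (schema per config.rs in this
commit; values are illustrative):

    grpc_addr = "0.0.0.0:50051"
    http_addr = "0.0.0.0:8080"

    [persistence]
    backend = "sqlite"
    path = "wfe.db"

    [auth]
    tokens = ["changeme"]
    oidc_issuer = "https://auth.example.com/realms/wfe"
    oidc_audience = "wfe-server"

    [auth.webhook_secrets]
    github = "hmac-secret"

    [[webhook.triggers]]
    source = "github"
    event = "push"
    match_ref = "refs/heads/main"
    workflow_id = "ci"
    version = 1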
2026-04-01 14:37:25 +01:00
parent 6dffb91626
commit cbbeaf6d67
9 changed files with 3729 additions and 0 deletions

wfe-server/Cargo.toml

@@ -0,0 +1,72 @@
[package]
name = "wfe-server"
version.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Headless workflow server with gRPC API and HTTP webhooks"

[[bin]]
name = "wfe-server"
path = "src/main.rs"

[dependencies]
# Internal
wfe-core = { workspace = true, features = ["test-support"] }
wfe = { path = "../wfe" }
wfe-yaml = { path = "../wfe-yaml", features = ["rustlang", "buildkit", "containerd"] }
wfe-server-protos = { path = "../wfe-server-protos" }
wfe-sqlite = { workspace = true }
wfe-postgres = { workspace = true }
wfe-valkey = { workspace = true }
wfe-opensearch = { workspace = true }
opensearch = { workspace = true }

# gRPC
tonic = "0.14"
tonic-health = "0.14"
prost-types = "0.14"

# HTTP (webhooks)
axum = { version = "0.8", features = ["json", "macros"] }
hyper = "1"
tower = "0.5"

# Runtime
tokio = { workspace = true }
async-trait = { workspace = true }

# Serialization
serde = { workspace = true }
serde_json = { workspace = true }
toml = "0.8"

# CLI
clap = { version = "4", features = ["derive", "env"] }

# Auth
hmac = "0.12"
sha2 = "0.10"
hex = "0.4"
jsonwebtoken = "9"
subtle = "2"
reqwest = { workspace = true }

# Observability
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }

# Utils
tokio-stream = "0.1"
dashmap = "6"

[dev-dependencies]
pretty_assertions = { workspace = true }
tokio = { workspace = true, features = ["test-util"] }
tempfile = { workspace = true }
rsa = { version = "0.9", features = ["pem"] }
rand = "0.8"
base64 = "0.22"

wfe-server/src/auth.rs

@@ -0,0 +1,769 @@
use std::sync::Arc;
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use serde::Deserialize;
use tokio::sync::RwLock;
use tonic::{Request, Status};
use crate::config::AuthConfig;
/// Asymmetric algorithms we accept. NEVER trust the JWT header's alg claim.
/// This prevents algorithm confusion attacks (CVE-2016-5431).
const ALLOWED_ALGORITHMS: &[Algorithm] = &[
Algorithm::RS256,
Algorithm::RS384,
Algorithm::RS512,
Algorithm::ES256,
Algorithm::ES384,
Algorithm::PS256,
Algorithm::PS384,
Algorithm::PS512,
Algorithm::EdDSA,
];
/// JWT claims payload. Signature, expiry, issuer, and audience checks are
/// driven by `jsonwebtoken::Validation`; these fields are deserialized but
/// not read directly yet.
#[derive(Debug, Deserialize)]
struct Claims {
#[allow(dead_code)]
sub: Option<String>,
#[allow(dead_code)]
iss: Option<String>,
#[allow(dead_code)]
aud: Option<serde_json::Value>,
}
/// Cached JWKS keys fetched from the OIDC provider.
#[derive(Clone)]
struct JwksCache {
keys: Vec<jsonwebtoken::jwk::Jwk>,
}
/// Auth state shared across gRPC interceptor calls.
pub struct AuthState {
pub(crate) config: AuthConfig,
jwks: RwLock<Option<JwksCache>>,
jwks_uri: Option<String>,
}
impl AuthState {
/// Create auth state. If OIDC is configured, discovers the JWKS URI.
/// Panics if OIDC is configured but discovery fails (fail-closed).
pub async fn new(config: AuthConfig) -> Self {
let jwks_uri = if let Some(ref issuer) = config.oidc_issuer {
// HIGH-03: Validate the issuer URL uses HTTPS in production. The
// localhost exception matches the host exactly, so lookalikes such
// as "http://localhost.evil.com" are rejected.
if !is_https_or_localhost(issuer) {
panic!(
"OIDC issuer must use HTTPS (got: {issuer}). \
Use http://localhost only for development."
);
}
match discover_jwks_uri(issuer).await {
Ok(uri) => {
// Validate the JWKS URI as well (second-order SSRF prevention).
if !is_https_or_localhost(&uri) {
panic!("JWKS URI from OIDC discovery must use HTTPS (got: {uri})");
}
tracing::info!(issuer = %issuer, jwks_uri = %uri, "OIDC discovery complete");
Some(uri)
}
Err(e) => {
// HIGH-05: Fail startup if OIDC is configured but discovery fails.
panic!("OIDC issuer configured but discovery failed: {e}");
}
}
} else {
None
};
let state = Self {
config,
jwks: RwLock::new(None),
jwks_uri,
};
// Pre-fetch JWKS.
if state.jwks_uri.is_some() {
state
.refresh_jwks()
.await
.expect("initial JWKS fetch failed — cannot start with OIDC enabled");
}
state
}
/// Refresh the cached JWKS from the provider.
pub async fn refresh_jwks(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let uri = self.jwks_uri.as_ref().ok_or("no JWKS URI")?;
let resp: JwksResponse = reqwest::get(uri).await?.json().await?;
let mut cache = self.jwks.write().await;
*cache = Some(JwksCache { keys: resp.keys });
tracing::debug!(key_count = cache.as_ref().unwrap().keys.len(), "JWKS refreshed");
Ok(())
}
/// Validate a request's authorization.
pub async fn check<T>(&self, request: &Request<T>) -> Result<(), Status> {
// No auth configured = open access.
if self.config.tokens.is_empty() && self.config.oidc_issuer.is_none() {
return Ok(());
}
let token = extract_bearer_token(request)?;
// CRITICAL-02: Use constant-time comparison for static tokens.
if check_static_tokens(&self.config.tokens, token) {
return Ok(());
}
// Try JWT/OIDC validation.
if self.config.oidc_issuer.is_some() {
return self.validate_jwt_cached(token);
}
Err(Status::unauthenticated("invalid token"))
}
/// Validate a JWT against the cached JWKS (synchronous — for use in interceptors).
/// Shared logic used by both `check()` and `make_interceptor()`.
fn validate_jwt_cached(&self, token: &str) -> Result<(), Status> {
let cache = self.jwks.try_read()
.map_err(|_| Status::unavailable("JWKS refresh in progress"))?;
let jwks = cache
.as_ref()
.ok_or_else(|| Status::unavailable("JWKS not loaded"))?;
let header = jsonwebtoken::decode_header(token)
.map_err(|e| Status::unauthenticated(format!("invalid JWT header: {e}")))?;
// CRITICAL-01: Never trust the JWT header's alg claim.
// Derive the algorithm from the JWK, not the token.
let kid = header.kid.as_deref();
// MEDIUM-06: Require kid when JWKS has multiple keys.
if kid.is_none() && jwks.keys.len() > 1 {
return Err(Status::unauthenticated(
"JWT missing kid header but JWKS has multiple keys",
));
}
let jwk = jwks
.keys
.iter()
.find(|k| match (kid, &k.common.key_id) {
(Some(kid), Some(k_kid)) => kid == k_kid,
(None, _) if jwks.keys.len() == 1 => true,
_ => false,
})
.ok_or_else(|| Status::unauthenticated("no matching key in JWKS"))?;
let decoding_key = DecodingKey::from_jwk(jwk)
.map_err(|e| Status::unauthenticated(format!("invalid JWK: {e}")))?;
// CRITICAL-01: Use the JWK's algorithm, NOT the token header's.
let alg = jwk
.common
.key_algorithm
.and_then(key_algorithm_to_jwt_algorithm)
.ok_or_else(|| {
Status::unauthenticated("JWK has no algorithm or unsupported algorithm")
})?;
// Double-check it's in our allowlist (no symmetric algorithms).
if !ALLOWED_ALGORITHMS.contains(&alg) {
return Err(Status::unauthenticated(format!(
"algorithm {alg:?} not in allowlist"
)));
}
let mut validation = Validation::new(alg);
if let Some(ref issuer) = self.config.oidc_issuer {
validation.set_issuer(&[issuer]);
}
if let Some(ref audience) = self.config.oidc_audience {
validation.set_audience(&[audience]);
} else {
validation.validate_aud = false;
}
decode::<Claims>(token, &decoding_key, &validation)
.map_err(|e| Status::unauthenticated(format!("JWT validation failed: {e}")))?;
Ok(())
}
}
/// CRITICAL-02: Constant-time token comparison to prevent timing attacks.
/// Public for use in webhook auth.
pub fn check_static_tokens_pub(tokens: &[String], candidate: &str) -> bool {
check_static_tokens(tokens, candidate)
}
fn check_static_tokens(tokens: &[String], candidate: &str) -> bool {
use subtle::ConstantTimeEq;
let candidate_bytes = candidate.as_bytes();
for token in tokens {
let token_bytes = token.as_bytes();
if token_bytes.len() == candidate_bytes.len()
&& bool::from(token_bytes.ct_eq(candidate_bytes))
{
return true;
}
}
false
}
/// Extract the bearer token from the request's `authorization` metadata
/// entry (the HTTP header as tonic exposes it).
fn extract_bearer_token<T>(request: &Request<T>) -> Result<&str, Status> {
let auth = request
.metadata()
.get("authorization")
.and_then(|v| v.to_str().ok())
.ok_or_else(|| Status::unauthenticated("missing authorization header"))?;
auth.strip_prefix("Bearer ")
.or_else(|| auth.strip_prefix("bearer "))
.ok_or_else(|| Status::unauthenticated("expected Bearer token"))
}
/// Map JWK key algorithm to jsonwebtoken Algorithm.
fn key_algorithm_to_jwt_algorithm(
ka: jsonwebtoken::jwk::KeyAlgorithm,
) -> Option<Algorithm> {
use jsonwebtoken::jwk::KeyAlgorithm as KA;
match ka {
KA::RS256 => Some(Algorithm::RS256),
KA::RS384 => Some(Algorithm::RS384),
KA::RS512 => Some(Algorithm::RS512),
KA::ES256 => Some(Algorithm::ES256),
KA::ES384 => Some(Algorithm::ES384),
KA::PS256 => Some(Algorithm::PS256),
KA::PS384 => Some(Algorithm::PS384),
KA::PS512 => Some(Algorithm::PS512),
KA::EdDSA => Some(Algorithm::EdDSA),
_ => None, // Reject HS256, HS384, HS512 and unknown algorithms.
}
}
/// OIDC discovery response (minimal — we only need jwks_uri).
#[derive(Deserialize)]
struct OidcDiscovery {
jwks_uri: String,
}
/// JWKS response.
#[derive(Deserialize)]
struct JwksResponse {
keys: Vec<jsonwebtoken::jwk::Jwk>,
}
/// True for HTTPS URLs, or plain HTTP on exactly `localhost` (dev only).
/// Exact host matching prevents bypasses like "http://localhost.evil.com".
fn is_https_or_localhost(url: &str) -> bool {
if url.starts_with("https://") {
return true;
}
match url.strip_prefix("http://localhost") {
Some(rest) => rest.is_empty() || rest.starts_with(':') || rest.starts_with('/'),
None => false,
}
}
/// Fetch the JWKS URI from the OIDC discovery endpoint.
async fn discover_jwks_uri(
issuer: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
let discovery_url = format!(
"{}/.well-known/openid-configuration",
issuer.trim_end_matches('/')
);
let resp: OidcDiscovery = reqwest::get(&discovery_url).await?.json().await?;
Ok(resp.jwks_uri)
}
/// Create a tonic interceptor that checks auth on every request.
pub fn make_interceptor(
auth: Arc<AuthState>,
) -> impl Fn(Request<()>) -> Result<Request<()>, Status> + Clone {
move |req: Request<()>| {
let auth = auth.clone();
// No auth configured = pass through.
if auth.config.tokens.is_empty() && auth.config.oidc_issuer.is_none() {
return Ok(req);
}
let token = extract_bearer_token(&req)?.to_string();
// CRITICAL-02: Constant-time static token check.
if check_static_tokens(&auth.config.tokens, &token) {
return Ok(req);
}
// Check JWT via shared validate_jwt_cached (deduplicated logic).
if auth.config.oidc_issuer.is_some() {
auth.validate_jwt_cached(&token)?;
return Ok(req);
}
Err(Status::unauthenticated("invalid token"))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn extract_bearer_from_metadata() {
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer mytoken".parse().unwrap());
assert_eq!(extract_bearer_token(&req).unwrap(), "mytoken");
}
#[test]
fn extract_bearer_lowercase() {
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "bearer mytoken".parse().unwrap());
assert_eq!(extract_bearer_token(&req).unwrap(), "mytoken");
}
#[test]
fn extract_bearer_missing_header() {
let req = Request::new(());
assert!(extract_bearer_token(&req).is_err());
}
#[test]
fn extract_bearer_wrong_scheme() {
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Basic abc".parse().unwrap());
assert!(extract_bearer_token(&req).is_err());
}
#[test]
fn constant_time_token_check_valid() {
let tokens = vec!["secret123".to_string()];
assert!(check_static_tokens(&tokens, "secret123"));
}
#[test]
fn constant_time_token_check_invalid() {
let tokens = vec!["secret123".to_string()];
assert!(!check_static_tokens(&tokens, "wrong"));
}
#[test]
fn constant_time_token_check_empty() {
let tokens: Vec<String> = vec![];
assert!(!check_static_tokens(&tokens, "anything"));
}
#[test]
fn constant_time_token_check_length_mismatch() {
let tokens = vec!["short".to_string()];
assert!(!check_static_tokens(&tokens, "muchlongertoken"));
}
#[tokio::test]
async fn no_auth_configured_allows_all() {
let state = AuthState {
config: AuthConfig::default(),
jwks: RwLock::new(None),
jwks_uri: None,
};
let req = Request::new(());
assert!(state.check(&req).await.is_ok());
}
#[tokio::test]
async fn static_token_valid() {
let config = AuthConfig {
tokens: vec!["secret123".to_string()],
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(None),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer secret123".parse().unwrap());
assert!(state.check(&req).await.is_ok());
}
#[tokio::test]
async fn static_token_invalid() {
let config = AuthConfig {
tokens: vec!["secret123".to_string()],
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(None),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer wrong".parse().unwrap());
assert!(state.check(&req).await.is_err());
}
#[tokio::test]
async fn static_token_missing_header() {
let config = AuthConfig {
tokens: vec!["secret123".to_string()],
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(None),
jwks_uri: None,
};
let req = Request::new(());
assert!(state.check(&req).await.is_err());
}
#[test]
fn interceptor_no_auth_passes() {
let state = Arc::new(AuthState {
config: AuthConfig::default(),
jwks: RwLock::new(None),
jwks_uri: None,
});
let interceptor = make_interceptor(state);
let req = Request::new(());
assert!(interceptor(req).is_ok());
}
#[test]
fn interceptor_static_token_valid() {
let config = AuthConfig {
tokens: vec!["tok".to_string()],
..Default::default()
};
let state = Arc::new(AuthState {
config,
jwks: RwLock::new(None),
jwks_uri: None,
});
let interceptor = make_interceptor(state);
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer tok".parse().unwrap());
assert!(interceptor(req).is_ok());
}
#[test]
fn interceptor_static_token_invalid() {
let config = AuthConfig {
tokens: vec!["tok".to_string()],
..Default::default()
};
let state = Arc::new(AuthState {
config,
jwks: RwLock::new(None),
jwks_uri: None,
});
let interceptor = make_interceptor(state);
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer bad".parse().unwrap());
assert!(interceptor(req).is_err());
}
/// Helper: create a test RSA key pair, JWK, and signed JWT.
fn make_test_jwt(
issuer: &str,
audience: Option<&str>,
) -> (Vec<jsonwebtoken::jwk::Jwk>, String) {
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
use rsa::RsaPrivateKey;
let mut rng = rand::thread_rng();
let private_key = RsaPrivateKey::new(&mut rng, 2048).unwrap();
let public_key = private_key.to_public_key();
use rsa::traits::PublicKeyParts;
let n = URL_SAFE_NO_PAD.encode(public_key.n().to_bytes_be());
let e = URL_SAFE_NO_PAD.encode(public_key.e().to_bytes_be());
let jwk: jsonwebtoken::jwk::Jwk = serde_json::from_value(serde_json::json!({
"kty": "RSA",
"use": "sig",
"alg": "RS256",
"kid": "test-key-1",
"n": n,
"e": e,
}))
.unwrap();
use rsa::pkcs1::EncodeRsaPrivateKey;
let pem = private_key
.to_pkcs1_pem(rsa::pkcs1::LineEnding::LF)
.unwrap();
let encoding_key =
jsonwebtoken::EncodingKey::from_rsa_pem(pem.as_bytes()).unwrap();
let mut header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::RS256);
header.kid = Some("test-key-1".to_string());
#[derive(serde::Serialize)]
struct TestClaims {
sub: String,
iss: String,
#[serde(skip_serializing_if = "Option::is_none")]
aud: Option<String>,
exp: u64,
iat: u64,
}
let now = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
let claims = TestClaims {
sub: "user@example.com".to_string(),
iss: issuer.to_string(),
aud: audience.map(String::from),
exp: now + 3600,
iat: now,
};
let token = jsonwebtoken::encode(&header, &claims, &encoding_key).unwrap();
(vec![jwk], token)
}
#[tokio::test]
async fn jwt_validation_valid_token() {
let issuer = "https://auth.example.com";
let (jwks, token) = make_test_jwt(issuer, None);
let config = AuthConfig {
oidc_issuer: Some(issuer.to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: jwks })),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", format!("Bearer {token}").parse().unwrap());
assert!(state.check(&req).await.is_ok());
}
#[tokio::test]
async fn jwt_validation_wrong_issuer() {
let (jwks, token) = make_test_jwt("https://wrong-issuer.com", None);
let config = AuthConfig {
oidc_issuer: Some("https://expected-issuer.com".to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: jwks })),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", format!("Bearer {token}").parse().unwrap());
assert!(state.check(&req).await.is_err());
}
#[tokio::test]
async fn jwt_validation_with_audience() {
let issuer = "https://auth.example.com";
let (jwks, token) = make_test_jwt(issuer, Some("wfe-server"));
let config = AuthConfig {
oidc_issuer: Some(issuer.to_string()),
oidc_audience: Some("wfe-server".to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: jwks })),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", format!("Bearer {token}").parse().unwrap());
assert!(state.check(&req).await.is_ok());
}
#[tokio::test]
async fn jwt_validation_wrong_audience() {
let issuer = "https://auth.example.com";
let (jwks, token) = make_test_jwt(issuer, Some("wrong-audience"));
let config = AuthConfig {
oidc_issuer: Some(issuer.to_string()),
oidc_audience: Some("wfe-server".to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: jwks })),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", format!("Bearer {token}").parse().unwrap());
assert!(state.check(&req).await.is_err());
}
#[tokio::test]
async fn jwt_validation_garbage_token() {
let config = AuthConfig {
oidc_issuer: Some("https://auth.example.com".to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: vec![] })),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer not.a.jwt".parse().unwrap());
assert!(state.check(&req).await.is_err());
}
#[tokio::test]
async fn jwt_validation_no_jwks_loaded() {
let config = AuthConfig {
oidc_issuer: Some("https://auth.example.com".to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(None),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer some.jwt.token".parse().unwrap());
let err = state.check(&req).await.unwrap_err();
assert_eq!(err.code(), tonic::Code::Unavailable);
}
#[test]
fn interceptor_jwt_valid() {
let issuer = "https://auth.example.com";
let (jwks, token) = make_test_jwt(issuer, None);
let config = AuthConfig {
oidc_issuer: Some(issuer.to_string()),
..Default::default()
};
let state = Arc::new(AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: jwks })),
jwks_uri: None,
});
let interceptor = make_interceptor(state);
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", format!("Bearer {token}").parse().unwrap());
assert!(interceptor(req).is_ok());
}
#[test]
fn interceptor_jwt_invalid() {
let config = AuthConfig {
oidc_issuer: Some("https://auth.example.com".to_string()),
..Default::default()
};
let state = Arc::new(AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: vec![] })),
jwks_uri: None,
});
let interceptor = make_interceptor(state);
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", "Bearer bad.jwt.token".parse().unwrap());
assert!(interceptor(req).is_err());
}
#[test]
fn key_algorithm_mapping() {
use jsonwebtoken::jwk::KeyAlgorithm as KA;
assert_eq!(key_algorithm_to_jwt_algorithm(KA::RS256), Some(Algorithm::RS256));
assert_eq!(key_algorithm_to_jwt_algorithm(KA::ES256), Some(Algorithm::ES256));
assert_eq!(key_algorithm_to_jwt_algorithm(KA::EdDSA), Some(Algorithm::EdDSA));
// HS256 should be rejected (symmetric algorithm).
assert_eq!(key_algorithm_to_jwt_algorithm(KA::HS256), None);
assert_eq!(key_algorithm_to_jwt_algorithm(KA::HS384), None);
assert_eq!(key_algorithm_to_jwt_algorithm(KA::HS512), None);
}
#[test]
fn allowed_algorithms_rejects_symmetric() {
assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS256));
assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS384));
assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS512));
}
// ── Security regression tests ────────────────────────────────────
#[test]
fn security_hs256_rejected_in_allowlist() {
// CRITICAL-01: HS256 must NEVER be in the allowlist.
// An attacker with the public RSA key could forge tokens if HS256 is allowed.
assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS256));
}
#[test]
fn security_key_algorithm_rejects_all_symmetric() {
// CRITICAL-01: key_algorithm_to_jwt_algorithm must return None for symmetric algs.
use jsonwebtoken::jwk::KeyAlgorithm as KA;
assert!(key_algorithm_to_jwt_algorithm(KA::HS256).is_none());
assert!(key_algorithm_to_jwt_algorithm(KA::HS384).is_none());
assert!(key_algorithm_to_jwt_algorithm(KA::HS512).is_none());
}
#[test]
fn security_constant_time_comparison_used() {
// CRITICAL-02: Static token check must use constant-time comparison.
// Verify that equal-length wrong tokens don't short-circuit.
let tokens = vec!["abcdefgh".to_string()];
// Both are 8 chars — a timing attack would try this.
assert!(!check_static_tokens(&tokens, "abcdefgX"));
assert!(check_static_tokens(&tokens, "abcdefgh"));
}
#[tokio::test]
#[should_panic(expected = "OIDC issuer must use HTTPS")]
async fn security_oidc_issuer_requires_https() {
// HIGH-03: Non-HTTPS issuers must be rejected (SSRF prevention).
let config = AuthConfig {
oidc_issuer: Some("http://evil.internal:8080".to_string()),
..Default::default()
};
AuthState::new(config).await;
}
#[tokio::test]
async fn security_jwt_requires_kid_with_multiple_keys() {
// MEDIUM-06: When JWKS has multiple keys, JWT must have kid header.
let (mut jwks, token) = make_test_jwt("https://auth.example.com", None);
// Duplicate the key with a different kid.
let mut key2 = jwks[0].clone();
key2.common.key_id = Some("test-key-2".to_string());
jwks.push(key2);
// We cannot easily strip the kid from an already-signed token, so this
// exercises the positive path: with multiple keys in the JWKS, the
// token's kid ("test-key-1" from make_test_jwt) must select the right key.
let config = AuthConfig {
oidc_issuer: Some("https://auth.example.com".to_string()),
..Default::default()
};
let state = AuthState {
config,
jwks: RwLock::new(Some(JwksCache { keys: jwks })),
jwks_uri: None,
};
let mut req = Request::new(());
req.metadata_mut()
.insert("authorization", format!("Bearer {token}").parse().unwrap());
// Should succeed because the token has kid="test-key-1" which matches.
assert!(state.check(&req).await.is_ok());
}
}
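
Wiring note (a sketch, not part of this diff): the interceptor is meant to
compose with the tonic-generated service wrapper. Assuming main.rs (not
shown here) uses the generated WfeServer type from wfe-server-protos, the
hookup looks roughly like:

    use wfe_server_protos::wfe::v1::wfe_server::WfeServer;

    // `auth_config`, `service`, and `addr` are assumed to be built already.
    let auth = std::sync::Arc::new(auth::AuthState::new(auth_config).await);
    let interceptor = auth::make_interceptor(auth);
    tonic::transport::Server::builder()
        .add_service(WfeServer::with_interceptor(service, interceptor))
        .serve(addr)
        .await?;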

wfe-server/src/config.rs

@@ -0,0 +1,363 @@
use std::collections::HashMap;
use std::net::SocketAddr;
use std::path::PathBuf;
use clap::Parser;
use serde::Deserialize;
/// WFE workflow server.
#[derive(Parser, Debug)]
#[command(name = "wfe-server", version, about)]
pub struct Cli {
/// Config file path.
#[arg(short, long, default_value = "wfe-server.toml")]
pub config: PathBuf,
/// gRPC listen address.
#[arg(long, env = "WFE_GRPC_ADDR")]
pub grpc_addr: Option<SocketAddr>,
/// HTTP listen address (webhooks).
#[arg(long, env = "WFE_HTTP_ADDR")]
pub http_addr: Option<SocketAddr>,
/// Persistence backend: sqlite or postgres.
#[arg(long, env = "WFE_PERSISTENCE")]
pub persistence: Option<String>,
/// Database URL or path.
#[arg(long, env = "WFE_DB_URL")]
pub db_url: Option<String>,
/// Queue backend: memory or valkey.
#[arg(long, env = "WFE_QUEUE")]
pub queue: Option<String>,
/// Queue URL (for valkey).
#[arg(long, env = "WFE_QUEUE_URL")]
pub queue_url: Option<String>,
/// OpenSearch URL (enables log + workflow search).
#[arg(long, env = "WFE_SEARCH_URL")]
pub search_url: Option<String>,
/// Directory to auto-load YAML workflow definitions from.
#[arg(long, env = "WFE_WORKFLOWS_DIR")]
pub workflows_dir: Option<PathBuf>,
/// Comma-separated bearer tokens for API auth.
#[arg(long, env = "WFE_AUTH_TOKENS")]
pub auth_tokens: Option<String>,
}
/// Server configuration (deserialized from TOML).
#[derive(Debug, Deserialize, Clone)]
#[serde(default)]
pub struct ServerConfig {
pub grpc_addr: SocketAddr,
pub http_addr: SocketAddr,
pub persistence: PersistenceConfig,
pub queue: QueueConfig,
pub search: Option<SearchConfig>,
pub auth: AuthConfig,
pub webhook: WebhookConfig,
pub workflows_dir: Option<PathBuf>,
}
impl Default for ServerConfig {
fn default() -> Self {
Self {
grpc_addr: "0.0.0.0:50051".parse().unwrap(),
http_addr: "0.0.0.0:8080".parse().unwrap(),
persistence: PersistenceConfig::default(),
queue: QueueConfig::default(),
search: None,
auth: AuthConfig::default(),
webhook: WebhookConfig::default(),
workflows_dir: None,
}
}
}
#[derive(Debug, Deserialize, Clone)]
#[serde(tag = "backend")]
pub enum PersistenceConfig {
#[serde(rename = "sqlite")]
Sqlite { path: String },
#[serde(rename = "postgres")]
Postgres { url: String },
}
impl Default for PersistenceConfig {
fn default() -> Self {
Self::Sqlite {
path: "wfe.db".to_string(),
}
}
}
#[derive(Debug, Deserialize, Clone)]
#[serde(tag = "backend")]
pub enum QueueConfig {
#[serde(rename = "memory")]
InMemory,
#[serde(rename = "valkey")]
Valkey { url: String },
}
impl Default for QueueConfig {
fn default() -> Self {
Self::InMemory
}
}
#[derive(Debug, Deserialize, Clone)]
pub struct SearchConfig {
pub url: String,
}
#[derive(Debug, Deserialize, Clone, Default)]
pub struct AuthConfig {
/// Static bearer tokens (simple auth, no OIDC needed).
#[serde(default)]
pub tokens: Vec<String>,
/// OIDC issuer URL (e.g., https://auth.example.com/realms/myapp).
/// Enables JWT validation via OIDC discovery + JWKS.
#[serde(default)]
pub oidc_issuer: Option<String>,
/// Expected JWT audience claim.
#[serde(default)]
pub oidc_audience: Option<String>,
/// Webhook HMAC secrets per source.
#[serde(default)]
pub webhook_secrets: HashMap<String, String>,
}
#[derive(Debug, Deserialize, Clone, Default)]
pub struct WebhookConfig {
#[serde(default)]
pub triggers: Vec<WebhookTrigger>,
}
#[derive(Debug, Deserialize, Clone)]
pub struct WebhookTrigger {
pub source: String,
pub event: String,
#[serde(default)]
pub match_ref: Option<String>,
pub workflow_id: String,
pub version: u32,
#[serde(default)]
pub data_mapping: HashMap<String, String>,
}
/// Load configuration with layered overrides: CLI > env > file.
pub fn load(cli: &Cli) -> ServerConfig {
let mut config = if cli.config.exists() {
let content = std::fs::read_to_string(&cli.config)
.unwrap_or_else(|e| panic!("failed to read config file {}: {e}", cli.config.display()));
toml::from_str(&content)
.unwrap_or_else(|e| panic!("failed to parse config file {}: {e}", cli.config.display()))
} else {
ServerConfig::default()
};
if let Some(addr) = cli.grpc_addr {
config.grpc_addr = addr;
}
if let Some(addr) = cli.http_addr {
config.http_addr = addr;
}
if let Some(ref dir) = cli.workflows_dir {
config.workflows_dir = Some(dir.clone());
}
// Persistence override.
if let Some(ref backend) = cli.persistence {
let url = cli
.db_url
.clone()
.unwrap_or_else(|| "wfe.db".to_string());
config.persistence = match backend.as_str() {
"postgres" => PersistenceConfig::Postgres { url },
_ => PersistenceConfig::Sqlite { path: url },
};
} else if let Some(ref url) = cli.db_url {
// Infer backend from URL.
if url.starts_with("postgres") {
config.persistence = PersistenceConfig::Postgres { url: url.clone() };
} else {
config.persistence = PersistenceConfig::Sqlite { path: url.clone() };
}
}
// Queue override.
if let Some(ref backend) = cli.queue {
config.queue = match backend.as_str() {
"valkey" | "redis" => {
let url = cli
.queue_url
.clone()
.unwrap_or_else(|| "redis://127.0.0.1:6379".to_string());
QueueConfig::Valkey { url }
}
_ => QueueConfig::InMemory,
};
}
// Search override.
if let Some(ref url) = cli.search_url {
config.search = Some(SearchConfig { url: url.clone() });
}
// Auth tokens override.
if let Some(ref tokens) = cli.auth_tokens {
config.auth.tokens = tokens
.split(',')
.map(|t| t.trim().to_string())
.filter(|t| !t.is_empty())
.collect();
}
config
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn default_config() {
let config = ServerConfig::default();
assert_eq!(config.grpc_addr, "0.0.0.0:50051".parse().unwrap());
assert_eq!(config.http_addr, "0.0.0.0:8080".parse().unwrap());
assert!(matches!(config.persistence, PersistenceConfig::Sqlite { .. }));
assert!(matches!(config.queue, QueueConfig::InMemory));
assert!(config.search.is_none());
assert!(config.auth.tokens.is_empty());
assert!(config.webhook.triggers.is_empty());
}
#[test]
fn parse_toml_config() {
let toml = r#"
grpc_addr = "127.0.0.1:9090"
http_addr = "127.0.0.1:8081"
[persistence]
backend = "postgres"
url = "postgres://localhost/wfe"
[queue]
backend = "valkey"
url = "redis://localhost:6379"
[search]
url = "http://localhost:9200"
[auth]
tokens = ["token1", "token2"]
[auth.webhook_secrets]
github = "mysecret"
[[webhook.triggers]]
source = "github"
event = "push"
match_ref = "refs/heads/main"
workflow_id = "ci"
version = 1
"#;
let config: ServerConfig = toml::from_str(toml).unwrap();
assert_eq!(config.grpc_addr, "127.0.0.1:9090".parse().unwrap());
assert!(matches!(config.persistence, PersistenceConfig::Postgres { .. }));
assert!(matches!(config.queue, QueueConfig::Valkey { .. }));
assert!(config.search.is_some());
assert_eq!(config.auth.tokens.len(), 2);
assert_eq!(config.auth.webhook_secrets.get("github").unwrap(), "mysecret");
assert_eq!(config.webhook.triggers.len(), 1);
assert_eq!(config.webhook.triggers[0].workflow_id, "ci");
}
#[test]
fn cli_overrides_file() {
let cli = Cli {
config: PathBuf::from("/nonexistent"),
grpc_addr: Some("127.0.0.1:9999".parse().unwrap()),
http_addr: None,
persistence: Some("postgres".to_string()),
db_url: Some("postgres://db/wfe".to_string()),
queue: Some("valkey".to_string()),
queue_url: Some("redis://valkey:6379".to_string()),
search_url: Some("http://os:9200".to_string()),
workflows_dir: Some(PathBuf::from("/workflows")),
auth_tokens: Some("tok1, tok2".to_string()),
};
let config = load(&cli);
assert_eq!(config.grpc_addr, "127.0.0.1:9999".parse().unwrap());
assert!(matches!(config.persistence, PersistenceConfig::Postgres { ref url } if url == "postgres://db/wfe"));
assert!(matches!(config.queue, QueueConfig::Valkey { ref url } if url == "redis://valkey:6379"));
assert_eq!(config.search.unwrap().url, "http://os:9200");
assert_eq!(config.workflows_dir.unwrap(), PathBuf::from("/workflows"));
assert_eq!(config.auth.tokens, vec!["tok1", "tok2"]);
}
#[test]
fn infer_postgres_from_url() {
let cli = Cli {
config: PathBuf::from("/nonexistent"),
grpc_addr: None,
http_addr: None,
persistence: None,
db_url: Some("postgres://localhost/wfe".to_string()),
queue: None,
queue_url: None,
search_url: None,
workflows_dir: None,
auth_tokens: None,
};
let config = load(&cli);
assert!(matches!(config.persistence, PersistenceConfig::Postgres { .. }));
}
// ── Security regression tests ──
#[test]
#[should_panic(expected = "failed to parse config file")]
fn security_malformed_config_panics() {
// HIGH-19: Malformed config must NOT silently fall back to defaults.
let tmp = tempfile::NamedTempFile::new().unwrap();
std::fs::write(tmp.path(), "this is not { valid toml @@@@").unwrap();
let cli = Cli {
config: tmp.path().to_path_buf(),
grpc_addr: None,
http_addr: None,
persistence: None,
db_url: None,
queue: None,
queue_url: None,
search_url: None,
workflows_dir: None,
auth_tokens: None,
};
load(&cli);
}
#[test]
fn trigger_data_mapping() {
let toml = r#"
[[triggers]]
source = "github"
event = "push"
workflow_id = "ci"
version = 1
[triggers.data_mapping]
repo = "$.repository.full_name"
commit = "$.head_commit.id"
"#;
let config: WebhookConfig = toml::from_str(toml).unwrap();
assert_eq!(config.triggers[0].data_mapping.len(), 2);
assert_eq!(config.triggers[0].data_mapping["repo"], "$.repository.full_name");
}
}
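
Layering in practice (illustrative invocation; flag and env names come from
the Cli derive above, where clap maps queue_url to --queue-url and reads
WFE_DB_URL via the env attribute):

    WFE_DB_URL=postgres://db/wfe wfe-server \
        --config wfe-server.toml \
        --queue valkey --queue-url redis://valkey:6379

Here the CLI queue flags win over anything in the file, and the Postgres
backend is inferred from the WFE_DB_URL scheme since --persistence is not
given.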

wfe-server/src/grpc.rs

@@ -0,0 +1,862 @@
use std::collections::{BTreeMap, HashMap};
use std::sync::Arc;
use tonic::{Request, Response, Status};
use wfe_server_protos::wfe::v1::*;
use wfe_server_protos::wfe::v1::wfe_server::Wfe;
pub struct WfeService {
host: Arc<wfe::WorkflowHost>,
lifecycle_bus: Arc<crate::lifecycle_bus::BroadcastLifecyclePublisher>,
log_store: Arc<crate::log_store::LogStore>,
log_search: Option<Arc<crate::log_search::LogSearchIndex>>,
}
impl WfeService {
pub fn new(
host: Arc<wfe::WorkflowHost>,
lifecycle_bus: Arc<crate::lifecycle_bus::BroadcastLifecyclePublisher>,
log_store: Arc<crate::log_store::LogStore>,
) -> Self {
Self { host, lifecycle_bus, log_store, log_search: None }
}
pub fn with_log_search(mut self, index: Arc<crate::log_search::LogSearchIndex>) -> Self {
self.log_search = Some(index);
self
}
}
#[tonic::async_trait]
impl Wfe for WfeService {
// ── Definitions ──────────────────────────────────────────────────
async fn register_workflow(
&self,
request: Request<RegisterWorkflowRequest>,
) -> Result<Response<RegisterWorkflowResponse>, Status> {
let req = request.into_inner();
let config: HashMap<String, serde_json::Value> = req
.config
.into_iter()
.map(|(k, v)| (k, serde_json::Value::String(v)))
.collect();
let workflows = wfe_yaml::load_workflow_from_str(&req.yaml, &config)
.map_err(|e| Status::invalid_argument(format!("YAML compilation failed: {e}")))?;
let mut definitions = Vec::new();
for compiled in workflows {
for (key, factory) in compiled.step_factories {
self.host.register_step_factory(&key, factory).await;
}
let id = compiled.definition.id.clone();
let version = compiled.definition.version;
let step_count = compiled.definition.steps.len() as u32;
self.host
.register_workflow_definition(compiled.definition)
.await;
definitions.push(RegisteredDefinition {
definition_id: id,
version,
step_count,
});
}
Ok(Response::new(RegisterWorkflowResponse { definitions }))
}
async fn list_definitions(
&self,
_request: Request<ListDefinitionsRequest>,
) -> Result<Response<ListDefinitionsResponse>, Status> {
// TODO: add list_definitions() to WorkflowHost
Ok(Response::new(ListDefinitionsResponse {
definitions: vec![],
}))
}
// ── Instances ────────────────────────────────────────────────────
async fn start_workflow(
&self,
request: Request<StartWorkflowRequest>,
) -> Result<Response<StartWorkflowResponse>, Status> {
let req = request.into_inner();
let data = req
.data
.map(struct_to_json)
.unwrap_or_else(|| serde_json::json!({}));
let workflow_id = self
.host
.start_workflow(&req.definition_id, req.version, data)
.await
.map_err(|e| Status::internal(format!("failed to start workflow: {e}")))?;
Ok(Response::new(StartWorkflowResponse { workflow_id }))
}
async fn get_workflow(
&self,
request: Request<GetWorkflowRequest>,
) -> Result<Response<GetWorkflowResponse>, Status> {
let req = request.into_inner();
let instance = self
.host
.get_workflow(&req.workflow_id)
.await
.map_err(|e| Status::not_found(format!("workflow not found: {e}")))?;
Ok(Response::new(GetWorkflowResponse {
instance: Some(workflow_to_proto(&instance)),
}))
}
async fn cancel_workflow(
&self,
request: Request<CancelWorkflowRequest>,
) -> Result<Response<CancelWorkflowResponse>, Status> {
let req = request.into_inner();
self.host
.terminate_workflow(&req.workflow_id)
.await
.map_err(|e| Status::internal(format!("failed to cancel: {e}")))?;
Ok(Response::new(CancelWorkflowResponse {}))
}
async fn suspend_workflow(
&self,
request: Request<SuspendWorkflowRequest>,
) -> Result<Response<SuspendWorkflowResponse>, Status> {
let req = request.into_inner();
self.host
.suspend_workflow(&req.workflow_id)
.await
.map_err(|e| Status::internal(format!("failed to suspend: {e}")))?;
Ok(Response::new(SuspendWorkflowResponse {}))
}
async fn resume_workflow(
&self,
request: Request<ResumeWorkflowRequest>,
) -> Result<Response<ResumeWorkflowResponse>, Status> {
let req = request.into_inner();
self.host
.resume_workflow(&req.workflow_id)
.await
.map_err(|e| Status::internal(format!("failed to resume: {e}")))?;
Ok(Response::new(ResumeWorkflowResponse {}))
}
async fn search_workflows(
&self,
_request: Request<SearchWorkflowsRequest>,
) -> Result<Response<SearchWorkflowsResponse>, Status> {
// TODO: implement with SearchIndex
Ok(Response::new(SearchWorkflowsResponse {
results: vec![],
total: 0,
}))
}
// ── Events ───────────────────────────────────────────────────────
async fn publish_event(
&self,
request: Request<PublishEventRequest>,
) -> Result<Response<PublishEventResponse>, Status> {
let req = request.into_inner();
let data = req
.data
.map(struct_to_json)
.unwrap_or_else(|| serde_json::json!({}));
self.host
.publish_event(&req.event_name, &req.event_key, data)
.await
.map_err(|e| Status::internal(format!("failed to publish event: {e}")))?;
Ok(Response::new(PublishEventResponse {
event_id: String::new(),
}))
}
// ── Streaming ────────────────────────────────────────────────────
type WatchLifecycleStream =
tokio_stream::wrappers::ReceiverStream<Result<LifecycleEvent, Status>>;
async fn watch_lifecycle(
&self,
request: Request<WatchLifecycleRequest>,
) -> Result<Response<Self::WatchLifecycleStream>, Status> {
let req = request.into_inner();
let filter_workflow_id = if req.workflow_id.is_empty() {
None
} else {
Some(req.workflow_id)
};
let mut broadcast_rx = self.lifecycle_bus.subscribe();
let (tx, rx) = tokio::sync::mpsc::channel(256);
tokio::spawn(async move {
loop {
match broadcast_rx.recv().await {
Ok(event) => {
// Apply workflow_id filter.
if let Some(ref filter) = filter_workflow_id {
if event.workflow_instance_id != *filter {
continue;
}
}
let proto_event = lifecycle_event_to_proto(&event);
if tx.send(Ok(proto_event)).await.is_err() {
break; // Client disconnected.
}
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
tracing::warn!(lagged = n, "lifecycle watcher lagged, skipping events");
continue;
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
}
}
});
Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new(rx)))
}
type StreamLogsStream = tokio_stream::wrappers::ReceiverStream<Result<LogEntry, Status>>;
async fn stream_logs(
&self,
request: Request<StreamLogsRequest>,
) -> Result<Response<Self::StreamLogsStream>, Status> {
let req = request.into_inner();
let workflow_id = req.workflow_id.clone();
let step_name_filter = if req.step_name.is_empty() {
None
} else {
Some(req.step_name)
};
let (tx, rx) = tokio::sync::mpsc::channel(256);
let log_store = self.log_store.clone();
tokio::spawn(async move {
// 1. Replay history first.
let history = log_store.get_history(&workflow_id, None);
for chunk in history {
if let Some(ref filter) = step_name_filter {
if chunk.step_name != *filter {
continue;
}
}
let entry = log_chunk_to_proto(&chunk);
if tx.send(Ok(entry)).await.is_err() {
return; // Client disconnected.
}
}
// 2. If follow mode, switch to live broadcast.
if req.follow {
let mut broadcast_rx = log_store.subscribe(&workflow_id);
loop {
match broadcast_rx.recv().await {
Ok(chunk) => {
if let Some(ref filter) = step_name_filter {
if chunk.step_name != *filter {
continue;
}
}
let entry = log_chunk_to_proto(&chunk);
if tx.send(Ok(entry)).await.is_err() {
break;
}
}
Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
tracing::warn!(lagged = n, "log stream lagged");
continue;
}
Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
}
}
}
// If not follow mode, the stream ends after history replay.
});
Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new(rx)))
}
// ── Search ───────────────────────────────────────────────────────
async fn search_logs(
&self,
request: Request<SearchLogsRequest>,
) -> Result<Response<SearchLogsResponse>, Status> {
let Some(ref search) = self.log_search else {
return Err(Status::unavailable("log search not configured — set --search-url"));
};
let req = request.into_inner();
let workflow_id = if req.workflow_id.is_empty() { None } else { Some(req.workflow_id.as_str()) };
let step_name = if req.step_name.is_empty() { None } else { Some(req.step_name.as_str()) };
let stream_filter = match req.stream_filter {
x if x == LogStream::Stdout as i32 => Some("stdout"),
x if x == LogStream::Stderr as i32 => Some("stderr"),
_ => None,
};
let take = if req.take == 0 { 50 } else { req.take };
let (hits, total) = search
.search(&req.query, workflow_id, step_name, stream_filter, req.skip, take)
.await
.map_err(|e| Status::internal(format!("search failed: {e}")))?;
let results = hits
.into_iter()
.map(|h| {
let stream = match h.stream.as_str() {
"stdout" => LogStream::Stdout as i32,
"stderr" => LogStream::Stderr as i32,
_ => LogStream::Unspecified as i32,
};
LogSearchResult {
workflow_id: h.workflow_id,
definition_id: h.definition_id,
step_name: h.step_name,
line: h.line,
stream,
timestamp: Some(datetime_to_timestamp(&h.timestamp)),
}
})
.collect();
Ok(Response::new(SearchLogsResponse { results, total }))
}
}
// ── Conversion helpers ──────────────────────────────────────────────
fn struct_to_json(s: prost_types::Struct) -> serde_json::Value {
let map: serde_json::Map<String, serde_json::Value> = s
.fields
.into_iter()
.map(|(k, v)| (k, prost_value_to_json(v)))
.collect();
serde_json::Value::Object(map)
}
fn prost_value_to_json(v: prost_types::Value) -> serde_json::Value {
use prost_types::value::Kind;
match v.kind {
Some(Kind::NullValue(_)) => serde_json::Value::Null,
Some(Kind::NumberValue(n)) => serde_json::json!(n),
Some(Kind::StringValue(s)) => serde_json::Value::String(s),
Some(Kind::BoolValue(b)) => serde_json::Value::Bool(b),
Some(Kind::StructValue(s)) => struct_to_json(s),
Some(Kind::ListValue(l)) => {
serde_json::Value::Array(l.values.into_iter().map(prost_value_to_json).collect())
}
None => serde_json::Value::Null,
}
}
fn json_to_struct(v: &serde_json::Value) -> prost_types::Struct {
let fields: BTreeMap<String, prost_types::Value> = match v.as_object() {
Some(obj) => obj
.iter()
.map(|(k, v)| (k.clone(), json_to_prost_value(v)))
.collect(),
None => BTreeMap::new(),
};
prost_types::Struct { fields }
}
fn json_to_prost_value(v: &serde_json::Value) -> prost_types::Value {
use prost_types::value::Kind;
let kind = match v {
serde_json::Value::Null => Kind::NullValue(0),
serde_json::Value::Bool(b) => Kind::BoolValue(*b),
serde_json::Value::Number(n) => Kind::NumberValue(n.as_f64().unwrap_or(0.0)),
serde_json::Value::String(s) => Kind::StringValue(s.clone()),
serde_json::Value::Array(arr) => Kind::ListValue(prost_types::ListValue {
values: arr.iter().map(json_to_prost_value).collect(),
}),
serde_json::Value::Object(_) => Kind::StructValue(json_to_struct(v)),
};
prost_types::Value { kind: Some(kind) }
}
fn log_chunk_to_proto(chunk: &wfe_core::traits::LogChunk) -> LogEntry {
use wfe_core::traits::LogStreamType;
let stream = match chunk.stream {
LogStreamType::Stdout => LogStream::Stdout as i32,
LogStreamType::Stderr => LogStream::Stderr as i32,
};
LogEntry {
workflow_id: chunk.workflow_id.clone(),
step_name: chunk.step_name.clone(),
step_id: chunk.step_id as u32,
stream,
data: chunk.data.clone(),
timestamp: Some(datetime_to_timestamp(&chunk.timestamp)),
}
}
fn lifecycle_event_to_proto(e: &wfe_core::models::LifecycleEvent) -> LifecycleEvent {
use wfe_core::models::LifecycleEventType as LET;
// Proto enum — prost strips the LIFECYCLE_EVENT_TYPE_ prefix.
use wfe_server_protos::wfe::v1::LifecycleEventType as PLET;
let (event_type, step_id, step_name, error_message) = match &e.event_type {
LET::Started => (PLET::Started as i32, 0, String::new(), String::new()),
LET::Completed => (PLET::Completed as i32, 0, String::new(), String::new()),
LET::Terminated => (PLET::Terminated as i32, 0, String::new(), String::new()),
LET::Suspended => (PLET::Suspended as i32, 0, String::new(), String::new()),
LET::Resumed => (PLET::Resumed as i32, 0, String::new(), String::new()),
LET::Error { message } => (PLET::Error as i32, 0, String::new(), message.clone()),
LET::StepStarted { step_id, step_name } => (PLET::StepStarted as i32, *step_id as u32, step_name.clone().unwrap_or_default(), String::new()),
LET::StepCompleted { step_id, step_name } => (PLET::StepCompleted as i32, *step_id as u32, step_name.clone().unwrap_or_default(), String::new()),
};
LifecycleEvent {
event_time: Some(datetime_to_timestamp(&e.event_time_utc)),
workflow_id: e.workflow_instance_id.clone(),
definition_id: e.workflow_definition_id.clone(),
version: e.version,
event_type,
step_id,
step_name,
error_message,
}
}
fn datetime_to_timestamp(dt: &chrono::DateTime<chrono::Utc>) -> prost_types::Timestamp {
prost_types::Timestamp {
seconds: dt.timestamp(),
nanos: dt.timestamp_subsec_nanos() as i32,
}
}
fn workflow_to_proto(w: &wfe_core::models::WorkflowInstance) -> WorkflowInstance {
WorkflowInstance {
id: w.id.clone(),
definition_id: w.workflow_definition_id.clone(),
version: w.version,
description: w.description.clone().unwrap_or_default(),
reference: w.reference.clone().unwrap_or_default(),
status: match w.status {
wfe_core::models::WorkflowStatus::Runnable => WorkflowStatus::Runnable as i32,
wfe_core::models::WorkflowStatus::Suspended => WorkflowStatus::Suspended as i32,
wfe_core::models::WorkflowStatus::Complete => WorkflowStatus::Complete as i32,
wfe_core::models::WorkflowStatus::Terminated => WorkflowStatus::Terminated as i32,
},
data: Some(json_to_struct(&w.data)),
create_time: Some(datetime_to_timestamp(&w.create_time)),
complete_time: w.complete_time.as_ref().map(datetime_to_timestamp),
execution_pointers: w
.execution_pointers
.iter()
.map(pointer_to_proto)
.collect(),
}
}
fn pointer_to_proto(p: &wfe_core::models::ExecutionPointer) -> ExecutionPointer {
use wfe_core::models::PointerStatus as PS;
let status = match p.status {
PS::Pending | PS::PendingPredecessor => PointerStatus::Pending as i32,
PS::Running => PointerStatus::Running as i32,
PS::Complete => PointerStatus::Complete as i32,
PS::Sleeping => PointerStatus::Sleeping as i32,
PS::WaitingForEvent => PointerStatus::WaitingForEvent as i32,
PS::Failed => PointerStatus::Failed as i32,
PS::Skipped => PointerStatus::Skipped as i32,
PS::Compensated | PS::Cancelled => PointerStatus::Cancelled as i32,
};
ExecutionPointer {
id: p.id.clone(),
step_id: p.step_id as u32,
step_name: p.step_name.clone().unwrap_or_default(),
status,
start_time: p.start_time.as_ref().map(datetime_to_timestamp),
end_time: p.end_time.as_ref().map(datetime_to_timestamp),
retry_count: p.retry_count,
active: p.active,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn struct_to_json_roundtrip() {
let original = serde_json::json!({
"name": "test",
"count": 42.0,
"active": true,
"tags": ["a", "b"],
"nested": { "key": "value" }
});
let proto_struct = json_to_struct(&original);
let back = struct_to_json(proto_struct);
assert_eq!(original, back);
}
#[test]
fn json_null_roundtrip() {
let v = serde_json::Value::Null;
let pv = json_to_prost_value(&v);
let back = prost_value_to_json(pv);
assert_eq!(back, serde_json::Value::Null);
}
#[test]
fn json_string_roundtrip() {
let v = serde_json::Value::String("hello".to_string());
let pv = json_to_prost_value(&v);
let back = prost_value_to_json(pv);
assert_eq!(back, v);
}
#[test]
fn json_bool_roundtrip() {
let v = serde_json::Value::Bool(true);
let pv = json_to_prost_value(&v);
let back = prost_value_to_json(pv);
assert_eq!(back, v);
}
#[test]
fn json_number_roundtrip() {
let v = serde_json::json!(3.14);
let pv = json_to_prost_value(&v);
let back = prost_value_to_json(pv);
assert_eq!(back, v);
}
#[test]
fn json_array_roundtrip() {
let v = serde_json::json!(["a", 1.0, true, null]);
let pv = json_to_prost_value(&v);
let back = prost_value_to_json(pv);
assert_eq!(back, v);
}
#[test]
fn empty_struct_roundtrip() {
let v = serde_json::json!({});
let proto_struct = json_to_struct(&v);
let back = struct_to_json(proto_struct);
assert_eq!(back, v);
}
#[test]
fn prost_value_none_kind() {
let v = prost_types::Value { kind: None };
assert_eq!(prost_value_to_json(v), serde_json::Value::Null);
}
#[test]
fn json_to_struct_from_non_object() {
let v = serde_json::json!("not an object");
let s = json_to_struct(&v);
assert!(s.fields.is_empty());
}
#[test]
fn datetime_to_timestamp_conversion() {
let dt = chrono::DateTime::parse_from_rfc3339("2026-03-29T12:00:00Z")
.unwrap()
.with_timezone(&chrono::Utc);
let ts = datetime_to_timestamp(&dt);
assert_eq!(ts.seconds, dt.timestamp());
assert_eq!(ts.nanos, 0);
}
#[test]
fn workflow_status_mapping() {
use wfe_core::models::{WorkflowInstance as WI, WorkflowStatus as WS};
let mut w = WI::new("test", 1, serde_json::json!({}));
w.status = WS::Runnable;
let p = workflow_to_proto(&w);
assert_eq!(p.status, WorkflowStatus::Runnable as i32);
w.status = WS::Complete;
let p = workflow_to_proto(&w);
assert_eq!(p.status, WorkflowStatus::Complete as i32);
w.status = WS::Suspended;
let p = workflow_to_proto(&w);
assert_eq!(p.status, WorkflowStatus::Suspended as i32);
w.status = WS::Terminated;
let p = workflow_to_proto(&w);
assert_eq!(p.status, WorkflowStatus::Terminated as i32);
}
#[test]
fn pointer_status_mapping() {
use wfe_core::models::{ExecutionPointer as EP, PointerStatus as PS};
let mut p = EP::new(0);
p.status = PS::Pending;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Pending as i32);
p.status = PS::Running;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Running as i32);
p.status = PS::Complete;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Complete as i32);
p.status = PS::Sleeping;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Sleeping as i32);
p.status = PS::WaitingForEvent;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::WaitingForEvent as i32);
p.status = PS::Failed;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Failed as i32);
p.status = PS::Skipped;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Skipped as i32);
p.status = PS::Cancelled;
assert_eq!(pointer_to_proto(&p).status, PointerStatus::Cancelled as i32);
}
#[test]
fn workflow_to_proto_basic() {
let w = wfe_core::models::WorkflowInstance::new("my-wf", 1, serde_json::json!({"key": "val"}));
let p = workflow_to_proto(&w);
assert_eq!(p.definition_id, "my-wf");
assert_eq!(p.version, 1);
assert!(p.create_time.is_some());
assert!(p.complete_time.is_none());
let data = struct_to_json(p.data.unwrap());
assert_eq!(data["key"], "val");
}
// ── gRPC integration tests with real WorkflowHost ────────────────
async fn make_test_service() -> WfeService {
use wfe::WorkflowHostBuilder;
use wfe_core::test_support::{
InMemoryLockProvider, InMemoryPersistenceProvider, InMemoryQueueProvider,
};
let host = WorkflowHostBuilder::new()
.use_persistence(std::sync::Arc::new(InMemoryPersistenceProvider::new())
as std::sync::Arc<dyn wfe_core::traits::PersistenceProvider>)
.use_lock_provider(std::sync::Arc::new(InMemoryLockProvider::new())
as std::sync::Arc<dyn wfe_core::traits::DistributedLockProvider>)
.use_queue_provider(std::sync::Arc::new(InMemoryQueueProvider::new())
as std::sync::Arc<dyn wfe_core::traits::QueueProvider>)
.build()
.unwrap();
host.start().await.unwrap();
let lifecycle_bus = std::sync::Arc::new(crate::lifecycle_bus::BroadcastLifecyclePublisher::new(64));
let log_store = std::sync::Arc::new(crate::log_store::LogStore::new());
WfeService::new(std::sync::Arc::new(host), lifecycle_bus, log_store)
}
#[tokio::test]
async fn rpc_register_and_start_workflow() {
let svc = make_test_service().await;
// Register a workflow.
let req = Request::new(RegisterWorkflowRequest {
yaml: r#"
workflow:
id: test-wf
version: 1
steps:
- name: hello
type: shell
config:
run: echo hi
"#.to_string(),
config: Default::default(),
});
let resp = svc.register_workflow(req).await.unwrap().into_inner();
assert_eq!(resp.definitions.len(), 1);
assert_eq!(resp.definitions[0].definition_id, "test-wf");
assert_eq!(resp.definitions[0].version, 1);
assert_eq!(resp.definitions[0].step_count, 1);
// Start the workflow.
let req = Request::new(StartWorkflowRequest {
definition_id: "test-wf".to_string(),
version: 1,
data: None,
});
let resp = svc.start_workflow(req).await.unwrap().into_inner();
assert!(!resp.workflow_id.is_empty());
// Get the workflow.
let req = Request::new(GetWorkflowRequest {
workflow_id: resp.workflow_id.clone(),
});
let resp = svc.get_workflow(req).await.unwrap().into_inner();
let instance = resp.instance.unwrap();
assert_eq!(instance.definition_id, "test-wf");
assert_eq!(instance.status, WorkflowStatus::Runnable as i32);
}
#[tokio::test]
async fn rpc_register_invalid_yaml() {
let svc = make_test_service().await;
let req = Request::new(RegisterWorkflowRequest {
yaml: "not: valid: yaml: {{{}}}".to_string(),
config: Default::default(),
});
let err = svc.register_workflow(req).await.unwrap_err();
assert_eq!(err.code(), tonic::Code::InvalidArgument);
}
#[tokio::test]
async fn rpc_start_nonexistent_workflow() {
let svc = make_test_service().await;
let req = Request::new(StartWorkflowRequest {
definition_id: "nonexistent".to_string(),
version: 1,
data: None,
});
let err = svc.start_workflow(req).await.unwrap_err();
assert_eq!(err.code(), tonic::Code::Internal);
}
#[tokio::test]
async fn rpc_get_nonexistent_workflow() {
let svc = make_test_service().await;
let req = Request::new(GetWorkflowRequest {
workflow_id: "nonexistent".to_string(),
});
let err = svc.get_workflow(req).await.unwrap_err();
assert_eq!(err.code(), tonic::Code::NotFound);
}
#[tokio::test]
async fn rpc_cancel_workflow() {
let svc = make_test_service().await;
// Register + start.
let req = Request::new(RegisterWorkflowRequest {
yaml: "workflow:\n id: cancel-test\n version: 1\n steps:\n - name: s\n type: shell\n config:\n run: echo ok\n".to_string(),
config: Default::default(),
});
svc.register_workflow(req).await.unwrap();
let req = Request::new(StartWorkflowRequest {
definition_id: "cancel-test".to_string(),
version: 1,
data: None,
});
let wf_id = svc.start_workflow(req).await.unwrap().into_inner().workflow_id;
// Cancel it.
let req = Request::new(CancelWorkflowRequest { workflow_id: wf_id.clone() });
svc.cancel_workflow(req).await.unwrap();
// Verify it's terminated.
let req = Request::new(GetWorkflowRequest { workflow_id: wf_id });
let instance = svc.get_workflow(req).await.unwrap().into_inner().instance.unwrap();
assert_eq!(instance.status, WorkflowStatus::Terminated as i32);
}
#[tokio::test]
async fn rpc_suspend_resume_workflow() {
let svc = make_test_service().await;
let req = Request::new(RegisterWorkflowRequest {
yaml: "workflow:\n id: sr-test\n version: 1\n steps:\n - name: s\n type: shell\n config:\n run: echo ok\n".to_string(),
config: Default::default(),
});
svc.register_workflow(req).await.unwrap();
let req = Request::new(StartWorkflowRequest {
definition_id: "sr-test".to_string(),
version: 1,
data: None,
});
let wf_id = svc.start_workflow(req).await.unwrap().into_inner().workflow_id;
// Suspend.
let req = Request::new(SuspendWorkflowRequest { workflow_id: wf_id.clone() });
svc.suspend_workflow(req).await.unwrap();
let req = Request::new(GetWorkflowRequest { workflow_id: wf_id.clone() });
let instance = svc.get_workflow(req).await.unwrap().into_inner().instance.unwrap();
assert_eq!(instance.status, WorkflowStatus::Suspended as i32);
// Resume.
let req = Request::new(ResumeWorkflowRequest { workflow_id: wf_id.clone() });
svc.resume_workflow(req).await.unwrap();
let req = Request::new(GetWorkflowRequest { workflow_id: wf_id });
let instance = svc.get_workflow(req).await.unwrap().into_inner().instance.unwrap();
assert_eq!(instance.status, WorkflowStatus::Runnable as i32);
}
#[tokio::test]
async fn rpc_publish_event() {
let svc = make_test_service().await;
let req = Request::new(PublishEventRequest {
event_name: "test.event".to_string(),
event_key: "key-1".to_string(),
data: None,
});
// Should succeed even with no waiting workflows.
svc.publish_event(req).await.unwrap();
}
#[tokio::test]
async fn rpc_search_logs_not_configured() {
let svc = make_test_service().await;
let req = Request::new(SearchLogsRequest {
query: "test".to_string(),
..Default::default()
});
let err = svc.search_logs(req).await.unwrap_err();
assert_eq!(err.code(), tonic::Code::Unavailable);
}
#[tokio::test]
async fn rpc_list_definitions_empty() {
let svc = make_test_service().await;
let req = Request::new(ListDefinitionsRequest {});
let resp = svc.list_definitions(req).await.unwrap().into_inner();
assert!(resp.definitions.is_empty());
}
#[tokio::test]
async fn rpc_search_workflows_empty() {
let svc = make_test_service().await;
let req = Request::new(SearchWorkflowsRequest {
query: "test".to_string(),
..Default::default()
});
let resp = svc.search_workflows(req).await.unwrap().into_inner();
assert_eq!(resp.total, 0);
}
}

125
wfe-server/src/lifecycle_bus.rs Normal file
View File

@@ -0,0 +1,125 @@
use async_trait::async_trait;
use tokio::sync::broadcast;
use wfe_core::models::LifecycleEvent;
use wfe_core::traits::LifecyclePublisher;
/// Broadcasts lifecycle events to multiple subscribers via tokio broadcast channels.
pub struct BroadcastLifecyclePublisher {
sender: broadcast::Sender<LifecycleEvent>,
}
impl BroadcastLifecyclePublisher {
pub fn new(capacity: usize) -> Self {
let (sender, _) = broadcast::channel(capacity);
Self { sender }
}
pub fn subscribe(&self) -> broadcast::Receiver<LifecycleEvent> {
self.sender.subscribe()
}
}
#[async_trait]
impl LifecyclePublisher for BroadcastLifecyclePublisher {
async fn publish(&self, event: LifecycleEvent) -> wfe_core::Result<()> {
// Ignore send errors (no active subscribers).
let _ = self.sender.send(event);
Ok(())
}
}
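// A minimal consumer sketch (hedged; `handle` is a hypothetical callback). A
// long-lived subscriber should tolerate `RecvError::Lagged`, which tokio's
// broadcast channel returns once a slow receiver falls more than `capacity`
// events behind and the oldest events are dropped:
//
//     use tokio::sync::broadcast::error::RecvError;
//     let mut rx = bus.subscribe();
//     loop {
//         match rx.recv().await {
//             Ok(event) => handle(event),
//             Err(RecvError::Lagged(skipped)) => {
//                 tracing::warn!(skipped, "lifecycle subscriber lagged; events dropped");
//             }
//             Err(RecvError::Closed) => break,
//         }
//     }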
#[cfg(test)]
mod tests {
use super::*;
use wfe_core::models::LifecycleEventType;
#[tokio::test]
async fn publish_and_receive() {
let bus = BroadcastLifecyclePublisher::new(16);
let mut rx = bus.subscribe();
let event = LifecycleEvent::new("wf-1", "def-1", 1, LifecycleEventType::Started);
bus.publish(event.clone()).await.unwrap();
let received = rx.recv().await.unwrap();
assert_eq!(received.workflow_instance_id, "wf-1");
assert_eq!(received.event_type, LifecycleEventType::Started);
}
#[tokio::test]
async fn multiple_subscribers() {
let bus = BroadcastLifecyclePublisher::new(16);
let mut rx1 = bus.subscribe();
let mut rx2 = bus.subscribe();
bus.publish(LifecycleEvent::new("wf-1", "def-1", 1, LifecycleEventType::Completed))
.await
.unwrap();
let e1 = rx1.recv().await.unwrap();
let e2 = rx2.recv().await.unwrap();
assert_eq!(e1.event_type, LifecycleEventType::Completed);
assert_eq!(e2.event_type, LifecycleEventType::Completed);
}
#[tokio::test]
async fn no_subscribers_does_not_error() {
let bus = BroadcastLifecyclePublisher::new(16);
// No subscribers — should not panic.
bus.publish(LifecycleEvent::new("wf-1", "def-1", 1, LifecycleEventType::Started))
.await
.unwrap();
}
#[tokio::test]
async fn step_events_propagate() {
let bus = BroadcastLifecyclePublisher::new(16);
let mut rx = bus.subscribe();
bus.publish(LifecycleEvent::new(
"wf-1",
"def-1",
1,
LifecycleEventType::StepStarted {
step_id: 3,
step_name: Some("build".to_string()),
},
))
.await
.unwrap();
let received = rx.recv().await.unwrap();
assert_eq!(
received.event_type,
LifecycleEventType::StepStarted {
step_id: 3,
step_name: Some("build".to_string()),
}
);
}
#[tokio::test]
async fn error_events_include_message() {
let bus = BroadcastLifecyclePublisher::new(16);
let mut rx = bus.subscribe();
bus.publish(LifecycleEvent::new(
"wf-1",
"def-1",
1,
LifecycleEventType::Error {
message: "step failed".to_string(),
},
))
.await
.unwrap();
let received = rx.recv().await.unwrap();
assert_eq!(
received.event_type,
LifecycleEventType::Error {
message: "step failed".to_string(),
}
);
}
}

529
wfe-server/src/log_search.rs Normal file
View File

@@ -0,0 +1,529 @@
use chrono::{DateTime, Utc};
use opensearch::http::transport::Transport;
use opensearch::{IndexParts, OpenSearch, SearchParts};
use serde::{Deserialize, Serialize};
use serde_json::json;
use wfe_core::traits::{LogChunk, LogStreamType};
const LOG_INDEX: &str = "wfe-build-logs";
/// Document structure for a log line stored in OpenSearch.
#[derive(Debug, Serialize, Deserialize)]
struct LogDocument {
workflow_id: String,
definition_id: String,
step_id: usize,
step_name: String,
stream: String,
line: String,
timestamp: String,
}
impl LogDocument {
fn from_chunk(chunk: &LogChunk) -> Self {
Self {
workflow_id: chunk.workflow_id.clone(),
definition_id: chunk.definition_id.clone(),
step_id: chunk.step_id,
step_name: chunk.step_name.clone(),
stream: match chunk.stream {
LogStreamType::Stdout => "stdout".to_string(),
LogStreamType::Stderr => "stderr".to_string(),
},
line: String::from_utf8_lossy(&chunk.data).trim_end().to_string(),
timestamp: chunk.timestamp.to_rfc3339(),
}
}
}
/// Result from a log search query.
#[derive(Debug, Clone)]
pub struct LogSearchHit {
pub workflow_id: String,
pub definition_id: String,
pub step_name: String,
pub line: String,
pub stream: String,
pub timestamp: DateTime<Utc>,
}
/// OpenSearch-backed log search index.
pub struct LogSearchIndex {
client: OpenSearch,
}
impl LogSearchIndex {
pub fn new(url: &str) -> wfe_core::Result<Self> {
let transport = Transport::single_node(url)
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
Ok(Self {
client: OpenSearch::new(transport),
})
}
/// Create the log index if it doesn't exist.
pub async fn ensure_index(&self) -> wfe_core::Result<()> {
let exists = self
.client
.indices()
.exists(opensearch::indices::IndicesExistsParts::Index(&[LOG_INDEX]))
.send()
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
if exists.status_code().is_success() {
return Ok(());
}
let body = json!({
"mappings": {
"properties": {
"workflow_id": { "type": "keyword" },
"definition_id": { "type": "keyword" },
"step_id": { "type": "integer" },
"step_name": { "type": "keyword" },
"stream": { "type": "keyword" },
"line": { "type": "text", "analyzer": "standard" },
"timestamp": { "type": "date" }
}
}
});
let response = self
.client
.indices()
.create(opensearch::indices::IndicesCreateParts::Index(LOG_INDEX))
.body(body)
.send()
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
if !response.status_code().is_success() {
let text = response.text().await.unwrap_or_default();
return Err(wfe_core::WfeError::Persistence(format!(
"Failed to create log index: {text}"
)));
}
tracing::info!(index = LOG_INDEX, "log search index created");
Ok(())
}
/// Index a single log chunk.
pub async fn index_chunk(&self, chunk: &LogChunk) -> wfe_core::Result<()> {
let doc = LogDocument::from_chunk(chunk);
let body = serde_json::to_value(&doc)?;
let response = self
.client
.index(IndexParts::Index(LOG_INDEX))
.body(body)
.send()
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
if !response.status_code().is_success() {
let text = response.text().await.unwrap_or_default();
return Err(wfe_core::WfeError::Persistence(format!(
"failed to index log chunk: {text}"
)));
}
Ok(())
}
/// Search log lines.
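///
/// As a concrete sketch of what this method sends, a call like
/// `search("error", Some("wf-1"), None, Some("stderr"), 0, 50)` builds:
///
/// ```json
/// { "query": { "bool": {
///     "must":   [ { "match": { "line": "error" } } ],
///     "filter": [ { "term": { "workflow_id": "wf-1" } },
///                 { "term": { "stream": "stderr" } } ] } },
///   "from": 0, "size": 50, "sort": [{ "timestamp": "asc" }] }
/// ```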
pub async fn search(
&self,
query: &str,
workflow_id: Option<&str>,
step_name: Option<&str>,
stream_filter: Option<&str>,
skip: u64,
take: u64,
) -> wfe_core::Result<(Vec<LogSearchHit>, u64)> {
let mut must_clauses = Vec::new();
let mut filter_clauses = Vec::new();
if !query.is_empty() {
must_clauses.push(json!({
"match": { "line": query }
}));
}
if let Some(wf_id) = workflow_id {
filter_clauses.push(json!({ "term": { "workflow_id": wf_id } }));
}
if let Some(sn) = step_name {
filter_clauses.push(json!({ "term": { "step_name": sn } }));
}
if let Some(stream) = stream_filter {
filter_clauses.push(json!({ "term": { "stream": stream } }));
}
let query_body = if must_clauses.is_empty() && filter_clauses.is_empty() {
json!({ "match_all": {} })
} else {
let mut bool_q = serde_json::Map::new();
if !must_clauses.is_empty() {
bool_q.insert("must".to_string(), json!(must_clauses));
}
if !filter_clauses.is_empty() {
bool_q.insert("filter".to_string(), json!(filter_clauses));
}
json!({ "bool": bool_q })
};
let body = json!({
"query": query_body,
"from": skip,
"size": take,
"sort": [{ "timestamp": "asc" }]
});
let response = self
.client
.search(SearchParts::Index(&[LOG_INDEX]))
.body(body)
.send()
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
if !response.status_code().is_success() {
let text = response.text().await.unwrap_or_default();
return Err(wfe_core::WfeError::Persistence(format!(
"Log search failed: {text}"
)));
}
let resp_body: serde_json::Value = response
.json()
.await
.map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
let total = resp_body["hits"]["total"]["value"].as_u64().unwrap_or(0);
let hits = resp_body["hits"]["hits"]
.as_array()
.cloned()
.unwrap_or_default();
let results = hits
.iter()
.filter_map(|hit| {
let src = &hit["_source"];
Some(LogSearchHit {
workflow_id: src["workflow_id"].as_str()?.to_string(),
definition_id: src["definition_id"].as_str()?.to_string(),
step_name: src["step_name"].as_str()?.to_string(),
line: src["line"].as_str()?.to_string(),
stream: src["stream"].as_str()?.to_string(),
timestamp: src["timestamp"]
.as_str()
.and_then(|s| DateTime::parse_from_rfc3339(s).ok())
.map(|dt| dt.with_timezone(&Utc))
.unwrap_or_else(Utc::now),
})
})
.collect();
Ok((results, total))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn log_document_from_chunk_stdout() {
let chunk = LogChunk {
workflow_id: "wf-1".to_string(),
definition_id: "ci".to_string(),
step_id: 0,
step_name: "build".to_string(),
stream: LogStreamType::Stdout,
data: b"compiling wfe-core\n".to_vec(),
timestamp: Utc::now(),
};
let doc = LogDocument::from_chunk(&chunk);
assert_eq!(doc.workflow_id, "wf-1");
assert_eq!(doc.stream, "stdout");
assert_eq!(doc.line, "compiling wfe-core");
assert_eq!(doc.step_name, "build");
}
#[test]
fn log_document_from_chunk_stderr() {
let chunk = LogChunk {
workflow_id: "wf-2".to_string(),
definition_id: "deploy".to_string(),
step_id: 1,
step_name: "test".to_string(),
stream: LogStreamType::Stderr,
data: b"warning: unused variable\n".to_vec(),
timestamp: Utc::now(),
};
let doc = LogDocument::from_chunk(&chunk);
assert_eq!(doc.stream, "stderr");
assert_eq!(doc.line, "warning: unused variable");
}
#[test]
fn log_document_trims_trailing_newline() {
let chunk = LogChunk {
workflow_id: "wf-1".to_string(),
definition_id: "ci".to_string(),
step_id: 0,
step_name: "build".to_string(),
stream: LogStreamType::Stdout,
data: b"line with newline\n".to_vec(),
timestamp: Utc::now(),
};
let doc = LogDocument::from_chunk(&chunk);
assert_eq!(doc.line, "line with newline");
}
#[test]
fn log_document_serializes_to_json() {
let chunk = LogChunk {
workflow_id: "wf-1".to_string(),
definition_id: "ci".to_string(),
step_id: 2,
step_name: "clippy".to_string(),
stream: LogStreamType::Stdout,
data: b"all good\n".to_vec(),
timestamp: Utc::now(),
};
let doc = LogDocument::from_chunk(&chunk);
let json = serde_json::to_value(&doc).unwrap();
assert_eq!(json["step_name"], "clippy");
assert_eq!(json["step_id"], 2);
assert!(json["timestamp"].is_string());
}
// ── OpenSearch integration tests ────────────────────────────────
fn opensearch_url() -> Option<String> {
let url = std::env::var("WFE_SEARCH_URL")
.unwrap_or_else(|_| "http://localhost:9200".to_string());
// Quick TCP probe to check if OpenSearch is reachable.
let addr = url
.strip_prefix("http://")
.or_else(|| url.strip_prefix("https://"))
.unwrap_or("localhost:9200");
// `SocketAddr: FromStr` rejects hostnames like "localhost:9200", so resolve
// through ToSocketAddrs instead of parsing the string directly.
let sock_addr = std::net::ToSocketAddrs::to_socket_addrs(addr).ok()?.next()?;
match std::net::TcpStream::connect_timeout(&sock_addr, std::time::Duration::from_secs(1)) {
Ok(_) => Some(url),
Err(_) => None,
}
}
fn make_test_chunk(
workflow_id: &str,
step_name: &str,
stream: LogStreamType,
line: &str,
) -> LogChunk {
LogChunk {
workflow_id: workflow_id.to_string(),
definition_id: "test-def".to_string(),
step_id: 0,
step_name: step_name.to_string(),
stream,
data: format!("{line}\n").into_bytes(),
timestamp: Utc::now(),
}
}
/// Delete the test index to start clean.
async fn cleanup_index(url: &str) {
let client = reqwest::Client::new();
let _ = client
.delete(format!("{url}/{LOG_INDEX}"))
.send()
.await;
}
#[tokio::test]
async fn opensearch_ensure_index_creates_index() {
let Some(url) = opensearch_url() else {
eprintln!("SKIP: OpenSearch not available");
return;
};
cleanup_index(&url).await;
let index = LogSearchIndex::new(&url).unwrap();
index.ensure_index().await.unwrap();
// Calling again should be idempotent.
index.ensure_index().await.unwrap();
cleanup_index(&url).await;
}
#[tokio::test]
async fn opensearch_index_and_search_chunk() {
let Some(url) = opensearch_url() else {
eprintln!("SKIP: OpenSearch not available");
return;
};
cleanup_index(&url).await;
let index = LogSearchIndex::new(&url).unwrap();
index.ensure_index().await.unwrap();
// Index some log chunks.
let chunk = make_test_chunk("wf-search-1", "build", LogStreamType::Stdout, "compiling wfe-core v1.5.0");
index.index_chunk(&chunk).await.unwrap();
let chunk = make_test_chunk("wf-search-1", "build", LogStreamType::Stderr, "warning: unused variable");
index.index_chunk(&chunk).await.unwrap();
let chunk = make_test_chunk("wf-search-1", "test", LogStreamType::Stdout, "test result: ok. 79 passed");
index.index_chunk(&chunk).await.unwrap();
// OpenSearch needs a refresh to make docs searchable.
let client = reqwest::Client::new();
client.post(format!("{url}/{LOG_INDEX}/_refresh")).send().await.unwrap();
// Search by text.
let (results, total) = index
.search("wfe-core", None, None, None, 0, 10)
.await
.unwrap();
assert!(total >= 1, "expected at least 1 hit, got {total}");
assert!(results.iter().any(|r| r.line.contains("wfe-core")));
// Search by workflow_id filter.
let (results, _) = index
.search("", Some("wf-search-1"), None, None, 0, 10)
.await
.unwrap();
assert_eq!(results.len(), 3);
// Search by step_name filter.
let (results, _) = index
.search("", Some("wf-search-1"), Some("test"), None, 0, 10)
.await
.unwrap();
assert_eq!(results.len(), 1);
assert!(results[0].line.contains("79 passed"));
// Search by stream filter.
let (results, _) = index
.search("", Some("wf-search-1"), None, Some("stderr"), 0, 10)
.await
.unwrap();
assert_eq!(results.len(), 1);
assert!(results[0].line.contains("unused variable"));
cleanup_index(&url).await;
}
#[tokio::test]
async fn opensearch_search_empty_index() {
let Some(url) = opensearch_url() else {
eprintln!("SKIP: OpenSearch not available");
return;
};
cleanup_index(&url).await;
let index = LogSearchIndex::new(&url).unwrap();
index.ensure_index().await.unwrap();
let (results, total) = index
.search("nonexistent", None, None, None, 0, 10)
.await
.unwrap();
assert_eq!(total, 0);
assert!(results.is_empty());
cleanup_index(&url).await;
}
#[tokio::test]
async fn opensearch_search_pagination() {
let Some(url) = opensearch_url() else {
eprintln!("SKIP: OpenSearch not available");
return;
};
cleanup_index(&url).await;
let index = LogSearchIndex::new(&url).unwrap();
index.ensure_index().await.unwrap();
// Index 5 chunks.
for i in 0..5 {
let chunk = make_test_chunk("wf-page", "build", LogStreamType::Stdout, &format!("line {i}"));
index.index_chunk(&chunk).await.unwrap();
}
let client = reqwest::Client::new();
client.post(format!("{url}/{LOG_INDEX}/_refresh")).send().await.unwrap();
// Get first 2.
let (results, total) = index
.search("", Some("wf-page"), None, None, 0, 2)
.await
.unwrap();
assert_eq!(total, 5);
assert_eq!(results.len(), 2);
// Get next 2.
let (results, _) = index
.search("", Some("wf-page"), None, None, 2, 2)
.await
.unwrap();
assert_eq!(results.len(), 2);
// Get last 1.
let (results, _) = index
.search("", Some("wf-page"), None, None, 4, 2)
.await
.unwrap();
assert_eq!(results.len(), 1);
cleanup_index(&url).await;
}
#[test]
fn log_search_index_new_constructs_ok() {
// Construction should succeed even for unreachable URLs (fails on first use).
let result = LogSearchIndex::new("http://localhost:19876");
assert!(result.is_ok());
}
#[tokio::test]
async fn opensearch_index_chunk_result_fields() {
let Some(url) = opensearch_url() else {
eprintln!("SKIP: OpenSearch not available");
return;
};
cleanup_index(&url).await;
let index = LogSearchIndex::new(&url).unwrap();
index.ensure_index().await.unwrap();
let chunk = make_test_chunk("wf-fields", "clippy", LogStreamType::Stderr, "error: type mismatch");
index.index_chunk(&chunk).await.unwrap();
let client = reqwest::Client::new();
client.post(format!("{url}/{LOG_INDEX}/_refresh")).send().await.unwrap();
let (results, _) = index
.search("type mismatch", None, None, None, 0, 10)
.await
.unwrap();
assert!(!results.is_empty());
let hit = &results[0];
assert_eq!(hit.workflow_id, "wf-fields");
assert_eq!(hit.definition_id, "test-def");
assert_eq!(hit.step_name, "clippy");
assert_eq!(hit.stream, "stderr");
assert!(hit.line.contains("type mismatch"));
cleanup_index(&url).await;
}
}

203
wfe-server/src/log_store.rs Normal file
View File

@@ -0,0 +1,203 @@
use std::sync::Arc;
use async_trait::async_trait;
use dashmap::DashMap;
use tokio::sync::broadcast;
use wfe_core::traits::log_sink::{LogChunk, LogSink};
/// Stores and broadcasts log chunks for workflow step executions.
///
/// Three tiers:
/// 1. **Live broadcast** — per-workflow broadcast channel for StreamLogs subscribers
/// 2. **In-memory history** — append-only buffer per (workflow_id, step_id) for replay
/// 3. **Search index** — OpenSearch log indexing via LogSearchIndex (optional)
pub struct LogStore {
/// Per-workflow broadcast channels for live streaming.
live: DashMap<String, broadcast::Sender<LogChunk>>,
/// In-memory history per (workflow_id, step_id).
history: DashMap<(String, usize), Vec<LogChunk>>,
/// Optional search index for log lines.
search: Option<Arc<crate::log_search::LogSearchIndex>>,
}
impl LogStore {
pub fn new() -> Self {
Self {
live: DashMap::new(),
history: DashMap::new(),
search: None,
}
}
pub fn with_search(mut self, index: Arc<crate::log_search::LogSearchIndex>) -> Self {
self.search = Some(index);
self
}
/// Subscribe to live log chunks for a workflow.
pub fn subscribe(&self, workflow_id: &str) -> broadcast::Receiver<LogChunk> {
self.live
.entry(workflow_id.to_string())
.or_insert_with(|| broadcast::channel(4096).0)
.subscribe()
}
/// Get historical logs for a workflow, optionally filtered by step.
pub fn get_history(&self, workflow_id: &str, step_id: Option<usize>) -> Vec<LogChunk> {
let mut result = Vec::new();
for entry in self.history.iter() {
let (wf_id, s_id) = entry.key();
if wf_id != workflow_id {
continue;
}
if let Some(filter_step) = step_id {
if *s_id != filter_step {
continue;
}
}
result.extend(entry.value().iter().cloned());
}
// Sort by timestamp.
result.sort_by_key(|c| c.timestamp);
result
}
}
#[async_trait]
impl LogSink for LogStore {
async fn write_chunk(&self, chunk: LogChunk) {
// Store in history.
self.history
.entry((chunk.workflow_id.clone(), chunk.step_id))
.or_default()
.push(chunk.clone());
// Broadcast to live subscribers.
let sender = self
.live
.entry(chunk.workflow_id.clone())
.or_insert_with(|| broadcast::channel(4096).0);
let _ = sender.send(chunk.clone());
// Index to OpenSearch (best-effort, don't block on failure).
if let Some(ref search) = self.search {
if let Err(e) = search.index_chunk(&chunk).await {
tracing::warn!(error = %e, "failed to index log chunk to OpenSearch");
}
}
}
}
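// A hedged follow-mode sketch (`send` is a hypothetical sink): subscribe
// *before* reading history so a chunk written in between is never lost; a
// chunk landing in that window may appear twice, which callers must tolerate:
//
//     let mut rx = store.subscribe("wf-1");
//     for chunk in store.get_history("wf-1", None) {
//         send(chunk);
//     }
//     while let Ok(chunk) = rx.recv().await {
//         send(chunk);
//     }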
#[cfg(test)]
mod tests {
use super::*;
use chrono::Utc;
use wfe_core::traits::LogStreamType;
fn make_chunk(workflow_id: &str, step_id: usize, step_name: &str, data: &str) -> LogChunk {
LogChunk {
workflow_id: workflow_id.to_string(),
definition_id: "def-1".to_string(),
step_id,
step_name: step_name.to_string(),
stream: LogStreamType::Stdout,
data: data.as_bytes().to_vec(),
timestamp: Utc::now(),
}
}
#[tokio::test]
async fn write_and_read_history() {
let store = LogStore::new();
store.write_chunk(make_chunk("wf-1", 0, "build", "line 1\n")).await;
store.write_chunk(make_chunk("wf-1", 0, "build", "line 2\n")).await;
let history = store.get_history("wf-1", None);
assert_eq!(history.len(), 2);
assert_eq!(history[0].data, b"line 1\n");
assert_eq!(history[1].data, b"line 2\n");
}
#[tokio::test]
async fn history_filtered_by_step() {
let store = LogStore::new();
store.write_chunk(make_chunk("wf-1", 0, "build", "build log\n")).await;
store.write_chunk(make_chunk("wf-1", 1, "test", "test log\n")).await;
let build_only = store.get_history("wf-1", Some(0));
assert_eq!(build_only.len(), 1);
assert_eq!(build_only[0].step_name, "build");
let test_only = store.get_history("wf-1", Some(1));
assert_eq!(test_only.len(), 1);
assert_eq!(test_only[0].step_name, "test");
}
#[tokio::test]
async fn empty_history_for_unknown_workflow() {
let store = LogStore::new();
assert!(store.get_history("nonexistent", None).is_empty());
}
#[tokio::test]
async fn live_broadcast() {
let store = LogStore::new();
let mut rx = store.subscribe("wf-1");
store.write_chunk(make_chunk("wf-1", 0, "build", "hello\n")).await;
let received = rx.recv().await.unwrap();
assert_eq!(received.data, b"hello\n");
assert_eq!(received.workflow_id, "wf-1");
}
#[tokio::test]
async fn broadcast_different_workflows_isolated() {
let store = LogStore::new();
let mut rx1 = store.subscribe("wf-1");
let mut rx2 = store.subscribe("wf-2");
store.write_chunk(make_chunk("wf-1", 0, "build", "wf1 log\n")).await;
store.write_chunk(make_chunk("wf-2", 0, "test", "wf2 log\n")).await;
let e1 = rx1.recv().await.unwrap();
assert_eq!(e1.workflow_id, "wf-1");
let e2 = rx2.recv().await.unwrap();
assert_eq!(e2.workflow_id, "wf-2");
}
#[tokio::test]
async fn no_subscribers_does_not_error() {
let store = LogStore::new();
// No subscribers — should not panic.
store.write_chunk(make_chunk("wf-1", 0, "build", "orphan log\n")).await;
// History should still be stored.
assert_eq!(store.get_history("wf-1", None).len(), 1);
}
#[tokio::test]
async fn multiple_subscribers_same_workflow() {
let store = LogStore::new();
let mut rx1 = store.subscribe("wf-1");
let mut rx2 = store.subscribe("wf-1");
store.write_chunk(make_chunk("wf-1", 0, "build", "shared\n")).await;
let e1 = rx1.recv().await.unwrap();
let e2 = rx2.recv().await.unwrap();
assert_eq!(e1.data, b"shared\n");
assert_eq!(e2.data, b"shared\n");
}
#[tokio::test]
async fn history_preserves_stream_type() {
let store = LogStore::new();
let mut chunk = make_chunk("wf-1", 0, "build", "error output\n");
chunk.stream = LogStreamType::Stderr;
store.write_chunk(chunk).await;
let history = store.get_history("wf-1", None);
assert_eq!(history[0].stream, LogStreamType::Stderr);
}
}

250
wfe-server/src/main.rs Normal file
View File

@@ -0,0 +1,250 @@
mod auth;
mod config;
mod grpc;
mod lifecycle_bus;
mod log_search;
mod log_store;
mod webhook;
use std::sync::Arc;
use clap::Parser;
use tonic::transport::Server;
use tracing_subscriber::EnvFilter;
use wfe::WorkflowHostBuilder;
use wfe_core::test_support::{
InMemoryLockProvider, InMemoryPersistenceProvider, InMemoryQueueProvider,
};
use wfe_server_protos::wfe::v1::wfe_server::WfeServer;
use crate::config::{Cli, PersistenceConfig, QueueConfig};
use crate::grpc::WfeService;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// 1. Parse CLI + load config.
let cli = Cli::parse();
let config = config::load(&cli);
// 2. Init tracing.
tracing_subscriber::fmt()
.with_env_filter(
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
)
.init();
tracing::info!(
grpc_addr = %config.grpc_addr,
http_addr = %config.http_addr,
"starting wfe-server"
);
// 3. Build providers based on config.
let (persistence, lock, queue): (
Arc<dyn wfe_core::traits::PersistenceProvider>,
Arc<dyn wfe_core::traits::DistributedLockProvider>,
Arc<dyn wfe_core::traits::QueueProvider>,
) = match (&config.persistence, &config.queue) {
(PersistenceConfig::Sqlite { path }, QueueConfig::InMemory) => {
tracing::info!(path = %path, "using SQLite + in-memory queue");
let persistence = Arc::new(
wfe_sqlite::SqlitePersistenceProvider::new(path)
.await
.expect("failed to init SQLite"),
);
let lock = Arc::new(InMemoryLockProvider::new());
let queue = Arc::new(InMemoryQueueProvider::new());
(persistence, lock, queue)
}
(PersistenceConfig::Postgres { url }, QueueConfig::Valkey { url: valkey_url }) => {
tracing::info!("using Postgres + Valkey");
let persistence = Arc::new(
wfe_postgres::PostgresPersistenceProvider::new(url)
.await
.expect("failed to init Postgres"),
);
let lock = Arc::new(
wfe_valkey::ValkeyLockProvider::new(valkey_url, "wfe")
.await
.expect("failed to init Valkey lock"),
);
let queue = Arc::new(
wfe_valkey::ValkeyQueueProvider::new(valkey_url, "wfe")
.await
.expect("failed to init Valkey queue"),
);
(
persistence as Arc<dyn wfe_core::traits::PersistenceProvider>,
lock as Arc<dyn wfe_core::traits::DistributedLockProvider>,
queue as Arc<dyn wfe_core::traits::QueueProvider>,
)
}
_ => {
tracing::info!("using in-memory providers (dev mode)");
let persistence = Arc::new(InMemoryPersistenceProvider::new());
let lock = Arc::new(InMemoryLockProvider::new());
let queue = Arc::new(InMemoryQueueProvider::new());
(
persistence as Arc<dyn wfe_core::traits::PersistenceProvider>,
lock as Arc<dyn wfe_core::traits::DistributedLockProvider>,
queue as Arc<dyn wfe_core::traits::QueueProvider>,
)
}
};
// 4. Build lifecycle broadcaster.
let lifecycle_bus = Arc::new(lifecycle_bus::BroadcastLifecyclePublisher::new(4096));
// 5. Build log search index (optional, needs to exist before log store).
let log_search_index = if let Some(ref search_config) = config.search {
match log_search::LogSearchIndex::new(&search_config.url) {
Ok(index) => {
let index = Arc::new(index);
if let Err(e) = index.ensure_index().await {
tracing::warn!(error = %e, "failed to create log search index");
}
tracing::info!(url = %search_config.url, "log search enabled");
Some(index)
}
Err(e) => {
tracing::warn!(error = %e, "failed to connect to OpenSearch");
None
}
}
} else {
None
};
// 6. Build log store (with optional search indexing).
let log_store = {
let store = log_store::LogStore::new();
if let Some(ref index) = log_search_index {
Arc::new(store.with_search(index.clone()))
} else {
Arc::new(store)
}
};
// 7. Build WorkflowHost with lifecycle + log_sink.
let host = WorkflowHostBuilder::new()
.use_persistence(persistence)
.use_lock_provider(lock)
.use_queue_provider(queue)
.use_lifecycle(lifecycle_bus.clone() as Arc<dyn wfe_core::traits::LifecyclePublisher>)
.use_log_sink(log_store.clone() as Arc<dyn wfe_core::traits::LogSink>)
.build()
.expect("failed to build workflow host");
// 8. Auto-load YAML definitions.
if let Some(ref dir) = config.workflows_dir {
load_yaml_definitions(&host, dir).await;
}
// 9. Start the workflow engine.
host.start().await.expect("failed to start workflow host");
tracing::info!("workflow engine started");
let host = Arc::new(host);
// 10. Build gRPC service.
let mut wfe_service = WfeService::new(host.clone(), lifecycle_bus, log_store);
if let Some(index) = log_search_index {
wfe_service = wfe_service.with_log_search(index);
}
let (health_reporter, health_service) = tonic_health::server::health_reporter();
health_reporter
.set_serving::<WfeServer<WfeService>>()
.await;
// 11. Build auth state.
let auth_state = Arc::new(auth::AuthState::new(config.auth.clone()).await);
let auth_interceptor = auth::make_interceptor(auth_state);
// 12. Build axum HTTP server for webhooks.
let webhook_state = webhook::WebhookState {
host: host.clone(),
config: config.clone(),
};
// HIGH-08: Limit webhook payload size to 2 MB to prevent OOM DoS.
let http_router = axum::Router::new()
.route("/webhooks/events", axum::routing::post(webhook::handle_generic_event))
.route("/webhooks/github", axum::routing::post(webhook::handle_github_webhook))
.route("/webhooks/gitea", axum::routing::post(webhook::handle_gitea_webhook))
.route("/healthz", axum::routing::get(webhook::health_check))
.layer(axum::extract::DefaultBodyLimit::max(2 * 1024 * 1024))
.with_state(webhook_state);
// 13. Run gRPC + HTTP servers with graceful shutdown.
let grpc_addr = config.grpc_addr;
let http_addr = config.http_addr;
tracing::info!(%grpc_addr, %http_addr, "servers listening");
let grpc_server = Server::builder()
.add_service(health_service)
.add_service(WfeServer::with_interceptor(wfe_service, auth_interceptor))
.serve(grpc_addr);
let http_listener = tokio::net::TcpListener::bind(http_addr)
.await
.expect("failed to bind HTTP address");
let http_server = axum::serve(http_listener, http_router);
tokio::select! {
result = grpc_server => {
if let Err(e) = result {
tracing::error!(error = %e, "gRPC server error");
}
}
result = http_server => {
if let Err(e) = result {
tracing::error!(error = %e, "HTTP server error");
}
}
_ = tokio::signal::ctrl_c() => {
tracing::info!("shutdown signal received");
}
}
// 14. Graceful shutdown.
host.stop().await;
tracing::info!("wfe-server stopped");
Ok(())
}
async fn load_yaml_definitions(host: &wfe::WorkflowHost, dir: &std::path::Path) {
let entries = match std::fs::read_dir(dir) {
Ok(e) => e,
Err(e) => {
tracing::warn!(dir = %dir.display(), error = %e, "failed to read workflows directory");
return;
}
};
let config = std::collections::HashMap::new();
for entry in entries.flatten() {
let path = entry.path();
if path.extension().is_some_and(|ext| ext == "yaml" || ext == "yml") {
match wfe_yaml::load_workflow_from_str(
&std::fs::read_to_string(&path).unwrap_or_default(),
&config,
) {
Ok(workflows) => {
for compiled in workflows {
for (key, factory) in compiled.step_factories {
host.register_step_factory(&key, factory).await;
}
let id = compiled.definition.id.clone();
let version = compiled.definition.version;
host.register_workflow_definition(compiled.definition).await;
tracing::info!(id = %id, version, path = %path.display(), "loaded workflow definition");
}
}
Err(e) => {
tracing::warn!(path = %path.display(), error = %e, "failed to compile workflow");
}
}
}
}
}

556
wfe-server/src/webhook.rs Normal file
View File

@@ -0,0 +1,556 @@
use std::sync::Arc;
use axum::body::Bytes;
use axum::extract::State;
use axum::http::{HeaderMap, StatusCode};
use axum::response::IntoResponse;
use axum::Json;
use hmac::{Hmac, Mac};
use sha2::Sha256;
use crate::config::{ServerConfig, WebhookTrigger};
type HmacSha256 = Hmac<Sha256>;
/// Shared state for webhook handlers.
#[derive(Clone)]
pub struct WebhookState {
pub host: Arc<wfe::WorkflowHost>,
pub config: ServerConfig,
}
/// Generic event webhook.
///
/// POST /webhooks/events
/// Body: { "event_name": "...", "event_key": "...", "data": { ... } }
/// Requires bearer token authentication (same tokens as gRPC auth).
pub async fn handle_generic_event(
State(state): State<WebhookState>,
headers: HeaderMap,
Json(payload): Json<GenericEventPayload>,
) -> impl IntoResponse {
// HIGH-07: Authenticate generic event endpoint.
if !state.config.auth.tokens.is_empty() {
let auth_header = headers
.get("authorization")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
let token = auth_header
.strip_prefix("Bearer ")
.or_else(|| auth_header.strip_prefix("bearer "))
.unwrap_or("");
if !crate::auth::check_static_tokens_pub(&state.config.auth.tokens, token) {
return (StatusCode::UNAUTHORIZED, "invalid token");
}
}
let data = payload.data.unwrap_or_else(|| serde_json::json!({}));
match state
.host
.publish_event(&payload.event_name, &payload.event_key, data)
.await
{
Ok(()) => (StatusCode::OK, "event published"),
Err(e) => {
tracing::warn!(error = %e, "failed to publish generic event");
(StatusCode::INTERNAL_SERVER_ERROR, "failed to publish event")
}
}
}
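// A minimal client sketch (hedged: the listen address and token are
// hypothetical; `reqwest` is already a dependency of this crate):
//
//     let resp = reqwest::Client::new()
//         .post("http://localhost:8080/webhooks/events")
//         .bearer_auth("tok_123")
//         .json(&serde_json::json!({
//             "event_name": "deploy.approved",
//             "event_key": "prod-42",
//             "data": { "approver": "sienna" }
//         }))
//         .send()
//         .await?;
//     assert_eq!(resp.status(), reqwest::StatusCode::OK);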
/// GitHub webhook handler.
///
/// POST /webhooks/github
/// Verifies X-Hub-Signature-256, parses X-GitHub-Event header.
pub async fn handle_github_webhook(
State(state): State<WebhookState>,
headers: HeaderMap,
body: Bytes,
) -> impl IntoResponse {
// 1. Verify HMAC signature if secret is configured.
if let Some(secret) = state.config.auth.webhook_secrets.get("github") {
let sig_header = headers
.get("x-hub-signature-256")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
if !verify_hmac_sha256(secret.as_bytes(), &body, sig_header) {
return (StatusCode::UNAUTHORIZED, "invalid signature");
}
}
// 2. Parse event type.
let event_type = headers
.get("x-github-event")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
// 3. Parse payload.
let payload: serde_json::Value = match serde_json::from_slice(&body) {
Ok(v) => v,
Err(e) => {
tracing::warn!(error = %e, "invalid GitHub webhook JSON");
return (StatusCode::BAD_REQUEST, "invalid JSON");
}
};
tracing::info!(
event = event_type,
repo = payload["repository"]["full_name"].as_str().unwrap_or(""),
"received GitHub webhook"
);
// 4. Map to WFE event + check triggers.
let forge_event = map_forge_event(event_type, &payload);
// Publish as event (for workflows waiting on events).
if let Err(e) = state
.host
.publish_event(&forge_event.event_name, &forge_event.event_key, forge_event.data.clone())
.await
{
tracing::error!(error = %e, "failed to publish forge event");
return (StatusCode::INTERNAL_SERVER_ERROR, "failed to publish event");
}
// Check triggers and auto-start workflows.
for trigger in &state.config.webhook.triggers {
if trigger.source != "github" {
continue;
}
if trigger.event != event_type {
continue;
}
if let Some(ref match_ref) = trigger.match_ref {
let payload_ref = payload["ref"].as_str().unwrap_or("");
if payload_ref != match_ref {
continue;
}
}
let data = map_trigger_data(trigger, &payload);
match state
.host
.start_workflow(&trigger.workflow_id, trigger.version, data)
.await
{
Ok(id) => {
tracing::info!(
workflow_id = %id,
trigger = %trigger.workflow_id,
"webhook triggered workflow"
);
}
Err(e) => {
tracing::warn!(
error = %e,
trigger = %trigger.workflow_id,
"failed to start triggered workflow"
);
}
}
}
(StatusCode::OK, "ok")
}
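// A hedged sketch of a trigger in the TOML config; the field names mirror
// `WebhookTrigger`, but the exact table path (`[[webhook.triggers]]`) is an
// assumption about config.rs:
//
//     [[webhook.triggers]]
//     source = "github"
//     event = "push"
//     match_ref = "refs/heads/main"
//     workflow_id = "ci"
//     version = 1
//     [webhook.triggers.data_mapping]
//     repo = "$.repository.full_name"
//     commit = "$.head_commit.id"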
/// Gitea webhook handler.
///
/// POST /webhooks/gitea
/// Verifies X-Gitea-Signature, parses X-Gitea-Event (or X-GitHub-Event) header.
/// Gitea payloads are intentionally compatible with GitHub's format.
pub async fn handle_gitea_webhook(
State(state): State<WebhookState>,
headers: HeaderMap,
body: Bytes,
) -> impl IntoResponse {
// 1. Verify HMAC signature if secret is configured.
if let Some(secret) = state.config.auth.webhook_secrets.get("gitea") {
// Gitea uses X-Gitea-Signature (raw hex, no sha256= prefix in older versions).
let sig_header = headers
.get("x-gitea-signature")
.and_then(|v| v.to_str().ok())
.unwrap_or("");
// Handle both raw hex and sha256= prefixed formats.
if !verify_hmac_sha256(secret.as_bytes(), &body, sig_header)
&& !verify_hmac_sha256_raw(secret.as_bytes(), &body, sig_header)
{
return (StatusCode::UNAUTHORIZED, "invalid signature");
}
}
// 2. Parse event type (try Gitea header first, fall back to GitHub compat header).
let event_type = headers
.get("x-gitea-event")
.or_else(|| headers.get("x-github-event"))
.and_then(|v| v.to_str().ok())
.unwrap_or("");
// 3. Parse payload.
let payload: serde_json::Value = match serde_json::from_slice(&body) {
Ok(v) => v,
Err(e) => {
tracing::warn!(error = %e, "invalid Gitea webhook JSON");
return (StatusCode::BAD_REQUEST, "invalid JSON");
}
};
tracing::info!(
event = event_type,
repo = payload["repository"]["full_name"].as_str().unwrap_or(""),
"received Gitea webhook"
);
// 4. Map to WFE event + check triggers (same logic as GitHub).
let forge_event = map_forge_event(event_type, &payload);
if let Err(e) = state
.host
.publish_event(&forge_event.event_name, &forge_event.event_key, forge_event.data.clone())
.await
{
tracing::error!(error = %e, "failed to publish forge event");
return (StatusCode::INTERNAL_SERVER_ERROR, "failed to publish event");
}
for trigger in &state.config.webhook.triggers {
if trigger.source != "gitea" {
continue;
}
if trigger.event != event_type {
continue;
}
if let Some(ref match_ref) = trigger.match_ref {
let payload_ref = payload["ref"].as_str().unwrap_or("");
if payload_ref != match_ref {
continue;
}
}
let data = map_trigger_data(trigger, &payload);
match state
.host
.start_workflow(&trigger.workflow_id, trigger.version, data)
.await
{
Ok(id) => {
tracing::info!(workflow_id = %id, trigger = %trigger.workflow_id, "webhook triggered workflow");
}
Err(e) => {
tracing::warn!(error = %e, trigger = %trigger.workflow_id, "failed to start triggered workflow");
}
}
}
(StatusCode::OK, "ok")
}
/// Health check endpoint.
pub async fn health_check() -> impl IntoResponse {
(StatusCode::OK, "ok")
}
// ── Types ───────────────────────────────────────────────────────────
#[derive(serde::Deserialize)]
pub struct GenericEventPayload {
pub event_name: String,
pub event_key: String,
pub data: Option<serde_json::Value>,
}
struct ForgeEvent {
event_name: String,
event_key: String,
data: serde_json::Value,
}
// ── Helpers ─────────────────────────────────────────────────────────
/// Verify HMAC-SHA256 signature with `sha256=<hex>` prefix (GitHub format).
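/// `Mac::verify_slice` compares in constant time, so attacker-supplied
/// signatures cannot be brute-forced byte-by-byte via timing.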
fn verify_hmac_sha256(secret: &[u8], body: &[u8], signature: &str) -> bool {
let hex_sig = signature.strip_prefix("sha256=").unwrap_or("");
if hex_sig.is_empty() {
return false;
}
let expected = match hex::decode(hex_sig) {
Ok(v) => v,
Err(_) => return false,
};
let mut mac = HmacSha256::new_from_slice(secret).expect("HMAC accepts any key size");
mac.update(body);
mac.verify_slice(&expected).is_ok()
}
/// Verify HMAC-SHA256 signature as raw hex (no prefix, Gitea legacy format).
fn verify_hmac_sha256_raw(secret: &[u8], body: &[u8], signature: &str) -> bool {
let expected = match hex::decode(signature) {
Ok(v) => v,
Err(_) => return false,
};
let mut mac = HmacSha256::new_from_slice(secret).expect("HMAC accepts any key size");
mac.update(body);
mac.verify_slice(&expected).is_ok()
}
/// Map a git forge event type + payload to a WFE event.
fn map_forge_event(event_type: &str, payload: &serde_json::Value) -> ForgeEvent {
let repo = payload["repository"]["full_name"]
.as_str()
.unwrap_or("unknown")
.to_string();
match event_type {
"push" => {
let git_ref = payload["ref"].as_str().unwrap_or("").to_string();
ForgeEvent {
event_name: "git.push".to_string(),
event_key: format!("{repo}/{git_ref}"),
data: serde_json::json!({
"repo": repo,
"ref": git_ref,
"before": payload["before"].as_str().unwrap_or(""),
"after": payload["after"].as_str().unwrap_or(""),
"commit": payload["head_commit"]["id"].as_str().unwrap_or(""),
"message": payload["head_commit"]["message"].as_str().unwrap_or(""),
"sender": payload["sender"]["login"].as_str().unwrap_or(""),
}),
}
}
"pull_request" => {
let number = payload["number"].as_u64().unwrap_or(0);
ForgeEvent {
event_name: "git.pr".to_string(),
event_key: format!("{repo}/{number}"),
data: serde_json::json!({
"repo": repo,
"action": payload["action"].as_str().unwrap_or(""),
"number": number,
"title": payload["pull_request"]["title"].as_str().unwrap_or(""),
"head_ref": payload["pull_request"]["head"]["ref"].as_str().unwrap_or(""),
"base_ref": payload["pull_request"]["base"]["ref"].as_str().unwrap_or(""),
"sender": payload["sender"]["login"].as_str().unwrap_or(""),
}),
}
}
"create" => {
let ref_name = payload["ref"].as_str().unwrap_or("").to_string();
let ref_type = payload["ref_type"].as_str().unwrap_or("").to_string();
ForgeEvent {
event_name: format!("git.{ref_type}"),
event_key: format!("{repo}/{ref_name}"),
data: serde_json::json!({
"repo": repo,
"ref": ref_name,
"ref_type": ref_type,
"sender": payload["sender"]["login"].as_str().unwrap_or(""),
}),
}
}
_ => ForgeEvent {
event_name: format!("git.{event_type}"),
event_key: repo.clone(),
data: serde_json::json!({
"repo": repo,
"event_type": event_type,
}),
},
}
}
/// Extract data fields from payload using simple JSONPath-like mapping.
/// Supports `$.field.nested` syntax.
fn map_trigger_data(
trigger: &WebhookTrigger,
payload: &serde_json::Value,
) -> serde_json::Value {
let mut data = serde_json::Map::new();
for (key, path) in &trigger.data_mapping {
if let Some(value) = resolve_json_path(payload, path) {
data.insert(key.clone(), value.clone());
}
}
serde_json::Value::Object(data)
}
/// Resolve a simple JSONPath expression like `$.repository.full_name`.
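/// Only object keys are traversed; array indexing (e.g. `$.commits.0.id`) is
/// not supported, since `Value::get` with a string key returns `None` for arrays.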
fn resolve_json_path<'a>(value: &'a serde_json::Value, path: &str) -> Option<&'a serde_json::Value> {
let path = path.strip_prefix("$.").unwrap_or(path);
let mut current = value;
for segment in path.split('.') {
current = current.get(segment)?;
}
Some(current)
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn verify_github_hmac_valid() {
let secret = b"mysecret";
let body = b"hello world";
let mut mac = HmacSha256::new_from_slice(secret).unwrap();
mac.update(body);
let sig = format!("sha256={}", hex::encode(mac.finalize().into_bytes()));
assert!(verify_hmac_sha256(secret, body, &sig));
}
#[test]
fn verify_github_hmac_invalid() {
assert!(!verify_hmac_sha256(b"secret", b"body", "sha256=deadbeef"));
}
#[test]
fn verify_github_hmac_missing_prefix() {
assert!(!verify_hmac_sha256(b"secret", b"body", "not-a-signature"));
}
#[test]
fn verify_gitea_hmac_raw_valid() {
let secret = b"giteasecret";
let body = b"payload";
let mut mac = HmacSha256::new_from_slice(secret).unwrap();
mac.update(body);
let sig = hex::encode(mac.finalize().into_bytes());
assert!(verify_hmac_sha256_raw(secret, body, &sig));
}
#[test]
fn verify_gitea_hmac_raw_invalid() {
assert!(!verify_hmac_sha256_raw(b"secret", b"body", "badhex"));
}
#[test]
fn map_push_event() {
let payload = serde_json::json!({
"ref": "refs/heads/main",
"before": "aaa",
"after": "bbb",
"head_commit": { "id": "bbb", "message": "fix: stuff" },
"repository": { "full_name": "studio/wfe" },
"sender": { "login": "sienna" }
});
let event = map_forge_event("push", &payload);
assert_eq!(event.event_name, "git.push");
assert_eq!(event.event_key, "studio/wfe/refs/heads/main");
assert_eq!(event.data["commit"], "bbb");
assert_eq!(event.data["sender"], "sienna");
}
#[test]
fn map_pull_request_event() {
let payload = serde_json::json!({
"action": "opened",
"number": 42,
"pull_request": {
"title": "Add feature",
"head": { "ref": "feature-branch" },
"base": { "ref": "main" }
},
"repository": { "full_name": "studio/wfe" },
"sender": { "login": "sienna" }
});
let event = map_forge_event("pull_request", &payload);
assert_eq!(event.event_name, "git.pr");
assert_eq!(event.event_key, "studio/wfe/42");
assert_eq!(event.data["action"], "opened");
assert_eq!(event.data["title"], "Add feature");
assert_eq!(event.data["head_ref"], "feature-branch");
}
#[test]
fn map_create_tag_event() {
let payload = serde_json::json!({
"ref": "v1.5.0",
"ref_type": "tag",
"repository": { "full_name": "studio/wfe" },
"sender": { "login": "sienna" }
});
let event = map_forge_event("create", &payload);
assert_eq!(event.event_name, "git.tag");
assert_eq!(event.event_key, "studio/wfe/v1.5.0");
}
#[test]
fn map_create_branch_event() {
let payload = serde_json::json!({
"ref": "feature-x",
"ref_type": "branch",
"repository": { "full_name": "studio/wfe" },
"sender": { "login": "sienna" }
});
let event = map_forge_event("create", &payload);
assert_eq!(event.event_name, "git.branch");
assert_eq!(event.event_key, "studio/wfe/feature-x");
}
#[test]
fn map_unknown_event() {
let payload = serde_json::json!({
"repository": { "full_name": "studio/wfe" }
});
let event = map_forge_event("release", &payload);
assert_eq!(event.event_name, "git.release");
assert_eq!(event.event_key, "studio/wfe");
}
#[test]
fn resolve_json_path_simple() {
let v = serde_json::json!({"a": {"b": {"c": "value"}}});
assert_eq!(resolve_json_path(&v, "$.a.b.c").unwrap(), "value");
}
#[test]
fn resolve_json_path_no_prefix() {
let v = serde_json::json!({"repo": "test"});
assert_eq!(resolve_json_path(&v, "repo").unwrap(), "test");
}
#[test]
fn resolve_json_path_missing() {
let v = serde_json::json!({"a": 1});
assert!(resolve_json_path(&v, "$.b.c").is_none());
}
#[test]
fn map_trigger_data_extracts_fields() {
let trigger = WebhookTrigger {
source: "github".to_string(),
event: "push".to_string(),
match_ref: None,
workflow_id: "ci".to_string(),
version: 1,
data_mapping: [
("repo".to_string(), "$.repository.full_name".to_string()),
("commit".to_string(), "$.head_commit.id".to_string()),
]
.into(),
};
let payload = serde_json::json!({
"repository": { "full_name": "studio/wfe" },
"head_commit": { "id": "abc123" }
});
let data = map_trigger_data(&trigger, &payload);
assert_eq!(data["repo"], "studio/wfe");
assert_eq!(data["commit"], "abc123");
}
#[test]
fn map_trigger_data_missing_field_skipped() {
let trigger = WebhookTrigger {
source: "github".to_string(),
event: "push".to_string(),
match_ref: None,
workflow_id: "ci".to_string(),
version: 1,
data_mapping: [("missing".to_string(), "$.nonexistent.field".to_string())].into(),
};
let payload = serde_json::json!({"repo": "test"});
let data = map_trigger_data(&trigger, &payload);
assert!(data.get("missing").is_none());
}
}