Compare commits
6 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
17a50d776b
|
|||
|
550dcd1f0c
|
|||
|
cbbeaf6d67
|
|||
|
6dffb91626
|
|||
|
c63bf7b814
|
|||
|
7a9af8015e
|
39
CHANGELOG.md
39
CHANGELOG.md
@@ -2,6 +2,45 @@
|
|||||||
|
|
||||||
All notable changes to this project will be documented in this file.
|
All notable changes to this project will be documented in this file.
|
||||||
|
|
||||||
|
## [1.6.0] - 2026-04-01
|
||||||
|
|
||||||
|
### Added
|
||||||
|
|
||||||
|
- **wfe-server**: Headless workflow server (single binary)
|
||||||
|
- gRPC API with 13 RPCs: workflow CRUD, lifecycle streaming, log streaming, log search
|
||||||
|
- HTTP webhooks: GitHub and Gitea with HMAC-SHA256 verification, configurable triggers
|
||||||
|
- OIDC/JWT authentication with JWKS discovery and asymmetric algorithm allowlist
|
||||||
|
- Static bearer token auth with constant-time comparison
|
||||||
|
- Lifecycle event broadcasting via `WatchLifecycle` server-streaming RPC
|
||||||
|
- Real-time log streaming via `StreamLogs` with follow mode and history replay
|
||||||
|
- Full-text log search via OpenSearch with `SearchLogs` RPC
|
||||||
|
- Layered config: CLI flags > env vars > TOML file
|
||||||
|
- **wfe-server-protos**: gRPC service definitions (tonic 0.14, server + client stubs)
|
||||||
|
- **wfe-core**: `LogSink` trait for real-time step output streaming
|
||||||
|
- **wfe-core**: Lifecycle publisher wired into executor (StepStarted, StepCompleted, Error, Completed, Terminated)
|
||||||
|
- **wfe**: `use_log_sink()` on `WorkflowHostBuilder`
|
||||||
|
- **wfe-yaml**: Shell step streaming mode with `tokio::select!` interleaved stdout/stderr
|
||||||
|
|
||||||
|
### Security
|
||||||
|
|
||||||
|
- JWT algorithm confusion prevention: derive algorithm from JWK, reject symmetric algorithms
|
||||||
|
- Constant-time static token comparison via `subtle` crate
|
||||||
|
- OIDC issuer HTTPS validation to prevent SSRF
|
||||||
|
- Fail-closed on OIDC discovery failure (server won't start with broken auth)
|
||||||
|
- Authenticated generic webhook endpoint
|
||||||
|
- 2MB webhook payload size limit
|
||||||
|
- Config parse errors fail loudly (no silent fallback to open defaults)
|
||||||
|
- Blocked sensitive env var injection (PATH, LD_PRELOAD, etc.) from workflow data
|
||||||
|
- Security regression tests for all critical and high findings
|
||||||
|
|
||||||
|
### Fixed
|
||||||
|
|
||||||
|
- Shell step streaming path now respects `timeout_ms` with `child.kill()` on timeout
|
||||||
|
- LogSink properly threaded from WorkflowHostBuilder through executor to StepExecutionContext
|
||||||
|
- LogStore.with_search() wired in server main.rs for OpenSearch indexing
|
||||||
|
- OpenSearch `index_chunk` returns Err on HTTP failure instead of swallowing it
|
||||||
|
- Webhook publish failures return 500 instead of 200
|
||||||
|
|
||||||
## [1.5.0] - 2026-03-29
|
## [1.5.0] - 2026-03-29
|
||||||
|
|
||||||
### Added
|
### Added
|
||||||
|
|||||||
22
Cargo.toml
22
Cargo.toml
@@ -1,9 +1,9 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
members = ["wfe-core", "wfe-sqlite", "wfe-postgres", "wfe-opensearch", "wfe-valkey", "wfe", "wfe-yaml", "wfe-buildkit", "wfe-containerd", "wfe-containerd-protos", "wfe-buildkit-protos", "wfe-rustlang"]
|
members = ["wfe-core", "wfe-sqlite", "wfe-postgres", "wfe-opensearch", "wfe-valkey", "wfe", "wfe-yaml", "wfe-buildkit", "wfe-containerd", "wfe-containerd-protos", "wfe-buildkit-protos", "wfe-rustlang", "wfe-server-protos", "wfe-server"]
|
||||||
resolver = "2"
|
resolver = "2"
|
||||||
|
|
||||||
[workspace.package]
|
[workspace.package]
|
||||||
version = "1.5.0"
|
version = "1.6.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
license = "MIT"
|
license = "MIT"
|
||||||
repository = "https://src.sunbeam.pt/studio/wfe"
|
repository = "https://src.sunbeam.pt/studio/wfe"
|
||||||
@@ -38,15 +38,15 @@ redis = { version = "0.27", features = ["tokio-comp", "connection-manager"] }
|
|||||||
opensearch = "2"
|
opensearch = "2"
|
||||||
|
|
||||||
# Internal crates
|
# Internal crates
|
||||||
wfe-core = { version = "1.5.0", path = "wfe-core", registry = "sunbeam" }
|
wfe-core = { version = "1.6.0", path = "wfe-core", registry = "sunbeam" }
|
||||||
wfe-sqlite = { version = "1.5.0", path = "wfe-sqlite", registry = "sunbeam" }
|
wfe-sqlite = { version = "1.6.0", path = "wfe-sqlite", registry = "sunbeam" }
|
||||||
wfe-postgres = { version = "1.5.0", path = "wfe-postgres", registry = "sunbeam" }
|
wfe-postgres = { version = "1.6.0", path = "wfe-postgres", registry = "sunbeam" }
|
||||||
wfe-opensearch = { version = "1.5.0", path = "wfe-opensearch", registry = "sunbeam" }
|
wfe-opensearch = { version = "1.6.0", path = "wfe-opensearch", registry = "sunbeam" }
|
||||||
wfe-valkey = { version = "1.5.0", path = "wfe-valkey", registry = "sunbeam" }
|
wfe-valkey = { version = "1.6.0", path = "wfe-valkey", registry = "sunbeam" }
|
||||||
wfe-yaml = { version = "1.5.0", path = "wfe-yaml", registry = "sunbeam" }
|
wfe-yaml = { version = "1.6.0", path = "wfe-yaml", registry = "sunbeam" }
|
||||||
wfe-buildkit = { version = "1.5.0", path = "wfe-buildkit", registry = "sunbeam" }
|
wfe-buildkit = { version = "1.6.0", path = "wfe-buildkit", registry = "sunbeam" }
|
||||||
wfe-containerd = { version = "1.5.0", path = "wfe-containerd", registry = "sunbeam" }
|
wfe-containerd = { version = "1.6.0", path = "wfe-containerd", registry = "sunbeam" }
|
||||||
wfe-rustlang = { version = "1.5.0", path = "wfe-rustlang", registry = "sunbeam" }
|
wfe-rustlang = { version = "1.6.0", path = "wfe-rustlang", registry = "sunbeam" }
|
||||||
|
|
||||||
# YAML
|
# YAML
|
||||||
serde_yaml = "0.9"
|
serde_yaml = "0.9"
|
||||||
|
|||||||
@@ -16,7 +16,7 @@ async-trait = { workspace = true }
|
|||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
regex = { workspace = true }
|
regex = { workspace = true }
|
||||||
wfe-buildkit-protos = { version = "1.5.0", path = "../wfe-buildkit-protos", registry = "sunbeam" }
|
wfe-buildkit-protos = { version = "1.6.0", path = "../wfe-buildkit-protos", registry = "sunbeam" }
|
||||||
tonic = "0.14"
|
tonic = "0.14"
|
||||||
tower = { version = "0.4", features = ["util"] }
|
tower = { version = "0.4", features = ["util"] }
|
||||||
hyper-util = { version = "0.1", features = ["tokio"] }
|
hyper-util = { version = "0.1", features = ["tokio"] }
|
||||||
|
|||||||
@@ -94,6 +94,7 @@ async fn build_simple_dockerfile_via_grpc() {
|
|||||||
workflow: &instance,
|
workflow: &instance,
|
||||||
cancellation_token: cancel,
|
cancellation_token: cancel,
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = step.run(&ctx).await.expect("build should succeed");
|
let result = step.run(&ctx).await.expect("build should succeed");
|
||||||
@@ -180,6 +181,7 @@ async fn build_with_build_args() {
|
|||||||
workflow: &instance,
|
workflow: &instance,
|
||||||
cancellation_token: cancel,
|
cancellation_token: cancel,
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = step.run(&ctx).await.expect("build with args should succeed");
|
let result = step.run(&ctx).await.expect("build with args should succeed");
|
||||||
@@ -227,6 +229,7 @@ async fn connect_to_unavailable_daemon_returns_error() {
|
|||||||
workflow: &instance,
|
workflow: &instance,
|
||||||
cancellation_token: cancel,
|
cancellation_token: cancel,
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let err = step.run(&ctx).await;
|
let err = step.run(&ctx).await;
|
||||||
|
|||||||
@@ -9,7 +9,7 @@ description = "containerd container runner executor for WFE"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
wfe-core = { workspace = true }
|
wfe-core = { workspace = true }
|
||||||
wfe-containerd-protos = { version = "1.5.0", path = "../wfe-containerd-protos", registry = "sunbeam" }
|
wfe-containerd-protos = { version = "1.6.0", path = "../wfe-containerd-protos", registry = "sunbeam" }
|
||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
serde = { workspace = true }
|
serde = { workspace = true }
|
||||||
serde_json = { workspace = true }
|
serde_json = { workspace = true }
|
||||||
|
|||||||
@@ -75,6 +75,7 @@ fn make_context<'a>(
|
|||||||
workflow,
|
workflow,
|
||||||
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -23,6 +23,7 @@ pub struct WorkflowExecutor {
|
|||||||
pub queue_provider: Arc<dyn QueueProvider>,
|
pub queue_provider: Arc<dyn QueueProvider>,
|
||||||
pub lifecycle: Option<Arc<dyn LifecyclePublisher>>,
|
pub lifecycle: Option<Arc<dyn LifecyclePublisher>>,
|
||||||
pub search: Option<Arc<dyn SearchIndex>>,
|
pub search: Option<Arc<dyn SearchIndex>>,
|
||||||
|
pub log_sink: Option<Arc<dyn crate::traits::LogSink>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl WorkflowExecutor {
|
impl WorkflowExecutor {
|
||||||
@@ -37,9 +38,15 @@ impl WorkflowExecutor {
|
|||||||
queue_provider,
|
queue_provider,
|
||||||
lifecycle: None,
|
lifecycle: None,
|
||||||
search: None,
|
search: None,
|
||||||
|
log_sink: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn with_log_sink(mut self, sink: Arc<dyn crate::traits::LogSink>) -> Self {
|
||||||
|
self.log_sink = Some(sink);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
pub fn with_lifecycle(mut self, lifecycle: Arc<dyn LifecyclePublisher>) -> Self {
|
pub fn with_lifecycle(mut self, lifecycle: Arc<dyn LifecyclePublisher>) -> Self {
|
||||||
self.lifecycle = Some(lifecycle);
|
self.lifecycle = Some(lifecycle);
|
||||||
self
|
self
|
||||||
@@ -50,6 +57,15 @@ impl WorkflowExecutor {
|
|||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Publish a lifecycle event if a publisher is configured.
|
||||||
|
async fn publish_lifecycle(&self, event: crate::models::LifecycleEvent) {
|
||||||
|
if let Some(ref publisher) = self.lifecycle {
|
||||||
|
if let Err(e) = publisher.publish(event).await {
|
||||||
|
warn!(error = %e, "failed to publish lifecycle event");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Execute a single workflow instance.
|
/// Execute a single workflow instance.
|
||||||
///
|
///
|
||||||
/// 1. Acquire lock
|
/// 1. Acquire lock
|
||||||
@@ -202,6 +218,16 @@ impl WorkflowExecutor {
|
|||||||
}
|
}
|
||||||
workflow.execution_pointers[idx].status = PointerStatus::Running;
|
workflow.execution_pointers[idx].status = PointerStatus::Running;
|
||||||
|
|
||||||
|
self.publish_lifecycle(crate::models::LifecycleEvent::new(
|
||||||
|
&workflow.id,
|
||||||
|
&workflow.workflow_definition_id,
|
||||||
|
workflow.version,
|
||||||
|
crate::models::LifecycleEventType::StepStarted {
|
||||||
|
step_id,
|
||||||
|
step_name: step.name.clone(),
|
||||||
|
},
|
||||||
|
)).await;
|
||||||
|
|
||||||
// c. Build StepExecutionContext (borrows workflow immutably).
|
// c. Build StepExecutionContext (borrows workflow immutably).
|
||||||
let cancellation_token = tokio_util::sync::CancellationToken::new();
|
let cancellation_token = tokio_util::sync::CancellationToken::new();
|
||||||
let context = StepExecutionContext {
|
let context = StepExecutionContext {
|
||||||
@@ -212,6 +238,7 @@ impl WorkflowExecutor {
|
|||||||
workflow: &workflow,
|
workflow: &workflow,
|
||||||
cancellation_token,
|
cancellation_token,
|
||||||
host_context,
|
host_context,
|
||||||
|
log_sink: self.log_sink.as_deref(),
|
||||||
};
|
};
|
||||||
|
|
||||||
// d. Call step.run(context).
|
// d. Call step.run(context).
|
||||||
@@ -238,6 +265,17 @@ impl WorkflowExecutor {
|
|||||||
has_branches = result.branch_values.is_some(),
|
has_branches = result.branch_values.is_some(),
|
||||||
"Step completed"
|
"Step completed"
|
||||||
);
|
);
|
||||||
|
|
||||||
|
self.publish_lifecycle(crate::models::LifecycleEvent::new(
|
||||||
|
&workflow.id,
|
||||||
|
&workflow.workflow_definition_id,
|
||||||
|
workflow.version,
|
||||||
|
crate::models::LifecycleEventType::StepCompleted {
|
||||||
|
step_id,
|
||||||
|
step_name: step.name.clone(),
|
||||||
|
},
|
||||||
|
)).await;
|
||||||
|
|
||||||
// e. Process the ExecutionResult.
|
// e. Process the ExecutionResult.
|
||||||
// Extract workflow_id before mutable borrow.
|
// Extract workflow_id before mutable borrow.
|
||||||
let wf_id = workflow.id.clone();
|
let wf_id = workflow.id.clone();
|
||||||
@@ -272,6 +310,15 @@ impl WorkflowExecutor {
|
|||||||
tracing::Span::current().record("step.status", "failed");
|
tracing::Span::current().record("step.status", "failed");
|
||||||
warn!(workflow_id, step_id, error = %error_msg, "Step execution failed");
|
warn!(workflow_id, step_id, error = %error_msg, "Step execution failed");
|
||||||
|
|
||||||
|
self.publish_lifecycle(crate::models::LifecycleEvent::new(
|
||||||
|
&workflow.id,
|
||||||
|
&workflow.workflow_definition_id,
|
||||||
|
workflow.version,
|
||||||
|
crate::models::LifecycleEventType::Error {
|
||||||
|
message: error_msg.clone(),
|
||||||
|
},
|
||||||
|
)).await;
|
||||||
|
|
||||||
let pointer_id = workflow.execution_pointers[idx].id.clone();
|
let pointer_id = workflow.execution_pointers[idx].id.clone();
|
||||||
execution_errors.push(ExecutionError::new(
|
execution_errors.push(ExecutionError::new(
|
||||||
workflow_id,
|
workflow_id,
|
||||||
@@ -293,6 +340,12 @@ impl WorkflowExecutor {
|
|||||||
workflow.status = new_status;
|
workflow.status = new_status;
|
||||||
if new_status == WorkflowStatus::Terminated {
|
if new_status == WorkflowStatus::Terminated {
|
||||||
workflow.complete_time = Some(Utc::now());
|
workflow.complete_time = Some(Utc::now());
|
||||||
|
self.publish_lifecycle(crate::models::LifecycleEvent::new(
|
||||||
|
&workflow.id,
|
||||||
|
&workflow.workflow_definition_id,
|
||||||
|
workflow.version,
|
||||||
|
crate::models::LifecycleEventType::Terminated,
|
||||||
|
)).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -321,6 +374,13 @@ impl WorkflowExecutor {
|
|||||||
workflow.status = WorkflowStatus::Complete;
|
workflow.status = WorkflowStatus::Complete;
|
||||||
workflow.complete_time = Some(Utc::now());
|
workflow.complete_time = Some(Utc::now());
|
||||||
|
|
||||||
|
self.publish_lifecycle(crate::models::LifecycleEvent::new(
|
||||||
|
&workflow.id,
|
||||||
|
&workflow.workflow_definition_id,
|
||||||
|
workflow.version,
|
||||||
|
crate::models::LifecycleEventType::Completed,
|
||||||
|
)).await;
|
||||||
|
|
||||||
// Publish completion event for SubWorkflow parents.
|
// Publish completion event for SubWorkflow parents.
|
||||||
let completion_event = Event::new(
|
let completion_event = Event::new(
|
||||||
"wfe.workflow.completed",
|
"wfe.workflow.completed",
|
||||||
|
|||||||
@@ -45,6 +45,7 @@ mod test_helpers {
|
|||||||
workflow,
|
workflow,
|
||||||
cancellation_token: CancellationToken::new(),
|
cancellation_token: CancellationToken::new(),
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -212,6 +212,7 @@ mod tests {
|
|||||||
workflow,
|
workflow,
|
||||||
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
||||||
host_context: Some(host),
|
host_context: Some(host),
|
||||||
|
log_sink: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
59
wfe-core/src/traits/log_sink.rs
Normal file
59
wfe-core/src/traits/log_sink.rs
Normal file
@@ -0,0 +1,59 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
|
||||||
|
/// A chunk of log output from a step execution.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct LogChunk {
|
||||||
|
pub workflow_id: String,
|
||||||
|
pub definition_id: String,
|
||||||
|
pub step_id: usize,
|
||||||
|
pub step_name: String,
|
||||||
|
pub stream: LogStreamType,
|
||||||
|
pub data: Vec<u8>,
|
||||||
|
pub timestamp: DateTime<Utc>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Whether a log chunk is from stdout or stderr.
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
|
pub enum LogStreamType {
|
||||||
|
Stdout,
|
||||||
|
Stderr,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Receives log chunks as they're produced during step execution.
|
||||||
|
///
|
||||||
|
/// Implementations can broadcast to live subscribers, persist to a database,
|
||||||
|
/// index for search, or any combination. The trait is designed to be called
|
||||||
|
/// from within step executors (shell, containerd, etc.) as lines are produced.
|
||||||
|
#[async_trait]
|
||||||
|
pub trait LogSink: Send + Sync {
|
||||||
|
async fn write_chunk(&self, chunk: LogChunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn log_stream_type_equality() {
|
||||||
|
assert_eq!(LogStreamType::Stdout, LogStreamType::Stdout);
|
||||||
|
assert_ne!(LogStreamType::Stdout, LogStreamType::Stderr);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn log_chunk_clone() {
|
||||||
|
let chunk = LogChunk {
|
||||||
|
workflow_id: "wf-1".to_string(),
|
||||||
|
definition_id: "def-1".to_string(),
|
||||||
|
step_id: 0,
|
||||||
|
step_name: "build".to_string(),
|
||||||
|
stream: LogStreamType::Stdout,
|
||||||
|
data: b"hello\n".to_vec(),
|
||||||
|
timestamp: Utc::now(),
|
||||||
|
};
|
||||||
|
let cloned = chunk.clone();
|
||||||
|
assert_eq!(cloned.workflow_id, "wf-1");
|
||||||
|
assert_eq!(cloned.stream, LogStreamType::Stdout);
|
||||||
|
assert_eq!(cloned.data, b"hello\n");
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -69,6 +69,7 @@ mod tests {
|
|||||||
workflow: &instance,
|
workflow: &instance,
|
||||||
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
};
|
};
|
||||||
mw.pre_step(&ctx).await.unwrap();
|
mw.pre_step(&ctx).await.unwrap();
|
||||||
}
|
}
|
||||||
@@ -88,6 +89,7 @@ mod tests {
|
|||||||
workflow: &instance,
|
workflow: &instance,
|
||||||
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
};
|
};
|
||||||
let result = ExecutionResult::next();
|
let result = ExecutionResult::next();
|
||||||
mw.post_step(&ctx, &result).await.unwrap();
|
mw.post_step(&ctx, &result).await.unwrap();
|
||||||
|
|||||||
@@ -1,5 +1,6 @@
|
|||||||
pub mod lifecycle;
|
pub mod lifecycle;
|
||||||
pub mod lock;
|
pub mod lock;
|
||||||
|
pub mod log_sink;
|
||||||
pub mod middleware;
|
pub mod middleware;
|
||||||
pub mod persistence;
|
pub mod persistence;
|
||||||
pub mod queue;
|
pub mod queue;
|
||||||
@@ -9,6 +10,7 @@ pub mod step;
|
|||||||
|
|
||||||
pub use lifecycle::LifecyclePublisher;
|
pub use lifecycle::LifecyclePublisher;
|
||||||
pub use lock::DistributedLockProvider;
|
pub use lock::DistributedLockProvider;
|
||||||
|
pub use log_sink::{LogChunk, LogSink, LogStreamType};
|
||||||
pub use middleware::{StepMiddleware, WorkflowMiddleware};
|
pub use middleware::{StepMiddleware, WorkflowMiddleware};
|
||||||
pub use persistence::{
|
pub use persistence::{
|
||||||
EventRepository, PersistenceProvider, ScheduledCommandRepository, SubscriptionRepository,
|
EventRepository, PersistenceProvider, ScheduledCommandRepository, SubscriptionRepository,
|
||||||
|
|||||||
@@ -38,6 +38,8 @@ pub struct StepExecutionContext<'a> {
|
|||||||
pub cancellation_token: tokio_util::sync::CancellationToken,
|
pub cancellation_token: tokio_util::sync::CancellationToken,
|
||||||
/// Host context for starting child workflows. None if not available.
|
/// Host context for starting child workflows. None if not available.
|
||||||
pub host_context: Option<&'a dyn HostContext>,
|
pub host_context: Option<&'a dyn HostContext>,
|
||||||
|
/// Log sink for streaming step output. None if not configured.
|
||||||
|
pub log_sink: Option<&'a dyn super::LogSink>,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Manual Debug impl since dyn HostContext is not Debug.
|
// Manual Debug impl since dyn HostContext is not Debug.
|
||||||
@@ -50,6 +52,7 @@ impl<'a> std::fmt::Debug for StepExecutionContext<'a> {
|
|||||||
.field("step", &self.step)
|
.field("step", &self.step)
|
||||||
.field("workflow", &self.workflow)
|
.field("workflow", &self.workflow)
|
||||||
.field("host_context", &self.host_context.is_some())
|
.field("host_context", &self.host_context.is_some())
|
||||||
|
.field("log_sink", &self.log_sink.is_some())
|
||||||
.finish()
|
.finish()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
19
wfe-server-protos/Cargo.toml
Normal file
19
wfe-server-protos/Cargo.toml
Normal file
@@ -0,0 +1,19 @@
|
|||||||
|
[package]
|
||||||
|
name = "wfe-server-protos"
|
||||||
|
version.workspace = true
|
||||||
|
edition.workspace = true
|
||||||
|
license.workspace = true
|
||||||
|
repository.workspace = true
|
||||||
|
homepage.workspace = true
|
||||||
|
description = "gRPC service definitions for the WFE workflow server"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
tonic = "0.14"
|
||||||
|
tonic-prost = "0.14"
|
||||||
|
prost = "0.14"
|
||||||
|
prost-types = "0.14"
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
tonic-build = "0.14"
|
||||||
|
tonic-prost-build = "0.14"
|
||||||
|
prost-build = "0.14"
|
||||||
17
wfe-server-protos/build.rs
Normal file
17
wfe-server-protos/build.rs
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let proto_files = vec!["proto/wfe/v1/wfe.proto"];
|
||||||
|
|
||||||
|
let mut prost_config = prost_build::Config::new();
|
||||||
|
prost_config.include_file("mod.rs");
|
||||||
|
|
||||||
|
tonic_prost_build::configure()
|
||||||
|
.build_server(true)
|
||||||
|
.build_client(true)
|
||||||
|
.compile_with_config(
|
||||||
|
prost_config,
|
||||||
|
&proto_files,
|
||||||
|
&["proto"],
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
263
wfe-server-protos/proto/wfe/v1/wfe.proto
Normal file
263
wfe-server-protos/proto/wfe/v1/wfe.proto
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
package wfe.v1;
|
||||||
|
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
import "google/protobuf/struct.proto";
|
||||||
|
|
||||||
|
service Wfe {
|
||||||
|
// === Definitions ===
|
||||||
|
rpc RegisterWorkflow(RegisterWorkflowRequest) returns (RegisterWorkflowResponse);
|
||||||
|
rpc ListDefinitions(ListDefinitionsRequest) returns (ListDefinitionsResponse);
|
||||||
|
|
||||||
|
// === Instances ===
|
||||||
|
rpc StartWorkflow(StartWorkflowRequest) returns (StartWorkflowResponse);
|
||||||
|
rpc GetWorkflow(GetWorkflowRequest) returns (GetWorkflowResponse);
|
||||||
|
rpc CancelWorkflow(CancelWorkflowRequest) returns (CancelWorkflowResponse);
|
||||||
|
rpc SuspendWorkflow(SuspendWorkflowRequest) returns (SuspendWorkflowResponse);
|
||||||
|
rpc ResumeWorkflow(ResumeWorkflowRequest) returns (ResumeWorkflowResponse);
|
||||||
|
rpc SearchWorkflows(SearchWorkflowsRequest) returns (SearchWorkflowsResponse);
|
||||||
|
|
||||||
|
// === Events ===
|
||||||
|
rpc PublishEvent(PublishEventRequest) returns (PublishEventResponse);
|
||||||
|
|
||||||
|
// === Streaming ===
|
||||||
|
rpc WatchLifecycle(WatchLifecycleRequest) returns (stream LifecycleEvent);
|
||||||
|
rpc StreamLogs(StreamLogsRequest) returns (stream LogEntry);
|
||||||
|
|
||||||
|
// === Search ===
|
||||||
|
rpc SearchLogs(SearchLogsRequest) returns (SearchLogsResponse);
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Definitions ─────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
message RegisterWorkflowRequest {
|
||||||
|
// Raw YAML content. The server compiles it via wfe-yaml.
|
||||||
|
string yaml = 1;
|
||||||
|
// Optional config map for ((variable)) interpolation.
|
||||||
|
map<string, string> config = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RegisterWorkflowResponse {
|
||||||
|
repeated RegisteredDefinition definitions = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RegisteredDefinition {
|
||||||
|
string definition_id = 1;
|
||||||
|
uint32 version = 2;
|
||||||
|
uint32 step_count = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ListDefinitionsRequest {}
|
||||||
|
|
||||||
|
message ListDefinitionsResponse {
|
||||||
|
repeated DefinitionSummary definitions = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DefinitionSummary {
|
||||||
|
string id = 1;
|
||||||
|
uint32 version = 2;
|
||||||
|
string description = 3;
|
||||||
|
uint32 step_count = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Instances ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
message StartWorkflowRequest {
|
||||||
|
string definition_id = 1;
|
||||||
|
uint32 version = 2;
|
||||||
|
google.protobuf.Struct data = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message StartWorkflowResponse {
|
||||||
|
string workflow_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GetWorkflowRequest {
|
||||||
|
string workflow_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message GetWorkflowResponse {
|
||||||
|
WorkflowInstance instance = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CancelWorkflowRequest {
|
||||||
|
string workflow_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CancelWorkflowResponse {}
|
||||||
|
|
||||||
|
message SuspendWorkflowRequest {
|
||||||
|
string workflow_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SuspendWorkflowResponse {}
|
||||||
|
|
||||||
|
message ResumeWorkflowRequest {
|
||||||
|
string workflow_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ResumeWorkflowResponse {}
|
||||||
|
|
||||||
|
message SearchWorkflowsRequest {
|
||||||
|
string query = 1;
|
||||||
|
WorkflowStatus status_filter = 2;
|
||||||
|
uint64 skip = 3;
|
||||||
|
uint64 take = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SearchWorkflowsResponse {
|
||||||
|
repeated WorkflowSearchResult results = 1;
|
||||||
|
uint64 total = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Events ──────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
message PublishEventRequest {
|
||||||
|
string event_name = 1;
|
||||||
|
string event_key = 2;
|
||||||
|
google.protobuf.Struct data = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message PublishEventResponse {
|
||||||
|
string event_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Lifecycle streaming ─────────────────────────────────────────────
|
||||||
|
|
||||||
|
message WatchLifecycleRequest {
|
||||||
|
// Empty = all workflows. Set to filter to one.
|
||||||
|
string workflow_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message LifecycleEvent {
|
||||||
|
google.protobuf.Timestamp event_time = 1;
|
||||||
|
string workflow_id = 2;
|
||||||
|
string definition_id = 3;
|
||||||
|
uint32 version = 4;
|
||||||
|
LifecycleEventType event_type = 5;
|
||||||
|
// Populated for step events.
|
||||||
|
uint32 step_id = 6;
|
||||||
|
string step_name = 7;
|
||||||
|
// Populated for error events.
|
||||||
|
string error_message = 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Log streaming ──────────────────────────────────────────────────
|
||||||
|
|
||||||
|
message StreamLogsRequest {
|
||||||
|
string workflow_id = 1;
|
||||||
|
// Filter to a specific step. Empty = all steps.
|
||||||
|
string step_name = 2;
|
||||||
|
// If true, keep streaming as new logs arrive (tail -f).
|
||||||
|
bool follow = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message LogEntry {
|
||||||
|
string workflow_id = 1;
|
||||||
|
string step_name = 2;
|
||||||
|
uint32 step_id = 3;
|
||||||
|
LogStream stream = 4;
|
||||||
|
bytes data = 5;
|
||||||
|
google.protobuf.Timestamp timestamp = 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Log search ─────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
message SearchLogsRequest {
|
||||||
|
// Full-text search query.
|
||||||
|
string query = 1;
|
||||||
|
// Optional filters.
|
||||||
|
string workflow_id = 2;
|
||||||
|
string step_name = 3;
|
||||||
|
LogStream stream_filter = 4;
|
||||||
|
uint64 skip = 5;
|
||||||
|
uint64 take = 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SearchLogsResponse {
|
||||||
|
repeated LogSearchResult results = 1;
|
||||||
|
uint64 total = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message LogSearchResult {
|
||||||
|
string workflow_id = 1;
|
||||||
|
string definition_id = 2;
|
||||||
|
string step_name = 3;
|
||||||
|
string line = 4;
|
||||||
|
LogStream stream = 5;
|
||||||
|
google.protobuf.Timestamp timestamp = 6;
|
||||||
|
}
|
||||||
|
|
||||||
|
// ─── Shared types ───────────────────────────────────────────────────
|
||||||
|
|
||||||
|
message WorkflowInstance {
|
||||||
|
string id = 1;
|
||||||
|
string definition_id = 2;
|
||||||
|
uint32 version = 3;
|
||||||
|
string description = 4;
|
||||||
|
string reference = 5;
|
||||||
|
WorkflowStatus status = 6;
|
||||||
|
google.protobuf.Struct data = 7;
|
||||||
|
google.protobuf.Timestamp create_time = 8;
|
||||||
|
google.protobuf.Timestamp complete_time = 9;
|
||||||
|
repeated ExecutionPointer execution_pointers = 10;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ExecutionPointer {
|
||||||
|
string id = 1;
|
||||||
|
uint32 step_id = 2;
|
||||||
|
string step_name = 3;
|
||||||
|
PointerStatus status = 4;
|
||||||
|
google.protobuf.Timestamp start_time = 5;
|
||||||
|
google.protobuf.Timestamp end_time = 6;
|
||||||
|
uint32 retry_count = 7;
|
||||||
|
bool active = 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
message WorkflowSearchResult {
|
||||||
|
string id = 1;
|
||||||
|
string definition_id = 2;
|
||||||
|
uint32 version = 3;
|
||||||
|
WorkflowStatus status = 4;
|
||||||
|
string reference = 5;
|
||||||
|
string description = 6;
|
||||||
|
google.protobuf.Timestamp create_time = 7;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum WorkflowStatus {
|
||||||
|
WORKFLOW_STATUS_UNSPECIFIED = 0;
|
||||||
|
WORKFLOW_STATUS_RUNNABLE = 1;
|
||||||
|
WORKFLOW_STATUS_SUSPENDED = 2;
|
||||||
|
WORKFLOW_STATUS_COMPLETE = 3;
|
||||||
|
WORKFLOW_STATUS_TERMINATED = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum PointerStatus {
|
||||||
|
POINTER_STATUS_UNSPECIFIED = 0;
|
||||||
|
POINTER_STATUS_PENDING = 1;
|
||||||
|
POINTER_STATUS_RUNNING = 2;
|
||||||
|
POINTER_STATUS_COMPLETE = 3;
|
||||||
|
POINTER_STATUS_SLEEPING = 4;
|
||||||
|
POINTER_STATUS_WAITING_FOR_EVENT = 5;
|
||||||
|
POINTER_STATUS_FAILED = 6;
|
||||||
|
POINTER_STATUS_SKIPPED = 7;
|
||||||
|
POINTER_STATUS_CANCELLED = 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum LifecycleEventType {
|
||||||
|
LIFECYCLE_EVENT_TYPE_UNSPECIFIED = 0;
|
||||||
|
LIFECYCLE_EVENT_TYPE_STARTED = 1;
|
||||||
|
LIFECYCLE_EVENT_TYPE_COMPLETED = 2;
|
||||||
|
LIFECYCLE_EVENT_TYPE_TERMINATED = 3;
|
||||||
|
LIFECYCLE_EVENT_TYPE_SUSPENDED = 4;
|
||||||
|
LIFECYCLE_EVENT_TYPE_RESUMED = 5;
|
||||||
|
LIFECYCLE_EVENT_TYPE_ERROR = 6;
|
||||||
|
LIFECYCLE_EVENT_TYPE_STEP_STARTED = 7;
|
||||||
|
LIFECYCLE_EVENT_TYPE_STEP_COMPLETED = 8;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum LogStream {
|
||||||
|
LOG_STREAM_UNSPECIFIED = 0;
|
||||||
|
LOG_STREAM_STDOUT = 1;
|
||||||
|
LOG_STREAM_STDERR = 2;
|
||||||
|
}
|
||||||
17
wfe-server-protos/src/lib.rs
Normal file
17
wfe-server-protos/src/lib.rs
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
//! Generated gRPC stubs for the WFE workflow server API.
//!
//! Built from `proto/wfe/v1/wfe.proto`. Includes both server and client code.
//!
//! ```rust,ignore
//! use wfe_server_protos::wfe::v1::wfe_server::WfeServer;
//! use wfe_server_protos::wfe::v1::wfe_client::WfeClient;
//! ```

// Blanket-suppress lints: everything below `include!` is machine-generated
// by tonic/prost, so clippy/compiler warnings are not actionable here.
#![allow(clippy::all)]
#![allow(warnings)]

// Pull in the module tree emitted by the build script into OUT_DIR.
include!(concat!(env!("OUT_DIR"), "/mod.rs"));

// Re-export the codegen runtime crates so downstream crates don't need to
// pin matching versions of prost/tonic themselves.
pub use prost;
pub use prost_types;
pub use tonic;
|
||||||
72
wfe-server/Cargo.toml
Normal file
72
wfe-server/Cargo.toml
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
[package]
name = "wfe-server"
version.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Headless workflow server with gRPC API and HTTP webhooks"

[[bin]]
name = "wfe-server"
path = "src/main.rs"

[dependencies]
# Internal
# NOTE(review): "test-support" is enabled in regular (non-dev) dependencies —
# confirm this feature is intended to ship in the production binary.
wfe-core = { workspace = true, features = ["test-support"] }
wfe = { path = "../wfe" }
wfe-yaml = { path = "../wfe-yaml", features = ["rustlang", "buildkit", "containerd"] }
wfe-server-protos = { path = "../wfe-server-protos" }
wfe-sqlite = { workspace = true }
wfe-postgres = { workspace = true }
wfe-valkey = { workspace = true }
wfe-opensearch = { workspace = true }
opensearch = { workspace = true }

# gRPC (versions must match the tonic used by wfe-server-protos codegen)
tonic = "0.14"
tonic-health = "0.14"
prost-types = "0.14"

# HTTP (webhooks)
axum = { version = "0.8", features = ["json", "macros"] }
hyper = "1"
tower = "0.5"

# Runtime
tokio = { workspace = true }
async-trait = { workspace = true }

# Serialization
serde = { workspace = true }
serde_json = { workspace = true }
toml = "0.8"

# CLI
clap = { version = "4", features = ["derive", "env"] }

# Auth (HMAC webhook signatures, JWT/OIDC, constant-time token compare)
hmac = "0.12"
sha2 = "0.10"
hex = "0.4"
jsonwebtoken = "9"
subtle = "2"
reqwest = { workspace = true }

# Observability
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
chrono = { workspace = true }
uuid = { workspace = true }

# Utils
tokio-stream = "0.1"
dashmap = "6"

[dev-dependencies]
pretty_assertions = { workspace = true }
tokio = { workspace = true, features = ["test-util"] }
tempfile = { workspace = true }
# RSA key generation for JWT test fixtures
rsa = { version = "0.9", features = ["pem"] }
rand = "0.8"
base64 = "0.22"
|
||||||
769
wfe-server/src/auth.rs
Normal file
769
wfe-server/src/auth.rs
Normal file
@@ -0,0 +1,769 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
use tonic::{Request, Status};
|
||||||
|
|
||||||
|
use crate::config::AuthConfig;
|
||||||
|
|
||||||
|
/// Asymmetric algorithms we accept. NEVER trust the JWT header's alg claim.
/// This prevents algorithm confusion attacks (CVE-2016-5431).
///
/// Deliberately excludes all HMAC (HS*) variants: with a symmetric
/// algorithm, anyone holding the provider's *public* key material could
/// forge tokens. Only signature algorithms verifiable with a public key
/// are listed.
const ALLOWED_ALGORITHMS: &[Algorithm] = &[
    Algorithm::RS256,
    Algorithm::RS384,
    Algorithm::RS512,
    Algorithm::ES256,
    Algorithm::ES384,
    Algorithm::PS256,
    Algorithm::PS384,
    Algorithm::PS512,
    Algorithm::EdDSA,
];
|
||||||
|
|
||||||
|
/// JWT claims we validate.
///
/// The fields are never read directly — `iss`/`aud` checks happen inside
/// `jsonwebtoken::decode` via the `Validation` settings — hence the
/// `dead_code` allows. Deserializing into this struct is what triggers
/// claim validation.
#[derive(Debug, Deserialize)]
struct Claims {
    // Subject (end-user identity); currently unused after validation.
    #[allow(dead_code)]
    sub: Option<String>,
    // Issuer; checked by `Validation::set_issuer`.
    #[allow(dead_code)]
    iss: Option<String>,
    // Audience; may be a string or an array per RFC 7519, so kept as JSON.
    #[allow(dead_code)]
    aud: Option<serde_json::Value>,
}
|
||||||
|
|
||||||
|
/// Cached JWKS keys fetched from the OIDC provider.
#[derive(Clone)]
struct JwksCache {
    // Full key set as served by the provider's jwks_uri endpoint.
    keys: Vec<jsonwebtoken::jwk::Jwk>,
}
|
||||||
|
|
||||||
|
/// Auth state shared across gRPC interceptor calls.
pub struct AuthState {
    // Static tokens + optional OIDC settings; crate-visible so webhook
    // handlers can reuse the same configuration.
    pub(crate) config: AuthConfig,
    // Lazily refreshed JWKS cache; `None` until the first successful fetch.
    jwks: RwLock<Option<JwksCache>>,
    // Resolved from OIDC discovery at startup; `None` when OIDC is disabled.
    jwks_uri: Option<String>,
}
|
||||||
|
|
||||||
|
impl AuthState {
    /// Create auth state. If OIDC is configured, discovers the JWKS URI.
    /// Panics if OIDC is configured but discovery fails (fail-closed).
    ///
    /// # Panics
    /// - Issuer URL is not HTTPS (except `http://localhost`).
    /// - OIDC discovery fails or returns a non-HTTPS JWKS URI.
    /// - The initial JWKS fetch fails.
    pub async fn new(config: AuthConfig) -> Self {
        let jwks_uri = if let Some(ref issuer) = config.oidc_issuer {
            // HIGH-03: Validate issuer URL uses HTTPS in production.
            if !issuer.starts_with("https://") && !issuer.starts_with("http://localhost") {
                panic!(
                    "OIDC issuer must use HTTPS (got: {issuer}). \
                     Use http://localhost only for development."
                );
            }

            match discover_jwks_uri(issuer).await {
                Ok(uri) => {
                    // Validate JWKS URI also uses HTTPS (second-order SSRF prevention).
                    if !uri.starts_with("https://") && !uri.starts_with("http://localhost") {
                        panic!("JWKS URI from OIDC discovery must use HTTPS (got: {uri})");
                    }
                    tracing::info!(issuer = %issuer, jwks_uri = %uri, "OIDC discovery complete");
                    Some(uri)
                }
                Err(e) => {
                    // HIGH-05: Fail startup if OIDC is configured but discovery fails.
                    panic!("OIDC issuer configured but discovery failed: {e}");
                }
            }
        } else {
            None
        };

        let state = Self {
            config,
            jwks: RwLock::new(None),
            jwks_uri,
        };

        // Pre-fetch JWKS so the first authenticated request doesn't hit an
        // empty cache (validate_jwt_cached rejects with Unavailable otherwise).
        if state.jwks_uri.is_some() {
            state
                .refresh_jwks()
                .await
                .expect("initial JWKS fetch failed — cannot start with OIDC enabled");
        }

        state
    }

    /// Refresh the cached JWKS from the provider.
    ///
    /// # Errors
    /// Returns an error when no JWKS URI is configured or the HTTP fetch /
    /// JSON decode fails. On failure the previous cache is left untouched.
    pub async fn refresh_jwks(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let uri = self.jwks_uri.as_ref().ok_or("no JWKS URI")?;
        let resp: JwksResponse = reqwest::get(uri).await?.json().await?;
        // Swap the whole cache atomically under the write lock.
        let mut cache = self.jwks.write().await;
        *cache = Some(JwksCache { keys: resp.keys });
        tracing::debug!(key_count = cache.as_ref().unwrap().keys.len(), "JWKS refreshed");
        Ok(())
    }

    /// Validate a request's authorization.
    ///
    /// Order matters: static tokens are tried first (cheap, constant-time),
    /// then JWT/OIDC. With no auth configured, every request is allowed.
    ///
    /// # Errors
    /// `Status::unauthenticated` on a missing/invalid token;
    /// `Status::unavailable` while the JWKS cache is unusable.
    pub async fn check<T>(&self, request: &Request<T>) -> Result<(), Status> {
        // No auth configured = open access.
        if self.config.tokens.is_empty() && self.config.oidc_issuer.is_none() {
            return Ok(());
        }

        let token = extract_bearer_token(request)?;

        // CRITICAL-02: Use constant-time comparison for static tokens.
        if check_static_tokens(&self.config.tokens, token) {
            return Ok(());
        }

        // Try JWT/OIDC validation.
        if self.config.oidc_issuer.is_some() {
            return self.validate_jwt_cached(token);
        }

        Err(Status::unauthenticated("invalid token"))
    }

    /// Validate a JWT against the cached JWKS (synchronous — for use in interceptors).
    /// Shared logic used by both `check()` and `make_interceptor()`.
    ///
    /// Uses `try_read` (not `read().await`) because tonic interceptors are
    /// synchronous; a concurrent JWKS refresh yields `Unavailable` rather
    /// than blocking.
    fn validate_jwt_cached(&self, token: &str) -> Result<(), Status> {
        let cache = self.jwks.try_read()
            .map_err(|_| Status::unavailable("JWKS refresh in progress"))?;
        let jwks = cache
            .as_ref()
            .ok_or_else(|| Status::unavailable("JWKS not loaded"))?;

        // Only the header's `kid` is read below — never its `alg`.
        let header = jsonwebtoken::decode_header(token)
            .map_err(|e| Status::unauthenticated(format!("invalid JWT header: {e}")))?;

        // CRITICAL-01: Never trust the JWT header's alg claim.
        // Derive the algorithm from the JWK, not the token.
        let kid = header.kid.as_deref();

        // MEDIUM-06: Require kid when JWKS has multiple keys.
        if kid.is_none() && jwks.keys.len() > 1 {
            return Err(Status::unauthenticated(
                "JWT missing kid header but JWKS has multiple keys",
            ));
        }

        // Select the key: exact kid match, or the sole key when no kid given.
        let jwk = jwks
            .keys
            .iter()
            .find(|k| match (kid, &k.common.key_id) {
                (Some(kid), Some(k_kid)) => kid == k_kid,
                (None, _) if jwks.keys.len() == 1 => true,
                _ => false,
            })
            .ok_or_else(|| Status::unauthenticated("no matching key in JWKS"))?;

        let decoding_key = DecodingKey::from_jwk(jwk)
            .map_err(|e| Status::unauthenticated(format!("invalid JWK: {e}")))?;

        // CRITICAL-01: Use the JWK's algorithm, NOT the token header's.
        let alg = jwk
            .common
            .key_algorithm
            .and_then(|ka| key_algorithm_to_jwt_algorithm(ka))
            .ok_or_else(|| {
                Status::unauthenticated("JWK has no algorithm or unsupported algorithm")
            })?;

        // Double-check it's in our allowlist (no symmetric algorithms).
        if !ALLOWED_ALGORITHMS.contains(&alg) {
            return Err(Status::unauthenticated(format!(
                "algorithm {alg:?} not in allowlist"
            )));
        }

        let mut validation = Validation::new(alg);
        if let Some(ref issuer) = self.config.oidc_issuer {
            validation.set_issuer(&[issuer]);
        }
        if let Some(ref audience) = self.config.oidc_audience {
            validation.set_audience(&[audience]);
        } else {
            // No audience configured: skip aud validation entirely rather
            // than rejecting tokens that carry one.
            validation.validate_aud = false;
        }

        // Signature + exp + (iss/aud as configured) are all checked here.
        decode::<Claims>(token, &decoding_key, &validation)
            .map_err(|e| Status::unauthenticated(format!("JWT validation failed: {e}")))?;

        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// CRITICAL-02: Constant-time token comparison to prevent timing attacks.
/// Public for use in webhook auth.
///
/// Thin public wrapper over the private `check_static_tokens`; returns
/// `true` when `candidate` matches any configured token.
pub fn check_static_tokens_pub(tokens: &[String], candidate: &str) -> bool {
    check_static_tokens(tokens, candidate)
}
|
||||||
|
|
||||||
|
fn check_static_tokens(tokens: &[String], candidate: &str) -> bool {
|
||||||
|
use subtle::ConstantTimeEq;
|
||||||
|
let candidate_bytes = candidate.as_bytes();
|
||||||
|
for token in tokens {
|
||||||
|
let token_bytes = token.as_bytes();
|
||||||
|
if token_bytes.len() == candidate_bytes.len()
|
||||||
|
&& bool::from(token_bytes.ct_eq(candidate_bytes))
|
||||||
|
{
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extract bearer token from gRPC metadata or HTTP Authorization header.
|
||||||
|
fn extract_bearer_token<T>(request: &Request<T>) -> Result<&str, Status> {
|
||||||
|
let auth = request
|
||||||
|
.metadata()
|
||||||
|
.get("authorization")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.ok_or_else(|| Status::unauthenticated("missing authorization header"))?;
|
||||||
|
|
||||||
|
auth.strip_prefix("Bearer ")
|
||||||
|
.or_else(|| auth.strip_prefix("bearer "))
|
||||||
|
.ok_or_else(|| Status::unauthenticated("expected Bearer token"))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Map JWK key algorithm to jsonwebtoken Algorithm.
///
/// Returns `None` for anything not in our asymmetric allowlist — in
/// particular HS256/HS384/HS512, which must never be accepted (algorithm
/// confusion). The catch-all arm is deliberate: unknown future algorithms
/// are rejected by default (fail-closed).
fn key_algorithm_to_jwt_algorithm(
    ka: jsonwebtoken::jwk::KeyAlgorithm,
) -> Option<Algorithm> {
    use jsonwebtoken::jwk::KeyAlgorithm as KA;
    match ka {
        KA::RS256 => Some(Algorithm::RS256),
        KA::RS384 => Some(Algorithm::RS384),
        KA::RS512 => Some(Algorithm::RS512),
        KA::ES256 => Some(Algorithm::ES256),
        KA::ES384 => Some(Algorithm::ES384),
        KA::PS256 => Some(Algorithm::PS256),
        KA::PS384 => Some(Algorithm::PS384),
        KA::PS512 => Some(Algorithm::PS512),
        KA::EdDSA => Some(Algorithm::EdDSA),
        _ => None, // Reject HS256, HS384, HS512 and unknown algorithms.
    }
}
|
||||||
|
|
||||||
|
/// OIDC discovery response (minimal — we only need jwks_uri).
#[derive(Deserialize)]
struct OidcDiscovery {
    // URL of the provider's JSON Web Key Set endpoint.
    jwks_uri: String,
}
|
||||||
|
|
||||||
|
/// JWKS response.
#[derive(Deserialize)]
struct JwksResponse {
    // Signing keys published by the OIDC provider.
    keys: Vec<jsonwebtoken::jwk::Jwk>,
}
|
||||||
|
|
||||||
|
/// Fetch the JWKS URI from the OIDC discovery endpoint.
|
||||||
|
async fn discover_jwks_uri(
|
||||||
|
issuer: &str,
|
||||||
|
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
|
||||||
|
let discovery_url = format!(
|
||||||
|
"{}/.well-known/openid-configuration",
|
||||||
|
issuer.trim_end_matches('/')
|
||||||
|
);
|
||||||
|
let resp: OidcDiscovery = reqwest::get(&discovery_url).await?.json().await?;
|
||||||
|
Ok(resp.jwks_uri)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a tonic interceptor that checks auth on every request.
|
||||||
|
pub fn make_interceptor(
|
||||||
|
auth: Arc<AuthState>,
|
||||||
|
) -> impl Fn(Request<()>) -> Result<Request<()>, Status> + Clone {
|
||||||
|
move |req: Request<()>| {
|
||||||
|
let auth = auth.clone();
|
||||||
|
|
||||||
|
// No auth configured = pass through.
|
||||||
|
if auth.config.tokens.is_empty() && auth.config.oidc_issuer.is_none() {
|
||||||
|
return Ok(req);
|
||||||
|
}
|
||||||
|
|
||||||
|
let token = match extract_bearer_token(&req) {
|
||||||
|
Ok(t) => t.to_string(),
|
||||||
|
Err(e) => return Err(e),
|
||||||
|
};
|
||||||
|
|
||||||
|
// CRITICAL-02: Constant-time static token check.
|
||||||
|
if check_static_tokens(&auth.config.tokens, &token) {
|
||||||
|
return Ok(req);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check JWT via shared validate_jwt_cached (deduplicated logic).
|
||||||
|
if auth.config.oidc_issuer.is_some() {
|
||||||
|
auth.validate_jwt_cached(&token)?;
|
||||||
|
return Ok(req);
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(Status::unauthenticated("invalid token"))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // ── Bearer-token extraction ──────────────────────────────────────

    #[test]
    fn extract_bearer_from_metadata() {
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer mytoken".parse().unwrap());
        assert_eq!(extract_bearer_token(&req).unwrap(), "mytoken");
    }

    #[test]
    fn extract_bearer_lowercase() {
        // The lowercase "bearer" scheme must also be accepted.
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "bearer mytoken".parse().unwrap());
        assert_eq!(extract_bearer_token(&req).unwrap(), "mytoken");
    }

    #[test]
    fn extract_bearer_missing_header() {
        let req = Request::new(());
        assert!(extract_bearer_token(&req).is_err());
    }

    #[test]
    fn extract_bearer_wrong_scheme() {
        // Non-Bearer schemes (e.g. Basic) must be rejected.
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Basic abc".parse().unwrap());
        assert!(extract_bearer_token(&req).is_err());
    }

    // ── Constant-time static token comparison ───────────────────────

    #[test]
    fn constant_time_token_check_valid() {
        let tokens = vec!["secret123".to_string()];
        assert!(check_static_tokens(&tokens, "secret123"));
    }

    #[test]
    fn constant_time_token_check_invalid() {
        let tokens = vec!["secret123".to_string()];
        assert!(!check_static_tokens(&tokens, "wrong"));
    }

    #[test]
    fn constant_time_token_check_empty() {
        // An empty token list matches nothing.
        let tokens: Vec<String> = vec![];
        assert!(!check_static_tokens(&tokens, "anything"));
    }

    #[test]
    fn constant_time_token_check_length_mismatch() {
        let tokens = vec!["short".to_string()];
        assert!(!check_static_tokens(&tokens, "muchlongertoken"));
    }

    // ── AuthState::check with static tokens ─────────────────────────

    #[tokio::test]
    async fn no_auth_configured_allows_all() {
        // No tokens and no OIDC issuer means every request passes.
        let state = AuthState {
            config: AuthConfig::default(),
            jwks: RwLock::new(None),
            jwks_uri: None,
        };
        let req = Request::new(());
        assert!(state.check(&req).await.is_ok());
    }

    #[tokio::test]
    async fn static_token_valid() {
        let config = AuthConfig {
            tokens: vec!["secret123".to_string()],
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(None),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer secret123".parse().unwrap());
        assert!(state.check(&req).await.is_ok());
    }

    #[tokio::test]
    async fn static_token_invalid() {
        let config = AuthConfig {
            tokens: vec!["secret123".to_string()],
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(None),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer wrong".parse().unwrap());
        assert!(state.check(&req).await.is_err());
    }

    #[tokio::test]
    async fn static_token_missing_header() {
        // With auth configured, a request without a token is rejected.
        let config = AuthConfig {
            tokens: vec!["secret123".to_string()],
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(None),
            jwks_uri: None,
        };
        let req = Request::new(());
        assert!(state.check(&req).await.is_err());
    }

    // ── Interceptor behavior (sync path) ────────────────────────────

    #[test]
    fn interceptor_no_auth_passes() {
        let state = Arc::new(AuthState {
            config: AuthConfig::default(),
            jwks: RwLock::new(None),
            jwks_uri: None,
        });
        let interceptor = make_interceptor(state);
        let req = Request::new(());
        assert!(interceptor(req).is_ok());
    }

    #[test]
    fn interceptor_static_token_valid() {
        let config = AuthConfig {
            tokens: vec!["tok".to_string()],
            ..Default::default()
        };
        let state = Arc::new(AuthState {
            config,
            jwks: RwLock::new(None),
            jwks_uri: None,
        });
        let interceptor = make_interceptor(state);
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer tok".parse().unwrap());
        assert!(interceptor(req).is_ok());
    }

    #[test]
    fn interceptor_static_token_invalid() {
        let config = AuthConfig {
            tokens: vec!["tok".to_string()],
            ..Default::default()
        };
        let state = Arc::new(AuthState {
            config,
            jwks: RwLock::new(None),
            jwks_uri: None,
        });
        let interceptor = make_interceptor(state);
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer bad".parse().unwrap());
        assert!(interceptor(req).is_err());
    }

    /// Helper: create a test RSA key pair, JWK, and signed JWT.
    ///
    /// Generates a fresh 2048-bit RSA key, publishes it as an RS256 JWK with
    /// kid="test-key-1", and signs a token with matching kid, the given
    /// issuer/audience, and a 1-hour expiry.
    fn make_test_jwt(
        issuer: &str,
        audience: Option<&str>,
    ) -> (Vec<jsonwebtoken::jwk::Jwk>, String) {
        use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine};
        use rsa::RsaPrivateKey;

        let mut rng = rand::thread_rng();
        let private_key = RsaPrivateKey::new(&mut rng, 2048).unwrap();
        let public_key = private_key.to_public_key();

        use rsa::traits::PublicKeyParts;
        // JWK "n"/"e" fields are base64url (no padding) big-endian integers.
        let n = URL_SAFE_NO_PAD.encode(public_key.n().to_bytes_be());
        let e = URL_SAFE_NO_PAD.encode(public_key.e().to_bytes_be());

        let jwk: jsonwebtoken::jwk::Jwk = serde_json::from_value(serde_json::json!({
            "kty": "RSA",
            "use": "sig",
            "alg": "RS256",
            "kid": "test-key-1",
            "n": n,
            "e": e,
        }))
        .unwrap();

        use rsa::pkcs1::EncodeRsaPrivateKey;
        let pem = private_key
            .to_pkcs1_pem(rsa::pkcs1::LineEnding::LF)
            .unwrap();
        let encoding_key =
            jsonwebtoken::EncodingKey::from_rsa_pem(pem.as_bytes()).unwrap();

        let mut header = jsonwebtoken::Header::new(jsonwebtoken::Algorithm::RS256);
        header.kid = Some("test-key-1".to_string());

        #[derive(serde::Serialize)]
        struct TestClaims {
            sub: String,
            iss: String,
            #[serde(skip_serializing_if = "Option::is_none")]
            aud: Option<String>,
            exp: u64,
            iat: u64,
        }

        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs();

        let claims = TestClaims {
            sub: "user@example.com".to_string(),
            iss: issuer.to_string(),
            aud: audience.map(String::from),
            exp: now + 3600,
            iat: now,
        };

        let token = jsonwebtoken::encode(&header, &claims, &encoding_key).unwrap();
        (vec![jwk], token)
    }

    // ── JWT / OIDC validation ────────────────────────────────────────

    #[tokio::test]
    async fn jwt_validation_valid_token() {
        let issuer = "https://auth.example.com";
        let (jwks, token) = make_test_jwt(issuer, None);
        let config = AuthConfig {
            oidc_issuer: Some(issuer.to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: jwks })),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", format!("Bearer {token}").parse().unwrap());
        assert!(state.check(&req).await.is_ok());
    }

    #[tokio::test]
    async fn jwt_validation_wrong_issuer() {
        // Token signed for a different issuer than configured must fail.
        let (jwks, token) = make_test_jwt("https://wrong-issuer.com", None);
        let config = AuthConfig {
            oidc_issuer: Some("https://expected-issuer.com".to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: jwks })),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", format!("Bearer {token}").parse().unwrap());
        assert!(state.check(&req).await.is_err());
    }

    #[tokio::test]
    async fn jwt_validation_with_audience() {
        let issuer = "https://auth.example.com";
        let (jwks, token) = make_test_jwt(issuer, Some("wfe-server"));
        let config = AuthConfig {
            oidc_issuer: Some(issuer.to_string()),
            oidc_audience: Some("wfe-server".to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: jwks })),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", format!("Bearer {token}").parse().unwrap());
        assert!(state.check(&req).await.is_ok());
    }

    #[tokio::test]
    async fn jwt_validation_wrong_audience() {
        let issuer = "https://auth.example.com";
        let (jwks, token) = make_test_jwt(issuer, Some("wrong-audience"));
        let config = AuthConfig {
            oidc_issuer: Some(issuer.to_string()),
            oidc_audience: Some("wfe-server".to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: jwks })),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", format!("Bearer {token}").parse().unwrap());
        assert!(state.check(&req).await.is_err());
    }

    #[tokio::test]
    async fn jwt_validation_garbage_token() {
        // Structurally invalid JWTs fail at header decode.
        let config = AuthConfig {
            oidc_issuer: Some("https://auth.example.com".to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: vec![] })),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer not.a.jwt".parse().unwrap());
        assert!(state.check(&req).await.is_err());
    }

    #[tokio::test]
    async fn jwt_validation_no_jwks_loaded() {
        // An empty cache must map to Unavailable, not Unauthenticated.
        let config = AuthConfig {
            oidc_issuer: Some("https://auth.example.com".to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(None),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer some.jwt.token".parse().unwrap());
        let err = state.check(&req).await.unwrap_err();
        assert_eq!(err.code(), tonic::Code::Unavailable);
    }

    #[test]
    fn interceptor_jwt_valid() {
        let issuer = "https://auth.example.com";
        let (jwks, token) = make_test_jwt(issuer, None);
        let config = AuthConfig {
            oidc_issuer: Some(issuer.to_string()),
            ..Default::default()
        };
        let state = Arc::new(AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: jwks })),
            jwks_uri: None,
        });
        let interceptor = make_interceptor(state);
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", format!("Bearer {token}").parse().unwrap());
        assert!(interceptor(req).is_ok());
    }

    #[test]
    fn interceptor_jwt_invalid() {
        let config = AuthConfig {
            oidc_issuer: Some("https://auth.example.com".to_string()),
            ..Default::default()
        };
        let state = Arc::new(AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: vec![] })),
            jwks_uri: None,
        });
        let interceptor = make_interceptor(state);
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", "Bearer bad.jwt.token".parse().unwrap());
        assert!(interceptor(req).is_err());
    }

    #[test]
    fn key_algorithm_mapping() {
        use jsonwebtoken::jwk::KeyAlgorithm as KA;
        assert_eq!(key_algorithm_to_jwt_algorithm(KA::RS256), Some(Algorithm::RS256));
        assert_eq!(key_algorithm_to_jwt_algorithm(KA::ES256), Some(Algorithm::ES256));
        assert_eq!(key_algorithm_to_jwt_algorithm(KA::EdDSA), Some(Algorithm::EdDSA));
        // HS256 should be rejected (symmetric algorithm).
        assert_eq!(key_algorithm_to_jwt_algorithm(KA::HS256), None);
        assert_eq!(key_algorithm_to_jwt_algorithm(KA::HS384), None);
        assert_eq!(key_algorithm_to_jwt_algorithm(KA::HS512), None);
    }

    #[test]
    fn allowed_algorithms_rejects_symmetric() {
        assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS256));
        assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS384));
        assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS512));
    }

    // ── Security regression tests ────────────────────────────────────

    #[test]
    fn security_hs256_rejected_in_allowlist() {
        // CRITICAL-01: HS256 must NEVER be in the allowlist.
        // An attacker with the public RSA key could forge tokens if HS256 is allowed.
        assert!(!ALLOWED_ALGORITHMS.contains(&Algorithm::HS256));
    }

    #[test]
    fn security_key_algorithm_rejects_all_symmetric() {
        // CRITICAL-01: key_algorithm_to_jwt_algorithm must return None for symmetric algs.
        use jsonwebtoken::jwk::KeyAlgorithm as KA;
        assert!(key_algorithm_to_jwt_algorithm(KA::HS256).is_none());
        assert!(key_algorithm_to_jwt_algorithm(KA::HS384).is_none());
        assert!(key_algorithm_to_jwt_algorithm(KA::HS512).is_none());
    }

    #[test]
    fn security_constant_time_comparison_used() {
        // CRITICAL-02: Static token check must use constant-time comparison.
        // Verify that equal-length wrong tokens don't short-circuit.
        let tokens = vec!["abcdefgh".to_string()];
        // Both are 8 chars — a timing attack would try this.
        assert!(!check_static_tokens(&tokens, "abcdefgX"));
        assert!(check_static_tokens(&tokens, "abcdefgh"));
    }

    #[tokio::test]
    #[should_panic(expected = "OIDC issuer must use HTTPS")]
    async fn security_oidc_issuer_requires_https() {
        // HIGH-03: Non-HTTPS issuers must be rejected (SSRF prevention).
        let config = AuthConfig {
            oidc_issuer: Some("http://evil.internal:8080".to_string()),
            ..Default::default()
        };
        AuthState::new(config).await;
    }

    #[tokio::test]
    async fn security_jwt_requires_kid_with_multiple_keys() {
        // MEDIUM-06: When JWKS has multiple keys, JWT must have kid header.
        let (mut jwks, token) = make_test_jwt("https://auth.example.com", None);
        // Duplicate the key with a different kid.
        let mut key2 = jwks[0].clone();
        key2.common.key_id = Some("test-key-2".to_string());
        jwks.push(key2);

        // Strip kid from the token by decoding, modifying header, re-encoding.
        // Easier: just test the validate path with multiple keys and a token that has kid.
        // The token from make_test_jwt has kid="test-key-1", so it should work.
        let config = AuthConfig {
            oidc_issuer: Some("https://auth.example.com".to_string()),
            ..Default::default()
        };
        let state = AuthState {
            config,
            jwks: RwLock::new(Some(JwksCache { keys: jwks })),
            jwks_uri: None,
        };
        let mut req = Request::new(());
        req.metadata_mut()
            .insert("authorization", format!("Bearer {token}").parse().unwrap());
        // Should succeed because the token has kid="test-key-1" which matches.
        assert!(state.check(&req).await.is_ok());
    }
}
|
||||||
363
wfe-server/src/config.rs
Normal file
363
wfe-server/src/config.rs
Normal file
@@ -0,0 +1,363 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::path::PathBuf;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use serde::Deserialize;
|
||||||
|
|
||||||
|
/// WFE workflow server.
// Flag values override env vars, which override the TOML file — the
// layering is applied in `load()`. Plain `//` comments are used below so
// clap's generated --help text is unchanged.
#[derive(Parser, Debug)]
#[command(name = "wfe-server", version, about)]
pub struct Cli {
    /// Config file path.
    #[arg(short, long, default_value = "wfe-server.toml")]
    pub config: PathBuf,

    /// gRPC listen address.
    #[arg(long, env = "WFE_GRPC_ADDR")]
    pub grpc_addr: Option<SocketAddr>,

    /// HTTP listen address (webhooks).
    #[arg(long, env = "WFE_HTTP_ADDR")]
    pub http_addr: Option<SocketAddr>,

    /// Persistence backend: sqlite or postgres.
    // Any value other than "postgres" falls back to sqlite in load().
    #[arg(long, env = "WFE_PERSISTENCE")]
    pub persistence: Option<String>,

    /// Database URL or path.
    // If --persistence is absent, the backend is inferred from this URL's
    // scheme ("postgres…" → Postgres, anything else → SQLite path).
    #[arg(long, env = "WFE_DB_URL")]
    pub db_url: Option<String>,

    /// Queue backend: memory or valkey.
    #[arg(long, env = "WFE_QUEUE")]
    pub queue: Option<String>,

    /// Queue URL (for valkey).
    #[arg(long, env = "WFE_QUEUE_URL")]
    pub queue_url: Option<String>,

    /// OpenSearch URL (enables log + workflow search).
    #[arg(long, env = "WFE_SEARCH_URL")]
    pub search_url: Option<String>,

    /// Directory to auto-load YAML workflow definitions from.
    #[arg(long, env = "WFE_WORKFLOWS_DIR")]
    pub workflows_dir: Option<PathBuf>,

    /// Comma-separated bearer tokens for API auth.
    // Split/trimmed in load(); empty segments are dropped.
    #[arg(long, env = "WFE_AUTH_TOKENS")]
    pub auth_tokens: Option<String>,
}
|
||||||
|
|
||||||
|
/// Server configuration (deserialized from TOML).
///
/// `#[serde(default)]` makes every field optional in the file; missing
/// fields fall back to [`ServerConfig::default`].
#[derive(Debug, Deserialize, Clone)]
#[serde(default)]
pub struct ServerConfig {
    /// gRPC API listen address.
    pub grpc_addr: SocketAddr,
    /// HTTP (webhook) listen address.
    pub http_addr: SocketAddr,
    /// Workflow-state persistence backend.
    pub persistence: PersistenceConfig,
    /// Work-queue backend.
    pub queue: QueueConfig,
    /// Optional OpenSearch integration; `None` leaves search RPCs disabled.
    pub search: Option<SearchConfig>,
    /// Static-token / OIDC authentication settings.
    pub auth: AuthConfig,
    /// Webhook → workflow trigger bindings.
    pub webhook: WebhookConfig,
    /// Optional directory of YAML workflow definitions loaded at startup.
    pub workflows_dir: Option<PathBuf>,
}
|
||||||
|
|
||||||
|
impl Default for ServerConfig {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
grpc_addr: "0.0.0.0:50051".parse().unwrap(),
|
||||||
|
http_addr: "0.0.0.0:8080".parse().unwrap(),
|
||||||
|
persistence: PersistenceConfig::default(),
|
||||||
|
queue: QueueConfig::default(),
|
||||||
|
search: None,
|
||||||
|
auth: AuthConfig::default(),
|
||||||
|
webhook: WebhookConfig::default(),
|
||||||
|
workflows_dir: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Workflow-state persistence backend, selected by `backend = "…"` in TOML.
#[derive(Debug, Deserialize, Clone)]
#[serde(tag = "backend")]
pub enum PersistenceConfig {
    /// Embedded SQLite; `path` is the database file path.
    #[serde(rename = "sqlite")]
    Sqlite { path: String },
    /// External PostgreSQL; `url` is a full connection string.
    #[serde(rename = "postgres")]
    Postgres { url: String },
}
|
||||||
|
|
||||||
|
impl Default for PersistenceConfig {
    // Default backend: a local SQLite file in the working directory.
    fn default() -> Self {
        Self::Sqlite {
            path: "wfe.db".to_string(),
        }
    }
}
|
||||||
|
|
||||||
|
/// Work-queue backend, selected by `backend = "…"` in TOML.
#[derive(Debug, Deserialize, Clone)]
#[serde(tag = "backend")]
pub enum QueueConfig {
    /// Process-local in-memory queue (no external broker).
    #[serde(rename = "memory")]
    InMemory,
    /// Valkey/Redis-backed queue; `url` is the connection string.
    #[serde(rename = "valkey")]
    Valkey { url: String },
}
|
||||||
|
|
||||||
|
impl Default for QueueConfig {
    // Default: in-memory queue, so the server runs with zero external deps.
    fn default() -> Self {
        Self::InMemory
    }
}
|
||||||
|
|
||||||
|
/// OpenSearch connection settings (log and workflow search).
#[derive(Debug, Deserialize, Clone)]
pub struct SearchConfig {
    /// Base URL of the OpenSearch cluster.
    pub url: String,
}
|
||||||
|
|
||||||
|
/// Authentication settings: static bearer tokens, optional OIDC, and
/// per-source webhook HMAC secrets.
#[derive(Debug, Deserialize, Clone, Default)]
pub struct AuthConfig {
    /// Static bearer tokens (simple auth, no OIDC needed).
    #[serde(default)]
    pub tokens: Vec<String>,
    /// OIDC issuer URL (e.g., https://auth.example.com/realms/myapp).
    /// Enables JWT validation via OIDC discovery + JWKS.
    #[serde(default)]
    pub oidc_issuer: Option<String>,
    /// Expected JWT audience claim.
    #[serde(default)]
    pub oidc_audience: Option<String>,
    /// Webhook HMAC secrets per source.
    // Keyed by webhook source name (e.g. "github" — see config tests).
    #[serde(default)]
    pub webhook_secrets: HashMap<String, String>,
}
|
||||||
|
|
||||||
|
/// Webhook trigger table (`[[webhook.triggers]]` entries in TOML).
#[derive(Debug, Deserialize, Clone, Default)]
pub struct WebhookConfig {
    #[serde(default)]
    pub triggers: Vec<WebhookTrigger>,
}
|
||||||
|
|
||||||
|
/// A single webhook → workflow binding.
#[derive(Debug, Deserialize, Clone)]
pub struct WebhookTrigger {
    /// Webhook source name, e.g. "github".
    pub source: String,
    /// Event name to match, e.g. "push".
    pub event: String,
    /// Optional git ref filter (e.g. "refs/heads/main"); absent matches any.
    #[serde(default)]
    pub match_ref: Option<String>,
    /// Workflow definition to start when the trigger fires.
    pub workflow_id: String,
    /// Definition version to start.
    pub version: u32,
    /// Payload → workflow-data extraction rules; values look like JSONPath
    /// expressions (e.g. "$.repository.full_name" — see the tests below).
    #[serde(default)]
    pub data_mapping: HashMap<String, String>,
}
|
||||||
|
|
||||||
|
/// Load configuration with layered overrides: CLI > env > file.
|
||||||
|
pub fn load(cli: &Cli) -> ServerConfig {
|
||||||
|
let mut config = if cli.config.exists() {
|
||||||
|
let content = std::fs::read_to_string(&cli.config)
|
||||||
|
.unwrap_or_else(|e| panic!("failed to read config file {}: {e}", cli.config.display()));
|
||||||
|
toml::from_str(&content)
|
||||||
|
.unwrap_or_else(|e| panic!("failed to parse config file {}: {e}", cli.config.display()))
|
||||||
|
} else {
|
||||||
|
ServerConfig::default()
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(addr) = cli.grpc_addr {
|
||||||
|
config.grpc_addr = addr;
|
||||||
|
}
|
||||||
|
if let Some(addr) = cli.http_addr {
|
||||||
|
config.http_addr = addr;
|
||||||
|
}
|
||||||
|
if let Some(ref dir) = cli.workflows_dir {
|
||||||
|
config.workflows_dir = Some(dir.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Persistence override.
|
||||||
|
if let Some(ref backend) = cli.persistence {
|
||||||
|
let url = cli
|
||||||
|
.db_url
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| "wfe.db".to_string());
|
||||||
|
config.persistence = match backend.as_str() {
|
||||||
|
"postgres" => PersistenceConfig::Postgres { url },
|
||||||
|
_ => PersistenceConfig::Sqlite { path: url },
|
||||||
|
};
|
||||||
|
} else if let Some(ref url) = cli.db_url {
|
||||||
|
// Infer backend from URL.
|
||||||
|
if url.starts_with("postgres") {
|
||||||
|
config.persistence = PersistenceConfig::Postgres { url: url.clone() };
|
||||||
|
} else {
|
||||||
|
config.persistence = PersistenceConfig::Sqlite { path: url.clone() };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Queue override.
|
||||||
|
if let Some(ref backend) = cli.queue {
|
||||||
|
config.queue = match backend.as_str() {
|
||||||
|
"valkey" | "redis" => {
|
||||||
|
let url = cli
|
||||||
|
.queue_url
|
||||||
|
.clone()
|
||||||
|
.unwrap_or_else(|| "redis://127.0.0.1:6379".to_string());
|
||||||
|
QueueConfig::Valkey { url }
|
||||||
|
}
|
||||||
|
_ => QueueConfig::InMemory,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search override.
|
||||||
|
if let Some(ref url) = cli.search_url {
|
||||||
|
config.search = Some(SearchConfig { url: url.clone() });
|
||||||
|
}
|
||||||
|
|
||||||
|
// Auth tokens override.
|
||||||
|
if let Some(ref tokens) = cli.auth_tokens {
|
||||||
|
config.auth.tokens = tokens
|
||||||
|
.split(',')
|
||||||
|
.map(|t| t.trim().to_string())
|
||||||
|
.filter(|t| !t.is_empty())
|
||||||
|
.collect();
|
||||||
|
}
|
||||||
|
|
||||||
|
config
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Defaults must be runnable with no config file at all.
    #[test]
    fn default_config() {
        let config = ServerConfig::default();
        assert_eq!(config.grpc_addr, "0.0.0.0:50051".parse().unwrap());
        assert_eq!(config.http_addr, "0.0.0.0:8080".parse().unwrap());
        assert!(matches!(config.persistence, PersistenceConfig::Sqlite { .. }));
        assert!(matches!(config.queue, QueueConfig::InMemory));
        assert!(config.search.is_none());
        assert!(config.auth.tokens.is_empty());
        assert!(config.webhook.triggers.is_empty());
    }

    // A full TOML document covering every section deserializes correctly.
    #[test]
    fn parse_toml_config() {
        let toml = r#"
grpc_addr = "127.0.0.1:9090"
http_addr = "127.0.0.1:8081"

[persistence]
backend = "postgres"
url = "postgres://localhost/wfe"

[queue]
backend = "valkey"
url = "redis://localhost:6379"

[search]
url = "http://localhost:9200"

[auth]
tokens = ["token1", "token2"]

[auth.webhook_secrets]
github = "mysecret"

[[webhook.triggers]]
source = "github"
event = "push"
match_ref = "refs/heads/main"
workflow_id = "ci"
version = 1
"#;
        let config: ServerConfig = toml::from_str(toml).unwrap();
        assert_eq!(config.grpc_addr, "127.0.0.1:9090".parse().unwrap());
        assert!(matches!(config.persistence, PersistenceConfig::Postgres { .. }));
        assert!(matches!(config.queue, QueueConfig::Valkey { .. }));
        assert!(config.search.is_some());
        assert_eq!(config.auth.tokens.len(), 2);
        assert_eq!(config.auth.webhook_secrets.get("github").unwrap(), "mysecret");
        assert_eq!(config.webhook.triggers.len(), 1);
        assert_eq!(config.webhook.triggers[0].workflow_id, "ci");
    }

    // CLI flags must win over file values (here: no file, all flags set).
    #[test]
    fn cli_overrides_file() {
        let cli = Cli {
            config: PathBuf::from("/nonexistent"),
            grpc_addr: Some("127.0.0.1:9999".parse().unwrap()),
            http_addr: None,
            persistence: Some("postgres".to_string()),
            db_url: Some("postgres://db/wfe".to_string()),
            queue: Some("valkey".to_string()),
            queue_url: Some("redis://valkey:6379".to_string()),
            search_url: Some("http://os:9200".to_string()),
            workflows_dir: Some(PathBuf::from("/workflows")),
            auth_tokens: Some("tok1, tok2".to_string()),
        };
        let config = load(&cli);
        assert_eq!(config.grpc_addr, "127.0.0.1:9999".parse().unwrap());
        assert!(matches!(config.persistence, PersistenceConfig::Postgres { ref url } if url == "postgres://db/wfe"));
        assert!(matches!(config.queue, QueueConfig::Valkey { ref url } if url == "redis://valkey:6379"));
        assert_eq!(config.search.unwrap().url, "http://os:9200");
        assert_eq!(config.workflows_dir.unwrap(), PathBuf::from("/workflows"));
        // Token list is trimmed: "tok1, tok2" → ["tok1", "tok2"].
        assert_eq!(config.auth.tokens, vec!["tok1", "tok2"]);
    }

    // Without --persistence, the backend is inferred from the URL scheme.
    #[test]
    fn infer_postgres_from_url() {
        let cli = Cli {
            config: PathBuf::from("/nonexistent"),
            grpc_addr: None,
            http_addr: None,
            persistence: None,
            db_url: Some("postgres://localhost/wfe".to_string()),
            queue: None,
            queue_url: None,
            search_url: None,
            workflows_dir: None,
            auth_tokens: None,
        };
        let config = load(&cli);
        assert!(matches!(config.persistence, PersistenceConfig::Postgres { .. }));
    }

    // ── Security regression tests ──

    #[test]
    #[should_panic(expected = "failed to parse config file")]
    fn security_malformed_config_panics() {
        // HIGH-19: Malformed config must NOT silently fall back to defaults.
        let tmp = tempfile::NamedTempFile::new().unwrap();
        std::fs::write(tmp.path(), "this is not { valid toml @@@@").unwrap();
        let cli = Cli {
            config: tmp.path().to_path_buf(),
            grpc_addr: None,
            http_addr: None,
            persistence: None,
            db_url: None,
            queue: None,
            queue_url: None,
            search_url: None,
            workflows_dir: None,
            auth_tokens: None,
        };
        load(&cli);
    }

    // data_mapping carries JSONPath-style extraction rules per trigger.
    #[test]
    fn trigger_data_mapping() {
        let toml = r#"
[[triggers]]
source = "github"
event = "push"
workflow_id = "ci"
version = 1

[triggers.data_mapping]
repo = "$.repository.full_name"
commit = "$.head_commit.id"
"#;
        let config: WebhookConfig = toml::from_str(toml).unwrap();
        assert_eq!(config.triggers[0].data_mapping.len(), 2);
        assert_eq!(config.triggers[0].data_mapping["repo"], "$.repository.full_name");
    }
}
|
||||||
862
wfe-server/src/grpc.rs
Normal file
862
wfe-server/src/grpc.rs
Normal file
@@ -0,0 +1,862 @@
|
|||||||
|
use std::collections::{BTreeMap, HashMap};
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use tonic::{Request, Response, Status};
|
||||||
|
use wfe_server_protos::wfe::v1::*;
|
||||||
|
use wfe_server_protos::wfe::v1::wfe_server::Wfe;
|
||||||
|
|
||||||
|
/// gRPC implementation of the `Wfe` service.
pub struct WfeService {
    // Core workflow engine handle (register/start/query/terminate).
    host: Arc<wfe::WorkflowHost>,
    // Fan-out bus for lifecycle events consumed by WatchLifecycle streams.
    lifecycle_bus: Arc<crate::lifecycle_bus::BroadcastLifecyclePublisher>,
    // In-process log history + live broadcast, used by StreamLogs.
    log_store: Arc<crate::log_store::LogStore>,
    // Optional OpenSearch-backed index; None makes SearchLogs return UNAVAILABLE.
    log_search: Option<Arc<crate::log_search::LogSearchIndex>>,
}
|
||||||
|
|
||||||
|
impl WfeService {
|
||||||
|
pub fn new(
|
||||||
|
host: Arc<wfe::WorkflowHost>,
|
||||||
|
lifecycle_bus: Arc<crate::lifecycle_bus::BroadcastLifecyclePublisher>,
|
||||||
|
log_store: Arc<crate::log_store::LogStore>,
|
||||||
|
) -> Self {
|
||||||
|
Self { host, lifecycle_bus, log_store, log_search: None }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_log_search(mut self, index: Arc<crate::log_search::LogSearchIndex>) -> Self {
|
||||||
|
self.log_search = Some(index);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tonic::async_trait]
impl Wfe for WfeService {
    // ── Definitions ──────────────────────────────────────────────────

    /// Compile the YAML payload into one or more workflow definitions and
    /// register each (plus its step factories) with the host.
    async fn register_workflow(
        &self,
        request: Request<RegisterWorkflowRequest>,
    ) -> Result<Response<RegisterWorkflowResponse>, Status> {
        let req = request.into_inner();

        // Flatten the string→string config map into JSON values for the
        // YAML compiler.
        let config: HashMap<String, serde_json::Value> = req
            .config
            .into_iter()
            .map(|(k, v)| (k, serde_json::Value::String(v)))
            .collect();

        // Compilation errors surface as INVALID_ARGUMENT to the caller.
        let workflows = wfe_yaml::load_workflow_from_str(&req.yaml, &config)
            .map_err(|e| Status::invalid_argument(format!("YAML compilation failed: {e}")))?;

        let mut definitions = Vec::new();

        for compiled in workflows {
            // Step factories are registered before the definition so the
            // host can resolve step implementations when instances start.
            for (key, factory) in compiled.step_factories {
                self.host.register_step_factory(&key, factory).await;
            }

            let id = compiled.definition.id.clone();
            let version = compiled.definition.version;
            let step_count = compiled.definition.steps.len() as u32;

            self.host
                .register_workflow_definition(compiled.definition)
                .await;

            definitions.push(RegisteredDefinition {
                definition_id: id,
                version,
                step_count,
            });
        }

        Ok(Response::new(RegisterWorkflowResponse { definitions }))
    }

    /// Stub: always returns an empty list until the host grows a
    /// `list_definitions()` API.
    async fn list_definitions(
        &self,
        _request: Request<ListDefinitionsRequest>,
    ) -> Result<Response<ListDefinitionsResponse>, Status> {
        // TODO: add list_definitions() to WorkflowHost
        Ok(Response::new(ListDefinitionsResponse {
            definitions: vec![],
        }))
    }

    // ── Instances ────────────────────────────────────────────────────

    /// Start a new instance of a registered definition; missing `data`
    /// defaults to an empty JSON object.
    async fn start_workflow(
        &self,
        request: Request<StartWorkflowRequest>,
    ) -> Result<Response<StartWorkflowResponse>, Status> {
        let req = request.into_inner();

        let data = req
            .data
            .map(struct_to_json)
            .unwrap_or_else(|| serde_json::json!({}));

        let workflow_id = self
            .host
            .start_workflow(&req.definition_id, req.version, data)
            .await
            .map_err(|e| Status::internal(format!("failed to start workflow: {e}")))?;

        Ok(Response::new(StartWorkflowResponse { workflow_id }))
    }

    /// Fetch a single workflow instance; any host error maps to NOT_FOUND.
    async fn get_workflow(
        &self,
        request: Request<GetWorkflowRequest>,
    ) -> Result<Response<GetWorkflowResponse>, Status> {
        let req = request.into_inner();

        let instance = self
            .host
            .get_workflow(&req.workflow_id)
            .await
            .map_err(|e| Status::not_found(format!("workflow not found: {e}")))?;

        Ok(Response::new(GetWorkflowResponse {
            instance: Some(workflow_to_proto(&instance)),
        }))
    }

    /// Cancel (terminate) a running workflow.
    async fn cancel_workflow(
        &self,
        request: Request<CancelWorkflowRequest>,
    ) -> Result<Response<CancelWorkflowResponse>, Status> {
        let req = request.into_inner();

        self.host
            .terminate_workflow(&req.workflow_id)
            .await
            .map_err(|e| Status::internal(format!("failed to cancel: {e}")))?;

        Ok(Response::new(CancelWorkflowResponse {}))
    }

    /// Suspend a running workflow.
    async fn suspend_workflow(
        &self,
        request: Request<SuspendWorkflowRequest>,
    ) -> Result<Response<SuspendWorkflowResponse>, Status> {
        let req = request.into_inner();

        self.host
            .suspend_workflow(&req.workflow_id)
            .await
            .map_err(|e| Status::internal(format!("failed to suspend: {e}")))?;

        Ok(Response::new(SuspendWorkflowResponse {}))
    }

    /// Resume a suspended workflow.
    async fn resume_workflow(
        &self,
        request: Request<ResumeWorkflowRequest>,
    ) -> Result<Response<ResumeWorkflowResponse>, Status> {
        let req = request.into_inner();

        self.host
            .resume_workflow(&req.workflow_id)
            .await
            .map_err(|e| Status::internal(format!("failed to resume: {e}")))?;

        Ok(Response::new(ResumeWorkflowResponse {}))
    }

    /// Stub: returns no results until wired to the SearchIndex.
    async fn search_workflows(
        &self,
        _request: Request<SearchWorkflowsRequest>,
    ) -> Result<Response<SearchWorkflowsResponse>, Status> {
        // TODO: implement with SearchIndex
        Ok(Response::new(SearchWorkflowsResponse {
            results: vec![],
            total: 0,
        }))
    }

    // ── Events ───────────────────────────────────────────────────────

    /// Publish an external event into the engine (wakes workflows waiting
    /// on `event_name`/`event_key`); missing data defaults to `{}`.
    async fn publish_event(
        &self,
        request: Request<PublishEventRequest>,
    ) -> Result<Response<PublishEventResponse>, Status> {
        let req = request.into_inner();

        let data = req
            .data
            .map(struct_to_json)
            .unwrap_or_else(|| serde_json::json!({}));

        self.host
            .publish_event(&req.event_name, &req.event_key, data)
            .await
            .map_err(|e| Status::internal(format!("failed to publish event: {e}")))?;

        // NOTE(review): event_id is always empty — the host call above does
        // not return an id here; confirm whether the proto field is meant to
        // be populated.
        Ok(Response::new(PublishEventResponse {
            event_id: String::new(),
        }))
    }

    // ── Streaming (stubs for now) ────────────────────────────────────

    type WatchLifecycleStream =
        tokio_stream::wrappers::ReceiverStream<Result<LifecycleEvent, Status>>;

    /// Server-stream lifecycle events, optionally filtered to one workflow.
    /// Slow consumers that lag behind the broadcast buffer skip events
    /// (logged as a warning) rather than stalling the bus.
    async fn watch_lifecycle(
        &self,
        request: Request<WatchLifecycleRequest>,
    ) -> Result<Response<Self::WatchLifecycleStream>, Status> {
        let req = request.into_inner();
        // Empty workflow_id means "all workflows".
        let filter_workflow_id = if req.workflow_id.is_empty() {
            None
        } else {
            Some(req.workflow_id)
        };

        let mut broadcast_rx = self.lifecycle_bus.subscribe();
        let (tx, rx) = tokio::sync::mpsc::channel(256);

        // Bridge broadcast → per-client mpsc so each client has its own
        // backpressure and disconnect handling.
        tokio::spawn(async move {
            loop {
                match broadcast_rx.recv().await {
                    Ok(event) => {
                        // Apply workflow_id filter.
                        if let Some(ref filter) = filter_workflow_id {
                            if event.workflow_instance_id != *filter {
                                continue;
                            }
                        }
                        let proto_event = lifecycle_event_to_proto(&event);
                        if tx.send(Ok(proto_event)).await.is_err() {
                            break; // Client disconnected.
                        }
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
                        tracing::warn!(lagged = n, "lifecycle watcher lagged, skipping events");
                        continue;
                    }
                    Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
                }
            }
        });

        Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new(rx)))
    }

    type StreamLogsStream = tokio_stream::wrappers::ReceiverStream<Result<LogEntry, Status>>;

    /// Server-stream logs for one workflow: replay stored history first,
    /// then (in follow mode) continue with live entries.
    async fn stream_logs(
        &self,
        request: Request<StreamLogsRequest>,
    ) -> Result<Response<Self::StreamLogsStream>, Status> {
        let req = request.into_inner();
        let workflow_id = req.workflow_id.clone();
        // Empty step_name means "all steps".
        let step_name_filter = if req.step_name.is_empty() {
            None
        } else {
            Some(req.step_name)
        };

        let (tx, rx) = tokio::sync::mpsc::channel(256);
        let log_store = self.log_store.clone();

        tokio::spawn(async move {
            // 1. Replay history first.
            let history = log_store.get_history(&workflow_id, None);
            for chunk in history {
                if let Some(ref filter) = step_name_filter {
                    if chunk.step_name != *filter {
                        continue;
                    }
                }
                let entry = log_chunk_to_proto(&chunk);
                if tx.send(Ok(entry)).await.is_err() {
                    return; // Client disconnected.
                }
            }

            // 2. If follow mode, switch to live broadcast.
            // NOTE(review): entries published between get_history() and
            // subscribe() below are neither in the replay nor the live feed —
            // confirm whether LogStore guards that gap.
            if req.follow {
                let mut broadcast_rx = log_store.subscribe(&workflow_id);
                loop {
                    match broadcast_rx.recv().await {
                        Ok(chunk) => {
                            if let Some(ref filter) = step_name_filter {
                                if chunk.step_name != *filter {
                                    continue;
                                }
                            }
                            let entry = log_chunk_to_proto(&chunk);
                            if tx.send(Ok(entry)).await.is_err() {
                                break;
                            }
                        }
                        Err(tokio::sync::broadcast::error::RecvError::Lagged(n)) => {
                            tracing::warn!(lagged = n, "log stream lagged");
                            continue;
                        }
                        Err(tokio::sync::broadcast::error::RecvError::Closed) => break,
                    }
                }
            }
            // If not follow mode, the stream ends after history replay.
        });

        Ok(Response::new(tokio_stream::wrappers::ReceiverStream::new(rx)))
    }

    // ── Search ───────────────────────────────────────────────────────

    /// Full-text log search via the optional OpenSearch index; returns
    /// UNAVAILABLE when no index is configured.
    async fn search_logs(
        &self,
        request: Request<SearchLogsRequest>,
    ) -> Result<Response<SearchLogsResponse>, Status> {
        let Some(ref search) = self.log_search else {
            return Err(Status::unavailable("log search not configured — set --search-url"));
        };

        let req = request.into_inner();
        // Empty strings mean "no filter" for workflow/step.
        let workflow_id = if req.workflow_id.is_empty() { None } else { Some(req.workflow_id.as_str()) };
        let step_name = if req.step_name.is_empty() { None } else { Some(req.step_name.as_str()) };
        let stream_filter = match req.stream_filter {
            x if x == LogStream::Stdout as i32 => Some("stdout"),
            x if x == LogStream::Stderr as i32 => Some("stderr"),
            _ => None,
        };
        // Page size defaults to 50 when unset (proto3 zero value).
        let take = if req.take == 0 { 50 } else { req.take };

        let (hits, total) = search
            .search(&req.query, workflow_id, step_name, stream_filter, req.skip, take)
            .await
            .map_err(|e| Status::internal(format!("search failed: {e}")))?;

        let results = hits
            .into_iter()
            .map(|h| {
                // Unknown stream strings map to Unspecified.
                let stream = match h.stream.as_str() {
                    "stdout" => LogStream::Stdout as i32,
                    "stderr" => LogStream::Stderr as i32,
                    _ => LogStream::Unspecified as i32,
                };
                LogSearchResult {
                    workflow_id: h.workflow_id,
                    definition_id: h.definition_id,
                    step_name: h.step_name,
                    line: h.line,
                    stream,
                    timestamp: Some(datetime_to_timestamp(&h.timestamp)),
                }
            })
            .collect();

        Ok(Response::new(SearchLogsResponse { results, total }))
    }
}
|
||||||
|
|
||||||
|
// ── Conversion helpers ──────────────────────────────────────────────
|
||||||
|
|
||||||
|
fn struct_to_json(s: prost_types::Struct) -> serde_json::Value {
|
||||||
|
let map: serde_json::Map<String, serde_json::Value> = s
|
||||||
|
.fields
|
||||||
|
.into_iter()
|
||||||
|
.map(|(k, v)| (k, prost_value_to_json(v)))
|
||||||
|
.collect();
|
||||||
|
serde_json::Value::Object(map)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Recursively convert a protobuf `Value` into a `serde_json::Value`.
fn prost_value_to_json(v: prost_types::Value) -> serde_json::Value {
    use prost_types::value::Kind;
    match v.kind {
        Some(Kind::NullValue(_)) => serde_json::Value::Null,
        Some(Kind::NumberValue(n)) => serde_json::json!(n),
        Some(Kind::StringValue(s)) => serde_json::Value::String(s),
        Some(Kind::BoolValue(b)) => serde_json::Value::Bool(b),
        Some(Kind::StructValue(s)) => struct_to_json(s),
        Some(Kind::ListValue(l)) => {
            serde_json::Value::Array(l.values.into_iter().map(prost_value_to_json).collect())
        }
        // A Value with no kind set is treated as null.
        None => serde_json::Value::Null,
    }
}
|
||||||
|
|
||||||
|
fn json_to_struct(v: &serde_json::Value) -> prost_types::Struct {
|
||||||
|
let fields: BTreeMap<String, prost_types::Value> = match v.as_object() {
|
||||||
|
Some(obj) => obj
|
||||||
|
.iter()
|
||||||
|
.map(|(k, v)| (k.clone(), json_to_prost_value(v)))
|
||||||
|
.collect(),
|
||||||
|
None => BTreeMap::new(),
|
||||||
|
};
|
||||||
|
prost_types::Struct { fields }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Recursively convert a `serde_json::Value` into a protobuf `Value`.
fn json_to_prost_value(v: &serde_json::Value) -> prost_types::Value {
    use prost_types::value::Kind;
    let kind = match v {
        serde_json::Value::Null => Kind::NullValue(0),
        serde_json::Value::Bool(b) => Kind::BoolValue(*b),
        // Proto `Value` numbers are f64 only: integers beyond 2^53 lose
        // precision, and any number `as_f64` cannot represent maps to 0.0.
        serde_json::Value::Number(n) => Kind::NumberValue(n.as_f64().unwrap_or(0.0)),
        serde_json::Value::String(s) => Kind::StringValue(s.clone()),
        serde_json::Value::Array(arr) => Kind::ListValue(prost_types::ListValue {
            values: arr.iter().map(json_to_prost_value).collect(),
        }),
        serde_json::Value::Object(_) => Kind::StructValue(json_to_struct(v)),
    };
    prost_types::Value { kind: Some(kind) }
}
|
||||||
|
|
||||||
|
/// Convert a core log chunk into the proto `LogEntry`.
fn log_chunk_to_proto(chunk: &wfe_core::traits::LogChunk) -> LogEntry {
    use wfe_core::traits::LogStreamType;
    let stream = match chunk.stream {
        LogStreamType::Stdout => LogStream::Stdout as i32,
        LogStreamType::Stderr => LogStream::Stderr as i32,
    };
    LogEntry {
        workflow_id: chunk.workflow_id.clone(),
        step_name: chunk.step_name.clone(),
        // NOTE(review): `as u32` wraps for negative/oversized ids — assumes
        // step ids always fit in u32; confirm against wfe-core's id type.
        step_id: chunk.step_id as u32,
        stream,
        data: chunk.data.clone(),
        timestamp: Some(datetime_to_timestamp(&chunk.timestamp)),
    }
}
|
||||||
|
|
||||||
|
/// Map a core lifecycle event into its proto representation.
///
/// Step-level variants carry step_id/step_name, `Error` carries a message;
/// for the remaining variants the unused proto fields are filled with
/// `0`/empty-string placeholders.
fn lifecycle_event_to_proto(e: &wfe_core::models::LifecycleEvent) -> LifecycleEvent {
    use wfe_core::models::LifecycleEventType as LET;
    // Proto enum — prost strips the LIFECYCLE_EVENT_TYPE_ prefix.
    use wfe_server_protos::wfe::v1::LifecycleEventType as PLET;
    let (event_type, step_id, step_name, error_message) = match &e.event_type {
        LET::Started => (PLET::Started as i32, 0, String::new(), String::new()),
        LET::Completed => (PLET::Completed as i32, 0, String::new(), String::new()),
        LET::Terminated => (PLET::Terminated as i32, 0, String::new(), String::new()),
        LET::Suspended => (PLET::Suspended as i32, 0, String::new(), String::new()),
        LET::Resumed => (PLET::Resumed as i32, 0, String::new(), String::new()),
        LET::Error { message } => (PLET::Error as i32, 0, String::new(), message.clone()),
        LET::StepStarted { step_id, step_name } => (PLET::StepStarted as i32, *step_id as u32, step_name.clone().unwrap_or_default(), String::new()),
        LET::StepCompleted { step_id, step_name } => (PLET::StepCompleted as i32, *step_id as u32, step_name.clone().unwrap_or_default(), String::new()),
    };
    LifecycleEvent {
        event_time: Some(datetime_to_timestamp(&e.event_time_utc)),
        workflow_id: e.workflow_instance_id.clone(),
        definition_id: e.workflow_definition_id.clone(),
        version: e.version,
        event_type,
        step_id,
        step_name,
        error_message,
    }
}
|
||||||
|
|
||||||
|
/// Convert a chrono UTC datetime into a protobuf `Timestamp`.
fn datetime_to_timestamp(dt: &chrono::DateTime<chrono::Utc>) -> prost_types::Timestamp {
    let seconds = dt.timestamp();
    // Safe narrowing: subsecond nanos are always < 1_000_000_000 < i32::MAX.
    let nanos = dt.timestamp_subsec_nanos() as i32;
    prost_types::Timestamp { seconds, nanos }
}
|
||||||
|
|
||||||
|
fn workflow_to_proto(w: &wfe_core::models::WorkflowInstance) -> WorkflowInstance {
|
||||||
|
WorkflowInstance {
|
||||||
|
id: w.id.clone(),
|
||||||
|
definition_id: w.workflow_definition_id.clone(),
|
||||||
|
version: w.version,
|
||||||
|
description: w.description.clone().unwrap_or_default(),
|
||||||
|
reference: w.reference.clone().unwrap_or_default(),
|
||||||
|
status: match w.status {
|
||||||
|
wfe_core::models::WorkflowStatus::Runnable => WorkflowStatus::Runnable as i32,
|
||||||
|
wfe_core::models::WorkflowStatus::Suspended => WorkflowStatus::Suspended as i32,
|
||||||
|
wfe_core::models::WorkflowStatus::Complete => WorkflowStatus::Complete as i32,
|
||||||
|
wfe_core::models::WorkflowStatus::Terminated => WorkflowStatus::Terminated as i32,
|
||||||
|
},
|
||||||
|
data: Some(json_to_struct(&w.data)),
|
||||||
|
create_time: Some(datetime_to_timestamp(&w.create_time)),
|
||||||
|
complete_time: w.complete_time.as_ref().map(datetime_to_timestamp),
|
||||||
|
execution_pointers: w
|
||||||
|
.execution_pointers
|
||||||
|
.iter()
|
||||||
|
.map(pointer_to_proto)
|
||||||
|
.collect(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pointer_to_proto(p: &wfe_core::models::ExecutionPointer) -> ExecutionPointer {
|
||||||
|
use wfe_core::models::PointerStatus as PS;
|
||||||
|
let status = match p.status {
|
||||||
|
PS::Pending | PS::PendingPredecessor => PointerStatus::Pending as i32,
|
||||||
|
PS::Running => PointerStatus::Running as i32,
|
||||||
|
PS::Complete => PointerStatus::Complete as i32,
|
||||||
|
PS::Sleeping => PointerStatus::Sleeping as i32,
|
||||||
|
PS::WaitingForEvent => PointerStatus::WaitingForEvent as i32,
|
||||||
|
PS::Failed => PointerStatus::Failed as i32,
|
||||||
|
PS::Skipped => PointerStatus::Skipped as i32,
|
||||||
|
PS::Compensated | PS::Cancelled => PointerStatus::Cancelled as i32,
|
||||||
|
};
|
||||||
|
ExecutionPointer {
|
||||||
|
id: p.id.clone(),
|
||||||
|
step_id: p.step_id as u32,
|
||||||
|
step_name: p.step_name.clone().unwrap_or_default(),
|
||||||
|
status,
|
||||||
|
start_time: p.start_time.as_ref().map(datetime_to_timestamp),
|
||||||
|
end_time: p.end_time.as_ref().map(datetime_to_timestamp),
|
||||||
|
retry_count: p.retry_count,
|
||||||
|
active: p.active,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // ── JSON ⇄ prost Struct/Value conversion tests ───────────────────

    // Object with every JSON scalar/container kind survives a full
    // json → prost Struct → json round-trip unchanged.
    #[test]
    fn struct_to_json_roundtrip() {
        let original = serde_json::json!({
            "name": "test",
            "count": 42.0,
            "active": true,
            "tags": ["a", "b"],
            "nested": { "key": "value" }
        });
        let proto_struct = json_to_struct(&original);
        let back = struct_to_json(proto_struct);
        assert_eq!(original, back);
    }

    #[test]
    fn json_null_roundtrip() {
        let v = serde_json::Value::Null;
        let pv = json_to_prost_value(&v);
        let back = prost_value_to_json(pv);
        assert_eq!(back, serde_json::Value::Null);
    }

    #[test]
    fn json_string_roundtrip() {
        let v = serde_json::Value::String("hello".to_string());
        let pv = json_to_prost_value(&v);
        let back = prost_value_to_json(pv);
        assert_eq!(back, v);
    }

    #[test]
    fn json_bool_roundtrip() {
        let v = serde_json::Value::Bool(true);
        let pv = json_to_prost_value(&v);
        let back = prost_value_to_json(pv);
        assert_eq!(back, v);
    }

    // prost Values store all numbers as f64, so a float round-trips exactly.
    #[test]
    fn json_number_roundtrip() {
        let v = serde_json::json!(3.14);
        let pv = json_to_prost_value(&v);
        let back = prost_value_to_json(pv);
        assert_eq!(back, v);
    }

    // Heterogeneous array, including a null element.
    #[test]
    fn json_array_roundtrip() {
        let v = serde_json::json!(["a", 1.0, true, null]);
        let pv = json_to_prost_value(&v);
        let back = prost_value_to_json(pv);
        assert_eq!(back, v);
    }

    #[test]
    fn empty_struct_roundtrip() {
        let v = serde_json::json!({});
        let proto_struct = json_to_struct(&v);
        let back = struct_to_json(proto_struct);
        assert_eq!(back, v);
    }

    // A prost Value with no kind set (possible on the wire) maps to JSON null.
    #[test]
    fn prost_value_none_kind() {
        let v = prost_types::Value { kind: None };
        assert_eq!(prost_value_to_json(v), serde_json::Value::Null);
    }

    // Non-object input to json_to_struct degrades to an empty Struct
    // rather than panicking.
    #[test]
    fn json_to_struct_from_non_object() {
        let v = serde_json::json!("not an object");
        let s = json_to_struct(&v);
        assert!(s.fields.is_empty());
    }

    #[test]
    fn datetime_to_timestamp_conversion() {
        let dt = chrono::DateTime::parse_from_rfc3339("2026-03-29T12:00:00Z")
            .unwrap()
            .with_timezone(&chrono::Utc);
        let ts = datetime_to_timestamp(&dt);
        assert_eq!(ts.seconds, dt.timestamp());
        assert_eq!(ts.nanos, 0);
    }

    // Every core WorkflowStatus variant maps to its proto counterpart.
    #[test]
    fn workflow_status_mapping() {
        use wfe_core::models::{WorkflowInstance as WI, WorkflowStatus as WS};
        let mut w = WI::new("test", 1, serde_json::json!({}));

        w.status = WS::Runnable;
        let p = workflow_to_proto(&w);
        assert_eq!(p.status, WorkflowStatus::Runnable as i32);

        w.status = WS::Complete;
        let p = workflow_to_proto(&w);
        assert_eq!(p.status, WorkflowStatus::Complete as i32);

        w.status = WS::Suspended;
        let p = workflow_to_proto(&w);
        assert_eq!(p.status, WorkflowStatus::Suspended as i32);

        w.status = WS::Terminated;
        let p = workflow_to_proto(&w);
        assert_eq!(p.status, WorkflowStatus::Terminated as i32);
    }

    // Pointer statuses with a direct proto counterpart map 1:1.
    // (PendingPredecessor/Compensated folding is exercised implicitly by
    // pointer_to_proto's match arms.)
    #[test]
    fn pointer_status_mapping() {
        use wfe_core::models::{ExecutionPointer as EP, PointerStatus as PS};
        let mut p = EP::new(0);

        p.status = PS::Pending;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Pending as i32);

        p.status = PS::Running;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Running as i32);

        p.status = PS::Complete;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Complete as i32);

        p.status = PS::Sleeping;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Sleeping as i32);

        p.status = PS::WaitingForEvent;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::WaitingForEvent as i32);

        p.status = PS::Failed;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Failed as i32);

        p.status = PS::Skipped;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Skipped as i32);

        p.status = PS::Cancelled;
        assert_eq!(pointer_to_proto(&p).status, PointerStatus::Cancelled as i32);
    }

    // Fresh instance: create_time is set, complete_time is not, and the
    // data payload survives conversion.
    #[test]
    fn workflow_to_proto_basic() {
        let w = wfe_core::models::WorkflowInstance::new("my-wf", 1, serde_json::json!({"key": "val"}));
        let p = workflow_to_proto(&w);
        assert_eq!(p.definition_id, "my-wf");
        assert_eq!(p.version, 1);
        assert!(p.create_time.is_some());
        assert!(p.complete_time.is_none());
        let data = struct_to_json(p.data.unwrap());
        assert_eq!(data["key"], "val");
    }

    // ── gRPC integration tests with real WorkflowHost ────────────────

    // Spin up a WfeService backed by a real (in-memory) WorkflowHost,
    // an in-process lifecycle bus, and an empty log store.
    async fn make_test_service() -> WfeService {
        use wfe::WorkflowHostBuilder;
        use wfe_core::test_support::{
            InMemoryLockProvider, InMemoryPersistenceProvider, InMemoryQueueProvider,
        };

        let host = WorkflowHostBuilder::new()
            .use_persistence(std::sync::Arc::new(InMemoryPersistenceProvider::new())
                as std::sync::Arc<dyn wfe_core::traits::PersistenceProvider>)
            .use_lock_provider(std::sync::Arc::new(InMemoryLockProvider::new())
                as std::sync::Arc<dyn wfe_core::traits::DistributedLockProvider>)
            .use_queue_provider(std::sync::Arc::new(InMemoryQueueProvider::new())
                as std::sync::Arc<dyn wfe_core::traits::QueueProvider>)
            .build()
            .unwrap();

        host.start().await.unwrap();

        let lifecycle_bus = std::sync::Arc::new(crate::lifecycle_bus::BroadcastLifecyclePublisher::new(64));
        let log_store = std::sync::Arc::new(crate::log_store::LogStore::new());

        WfeService::new(std::sync::Arc::new(host), lifecycle_bus, log_store)
    }

    // Happy path: register a YAML workflow, start it, fetch it back.
    #[tokio::test]
    async fn rpc_register_and_start_workflow() {
        let svc = make_test_service().await;

        // Register a workflow.
        let req = Request::new(RegisterWorkflowRequest {
            yaml: r#"
workflow:
  id: test-wf
  version: 1
  steps:
    - name: hello
      type: shell
      config:
        run: echo hi
"#.to_string(),
            config: Default::default(),
        });
        let resp = svc.register_workflow(req).await.unwrap().into_inner();
        assert_eq!(resp.definitions.len(), 1);
        assert_eq!(resp.definitions[0].definition_id, "test-wf");
        assert_eq!(resp.definitions[0].version, 1);
        assert_eq!(resp.definitions[0].step_count, 1);

        // Start the workflow.
        let req = Request::new(StartWorkflowRequest {
            definition_id: "test-wf".to_string(),
            version: 1,
            data: None,
        });
        let resp = svc.start_workflow(req).await.unwrap().into_inner();
        assert!(!resp.workflow_id.is_empty());

        // Get the workflow.
        let req = Request::new(GetWorkflowRequest {
            workflow_id: resp.workflow_id.clone(),
        });
        let resp = svc.get_workflow(req).await.unwrap().into_inner();
        let instance = resp.instance.unwrap();
        assert_eq!(instance.definition_id, "test-wf");
        assert_eq!(instance.status, WorkflowStatus::Runnable as i32);
    }

    // Malformed YAML is rejected with InvalidArgument, not Internal.
    #[tokio::test]
    async fn rpc_register_invalid_yaml() {
        let svc = make_test_service().await;
        let req = Request::new(RegisterWorkflowRequest {
            yaml: "not: valid: yaml: {{{}}}".to_string(),
            config: Default::default(),
        });
        let err = svc.register_workflow(req).await.unwrap_err();
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
    }

    // NOTE(review): starting an unregistered definition surfaces as Internal,
    // not NotFound — confirm this is the intended error mapping.
    #[tokio::test]
    async fn rpc_start_nonexistent_workflow() {
        let svc = make_test_service().await;
        let req = Request::new(StartWorkflowRequest {
            definition_id: "nonexistent".to_string(),
            version: 1,
            data: None,
        });
        let err = svc.start_workflow(req).await.unwrap_err();
        assert_eq!(err.code(), tonic::Code::Internal);
    }

    #[tokio::test]
    async fn rpc_get_nonexistent_workflow() {
        let svc = make_test_service().await;
        let req = Request::new(GetWorkflowRequest {
            workflow_id: "nonexistent".to_string(),
        });
        let err = svc.get_workflow(req).await.unwrap_err();
        assert_eq!(err.code(), tonic::Code::NotFound);
    }

    // Cancel moves a running instance to Terminated.
    #[tokio::test]
    async fn rpc_cancel_workflow() {
        let svc = make_test_service().await;

        // Register + start.
        let req = Request::new(RegisterWorkflowRequest {
            yaml: "workflow:\n id: cancel-test\n version: 1\n steps:\n - name: s\n type: shell\n config:\n run: echo ok\n".to_string(),
            config: Default::default(),
        });
        svc.register_workflow(req).await.unwrap();

        let req = Request::new(StartWorkflowRequest {
            definition_id: "cancel-test".to_string(),
            version: 1,
            data: None,
        });
        let wf_id = svc.start_workflow(req).await.unwrap().into_inner().workflow_id;

        // Cancel it.
        let req = Request::new(CancelWorkflowRequest { workflow_id: wf_id.clone() });
        svc.cancel_workflow(req).await.unwrap();

        // Verify it's terminated.
        let req = Request::new(GetWorkflowRequest { workflow_id: wf_id });
        let instance = svc.get_workflow(req).await.unwrap().into_inner().instance.unwrap();
        assert_eq!(instance.status, WorkflowStatus::Terminated as i32);
    }

    // Suspend/resume cycle: Runnable → Suspended → Runnable.
    #[tokio::test]
    async fn rpc_suspend_resume_workflow() {
        let svc = make_test_service().await;

        let req = Request::new(RegisterWorkflowRequest {
            yaml: "workflow:\n id: sr-test\n version: 1\n steps:\n - name: s\n type: shell\n config:\n run: echo ok\n".to_string(),
            config: Default::default(),
        });
        svc.register_workflow(req).await.unwrap();

        let req = Request::new(StartWorkflowRequest {
            definition_id: "sr-test".to_string(),
            version: 1,
            data: None,
        });
        let wf_id = svc.start_workflow(req).await.unwrap().into_inner().workflow_id;

        // Suspend.
        let req = Request::new(SuspendWorkflowRequest { workflow_id: wf_id.clone() });
        svc.suspend_workflow(req).await.unwrap();

        let req = Request::new(GetWorkflowRequest { workflow_id: wf_id.clone() });
        let instance = svc.get_workflow(req).await.unwrap().into_inner().instance.unwrap();
        assert_eq!(instance.status, WorkflowStatus::Suspended as i32);

        // Resume.
        let req = Request::new(ResumeWorkflowRequest { workflow_id: wf_id.clone() });
        svc.resume_workflow(req).await.unwrap();

        let req = Request::new(GetWorkflowRequest { workflow_id: wf_id });
        let instance = svc.get_workflow(req).await.unwrap().into_inner().instance.unwrap();
        assert_eq!(instance.status, WorkflowStatus::Runnable as i32);
    }

    #[tokio::test]
    async fn rpc_publish_event() {
        let svc = make_test_service().await;
        let req = Request::new(PublishEventRequest {
            event_name: "test.event".to_string(),
            event_key: "key-1".to_string(),
            data: None,
        });
        // Should succeed even with no waiting workflows.
        svc.publish_event(req).await.unwrap();
    }

    // Without an OpenSearch backend wired in, SearchLogs reports Unavailable.
    #[tokio::test]
    async fn rpc_search_logs_not_configured() {
        let svc = make_test_service().await;
        let req = Request::new(SearchLogsRequest {
            query: "test".to_string(),
            ..Default::default()
        });
        let err = svc.search_logs(req).await.unwrap_err();
        assert_eq!(err.code(), tonic::Code::Unavailable);
    }

    #[tokio::test]
    async fn rpc_list_definitions_empty() {
        let svc = make_test_service().await;
        let req = Request::new(ListDefinitionsRequest {});
        let resp = svc.list_definitions(req).await.unwrap().into_inner();
        assert!(resp.definitions.is_empty());
    }

    #[tokio::test]
    async fn rpc_search_workflows_empty() {
        let svc = make_test_service().await;
        let req = Request::new(SearchWorkflowsRequest {
            query: "test".to_string(),
            ..Default::default()
        });
        let resp = svc.search_workflows(req).await.unwrap().into_inner();
        assert_eq!(resp.total, 0);
    }
}
|
||||||
125
wfe-server/src/lifecycle_bus.rs
Normal file
125
wfe-server/src/lifecycle_bus.rs
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
use async_trait::async_trait;
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
use wfe_core::models::LifecycleEvent;
|
||||||
|
use wfe_core::traits::LifecyclePublisher;
|
||||||
|
|
||||||
|
/// Broadcasts lifecycle events to multiple subscribers via tokio broadcast channels.
pub struct BroadcastLifecyclePublisher {
    // Send side of the broadcast channel; receivers are minted on demand
    // via `subscribe()`, so events published with no subscribers are dropped.
    sender: broadcast::Sender<LifecycleEvent>,
}
|
||||||
|
|
||||||
|
impl BroadcastLifecyclePublisher {
|
||||||
|
pub fn new(capacity: usize) -> Self {
|
||||||
|
let (sender, _) = broadcast::channel(capacity);
|
||||||
|
Self { sender }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn subscribe(&self) -> broadcast::Receiver<LifecycleEvent> {
|
||||||
|
self.sender.subscribe()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
impl LifecyclePublisher for BroadcastLifecyclePublisher {
    /// Fan the event out to all current subscribers.
    ///
    /// Always returns `Ok(())`: a broadcast send only fails when there are
    /// no active receivers, which is a normal condition for this bus.
    async fn publish(&self, event: LifecycleEvent) -> wfe_core::Result<()> {
        // Ignore send errors (no active subscribers).
        let _ = self.sender.send(event);
        Ok(())
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use wfe_core::models::LifecycleEventType;

    // A published event reaches a subscriber with its fields intact.
    #[tokio::test]
    async fn publish_and_receive() {
        let bus = BroadcastLifecyclePublisher::new(16);
        let mut rx = bus.subscribe();

        let event = LifecycleEvent::new("wf-1", "def-1", 1, LifecycleEventType::Started);
        bus.publish(event.clone()).await.unwrap();

        let received = rx.recv().await.unwrap();
        assert_eq!(received.workflow_instance_id, "wf-1");
        assert_eq!(received.event_type, LifecycleEventType::Started);
    }

    // Broadcast semantics: every subscriber gets its own copy of the event.
    #[tokio::test]
    async fn multiple_subscribers() {
        let bus = BroadcastLifecyclePublisher::new(16);
        let mut rx1 = bus.subscribe();
        let mut rx2 = bus.subscribe();

        bus.publish(LifecycleEvent::new("wf-1", "def-1", 1, LifecycleEventType::Completed))
            .await
            .unwrap();

        let e1 = rx1.recv().await.unwrap();
        let e2 = rx2.recv().await.unwrap();
        assert_eq!(e1.event_type, LifecycleEventType::Completed);
        assert_eq!(e2.event_type, LifecycleEventType::Completed);
    }

    // Publishing into an empty bus is a silent no-op, not an error.
    #[tokio::test]
    async fn no_subscribers_does_not_error() {
        let bus = BroadcastLifecyclePublisher::new(16);
        // No subscribers — should not panic.
        bus.publish(LifecycleEvent::new("wf-1", "def-1", 1, LifecycleEventType::Started))
            .await
            .unwrap();
    }

    // Step-level events carry their step id/name payload through the channel.
    #[tokio::test]
    async fn step_events_propagate() {
        let bus = BroadcastLifecyclePublisher::new(16);
        let mut rx = bus.subscribe();

        bus.publish(LifecycleEvent::new(
            "wf-1",
            "def-1",
            1,
            LifecycleEventType::StepStarted {
                step_id: 3,
                step_name: Some("build".to_string()),
            },
        ))
        .await
        .unwrap();

        let received = rx.recv().await.unwrap();
        assert_eq!(
            received.event_type,
            LifecycleEventType::StepStarted {
                step_id: 3,
                step_name: Some("build".to_string()),
            }
        );
    }

    // Error events keep their message payload.
    #[tokio::test]
    async fn error_events_include_message() {
        let bus = BroadcastLifecyclePublisher::new(16);
        let mut rx = bus.subscribe();

        bus.publish(LifecycleEvent::new(
            "wf-1",
            "def-1",
            1,
            LifecycleEventType::Error {
                message: "step failed".to_string(),
            },
        ))
        .await
        .unwrap();

        let received = rx.recv().await.unwrap();
        assert_eq!(
            received.event_type,
            LifecycleEventType::Error {
                message: "step failed".to_string(),
            }
        );
    }
}
|
||||||
529
wfe-server/src/log_search.rs
Normal file
529
wfe-server/src/log_search.rs
Normal file
@@ -0,0 +1,529 @@
|
|||||||
|
use chrono::{DateTime, Utc};
|
||||||
|
use opensearch::http::transport::Transport;
|
||||||
|
use opensearch::{IndexParts, OpenSearch, SearchParts};
|
||||||
|
use serde::{Deserialize, Serialize};
|
||||||
|
use serde_json::json;
|
||||||
|
use wfe_core::traits::{LogChunk, LogStreamType};
|
||||||
|
|
||||||
|
const LOG_INDEX: &str = "wfe-build-logs";
|
||||||
|
|
||||||
|
/// Document structure for a log line stored in OpenSearch.
#[derive(Debug, Serialize, Deserialize)]
struct LogDocument {
    // Workflow instance the line belongs to (indexed as keyword).
    workflow_id: String,
    // Workflow definition id (indexed as keyword).
    definition_id: String,
    // Zero-based step index within the workflow.
    step_id: usize,
    // Human-readable step name (indexed as keyword for exact filtering).
    step_name: String,
    // "stdout" or "stderr" — see `LogDocument::from_chunk`.
    stream: String,
    // Single log line, trailing whitespace stripped (full-text indexed).
    line: String,
    // RFC 3339 timestamp string (indexed as date).
    timestamp: String,
}
|
||||||
|
|
||||||
|
impl LogDocument {
|
||||||
|
fn from_chunk(chunk: &LogChunk) -> Self {
|
||||||
|
Self {
|
||||||
|
workflow_id: chunk.workflow_id.clone(),
|
||||||
|
definition_id: chunk.definition_id.clone(),
|
||||||
|
step_id: chunk.step_id,
|
||||||
|
step_name: chunk.step_name.clone(),
|
||||||
|
stream: match chunk.stream {
|
||||||
|
LogStreamType::Stdout => "stdout".to_string(),
|
||||||
|
LogStreamType::Stderr => "stderr".to_string(),
|
||||||
|
},
|
||||||
|
line: String::from_utf8_lossy(&chunk.data).trim_end().to_string(),
|
||||||
|
timestamp: chunk.timestamp.to_rfc3339(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Result from a log search query.
#[derive(Debug, Clone)]
pub struct LogSearchHit {
    // Workflow instance that produced the line.
    pub workflow_id: String,
    // Workflow definition id.
    pub definition_id: String,
    // Step that emitted the line.
    pub step_name: String,
    // The matched log line text.
    pub line: String,
    // "stdout" or "stderr".
    pub stream: String,
    // Parsed from the stored RFC 3339 string; falls back to `Utc::now()`
    // if the stored value is unparsable (see `LogSearchIndex::search`).
    pub timestamp: DateTime<Utc>,
}
|
||||||
|
|
||||||
|
/// OpenSearch-backed log search index.
pub struct LogSearchIndex {
    // Client over a single-node transport; see `LogSearchIndex::new`.
    client: OpenSearch,
}
|
||||||
|
|
||||||
|
impl LogSearchIndex {
    /// Build a client talking to a single OpenSearch node at `url`.
    ///
    /// # Errors
    /// Fails if the URL cannot be turned into a transport (e.g. malformed).
    /// No network I/O happens here — connectivity problems surface on the
    /// first request.
    pub fn new(url: &str) -> wfe_core::Result<Self> {
        let transport = Transport::single_node(url)
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;
        Ok(Self {
            client: OpenSearch::new(transport),
        })
    }

    /// Create the log index if it doesn't exist.
    ///
    /// Idempotent: a successful HEAD on the index short-circuits without
    /// attempting creation. Not race-free — two servers racing to create
    /// the index can make one of them error; TODO confirm whether that
    /// case needs tolerating "resource_already_exists".
    pub async fn ensure_index(&self) -> wfe_core::Result<()> {
        let exists = self
            .client
            .indices()
            .exists(opensearch::indices::IndicesExistsParts::Index(&[LOG_INDEX]))
            .send()
            .await
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;

        // HEAD returns 200 when the index is present.
        if exists.status_code().is_success() {
            return Ok(());
        }

        // Explicit mappings: keyword fields for exact filtering, `line` as
        // analyzed text for full-text search, `timestamp` as a date for sorting.
        let body = json!({
            "mappings": {
                "properties": {
                    "workflow_id": { "type": "keyword" },
                    "definition_id": { "type": "keyword" },
                    "step_id": { "type": "integer" },
                    "step_name": { "type": "keyword" },
                    "stream": { "type": "keyword" },
                    "line": { "type": "text", "analyzer": "standard" },
                    "timestamp": { "type": "date" }
                }
            }
        });

        let response = self
            .client
            .indices()
            .create(opensearch::indices::IndicesCreateParts::Index(LOG_INDEX))
            .body(body)
            .send()
            .await
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;

        if !response.status_code().is_success() {
            let text = response.text().await.unwrap_or_default();
            return Err(wfe_core::WfeError::Persistence(format!(
                "Failed to create log index: {text}"
            )));
        }

        tracing::info!(index = LOG_INDEX, "log search index created");
        Ok(())
    }

    /// Index a single log chunk.
    ///
    /// One document per chunk; documents become searchable after the
    /// index's refresh interval (no explicit refresh is requested here).
    ///
    /// # Errors
    /// Fails on transport errors or a non-2xx response from OpenSearch.
    pub async fn index_chunk(&self, chunk: &LogChunk) -> wfe_core::Result<()> {
        let doc = LogDocument::from_chunk(chunk);
        let body = serde_json::to_value(&doc)?;

        let response = self
            .client
            .index(IndexParts::Index(LOG_INDEX))
            .body(body)
            .send()
            .await
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;

        if !response.status_code().is_success() {
            let text = response.text().await.unwrap_or_default();
            return Err(wfe_core::WfeError::Persistence(format!(
                "failed to index log chunk: {text}"
            )));
        }

        Ok(())
    }

    /// Search log lines.
    ///
    /// `query` full-text-matches the `line` field (empty = match all);
    /// `workflow_id`, `step_name`, and `stream_filter` are exact-term
    /// filters. `skip`/`take` paginate via from/size (subject to
    /// OpenSearch's default result-window cap). Results are sorted by
    /// timestamp ascending.
    ///
    /// Returns the page of hits plus the total hit count reported by
    /// OpenSearch. Hits missing expected `_source` string fields are
    /// silently dropped; an unparsable timestamp falls back to `Utc::now()`.
    ///
    /// # Errors
    /// Fails on transport errors or a non-2xx search response.
    pub async fn search(
        &self,
        query: &str,
        workflow_id: Option<&str>,
        step_name: Option<&str>,
        stream_filter: Option<&str>,
        skip: u64,
        take: u64,
    ) -> wfe_core::Result<(Vec<LogSearchHit>, u64)> {
        // "must" clauses score (full-text); "filter" clauses are exact and unscored.
        let mut must_clauses = Vec::new();
        let mut filter_clauses = Vec::new();

        if !query.is_empty() {
            must_clauses.push(json!({
                "match": { "line": query }
            }));
        }

        if let Some(wf_id) = workflow_id {
            filter_clauses.push(json!({ "term": { "workflow_id": wf_id } }));
        }
        if let Some(sn) = step_name {
            filter_clauses.push(json!({ "term": { "step_name": sn } }));
        }
        if let Some(stream) = stream_filter {
            filter_clauses.push(json!({ "term": { "stream": stream } }));
        }

        // Degenerate case (no query, no filters) → match_all; otherwise a
        // bool query containing only the clause groups that are non-empty.
        let query_body = if must_clauses.is_empty() && filter_clauses.is_empty() {
            json!({ "match_all": {} })
        } else {
            let mut bool_q = serde_json::Map::new();
            if !must_clauses.is_empty() {
                bool_q.insert("must".to_string(), json!(must_clauses));
            }
            if !filter_clauses.is_empty() {
                bool_q.insert("filter".to_string(), json!(filter_clauses));
            }
            json!({ "bool": bool_q })
        };

        let body = json!({
            "query": query_body,
            "from": skip,
            "size": take,
            "sort": [{ "timestamp": "asc" }]
        });

        let response = self
            .client
            .search(SearchParts::Index(&[LOG_INDEX]))
            .body(body)
            .send()
            .await
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;

        if !response.status_code().is_success() {
            let text = response.text().await.unwrap_or_default();
            return Err(wfe_core::WfeError::Persistence(format!(
                "Log search failed: {text}"
            )));
        }

        let resp_body: serde_json::Value = response
            .json()
            .await
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)?;

        // hits.total.value is the tracked total (may be a lower bound when
        // OpenSearch caps total tracking — TODO confirm track_total_hits needs).
        let total = resp_body["hits"]["total"]["value"].as_u64().unwrap_or(0);
        let hits = resp_body["hits"]["hits"]
            .as_array()
            .cloned()
            .unwrap_or_default();

        let results = hits
            .iter()
            .filter_map(|hit| {
                // Any missing string field drops the hit via `?` inside filter_map.
                let src = &hit["_source"];
                Some(LogSearchHit {
                    workflow_id: src["workflow_id"].as_str()?.to_string(),
                    definition_id: src["definition_id"].as_str()?.to_string(),
                    step_name: src["step_name"].as_str()?.to_string(),
                    line: src["line"].as_str()?.to_string(),
                    stream: src["stream"].as_str()?.to_string(),
                    timestamp: src["timestamp"]
                        .as_str()
                        .and_then(|s| DateTime::parse_from_rfc3339(s).ok())
                        .map(|dt| dt.with_timezone(&Utc))
                        .unwrap_or_else(Utc::now),
                })
            })
            .collect();

        Ok((results, total))
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn log_document_from_chunk_stdout() {
|
||||||
|
let chunk = LogChunk {
|
||||||
|
workflow_id: "wf-1".to_string(),
|
||||||
|
definition_id: "ci".to_string(),
|
||||||
|
step_id: 0,
|
||||||
|
step_name: "build".to_string(),
|
||||||
|
stream: LogStreamType::Stdout,
|
||||||
|
data: b"compiling wfe-core\n".to_vec(),
|
||||||
|
timestamp: Utc::now(),
|
||||||
|
};
|
||||||
|
let doc = LogDocument::from_chunk(&chunk);
|
||||||
|
assert_eq!(doc.workflow_id, "wf-1");
|
||||||
|
assert_eq!(doc.stream, "stdout");
|
||||||
|
assert_eq!(doc.line, "compiling wfe-core");
|
||||||
|
assert_eq!(doc.step_name, "build");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn log_document_from_chunk_stderr() {
|
||||||
|
let chunk = LogChunk {
|
||||||
|
workflow_id: "wf-2".to_string(),
|
||||||
|
definition_id: "deploy".to_string(),
|
||||||
|
step_id: 1,
|
||||||
|
step_name: "test".to_string(),
|
||||||
|
stream: LogStreamType::Stderr,
|
||||||
|
data: b"warning: unused variable\n".to_vec(),
|
||||||
|
timestamp: Utc::now(),
|
||||||
|
};
|
||||||
|
let doc = LogDocument::from_chunk(&chunk);
|
||||||
|
assert_eq!(doc.stream, "stderr");
|
||||||
|
assert_eq!(doc.line, "warning: unused variable");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn log_document_trims_trailing_newline() {
|
||||||
|
let chunk = LogChunk {
|
||||||
|
workflow_id: "wf-1".to_string(),
|
||||||
|
definition_id: "ci".to_string(),
|
||||||
|
step_id: 0,
|
||||||
|
step_name: "build".to_string(),
|
||||||
|
stream: LogStreamType::Stdout,
|
||||||
|
data: b"line with newline\n".to_vec(),
|
||||||
|
timestamp: Utc::now(),
|
||||||
|
};
|
||||||
|
let doc = LogDocument::from_chunk(&chunk);
|
||||||
|
assert_eq!(doc.line, "line with newline");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn log_document_serializes_to_json() {
|
||||||
|
let chunk = LogChunk {
|
||||||
|
workflow_id: "wf-1".to_string(),
|
||||||
|
definition_id: "ci".to_string(),
|
||||||
|
step_id: 2,
|
||||||
|
step_name: "clippy".to_string(),
|
||||||
|
stream: LogStreamType::Stdout,
|
||||||
|
data: b"all good\n".to_vec(),
|
||||||
|
timestamp: Utc::now(),
|
||||||
|
};
|
||||||
|
let doc = LogDocument::from_chunk(&chunk);
|
||||||
|
let json = serde_json::to_value(&doc).unwrap();
|
||||||
|
assert_eq!(json["step_name"], "clippy");
|
||||||
|
assert_eq!(json["step_id"], 2);
|
||||||
|
assert!(json["timestamp"].is_string());
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── OpenSearch integration tests ────────────────────────────────
|
||||||
|
|
||||||
|
/// Return the OpenSearch base URL if an instance is reachable, else `None`
/// (callers use this to skip integration tests).
fn opensearch_url() -> Option<String> {
    let url = std::env::var("WFE_SEARCH_URL")
        .unwrap_or_else(|_| "http://localhost:9200".to_string());
    // Quick TCP probe to check if OpenSearch is reachable.
    let addr = url
        .strip_prefix("http://")
        .or_else(|| url.strip_prefix("https://"))
        .unwrap_or("localhost:9200");
    // BUG FIX: `SocketAddr` parsing only accepts IP literals, so the default
    // "localhost:9200" never parsed and the probe always reported unreachable,
    // silently skipping every integration test. `ToSocketAddrs` also resolves
    // hostnames.
    use std::net::ToSocketAddrs;
    let resolved = addr.to_socket_addrs().ok()?.next()?;
    match std::net::TcpStream::connect_timeout(&resolved, std::time::Duration::from_secs(1)) {
        Ok(_) => Some(url),
        Err(_) => None,
    }
}
|
||||||
|
|
||||||
|
fn make_test_chunk(
|
||||||
|
workflow_id: &str,
|
||||||
|
step_name: &str,
|
||||||
|
stream: LogStreamType,
|
||||||
|
line: &str,
|
||||||
|
) -> LogChunk {
|
||||||
|
LogChunk {
|
||||||
|
workflow_id: workflow_id.to_string(),
|
||||||
|
definition_id: "test-def".to_string(),
|
||||||
|
step_id: 0,
|
||||||
|
step_name: step_name.to_string(),
|
||||||
|
stream,
|
||||||
|
data: format!("{line}\n").into_bytes(),
|
||||||
|
timestamp: Utc::now(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Delete the test index to start clean.
|
||||||
|
async fn cleanup_index(url: &str) {
|
||||||
|
let client = reqwest::Client::new();
|
||||||
|
let _ = client
|
||||||
|
.delete(format!("{url}/{LOG_INDEX}"))
|
||||||
|
.send()
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn opensearch_ensure_index_creates_index() {
|
||||||
|
let Some(url) = opensearch_url() else {
|
||||||
|
eprintln!("SKIP: OpenSearch not available");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
|
||||||
|
let index = LogSearchIndex::new(&url).unwrap();
|
||||||
|
index.ensure_index().await.unwrap();
|
||||||
|
|
||||||
|
// Calling again should be idempotent.
|
||||||
|
index.ensure_index().await.unwrap();
|
||||||
|
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn opensearch_index_and_search_chunk() {
|
||||||
|
let Some(url) = opensearch_url() else {
|
||||||
|
eprintln!("SKIP: OpenSearch not available");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
|
||||||
|
let index = LogSearchIndex::new(&url).unwrap();
|
||||||
|
index.ensure_index().await.unwrap();
|
||||||
|
|
||||||
|
// Index some log chunks.
|
||||||
|
let chunk = make_test_chunk("wf-search-1", "build", LogStreamType::Stdout, "compiling wfe-core v1.5.0");
|
||||||
|
index.index_chunk(&chunk).await.unwrap();
|
||||||
|
|
||||||
|
let chunk = make_test_chunk("wf-search-1", "build", LogStreamType::Stderr, "warning: unused variable");
|
||||||
|
index.index_chunk(&chunk).await.unwrap();
|
||||||
|
|
||||||
|
let chunk = make_test_chunk("wf-search-1", "test", LogStreamType::Stdout, "test result: ok. 79 passed");
|
||||||
|
index.index_chunk(&chunk).await.unwrap();
|
||||||
|
|
||||||
|
// OpenSearch needs a refresh to make docs searchable.
|
||||||
|
let client = reqwest::Client::new();
|
||||||
|
client.post(format!("{url}/{LOG_INDEX}/_refresh")).send().await.unwrap();
|
||||||
|
|
||||||
|
// Search by text.
|
||||||
|
let (results, total) = index
|
||||||
|
.search("wfe-core", None, None, None, 0, 10)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(total >= 1, "expected at least 1 hit, got {total}");
|
||||||
|
assert!(results.iter().any(|r| r.line.contains("wfe-core")));
|
||||||
|
|
||||||
|
// Search by workflow_id filter.
|
||||||
|
let (results, _) = index
|
||||||
|
.search("", Some("wf-search-1"), None, None, 0, 10)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(results.len(), 3);
|
||||||
|
|
||||||
|
// Search by step_name filter.
|
||||||
|
let (results, _) = index
|
||||||
|
.search("", Some("wf-search-1"), Some("test"), None, 0, 10)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(results.len(), 1);
|
||||||
|
assert!(results[0].line.contains("79 passed"));
|
||||||
|
|
||||||
|
// Search by stream filter.
|
||||||
|
let (results, _) = index
|
||||||
|
.search("", Some("wf-search-1"), None, Some("stderr"), 0, 10)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(results.len(), 1);
|
||||||
|
assert!(results[0].line.contains("unused variable"));
|
||||||
|
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn opensearch_search_empty_index() {
|
||||||
|
let Some(url) = opensearch_url() else {
|
||||||
|
eprintln!("SKIP: OpenSearch not available");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
|
||||||
|
let index = LogSearchIndex::new(&url).unwrap();
|
||||||
|
index.ensure_index().await.unwrap();
|
||||||
|
|
||||||
|
let (results, total) = index
|
||||||
|
.search("nonexistent", None, None, None, 0, 10)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(total, 0);
|
||||||
|
assert!(results.is_empty());
|
||||||
|
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn opensearch_search_pagination() {
|
||||||
|
let Some(url) = opensearch_url() else {
|
||||||
|
eprintln!("SKIP: OpenSearch not available");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
|
||||||
|
let index = LogSearchIndex::new(&url).unwrap();
|
||||||
|
index.ensure_index().await.unwrap();
|
||||||
|
|
||||||
|
// Index 5 chunks.
|
||||||
|
for i in 0..5 {
|
||||||
|
let chunk = make_test_chunk("wf-page", "build", LogStreamType::Stdout, &format!("line {i}"));
|
||||||
|
index.index_chunk(&chunk).await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let client = reqwest::Client::new();
|
||||||
|
client.post(format!("{url}/{LOG_INDEX}/_refresh")).send().await.unwrap();
|
||||||
|
|
||||||
|
// Get first 2.
|
||||||
|
let (results, total) = index
|
||||||
|
.search("", Some("wf-page"), None, None, 0, 2)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(total, 5);
|
||||||
|
assert_eq!(results.len(), 2);
|
||||||
|
|
||||||
|
// Get next 2.
|
||||||
|
let (results, _) = index
|
||||||
|
.search("", Some("wf-page"), None, None, 2, 2)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(results.len(), 2);
|
||||||
|
|
||||||
|
// Get last 1.
|
||||||
|
let (results, _) = index
|
||||||
|
.search("", Some("wf-page"), None, None, 4, 2)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(results.len(), 1);
|
||||||
|
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
fn log_search_index_new_constructs_ok() {
    // Construction is lazy: an unreachable URL only fails on first use.
    assert!(LogSearchIndex::new("http://localhost:19876").is_ok());
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn opensearch_index_chunk_result_fields() {
|
||||||
|
let Some(url) = opensearch_url() else {
|
||||||
|
eprintln!("SKIP: OpenSearch not available");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
|
||||||
|
let index = LogSearchIndex::new(&url).unwrap();
|
||||||
|
index.ensure_index().await.unwrap();
|
||||||
|
|
||||||
|
let chunk = make_test_chunk("wf-fields", "clippy", LogStreamType::Stderr, "error: type mismatch");
|
||||||
|
index.index_chunk(&chunk).await.unwrap();
|
||||||
|
|
||||||
|
let client = reqwest::Client::new();
|
||||||
|
client.post(format!("{url}/{LOG_INDEX}/_refresh")).send().await.unwrap();
|
||||||
|
|
||||||
|
let (results, _) = index
|
||||||
|
.search("type mismatch", None, None, None, 0, 10)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(!results.is_empty());
|
||||||
|
let hit = &results[0];
|
||||||
|
assert_eq!(hit.workflow_id, "wf-fields");
|
||||||
|
assert_eq!(hit.definition_id, "test-def");
|
||||||
|
assert_eq!(hit.step_name, "clippy");
|
||||||
|
assert_eq!(hit.stream, "stderr");
|
||||||
|
assert!(hit.line.contains("type mismatch"));
|
||||||
|
|
||||||
|
cleanup_index(&url).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
203
wfe-server/src/log_store.rs
Normal file
203
wfe-server/src/log_store.rs
Normal file
@@ -0,0 +1,203 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use dashmap::DashMap;
|
||||||
|
use tokio::sync::broadcast;
|
||||||
|
use wfe_core::traits::log_sink::{LogChunk, LogSink};
|
||||||
|
|
||||||
|
/// Stores and broadcasts log chunks for workflow step executions.
|
||||||
|
///
|
||||||
|
/// Three tiers:
|
||||||
|
/// 1. **Live broadcast** — per-workflow broadcast channel for StreamLogs subscribers
|
||||||
|
/// 2. **In-memory history** — append-only buffer per (workflow_id, step_id) for replay
|
||||||
|
/// 3. **Search index** — OpenSearch log indexing via LogSearchIndex (optional)
|
||||||
|
pub struct LogStore {
|
||||||
|
/// Per-workflow broadcast channels for live streaming.
|
||||||
|
live: DashMap<String, broadcast::Sender<LogChunk>>,
|
||||||
|
/// In-memory history per (workflow_id, step_id).
|
||||||
|
history: DashMap<(String, usize), Vec<LogChunk>>,
|
||||||
|
/// Optional search index for log lines.
|
||||||
|
search: Option<Arc<crate::log_search::LogSearchIndex>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LogStore {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
live: DashMap::new(),
|
||||||
|
history: DashMap::new(),
|
||||||
|
search: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn with_search(mut self, index: Arc<crate::log_search::LogSearchIndex>) -> Self {
|
||||||
|
self.search = Some(index);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Subscribe to live log chunks for a workflow.
|
||||||
|
pub fn subscribe(&self, workflow_id: &str) -> broadcast::Receiver<LogChunk> {
|
||||||
|
self.live
|
||||||
|
.entry(workflow_id.to_string())
|
||||||
|
.or_insert_with(|| broadcast::channel(4096).0)
|
||||||
|
.subscribe()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get historical logs for a workflow, optionally filtered by step.
|
||||||
|
pub fn get_history(&self, workflow_id: &str, step_id: Option<usize>) -> Vec<LogChunk> {
|
||||||
|
let mut result = Vec::new();
|
||||||
|
for entry in self.history.iter() {
|
||||||
|
let (wf_id, s_id) = entry.key();
|
||||||
|
if wf_id != workflow_id {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Some(filter_step) = step_id {
|
||||||
|
if *s_id != filter_step {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result.extend(entry.value().iter().cloned());
|
||||||
|
}
|
||||||
|
// Sort by timestamp.
|
||||||
|
result.sort_by_key(|c| c.timestamp);
|
||||||
|
result
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl LogSink for LogStore {
|
||||||
|
async fn write_chunk(&self, chunk: LogChunk) {
|
||||||
|
// Store in history.
|
||||||
|
self.history
|
||||||
|
.entry((chunk.workflow_id.clone(), chunk.step_id))
|
||||||
|
.or_default()
|
||||||
|
.push(chunk.clone());
|
||||||
|
|
||||||
|
// Broadcast to live subscribers.
|
||||||
|
let sender = self
|
||||||
|
.live
|
||||||
|
.entry(chunk.workflow_id.clone())
|
||||||
|
.or_insert_with(|| broadcast::channel(4096).0);
|
||||||
|
let _ = sender.send(chunk.clone());
|
||||||
|
|
||||||
|
// Index to OpenSearch (best-effort, don't block on failure).
|
||||||
|
if let Some(ref search) = self.search {
|
||||||
|
if let Err(e) = search.index_chunk(&chunk).await {
|
||||||
|
tracing::warn!(error = %e, "failed to index log chunk to OpenSearch");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use wfe_core::traits::LogStreamType;

    /// Build a stdout chunk for the given workflow/step with a fixed definition id.
    fn make_chunk(workflow_id: &str, step_id: usize, step_name: &str, data: &str) -> LogChunk {
        LogChunk {
            workflow_id: workflow_id.to_owned(),
            definition_id: "def-1".to_owned(),
            step_id,
            step_name: step_name.to_owned(),
            stream: LogStreamType::Stdout,
            data: data.as_bytes().to_vec(),
            timestamp: Utc::now(),
        }
    }

    #[tokio::test]
    async fn write_and_read_history() {
        let store = LogStore::new();
        for line in ["line 1\n", "line 2\n"] {
            store.write_chunk(make_chunk("wf-1", 0, "build", line)).await;
        }

        let history = store.get_history("wf-1", None);
        assert_eq!(history.len(), 2);
        // History preserves write order.
        assert_eq!(history[0].data, b"line 1\n");
        assert_eq!(history[1].data, b"line 2\n");
    }

    #[tokio::test]
    async fn history_filtered_by_step() {
        let store = LogStore::new();
        store.write_chunk(make_chunk("wf-1", 0, "build", "build log\n")).await;
        store.write_chunk(make_chunk("wf-1", 1, "test", "test log\n")).await;

        // Each step filter returns exactly its own chunk.
        for (step, expected_name) in [(0, "build"), (1, "test")] {
            let filtered = store.get_history("wf-1", Some(step));
            assert_eq!(filtered.len(), 1);
            assert_eq!(filtered[0].step_name, expected_name);
        }
    }

    #[tokio::test]
    async fn empty_history_for_unknown_workflow() {
        let store = LogStore::new();
        assert!(store.get_history("nonexistent", None).is_empty());
    }

    #[tokio::test]
    async fn live_broadcast() {
        let store = LogStore::new();
        let mut rx = store.subscribe("wf-1");

        store.write_chunk(make_chunk("wf-1", 0, "build", "hello\n")).await;

        let received = rx.recv().await.unwrap();
        assert_eq!(received.data, b"hello\n");
        assert_eq!(received.workflow_id, "wf-1");
    }

    #[tokio::test]
    async fn broadcast_different_workflows_isolated() {
        let store = LogStore::new();
        let mut rx1 = store.subscribe("wf-1");
        let mut rx2 = store.subscribe("wf-2");

        store.write_chunk(make_chunk("wf-1", 0, "build", "wf1 log\n")).await;
        store.write_chunk(make_chunk("wf-2", 0, "test", "wf2 log\n")).await;

        // Each receiver only sees its own workflow's chunks.
        assert_eq!(rx1.recv().await.unwrap().workflow_id, "wf-1");
        assert_eq!(rx2.recv().await.unwrap().workflow_id, "wf-2");
    }

    #[tokio::test]
    async fn no_subscribers_does_not_error() {
        let store = LogStore::new();
        // Writing with no subscribers must not panic...
        store.write_chunk(make_chunk("wf-1", 0, "build", "orphan log\n")).await;
        // ...and history is still recorded.
        assert_eq!(store.get_history("wf-1", None).len(), 1);
    }

    #[tokio::test]
    async fn multiple_subscribers_same_workflow() {
        let store = LogStore::new();
        let mut rx1 = store.subscribe("wf-1");
        let mut rx2 = store.subscribe("wf-1");

        store.write_chunk(make_chunk("wf-1", 0, "build", "shared\n")).await;

        // Broadcast semantics: every subscriber receives its own copy.
        assert_eq!(rx1.recv().await.unwrap().data, b"shared\n");
        assert_eq!(rx2.recv().await.unwrap().data, b"shared\n");
    }

    #[tokio::test]
    async fn history_preserves_stream_type() {
        let store = LogStore::new();
        let chunk = LogChunk {
            stream: LogStreamType::Stderr,
            ..make_chunk("wf-1", 0, "build", "error output\n")
        };
        store.write_chunk(chunk).await;

        let history = store.get_history("wf-1", None);
        assert_eq!(history[0].stream, LogStreamType::Stderr);
    }
}
|
||||||
250
wfe-server/src/main.rs
Normal file
250
wfe-server/src/main.rs
Normal file
@@ -0,0 +1,250 @@
|
|||||||
|
mod auth;
|
||||||
|
mod config;
|
||||||
|
mod grpc;
|
||||||
|
mod lifecycle_bus;
|
||||||
|
mod log_search;
|
||||||
|
mod log_store;
|
||||||
|
mod webhook;
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use clap::Parser;
|
||||||
|
use tonic::transport::Server;
|
||||||
|
use tracing_subscriber::EnvFilter;
|
||||||
|
use wfe::WorkflowHostBuilder;
|
||||||
|
use wfe_core::test_support::{
|
||||||
|
InMemoryLockProvider, InMemoryPersistenceProvider, InMemoryQueueProvider,
|
||||||
|
};
|
||||||
|
use wfe_server_protos::wfe::v1::wfe_server::WfeServer;
|
||||||
|
|
||||||
|
use crate::config::{Cli, PersistenceConfig, QueueConfig};
|
||||||
|
use crate::grpc::WfeService;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// 1. Parse CLI + load config.
|
||||||
|
let cli = Cli::parse();
|
||||||
|
let config = config::load(&cli);
|
||||||
|
|
||||||
|
// 2. Init tracing.
|
||||||
|
tracing_subscriber::fmt()
|
||||||
|
.with_env_filter(
|
||||||
|
EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info")),
|
||||||
|
)
|
||||||
|
.init();
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
grpc_addr = %config.grpc_addr,
|
||||||
|
http_addr = %config.http_addr,
|
||||||
|
"starting wfe-server"
|
||||||
|
);
|
||||||
|
|
||||||
|
// 3. Build providers based on config.
|
||||||
|
let (persistence, lock, queue): (
|
||||||
|
Arc<dyn wfe_core::traits::PersistenceProvider>,
|
||||||
|
Arc<dyn wfe_core::traits::DistributedLockProvider>,
|
||||||
|
Arc<dyn wfe_core::traits::QueueProvider>,
|
||||||
|
) = match (&config.persistence, &config.queue) {
|
||||||
|
(PersistenceConfig::Sqlite { path }, QueueConfig::InMemory) => {
|
||||||
|
tracing::info!(path = %path, "using SQLite + in-memory queue");
|
||||||
|
let persistence = Arc::new(
|
||||||
|
wfe_sqlite::SqlitePersistenceProvider::new(path)
|
||||||
|
.await
|
||||||
|
.expect("failed to init SQLite"),
|
||||||
|
);
|
||||||
|
let lock = Arc::new(InMemoryLockProvider::new());
|
||||||
|
let queue = Arc::new(InMemoryQueueProvider::new());
|
||||||
|
(persistence, lock, queue)
|
||||||
|
}
|
||||||
|
(PersistenceConfig::Postgres { url }, QueueConfig::Valkey { url: valkey_url }) => {
|
||||||
|
tracing::info!("using Postgres + Valkey");
|
||||||
|
let persistence = Arc::new(
|
||||||
|
wfe_postgres::PostgresPersistenceProvider::new(url)
|
||||||
|
.await
|
||||||
|
.expect("failed to init Postgres"),
|
||||||
|
);
|
||||||
|
let lock = Arc::new(
|
||||||
|
wfe_valkey::ValkeyLockProvider::new(valkey_url, "wfe")
|
||||||
|
.await
|
||||||
|
.expect("failed to init Valkey lock"),
|
||||||
|
);
|
||||||
|
let queue = Arc::new(
|
||||||
|
wfe_valkey::ValkeyQueueProvider::new(valkey_url, "wfe")
|
||||||
|
.await
|
||||||
|
.expect("failed to init Valkey queue"),
|
||||||
|
);
|
||||||
|
(
|
||||||
|
persistence as Arc<dyn wfe_core::traits::PersistenceProvider>,
|
||||||
|
lock as Arc<dyn wfe_core::traits::DistributedLockProvider>,
|
||||||
|
queue as Arc<dyn wfe_core::traits::QueueProvider>,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
tracing::info!("using in-memory providers (dev mode)");
|
||||||
|
let persistence = Arc::new(InMemoryPersistenceProvider::new());
|
||||||
|
let lock = Arc::new(InMemoryLockProvider::new());
|
||||||
|
let queue = Arc::new(InMemoryQueueProvider::new());
|
||||||
|
(
|
||||||
|
persistence as Arc<dyn wfe_core::traits::PersistenceProvider>,
|
||||||
|
lock as Arc<dyn wfe_core::traits::DistributedLockProvider>,
|
||||||
|
queue as Arc<dyn wfe_core::traits::QueueProvider>,
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// 4. Build lifecycle broadcaster.
|
||||||
|
let lifecycle_bus = Arc::new(lifecycle_bus::BroadcastLifecyclePublisher::new(4096));
|
||||||
|
|
||||||
|
// 5. Build log search index (optional, needs to exist before log store).
|
||||||
|
let log_search_index = if let Some(ref search_config) = config.search {
|
||||||
|
match log_search::LogSearchIndex::new(&search_config.url) {
|
||||||
|
Ok(index) => {
|
||||||
|
let index = Arc::new(index);
|
||||||
|
if let Err(e) = index.ensure_index().await {
|
||||||
|
tracing::warn!(error = %e, "failed to create log search index");
|
||||||
|
}
|
||||||
|
tracing::info!(url = %search_config.url, "log search enabled");
|
||||||
|
Some(index)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(error = %e, "failed to connect to OpenSearch");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
// 6. Build log store (with optional search indexing).
|
||||||
|
let log_store = {
|
||||||
|
let store = log_store::LogStore::new();
|
||||||
|
if let Some(ref index) = log_search_index {
|
||||||
|
Arc::new(store.with_search(index.clone()))
|
||||||
|
} else {
|
||||||
|
Arc::new(store)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// 7. Build WorkflowHost with lifecycle + log_sink.
|
||||||
|
let host = WorkflowHostBuilder::new()
|
||||||
|
.use_persistence(persistence)
|
||||||
|
.use_lock_provider(lock)
|
||||||
|
.use_queue_provider(queue)
|
||||||
|
.use_lifecycle(lifecycle_bus.clone() as Arc<dyn wfe_core::traits::LifecyclePublisher>)
|
||||||
|
.use_log_sink(log_store.clone() as Arc<dyn wfe_core::traits::LogSink>)
|
||||||
|
.build()
|
||||||
|
.expect("failed to build workflow host");
|
||||||
|
|
||||||
|
// 8. Auto-load YAML definitions.
|
||||||
|
if let Some(ref dir) = config.workflows_dir {
|
||||||
|
load_yaml_definitions(&host, dir).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
// 9. Start the workflow engine.
|
||||||
|
host.start().await.expect("failed to start workflow host");
|
||||||
|
tracing::info!("workflow engine started");
|
||||||
|
|
||||||
|
let host = Arc::new(host);
|
||||||
|
|
||||||
|
// 10. Build gRPC service.
|
||||||
|
let mut wfe_service = WfeService::new(host.clone(), lifecycle_bus, log_store);
|
||||||
|
if let Some(index) = log_search_index {
|
||||||
|
wfe_service = wfe_service.with_log_search(index);
|
||||||
|
}
|
||||||
|
let (health_reporter, health_service) = tonic_health::server::health_reporter();
|
||||||
|
health_reporter
|
||||||
|
.set_serving::<WfeServer<WfeService>>()
|
||||||
|
.await;
|
||||||
|
|
||||||
|
// 11. Build auth state.
|
||||||
|
let auth_state = Arc::new(auth::AuthState::new(config.auth.clone()).await);
|
||||||
|
let auth_interceptor = auth::make_interceptor(auth_state);
|
||||||
|
|
||||||
|
// 12. Build axum HTTP server for webhooks.
|
||||||
|
let webhook_state = webhook::WebhookState {
|
||||||
|
host: host.clone(),
|
||||||
|
config: config.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// HIGH-08: Limit webhook payload size to 2 MB to prevent OOM DoS.
|
||||||
|
let http_router = axum::Router::new()
|
||||||
|
.route("/webhooks/events", axum::routing::post(webhook::handle_generic_event))
|
||||||
|
.route("/webhooks/github", axum::routing::post(webhook::handle_github_webhook))
|
||||||
|
.route("/webhooks/gitea", axum::routing::post(webhook::handle_gitea_webhook))
|
||||||
|
.route("/healthz", axum::routing::get(webhook::health_check))
|
||||||
|
.layer(axum::extract::DefaultBodyLimit::max(2 * 1024 * 1024))
|
||||||
|
.with_state(webhook_state);
|
||||||
|
|
||||||
|
// 12. Run gRPC + HTTP servers with graceful shutdown.
|
||||||
|
let grpc_addr = config.grpc_addr;
|
||||||
|
let http_addr = config.http_addr;
|
||||||
|
tracing::info!(%grpc_addr, %http_addr, "servers listening");
|
||||||
|
|
||||||
|
let grpc_server = Server::builder()
|
||||||
|
.add_service(health_service)
|
||||||
|
.add_service(WfeServer::with_interceptor(wfe_service, auth_interceptor))
|
||||||
|
.serve(grpc_addr);
|
||||||
|
|
||||||
|
let http_listener = tokio::net::TcpListener::bind(http_addr)
|
||||||
|
.await
|
||||||
|
.expect("failed to bind HTTP address");
|
||||||
|
let http_server = axum::serve(http_listener, http_router);
|
||||||
|
|
||||||
|
tokio::select! {
|
||||||
|
result = grpc_server => {
|
||||||
|
if let Err(e) = result {
|
||||||
|
tracing::error!(error = %e, "gRPC server error");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
result = http_server => {
|
||||||
|
if let Err(e) = result {
|
||||||
|
tracing::error!(error = %e, "HTTP server error");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = tokio::signal::ctrl_c() => {
|
||||||
|
tracing::info!("shutdown signal received");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 9. Graceful shutdown.
|
||||||
|
host.stop().await;
|
||||||
|
tracing::info!("wfe-server stopped");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn load_yaml_definitions(host: &wfe::WorkflowHost, dir: &std::path::Path) {
|
||||||
|
let entries = match std::fs::read_dir(dir) {
|
||||||
|
Ok(e) => e,
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(dir = %dir.display(), error = %e, "failed to read workflows directory");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let config = std::collections::HashMap::new();
|
||||||
|
|
||||||
|
for entry in entries.flatten() {
|
||||||
|
let path = entry.path();
|
||||||
|
if path.extension().is_some_and(|ext| ext == "yaml" || ext == "yml") {
|
||||||
|
match wfe_yaml::load_workflow_from_str(
|
||||||
|
&std::fs::read_to_string(&path).unwrap_or_default(),
|
||||||
|
&config,
|
||||||
|
) {
|
||||||
|
Ok(workflows) => {
|
||||||
|
for compiled in workflows {
|
||||||
|
for (key, factory) in compiled.step_factories {
|
||||||
|
host.register_step_factory(&key, factory).await;
|
||||||
|
}
|
||||||
|
let id = compiled.definition.id.clone();
|
||||||
|
let version = compiled.definition.version;
|
||||||
|
host.register_workflow_definition(compiled.definition).await;
|
||||||
|
tracing::info!(id = %id, version, path = %path.display(), "loaded workflow definition");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(path = %path.display(), error = %e, "failed to compile workflow");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
556
wfe-server/src/webhook.rs
Normal file
556
wfe-server/src/webhook.rs
Normal file
@@ -0,0 +1,556 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use axum::body::Bytes;
|
||||||
|
use axum::extract::State;
|
||||||
|
use axum::http::{HeaderMap, StatusCode};
|
||||||
|
use axum::response::IntoResponse;
|
||||||
|
use axum::Json;
|
||||||
|
use hmac::{Hmac, Mac};
|
||||||
|
use sha2::Sha256;
|
||||||
|
|
||||||
|
use crate::config::{ServerConfig, WebhookTrigger};
|
||||||
|
|
||||||
|
type HmacSha256 = Hmac<Sha256>;
|
||||||
|
|
||||||
|
/// Shared state for webhook handlers.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct WebhookState {
|
||||||
|
pub host: Arc<wfe::WorkflowHost>,
|
||||||
|
pub config: ServerConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generic event webhook.
|
||||||
|
///
|
||||||
|
/// POST /webhooks/events
|
||||||
|
/// Body: { "event_name": "...", "event_key": "...", "data": { ... } }
|
||||||
|
/// Requires bearer token authentication (same tokens as gRPC auth).
|
||||||
|
pub async fn handle_generic_event(
|
||||||
|
State(state): State<WebhookState>,
|
||||||
|
headers: HeaderMap,
|
||||||
|
Json(payload): Json<GenericEventPayload>,
|
||||||
|
) -> impl IntoResponse {
|
||||||
|
// HIGH-07: Authenticate generic event endpoint.
|
||||||
|
if !state.config.auth.tokens.is_empty() {
|
||||||
|
let auth_header = headers
|
||||||
|
.get("authorization")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.unwrap_or("");
|
||||||
|
let token = auth_header
|
||||||
|
.strip_prefix("Bearer ")
|
||||||
|
.or_else(|| auth_header.strip_prefix("bearer "))
|
||||||
|
.unwrap_or("");
|
||||||
|
if !crate::auth::check_static_tokens_pub(&state.config.auth.tokens, token) {
|
||||||
|
return (StatusCode::UNAUTHORIZED, "invalid token");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let data = payload.data.unwrap_or_else(|| serde_json::json!({}));
|
||||||
|
|
||||||
|
match state
|
||||||
|
.host
|
||||||
|
.publish_event(&payload.event_name, &payload.event_key, data)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => (StatusCode::OK, "event published"),
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(error = %e, "failed to publish generic event");
|
||||||
|
(StatusCode::INTERNAL_SERVER_ERROR, "failed to publish event")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// GitHub webhook handler.
|
||||||
|
///
|
||||||
|
/// POST /webhooks/github
|
||||||
|
/// Verifies X-Hub-Signature-256, parses X-GitHub-Event header.
|
||||||
|
pub async fn handle_github_webhook(
|
||||||
|
State(state): State<WebhookState>,
|
||||||
|
headers: HeaderMap,
|
||||||
|
body: Bytes,
|
||||||
|
) -> impl IntoResponse {
|
||||||
|
// 1. Verify HMAC signature if secret is configured.
|
||||||
|
if let Some(secret) = state.config.auth.webhook_secrets.get("github") {
|
||||||
|
let sig_header = headers
|
||||||
|
.get("x-hub-signature-256")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.unwrap_or("");
|
||||||
|
|
||||||
|
if !verify_hmac_sha256(secret.as_bytes(), &body, sig_header) {
|
||||||
|
return (StatusCode::UNAUTHORIZED, "invalid signature");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. Parse event type.
|
||||||
|
let event_type = headers
|
||||||
|
.get("x-github-event")
|
||||||
|
.and_then(|v| v.to_str().ok())
|
||||||
|
.unwrap_or("");
|
||||||
|
|
||||||
|
// 3. Parse payload.
|
||||||
|
let payload: serde_json::Value = match serde_json::from_slice(&body) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(error = %e, "invalid GitHub webhook JSON");
|
||||||
|
return (StatusCode::BAD_REQUEST, "invalid JSON");
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
tracing::info!(
|
||||||
|
event = event_type,
|
||||||
|
repo = payload["repository"]["full_name"].as_str().unwrap_or(""),
|
||||||
|
"received GitHub webhook"
|
||||||
|
);
|
||||||
|
|
||||||
|
// 4. Map to WFE event + check triggers.
|
||||||
|
let forge_event = map_forge_event(event_type, &payload);
|
||||||
|
|
||||||
|
// Publish as event (for workflows waiting on events).
|
||||||
|
if let Err(e) = state
|
||||||
|
.host
|
||||||
|
.publish_event(&forge_event.event_name, &forge_event.event_key, forge_event.data.clone())
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
tracing::error!(error = %e, "failed to publish forge event");
|
||||||
|
return (StatusCode::INTERNAL_SERVER_ERROR, "failed to publish event");
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check triggers and auto-start workflows.
|
||||||
|
for trigger in &state.config.webhook.triggers {
|
||||||
|
if trigger.source != "github" {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if trigger.event != event_type {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if let Some(ref match_ref) = trigger.match_ref {
|
||||||
|
let payload_ref = payload["ref"].as_str().unwrap_or("");
|
||||||
|
if payload_ref != match_ref {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let data = map_trigger_data(trigger, &payload);
|
||||||
|
match state
|
||||||
|
.host
|
||||||
|
.start_workflow(&trigger.workflow_id, trigger.version, data)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(id) => {
|
||||||
|
tracing::info!(
|
||||||
|
workflow_id = %id,
|
||||||
|
trigger = %trigger.workflow_id,
|
||||||
|
"webhook triggered workflow"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
tracing::warn!(
|
||||||
|
error = %e,
|
||||||
|
trigger = %trigger.workflow_id,
|
||||||
|
"failed to start triggered workflow"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
(StatusCode::OK, "ok")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gitea webhook handler.
///
/// POST /webhooks/gitea
/// Verifies X-Gitea-Signature, parses X-Gitea-Event (or X-GitHub-Event) header.
/// Gitea payloads are intentionally compatible with GitHub's format.
///
/// Flow: (1) optional HMAC verification, (2) event-type header parse,
/// (3) JSON body parse, (4) map to a WFE forge event and publish it,
/// (5) auto-start any configured workflows whose trigger matches.
pub async fn handle_gitea_webhook(
    State(state): State<WebhookState>,
    headers: HeaderMap,
    body: Bytes,
) -> impl IntoResponse {
    // 1. Verify HMAC signature if secret is configured.
    // NOTE: when no "gitea" secret is configured this block is skipped and the
    // request is accepted unverified (fail-open by configuration).
    if let Some(secret) = state.config.auth.webhook_secrets.get("gitea") {
        // Gitea uses X-Gitea-Signature (raw hex, no sha256= prefix in older versions).
        let sig_header = headers
            .get("x-gitea-signature")
            .and_then(|v| v.to_str().ok())
            .unwrap_or("");

        // Handle both raw hex and sha256= prefixed formats.
        if !verify_hmac_sha256(secret.as_bytes(), &body, sig_header)
            && !verify_hmac_sha256_raw(secret.as_bytes(), &body, sig_header)
        {
            return (StatusCode::UNAUTHORIZED, "invalid signature");
        }
    }

    // 2. Parse event type (try Gitea header first, fall back to GitHub compat header).
    let event_type = headers
        .get("x-gitea-event")
        .or_else(|| headers.get("x-github-event"))
        .and_then(|v| v.to_str().ok())
        .unwrap_or("");

    // 3. Parse payload.
    let payload: serde_json::Value = match serde_json::from_slice(&body) {
        Ok(v) => v,
        Err(e) => {
            tracing::warn!(error = %e, "invalid Gitea webhook JSON");
            return (StatusCode::BAD_REQUEST, "invalid JSON");
        }
    };

    tracing::info!(
        event = event_type,
        repo = payload["repository"]["full_name"].as_str().unwrap_or(""),
        "received Gitea webhook"
    );

    // 4. Map to WFE event + check triggers (same logic as GitHub).
    let forge_event = map_forge_event(event_type, &payload);

    // Publish as event (for workflows waiting on events). A publish failure is
    // a hard error: triggers are NOT evaluated and 500 is returned so the
    // forge can retry delivery.
    if let Err(e) = state
        .host
        .publish_event(&forge_event.event_name, &forge_event.event_key, forge_event.data.clone())
        .await
    {
        tracing::error!(error = %e, "failed to publish forge event");
        return (StatusCode::INTERNAL_SERVER_ERROR, "failed to publish event");
    }

    // 5. Auto-start workflows for matching triggers. Each trigger must match
    // source, event type, and (when configured) the payload "ref". A failed
    // start is logged but does not fail the request — other triggers still run.
    for trigger in &state.config.webhook.triggers {
        if trigger.source != "gitea" {
            continue;
        }
        if trigger.event != event_type {
            continue;
        }
        if let Some(ref match_ref) = trigger.match_ref {
            let payload_ref = payload["ref"].as_str().unwrap_or("");
            if payload_ref != match_ref {
                continue;
            }
        }

        let data = map_trigger_data(trigger, &payload);
        match state
            .host
            .start_workflow(&trigger.workflow_id, trigger.version, data)
            .await
        {
            Ok(id) => {
                tracing::info!(workflow_id = %id, trigger = %trigger.workflow_id, "webhook triggered workflow");
            }
            Err(e) => {
                tracing::warn!(error = %e, trigger = %trigger.workflow_id, "failed to start triggered workflow");
            }
        }
    }

    (StatusCode::OK, "ok")
}
|
||||||
|
|
||||||
|
/// Health check endpoint.
|
||||||
|
pub async fn health_check() -> impl IntoResponse {
|
||||||
|
(StatusCode::OK, "ok")
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Types ───────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// JSON request body for publishing an arbitrary WFE event.
///
/// NOTE(review): the endpoint that deserializes this is not visible in this
/// chunk — presumably a generic "publish event" HTTP route; confirm against
/// the router setup.
#[derive(serde::Deserialize)]
pub struct GenericEventPayload {
    /// Name of the event to publish (e.g. "git.push").
    pub event_name: String,
    /// Correlation key that waiting workflows match on.
    pub event_key: String,
    /// Optional arbitrary JSON data attached to the event.
    pub data: Option<serde_json::Value>,
}
|
||||||
|
|
||||||
|
/// A git-forge (GitHub/Gitea) webhook translated into WFE event terms.
/// Produced by [`map_forge_event`] and consumed by the webhook handlers.
struct ForgeEvent {
    /// WFE event name, e.g. "git.push", "git.pr", "git.tag", or
    /// "git.<event_type>" for unrecognized forge events.
    event_name: String,
    /// Correlation key, typically "<repo>/<ref-or-number>" (bare repo
    /// full name for unrecognized events).
    event_key: String,
    /// Structured fields extracted from the webhook payload.
    data: serde_json::Value,
}
|
||||||
|
|
||||||
|
// ── Helpers ─────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Verify HMAC-SHA256 signature with `sha256=<hex>` prefix (GitHub format).
|
||||||
|
fn verify_hmac_sha256(secret: &[u8], body: &[u8], signature: &str) -> bool {
|
||||||
|
let hex_sig = signature.strip_prefix("sha256=").unwrap_or("");
|
||||||
|
if hex_sig.is_empty() {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
let expected = match hex::decode(hex_sig) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(_) => return false,
|
||||||
|
};
|
||||||
|
let mut mac = HmacSha256::new_from_slice(secret).expect("HMAC accepts any key size");
|
||||||
|
mac.update(body);
|
||||||
|
mac.verify_slice(&expected).is_ok()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Verify HMAC-SHA256 signature as raw hex (no prefix, Gitea legacy format).
|
||||||
|
fn verify_hmac_sha256_raw(secret: &[u8], body: &[u8], signature: &str) -> bool {
|
||||||
|
let expected = match hex::decode(signature) {
|
||||||
|
Ok(v) => v,
|
||||||
|
Err(_) => return false,
|
||||||
|
};
|
||||||
|
let mut mac = HmacSha256::new_from_slice(secret).expect("HMAC accepts any key size");
|
||||||
|
mac.update(body);
|
||||||
|
mac.verify_slice(&expected).is_ok()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Map a git forge event type + payload to a WFE event.
|
||||||
|
fn map_forge_event(event_type: &str, payload: &serde_json::Value) -> ForgeEvent {
|
||||||
|
let repo = payload["repository"]["full_name"]
|
||||||
|
.as_str()
|
||||||
|
.unwrap_or("unknown")
|
||||||
|
.to_string();
|
||||||
|
|
||||||
|
match event_type {
|
||||||
|
"push" => {
|
||||||
|
let git_ref = payload["ref"].as_str().unwrap_or("").to_string();
|
||||||
|
ForgeEvent {
|
||||||
|
event_name: "git.push".to_string(),
|
||||||
|
event_key: format!("{repo}/{git_ref}"),
|
||||||
|
data: serde_json::json!({
|
||||||
|
"repo": repo,
|
||||||
|
"ref": git_ref,
|
||||||
|
"before": payload["before"].as_str().unwrap_or(""),
|
||||||
|
"after": payload["after"].as_str().unwrap_or(""),
|
||||||
|
"commit": payload["head_commit"]["id"].as_str().unwrap_or(""),
|
||||||
|
"message": payload["head_commit"]["message"].as_str().unwrap_or(""),
|
||||||
|
"sender": payload["sender"]["login"].as_str().unwrap_or(""),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"pull_request" => {
|
||||||
|
let number = payload["number"].as_u64().unwrap_or(0);
|
||||||
|
ForgeEvent {
|
||||||
|
event_name: "git.pr".to_string(),
|
||||||
|
event_key: format!("{repo}/{number}"),
|
||||||
|
data: serde_json::json!({
|
||||||
|
"repo": repo,
|
||||||
|
"action": payload["action"].as_str().unwrap_or(""),
|
||||||
|
"number": number,
|
||||||
|
"title": payload["pull_request"]["title"].as_str().unwrap_or(""),
|
||||||
|
"head_ref": payload["pull_request"]["head"]["ref"].as_str().unwrap_or(""),
|
||||||
|
"base_ref": payload["pull_request"]["base"]["ref"].as_str().unwrap_or(""),
|
||||||
|
"sender": payload["sender"]["login"].as_str().unwrap_or(""),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"create" => {
|
||||||
|
let ref_name = payload["ref"].as_str().unwrap_or("").to_string();
|
||||||
|
let ref_type = payload["ref_type"].as_str().unwrap_or("").to_string();
|
||||||
|
ForgeEvent {
|
||||||
|
event_name: format!("git.{ref_type}"),
|
||||||
|
event_key: format!("{repo}/{ref_name}"),
|
||||||
|
data: serde_json::json!({
|
||||||
|
"repo": repo,
|
||||||
|
"ref": ref_name,
|
||||||
|
"ref_type": ref_type,
|
||||||
|
"sender": payload["sender"]["login"].as_str().unwrap_or(""),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => ForgeEvent {
|
||||||
|
event_name: format!("git.{event_type}"),
|
||||||
|
event_key: repo.clone(),
|
||||||
|
data: serde_json::json!({
|
||||||
|
"repo": repo,
|
||||||
|
"event_type": event_type,
|
||||||
|
}),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Extract data fields from payload using simple JSONPath-like mapping.
|
||||||
|
/// Supports `$.field.nested` syntax.
|
||||||
|
fn map_trigger_data(
|
||||||
|
trigger: &WebhookTrigger,
|
||||||
|
payload: &serde_json::Value,
|
||||||
|
) -> serde_json::Value {
|
||||||
|
let mut data = serde_json::Map::new();
|
||||||
|
for (key, path) in &trigger.data_mapping {
|
||||||
|
if let Some(value) = resolve_json_path(payload, path) {
|
||||||
|
data.insert(key.clone(), value.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
serde_json::Value::Object(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve a simple JSONPath expression like `$.repository.full_name`.
|
||||||
|
fn resolve_json_path<'a>(value: &'a serde_json::Value, path: &str) -> Option<&'a serde_json::Value> {
|
||||||
|
let path = path.strip_prefix("$.").unwrap_or(path);
|
||||||
|
let mut current = value;
|
||||||
|
for segment in path.split('.') {
|
||||||
|
current = current.get(segment)?;
|
||||||
|
}
|
||||||
|
Some(current)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    //! Unit tests for HMAC signature verification (GitHub and Gitea formats),
    //! forge-event mapping, and JSONPath-style trigger data extraction.
    use super::*;

    #[test]
    fn verify_github_hmac_valid() {
        // A signature computed with the same secret and body must verify.
        let secret = b"mysecret";
        let body = b"hello world";
        let mut mac = HmacSha256::new_from_slice(secret).unwrap();
        mac.update(body);
        let sig = format!("sha256={}", hex::encode(mac.finalize().into_bytes()));
        assert!(verify_hmac_sha256(secret, body, &sig));
    }

    #[test]
    fn verify_github_hmac_invalid() {
        // Well-formed hex that is not the right MAC must be rejected.
        assert!(!verify_hmac_sha256(b"secret", b"body", "sha256=deadbeef"));
    }

    #[test]
    fn verify_github_hmac_missing_prefix() {
        // Without the "sha256=" prefix the GitHub-format check must fail.
        assert!(!verify_hmac_sha256(b"secret", b"body", "not-a-signature"));
    }

    #[test]
    fn verify_gitea_hmac_raw_valid() {
        // Gitea legacy format: raw hex digest, no prefix.
        let secret = b"giteasecret";
        let body = b"payload";
        let mut mac = HmacSha256::new_from_slice(secret).unwrap();
        mac.update(body);
        let sig = hex::encode(mac.finalize().into_bytes());
        assert!(verify_hmac_sha256_raw(secret, body, &sig));
    }

    #[test]
    fn verify_gitea_hmac_raw_invalid() {
        // Non-hex input must be rejected, not panic.
        assert!(!verify_hmac_sha256_raw(b"secret", b"body", "badhex"));
    }

    #[test]
    fn map_push_event() {
        // Push events key on "<repo>/<ref>" and surface head-commit fields.
        let payload = serde_json::json!({
            "ref": "refs/heads/main",
            "before": "aaa",
            "after": "bbb",
            "head_commit": { "id": "bbb", "message": "fix: stuff" },
            "repository": { "full_name": "studio/wfe" },
            "sender": { "login": "sienna" }
        });
        let event = map_forge_event("push", &payload);
        assert_eq!(event.event_name, "git.push");
        assert_eq!(event.event_key, "studio/wfe/refs/heads/main");
        assert_eq!(event.data["commit"], "bbb");
        assert_eq!(event.data["sender"], "sienna");
    }

    #[test]
    fn map_pull_request_event() {
        // PR events key on "<repo>/<number>" and flatten head/base refs.
        let payload = serde_json::json!({
            "action": "opened",
            "number": 42,
            "pull_request": {
                "title": "Add feature",
                "head": { "ref": "feature-branch" },
                "base": { "ref": "main" }
            },
            "repository": { "full_name": "studio/wfe" },
            "sender": { "login": "sienna" }
        });
        let event = map_forge_event("pull_request", &payload);
        assert_eq!(event.event_name, "git.pr");
        assert_eq!(event.event_key, "studio/wfe/42");
        assert_eq!(event.data["action"], "opened");
        assert_eq!(event.data["title"], "Add feature");
        assert_eq!(event.data["head_ref"], "feature-branch");
    }

    #[test]
    fn map_create_tag_event() {
        // "create" with ref_type "tag" becomes the "git.tag" event.
        let payload = serde_json::json!({
            "ref": "v1.5.0",
            "ref_type": "tag",
            "repository": { "full_name": "studio/wfe" },
            "sender": { "login": "sienna" }
        });
        let event = map_forge_event("create", &payload);
        assert_eq!(event.event_name, "git.tag");
        assert_eq!(event.event_key, "studio/wfe/v1.5.0");
    }

    #[test]
    fn map_create_branch_event() {
        // "create" with ref_type "branch" becomes the "git.branch" event.
        let payload = serde_json::json!({
            "ref": "feature-x",
            "ref_type": "branch",
            "repository": { "full_name": "studio/wfe" },
            "sender": { "login": "sienna" }
        });
        let event = map_forge_event("create", &payload);
        assert_eq!(event.event_name, "git.branch");
        assert_eq!(event.event_key, "studio/wfe/feature-x");
    }

    #[test]
    fn map_unknown_event() {
        // Unrecognized event types fall through to "git.<event_type>",
        // keyed by the bare repo name.
        let payload = serde_json::json!({
            "repository": { "full_name": "studio/wfe" }
        });
        let event = map_forge_event("release", &payload);
        assert_eq!(event.event_name, "git.release");
        assert_eq!(event.event_key, "studio/wfe");
    }

    #[test]
    fn resolve_json_path_simple() {
        let v = serde_json::json!({"a": {"b": {"c": "value"}}});
        assert_eq!(resolve_json_path(&v, "$.a.b.c").unwrap(), "value");
    }

    #[test]
    fn resolve_json_path_no_prefix() {
        // The "$." prefix is optional.
        let v = serde_json::json!({"repo": "test"});
        assert_eq!(resolve_json_path(&v, "repo").unwrap(), "test");
    }

    #[test]
    fn resolve_json_path_missing() {
        let v = serde_json::json!({"a": 1});
        assert!(resolve_json_path(&v, "$.b.c").is_none());
    }

    #[test]
    fn map_trigger_data_extracts_fields() {
        // Each mapping entry resolves its path and lands under the mapped key.
        let trigger = WebhookTrigger {
            source: "github".to_string(),
            event: "push".to_string(),
            match_ref: None,
            workflow_id: "ci".to_string(),
            version: 1,
            data_mapping: [
                ("repo".to_string(), "$.repository.full_name".to_string()),
                ("commit".to_string(), "$.head_commit.id".to_string()),
            ]
            .into(),
        };
        let payload = serde_json::json!({
            "repository": { "full_name": "studio/wfe" },
            "head_commit": { "id": "abc123" }
        });
        let data = map_trigger_data(&trigger, &payload);
        assert_eq!(data["repo"], "studio/wfe");
        assert_eq!(data["commit"], "abc123");
    }

    #[test]
    fn map_trigger_data_missing_field_skipped() {
        // Unresolvable paths are dropped rather than inserted as null.
        let trigger = WebhookTrigger {
            source: "github".to_string(),
            event: "push".to_string(),
            match_ref: None,
            workflow_id: "ci".to_string(),
            version: 1,
            data_mapping: [("missing".to_string(), "$.nonexistent.field".to_string())].into(),
        };
        let payload = serde_json::json!({"repo": "test"});
        let data = map_trigger_data(&trigger, &payload);
        assert!(data.get("missing").is_none());
    }
}
|
||||||
@@ -21,6 +21,7 @@ async-trait = { workspace = true }
|
|||||||
tokio = { workspace = true }
|
tokio = { workspace = true }
|
||||||
thiserror = { workspace = true }
|
thiserror = { workspace = true }
|
||||||
tracing = { workspace = true }
|
tracing = { workspace = true }
|
||||||
|
chrono = { workspace = true }
|
||||||
regex = { workspace = true }
|
regex = { workspace = true }
|
||||||
deno_core = { workspace = true, optional = true }
|
deno_core = { workspace = true, optional = true }
|
||||||
deno_error = { workspace = true, optional = true }
|
deno_error = { workspace = true, optional = true }
|
||||||
|
|||||||
@@ -23,18 +23,23 @@ impl ShellStep {
|
|||||||
pub fn new(config: ShellConfig) -> Self {
|
pub fn new(config: ShellConfig) -> Self {
|
||||||
Self { config }
|
Self { config }
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
fn build_command(&self, context: &StepExecutionContext<'_>) -> tokio::process::Command {
|
||||||
impl StepBody for ShellStep {
|
|
||||||
async fn run(&mut self, context: &StepExecutionContext<'_>) -> wfe_core::Result<ExecutionResult> {
|
|
||||||
let mut cmd = tokio::process::Command::new(&self.config.shell);
|
let mut cmd = tokio::process::Command::new(&self.config.shell);
|
||||||
cmd.arg("-c").arg(&self.config.run);
|
cmd.arg("-c").arg(&self.config.run);
|
||||||
|
|
||||||
// Inject workflow data as UPPER_CASE env vars (top-level keys only).
|
// Inject workflow data as UPPER_CASE env vars (top-level keys only).
|
||||||
|
// Skip keys that would override security-sensitive environment variables.
|
||||||
|
const BLOCKED_KEYS: &[&str] = &[
|
||||||
|
"PATH", "LD_PRELOAD", "LD_LIBRARY_PATH", "DYLD_LIBRARY_PATH",
|
||||||
|
"HOME", "SHELL", "USER", "LOGNAME", "TERM",
|
||||||
|
];
|
||||||
if let Some(data_obj) = context.workflow.data.as_object() {
|
if let Some(data_obj) = context.workflow.data.as_object() {
|
||||||
for (key, value) in data_obj {
|
for (key, value) in data_obj {
|
||||||
let env_key = key.to_uppercase();
|
let env_key = key.to_uppercase();
|
||||||
|
if BLOCKED_KEYS.contains(&env_key.as_str()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
let env_val = match value {
|
let env_val = match value {
|
||||||
serde_json::Value::String(s) => s.clone(),
|
serde_json::Value::String(s) => s.clone(),
|
||||||
other => other.to_string(),
|
other => other.to_string(),
|
||||||
@@ -43,12 +48,10 @@ impl StepBody for ShellStep {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add extra env from config.
|
|
||||||
for (key, value) in &self.config.env {
|
for (key, value) in &self.config.env {
|
||||||
cmd.env(key, value);
|
cmd.env(key, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set working directory if specified.
|
|
||||||
if let Some(ref dir) = self.config.working_dir {
|
if let Some(ref dir) = self.config.working_dir {
|
||||||
cmd.current_dir(dir);
|
cmd.current_dir(dir);
|
||||||
}
|
}
|
||||||
@@ -56,15 +59,137 @@ impl StepBody for ShellStep {
|
|||||||
cmd.stdout(std::process::Stdio::piped());
|
cmd.stdout(std::process::Stdio::piped());
|
||||||
cmd.stderr(std::process::Stdio::piped());
|
cmd.stderr(std::process::Stdio::piped());
|
||||||
|
|
||||||
// Execute with optional timeout.
|
cmd
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run with streaming output via LogSink.
|
||||||
|
///
|
||||||
|
/// Reads stdout and stderr line-by-line, streaming each line to the
|
||||||
|
/// LogSink as it's produced. Uses `tokio::select!` to interleave both
|
||||||
|
/// streams without spawning tasks (avoids lifetime issues with &dyn LogSink).
|
||||||
|
async fn run_streaming(
|
||||||
|
&self,
|
||||||
|
context: &StepExecutionContext<'_>,
|
||||||
|
) -> wfe_core::Result<(String, String, i32)> {
|
||||||
|
use tokio::io::{AsyncBufReadExt, BufReader};
|
||||||
|
use wfe_core::traits::{LogChunk, LogStreamType};
|
||||||
|
|
||||||
|
let log_sink = context.log_sink.unwrap();
|
||||||
|
let workflow_id = context.workflow.id.clone();
|
||||||
|
let definition_id = context.workflow.workflow_definition_id.clone();
|
||||||
|
let step_id = context.step.id;
|
||||||
|
let step_name = context.step.name.clone().unwrap_or_else(|| "unknown".to_string());
|
||||||
|
|
||||||
|
let mut cmd = self.build_command(context);
|
||||||
|
let mut child = cmd.spawn().map_err(|e| {
|
||||||
|
WfeError::StepExecution(format!("Failed to spawn shell command: {e}"))
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let stdout_pipe = child.stdout.take().ok_or_else(|| {
|
||||||
|
WfeError::StepExecution("failed to capture stdout pipe".to_string())
|
||||||
|
})?;
|
||||||
|
let stderr_pipe = child.stderr.take().ok_or_else(|| {
|
||||||
|
WfeError::StepExecution("failed to capture stderr pipe".to_string())
|
||||||
|
})?;
|
||||||
|
let mut stdout_lines = BufReader::new(stdout_pipe).lines();
|
||||||
|
let mut stderr_lines = BufReader::new(stderr_pipe).lines();
|
||||||
|
|
||||||
|
let mut stdout_buf = Vec::new();
|
||||||
|
let mut stderr_buf = Vec::new();
|
||||||
|
let mut stdout_done = false;
|
||||||
|
let mut stderr_done = false;
|
||||||
|
|
||||||
|
// Interleave stdout/stderr reads with optional timeout.
|
||||||
|
let read_future = async {
|
||||||
|
while !stdout_done || !stderr_done {
|
||||||
|
tokio::select! {
|
||||||
|
line = stdout_lines.next_line(), if !stdout_done => {
|
||||||
|
match line {
|
||||||
|
Ok(Some(line)) => {
|
||||||
|
log_sink.write_chunk(LogChunk {
|
||||||
|
workflow_id: workflow_id.clone(),
|
||||||
|
definition_id: definition_id.clone(),
|
||||||
|
step_id,
|
||||||
|
step_name: step_name.clone(),
|
||||||
|
stream: LogStreamType::Stdout,
|
||||||
|
data: format!("{line}\n").into_bytes(),
|
||||||
|
timestamp: chrono::Utc::now(),
|
||||||
|
}).await;
|
||||||
|
stdout_buf.push(line);
|
||||||
|
}
|
||||||
|
_ => stdout_done = true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
line = stderr_lines.next_line(), if !stderr_done => {
|
||||||
|
match line {
|
||||||
|
Ok(Some(line)) => {
|
||||||
|
log_sink.write_chunk(LogChunk {
|
||||||
|
workflow_id: workflow_id.clone(),
|
||||||
|
definition_id: definition_id.clone(),
|
||||||
|
step_id,
|
||||||
|
step_name: step_name.clone(),
|
||||||
|
stream: LogStreamType::Stderr,
|
||||||
|
data: format!("{line}\n").into_bytes(),
|
||||||
|
timestamp: chrono::Utc::now(),
|
||||||
|
}).await;
|
||||||
|
stderr_buf.push(line);
|
||||||
|
}
|
||||||
|
_ => stderr_done = true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
child.wait().await
|
||||||
|
};
|
||||||
|
|
||||||
|
let status = if let Some(timeout_ms) = self.config.timeout_ms {
|
||||||
|
let duration = std::time::Duration::from_millis(timeout_ms);
|
||||||
|
match tokio::time::timeout(duration, read_future).await {
|
||||||
|
Ok(result) => result.map_err(|e| {
|
||||||
|
WfeError::StepExecution(format!("Failed to wait for shell command: {e}"))
|
||||||
|
})?,
|
||||||
|
Err(_) => {
|
||||||
|
// Kill the child on timeout.
|
||||||
|
let _ = child.kill().await;
|
||||||
|
return Err(WfeError::StepExecution(format!(
|
||||||
|
"Shell command timed out after {timeout_ms}ms"
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
read_future.await.map_err(|e| {
|
||||||
|
WfeError::StepExecution(format!("Failed to wait for shell command: {e}"))
|
||||||
|
})?
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut stdout = stdout_buf.join("\n");
|
||||||
|
let mut stderr = stderr_buf.join("\n");
|
||||||
|
if !stdout.is_empty() {
|
||||||
|
stdout.push('\n');
|
||||||
|
}
|
||||||
|
if !stderr.is_empty() {
|
||||||
|
stderr.push('\n');
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok((stdout, stderr, status.code().unwrap_or(-1)))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run with buffered output (original path, no LogSink).
|
||||||
|
async fn run_buffered(
|
||||||
|
&self,
|
||||||
|
context: &StepExecutionContext<'_>,
|
||||||
|
) -> wfe_core::Result<(String, String, i32)> {
|
||||||
|
let mut cmd = self.build_command(context);
|
||||||
|
|
||||||
let output = if let Some(timeout_ms) = self.config.timeout_ms {
|
let output = if let Some(timeout_ms) = self.config.timeout_ms {
|
||||||
let duration = std::time::Duration::from_millis(timeout_ms);
|
let duration = std::time::Duration::from_millis(timeout_ms);
|
||||||
match tokio::time::timeout(duration, cmd.output()).await {
|
match tokio::time::timeout(duration, cmd.output()).await {
|
||||||
Ok(result) => result.map_err(|e| WfeError::StepExecution(format!("Failed to spawn shell command: {e}")))?,
|
Ok(result) => result.map_err(|e| {
|
||||||
|
WfeError::StepExecution(format!("Failed to spawn shell command: {e}"))
|
||||||
|
})?,
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
return Err(WfeError::StepExecution(format!(
|
return Err(WfeError::StepExecution(format!(
|
||||||
"Shell command timed out after {}ms",
|
"Shell command timed out after {timeout_ms}ms"
|
||||||
timeout_ms
|
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -76,11 +201,24 @@ impl StepBody for ShellStep {
|
|||||||
|
|
||||||
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
|
let stdout = String::from_utf8_lossy(&output.stdout).to_string();
|
||||||
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
|
let stderr = String::from_utf8_lossy(&output.stderr).to_string();
|
||||||
|
let code = output.status.code().unwrap_or(-1);
|
||||||
|
|
||||||
if !output.status.success() {
|
Ok((stdout, stderr, code))
|
||||||
let code = output.status.code().unwrap_or(-1);
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl StepBody for ShellStep {
|
||||||
|
async fn run(&mut self, context: &StepExecutionContext<'_>) -> wfe_core::Result<ExecutionResult> {
|
||||||
|
let (stdout, stderr, exit_code) = if context.log_sink.is_some() {
|
||||||
|
self.run_streaming(context).await?
|
||||||
|
} else {
|
||||||
|
self.run_buffered(context).await?
|
||||||
|
};
|
||||||
|
|
||||||
|
if exit_code != 0 {
|
||||||
return Err(WfeError::StepExecution(format!(
|
return Err(WfeError::StepExecution(format!(
|
||||||
"Shell command exited with code {code}\nstdout: {stdout}\nstderr: {stderr}"
|
"Shell command exited with code {exit_code}\nstdout: {stdout}\nstderr: {stderr}"
|
||||||
)));
|
)));
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -93,7 +231,6 @@ impl StepBody for ShellStep {
|
|||||||
{
|
{
|
||||||
let name = rest[..eq_pos].trim().to_string();
|
let name = rest[..eq_pos].trim().to_string();
|
||||||
let raw_value = rest[eq_pos + 1..].to_string();
|
let raw_value = rest[eq_pos + 1..].to_string();
|
||||||
// Auto-convert typed values from string annotations
|
|
||||||
let value = match raw_value.as_str() {
|
let value = match raw_value.as_str() {
|
||||||
"true" => serde_json::Value::Bool(true),
|
"true" => serde_json::Value::Bool(true),
|
||||||
"false" => serde_json::Value::Bool(false),
|
"false" => serde_json::Value::Bool(false),
|
||||||
@@ -110,15 +247,10 @@ impl StepBody for ShellStep {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Add raw stdout under the step name.
|
let step_name = context.step.name.as_deref().unwrap_or("unknown");
|
||||||
let step_name = context
|
|
||||||
.step
|
|
||||||
.name
|
|
||||||
.as_deref()
|
|
||||||
.unwrap_or("unknown");
|
|
||||||
outputs.insert(
|
outputs.insert(
|
||||||
format!("{step_name}.stdout"),
|
format!("{step_name}.stdout"),
|
||||||
serde_json::Value::String(stdout.clone()),
|
serde_json::Value::String(stdout),
|
||||||
);
|
);
|
||||||
outputs.insert(
|
outputs.insert(
|
||||||
format!("{step_name}.stderr"),
|
format!("{step_name}.stderr"),
|
||||||
|
|||||||
@@ -1082,6 +1082,7 @@ workflows:
|
|||||||
workflow: &workflow,
|
workflow: &workflow,
|
||||||
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
||||||
host_context: Some(&host),
|
host_context: Some(&host),
|
||||||
|
log_sink: None,
|
||||||
};
|
};
|
||||||
|
|
||||||
let result = step.run(&ctx).await.unwrap();
|
let result = step.run(&ctx).await.unwrap();
|
||||||
|
|||||||
@@ -42,6 +42,7 @@ fn make_context<'a>(
|
|||||||
workflow,
|
workflow,
|
||||||
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
cancellation_token: tokio_util::sync::CancellationToken::new(),
|
||||||
host_context: None,
|
host_context: None,
|
||||||
|
log_sink: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -53,6 +53,70 @@ async fn run_yaml_workflow(yaml: &str) -> wfe::models::WorkflowInstance {
|
|||||||
run_yaml_workflow_with_data(yaml, serde_json::json!({})).await
|
run_yaml_workflow_with_data(yaml, serde_json::json!({})).await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// A test LogSink that collects all chunks.
|
||||||
|
struct CollectingLogSink {
|
||||||
|
chunks: tokio::sync::Mutex<Vec<wfe_core::traits::LogChunk>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl CollectingLogSink {
|
||||||
|
fn new() -> Self {
|
||||||
|
Self { chunks: tokio::sync::Mutex::new(Vec::new()) }
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn chunks(&self) -> Vec<wfe_core::traits::LogChunk> {
|
||||||
|
self.chunks.lock().await.clone()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait::async_trait]
|
||||||
|
impl wfe_core::traits::LogSink for CollectingLogSink {
|
||||||
|
async fn write_chunk(&self, chunk: wfe_core::traits::LogChunk) {
|
||||||
|
self.chunks.lock().await.push(chunk);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Run a workflow with a LogSink to verify log streaming works end-to-end.
|
||||||
|
async fn run_yaml_workflow_with_log_sink(
|
||||||
|
yaml: &str,
|
||||||
|
log_sink: Arc<CollectingLogSink>,
|
||||||
|
) -> wfe::models::WorkflowInstance {
|
||||||
|
let config = HashMap::new();
|
||||||
|
let compiled = load_single_workflow_from_str(yaml, &config).unwrap();
|
||||||
|
|
||||||
|
let persistence = Arc::new(InMemoryPersistenceProvider::new());
|
||||||
|
let lock = Arc::new(InMemoryLockProvider::new());
|
||||||
|
let queue = Arc::new(InMemoryQueueProvider::new());
|
||||||
|
|
||||||
|
let host = WorkflowHostBuilder::new()
|
||||||
|
.use_persistence(persistence as Arc<dyn wfe_core::traits::PersistenceProvider>)
|
||||||
|
.use_lock_provider(lock as Arc<dyn wfe_core::traits::DistributedLockProvider>)
|
||||||
|
.use_queue_provider(queue as Arc<dyn wfe_core::traits::QueueProvider>)
|
||||||
|
.use_log_sink(log_sink as Arc<dyn wfe_core::traits::LogSink>)
|
||||||
|
.build()
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
for (key, factory) in compiled.step_factories {
|
||||||
|
host.register_step_factory(&key, factory).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
host.register_workflow_definition(compiled.definition.clone())
|
||||||
|
.await;
|
||||||
|
host.start().await.unwrap();
|
||||||
|
|
||||||
|
let instance = run_workflow_sync(
|
||||||
|
&host,
|
||||||
|
&compiled.definition.id,
|
||||||
|
compiled.definition.version,
|
||||||
|
serde_json::json!({}),
|
||||||
|
Duration::from_secs(10),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
host.stop().await;
|
||||||
|
instance
|
||||||
|
}
|
||||||
|
|
||||||
#[tokio::test]
|
#[tokio::test]
|
||||||
async fn simple_echo_captures_stdout() {
|
async fn simple_echo_captures_stdout() {
|
||||||
let yaml = r#"
|
let yaml = r#"
|
||||||
@@ -236,3 +300,176 @@ workflow:
|
|||||||
let instance = run_yaml_workflow(yaml).await;
|
let instance = run_yaml_workflow(yaml).await;
|
||||||
assert_eq!(instance.status, WorkflowStatus::Complete);
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ── LogSink regression tests ─────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn log_sink_receives_stdout_chunks() {
|
||||||
|
let log_sink = Arc::new(CollectingLogSink::new());
|
||||||
|
let yaml = r#"
|
||||||
|
workflow:
|
||||||
|
id: logsink-stdout-wf
|
||||||
|
version: 1
|
||||||
|
steps:
|
||||||
|
- name: echo-step
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "line one" && echo "line two"
|
||||||
|
"#;
|
||||||
|
let instance = run_yaml_workflow_with_log_sink(yaml, log_sink.clone()).await;
|
||||||
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
|
|
||||||
|
let chunks = log_sink.chunks().await;
|
||||||
|
assert!(chunks.len() >= 2, "expected at least 2 stdout chunks, got {}", chunks.len());
|
||||||
|
|
||||||
|
let stdout_chunks: Vec<_> = chunks
|
||||||
|
.iter()
|
||||||
|
.filter(|c| c.stream == wfe_core::traits::LogStreamType::Stdout)
|
||||||
|
.collect();
|
||||||
|
assert!(stdout_chunks.len() >= 2, "expected at least 2 stdout chunks");
|
||||||
|
|
||||||
|
let all_data: String = stdout_chunks.iter()
|
||||||
|
.map(|c| String::from_utf8_lossy(&c.data).to_string())
|
||||||
|
.collect();
|
||||||
|
assert!(all_data.contains("line one"), "stdout should contain 'line one', got: {all_data}");
|
||||||
|
assert!(all_data.contains("line two"), "stdout should contain 'line two', got: {all_data}");
|
||||||
|
|
||||||
|
// Verify chunk metadata.
|
||||||
|
for chunk in &stdout_chunks {
|
||||||
|
assert!(!chunk.workflow_id.is_empty());
|
||||||
|
assert_eq!(chunk.step_name, "echo-step");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn log_sink_receives_stderr_chunks() {
|
||||||
|
let log_sink = Arc::new(CollectingLogSink::new());
|
||||||
|
let yaml = r#"
|
||||||
|
workflow:
|
||||||
|
id: logsink-stderr-wf
|
||||||
|
version: 1
|
||||||
|
steps:
|
||||||
|
- name: err-step
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "stderr output" >&2
|
||||||
|
"#;
|
||||||
|
let instance = run_yaml_workflow_with_log_sink(yaml, log_sink.clone()).await;
|
||||||
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
|
|
||||||
|
let chunks = log_sink.chunks().await;
|
||||||
|
let stderr_chunks: Vec<_> = chunks
|
||||||
|
.iter()
|
||||||
|
.filter(|c| c.stream == wfe_core::traits::LogStreamType::Stderr)
|
||||||
|
.collect();
|
||||||
|
assert!(!stderr_chunks.is_empty(), "expected stderr chunks");
|
||||||
|
|
||||||
|
let stderr_data: String = stderr_chunks.iter()
|
||||||
|
.map(|c| String::from_utf8_lossy(&c.data).to_string())
|
||||||
|
.collect();
|
||||||
|
assert!(stderr_data.contains("stderr output"), "stderr should contain 'stderr output', got: {stderr_data}");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn log_sink_captures_multi_step_workflow() {
|
||||||
|
let log_sink = Arc::new(CollectingLogSink::new());
|
||||||
|
let yaml = r#"
|
||||||
|
workflow:
|
||||||
|
id: logsink-multi-wf
|
||||||
|
version: 1
|
||||||
|
steps:
|
||||||
|
- name: step-a
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "from step a"
|
||||||
|
- name: step-b
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "from step b"
|
||||||
|
"#;
|
||||||
|
let instance = run_yaml_workflow_with_log_sink(yaml, log_sink.clone()).await;
|
||||||
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
|
|
||||||
|
let chunks = log_sink.chunks().await;
|
||||||
|
let step_names: Vec<_> = chunks.iter().map(|c| c.step_name.as_str()).collect();
|
||||||
|
assert!(step_names.contains(&"step-a"), "should have chunks from step-a");
|
||||||
|
assert!(step_names.contains(&"step-b"), "should have chunks from step-b");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn log_sink_not_configured_still_works() {
|
||||||
|
// Without a log_sink, the buffered path should still work.
|
||||||
|
let yaml = r#"
|
||||||
|
workflow:
|
||||||
|
id: no-logsink-wf
|
||||||
|
version: 1
|
||||||
|
steps:
|
||||||
|
- name: echo-step
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "no sink"
|
||||||
|
"#;
|
||||||
|
let instance = run_yaml_workflow(yaml).await;
|
||||||
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
|
let data = instance.data.as_object().unwrap();
|
||||||
|
assert!(data.get("echo-step.stdout").unwrap().as_str().unwrap().contains("no sink"));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── Security regression tests ────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn security_blocked_env_vars_not_injected() {
|
||||||
|
// MEDIUM-22: Workflow data keys like "path" must NOT override PATH.
|
||||||
|
let yaml = r#"
|
||||||
|
workflow:
|
||||||
|
id: sec-env-wf
|
||||||
|
version: 1
|
||||||
|
steps:
|
||||||
|
- name: check-path
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "$PATH"
|
||||||
|
"#;
|
||||||
|
// Set a workflow data key "path" that would override PATH if not blocked.
|
||||||
|
let instance = run_yaml_workflow_with_data(
|
||||||
|
yaml,
|
||||||
|
serde_json::json!({"path": "/attacker/bin"}),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
|
|
||||||
|
let data = instance.data.as_object().unwrap();
|
||||||
|
let stdout = data.get("check-path.stdout").unwrap().as_str().unwrap();
|
||||||
|
// PATH should NOT contain /attacker/bin.
|
||||||
|
assert!(
|
||||||
|
!stdout.contains("/attacker/bin"),
|
||||||
|
"PATH should not be overridden by workflow data, got: {stdout}"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn security_safe_env_vars_still_injected() {
|
||||||
|
// Verify non-blocked keys still work after the security fix.
|
||||||
|
let wfe_prefix = "##wfe";
|
||||||
|
let yaml = format!(
|
||||||
|
r#"
|
||||||
|
workflow:
|
||||||
|
id: sec-safe-env-wf
|
||||||
|
version: 1
|
||||||
|
steps:
|
||||||
|
- name: check-var
|
||||||
|
type: shell
|
||||||
|
config:
|
||||||
|
run: echo "{wfe_prefix}[output val=$MY_CUSTOM_VAR]"
|
||||||
|
"#
|
||||||
|
);
|
||||||
|
let instance = run_yaml_workflow_with_data(
|
||||||
|
&yaml,
|
||||||
|
serde_json::json!({"my_custom_var": "works"}),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
assert_eq!(instance.status, WorkflowStatus::Complete);
|
||||||
|
|
||||||
|
let data = instance.data.as_object().unwrap();
|
||||||
|
assert_eq!(data.get("val").and_then(|v| v.as_str()), Some("works"));
|
||||||
|
}
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ use tracing::{debug, error, info, warn};
|
|||||||
|
|
||||||
use wfe_core::executor::{StepRegistry, WorkflowExecutor};
|
use wfe_core::executor::{StepRegistry, WorkflowExecutor};
|
||||||
use wfe_core::models::{
|
use wfe_core::models::{
|
||||||
Event, ExecutionPointer, PointerStatus, QueueType, WorkflowDefinition, WorkflowInstance,
|
Event, ExecutionPointer, LifecycleEvent, LifecycleEventType, PointerStatus, QueueType,
|
||||||
WorkflowStatus,
|
WorkflowDefinition, WorkflowInstance, WorkflowStatus,
|
||||||
};
|
};
|
||||||
use wfe_core::traits::{
|
use wfe_core::traits::{
|
||||||
DistributedLockProvider, HostContext, LifecyclePublisher, PersistenceProvider, QueueProvider,
|
DistributedLockProvider, HostContext, LifecyclePublisher, PersistenceProvider, QueueProvider,
|
||||||
@@ -308,6 +308,18 @@ impl WorkflowHost {
|
|||||||
.queue_work(&id, QueueType::Workflow)
|
.queue_work(&id, QueueType::Workflow)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
// Publish lifecycle event.
|
||||||
|
if let Some(ref publisher) = self.lifecycle {
|
||||||
|
let _ = publisher
|
||||||
|
.publish(LifecycleEvent::new(
|
||||||
|
&id,
|
||||||
|
definition_id,
|
||||||
|
version,
|
||||||
|
LifecycleEventType::Started,
|
||||||
|
))
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
Ok(id)
|
Ok(id)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,6 +357,16 @@ impl WorkflowHost {
|
|||||||
}
|
}
|
||||||
instance.status = WorkflowStatus::Suspended;
|
instance.status = WorkflowStatus::Suspended;
|
||||||
self.persistence.persist_workflow(&instance).await?;
|
self.persistence.persist_workflow(&instance).await?;
|
||||||
|
if let Some(ref publisher) = self.lifecycle {
|
||||||
|
let _ = publisher
|
||||||
|
.publish(LifecycleEvent::new(
|
||||||
|
id,
|
||||||
|
&instance.workflow_definition_id,
|
||||||
|
instance.version,
|
||||||
|
LifecycleEventType::Suspended,
|
||||||
|
))
|
||||||
|
.await;
|
||||||
|
}
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -362,6 +384,16 @@ impl WorkflowHost {
|
|||||||
.queue_work(id, QueueType::Workflow)
|
.queue_work(id, QueueType::Workflow)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
|
if let Some(ref publisher) = self.lifecycle {
|
||||||
|
let _ = publisher
|
||||||
|
.publish(LifecycleEvent::new(
|
||||||
|
id,
|
||||||
|
&instance.workflow_definition_id,
|
||||||
|
instance.version,
|
||||||
|
LifecycleEventType::Resumed,
|
||||||
|
))
|
||||||
|
.await;
|
||||||
|
}
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -376,6 +408,16 @@ impl WorkflowHost {
|
|||||||
instance.status = WorkflowStatus::Terminated;
|
instance.status = WorkflowStatus::Terminated;
|
||||||
instance.complete_time = Some(chrono::Utc::now());
|
instance.complete_time = Some(chrono::Utc::now());
|
||||||
self.persistence.persist_workflow(&instance).await?;
|
self.persistence.persist_workflow(&instance).await?;
|
||||||
|
if let Some(ref publisher) = self.lifecycle {
|
||||||
|
let _ = publisher
|
||||||
|
.publish(LifecycleEvent::new(
|
||||||
|
id,
|
||||||
|
&instance.workflow_definition_id,
|
||||||
|
instance.version,
|
||||||
|
LifecycleEventType::Terminated,
|
||||||
|
))
|
||||||
|
.await;
|
||||||
|
}
|
||||||
Ok(true)
|
Ok(true)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -21,6 +21,7 @@ pub struct WorkflowHostBuilder {
|
|||||||
queue_provider: Option<Arc<dyn QueueProvider>>,
|
queue_provider: Option<Arc<dyn QueueProvider>>,
|
||||||
lifecycle: Option<Arc<dyn LifecyclePublisher>>,
|
lifecycle: Option<Arc<dyn LifecyclePublisher>>,
|
||||||
search: Option<Arc<dyn SearchIndex>>,
|
search: Option<Arc<dyn SearchIndex>>,
|
||||||
|
log_sink: Option<Arc<dyn wfe_core::traits::LogSink>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl WorkflowHostBuilder {
|
impl WorkflowHostBuilder {
|
||||||
@@ -31,6 +32,7 @@ impl WorkflowHostBuilder {
|
|||||||
queue_provider: None,
|
queue_provider: None,
|
||||||
lifecycle: None,
|
lifecycle: None,
|
||||||
search: None,
|
search: None,
|
||||||
|
log_sink: None,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -64,6 +66,12 @@ impl WorkflowHostBuilder {
|
|||||||
self
|
self
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Set an optional log sink for real-time step output streaming.
|
||||||
|
pub fn use_log_sink(mut self, sink: Arc<dyn wfe_core::traits::LogSink>) -> Self {
|
||||||
|
self.log_sink = Some(sink);
|
||||||
|
self
|
||||||
|
}
|
||||||
|
|
||||||
/// Build the `WorkflowHost`.
|
/// Build the `WorkflowHost`.
|
||||||
///
|
///
|
||||||
/// Returns an error if persistence, lock_provider, or queue_provider have not been set.
|
/// Returns an error if persistence, lock_provider, or queue_provider have not been set.
|
||||||
@@ -90,6 +98,9 @@ impl WorkflowHostBuilder {
|
|||||||
if let Some(ref search) = self.search {
|
if let Some(ref search) = self.search {
|
||||||
executor = executor.with_search(Arc::clone(search));
|
executor = executor.with_search(Arc::clone(search));
|
||||||
}
|
}
|
||||||
|
if let Some(ref log_sink) = self.log_sink {
|
||||||
|
executor = executor.with_log_sink(Arc::clone(log_sink));
|
||||||
|
}
|
||||||
|
|
||||||
Ok(WorkflowHost {
|
Ok(WorkflowHost {
|
||||||
persistence,
|
persistence,
|
||||||
|
|||||||
Reference in New Issue
Block a user