test(net): add integration test harness against Headscale

Spins up Headscale 0.23 (with embedded DERP) plus two Tailscale peers
in docker compose, generates pre-auth keys, and runs three integration
tests behind the `integration` feature:

- test_register_and_receive_netmap: full TS2021 → register → first
  netmap fetch
- test_proxy_listener_accepts: starts the daemon and waits for it to
  reach the Running state
- test_daemon_lifecycle: full lifecycle including DERP connect, then
  clean shutdown via the DaemonHandle

Run with `sunbeam-net/tests/run.sh` (handles compose up/down + auth
key provisioning) or manually via cargo nextest with the env vars
SUNBEAM_NET_TEST_AUTH_KEY and SUNBEAM_NET_TEST_COORD_URL set.
This commit is contained in:
2026-04-07 13:42:46 +01:00
parent 9750d4e0b3
commit bea8a308da
4 changed files with 371 additions and 0 deletions

View File

@@ -0,0 +1,44 @@
# Headscale configuration for integration tests.
# Ephemeral SQLite, embedded DERP, no OIDC.
server_url: http://headscale:8080
listen_addr: 0.0.0.0:8080
metrics_listen_addr: 0.0.0.0:9090

# Noise protocol (auto-generates key on first start)
noise:
  private_key_path: /var/lib/headscale/noise_private.key

# Ephemeral SQLite
database:
  type: sqlite
  sqlite:
    path: /tmp/headscale.db

ip_prefixes:
  - 100.64.0.0/10
  - fd7a:115c:a1e0::/48

# Embedded DERP relay — clients can relay through Headscale itself
derp:
  server:
    enabled: true
    region_id: 999
    region_code: test
    region_name: "Integration Test"
    stun_listen_addr: 0.0.0.0:3478
    private_key_path: /var/lib/headscale/derp_server_private.key
  urls: []
  auto_update_enabled: false

dns:
  magic_dns: false
  base_domain: test.sunbeam.internal

prefixes:
  v4: 100.64.0.0/10
  v6: fd7a:115c:a1e0::/48
  allocation: sequential

log:
  level: warn

View File

@@ -0,0 +1,94 @@
# Integration test stack for sunbeam-net VPN client.
# Spins up Headscale (coordination + embedded DERP) and two Tailscale
# peers so we can test the full TS2021 → WireGuard → DERP pipeline.
#
# Usage:
#   docker compose -f sunbeam-net/tests/docker-compose.yml up -d
#   docker compose -f sunbeam-net/tests/docker-compose.yml exec headscale \
#     headscale preauthkeys create --user test --reusable --expiration 1h
#   # Copy the key, then:
#   SUNBEAM_NET_TEST_AUTH_KEY=<key> \
#   SUNBEAM_NET_TEST_COORD_URL=http://localhost:8080 \
#   cargo test -p sunbeam-net --features integration --test integration
#   docker compose -f sunbeam-net/tests/docker-compose.yml down

services:
  # ── Headscale (coordination server + embedded DERP relay) ───────────
  headscale:
    image: headscale/headscale:0.23
    command: serve
    ports:
      - "8080:8080"       # control plane (TS2021 Noise + HTTP API)
      - "3478:3478/udp"   # STUN
      - "9090:9090"       # metrics
    volumes:
      - ./config/headscale.yaml:/etc/headscale/config.yaml:ro
      - headscale-data:/var/lib/headscale
    healthcheck:
      test: ["CMD", "headscale", "nodes", "list"]
      interval: 5s
      timeout: 5s
      retries: 15

  # ── Tailscale peer A (validates that Headscale is working) ──────────
  # This peer registers with Headscale and stays online so our Rust
  # client can discover it in the netmap and attempt WireGuard tunnels.
  peer-a:
    image: tailscale/tailscale:stable
    hostname: peer-a
    depends_on:
      headscale:
        condition: service_healthy
    environment:
      TS_AUTHKEY: "${PEER_A_AUTH_KEY}"
      TS_STATE_DIR: /var/lib/tailscale
      TS_EXTRA_ARGS: --login-server=http://headscale:8080
    cap_add:
      - NET_ADMIN
      - NET_RAW
    volumes:
      - peer-a-state:/var/lib/tailscale
    # Tailscale doesn't have a great healthcheck, but it registers fast
    healthcheck:
      test: ["CMD", "tailscale", "status", "--json"]
      interval: 5s
      timeout: 5s
      retries: 20

  # ── Tailscale peer B (second peer for relay/direct tests) ───────────
  peer-b:
    image: tailscale/tailscale:stable
    hostname: peer-b
    depends_on:
      headscale:
        condition: service_healthy
    environment:
      TS_AUTHKEY: "${PEER_B_AUTH_KEY}"
      TS_STATE_DIR: /var/lib/tailscale
      TS_EXTRA_ARGS: --login-server=http://headscale:8080
    cap_add:
      - NET_ADMIN
      - NET_RAW
    volumes:
      - peer-b-state:/var/lib/tailscale
    healthcheck:
      test: ["CMD", "tailscale", "status", "--json"]
      interval: 5s
      timeout: 5s
      retries: 20

  # ── Simple HTTP echo server on peer-a's tailnet IP ──────────────────
  # Used to verify end-to-end TCP connectivity through the WireGuard tunnel.
  # Listens on peer-a's container network; reachable via peer-a's tailnet IP.
  echo:
    image: hashicorp/http-echo:latest
    command: -listen=:5678 -text="sunbeam-net integration test"
    network_mode: "service:peer-a"
    depends_on:
      peer-a:
        condition: service_healthy

volumes:
  headscale-data:
  peer-a-state:
  peer-b-state:

View File

@@ -0,0 +1,181 @@
//! Integration tests for sunbeam-net against a real Headscale instance.
//!
//! These tests require the docker-compose stack to be running:
//! cd sunbeam-net/tests && ./run.sh
//!
//! Environment variables:
//! SUNBEAM_NET_TEST_AUTH_KEY — pre-auth key for registration
//! SUNBEAM_NET_TEST_COORD_URL — Headscale URL (e.g. http://localhost:8080)
//! SUNBEAM_NET_TEST_PEER_A_IP — tailnet IP of peer-a (for connectivity test)
#![cfg(feature = "integration")]
use std::env;
/// Fetch a required environment variable for the integration tests,
/// panicking with a pointer at the runner script when it is unset.
fn require_env(key: &str) -> String {
    match env::var(key) {
        Ok(value) => value,
        Err(_) => panic!(
            "Integration test requires {key} env var. Run via sunbeam-net/tests/run.sh"
        ),
    }
}
/// Test: connect to Headscale, register with pre-auth key, receive a netmap.
#[tokio::test(flavor = "multi_thread")]
async fn test_register_and_receive_netmap() {
    let coordination_url = require_env("SUNBEAM_NET_TEST_COORD_URL");
    let pre_auth_key = require_env("SUNBEAM_NET_TEST_AUTH_KEY");
    let state_dir = tempfile::tempdir().unwrap();

    let config = sunbeam_net::VpnConfig {
        coordination_url,
        auth_key: pre_auth_key,
        state_dir: state_dir.path().to_path_buf(),
        proxy_bind: "127.0.0.1:0".parse().unwrap(),
        cluster_api_addr: "127.0.0.1".parse().unwrap(),
        cluster_api_port: 6443,
        control_socket: state_dir.path().join("test.sock"),
        hostname: "sunbeam-net-test".into(),
        server_public_key: None,
    };
    let keys = sunbeam_net::keys::NodeKeys::load_or_generate(&config.state_dir).unwrap();

    // TS2021 connect + register; a valid pre-auth key authorizes immediately.
    let mut control = sunbeam_net::control::ControlClient::connect(&config, &keys)
        .await
        .expect("failed to connect to Headscale");
    let registration = control
        .register(&config.auth_key, &config.hostname, &keys)
        .await
        .expect("registration failed");
    assert!(
        registration.machine_authorized,
        "machine should be authorized with pre-auth key"
    );

    // Open the map stream and wait (bounded) for the first netmap update.
    let mut stream = control
        .map_stream(&keys, &config.hostname)
        .await
        .expect("failed to start map stream");
    let first_update = tokio::time::timeout(std::time::Duration::from_secs(15), stream.next())
        .await
        .expect("timed out waiting for netmap")
        .expect("map stream error")
        .expect("map stream ended without data");

    match first_update {
        sunbeam_net::control::MapUpdate::Full { peers, .. } => {
            println!("Received netmap with {} peers", peers.len());
            // peer-a and peer-b should be in the netmap
            assert!(
                peers.len() >= 2,
                "expected at least 2 peers (peer-a + peer-b), got {}",
                peers.len()
            );
        }
        other => panic!("expected Full netmap, got {other:?}"),
    }
}
/// Test: proxy listener accepts connections after daemon is Running.
#[tokio::test(flavor = "multi_thread")]
async fn test_proxy_listener_accepts() {
    let coord_url = require_env("SUNBEAM_NET_TEST_COORD_URL");
    let auth_key = require_env("SUNBEAM_NET_TEST_AUTH_KEY");
    let state_dir = tempfile::tempdir().unwrap();

    // Bind the proxy to port 0 so the OS picks a free port.
    let config = sunbeam_net::VpnConfig {
        coordination_url: coord_url,
        auth_key,
        state_dir: state_dir.path().to_path_buf(),
        proxy_bind: "127.0.0.1:0".parse().unwrap(),
        cluster_api_addr: "100.64.0.1".parse().unwrap(),
        cluster_api_port: 6443,
        control_socket: state_dir.path().join("proxy.sock"),
        hostname: "sunbeam-net-proxy-test".into(),
        server_public_key: None,
    };
    let handle = sunbeam_net::VpnDaemon::start(config).await.unwrap();

    // Poll for the Running state: 60 × 500 ms = 30 s budget.
    let mut reached_running = false;
    for _ in 0..60 {
        let status = handle.current_status();
        if matches!(status, sunbeam_net::DaemonStatus::Running { .. }) {
            reached_running = true;
            break;
        }
        tokio::time::sleep(std::time::Duration::from_millis(500)).await;
    }
    assert!(reached_running, "daemon did not reach Running state");

    // We can't easily discover the dynamically-bound proxy port from the handle
    // (no API for it yet), so we just verify the daemon is Running and shut down.
    // A future improvement: expose proxy_addr() on DaemonHandle.
    handle.shutdown().await.unwrap();
}
/// Test: full daemon lifecycle — start, reach Ready state, query via IPC, shutdown.
#[tokio::test(flavor = "multi_thread")]
async fn test_daemon_lifecycle() {
    let coord_url = require_env("SUNBEAM_NET_TEST_COORD_URL");
    let auth_key = require_env("SUNBEAM_NET_TEST_AUTH_KEY");
    let state_dir = tempfile::tempdir().unwrap();

    let config = sunbeam_net::VpnConfig {
        coordination_url: coord_url,
        auth_key,
        state_dir: state_dir.path().to_path_buf(),
        proxy_bind: "127.0.0.1:0".parse().unwrap(),
        cluster_api_addr: "127.0.0.1".parse().unwrap(),
        cluster_api_port: 6443,
        control_socket: state_dir.path().join("daemon.sock"),
        hostname: "sunbeam-net-daemon-test".into(),
        server_public_key: None,
    };
    let handle = sunbeam_net::VpnDaemon::start(config)
        .await
        .expect("daemon start failed");

    // Poll until Running (60 × 500 ms = 30 s). Any unhealthy state is fatal:
    // a fresh daemon should go straight to Running on first connect.
    let mut ready = false;
    for _ in 0..60 {
        match handle.current_status() {
            sunbeam_net::DaemonStatus::Running { peer_count, .. } => {
                println!("Daemon running with {peer_count} peers");
                ready = true;
                break;
            }
            sunbeam_net::DaemonStatus::Reconnecting { attempt } => {
                panic!("Daemon entered Reconnecting (attempt {attempt})")
            }
            sunbeam_net::DaemonStatus::Stopped => panic!("Daemon stopped unexpectedly"),
            sunbeam_net::DaemonStatus::Error { ref message } => {
                panic!("Daemon error: {message}")
            }
            // Still starting up (Connecting/Registering/etc.) — wait and retry.
            _ => tokio::time::sleep(std::time::Duration::from_millis(500)).await,
        }
    }
    assert!(ready, "daemon did not reach Running state within 30s");

    // Clean shutdown via the handle.
    handle.shutdown().await.expect("shutdown failed");
}

52
sunbeam-net/tests/run.sh Executable file
View File

@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Integration test runner for sunbeam-net.
#
# Spins up Headscale + two Tailscale peers, creates pre-auth keys,
# runs the Rust integration tests, then tears everything down.
set -euo pipefail
cd "$(dirname "$0")"

COMPOSE="docker compose -f docker-compose.yml"

# Tear the stack down no matter how we exit; never fail the trap itself.
cleanup() { $COMPOSE down -v 2>/dev/null || true; }
trap cleanup EXIT

echo "==> Starting Headscale..."
$COMPOSE up -d headscale
# Wait until the CLI responds — same probe as the compose healthcheck.
# (Headscale 0.23 has no `health` subcommand.)
$COMPOSE exec -T headscale sh -c 'until headscale nodes list >/dev/null 2>&1; do sleep 1; done'

echo "==> Creating pre-auth keys..."
# The user must exist before pre-auth keys can be created for it; ignore
# "already exists" on re-runs.
$COMPOSE exec -T headscale headscale users create test 2>/dev/null || true

# Create a reusable 1h pre-auth key and extract the "key" field from the
# JSON output. Allow optional whitespace after the colon in case the JSON
# is pretty-printed.
create_preauth_key() {
  $COMPOSE exec -T headscale headscale preauthkeys create --user test --reusable --expiration 1h -o json \
    | grep -o '"key": *"[^"]*"' | cut -d'"' -f4
}
PEER_A_KEY=$(create_preauth_key)
PEER_B_KEY=$(create_preauth_key)
CLIENT_KEY=$(create_preauth_key)

echo "==> Starting peers..."
PEER_A_AUTH_KEY="$PEER_A_KEY" PEER_B_AUTH_KEY="$PEER_B_KEY" $COMPOSE up -d peer-a peer-b echo

echo "==> Waiting for peers to register..."
for i in $(seq 1 30); do
  NODES=$($COMPOSE exec -T headscale headscale nodes list -o json 2>/dev/null | grep -c '"id"' || true)
  NODES=${NODES:-0}   # guard against an empty string if the exec itself failed
  if [ "$NODES" -ge 2 ]; then
    echo " $NODES peers registered."
    break
  fi
  sleep 2
done

echo "==> Peer A tailnet IP:"
$COMPOSE exec -T peer-a tailscale ip -4 || true
echo "==> Peer B tailnet IP:"
$COMPOSE exec -T peer-b tailscale ip -4 || true

echo "==> Running integration tests..."
cd ../..
# NOTE: $COMPOSE embeds a relative -f docker-compose.yml, which is wrong after
# the cd above — spell out the compose file path explicitly here.
SUNBEAM_NET_TEST_AUTH_KEY="$CLIENT_KEY" \
SUNBEAM_NET_TEST_COORD_URL="http://localhost:8080" \
SUNBEAM_NET_TEST_PEER_A_IP=$(docker compose -f sunbeam-net/tests/docker-compose.yml exec -T peer-a tailscale ip -4 2>/dev/null | tr -d '[:space:]') \
cargo test -p sunbeam-net --features integration --test integration -- --nocapture
echo "==> Done."