chore: checkpoint for the demo. almost!!

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2026-01-05 19:41:38 +00:00
parent d1d3aec8aa
commit d2fc967f1a
29 changed files with 3389 additions and 454 deletions

View File

@@ -0,0 +1,652 @@
//! Multi-node sync integration tests with real iroh-gossip networking
//!
//! These tests verify actual message passing and synchronization between nodes
//! using real iroh-gossip with localhost connections.
mod test_utils;
use anyhow::Result;
use test_utils::{TestContext, create_test_app, wait_for_sync, count_entities_with_id, setup_gossip_pair, setup_gossip_trio};
use bevy::prelude::*;
use iroh::{Endpoint, protocol::Router};
use libmarathon::networking::{
CurrentSession,
NetworkEntityMap,
NetworkedEntity,
SessionState,
Synced,
};
use libmarathon::persistence::Persisted;
use std::time::Duration;
use tokio::time::Instant;
use uuid::Uuid;
// ============================================================================
// Integration Tests
// ============================================================================
// Note: All test utilities (gossip setup, test app creation, sync helpers)
// are now in the shared test_utils module to avoid duplication
/// Test: Two nodes can synchronize a cube spawn using real iroh-gossip
#[tokio::test(flavor = "multi_thread")]
async fn test_two_nodes_sync_cube_spawn() -> Result<()> {
println!("=== Starting test_two_nodes_sync_cube_spawn ===");
// One temp-dir context per node plus a real localhost gossip transport.
let ctx1 = TestContext::new();
let ctx2 = TestContext::new();
let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
let node1_id = bridge1.node_id();
let node2_id = bridge2.node_id();
let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
println!("Node1 ID: {}", node1_id);
println!("Node2 ID: {}", node2_id);
// Run one frame on each app so plugin setup completes.
app1.update();
app2.update();
// Join both nodes to the same session so sync messages are exchanged.
use libmarathon::networking::SessionId;
let session_id = SessionId::new();
for app in [&mut app1, &mut app2] {
let mut session = app.world_mut().resource_mut::<CurrentSession>();
session.session.id = session_id.clone();
session.transition_to(SessionState::Active);
}
// Node 1 spawns a networked, persisted, synced cube.
let cube_id = Uuid::new_v4();
app1.world_mut().spawn((
NetworkedEntity::with_id(cube_id, node1_id),
Persisted::with_id(cube_id),
Synced,
Transform::from_xyz(1.0, 2.0, 3.0),
));
println!("Node1: Spawned cube {}", cube_id);
// Poll both apps until node 2's entity map contains the cube.
wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |_w1, w2| {
w2.resource::<NetworkEntityMap>().get_entity(cube_id).is_some()
})
.await?;
println!("✓ Node2 successfully received cube from Node1");
// After sync, node 2's vector clock should reference both participants.
{
use libmarathon::networking::NodeVectorClock;
let clock1 = app1.world().resource::<NodeVectorClock>();
let clock2 = app2.world().resource::<NodeVectorClock>();
println!("Node1 clock: {:?}", clock1.clock);
println!("Node2 clock: {:?}", clock2.clock);
// Both clocks should know about both nodes
assert!(
clock2.clock.node_count() >= 2,
"Node2 should know about both nodes in its clock"
);
}
// Tear down routers first, then close the endpoints.
router1.shutdown().await?;
router2.shutdown().await?;
ep1.close().await;
ep2.close().await;
println!("✓ Two nodes sync cube spawn test passed");
Ok(())
}
/// Test: Three nodes maintain consistent state using real iroh-gossip
///
/// Each of the three nodes spawns one cube; the test then polls all three
/// apps until every node's `NetworkEntityMap` contains all three cube IDs,
/// or fails after a 10-second timeout.
#[tokio::test(flavor = "multi_thread")]
async fn test_three_nodes_consistency() -> Result<()> {
println!("=== Starting test_three_nodes_consistency ===");
// One temp-dir context per node so each app gets its own database path.
let ctx1 = TestContext::new();
let ctx2 = TestContext::new();
let ctx3 = TestContext::new();
let (ep1, ep2, ep3, router1, router2, router3, bridge1, bridge2, bridge3) =
setup_gossip_trio().await?;
let node1_id = bridge1.node_id();
let node2_id = bridge2.node_id();
let node3_id = bridge3.node_id();
let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
let mut app3 = create_test_app(node3_id, ctx3.db_path(), bridge3);
println!("Node1 ID: {}", node1_id);
println!("Node2 ID: {}", node2_id);
println!("Node3 ID: {}", node3_id);
// Initialize all apps (one frame each so plugin setup completes)
app1.update();
app2.update();
app3.update();
// Connect all to the same session so they exchange sync messages
use libmarathon::networking::SessionId;
let session_id = SessionId::new();
for app in [&mut app1, &mut app2, &mut app3] {
let mut session = app.world_mut().resource_mut::<CurrentSession>();
session.session.id = session_id.clone();
session.transition_to(SessionState::Active);
}
// Each node spawns a cube with a distinct position so the three
// entities are distinguishable after sync.
let cube1_id = Uuid::new_v4();
let cube2_id = Uuid::new_v4();
let cube3_id = Uuid::new_v4();
app1.world_mut().spawn((
NetworkedEntity::with_id(cube1_id, node1_id),
Persisted::with_id(cube1_id),
Synced,
Transform::from_xyz(1.0, 0.0, 0.0),
));
app2.world_mut().spawn((
NetworkedEntity::with_id(cube2_id, node2_id),
Persisted::with_id(cube2_id),
Synced,
Transform::from_xyz(0.0, 2.0, 0.0),
));
app3.world_mut().spawn((
NetworkedEntity::with_id(cube3_id, node3_id),
Persisted::with_id(cube3_id),
Synced,
Transform::from_xyz(0.0, 0.0, 3.0),
));
println!("All nodes spawned their cubes");
// Wait for convergence - all nodes should have all 3 cubes.
// Tick each app once per iteration, then check how many of the three
// cube IDs each node's entity map can resolve.
let start = Instant::now();
let mut converged = false;
while start.elapsed() < Duration::from_secs(10) {
app1.update();
app2.update();
app3.update();
let map1 = app1.world().resource::<NetworkEntityMap>();
let map2 = app2.world().resource::<NetworkEntityMap>();
let map3 = app3.world().resource::<NetworkEntityMap>();
let count1 = [cube1_id, cube2_id, cube3_id].iter().filter(|id| map1.get_entity(**id).is_some()).count();
let count2 = [cube1_id, cube2_id, cube3_id].iter().filter(|id| map2.get_entity(**id).is_some()).count();
let count3 = [cube1_id, cube2_id, cube3_id].iter().filter(|id| map3.get_entity(**id).is_some()).count();
if count1 == 3 && count2 == 3 && count3 == 3 {
println!("✓ All nodes converged to 3 cubes");
converged = true;
break;
}
// ~60 Hz polling cadence between convergence checks.
tokio::time::sleep(Duration::from_millis(16)).await;
}
assert!(converged, "Nodes did not converge to consistent state");
// Cleanup: shut down routers, then close endpoints.
router1.shutdown().await?;
router2.shutdown().await?;
router3.shutdown().await?;
ep1.close().await;
ep2.close().await;
ep3.close().await;
println!("✓ Three nodes consistency test passed");
Ok(())
}
/// Test: FullState sync does not create duplicate entities
/// This tests the fix for the bug where apply_full_state() would spawn
/// duplicate entities instead of reusing existing ones.
///
/// Node 1 spawns three cubes; after sync the test asserts that node 2
/// holds exactly one entity per cube ID (and exactly 3 NetworkedEntity
/// instances overall), both immediately after sync and after two more
/// seconds of continued syncing.
#[tokio::test(flavor = "multi_thread")]
async fn test_fullstate_no_duplicate_entities() -> Result<()> {
println!("=== Starting test_fullstate_no_duplicate_entities ===");
// Per-node temp dirs plus a real localhost gossip pair.
let ctx1 = TestContext::new();
let ctx2 = TestContext::new();
let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
let node1_id = bridge1.node_id();
let node2_id = bridge2.node_id();
let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
println!("Node1 ID: {}", node1_id);
println!("Node2 ID: {}", node2_id);
// Initialize both apps (one frame each so plugin setup completes)
app1.update();
app2.update();
// Connect both to the same session so they exchange sync messages
use libmarathon::networking::SessionId;
let session_id = SessionId::new();
{
let mut session1 = app1.world_mut().resource_mut::<CurrentSession>();
session1.session.id = session_id.clone();
session1.transition_to(SessionState::Active);
}
{
let mut session2 = app2.world_mut().resource_mut::<CurrentSession>();
session2.session.id = session_id.clone();
session2.transition_to(SessionState::Active);
}
// Node 1: Spawn multiple cubes (distinct IDs and positions)
let cube1_id = Uuid::new_v4();
let cube2_id = Uuid::new_v4();
let cube3_id = Uuid::new_v4();
{
app1.world_mut().spawn((
NetworkedEntity::with_id(cube1_id, node1_id),
Persisted::with_id(cube1_id),
Synced,
Transform::from_xyz(1.0, 0.0, 0.0),
));
app1.world_mut().spawn((
NetworkedEntity::with_id(cube2_id, node1_id),
Persisted::with_id(cube2_id),
Synced,
Transform::from_xyz(2.0, 0.0, 0.0),
));
app1.world_mut().spawn((
NetworkedEntity::with_id(cube3_id, node1_id),
Persisted::with_id(cube3_id),
Synced,
Transform::from_xyz(3.0, 0.0, 0.0),
));
}
println!("Node1: Spawned 3 cubes: {}, {}, {}", cube1_id, cube2_id, cube3_id);
// Wait for sync - Node2 should receive all 3 cubes via FullState
wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |_w1, w2| {
let entity_map = w2.resource::<NetworkEntityMap>();
entity_map.get_entity(cube1_id).is_some()
&& entity_map.get_entity(cube2_id).is_some()
&& entity_map.get_entity(cube3_id).is_some()
})
.await?;
println!("✓ Node2 received all cubes from Node1");
// CRITICAL CHECK: Verify no duplicate entities were created
// Each unique network_id should appear exactly once
{
let count1 = count_entities_with_id(app2.world_mut(), cube1_id);
let count2 = count_entities_with_id(app2.world_mut(), cube2_id);
let count3 = count_entities_with_id(app2.world_mut(), cube3_id);
println!("Entity counts in Node2:");
println!("  Cube1 ({}): {}", cube1_id, count1);
println!("  Cube2 ({}): {}", cube2_id, count2);
println!("  Cube3 ({}): {}", cube3_id, count3);
assert_eq!(
count1, 1,
"Cube1 should appear exactly once, found {} instances",
count1
);
assert_eq!(
count2, 1,
"Cube2 should appear exactly once, found {} instances",
count2
);
assert_eq!(
count3, 1,
"Cube3 should appear exactly once, found {} instances",
count3
);
}
// Also verify total entity count matches expected
// (guards against duplicates under IDs other than the three we check)
{
use libmarathon::networking::NetworkedEntity;
let mut query = app2.world_mut().query::<&NetworkedEntity>();
let total_count = query.iter(app2.world()).count();
println!("Total NetworkedEntity count in Node2: {}", total_count);
assert_eq!(
total_count, 3,
"Node2 should have exactly 3 networked entities, found {}",
total_count
);
}
println!("✓ No duplicate entities created - FullState correctly reused existing entities");
// Continue syncing for a bit to ensure no duplicates appear over time
let start = Instant::now();
while start.elapsed() < Duration::from_secs(2) {
app1.update();
app2.update();
tokio::time::sleep(Duration::from_millis(16)).await;
}
// Final verification - counts should still be correct
{
let count1 = count_entities_with_id(app2.world_mut(), cube1_id);
let count2 = count_entities_with_id(app2.world_mut(), cube2_id);
let count3 = count_entities_with_id(app2.world_mut(), cube3_id);
assert_eq!(count1, 1, "Cube1 count changed during continued sync");
assert_eq!(count2, 1, "Cube2 count changed during continued sync");
assert_eq!(count3, 1, "Cube3 count changed during continued sync");
}
println!("✓ Entity counts remained stable during continued sync");
// Cleanup: shut down routers, then close endpoints.
router1.shutdown().await?;
router2.shutdown().await?;
ep1.close().await;
ep2.close().await;
println!("✓ FullState no duplicate entities test passed");
Ok(())
}
/// Test: Remote delta does not cause feedback loop
/// Verifies that applying a remote operation doesn't trigger re-broadcasting
/// the same change back to the network (runaway vector clock bug)
///
/// Strategy: after an initial sync, snapshot both nodes' vector-clock
/// sequences, run both apps for 2 seconds, and assert the sequences grew
/// by less than 10 — a feedback loop would grow them every frame.
#[tokio::test(flavor = "multi_thread")]
async fn test_remote_delta_no_feedback_loop() -> Result<()> {
println!("=== Starting test_remote_delta_no_feedback_loop ===");
// Per-node temp dirs plus a real localhost gossip pair.
let ctx1 = TestContext::new();
let ctx2 = TestContext::new();
let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
let node1_id = bridge1.node_id();
let node2_id = bridge2.node_id();
let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
println!("Node1 ID: {}", node1_id);
println!("Node2 ID: {}", node2_id);
// Initialize both apps (one frame each so plugin setup completes)
app1.update();
app2.update();
// Connect both to the same session so they exchange sync messages
use libmarathon::networking::SessionId;
let session_id = SessionId::new();
{
let mut session1 = app1.world_mut().resource_mut::<CurrentSession>();
session1.session.id = session_id.clone();
session1.transition_to(SessionState::Active);
}
{
let mut session2 = app2.world_mut().resource_mut::<CurrentSession>();
session2.session.id = session_id.clone();
session2.transition_to(SessionState::Active);
}
// Node 1: Spawn a cube to generate a delta that node 2 must apply
let cube_id = Uuid::new_v4();
{
app1.world_mut().spawn((
NetworkedEntity::with_id(cube_id, node1_id),
Persisted::with_id(cube_id),
Synced,
Transform::from_xyz(1.0, 2.0, 3.0),
));
}
println!("Node1: Spawned cube {}", cube_id);
// Wait for initial sync
wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |_w1, w2| {
let entity_map = w2.resource::<NetworkEntityMap>();
entity_map.get_entity(cube_id).is_some()
})
.await?;
println!("✓ Node2 received cube from Node1");
// Get initial clock sequences (baseline for measuring growth)
let node1_initial_seq = {
let clock1 = app1.world().resource::<libmarathon::networking::NodeVectorClock>();
clock1.sequence()
};
let node2_initial_seq = {
let clock2 = app2.world().resource::<libmarathon::networking::NodeVectorClock>();
clock2.sequence()
};
println!("Initial clocks: Node1={}, Node2={}", node1_initial_seq, node2_initial_seq);
// Run both apps for a few seconds to see if clocks stabilize
// If there's a feedback loop, clocks will keep incrementing rapidly
let start = Instant::now();
while start.elapsed() < Duration::from_secs(2) {
app1.update();
app2.update();
tokio::time::sleep(Duration::from_millis(16)).await;
}
// Check final clock sequences
let node1_final_seq = {
let clock1 = app1.world().resource::<libmarathon::networking::NodeVectorClock>();
clock1.sequence()
};
let node2_final_seq = {
let clock2 = app2.world().resource::<libmarathon::networking::NodeVectorClock>();
clock2.sequence()
};
println!("Final clocks: Node1={}, Node2={}", node1_final_seq, node2_final_seq);
// Calculate clock growth
// NOTE(review): this assumes sequences never decrease between the two
// snapshots; if sequence() is unsigned, a decrease would panic on
// underflow in debug builds - confirm against NodeVectorClock.
let node1_growth = node1_final_seq - node1_initial_seq;
let node2_growth = node2_final_seq - node2_initial_seq;
println!("Clock growth: Node1=+{}, Node2=+{}", node1_growth, node2_growth);
// With feedback loop: clocks would grow by 100s (every frame generates delta)
// Without feedback loop: clocks should grow by 0-5 (only periodic sync)
assert!(
node1_growth < 10,
"Node1 clock grew too much ({}) - indicates feedback loop",
node1_growth
);
assert!(
node2_growth < 10,
"Node2 clock grew too much ({}) - indicates feedback loop",
node2_growth
);
println!("✓ No runaway vector clock - feedback loop prevented");
// Cleanup: shut down routers, then close endpoints.
router1.shutdown().await?;
router2.shutdown().await?;
ep1.close().await;
ep2.close().await;
println!("✓ Remote delta feedback loop prevention test passed");
Ok(())
}
/// Test: Local change after remote delta gets broadcast
/// Verifies that making a local change after receiving a remote delta
/// still results in broadcasting the local change (with one frame delay)
///
/// Flow: node 1 spawns a cube at (1, 2, 3); after node 2 receives it,
/// node 2 moves the cube to (10, 20, 30); the test then waits for node 1
/// to observe the new position (within a 0.1 tolerance per axis).
#[tokio::test(flavor = "multi_thread")]
async fn test_local_change_after_remote_delta() -> Result<()> {
println!("=== Starting test_local_change_after_remote_delta ===");
// Per-node temp dirs plus a real localhost gossip pair.
let ctx1 = TestContext::new();
let ctx2 = TestContext::new();
let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
let node1_id = bridge1.node_id();
let node2_id = bridge2.node_id();
let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
println!("Node1 ID: {}", node1_id);
println!("Node2 ID: {}", node2_id);
// Initialize both apps (one frame each so plugin setup completes)
app1.update();
app2.update();
// Connect both to the same session so they exchange sync messages
use libmarathon::networking::SessionId;
let session_id = SessionId::new();
{
let mut session1 = app1.world_mut().resource_mut::<CurrentSession>();
session1.session.id = session_id.clone();
session1.transition_to(SessionState::Active);
}
{
let mut session2 = app2.world_mut().resource_mut::<CurrentSession>();
session2.session.id = session_id.clone();
session2.transition_to(SessionState::Active);
}
// Node 1: Spawn a cube at position (1, 2, 3)
let cube_id = Uuid::new_v4();
{
app1.world_mut().spawn((
NetworkedEntity::with_id(cube_id, node1_id),
Persisted::with_id(cube_id),
Synced,
Transform::from_xyz(1.0, 2.0, 3.0),
));
}
println!("Node1: Spawned cube {} at (1, 2, 3)", cube_id);
// Wait for sync
wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |_w1, w2| {
let entity_map = w2.resource::<NetworkEntityMap>();
entity_map.get_entity(cube_id).is_some()
})
.await?;
println!("✓ Node2 received cube");
// Node 2: Make a local change (move cube to 10, 20, 30)
// Look up the local Entity via the network ID, then mutate its Transform.
{
let entity_map = app2.world().resource::<NetworkEntityMap>();
let entity = entity_map.get_entity(cube_id).expect("Cube should exist on Node2");
if let Ok(mut entity_mut) = app2.world_mut().get_entity_mut(entity) {
if let Some(mut transform) = entity_mut.get_mut::<Transform>() {
transform.translation = Vec3::new(10.0, 20.0, 30.0);
println!("Node2: Moved cube to (10, 20, 30)");
}
}
}
// Wait for Node1 to receive the update from Node2
wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |w1, _w2| {
let entity_map = w1.resource::<NetworkEntityMap>();
if let Some(entity) = entity_map.get_entity(cube_id) {
if let Ok(entity_ref) = w1.get_entity(entity) {
if let Some(transform) = entity_ref.get::<Transform>() {
// Check if position is close to (10, 20, 30)
let pos = transform.translation;
(pos.x - 10.0).abs() < 0.1 &&
(pos.y - 20.0).abs() < 0.1 &&
(pos.z - 30.0).abs() < 0.1
} else {
false
}
} else {
false
}
} else {
false
}
})
.await?;
println!("✓ Node1 received local change from Node2");
// Verify final position on Node1 with per-axis assertions for a
// clearer failure message than the wait_for_sync timeout alone.
{
let entity_map = app1.world().resource::<NetworkEntityMap>();
let entity = entity_map.get_entity(cube_id).expect("Cube should exist on Node1");
if let Ok(entity_ref) = app1.world().get_entity(entity) {
if let Some(transform) = entity_ref.get::<Transform>() {
let pos = transform.translation;
println!("Node1 final position: ({}, {}, {})", pos.x, pos.y, pos.z);
assert!(
(pos.x - 10.0).abs() < 0.1,
"X position should be ~10.0, got {}",
pos.x
);
assert!(
(pos.y - 20.0).abs() < 0.1,
"Y position should be ~20.0, got {}",
pos.y
);
assert!(
(pos.z - 30.0).abs() < 0.1,
"Z position should be ~30.0, got {}",
pos.z
);
}
}
}
println!("✓ Local change after remote delta was successfully broadcast");
// Cleanup: shut down routers, then close endpoints.
router1.shutdown().await?;
router2.shutdown().await?;
ep1.close().await;
ep2.close().await;
println!("✓ Local change after remote delta test passed");
Ok(())
}

File diff suppressed because it is too large. Load Diff

View File

@@ -3,6 +3,8 @@
//! These tests validate end-to-end CRDT synchronization and persistence
//! using multiple headless Bevy apps with real iroh-gossip networking.
mod test_utils;
use std::{
path::PathBuf,
time::{
@@ -12,6 +14,7 @@ use std::{
};
use anyhow::Result;
use test_utils::{setup_gossip_pair, TestContext, wait_for_sync};
use bevy::{
MinimalPlugins,
app::{
@@ -85,37 +88,15 @@ struct TestHealth {
}
// ============================================================================
// Test Utilities
// Test-Specific Utilities
// ============================================================================
// Common utilities (TestContext, wait_for_sync, gossip setup) are in shared test_utils
// These are specific to this test file (DB checks, TestPosition assertions)
mod test_utils {
use rusqlite::Connection;
use rusqlite::Connection;
use super::*;
/// Test context that manages temporary directories with RAII cleanup
pub struct TestContext {
_temp_dir: TempDir,
db_path: PathBuf,
}
impl TestContext {
pub fn new() -> Self {
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let db_path = temp_dir.path().join("test.db");
Self {
_temp_dir: temp_dir,
db_path,
}
}
pub fn db_path(&self) -> PathBuf {
self.db_path.clone()
}
}
/// Check if an entity exists in the database
pub fn entity_exists_in_db(db_path: &PathBuf, entity_id: Uuid) -> Result<bool> {
/// Check if an entity exists in the database
fn entity_exists_in_db(db_path: &PathBuf, entity_id: Uuid) -> Result<bool> {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
@@ -128,374 +109,97 @@ mod test_utils {
Ok(exists)
}
/// Check if a component exists for an entity in the database
pub fn component_exists_in_db(
db_path: &PathBuf,
entity_id: Uuid,
component_type: &str,
) -> Result<bool> {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
/// Check if a component exists for an entity in the database
fn component_exists_in_db(
db_path: &PathBuf,
entity_id: Uuid,
component_type: &str,
) -> Result<bool> {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
let exists: bool = conn.query_row(
"SELECT COUNT(*) > 0 FROM components WHERE entity_id = ?1 AND component_type = ?2",
rusqlite::params![entity_id_bytes.as_slice(), component_type],
|row| row.get(0),
)?;
let exists: bool = conn.query_row(
"SELECT COUNT(*) > 0 FROM components WHERE entity_id = ?1 AND component_type = ?2",
rusqlite::params![entity_id_bytes.as_slice(), component_type],
|row| row.get(0),
)?;
Ok(exists)
}
Ok(exists)
}
/// Load a component from the database and deserialize it
/// TODO: Rewrite to use ComponentTypeRegistry instead of reflection
#[allow(dead_code)]
pub fn load_component_from_db<T: Component + Clone>(
_db_path: &PathBuf,
_entity_id: Uuid,
_component_type: &str,
) -> Result<Option<T>> {
// This function needs to be rewritten to use ComponentTypeRegistry
// For now, return None to allow tests to compile
Ok(None)
}
/// Load a component from the database and deserialize it
/// TODO: Rewrite to use ComponentTypeRegistry instead of reflection
#[allow(dead_code)]
fn load_component_from_db<T: Component + Clone>(
_db_path: &PathBuf,
_entity_id: Uuid,
_component_type: &str,
) -> Result<Option<T>> {
// This function needs to be rewritten to use ComponentTypeRegistry
// For now, return None to allow tests to compile
Ok(None)
}
/// Create a headless Bevy app configured for testing
pub fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App {
let mut app = App::new();
/// Create a test app with TestPosition and TestHealth registered
fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App {
let mut app = test_utils::create_test_app(node_id, db_path, bridge);
app.add_plugins(MinimalPlugins.set(ScheduleRunnerPlugin::run_loop(
Duration::from_secs_f64(1.0 / 60.0),
)))
.insert_resource(bridge)
.add_plugins(NetworkingPlugin::new(NetworkingConfig {
node_id,
sync_interval_secs: 0.5, // Fast for testing
prune_interval_secs: 10.0,
tombstone_gc_interval_secs: 30.0,
}))
.add_plugins(PersistencePlugin::with_config(
db_path,
PersistenceConfig {
flush_interval_secs: 1,
checkpoint_interval_secs: 5,
battery_adaptive: false,
..Default::default()
},
));
// Register test-specific component types
app.register_type::<TestPosition>()
.register_type::<TestHealth>();
// Register test component types for reflection
app.register_type::<TestPosition>()
.register_type::<TestHealth>();
app
}
app
}
/// Assert that an entity with specific network ID and position exists
fn assert_entity_synced(
world: &mut World,
network_id: Uuid,
expected_position: TestPosition,
) -> Result<()> {
let mut query = world.query::<(&NetworkedEntity, &TestPosition)>();
/// Count entities with a specific network ID
pub fn count_entities_with_id(world: &mut World, network_id: Uuid) -> usize {
let mut query = world.query::<&NetworkedEntity>();
query
.iter(world)
.filter(|entity| entity.network_id == network_id)
.count()
}
/// Assert that an entity with specific network ID and position exists
pub fn assert_entity_synced(
world: &mut World,
network_id: Uuid,
expected_position: TestPosition,
) -> Result<()> {
let mut query = world.query::<(&NetworkedEntity, &TestPosition)>();
for (entity, position) in query.iter(world) {
if entity.network_id == network_id {
if position == &expected_position {
return Ok(());
} else {
anyhow::bail!(
"Position mismatch for entity {}: expected {:?}, got {:?}",
network_id,
expected_position,
position
);
}
}
}
anyhow::bail!("Entity {} not found in world", network_id)
}
/// Wait for sync condition to be met, polling both apps
pub async fn wait_for_sync<F>(
app1: &mut App,
app2: &mut App,
timeout: Duration,
check_fn: F,
) -> Result<()>
where
F: Fn(&mut World, &mut World) -> bool, {
let start = Instant::now();
let mut tick_count = 0;
while start.elapsed() < timeout {
// Tick both apps
app1.update();
app2.update();
tick_count += 1;
if tick_count % 50 == 0 {
println!(
"Waiting for sync... tick {} ({:.1}s elapsed)",
tick_count,
start.elapsed().as_secs_f32()
);
}
// Check condition
if check_fn(app1.world_mut(), app2.world_mut()) {
println!(
"Sync completed after {} ticks ({:.3}s)",
tick_count,
start.elapsed().as_secs_f32()
);
for (entity, position) in query.iter(world) {
if entity.network_id == network_id {
if position == &expected_position {
return Ok(());
}
// Small delay to avoid spinning
tokio::time::sleep(Duration::from_millis(16)).await;
}
println!("Sync timeout after {} ticks", tick_count);
anyhow::bail!("Sync timeout after {:?}. Condition not met.", timeout)
}
/// Initialize a single iroh-gossip node
async fn init_gossip_node(
topic_id: TopicId,
bootstrap_addrs: Vec<iroh::EndpointAddr>,
) -> Result<(Endpoint, Gossip, Router, GossipBridge)> {
println!(" Creating endpoint (localhost only for fast testing)...");
// Create the Iroh endpoint bound to localhost only (no mDNS needed)
let endpoint = Endpoint::builder()
.bind_addr_v4(std::net::SocketAddrV4::new(std::net::Ipv4Addr::LOCALHOST, 0))
.bind()
.await?;
let endpoint_id = endpoint.addr().id;
println!(" Endpoint created: {}", endpoint_id);
// Convert 32-byte endpoint ID to 16-byte UUID by taking first 16 bytes
let id_bytes = endpoint_id.as_bytes();
let mut uuid_bytes = [0u8; 16];
uuid_bytes.copy_from_slice(&id_bytes[..16]);
let node_id = Uuid::from_bytes(uuid_bytes);
println!(" Spawning gossip protocol...");
// Build the gossip protocol
let gossip = Gossip::builder().spawn(endpoint.clone());
println!(" Setting up router...");
// Setup the router to handle incoming connections
let router = Router::builder(endpoint.clone())
.accept(iroh_gossip::ALPN, gossip.clone())
.spawn();
// Add bootstrap peers using StaticProvider for direct localhost connections
let bootstrap_count = bootstrap_addrs.len();
let has_bootstrap_peers = !bootstrap_addrs.is_empty();
// Collect bootstrap IDs before moving the addresses
let bootstrap_ids: Vec<_> = bootstrap_addrs.iter().map(|a| a.id).collect();
if has_bootstrap_peers {
let static_provider = iroh::discovery::static_provider::StaticProvider::default();
for addr in &bootstrap_addrs {
static_provider.add_endpoint_info(addr.clone());
}
endpoint.discovery().add(static_provider);
println!(" Added {} bootstrap peers to discovery", bootstrap_count);
// Connect to bootstrap peers (localhost connections are instant)
for addr in &bootstrap_addrs {
match endpoint.connect(addr.clone(), iroh_gossip::ALPN).await {
| Ok(_conn) => println!(" ✓ Connected to {}", addr.id),
| Err(e) => println!(" ✗ Connection failed: {}", e),
}
} else {
anyhow::bail!(
"Position mismatch for entity {}: expected {:?}, got {:?}",
network_id,
expected_position,
position
);
}
}
// Subscribe to the topic
let subscribe_handle = gossip.subscribe(topic_id, bootstrap_ids).await?;
let (sender, mut receiver) = subscribe_handle.split();
// Wait for join if we have bootstrap peers (should be instant on localhost)
if has_bootstrap_peers {
match tokio::time::timeout(Duration::from_millis(500), receiver.joined()).await {
| Ok(Ok(())) => println!(" ✓ Join completed"),
| Ok(Err(e)) => println!(" ✗ Join error: {}", e),
| Err(_) => println!(" ⚠ Join timeout (proceeding anyway)"),
}
}
// Create bridge and wire it up
let bridge = GossipBridge::new(node_id);
println!(" Spawning bridge tasks...");
// Spawn background tasks to forward messages between gossip and bridge
spawn_gossip_bridge_tasks(sender, receiver, bridge.clone());
println!(" Node initialization complete");
Ok((endpoint, gossip, router, bridge))
}
/// Setup a pair of iroh-gossip nodes connected to the same topic
pub async fn setup_gossip_pair() -> Result<(
Endpoint,
Endpoint,
Router,
Router,
GossipBridge,
GossipBridge,
)> {
// Use a shared topic for both nodes
let topic_id = TopicId::from_bytes([42; 32]);
println!("Using topic ID: {:?}", topic_id);
// Initialize node 1 with no bootstrap peers
println!("Initializing node 1...");
let (ep1, _gossip1, router1, bridge1) = init_gossip_node(topic_id, vec![]).await?;
println!("Node 1 initialized with ID: {}", ep1.addr().id);
// Get node 1's full address (ID + network addresses) for node 2 to bootstrap
// from
let node1_addr = ep1.addr().clone();
println!("Node 1 full address: {:?}", node1_addr);
// Initialize node 2 with node 1's full address as bootstrap peer
println!("Initializing node 2 with bootstrap peer: {}", node1_addr.id);
let (ep2, _gossip2, router2, bridge2) =
init_gossip_node(topic_id, vec![node1_addr]).await?;
println!("Node 2 initialized with ID: {}", ep2.addr().id);
// Brief wait for gossip protocol to stabilize (localhost is fast)
tokio::time::sleep(Duration::from_millis(200)).await;
Ok((ep1, ep2, router1, router2, bridge1, bridge2))
}
/// Spawn background tasks to forward messages between iroh-gossip and
/// GossipBridge
fn spawn_gossip_bridge_tasks(
sender: GossipSender,
mut receiver: GossipReceiver,
bridge: GossipBridge,
) {
let node_id = bridge.node_id();
// Task 1: Forward from bridge.outgoing → gossip sender
let bridge_out = bridge.clone();
tokio::spawn(async move {
let mut msg_count = 0;
loop {
// Poll the bridge's outgoing queue
if let Some(versioned_msg) = bridge_out.try_recv_outgoing() {
msg_count += 1;
println!(
"[Node {}] Sending message #{} via gossip",
node_id, msg_count
);
// Serialize the message
match rkyv::to_bytes::<rkyv::rancor::Failure>(&versioned_msg).map(|b| b.to_vec()) {
| Ok(bytes) => {
// Broadcast via gossip
if let Err(e) = sender.broadcast(bytes.into()).await {
eprintln!("[Node {}] Failed to broadcast message: {}", node_id, e);
} else {
println!(
"[Node {}] Message #{} broadcasted successfully",
node_id, msg_count
);
}
},
| Err(e) => eprintln!(
"[Node {}] Failed to serialize message for broadcast: {}",
node_id, e
),
}
}
// Small delay to avoid spinning
tokio::time::sleep(Duration::from_millis(10)).await;
}
});
// Task 2: Forward from gossip receiver → bridge.incoming
let bridge_in = bridge.clone();
tokio::spawn(async move {
let mut msg_count = 0;
println!("[Node {}] Gossip receiver task started", node_id);
loop {
// Receive from gossip (GossipReceiver is a Stream)
match tokio::time::timeout(Duration::from_millis(100), receiver.next()).await {
| Ok(Some(Ok(event))) => {
println!(
"[Node {}] Received gossip event: {:?}",
node_id,
std::mem::discriminant(&event)
);
if let iroh_gossip::api::Event::Received(msg) = event {
msg_count += 1;
println!(
"[Node {}] Received message #{} from gossip",
node_id, msg_count
);
// Deserialize the message
match rkyv::from_bytes::<VersionedMessage, rkyv::rancor::Failure>(&msg.content) {
| Ok(versioned_msg) => {
// Push to bridge's incoming queue
if let Err(e) = bridge_in.push_incoming(versioned_msg) {
eprintln!(
"[Node {}] Failed to push to bridge incoming: {}",
node_id, e
);
} else {
println!(
"[Node {}] Message #{} pushed to bridge incoming",
node_id, msg_count
);
}
},
| Err(e) => eprintln!(
"[Node {}] Failed to deserialize gossip message: {}",
node_id, e
),
}
}
},
| Ok(Some(Err(e))) => {
eprintln!("[Node {}] Gossip receiver error: {}", node_id, e)
},
| Ok(None) => {
// Stream ended
println!("[Node {}] Gossip stream ended", node_id);
break;
},
| Err(_) => {
// Timeout, no message available
},
}
}
});
}
anyhow::bail!("Entity {} not found in world", network_id)
}
// ============================================================================
// Integration Tests
// ============================================================================
/// Test 1: Basic entity sync (Node A spawns → Node B receives)
use test_utils::count_entities_with_id;
#[tokio::test(flavor = "multi_thread")]
async fn test_basic_entity_sync() -> Result<()> {
use test_utils::*;
println!("=== Starting test_basic_entity_sync ===");
let ctx1 = TestContext::new();
let ctx2 = TestContext::new();
let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
let node1_id = bridge1.node_id();
let node2_id = bridge2.node_id();
let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
println!("Node1 ID: {}", node1_id);
println!("Node2 ID: {}", node2_id);
println!("=== Starting test_basic_entity_sync ===");

View File

@@ -0,0 +1,315 @@
//! Shared iroh-gossip setup utilities for integration tests
//!
//! This module provides real iroh-gossip networking infrastructure that all
//! integration tests should use. No shortcuts - always use real localhost connections.
use anyhow::Result;
use futures_lite::StreamExt;
use iroh::{
Endpoint,
discovery::static_provider::StaticProvider,
protocol::Router,
};
use iroh_gossip::{
api::{GossipReceiver, GossipSender},
net::Gossip,
proto::TopicId,
};
use libmarathon::networking::{GossipBridge, VersionedMessage};
use std::time::Duration;
use uuid::Uuid;
/// Initialize a single iroh-gossip node
///
/// Creates a real iroh endpoint bound to localhost, spawns the gossip protocol,
/// sets up routing, and optionally connects to bootstrap peers.
///
/// # Arguments
/// * `topic_id` - The gossip topic to subscribe to
/// * `bootstrap_addrs` - Optional bootstrap peers to connect to
///
/// # Returns
/// * Endpoint - The iroh endpoint for this node
/// * Gossip - The gossip protocol handler
/// * Router - The router handling incoming connections
/// * GossipBridge - The bridge for Bevy ECS integration
///
/// # Errors
/// Fails if the endpoint cannot bind or the topic subscription fails.
/// Failures while dialing individual bootstrap peers are only logged and do
/// not abort initialization.
pub async fn init_gossip_node(
    topic_id: TopicId,
    bootstrap_addrs: Vec<iroh::EndpointAddr>,
) -> Result<(Endpoint, Gossip, Router, GossipBridge)> {
    println!(" Creating endpoint (localhost only for fast testing)...");
    // Create the Iroh endpoint bound to localhost only (no mDNS needed).
    // Port 0 lets the OS pick a free port so concurrently running tests
    // never collide on an address.
    let endpoint = Endpoint::builder()
        .bind_addr_v4(std::net::SocketAddrV4::new(std::net::Ipv4Addr::LOCALHOST, 0))
        .bind()
        .await?;
    let endpoint_id = endpoint.addr().id;
    println!(" Endpoint created: {}", endpoint_id);
    // Convert 32-byte endpoint ID to 16-byte UUID by taking first 16 bytes.
    // This truncation is lossy; two endpoints could in principle collide on
    // their first 16 bytes, but at test-scale node counts that is negligible.
    let id_bytes = endpoint_id.as_bytes();
    let mut uuid_bytes = [0u8; 16];
    uuid_bytes.copy_from_slice(&id_bytes[..16]);
    let node_id = Uuid::from_bytes(uuid_bytes);
    println!(" Spawning gossip protocol...");
    // Build the gossip protocol
    let gossip = Gossip::builder().spawn(endpoint.clone());
    println!(" Setting up router...");
    // Setup the router to handle incoming connections on the gossip ALPN
    let router = Router::builder(endpoint.clone())
        .accept(iroh_gossip::ALPN, gossip.clone())
        .spawn();
    // Add bootstrap peers using StaticProvider for direct localhost connections
    let bootstrap_count = bootstrap_addrs.len();
    let has_bootstrap_peers = !bootstrap_addrs.is_empty();
    // Collect bootstrap IDs before moving the addresses
    let bootstrap_ids: Vec<_> = bootstrap_addrs.iter().map(|a| a.id).collect();
    if has_bootstrap_peers {
        let static_provider = StaticProvider::default();
        for addr in &bootstrap_addrs {
            static_provider.add_endpoint_info(addr.clone());
        }
        endpoint.discovery().add(static_provider);
        println!(" Added {} bootstrap peers to discovery", bootstrap_count);
        // Connect to bootstrap peers (localhost connections are instant).
        // NOTE(review): the connection handle `_conn` is dropped immediately;
        // presumably this is only a warm-up dial and gossip re-dials peers via
        // discovery during subscribe — confirm dropping it tears nothing down.
        for addr in &bootstrap_addrs {
            match endpoint.connect(addr.clone(), iroh_gossip::ALPN).await {
                Ok(_conn) => println!(" ✓ Connected to {}", addr.id),
                Err(e) => println!(" ✗ Connection failed: {}", e),
            }
        }
    }
    // Subscribe to the topic, handing gossip the peer IDs to join through
    let subscribe_handle = gossip.subscribe(topic_id, bootstrap_ids).await?;
    let (sender, mut receiver) = subscribe_handle.split();
    // Wait for join if we have bootstrap peers (should be instant on localhost).
    // Join errors/timeouts are tolerated here: the caller's own sync-wait
    // helpers will surface any real connectivity problem later.
    if has_bootstrap_peers {
        match tokio::time::timeout(Duration::from_millis(500), receiver.joined()).await {
            Ok(Ok(())) => println!(" ✓ Join completed"),
            Ok(Err(e)) => println!(" ✗ Join error: {}", e),
            Err(_) => println!(" ⚠ Join timeout (proceeding anyway)"),
        }
    }
    // Create bridge and wire it up
    let bridge = GossipBridge::new(node_id);
    println!(" Spawning bridge tasks...");
    // Spawn background tasks to forward messages between gossip and bridge.
    // These tasks are detached: they run for the life of the process.
    spawn_gossip_bridge_tasks(sender, receiver, bridge.clone());
    println!(" Node initialization complete");
    Ok((endpoint, gossip, router, bridge))
}
/// Spawn background tasks to forward messages between iroh-gossip and GossipBridge
///
/// This creates two tokio tasks:
/// 1. Forward from bridge.outgoing → gossip sender (broadcasts to peers)
/// 2. Forward from gossip receiver → bridge.incoming (receives from peers)
///
/// These tasks run indefinitely and handle serialization/deserialization.
///
/// The task handles are intentionally discarded (detached). Each task holds a
/// clone of `bridge`, so the bridge stays alive as long as the tasks do.
/// NOTE(review): the outgoing task never terminates — acceptable for tests,
/// but it would leak in a long-lived process; confirm before reusing outside
/// the test suite.
pub fn spawn_gossip_bridge_tasks(
    sender: GossipSender,
    mut receiver: GossipReceiver,
    bridge: GossipBridge,
) {
    let node_id = bridge.node_id();
    // Task 1: Forward from bridge.outgoing → gossip sender
    let bridge_out = bridge.clone();
    tokio::spawn(async move {
        let mut msg_count = 0;
        loop {
            // Poll the bridge's outgoing queue (non-blocking)
            if let Some(versioned_msg) = bridge_out.try_recv_outgoing() {
                msg_count += 1;
                println!(
                    "[Node {}] Sending message #{} via gossip",
                    node_id, msg_count
                );
                // Serialize the message with rkyv; `.to_vec()` converts the
                // aligned buffer into plain bytes for the gossip broadcast.
                match rkyv::to_bytes::<rkyv::rancor::Failure>(&versioned_msg).map(|b| b.to_vec()) {
                    Ok(bytes) => {
                        // Broadcast via gossip
                        if let Err(e) = sender.broadcast(bytes.into()).await {
                            eprintln!("[Node {}] Failed to broadcast message: {}", node_id, e);
                        } else {
                            println!(
                                "[Node {}] Message #{} broadcasted successfully",
                                node_id, msg_count
                            );
                        }
                    }
                    // Serialization failures drop the message after logging
                    Err(e) => eprintln!(
                        "[Node {}] Failed to serialize message for broadcast: {}",
                        node_id, e
                    ),
                }
            }
            // Small delay to avoid spinning (10 ms poll interval)
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
    });
    // Task 2: Forward from gossip receiver → bridge.incoming
    let bridge_in = bridge.clone();
    tokio::spawn(async move {
        let mut msg_count = 0;
        println!("[Node {}] Gossip receiver task started", node_id);
        loop {
            // Receive from gossip (GossipReceiver is a Stream). The 100 ms
            // timeout keeps the loop responsive without busy-waiting.
            match tokio::time::timeout(Duration::from_millis(100), receiver.next()).await {
                Ok(Some(Ok(event))) => {
                    println!(
                        "[Node {}] Received gossip event: {:?}",
                        node_id,
                        std::mem::discriminant(&event)
                    );
                    // Only `Received` events carry payloads; membership events
                    // (joins/leaves) are logged above and otherwise ignored.
                    if let iroh_gossip::api::Event::Received(msg) = event {
                        msg_count += 1;
                        println!(
                            "[Node {}] Received message #{} from gossip",
                            node_id, msg_count
                        );
                        // Deserialize the message
                        match rkyv::from_bytes::<VersionedMessage, rkyv::rancor::Failure>(&msg.content) {
                            Ok(versioned_msg) => {
                                // Push to bridge's incoming queue for the ECS
                                // systems to drain on their next tick
                                if let Err(e) = bridge_in.push_incoming(versioned_msg) {
                                    eprintln!(
                                        "[Node {}] Failed to push to bridge incoming: {}",
                                        node_id, e
                                    );
                                } else {
                                    println!(
                                        "[Node {}] Message #{} pushed to bridge incoming",
                                        node_id, msg_count
                                    );
                                }
                            }
                            // Malformed payloads are logged and dropped
                            Err(e) => eprintln!(
                                "[Node {}] Failed to deserialize gossip message: {}",
                                node_id, e
                            ),
                        }
                    }
                }
                // Stream-level errors are logged but the loop keeps running
                Ok(Some(Err(e))) => {
                    eprintln!("[Node {}] Gossip receiver error: {}", node_id, e)
                }
                Ok(None) => {
                    // Stream ended — the only way this task exits
                    println!("[Node {}] Gossip stream ended", node_id);
                    break;
                }
                Err(_) => {
                    // Timeout, no message available
                }
            }
        }
    });
}
/// Bring up two connected iroh-gossip nodes sharing one topic.
///
/// Node 1 starts cold (no bootstrap peers); node 2 then bootstraps from
/// node 1's full address. Both end up subscribed to the same topic over
/// real localhost connections.
///
/// # Returns
/// Tuple of (endpoint1, endpoint2, router1, router2, bridge1, bridge2)
pub async fn setup_gossip_pair() -> Result<(
    Endpoint,
    Endpoint,
    Router,
    Router,
    GossipBridge,
    GossipBridge,
)> {
    // Both nodes join the same fixed topic.
    let topic = TopicId::from_bytes([42; 32]);
    println!("Using topic ID: {:?}", topic);

    // First node comes up with an empty bootstrap list.
    println!("Initializing node 1...");
    let (first_ep, _first_gossip, first_router, first_bridge) =
        init_gossip_node(topic, vec![]).await?;
    println!("Node 1 initialized with ID: {}", first_ep.addr().id);

    // Capture node 1's full address (ID plus socket addresses) so the second
    // node can dial it directly.
    let first_addr = first_ep.addr().clone();
    println!("Node 1 full address: {:?}", first_addr);

    // Second node bootstraps from the first.
    println!("Initializing node 2 with bootstrap peer: {}", first_addr.id);
    let (second_ep, _second_gossip, second_router, second_bridge) =
        init_gossip_node(topic, vec![first_addr]).await?;
    println!("Node 2 initialized with ID: {}", second_ep.addr().id);

    // Give the gossip swarm a moment to settle (localhost, so this is quick).
    tokio::time::sleep(Duration::from_millis(200)).await;

    Ok((
        first_ep,
        second_ep,
        first_router,
        second_router,
        first_bridge,
        second_bridge,
    ))
}
/// Bring up three connected iroh-gossip nodes sharing one topic.
///
/// Startup order: node 1 starts cold, node 2 bootstraps from node 1, and
/// node 3 bootstraps from both of them. All three subscribe to the same
/// topic over real localhost connections.
///
/// # Returns
/// Tuple of (ep1, ep2, ep3, router1, router2, router3, bridge1, bridge2, bridge3)
pub async fn setup_gossip_trio() -> Result<(
    Endpoint,
    Endpoint,
    Endpoint,
    Router,
    Router,
    Router,
    GossipBridge,
    GossipBridge,
    GossipBridge,
)> {
    let topic = TopicId::from_bytes([42; 32]);
    println!("Using topic ID: {:?}", topic);

    // Node 1: cold start with no peers.
    println!("Initializing node 1...");
    let (ep_a, _gossip_a, router_a, bridge_a) = init_gossip_node(topic, vec![]).await?;
    println!("Node 1 initialized with ID: {}", ep_a.addr().id);
    let addr_a = ep_a.addr().clone();

    // Node 2: bootstraps from node 1.
    println!("Initializing node 2 with bootstrap peer: {}", addr_a.id);
    let (ep_b, _gossip_b, router_b, bridge_b) =
        init_gossip_node(topic, vec![addr_a.clone()]).await?;
    println!("Node 2 initialized with ID: {}", ep_b.addr().id);

    // Node 3: bootstraps from both existing nodes.
    let addr_b = ep_b.addr().clone();
    println!("Initializing node 3 with bootstrap peers: {} and {}", addr_a.id, addr_b.id);
    let (ep_c, _gossip_c, router_c, bridge_c) =
        init_gossip_node(topic, vec![addr_a, addr_b]).await?;
    println!("Node 3 initialized with ID: {}", ep_c.addr().id);

    // Let the three-node mesh settle before tests start pushing messages.
    tokio::time::sleep(Duration::from_millis(300)).await;

    Ok((ep_a, ep_b, ep_c, router_a, router_b, router_c, bridge_a, bridge_b, bridge_c))
}

View File

@@ -0,0 +1,144 @@
//! Shared test utilities for integration tests
//!
//! This module provides common test infrastructure that all integration tests use:
//! - Real iroh-gossip setup with localhost connections
//! - Test app creation with networking + persistence
//! - Wait helpers for async sync verification
pub mod gossip;
pub use gossip::{init_gossip_node, setup_gossip_pair, setup_gossip_trio, spawn_gossip_bridge_tasks};
use anyhow::Result;
use bevy::{
MinimalPlugins,
app::{App, ScheduleRunnerPlugin},
prelude::*,
};
use libmarathon::{
networking::{
GossipBridge,
NetworkingConfig,
NetworkingPlugin,
},
persistence::{
PersistenceConfig,
PersistencePlugin,
},
};
use std::{
path::PathBuf,
time::Duration,
};
use tempfile::TempDir;
use tokio::time::Instant;
use uuid::Uuid;
/// Test context that manages temporary directories with RAII cleanup
///
/// The wrapped [`TempDir`] deletes the directory — and any database files
/// the test created inside it — when this context is dropped.
pub struct TestContext {
    // Held for its Drop impl: dropping the TempDir removes the whole directory.
    temp_dir: TempDir,
}
impl TestContext {
    /// Create a fresh context backed by a newly created temporary directory.
    ///
    /// # Panics
    /// Panics if the temporary directory cannot be created — acceptable in
    /// test code, where there is no meaningful recovery.
    pub fn new() -> Self {
        Self {
            temp_dir: TempDir::new().expect("Failed to create temp directory"),
        }
    }

    /// Path to this context's database file (`test.db` inside the temporary
    /// directory). Only the path is computed; the file is not created here.
    pub fn db_path(&self) -> PathBuf {
        self.temp_dir.path().join("test.db")
    }
}

// `Default` delegates to `new()` so the type satisfies clippy's
// `new_without_default` lint and composes with `Default`-based setup code.
impl Default for TestContext {
    fn default() -> Self {
        Self::new()
    }
}
/// Create a test app with networking and persistence
///
/// Convenience wrapper around [`create_test_app_maybe_offline`] for the
/// common "online" case: the given `bridge` is always installed.
pub fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App {
    create_test_app_maybe_offline(node_id, db_path, Some(bridge))
}
/// Create a test app with optional bridge (for testing offline scenarios)
///
/// Builds a headless Bevy app ticking at 60 Hz with the networking and
/// persistence plugins configured for fast test intervals. Passing
/// `Some(bridge)` installs the gossip bridge as a resource ("online");
/// `None` leaves the app without gossip connectivity ("offline").
pub fn create_test_app_maybe_offline(node_id: Uuid, db_path: PathBuf, bridge: Option<GossipBridge>) -> App {
    // Fixed 60 Hz schedule runner for deterministic-ish ticking in tests.
    let runner = ScheduleRunnerPlugin::run_loop(Duration::from_secs_f64(1.0 / 60.0));

    // Short sync/prune/GC intervals so tests converge quickly.
    let net_config = NetworkingConfig {
        node_id,
        sync_interval_secs: 0.5,
        prune_interval_secs: 10.0,
        tombstone_gc_interval_secs: 30.0,
    };

    // Aggressive flush/checkpoint cadence; battery adaptation is irrelevant in CI.
    let persist_config = PersistenceConfig {
        flush_interval_secs: 1,
        checkpoint_interval_secs: 5,
        battery_adaptive: false,
        ..Default::default()
    };

    let mut app = App::new();
    app.add_plugins(MinimalPlugins.set(runner))
        .add_plugins(NetworkingPlugin::new(net_config))
        .add_plugins(PersistencePlugin::with_config(db_path, persist_config));

    // Online mode only: expose the gossip bridge to the ECS systems.
    if let Some(bridge) = bridge {
        app.insert_resource(bridge);
    }
    app
}
/// Poll two apps until `check_fn` reports the sync condition, or time out.
///
/// Each iteration ticks both apps once, evaluates `check_fn` against their
/// worlds, and sleeps roughly one 60 Hz frame (16 ms) before retrying.
/// Progress is logged every 50 ticks.
///
/// # Errors
/// Returns an error if the condition has not held within `timeout`.
pub async fn wait_for_sync<F>(
    app1: &mut App,
    app2: &mut App,
    timeout: Duration,
    check_fn: F,
) -> Result<()>
where
    F: Fn(&mut World, &mut World) -> bool,
{
    let started = Instant::now();
    let mut ticks = 0;
    loop {
        // Stop once the budget is exhausted.
        if started.elapsed() >= timeout {
            break;
        }
        // Advance both simulations by one frame.
        app1.update();
        app2.update();
        ticks += 1;

        // Periodic heartbeat so slow or hung tests are easy to spot in logs.
        if ticks % 50 == 0 {
            println!(
                "Waiting for sync... tick {} ({:.1}s elapsed)",
                ticks,
                started.elapsed().as_secs_f32()
            );
        }

        // Done as soon as the caller-supplied predicate holds.
        if check_fn(app1.world_mut(), app2.world_mut()) {
            println!(
                "Sync completed after {} ticks ({:.3}s)",
                ticks,
                started.elapsed().as_secs_f32()
            );
            return Ok(());
        }

        // Brief pause between polls to avoid a hot spin.
        tokio::time::sleep(Duration::from_millis(16)).await;
    }
    println!("Sync timeout after {} ticks", ticks);
    anyhow::bail!("Sync timeout after {:?}. Condition not met.", timeout)
}
/// Count entities with a specific network_id
///
/// Scans every `NetworkedEntity` component in `world` and tallies how many
/// carry exactly the given `network_id`.
pub fn count_entities_with_id(world: &mut World, network_id: Uuid) -> usize {
    use libmarathon::networking::NetworkedEntity;
    let mut entities = world.query::<&NetworkedEntity>();
    let mut matching = 0;
    for networked in entities.iter(world) {
        if networked.network_id == network_id {
            matching += 1;
        }
    }
    matching
}