chore: replace hard-coded test timeouts with feature-gated helpers; make command-ordering test sequential; add engine shutdown test

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-28 17:39:27 +00:00
parent f9f289f5b2
commit d1d3aec8aa
47 changed files with 2248 additions and 438 deletions

View File

@@ -5,6 +5,33 @@ use libmarathon::networking::SessionId;
use std::time::Duration;
use tokio::time::timeout;
/// Timeout for engine operations, selected by build feature.
///
/// - `fast_tests` enabled: networking is mocked, so a short 200ms timeout suffices.
/// - `fast_tests` disabled: real networking with DHT discovery may need up to 30s.
fn engine_timeout() -> Duration {
    if cfg!(feature = "fast_tests") {
        Duration::from_millis(200)
    } else {
        Duration::from_secs(30)
    }
}
/// Wait time that gives the engine a chance to process queued commands.
///
/// - `fast_tests` enabled: mocked networking handles commands within 50ms.
/// - `fast_tests` disabled: real networking initialization needs much longer.
fn processing_delay() -> Duration {
    match cfg!(feature = "fast_tests") {
        true => Duration::from_millis(50),
        // Real networking needs more time for initialization
        false => Duration::from_secs(20),
    }
}
/// Test that commands sent from "Bevy side" reach the engine
#[tokio::test]
async fn test_command_routing() {
@@ -14,7 +41,7 @@ async fn test_command_routing() {
let engine_handle = tokio::spawn(async move {
// Run engine for a short time
let core = EngineCore::new(handle, ":memory:");
timeout(Duration::from_millis(100), core.run())
timeout(engine_timeout(), core.run())
.await
.ok();
});
@@ -29,7 +56,7 @@ async fn test_command_routing() {
});
// Give engine time to process
tokio::time::sleep(Duration::from_millis(50)).await;
tokio::time::sleep(processing_delay()).await;
// Poll events
let events = bridge.poll_events();
@@ -65,7 +92,7 @@ async fn test_event_routing() {
// Spawn engine
let engine_handle = tokio::spawn(async move {
let core = EngineCore::new(handle, ":memory:");
timeout(Duration::from_millis(100), core.run())
timeout(engine_timeout(), core.run())
.await
.ok();
});
@@ -78,7 +105,7 @@ async fn test_event_routing() {
session_id: session_id.clone(),
});
tokio::time::sleep(Duration::from_millis(50)).await;
tokio::time::sleep(processing_delay()).await;
// Poll events multiple times to verify queue works
let events1 = bridge.poll_events();
@@ -102,7 +129,7 @@ async fn test_networking_lifecycle() {
let engine_handle = tokio::spawn(async move {
let core = EngineCore::new(handle, ":memory:");
timeout(Duration::from_millis(200), core.run())
timeout(engine_timeout(), core.run())
.await
.ok();
});
@@ -115,7 +142,7 @@ async fn test_networking_lifecycle() {
session_id: session_id.clone(),
});
tokio::time::sleep(Duration::from_millis(50)).await;
tokio::time::sleep(processing_delay()).await;
let events = bridge.poll_events();
assert!(
@@ -128,7 +155,7 @@ async fn test_networking_lifecycle() {
// Stop networking
bridge.send_command(EngineCommand::StopNetworking);
tokio::time::sleep(Duration::from_millis(50)).await;
tokio::time::sleep(processing_delay()).await;
let events = bridge.poll_events();
assert!(
@@ -150,7 +177,7 @@ async fn test_join_session_routing() {
let engine_handle = tokio::spawn(async move {
let core = EngineCore::new(handle, ":memory:");
timeout(Duration::from_millis(200), core.run())
timeout(engine_timeout(), core.run())
.await
.ok();
});
@@ -163,7 +190,7 @@ async fn test_join_session_routing() {
session_id: session_id.clone(),
});
tokio::time::sleep(Duration::from_millis(50)).await;
tokio::time::sleep(processing_delay()).await;
let events = bridge.poll_events();
assert!(
@@ -191,44 +218,85 @@ async fn test_command_ordering() {
let engine_handle = tokio::spawn(async move {
let core = EngineCore::new(handle, ":memory:");
timeout(Duration::from_millis(200), core.run())
timeout(engine_timeout(), core.run())
.await
.ok();
});
tokio::time::sleep(Duration::from_millis(10)).await;
// Send multiple commands
// Send first command and wait for it to complete
let session1 = SessionId::new();
let session2 = SessionId::new();
bridge.send_command(EngineCommand::StartNetworking {
session_id: session1.clone(),
});
// Wait for first networking to start
tokio::time::sleep(processing_delay()).await;
let events1 = bridge.poll_events();
assert!(
events1.iter().any(|e| matches!(e, EngineEvent::NetworkingStarted { .. })),
"Should receive first NetworkingStarted"
);
// Now send stop and start second session
let session2 = SessionId::new();
bridge.send_command(EngineCommand::StopNetworking);
bridge.send_command(EngineCommand::JoinSession {
session_id: session2.clone(),
});
tokio::time::sleep(Duration::from_millis(100)).await;
// Wait for second networking to start
tokio::time::sleep(processing_delay()).await;
let events = bridge.poll_events();
let events2 = bridge.poll_events();
// Should see: NetworkingStarted(session1), NetworkingStopped, NetworkingStarted(session2)
let started_events: Vec<_> = events
// Should see: NetworkingStopped, NetworkingStarted(session2)
let started_events: Vec<_> = events2
.iter()
.filter(|e| matches!(e, EngineEvent::NetworkingStarted { .. }))
.collect();
let stopped_events: Vec<_> = events
let stopped_events: Vec<_> = events2
.iter()
.filter(|e| matches!(e, EngineEvent::NetworkingStopped))
.collect();
assert_eq!(started_events.len(), 2, "Should have 2 NetworkingStarted events");
assert_eq!(started_events.len(), 1, "Should have 1 NetworkingStarted event in second batch");
assert_eq!(stopped_events.len(), 1, "Should have 1 NetworkingStopped event");
// Cleanup
drop(bridge);
let _ = engine_handle.await;
}
/// Test: Shutdown command causes EngineCore to exit gracefully
#[tokio::test]
async fn test_shutdown_command() {
    let (bridge, handle) = EngineBridge::new();

    // Run the engine with no outer timeout: the Shutdown command alone
    // must be what ends the run loop.
    let engine_task = tokio::spawn(async move {
        let core = EngineCore::new(handle, ":memory:");
        core.run().await;
    });

    // Give the engine a moment to start polling its command channel.
    tokio::time::sleep(Duration::from_millis(10)).await;

    bridge.send_command(EngineCommand::Shutdown);

    // Exit should be prompt — the engine only has one command to process.
    let join_result = timeout(Duration::from_millis(100), engine_task).await;
    assert!(
        join_result.is_ok(),
        "Engine should exit within 100ms after receiving Shutdown command"
    );
    // An Ok(()) from the JoinHandle means the task completed without panicking.
    assert!(
        join_result.unwrap().is_ok(),
        "Engine should exit cleanly without panic"
    );
}