initial architectural overhaul
Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
234
crates/libmarathon/tests/bridge_integration.rs
Normal file
234
crates/libmarathon/tests/bridge_integration.rs
Normal file
@@ -0,0 +1,234 @@
|
||||
//! Integration tests for EngineBridge command/event routing
|
||||
|
||||
use libmarathon::engine::{EngineBridge, EngineCommand, EngineCore, EngineEvent};
|
||||
use libmarathon::networking::SessionId;
|
||||
use std::time::Duration;
|
||||
use tokio::time::timeout;
|
||||
|
||||
/// Test that commands sent from "Bevy side" reach the engine
|
||||
#[tokio::test]
|
||||
async fn test_command_routing() {
|
||||
let (bridge, handle) = EngineBridge::new();
|
||||
|
||||
// Spawn engine in background
|
||||
let engine_handle = tokio::spawn(async move {
|
||||
// Run engine for a short time
|
||||
let core = EngineCore::new(handle, ":memory:");
|
||||
timeout(Duration::from_millis(100), core.run())
|
||||
.await
|
||||
.ok();
|
||||
});
|
||||
|
||||
// Give engine time to start
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
|
||||
// Send a command from "Bevy side"
|
||||
let session_id = SessionId::new();
|
||||
bridge.send_command(EngineCommand::StartNetworking {
|
||||
session_id: session_id.clone(),
|
||||
});
|
||||
|
||||
// Give engine time to process
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
|
||||
// Poll events
|
||||
let events = bridge.poll_events();
|
||||
|
||||
// Verify we got a NetworkingStarted event
|
||||
assert!(!events.is_empty(), "Should receive at least one event");
|
||||
|
||||
let has_networking_started = events.iter().any(|e| {
|
||||
matches!(
|
||||
e,
|
||||
EngineEvent::NetworkingStarted {
|
||||
session_id: sid,
|
||||
..
|
||||
} if sid == &session_id
|
||||
)
|
||||
});
|
||||
|
||||
assert!(
|
||||
has_networking_started,
|
||||
"Should receive NetworkingStarted event"
|
||||
);
|
||||
|
||||
// Cleanup
|
||||
drop(bridge);
|
||||
let _ = engine_handle.await;
|
||||
}
|
||||
|
||||
/// Test that events from engine reach "Bevy side"
|
||||
#[tokio::test]
|
||||
async fn test_event_routing() {
|
||||
let (bridge, handle) = EngineBridge::new();
|
||||
|
||||
// Spawn engine
|
||||
let engine_handle = tokio::spawn(async move {
|
||||
let core = EngineCore::new(handle, ":memory:");
|
||||
timeout(Duration::from_millis(100), core.run())
|
||||
.await
|
||||
.ok();
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
|
||||
// Send StartNetworking command
|
||||
let session_id = SessionId::new();
|
||||
bridge.send_command(EngineCommand::StartNetworking {
|
||||
session_id: session_id.clone(),
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
|
||||
// Poll events multiple times to verify queue works
|
||||
let events1 = bridge.poll_events();
|
||||
let events2 = bridge.poll_events();
|
||||
|
||||
assert!(!events1.is_empty(), "First poll should return events");
|
||||
assert!(
|
||||
events2.is_empty(),
|
||||
"Second poll should be empty (events already drained)"
|
||||
);
|
||||
|
||||
// Cleanup
|
||||
drop(bridge);
|
||||
let _ = engine_handle.await;
|
||||
}
|
||||
|
||||
/// Test full lifecycle: Start → Stop networking
|
||||
#[tokio::test]
|
||||
async fn test_networking_lifecycle() {
|
||||
let (bridge, handle) = EngineBridge::new();
|
||||
|
||||
let engine_handle = tokio::spawn(async move {
|
||||
let core = EngineCore::new(handle, ":memory:");
|
||||
timeout(Duration::from_millis(200), core.run())
|
||||
.await
|
||||
.ok();
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
|
||||
// Start networking
|
||||
let session_id = SessionId::new();
|
||||
bridge.send_command(EngineCommand::StartNetworking {
|
||||
session_id: session_id.clone(),
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
|
||||
let events = bridge.poll_events();
|
||||
assert!(
|
||||
events
|
||||
.iter()
|
||||
.any(|e| matches!(e, EngineEvent::NetworkingStarted { .. })),
|
||||
"Should receive NetworkingStarted"
|
||||
);
|
||||
|
||||
// Stop networking
|
||||
bridge.send_command(EngineCommand::StopNetworking);
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
|
||||
let events = bridge.poll_events();
|
||||
assert!(
|
||||
events
|
||||
.iter()
|
||||
.any(|e| matches!(e, EngineEvent::NetworkingStopped)),
|
||||
"Should receive NetworkingStopped"
|
||||
);
|
||||
|
||||
// Cleanup
|
||||
drop(bridge);
|
||||
let _ = engine_handle.await;
|
||||
}
|
||||
|
||||
/// Test JoinSession command routing
|
||||
#[tokio::test]
|
||||
async fn test_join_session_routing() {
|
||||
let (bridge, handle) = EngineBridge::new();
|
||||
|
||||
let engine_handle = tokio::spawn(async move {
|
||||
let core = EngineCore::new(handle, ":memory:");
|
||||
timeout(Duration::from_millis(200), core.run())
|
||||
.await
|
||||
.ok();
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
|
||||
// Join a new session (should start networking)
|
||||
let session_id = SessionId::new();
|
||||
bridge.send_command(EngineCommand::JoinSession {
|
||||
session_id: session_id.clone(),
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(50)).await;
|
||||
|
||||
let events = bridge.poll_events();
|
||||
assert!(
|
||||
events.iter().any(|e| {
|
||||
matches!(
|
||||
e,
|
||||
EngineEvent::NetworkingStarted {
|
||||
session_id: sid,
|
||||
..
|
||||
} if sid == &session_id
|
||||
)
|
||||
}),
|
||||
"JoinSession should start networking"
|
||||
);
|
||||
|
||||
// Cleanup
|
||||
drop(bridge);
|
||||
let _ = engine_handle.await;
|
||||
}
|
||||
|
||||
/// Test that multiple commands are processed in order
|
||||
#[tokio::test]
|
||||
async fn test_command_ordering() {
|
||||
let (bridge, handle) = EngineBridge::new();
|
||||
|
||||
let engine_handle = tokio::spawn(async move {
|
||||
let core = EngineCore::new(handle, ":memory:");
|
||||
timeout(Duration::from_millis(200), core.run())
|
||||
.await
|
||||
.ok();
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(10)).await;
|
||||
|
||||
// Send multiple commands
|
||||
let session1 = SessionId::new();
|
||||
let session2 = SessionId::new();
|
||||
|
||||
bridge.send_command(EngineCommand::StartNetworking {
|
||||
session_id: session1.clone(),
|
||||
});
|
||||
bridge.send_command(EngineCommand::StopNetworking);
|
||||
bridge.send_command(EngineCommand::JoinSession {
|
||||
session_id: session2.clone(),
|
||||
});
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
let events = bridge.poll_events();
|
||||
|
||||
// Should see: NetworkingStarted(session1), NetworkingStopped, NetworkingStarted(session2)
|
||||
let started_events: Vec<_> = events
|
||||
.iter()
|
||||
.filter(|e| matches!(e, EngineEvent::NetworkingStarted { .. }))
|
||||
.collect();
|
||||
|
||||
let stopped_events: Vec<_> = events
|
||||
.iter()
|
||||
.filter(|e| matches!(e, EngineEvent::NetworkingStopped))
|
||||
.collect();
|
||||
|
||||
assert_eq!(started_events.len(), 2, "Should have 2 NetworkingStarted events");
|
||||
assert_eq!(stopped_events.len(), 1, "Should have 1 NetworkingStopped event");
|
||||
|
||||
// Cleanup
|
||||
drop(bridge);
|
||||
let _ = engine_handle.await;
|
||||
}
|
||||
48
crates/libmarathon/tests/networking_gossip_test.rs
Normal file
48
crates/libmarathon/tests/networking_gossip_test.rs
Normal file
@@ -0,0 +1,48 @@
|
||||
//! Integration test for gossip bridge
|
||||
//!
|
||||
//! Tests the gossip bridge channel infrastructure. Full iroh-gossip integration
|
||||
//! will be tested in Phase 3.5.
|
||||
|
||||
use libmarathon::networking::*;
|
||||
|
||||
#[test]
fn test_gossip_bridge_creation() {
    // The bridge must remember the node id it was constructed with.
    let node_id = uuid::Uuid::new_v4();
    let bridge = init_gossip_bridge(node_id);
    assert_eq!(bridge.node_id(), node_id);
}
|
||||
|
||||
#[test]
fn test_gossip_bridge_send() {
    use libmarathon::networking::{
        JoinType,
        SessionId,
    };

    let node_id = uuid::Uuid::new_v4();
    let bridge = init_gossip_bridge(node_id);
    let session_id = SessionId::new();

    // Wrap a fresh join request in the versioned envelope used on the wire.
    let join_request = SyncMessage::JoinRequest {
        node_id,
        session_id,
        session_secret: None,
        last_known_clock: None,
        join_type: JoinType::Fresh,
    };
    let envelope = VersionedMessage::new(join_request);

    // Pushing a message into the bridge channel should not error.
    assert!(bridge.send(envelope).is_ok());
}
|
||||
|
||||
#[test]
fn test_gossip_bridge_try_recv_empty() {
    let node_id = uuid::Uuid::new_v4();
    let bridge = init_gossip_bridge(node_id);

    // Nothing has been delivered yet, so a non-blocking receive yields None.
    assert!(bridge.try_recv().is_none());
}
|
||||
127
crates/libmarathon/tests/our_messages_test.rs
Normal file
127
crates/libmarathon/tests/our_messages_test.rs
Normal file
@@ -0,0 +1,127 @@
|
||||
use chrono::Datelike;
|
||||
use libmarathon::{
|
||||
ChatDb,
|
||||
Result,
|
||||
};
|
||||
|
||||
/// Test that we can get messages from the Dutch phone number conversation.
///
/// Uses the default range (January 2024 to now) and spot-checks ordering,
/// date bounds, and text previews on the first few messages.
#[test]
fn test_get_our_messages_default_range() -> Result<()> {
    let db = ChatDb::open("chat.db")?;

    // Get messages from January 2024 to now (default)
    let messages = db.get_our_messages(None, None)?;

    println!("Found {} messages from January 2024 to now", messages.len());

    // IDIOM: is_empty() instead of len() > 0.
    assert!(
        !messages.is_empty(),
        "Should find messages in the conversation"
    );

    // Verify messages are in chronological order (ASC) — first 10 entries,
    // i.e. 9 adjacent pairs, matching the original `for i in 1..len.min(10)`.
    for pair in messages.windows(2).take(9) {
        if let (Some(prev_date), Some(curr_date)) = (pair[0].date, pair[1].date) {
            assert!(
                prev_date <= curr_date,
                "Messages should be in ascending date order"
            );
        }
    }

    // Verify all sampled messages are from 2024 or later.
    for msg in messages.iter().take(10) {
        if let Some(date) = msg.date {
            assert!(date.year() >= 2024, "Messages should be from 2024 or later");
            // BUGFIX: the original previewed with `&s[..s.len().min(50)]`, a
            // byte-index slice that panics when byte 50 falls inside a
            // multi-byte UTF-8 character (emoji, accents). Truncate by chars.
            let preview = msg
                .text
                .as_ref()
                .map(|s| s.chars().take(50).collect::<String>());
            println!(
                "Message date: {}, from_me: {}, text: {:?}",
                date, msg.is_from_me, preview
            );
        }
    }

    Ok(())
}
|
||||
|
||||
/// Test that we can get messages with a custom date range
#[test]
fn test_get_our_messages_custom_range() -> Result<()> {
    use chrono::{
        TimeZone,
        Utc,
    };

    let db = ChatDb::open("chat.db")?;

    // Query the window March 2024 .. June 2024.
    let start = Utc.with_ymd_and_hms(2024, 3, 1, 0, 0, 0).unwrap();
    let end = Utc.with_ymd_and_hms(2024, 6, 1, 0, 0, 0).unwrap();

    let messages = db.get_our_messages(Some(start), Some(end))?;

    println!("Found {} messages from March to June 2024", messages.len());

    // Every dated message must fall inside the requested window.
    for msg in &messages {
        let Some(date) = msg.date else { continue };
        assert!(
            date >= start && date <= end,
            "Message date {} should be between {} and {}",
            date,
            start,
            end
        );
    }

    Ok(())
}
|
||||
|
||||
/// Test displaying a summary of the conversation.
///
/// Prints totals, sender breakdown, and previews of the first five messages.
#[test]
fn test_conversation_summary() -> Result<()> {
    let db = ChatDb::open("chat.db")?;

    let messages = db.get_our_messages(None, None)?;

    println!("\n=== Conversation Summary ===");
    println!("Total messages: {}", messages.len());

    // Tally directionality.
    let from_me = messages.iter().filter(|m| m.is_from_me).count();
    let from_them = messages.len() - from_me;

    println!("From me: {}", from_me);
    println!("From them: {}", from_them);

    // Show first few messages
    println!("\nFirst 5 messages:");
    for (i, msg) in messages.iter().take(5).enumerate() {
        if let Some(date) = msg.date {
            let sender = if msg.is_from_me { "Me" } else { "Them" };
            // BUGFIX: the original used `&t[..60]`, a byte-index slice that
            // panics when byte 60 is not a UTF-8 character boundary (e.g. on
            // emoji or accented text). Truncate by characters instead.
            let text = msg
                .text
                .as_ref()
                .map(|t| {
                    if t.chars().count() > 60 {
                        let head: String = t.chars().take(60).collect();
                        format!("{}...", head)
                    } else {
                        t.clone()
                    }
                })
                .unwrap_or_else(|| "[No text]".to_string());

            println!(
                "{}. {} ({}): {}",
                i + 1,
                date.format("%Y-%m-%d %H:%M"),
                sender,
                text
            );
        }
    }

    Ok(())
}
|
||||
@@ -0,0 +1,7 @@
|
||||
# Seeds for failure cases proptest has generated in the past. It is
|
||||
# automatically read and these particular cases re-run before any
|
||||
# novel cases are generated.
|
||||
#
|
||||
# It is recommended to check this file in to source control so that
|
||||
# everyone who runs the test benefits from these saved cases.
|
||||
cc f2e56c98051c9da4146af4236447b4b8572a5990b84ae6e64fd93be95fe029df # shrinks to value1 = -733535506, value2 = -37242108
|
||||
366
crates/libmarathon/tests/property_tests.rs
Normal file
366
crates/libmarathon/tests/property_tests.rs
Normal file
@@ -0,0 +1,366 @@
|
||||
//! Property-based tests for CRDT invariants
|
||||
//!
|
||||
//! This module uses proptest to verify that our CRDT implementations maintain
|
||||
//! their mathematical properties under all possible inputs and operation
|
||||
//! sequences.
|
||||
|
||||
use libmarathon::{
|
||||
networking::{
|
||||
NodeId,
|
||||
VectorClock,
|
||||
},
|
||||
persistence::{
|
||||
EntityId,
|
||||
PersistenceError,
|
||||
PersistenceOp,
|
||||
WriteBuffer,
|
||||
},
|
||||
sync::SyncedValue,
|
||||
};
|
||||
use proptest::prelude::*;
|
||||
|
||||
// ============================================================================
|
||||
// VectorClock Property Tests
|
||||
// ============================================================================
|
||||
|
||||
/// Generate arbitrary NodeId (UUID)
|
||||
fn arb_node_id() -> impl Strategy<Value = NodeId> {
|
||||
any::<[u8; 16]>().prop_map(|bytes| uuid::Uuid::from_bytes(bytes))
|
||||
}
|
||||
|
||||
/// Generate arbitrary VectorClock with 1-10 nodes
|
||||
fn arb_vector_clock() -> impl Strategy<Value = VectorClock> {
|
||||
prop::collection::vec((arb_node_id(), 0u64..100), 1..10).prop_map(|entries| {
|
||||
let mut clock = VectorClock::new();
|
||||
for (node_id, count) in entries {
|
||||
for _ in 0..count {
|
||||
clock.increment(node_id);
|
||||
}
|
||||
}
|
||||
clock
|
||||
})
|
||||
}
|
||||
|
||||
proptest! {
    /// Test: VectorClock merge is idempotent
    /// Property: merge(A, A) = A
    #[test]
    fn vector_clock_merge_idempotent(clock in arb_vector_clock()) {
        let mut self_merged = clock.clone();
        self_merged.merge(&clock);
        prop_assert_eq!(self_merged, clock);
    }

    /// Test: VectorClock merge is commutative
    /// Property: merge(A, B) = merge(B, A)
    #[test]
    fn vector_clock_merge_commutative(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        let mut a_then_b = clock_a.clone();
        a_then_b.merge(&clock_b);

        let mut b_then_a = clock_b.clone();
        b_then_a.merge(&clock_a);

        prop_assert_eq!(a_then_b, b_then_a);
    }

    /// Test: VectorClock merge is associative
    /// Property: merge(merge(A, B), C) = merge(A, merge(B, C))
    #[test]
    fn vector_clock_merge_associative(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock(),
        clock_c in arb_vector_clock()
    ) {
        // Left association: (A ∪ B) ∪ C
        let mut left = clock_a.clone();
        left.merge(&clock_b);
        left.merge(&clock_c);

        // Right association: A ∪ (B ∪ C)
        let mut bc = clock_b.clone();
        bc.merge(&clock_c);
        let mut right = clock_a.clone();
        right.merge(&bc);

        prop_assert_eq!(left, right);
    }

    /// Test: happened_before is transitive
    /// Property: If A < B and B < C, then A < C
    #[test]
    fn vector_clock_happened_before_transitive(node_id in arb_node_id()) {
        // Build a strictly increasing chain A < B < C on a single node.
        let mut clock_a = VectorClock::new();
        clock_a.increment(node_id);

        let mut clock_b = clock_a.clone();
        clock_b.increment(node_id);

        let mut clock_c = clock_b.clone();
        clock_c.increment(node_id);

        prop_assert!(clock_a.happened_before(&clock_b));
        prop_assert!(clock_b.happened_before(&clock_c));
        prop_assert!(clock_a.happened_before(&clock_c)); // Transitivity
    }

    /// Test: happened_before is antisymmetric
    /// Property: If A < B, then NOT (B < A)
    #[test]
    fn vector_clock_happened_before_antisymmetric(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        if clock_a.happened_before(&clock_b) {
            prop_assert!(!clock_b.happened_before(&clock_a));
        }
    }

    /// Test: A clock never happens before itself
    /// Property: NOT (A < A)
    #[test]
    fn vector_clock_not_happened_before_self(clock in arb_vector_clock()) {
        prop_assert!(!clock.happened_before(&clock));
    }

    /// Test: Merge creates upper bound
    /// Property: If C = merge(A, B), then A ≤ C and B ≤ C
    #[test]
    fn vector_clock_merge_upper_bound(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        let mut merged = clock_a.clone();
        merged.merge(&clock_b);

        // Each input is ≤ the merge result: strictly before, or equal.
        prop_assert!(clock_a.happened_before(&merged) || clock_a == merged);
        prop_assert!(clock_b.happened_before(&merged) || clock_b == merged);
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// WriteBuffer Property Tests
|
||||
// ============================================================================
|
||||
|
||||
/// Generate arbitrary EntityId (UUID)
|
||||
fn arb_entity_id() -> impl Strategy<Value = EntityId> {
|
||||
any::<[u8; 16]>().prop_map(|bytes| uuid::Uuid::from_bytes(bytes))
|
||||
}
|
||||
|
||||
/// Generate arbitrary component name
|
||||
fn arb_component_type() -> impl Strategy<Value = String> {
|
||||
prop::string::string_regex("[A-Z][a-zA-Z0-9]{0,20}").unwrap()
|
||||
}
|
||||
|
||||
/// Generate arbitrary component data (small to avoid size limits)
|
||||
fn arb_component_data() -> impl Strategy<Value = Vec<u8>> {
|
||||
prop::collection::vec(any::<u8>(), 0..1000)
|
||||
}
|
||||
|
||||
proptest! {
    /// Test: WriteBuffer index consistency after multiple operations
    /// Property: Index always points to valid operations in the buffer
    #[test]
    fn write_buffer_index_consistency(
        operations in prop::collection::vec(
            (arb_entity_id(), arb_component_type(), arb_component_data()),
            1..50
        )
    ) {
        let mut buffer = WriteBuffer::new(1000);

        for (entity_id, component_type, data) in operations {
            let op = PersistenceOp::UpsertComponent {
                entity_id,
                component_type,
                data,
            };

            // Adding reasonably-sized data must never fail.
            prop_assert!(buffer.add(op).is_ok());
        }

        // After all operations, the buffer length stays within its capacity.
        prop_assert!(buffer.len() <= 1000);
    }

    /// Test: WriteBuffer deduplication correctness
    /// Property: Adding same (entity, component) twice keeps only latest
    #[test]
    fn write_buffer_deduplication(
        entity_id in arb_entity_id(),
        component_type in arb_component_type(),
        data1 in arb_component_data(),
        data2 in arb_component_data()
    ) {
        let mut buffer = WriteBuffer::new(100);

        // First write for this (entity, component) pair.
        let op1 = PersistenceOp::UpsertComponent {
            entity_id,
            component_type: component_type.clone(),
            data: data1.clone(),
        };
        prop_assert!(buffer.add(op1).is_ok());

        // Second write must replace (not append to) the first.
        let op2 = PersistenceOp::UpsertComponent {
            entity_id,
            component_type: component_type.clone(),
            data: data2.clone(),
        };
        prop_assert!(buffer.add(op2).is_ok());

        // Deduplication leaves exactly one pending operation...
        prop_assert_eq!(buffer.len(), 1);

        // ...and it carries the most recent payload.
        let ops = buffer.take_operations();
        prop_assert_eq!(ops.len(), 1);
        if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
            prop_assert_eq!(data, &data2);
        }
    }

    /// Test: WriteBuffer respects size limits
    /// Property: Operations larger than MAX_SIZE are rejected
    ///
    /// Note: the oversized payload is fixed rather than randomized — we only
    /// need to verify that the size check fires, and generating random
    /// multi-megabyte buffers per case would be wasteful. (The original doc
    /// comment claimed a reduced "5 cases" sample size that was never
    /// actually configured; that claim has been removed.)
    #[test]
    fn write_buffer_respects_size_limits(
        entity_id in arb_entity_id(),
        component_type in arb_component_type(),
    ) {
        let mut buffer = WriteBuffer::new(100);

        // 11 MB is intended to exceed the buffer's per-component size cap.
        let oversized_data = vec![0u8; 11_000_000];

        let op = PersistenceOp::UpsertComponent {
            entity_id,
            component_type,
            data: oversized_data,
        };

        let result = buffer.add(op);
        prop_assert!(result.is_err());

        // IDIOM: matches! replaces the original if-let/else +
        // prop_assert!(false, ...) construction.
        prop_assert!(
            matches!(result, Err(PersistenceError::ComponentTooLarge { .. })),
            "Expected ComponentTooLarge error"
        );
    }
}
|
||||
|
||||
// ============================================================================
|
||||
// LWW (Last-Write-Wins) Property Tests
|
||||
// ============================================================================
|
||||
|
||||
proptest! {
    /// Test: LWW convergence
    /// Property: Two replicas applying same updates in different order converge
    #[test]
    fn lww_convergence(
        node1 in arb_node_id(),
        node2 in arb_node_id(),
        initial_value in any::<i32>(),
        value1 in any::<i32>(),
        value2 in any::<i32>(),
    ) {
        // Two replicas seeded with identical initial state.
        let mut replica_a = SyncedValue::new(initial_value, node1);
        let mut replica_b = SyncedValue::new(initial_value, node1);

        // Two updates with distinct timestamps; ts2 is strictly later.
        let ts1 = chrono::Utc::now();
        let ts2 = ts1 + chrono::Duration::milliseconds(100);

        // Replica A sees the updates in order...
        replica_a.apply_lww(value1, ts1, node1);
        replica_a.apply_lww(value2, ts2, node2);

        // ...replica B sees them reversed.
        replica_b.apply_lww(value2, ts2, node2);
        replica_b.apply_lww(value1, ts1, node1);

        // Delivery order must not matter: both converge on the newest write.
        prop_assert_eq!(*replica_a.get(), *replica_b.get());
        prop_assert_eq!(*replica_a.get(), value2); // ts2 is newer
    }

    /// Test: LWW merge idempotence
    /// Property: Merging the same value multiple times has no effect
    #[test]
    fn lww_merge_idempotent(
        node_id in arb_node_id(),
        value in any::<i32>(),
    ) {
        let original = SyncedValue::new(value, node_id);
        let mut replica = original.clone();

        // Repeated self-merges must all be no-ops.
        for _ in 0..3 {
            replica.merge(&original);
        }

        prop_assert_eq!(*replica.get(), *original.get());
    }

    /// Test: LWW respects timestamp ordering
    /// Property: Older updates don't overwrite newer ones
    #[test]
    fn lww_respects_timestamp(
        node_id in arb_node_id(),
        old_value in any::<i32>(),
        new_value in any::<i32>(),
    ) {
        let mut lww = SyncedValue::new(old_value, node_id);

        let old_ts = chrono::Utc::now();
        let new_ts = old_ts + chrono::Duration::seconds(10);

        // The newer update lands first; a stale one arrives afterwards.
        lww.apply_lww(new_value, new_ts, node_id);
        lww.apply_lww(old_value, old_ts, node_id);

        // The stale write must be discarded.
        prop_assert_eq!(*lww.get(), new_value);
    }

    /// Test: LWW tiebreaker uses node_id
    /// Property: When timestamps equal, higher node_id wins
    #[test]
    fn lww_tiebreaker(
        value1 in any::<i32>(),
        value2 in any::<i32>(),
    ) {
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);

        // Create the SyncedValue FIRST, then capture a timestamp guaranteed
        // to be newer than its creation time.
        let mut lww = SyncedValue::new(value1, node1);
        std::thread::sleep(std::time::Duration::from_millis(1)); // Ensure ts is after init
        let ts = chrono::Utc::now();

        // Two conflicting writes at the SAME timestamp, differing only by node.
        lww.apply_lww(value1, ts, node1);
        lww.apply_lww(value2, ts, node2);

        // node2 > node1, so value2 should win the tie.
        prop_assert_eq!(*lww.get(), value2);
    }
}
|
||||
1365
crates/libmarathon/tests/sync_integration_headless.rs
Normal file
1365
crates/libmarathon/tests/sync_integration_headless.rs
Normal file
File diff suppressed because it is too large
Load Diff
90
crates/libmarathon/tests/transform_change_test.rs
Normal file
90
crates/libmarathon/tests/transform_change_test.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
//! Minimal test to verify Transform change detection works
|
||||
|
||||
use std::sync::{
|
||||
Arc,
|
||||
Mutex,
|
||||
};
|
||||
|
||||
use bevy::prelude::*;
|
||||
use libmarathon::networking::{
|
||||
NetworkedEntity,
|
||||
NetworkedTransform,
|
||||
Synced,
|
||||
};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[test]
fn test_transform_change_detection_basic() {
    let mut app = App::new();
    app.add_plugins(MinimalPlugins);

    // System under test: mirrors Transform changes onto NetworkedEntity.
    app.add_systems(
        Update,
        libmarathon::networking::auto_detect_transform_changes_system,
    );

    // Observer system that records whether any NetworkedEntity was flagged as
    // changed. The check must happen within the same frame, since change
    // detection state does not survive past the frame it was set in.
    // NOTE(review): no explicit ordering constraint exists between this
    // observer and the auto-detect system — confirm scheduling is stable, or
    // the check could be flaky.
    let change_seen = Arc::new(Mutex::new(false));
    let change_seen_writer = change_seen.clone();

    app.add_systems(
        Update,
        move |query: Query<&NetworkedEntity, Changed<NetworkedEntity>>| {
            let count = query.iter().count();
            if count > 0 {
                println!(
                    "✓ Test system detected {} changed NetworkedEntity components",
                    count
                );
                *change_seen_writer.lock().unwrap() = true;
            } else {
                println!("✗ Test system detected 0 changed NetworkedEntity components");
            }
        },
    );

    // Spawn a synced entity carrying both Transform and NetworkedTransform.
    let node_id = Uuid::new_v4();
    let entity_id = Uuid::new_v4();

    let _entity = app
        .world_mut()
        .spawn((
            NetworkedEntity::with_id(entity_id, node_id),
            NetworkedTransform::default(),
            Transform::from_xyz(0.0, 0.0, 0.0),
            Synced,
        ))
        .id();

    // Frame 1 flushes the change flags produced by spawning itself.
    println!("First update (clearing initial change detection)...");
    app.update();

    // Reset the observation flag before the real check.
    *change_seen.lock().unwrap() = false;

    // Mutate the Transform so the next frame has something to detect.
    {
        let mut query = app.world_mut().query::<&mut Transform>();
        for mut transform in query.iter_mut(app.world_mut()) {
            transform.translation.x = 10.0;
        }
    }

    println!("Modified Transform, running second update...");

    // Frame 2 should propagate the change through the auto-detect system.
    app.update();

    let result = *change_seen.lock().unwrap();
    println!("Was NetworkedEntity marked as changed? {}", result);
    assert!(
        result,
        "NetworkedEntity should be marked as changed after Transform modification"
    );
}
|
||||
Reference in New Issue
Block a user