code review results

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-11 18:39:57 +00:00
parent 2847e7236f
commit 1504807afe
40 changed files with 2600 additions and 678 deletions

View File

@@ -0,0 +1,7 @@
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc f2e56c98051c9da4146af4236447b4b8572a5990b84ae6e64fd93be95fe029df # shrinks to value1 = -733535506, value2 = -37242108

View File

@@ -0,0 +1,366 @@
//! Property-based tests for CRDT invariants
//!
//! This module uses proptest to verify that our CRDT implementations maintain
//! their mathematical properties under all possible inputs and operation
//! sequences.
use lib::{
networking::{
NodeId,
VectorClock,
},
persistence::{
EntityId,
PersistenceError,
PersistenceOp,
WriteBuffer,
},
sync::SyncedValue,
};
use proptest::prelude::*;
// ============================================================================
// VectorClock Property Tests
// ============================================================================
/// Strategy producing an arbitrary `NodeId`: a UUID built from 16 random bytes.
fn arb_node_id() -> impl Strategy<Value = NodeId> {
    any::<[u8; 16]>().prop_map(uuid::Uuid::from_bytes)
}
/// Strategy producing an arbitrary `VectorClock`.
///
/// Generates 1-10 `(node, tick_count)` pairs and advances each node's entry
/// `tick_count` times (0-99), so clocks cover empty-per-node through
/// moderately advanced states.
fn arb_vector_clock() -> impl Strategy<Value = VectorClock> {
    prop::collection::vec((arb_node_id(), 0u64..100), 1..10).prop_map(|entries| {
        entries
            .into_iter()
            .fold(VectorClock::new(), |mut clock, (node, ticks)| {
                (0..ticks).for_each(|_| clock.increment(node));
                clock
            })
    })
}
proptest! {
    /// Idempotence: merging a clock into a copy of itself is a no-op.
    /// Property: merge(A, A) = A
    #[test]
    fn vector_clock_merge_idempotent(clock in arb_vector_clock()) {
        let mut self_merged = clock.clone();
        self_merged.merge(&clock);
        prop_assert_eq!(self_merged, clock);
    }

    /// Commutativity: the merge result does not depend on argument order.
    /// Property: merge(A, B) = merge(B, A)
    #[test]
    fn vector_clock_merge_commutative(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        let mut b_into_a = clock_a.clone();
        b_into_a.merge(&clock_b);

        let mut a_into_b = clock_b.clone();
        a_into_b.merge(&clock_a);

        prop_assert_eq!(b_into_a, a_into_b);
    }

    /// Associativity: the grouping of merges does not matter.
    /// Property: merge(merge(A, B), C) = merge(A, merge(B, C))
    #[test]
    fn vector_clock_merge_associative(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock(),
        clock_c in arb_vector_clock()
    ) {
        // Left grouping: (A merge B) merge C
        let mut left = clock_a.clone();
        left.merge(&clock_b);
        left.merge(&clock_c);

        // Right grouping: A merge (B merge C)
        let mut bc = clock_b.clone();
        bc.merge(&clock_c);
        let mut right = clock_a.clone();
        right.merge(&bc);

        prop_assert_eq!(left, right);
    }

    /// Transitivity of the causal order along a single node's increments.
    /// Property: If A < B and B < C, then A < C
    #[test]
    fn vector_clock_happened_before_transitive(node_id in arb_node_id()) {
        // Build a strictly increasing chain A < B < C on one node.
        let mut clock_a = VectorClock::new();
        clock_a.increment(node_id);

        let mut clock_b = clock_a.clone();
        clock_b.increment(node_id);

        let mut clock_c = clock_b.clone();
        clock_c.increment(node_id);

        prop_assert!(clock_a.happened_before(&clock_b));
        prop_assert!(clock_b.happened_before(&clock_c));
        prop_assert!(clock_a.happened_before(&clock_c)); // transitivity
    }

    /// Antisymmetry: two clocks can never each precede the other.
    /// Property: If A < B, then NOT (B < A)
    #[test]
    fn vector_clock_happened_before_antisymmetric(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        if clock_a.happened_before(&clock_b) {
            prop_assert!(!clock_b.happened_before(&clock_a));
        }
    }

    /// Irreflexivity: the happened-before order is strict.
    /// Property: NOT (A < A)
    #[test]
    fn vector_clock_not_happened_before_self(clock in arb_vector_clock()) {
        prop_assert!(!clock.happened_before(&clock));
    }

    /// Merge yields an upper bound of both inputs.
    /// Property: If C = merge(A, B), then A ≤ C and B ≤ C
    #[test]
    fn vector_clock_merge_upper_bound(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        let mut upper = clock_a.clone();
        upper.merge(&clock_b);

        // A ≤ upper: strictly before, or exactly equal.
        prop_assert!(clock_a.happened_before(&upper) || clock_a == upper);
        // B ≤ upper: strictly before, or exactly equal.
        prop_assert!(clock_b.happened_before(&upper) || clock_b == upper);
    }
}
// ============================================================================
// WriteBuffer Property Tests
// ============================================================================
/// Strategy producing an arbitrary `EntityId`: a UUID built from 16 random bytes.
fn arb_entity_id() -> impl Strategy<Value = EntityId> {
    any::<[u8; 16]>().prop_map(uuid::Uuid::from_bytes)
}
/// Strategy producing an arbitrary component type name: one uppercase letter
/// followed by up to 20 alphanumerics (e.g. "Position", "Health2").
fn arb_component_type() -> impl Strategy<Value = String> {
    prop::string::string_regex("[A-Z][a-zA-Z0-9]{0,20}")
        // The pattern is a compile-time constant; failure here is a test bug.
        .expect("component-type name regex should be valid")
}
/// Strategy producing an arbitrary component payload: 0-999 random bytes.
/// Kept deliberately small so buffer size limits are never hit by accident.
fn arb_component_data() -> impl Strategy<Value = Vec<u8>> {
    prop::collection::vec(any::<u8>(), 0..1000usize)
}
proptest! {
/// Test: WriteBuffer index consistency after multiple operations
/// Property: Index always points to valid operations in the buffer
#[test]
fn write_buffer_index_consistency(
operations in prop::collection::vec(
(arb_entity_id(), arb_component_type(), arb_component_data()),
1..50
)
) {
let mut buffer = WriteBuffer::new(1000);
for (entity_id, component_type, data) in operations {
let op = PersistenceOp::UpsertComponent {
entity_id,
component_type,
data,
};
// Should never fail for valid data
let result = buffer.add(op);
prop_assert!(result.is_ok());
}
// After all operations, buffer length should be valid
prop_assert!(buffer.len() <= 1000);
}
/// Test: WriteBuffer deduplication correctness
/// Property: Adding same (entity, component) twice keeps only latest
#[test]
fn write_buffer_deduplication(
entity_id in arb_entity_id(),
component_type in arb_component_type(),
data1 in arb_component_data(),
data2 in arb_component_data()
) {
let mut buffer = WriteBuffer::new(100);
// Add first version
let op1 = PersistenceOp::UpsertComponent {
entity_id,
component_type: component_type.clone(),
data: data1.clone(),
};
prop_assert!(buffer.add(op1).is_ok());
// Add second version (should replace)
let op2 = PersistenceOp::UpsertComponent {
entity_id,
component_type: component_type.clone(),
data: data2.clone(),
};
prop_assert!(buffer.add(op2).is_ok());
// Should only have 1 operation
prop_assert_eq!(buffer.len(), 1);
// Should be the latest data
let ops = buffer.take_operations();
prop_assert_eq!(ops.len(), 1);
if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
prop_assert_eq!(data, &data2);
}
}
/// Test: WriteBuffer respects size limits
/// Property: Operations larger than MAX_SIZE are rejected
///
/// Note: This test uses a smaller sample size (5 cases) to avoid generating
/// massive amounts of data. We only need to verify the size check works.
#[test]
fn write_buffer_respects_size_limits(
entity_id in arb_entity_id(),
component_type in arb_component_type(),
) {
let mut buffer = WriteBuffer::new(100);
// Create data that's definitely too large (11MB)
// We don't need to randomize the content, just verify size check works
let oversized_data = vec![0u8; 11_000_000];
let op = PersistenceOp::UpsertComponent {
entity_id,
component_type,
data: oversized_data,
};
let result = buffer.add(op);
prop_assert!(result.is_err());
// Verify it's the right error type
if let Err(PersistenceError::ComponentTooLarge { .. }) = result {
// Expected
} else {
prop_assert!(false, "Expected ComponentTooLarge error");
}
}
}
// ============================================================================
// LWW (Last-Write-Wins) Property Tests
// ============================================================================
proptest! {
    /// Test: LWW convergence
    /// Property: Two replicas applying the same updates in different orders
    /// converge to the same value (the newest timestamp wins).
    ///
    /// NOTE(review): the checked-in proptest regression seed ("shrinks to
    /// value1 = ..., value2 = ...") pins a past failure of this test; binding
    /// names are kept stable for readability against that seed comment.
    #[test]
    fn lww_convergence(
        node1 in arb_node_id(),
        node2 in arb_node_id(),
        initial_value in any::<i32>(),
        value1 in any::<i32>(),
        value2 in any::<i32>(),
    ) {
        // Two replicas starting from the same value authored by node1.
        let mut replica_a = SyncedValue::new(initial_value, node1);
        let mut replica_b = SyncedValue::new(initial_value, node1);
        // Two updates with strictly ordered timestamps: ts2 is 100ms newer,
        // so the (value2, ts2) update must win regardless of apply order.
        let ts1 = chrono::Utc::now();
        let ts2 = ts1 + chrono::Duration::milliseconds(100);
        // Replica A applies updates in order: update1, update2
        replica_a.apply_lww(value1, ts1, node1);
        replica_a.apply_lww(value2, ts2, node2);
        // Replica B applies updates in reverse: update2, update1
        replica_b.apply_lww(value2, ts2, node2);
        replica_b.apply_lww(value1, ts1, node1);
        // Both should converge to same value (latest timestamp wins)
        prop_assert_eq!(*replica_a.get(), *replica_b.get());
        prop_assert_eq!(*replica_a.get(), value2); // ts2 is newer
    }
    /// Test: LWW merge idempotence
    /// Property: Merging the same value repeatedly leaves the replica unchanged.
    #[test]
    fn lww_merge_idempotent(
        node_id in arb_node_id(),
        value in any::<i32>(),
    ) {
        let original = SyncedValue::new(value, node_id);
        let mut replica = original.clone();
        // Merge with itself multiple times — the value must be stable.
        replica.merge(&original);
        replica.merge(&original);
        replica.merge(&original);
        prop_assert_eq!(*replica.get(), *original.get());
    }
    /// Test: LWW respects timestamp ordering
    /// Property: An older update never overwrites a newer one, even when the
    /// older update is applied later.
    #[test]
    fn lww_respects_timestamp(
        node_id in arb_node_id(),
        old_value in any::<i32>(),
        new_value in any::<i32>(),
    ) {
        let mut lww = SyncedValue::new(old_value, node_id);
        // new_ts is strictly 10s after old_ts, so ordering is unambiguous.
        let old_ts = chrono::Utc::now();
        let new_ts = old_ts + chrono::Duration::seconds(10);
        // Apply newer update first
        lww.apply_lww(new_value, new_ts, node_id);
        // Apply older update (should be ignored)
        lww.apply_lww(old_value, old_ts, node_id);
        // Should keep the newer value
        prop_assert_eq!(*lww.get(), new_value);
    }
    /// Test: LWW tiebreaker uses node_id
    /// Property: When timestamps are equal, the higher node_id wins.
    ///
    /// NOTE(review): assumes `apply_lww` breaks exact-timestamp ties by
    /// comparing node ids with "higher wins" — confirm against the
    /// `SyncedValue` implementation if this test ever flakes.
    #[test]
    fn lww_tiebreaker(
        value1 in any::<i32>(),
        value2 in any::<i32>(),
    ) {
        // Fixed node ids so the tiebreak direction is deterministic: node2 > node1.
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);
        // Create SyncedValue FIRST, then capture a timestamp that's guaranteed to be newer
        // than the value's initial internal timestamp (hence the 1ms sleep).
        let mut lww = SyncedValue::new(value1, node1);
        std::thread::sleep(std::time::Duration::from_millis(1)); // Ensure ts is after init
        let ts = chrono::Utc::now();
        // Apply update from node1 at timestamp ts
        lww.apply_lww(value1, ts, node1);
        // Apply conflicting update from node2 at the SAME timestamp — only the
        // node-id tiebreak distinguishes them.
        lww.apply_lww(value2, ts, node2);
        // node2 > node1, so value2 should win
        prop_assert_eq!(*lww.get(), value2);
    }
}

View File

@@ -5,12 +5,19 @@
use std::{
path::PathBuf,
time::{Duration, Instant},
time::{
Duration,
Instant,
},
};
use anyhow::Result;
use bevy::{
app::{App, ScheduleRunnerPlugin},
MinimalPlugins,
app::{
App,
ScheduleRunnerPlugin,
},
ecs::{
component::Component,
reflect::ReflectComponent,
@@ -18,23 +25,40 @@ use bevy::{
},
prelude::*,
reflect::Reflect,
MinimalPlugins,
};
use futures_lite::StreamExt;
use iroh::{Endpoint, protocol::Router};
use iroh::{
Endpoint,
protocol::Router,
};
use iroh_gossip::{
api::{GossipReceiver, GossipSender},
api::{
GossipReceiver,
GossipSender,
},
net::Gossip,
proto::TopicId,
};
use lib::{
networking::{
GossipBridge, NetworkedEntity, NetworkedTransform, NetworkingConfig, NetworkingPlugin,
Synced, VersionedMessage,
GossipBridge,
NetworkedEntity,
NetworkedTransform,
NetworkingConfig,
NetworkingPlugin,
Synced,
VersionedMessage,
},
persistence::{
Persisted,
PersistenceConfig,
PersistencePlugin,
},
persistence::{PersistenceConfig, PersistencePlugin, Persisted},
};
use serde::{Deserialize, Serialize};
use serde::{
Deserialize,
Serialize,
};
use sync_macros::Synced as SyncedDerive;
use tempfile::TempDir;
use uuid::Uuid;
@@ -68,8 +92,12 @@ struct TestHealth {
// ============================================================================
mod test_utils {
use rusqlite::{
Connection,
OptionalExtension,
};
use super::*;
use rusqlite::{Connection, OptionalExtension};
/// Test context that manages temporary directories with RAII cleanup
pub struct TestContext {
@@ -134,12 +162,11 @@ mod test_utils {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
let data_result: std::result::Result<Vec<u8>, rusqlite::Error> = conn
.query_row(
"SELECT data FROM components WHERE entity_id = ?1 AND component_type = ?2",
rusqlite::params![entity_id_bytes.as_slice(), component_type],
|row| row.get(0),
);
let data_result: std::result::Result<Vec<u8>, rusqlite::Error> = conn.query_row(
"SELECT data FROM components WHERE entity_id = ?1 AND component_type = ?2",
rusqlite::params![entity_id_bytes.as_slice(), component_type],
|row| row.get(0),
);
let data = data_result.optional()?;
@@ -161,11 +188,9 @@ mod test_utils {
pub fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App {
let mut app = App::new();
app.add_plugins(
MinimalPlugins.set(ScheduleRunnerPlugin::run_loop(Duration::from_secs_f64(
1.0 / 60.0,
))),
)
app.add_plugins(MinimalPlugins.set(ScheduleRunnerPlugin::run_loop(
Duration::from_secs_f64(1.0 / 60.0),
)))
.insert_resource(bridge)
.add_plugins(NetworkingPlugin::new(NetworkingConfig {
node_id,
@@ -233,8 +258,7 @@ mod test_utils {
check_fn: F,
) -> Result<()>
where
F: Fn(&mut World, &mut World) -> bool,
{
F: Fn(&mut World, &mut World) -> bool, {
let start = Instant::now();
let mut tick_count = 0;
@@ -245,12 +269,20 @@ mod test_utils {
tick_count += 1;
if tick_count % 50 == 0 {
println!("Waiting for sync... tick {} ({:.1}s elapsed)", tick_count, start.elapsed().as_secs_f32());
println!(
"Waiting for sync... tick {} ({:.1}s elapsed)",
tick_count,
start.elapsed().as_secs_f32()
);
}
// Check condition
if check_fn(app1.world_mut(), app2.world_mut()) {
println!("Sync completed after {} ticks ({:.3}s)", tick_count, start.elapsed().as_secs_f32());
println!(
"Sync completed after {} ticks ({:.3}s)",
tick_count,
start.elapsed().as_secs_f32()
);
return Ok(());
}
@@ -305,19 +337,27 @@ mod test_utils {
static_provider.add_endpoint_info(addr.clone());
}
endpoint.discovery().add(static_provider);
println!(" Added {} bootstrap peers to static discovery", bootstrap_count);
println!(
" Added {} bootstrap peers to static discovery",
bootstrap_count
);
// Explicitly connect to bootstrap peers
println!(" Connecting to bootstrap peers...");
for addr in &bootstrap_addrs {
match endpoint.connect(addr.clone(), iroh_gossip::ALPN).await {
Ok(_conn) => println!(" ✓ Connected to bootstrap peer: {}", addr.id),
Err(e) => println!(" ✗ Failed to connect to bootstrap peer {}: {}", addr.id, e),
| Ok(_conn) => println!(" ✓ Connected to bootstrap peer: {}", addr.id),
| Err(e) => {
println!(" ✗ Failed to connect to bootstrap peer {}: {}", addr.id, e)
},
}
}
}
println!(" Subscribing to topic with {} bootstrap peers...", bootstrap_count);
println!(
" Subscribing to topic with {} bootstrap peers...",
bootstrap_count
);
// Subscribe to the topic (the IDs now have addresses via discovery)
let subscribe_handle = gossip.subscribe(topic_id, bootstrap_ids).await?;
@@ -332,9 +372,11 @@ mod test_utils {
println!(" Waiting for join to complete (with timeout)...");
// Use a timeout in case mDNS discovery takes a while or fails
match tokio::time::timeout(Duration::from_secs(3), receiver.joined()).await {
Ok(Ok(())) => println!(" Join completed!"),
Ok(Err(e)) => println!(" Join error: {}", e),
Err(_) => println!(" Join timeout - proceeding anyway (mDNS may still connect later)"),
| Ok(Ok(())) => println!(" Join completed!"),
| Ok(Err(e)) => println!(" Join error: {}", e),
| Err(_) => {
println!(" Join timeout - proceeding anyway (mDNS may still connect later)")
},
}
} else {
println!(" No bootstrap peers - skipping join wait (first node in swarm)");
@@ -352,8 +394,7 @@ mod test_utils {
}
/// Setup a pair of iroh-gossip nodes connected to the same topic
pub async fn setup_gossip_pair(
) -> Result<(
pub async fn setup_gossip_pair() -> Result<(
Endpoint,
Endpoint,
Router,
@@ -370,13 +411,15 @@ mod test_utils {
let (ep1, _gossip1, router1, bridge1) = init_gossip_node(topic_id, vec![]).await?;
println!("Node 1 initialized with ID: {}", ep1.addr().id);
// Get node 1's full address (ID + network addresses) for node 2 to bootstrap from
// Get node 1's full address (ID + network addresses) for node 2 to bootstrap
// from
let node1_addr = ep1.addr().clone();
println!("Node 1 full address: {:?}", node1_addr);
// Initialize node 2 with node 1's full address as bootstrap peer
println!("Initializing node 2 with bootstrap peer: {}", node1_addr.id);
let (ep2, _gossip2, router2, bridge2) = init_gossip_node(topic_id, vec![node1_addr]).await?;
let (ep2, _gossip2, router2, bridge2) =
init_gossip_node(topic_id, vec![node1_addr]).await?;
println!("Node 2 initialized with ID: {}", ep2.addr().id);
// Give mDNS and gossip time to discover peers
@@ -387,7 +430,8 @@ mod test_utils {
Ok((ep1, ep2, router1, router2, bridge1, bridge2))
}
/// Spawn background tasks to forward messages between iroh-gossip and GossipBridge
/// Spawn background tasks to forward messages between iroh-gossip and
/// GossipBridge
fn spawn_gossip_bridge_tasks(
sender: GossipSender,
mut receiver: GossipReceiver,
@@ -403,18 +447,27 @@ mod test_utils {
// Poll the bridge's outgoing queue
if let Some(versioned_msg) = bridge_out.try_recv_outgoing() {
msg_count += 1;
println!("[Node {}] Sending message #{} via gossip", node_id, msg_count);
println!(
"[Node {}] Sending message #{} via gossip",
node_id, msg_count
);
// Serialize the message
match bincode::serialize(&versioned_msg) {
Ok(bytes) => {
| Ok(bytes) => {
// Broadcast via gossip
if let Err(e) = sender.broadcast(bytes.into()).await {
eprintln!("[Node {}] Failed to broadcast message: {}", node_id, e);
} else {
println!("[Node {}] Message #{} broadcasted successfully", node_id, msg_count);
println!(
"[Node {}] Message #{} broadcasted successfully",
node_id, msg_count
);
}
}
Err(e) => eprintln!("[Node {}] Failed to serialize message for broadcast: {}", node_id, e),
},
| Err(e) => eprintln!(
"[Node {}] Failed to serialize message for broadcast: {}",
node_id, e
),
}
}
@@ -431,34 +484,52 @@ mod test_utils {
loop {
// Receive from gossip (GossipReceiver is a Stream)
match tokio::time::timeout(Duration::from_millis(100), receiver.next()).await {
Ok(Some(Ok(event))) => {
println!("[Node {}] Received gossip event: {:?}", node_id, std::mem::discriminant(&event));
| Ok(Some(Ok(event))) => {
println!(
"[Node {}] Received gossip event: {:?}",
node_id,
std::mem::discriminant(&event)
);
if let iroh_gossip::api::Event::Received(msg) = event {
msg_count += 1;
println!("[Node {}] Received message #{} from gossip", node_id, msg_count);
println!(
"[Node {}] Received message #{} from gossip",
node_id, msg_count
);
// Deserialize the message
match bincode::deserialize::<VersionedMessage>(&msg.content) {
Ok(versioned_msg) => {
| Ok(versioned_msg) => {
// Push to bridge's incoming queue
if let Err(e) = bridge_in.push_incoming(versioned_msg) {
eprintln!("[Node {}] Failed to push to bridge incoming: {}", node_id, e);
eprintln!(
"[Node {}] Failed to push to bridge incoming: {}",
node_id, e
);
} else {
println!("[Node {}] Message #{} pushed to bridge incoming", node_id, msg_count);
println!(
"[Node {}] Message #{} pushed to bridge incoming",
node_id, msg_count
);
}
}
Err(e) => eprintln!("[Node {}] Failed to deserialize gossip message: {}", node_id, e),
},
| Err(e) => eprintln!(
"[Node {}] Failed to deserialize gossip message: {}",
node_id, e
),
}
}
}
Ok(Some(Err(e))) => eprintln!("[Node {}] Gossip receiver error: {}", node_id, e),
Ok(None) => {
},
| Ok(Some(Err(e))) => {
eprintln!("[Node {}] Gossip receiver error: {}", node_id, e)
},
| Ok(None) => {
// Stream ended
println!("[Node {}] Gossip stream ended", node_id);
break;
}
Err(_) => {
},
| Err(_) => {
// Timeout, no message available
}
},
}
}
});
@@ -500,12 +571,15 @@ async fn test_basic_entity_sync() -> Result<()> {
// Node 1 spawns entity
let entity_id = Uuid::new_v4();
println!("Spawning entity {} on node 1", entity_id);
let spawned_entity = app1.world_mut().spawn((
NetworkedEntity::with_id(entity_id, node1_id),
TestPosition { x: 10.0, y: 20.0 },
Persisted::with_id(entity_id),
Synced,
)).id();
let spawned_entity = app1
.world_mut()
.spawn((
NetworkedEntity::with_id(entity_id, node1_id),
TestPosition { x: 10.0, y: 20.0 },
Persisted::with_id(entity_id),
Synced,
))
.id();
// IMPORTANT: Trigger change detection for persistence
// Bevy only marks components as "changed" when mutated, not on spawn
@@ -536,7 +610,11 @@ async fn test_basic_entity_sync() -> Result<()> {
query.iter(w2).map(|ne| ne.network_id).collect()
};
if !all_networked.is_empty() {
println!(" Node 2 has {} networked entities: {:?}", all_networked.len(), all_networked);
println!(
" Node 2 has {} networked entities: {:?}",
all_networked.len(),
all_networked
);
println!(" Looking for: {}", entity_id);
}
false
@@ -565,7 +643,11 @@ async fn test_basic_entity_sync() -> Result<()> {
}
// Verify entity synced to node 2 (in-memory check)
assert_entity_synced(app2.world_mut(), entity_id, TestPosition { x: 10.0, y: 20.0 })?;
assert_entity_synced(
app2.world_mut(),
entity_id,
TestPosition { x: 10.0, y: 20.0 },
)?;
println!("✓ Entity synced in-memory on node 2");
// Give persistence system time to flush to disk
@@ -586,7 +668,11 @@ async fn test_basic_entity_sync() -> Result<()> {
);
assert!(
component_exists_in_db(&ctx1.db_path(), entity_id, "sync_integration_headless::TestPosition")?,
component_exists_in_db(
&ctx1.db_path(),
entity_id,
"sync_integration_headless::TestPosition"
)?,
"TestPosition component should exist in Node 1 database"
);
@@ -616,7 +702,11 @@ async fn test_basic_entity_sync() -> Result<()> {
);
assert!(
component_exists_in_db(&ctx2.db_path(), entity_id, "sync_integration_headless::TestPosition")?,
component_exists_in_db(
&ctx2.db_path(),
entity_id,
"sync_integration_headless::TestPosition"
)?,
"TestPosition component should exist in Node 2 database after sync"
);
@@ -666,12 +756,15 @@ async fn test_bidirectional_sync() -> Result<()> {
// Node 1 spawns entity A
let entity_a = Uuid::new_v4();
let entity_a_bevy = app1.world_mut().spawn((
NetworkedEntity::with_id(entity_a, node1_id),
TestPosition { x: 1.0, y: 2.0 },
Persisted::with_id(entity_a),
Synced,
)).id();
let entity_a_bevy = app1
.world_mut()
.spawn((
NetworkedEntity::with_id(entity_a, node1_id),
TestPosition { x: 1.0, y: 2.0 },
Persisted::with_id(entity_a),
Synced,
))
.id();
// Trigger persistence for entity A
{
@@ -685,12 +778,15 @@ async fn test_bidirectional_sync() -> Result<()> {
// Node 2 spawns entity B
let entity_b = Uuid::new_v4();
let entity_b_bevy = app2.world_mut().spawn((
NetworkedEntity::with_id(entity_b, node2_id),
TestPosition { x: 3.0, y: 4.0 },
Persisted::with_id(entity_b),
Synced,
)).id();
let entity_b_bevy = app2
.world_mut()
.spawn((
NetworkedEntity::with_id(entity_b, node2_id),
TestPosition { x: 3.0, y: 4.0 },
Persisted::with_id(entity_b),
Synced,
))
.id();
// Trigger persistence for entity B
{
@@ -709,10 +805,8 @@ async fn test_bidirectional_sync() -> Result<()> {
.await?;
// Verify both nodes have both entities
assert_entity_synced(app1.world_mut(), entity_b, TestPosition { x: 3.0, y: 4.0 })
?;
assert_entity_synced(app2.world_mut(), entity_a, TestPosition { x: 1.0, y: 2.0 })
?;
assert_entity_synced(app1.world_mut(), entity_b, TestPosition { x: 3.0, y: 4.0 })?;
assert_entity_synced(app2.world_mut(), entity_a, TestPosition { x: 1.0, y: 2.0 })?;
println!("✓ Bidirectional sync test passed");
@@ -742,13 +836,16 @@ async fn test_concurrent_conflict_resolution() -> Result<()> {
// Spawn shared entity on node 1 with Transform (which IS tracked for changes)
let entity_id = Uuid::new_v4();
let entity_bevy = app1.world_mut().spawn((
NetworkedEntity::with_id(entity_id, node1_id),
NetworkedTransform::default(),
Transform::from_xyz(0.0, 0.0, 0.0),
Persisted::with_id(entity_id),
Synced,
)).id();
let entity_bevy = app1
.world_mut()
.spawn((
NetworkedEntity::with_id(entity_id, node1_id),
NetworkedTransform::default(),
Transform::from_xyz(0.0, 0.0, 0.0),
Persisted::with_id(entity_id),
Synced,
))
.id();
// Trigger persistence
{
@@ -771,20 +868,44 @@ async fn test_concurrent_conflict_resolution() -> Result<()> {
// Check what components the entity has on each node
{
let world1 = app1.world_mut();
let mut query1 = world1.query::<(Entity, &NetworkedEntity, Option<&NetworkedTransform>, &Transform)>();
let mut query1 = world1.query::<(
Entity,
&NetworkedEntity,
Option<&NetworkedTransform>,
&Transform,
)>();
println!("Node 1 entities:");
for (entity, ne, nt, t) in query1.iter(world1) {
println!(" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
entity, ne.network_id, nt.is_some(), t.translation.x, t.translation.y, t.translation.z);
println!(
" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
entity,
ne.network_id,
nt.is_some(),
t.translation.x,
t.translation.y,
t.translation.z
);
}
}
{
let world2 = app2.world_mut();
let mut query2 = world2.query::<(Entity, &NetworkedEntity, Option<&NetworkedTransform>, &Transform)>();
let mut query2 = world2.query::<(
Entity,
&NetworkedEntity,
Option<&NetworkedTransform>,
&Transform,
)>();
println!("Node 2 entities:");
for (entity, ne, nt, t) in query2.iter(world2) {
println!(" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
entity, ne.network_id, nt.is_some(), t.translation.x, t.translation.y, t.translation.z);
println!(
" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
entity,
ne.network_id,
nt.is_some(),
t.translation.x,
t.translation.y,
t.translation.z
);
}
}
@@ -834,11 +955,14 @@ async fn test_concurrent_conflict_resolution() -> Result<()> {
let t2 = transforms2[0];
// Check if they converged (within floating point tolerance)
let converged = (t1.translation.x - t2.translation.x).abs() < 0.01
&& (t1.translation.y - t2.translation.y).abs() < 0.01;
let converged = (t1.translation.x - t2.translation.x).abs() < 0.01 &&
(t1.translation.y - t2.translation.y).abs() < 0.01;
if converged {
println!("✓ Nodes converged to: ({}, {})", t1.translation.x, t1.translation.y);
println!(
"✓ Nodes converged to: ({}, {})",
t1.translation.x, t1.translation.y
);
}
converged
@@ -876,10 +1000,7 @@ async fn test_persistence_crash_recovery() -> Result<()> {
app.world_mut().spawn((
NetworkedEntity::with_id(entity_id, node_id),
TestPosition {
x: 100.0,
y: 200.0,
},
TestPosition { x: 100.0, y: 200.0 },
Persisted::with_id(entity_id),
Synced,
));
@@ -905,8 +1026,12 @@ async fn test_persistence_crash_recovery() -> Result<()> {
app.update();
// Verify entity loaded from database
assert_entity_synced(app.world_mut(), entity_id, TestPosition { x: 100.0, y: 200.0 })
.map_err(|e| anyhow::anyhow!("Persistence recovery failed: {}", e))?;
assert_entity_synced(
app.world_mut(),
entity_id,
TestPosition { x: 100.0, y: 200.0 },
)
.map_err(|e| anyhow::anyhow!("Persistence recovery failed: {}", e))?;
println!("✓ Crash recovery test passed");
}

View File

@@ -1,8 +1,16 @@
//! Minimal test to verify Transform change detection works
use std::sync::{
Arc,
Mutex,
};
use bevy::prelude::*;
use lib::networking::{NetworkedEntity, NetworkedTransform, Synced};
use std::sync::{Arc, Mutex};
use lib::networking::{
NetworkedEntity,
NetworkedTransform,
Synced,
};
use uuid::Uuid;
#[test]
@@ -11,33 +19,46 @@ fn test_transform_change_detection_basic() {
app.add_plugins(MinimalPlugins);
// Add the auto_detect system
app.add_systems(Update, lib::networking::auto_detect_transform_changes_system);
app.add_systems(
Update,
lib::networking::auto_detect_transform_changes_system,
);
// Add a test system that runs AFTER auto_detect to check if NetworkedEntity was changed
// We need to check DURING the frame because change detection is cleared after each frame
// Add a test system that runs AFTER auto_detect to check if NetworkedEntity was
// changed We need to check DURING the frame because change detection is
// cleared after each frame
let was_changed = Arc::new(Mutex::new(false));
let was_changed_clone = was_changed.clone();
app.add_systems(Update, move |query: Query<&NetworkedEntity, Changed<NetworkedEntity>>| {
let count = query.iter().count();
if count > 0 {
println!("✓ Test system detected {} changed NetworkedEntity components", count);
*was_changed_clone.lock().unwrap() = true;
} else {
println!(" Test system detected 0 changed NetworkedEntity components");
}
});
app.add_systems(
Update,
move |query: Query<&NetworkedEntity, Changed<NetworkedEntity>>| {
let count = query.iter().count();
if count > 0 {
println!(
" Test system detected {} changed NetworkedEntity components",
count
);
*was_changed_clone.lock().unwrap() = true;
} else {
println!("✗ Test system detected 0 changed NetworkedEntity components");
}
},
);
// Spawn an entity with Transform and NetworkedTransform
let node_id = Uuid::new_v4();
let entity_id = Uuid::new_v4();
let _entity = app.world_mut().spawn((
NetworkedEntity::with_id(entity_id, node_id),
NetworkedTransform::default(),
Transform::from_xyz(0.0, 0.0, 0.0),
Synced,
)).id();
let _entity = app
.world_mut()
.spawn((
NetworkedEntity::with_id(entity_id, node_id),
NetworkedTransform::default(),
Transform::from_xyz(0.0, 0.0, 0.0),
Synced,
))
.id();
// Run one update to clear initial change detection
println!("First update (clearing initial change detection)...");
@@ -62,5 +83,8 @@ fn test_transform_change_detection_basic() {
// Check if our test system detected the change
let result = *was_changed.lock().unwrap();
println!("Was NetworkedEntity marked as changed? {}", result);
assert!(result, "NetworkedEntity should be marked as changed after Transform modification");
assert!(
result,
"NetworkedEntity should be marked as changed after Transform modification"
);
}