Add persistence and networking

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-09 22:21:58 +00:00
parent 93ab598db0
commit e25078ba44
18 changed files with 1787 additions and 33528 deletions

1
.gitignore vendored
View File

@@ -75,3 +75,4 @@ target/doc/
# Project-specific (based on your untracked files)
emotion-gradient-config-*.json
**/*.csv

1
Cargo.lock generated
View File

@@ -4737,6 +4737,7 @@ dependencies = [
"serde_json",
"sha2 0.10.9",
"sync-macros",
"tempfile",
"thiserror 2.0.17",
"tokio",
"toml",

View File

@@ -22,6 +22,7 @@ sha2 = "0.10"
[dev-dependencies]
tokio.workspace = true
iroh.workspace = true
iroh = { workspace = true, features = ["discovery-local-network"] }
iroh-gossip.workspace = true
futures-lite = "2.0"
tempfile = "3"

File diff suppressed because one or more lines are too long

View File

@@ -3,10 +3,10 @@
//! This module handles incoming EntityDelta messages and applies them to the
//! local Bevy world using CRDT merge semantics.
use bevy::{
prelude::*,
reflect::TypeRegistry,
};
use std::collections::HashMap;
use bevy::prelude::*;
use uuid::Uuid;
use crate::{
networking::{
@@ -16,17 +16,52 @@ use crate::{
},
delta_generation::NodeVectorClock,
entity_map::NetworkEntityMap,
merge::compare_operations_lww,
messages::{
ComponentData,
EntityDelta,
SyncMessage,
},
operations::ComponentOp,
NetworkedEntity,
VectorClock,
},
persistence::reflection::deserialize_component,
persistence::reflection::deserialize_component_typed,
};
/// Resource to track the last vector clock and originating node for each component on each entity
///
/// This enables Last-Write-Wins conflict resolution by comparing incoming
/// operations' vector clocks with the current component's vector clock.
/// The node_id is used as a deterministic tiebreaker for concurrent operations.
#[derive(Resource, Default)]
pub struct ComponentVectorClocks {
    /// Maps entity_network_id -> component_type -> (vector_clock, originating_node_id)
    ///
    /// Nested maps (instead of a `(Uuid, String)` tuple key) let `get` look up
    /// by `&str` without allocating an owned `String` per query, and make
    /// `remove_entity` a single map removal instead of a full retain scan.
    clocks: HashMap<Uuid, HashMap<String, (VectorClock, Uuid)>>,
}
impl ComponentVectorClocks {
    /// Create an empty clock registry (equivalent to `Default::default()`).
    pub fn new() -> Self {
        Self::default()
    }
    /// Get the current vector clock and node_id for a component
    ///
    /// Returns `None` when no clock has been recorded for this
    /// (entity, component) pair yet. Allocation-free lookup.
    pub fn get(&self, entity_id: Uuid, component_type: &str) -> Option<&(VectorClock, Uuid)> {
        self.clocks.get(&entity_id)?.get(component_type)
    }
    /// Update the vector clock and node_id for a component
    pub fn set(&mut self, entity_id: Uuid, component_type: String, clock: VectorClock, node_id: Uuid) {
        self.clocks
            .entry(entity_id)
            .or_default()
            .insert(component_type, (clock, node_id));
    }
    /// Remove all clocks for an entity (when entity is deleted)
    pub fn remove_entity(&mut self, entity_id: Uuid) {
        self.clocks.remove(&entity_id);
    }
}
/// Apply an EntityDelta message to the local world
///
/// This function:
@@ -38,39 +73,33 @@ use crate::{
/// # Parameters
///
/// - `delta`: The EntityDelta to apply
/// - `commands`: Bevy Commands for spawning/modifying entities
/// - `entity_map`: Map from network_id to Entity
/// - `type_registry`: Bevy's type registry for deserialization
/// - `node_clock`: Our node's vector clock (for causality tracking)
/// - `blob_store`: Optional blob store for resolving large component references
/// - `tombstone_registry`: Optional tombstone registry for deletion tracking
/// - `world`: The Bevy world to apply changes to
pub fn apply_entity_delta(
delta: &EntityDelta,
commands: &mut Commands,
entity_map: &mut NetworkEntityMap,
type_registry: &TypeRegistry,
node_clock: &mut NodeVectorClock,
blob_store: Option<&BlobStore>,
mut tombstone_registry: Option<&mut crate::networking::TombstoneRegistry>,
world: &mut World,
) {
// Validate and merge the remote vector clock
// Check for clock regression (shouldn't happen in correct implementations)
if delta.vector_clock.happened_before(&node_clock.clock) {
warn!(
"Received operation with clock from the past for entity {:?}. \
Remote clock happened before our clock. This may indicate clock issues.",
delta.entity_id
);
}
{
let mut node_clock = world.resource_mut::<NodeVectorClock>();
// Merge the remote vector clock into ours
node_clock.clock.merge(&delta.vector_clock);
// Check for clock regression (shouldn't happen in correct implementations)
if delta.vector_clock.happened_before(&node_clock.clock) {
warn!(
"Received operation with clock from the past for entity {:?}. \
Remote clock happened before our clock. This may indicate clock issues.",
delta.entity_id
);
}
// Merge the remote vector clock into ours
node_clock.clock.merge(&delta.vector_clock);
}
// Check if any operations are Delete operations
for op in &delta.operations {
if let crate::networking::ComponentOp::Delete { vector_clock } = op {
// Record tombstone
if let Some(ref mut registry) = tombstone_registry {
if let Some(mut registry) = world.get_resource_mut::<crate::networking::TombstoneRegistry>() {
registry.record_deletion(
delta.entity_id,
delta.node_id,
@@ -78,8 +107,13 @@ pub fn apply_entity_delta(
);
// Despawn the entity if it exists locally
if let Some(entity) = entity_map.get_entity(delta.entity_id) {
commands.entity(entity).despawn();
let entity_to_despawn = {
let entity_map = world.resource::<NetworkEntityMap>();
entity_map.get_entity(delta.entity_id)
};
if let Some(entity) = entity_to_despawn {
world.despawn(entity);
let mut entity_map = world.resource_mut::<NetworkEntityMap>();
entity_map.remove_by_network_id(delta.entity_id);
info!("Despawned entity {:?} due to Delete operation", delta.entity_id);
}
@@ -91,7 +125,7 @@ pub fn apply_entity_delta(
}
// Check if we should ignore this delta due to deletion
if let Some(ref registry) = tombstone_registry {
if let Some(registry) = world.get_resource::<crate::networking::TombstoneRegistry>() {
if registry.should_ignore_operation(delta.entity_id, &delta.vector_clock) {
debug!(
"Ignoring delta for deleted entity {:?}",
@@ -101,29 +135,30 @@ pub fn apply_entity_delta(
}
}
// Look up or create the entity
let entity = match entity_map.get_entity(delta.entity_id) {
Some(entity) => entity,
None => {
// Spawn new entity with NetworkedEntity component
let entity = commands
.spawn(NetworkedEntity::with_id(delta.entity_id, delta.node_id))
.id();
entity_map.insert(delta.entity_id, entity);
info!(
"Spawned new networked entity {:?} from node {}",
delta.entity_id, delta.node_id
);
let entity = {
let entity_map = world.resource::<NetworkEntityMap>();
if let Some(entity) = entity_map.get_entity(delta.entity_id) {
entity
} else {
// Use shared helper to spawn networked entity with persistence
crate::networking::spawn_networked_entity(world, delta.entity_id, delta.node_id)
}
};
// Apply each operation (skip Delete operations - handled above)
for op in &delta.operations {
if !op.is_delete() {
apply_component_op(entity, op, commands, type_registry, blob_store);
apply_component_op(entity, op, delta.node_id, world);
}
}
// Trigger persistence by marking Persisted as changed
// This ensures remote entities are persisted after sync
if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
if let Some(mut persisted) = entity_mut.get_mut::<crate::persistence::Persisted>() {
// Accessing &mut triggers Bevy's change detection
let _ = &mut *persisted;
debug!("Triggered persistence for synced entity {:?}", delta.entity_id);
}
}
}
@@ -135,17 +170,16 @@ pub fn apply_entity_delta(
fn apply_component_op(
entity: Entity,
op: &ComponentOp,
commands: &mut Commands,
type_registry: &TypeRegistry,
blob_store: Option<&BlobStore>,
incoming_node_id: Uuid,
world: &mut World,
) {
match op {
| ComponentOp::Set {
component_type,
data,
vector_clock: _,
vector_clock,
} => {
apply_set_operation(entity, component_type, data, commands, type_registry, blob_store);
apply_set_operation_with_lww(entity, component_type, data, vector_clock, incoming_node_id, world);
}
| ComponentOp::SetAdd { component_type, .. } => {
// OR-Set add - Phase 10 provides OrSet<T> type
@@ -174,6 +208,120 @@ fn apply_component_op(
}
}
/// Apply a Set operation with Last-Write-Wins conflict resolution
///
/// Compares the incoming vector clock with the clock stored for this component
/// in [`ComponentVectorClocks`] and applies the operation only when the remote
/// side wins the LWW comparison. Concurrent operations are resolved with a
/// deterministic node-id tiebreak so every peer converges on the same winner.
fn apply_set_operation_with_lww(
    entity: Entity,
    component_type: &str,
    data: &ComponentData,
    incoming_clock: &VectorClock,
    incoming_node_id: Uuid,
    world: &mut World,
) {
    // Resolve the stable network ID for this entity; bail out if the entity
    // is gone or is not a networked entity.
    let Ok(entity_ref) = world.get_entity(entity) else {
        warn!("Entity {:?} not found", entity);
        return;
    };
    let Some(networked) = entity_ref.get::<crate::networking::NetworkedEntity>() else {
        warn!("Entity {:?} has no NetworkedEntity component", entity);
        return;
    };
    let entity_network_id = networked.network_id;
    // Decide whether the remote operation should overwrite local state.
    let should_apply = match world.get_resource::<ComponentVectorClocks>() {
        None => {
            // Without the clock registry there is nothing to compare against.
            warn!("ComponentVectorClocks resource not found - applying Set without LWW check");
            true
        }
        Some(component_clocks) => match component_clocks.get(entity_network_id, component_type) {
            None => {
                // First write for this component - nothing recorded yet.
                debug!(
                    "Applying remote Set for {} (no current clock)",
                    component_type
                );
                true
            }
            Some((current_clock, current_node_id)) => {
                use crate::networking::merge::MergeDecision;
                match compare_operations_lww(
                    current_clock,
                    *current_node_id,
                    incoming_clock,
                    incoming_node_id,
                ) {
                    MergeDecision::ApplyRemote => {
                        debug!(
                            "Applying remote Set for {} (remote is newer)",
                            component_type
                        );
                        true
                    }
                    MergeDecision::KeepLocal => {
                        debug!(
                            "Ignoring remote Set for {} (local is newer)",
                            component_type
                        );
                        false
                    }
                    MergeDecision::Concurrent => {
                        // Concurrent clocks: the larger node id wins so that
                        // every node makes the same deterministic choice.
                        if incoming_node_id > *current_node_id {
                            debug!(
                                "Applying remote Set for {} (concurrent, remote node_id {:?} > local {:?})",
                                component_type, incoming_node_id, current_node_id
                            );
                            true
                        } else {
                            debug!(
                                "Ignoring remote Set for {} (concurrent, local node_id {:?} >= remote {:?})",
                                component_type, current_node_id, incoming_node_id
                            );
                            false
                        }
                    }
                    MergeDecision::Equal => {
                        debug!("Ignoring remote Set for {} (clocks equal)", component_type);
                        false
                    }
                }
            }
        },
    };
    if !should_apply {
        return;
    }
    // Remote side won: write the component, then record its clock so future
    // operations compare against this write.
    apply_set_operation(entity, component_type, data, world);
    if let Some(mut component_clocks) = world.get_resource_mut::<ComponentVectorClocks>() {
        component_clocks.set(
            entity_network_id,
            component_type.to_string(),
            incoming_clock.clone(),
            incoming_node_id,
        );
        debug!(
            "Updated vector clock for {} on entity {:?} (node_id: {:?})",
            component_type, entity_network_id, incoming_node_id
        );
    }
}
/// Apply a Set operation (Last-Write-Wins)
///
/// Deserializes the component and inserts/updates it on the entity.
@@ -182,10 +330,13 @@ fn apply_set_operation(
entity: Entity,
component_type: &str,
data: &ComponentData,
commands: &mut Commands,
type_registry: &TypeRegistry,
blob_store: Option<&BlobStore>,
world: &mut World,
) {
let type_registry = {
let registry_resource = world.resource::<AppTypeRegistry>();
registry_resource.read()
};
let blob_store = world.get_resource::<BlobStore>();
// Get the actual data (resolve blob if needed)
let data_bytes = match data {
| ComponentData::Inline(bytes) => bytes.clone(),
@@ -211,19 +362,14 @@ fn apply_set_operation(
}
};
// Deserialize the component
let reflected = match deserialize_component(&data_bytes, type_registry) {
let reflected = match deserialize_component_typed(&data_bytes, component_type, &type_registry) {
Ok(reflected) => reflected,
Err(e) => {
error!(
"Failed to deserialize component {}: {}",
component_type, e
);
error!("Failed to deserialize component {}: {}", component_type, e);
return;
}
};
// Get the type registration
let registration = match type_registry.get_with_type_path(component_type) {
Some(reg) => reg,
None => {
@@ -232,40 +378,36 @@ fn apply_set_operation(
}
};
// Get ReflectComponent data
let reflect_component = match registration.data::<ReflectComponent>() {
Some(rc) => rc.clone(),
None => {
error!(
"Component type {} does not have ReflectComponent data",
component_type
);
error!("Component type {} does not have ReflectComponent data", component_type);
return;
}
};
// Clone what we need to avoid lifetime issues
let component_type_owned = component_type.to_string();
drop(type_registry);
// Insert or update the component
commands.queue(move |world: &mut World| {
// Get the type registry from the world and clone it
let type_registry_arc = {
let Some(type_registry_res) = world.get_resource::<AppTypeRegistry>() else {
error!("AppTypeRegistry not found in world");
return;
};
type_registry_res.clone()
};
let type_registry_arc = world.resource::<AppTypeRegistry>().clone();
let type_registry_guard = type_registry_arc.read();
// Now we can safely get mutable access to the world
let type_registry = type_registry_arc.read();
if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
reflect_component.insert(&mut entity_mut, &*reflected, &type_registry_guard);
debug!("Applied Set operation for {}", component_type);
if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
reflect_component.insert(&mut entity_mut, &*reflected, &type_registry);
debug!("Applied Set operation for {}", component_type_owned);
// If we just inserted a Transform component, also add NetworkedTransform
// This ensures remote entities can have their Transform changes detected
if component_type == "bevy_transform::components::transform::Transform" {
if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
if entity_mut.get::<crate::networking::NetworkedTransform>().is_none() {
entity_mut.insert(crate::networking::NetworkedTransform::default());
debug!("Added NetworkedTransform to entity with Transform");
}
}
}
});
} else {
error!("Entity {:?} not found when applying component {}", entity, component_type);
}
}
/// System to receive and apply incoming EntityDelta messages
@@ -282,21 +424,14 @@ fn apply_set_operation(
/// App::new()
/// .add_systems(Update, receive_and_apply_deltas_system);
/// ```
pub fn receive_and_apply_deltas_system(
mut commands: Commands,
bridge: Option<Res<crate::networking::GossipBridge>>,
mut entity_map: ResMut<NetworkEntityMap>,
type_registry: Res<AppTypeRegistry>,
mut node_clock: ResMut<NodeVectorClock>,
blob_store: Option<Res<BlobStore>>,
mut tombstone_registry: Option<ResMut<crate::networking::TombstoneRegistry>>,
) {
let Some(bridge) = bridge else {
pub fn receive_and_apply_deltas_system(world: &mut World) {
// Check if bridge exists
if world.get_resource::<crate::networking::GossipBridge>().is_none() {
return;
};
}
let registry = type_registry.read();
let blob_store_ref = blob_store.as_deref();
// Clone the bridge to avoid borrowing issues
let bridge = world.resource::<crate::networking::GossipBridge>().clone();
// Poll for incoming messages
while let Some(message) = bridge.try_recv() {
@@ -320,15 +455,7 @@ pub fn receive_and_apply_deltas_system(
delta.operations.len()
);
apply_entity_delta(
&delta,
&mut commands,
&mut entity_map,
&registry,
&mut node_clock,
blob_store_ref,
tombstone_registry.as_deref_mut(),
);
apply_entity_delta(&delta, world);
}
| SyncMessage::JoinRequest { .. } => {
// Handled by handle_join_requests_system

View File

@@ -28,17 +28,28 @@ use crate::networking::{
/// ```
pub fn auto_detect_transform_changes_system(
mut query: Query<
&mut NetworkedEntity,
(Entity, &mut NetworkedEntity, &Transform),
(
With<NetworkedTransform>,
Or<(Changed<Transform>, Changed<GlobalTransform>)>,
),
>,
) {
// Count how many changed entities we found
let count = query.iter().count();
if count > 0 {
debug!("auto_detect_transform_changes_system: Found {} entities with changed Transform", count);
}
// Simply accessing &mut NetworkedEntity triggers Bevy's change detection
for mut _networked in query.iter_mut() {
for (_entity, mut networked, transform) in query.iter_mut() {
debug!(
"Marking NetworkedEntity {:?} as changed due to Transform change (pos: {:?})",
networked.network_id, transform.translation
);
// No-op - the mutable access itself marks NetworkedEntity as changed
// This will trigger the delta generation system
let _ = &mut *networked;
}
}

View File

@@ -7,7 +7,6 @@ use bevy::prelude::*;
use crate::networking::{
change_detection::LastSyncVersions,
entity_map::NetworkEntityMap,
gossip_bridge::GossipBridge,
messages::{
EntityDelta,
@@ -67,82 +66,133 @@ impl NodeVectorClock {
/// App::new()
/// .add_systems(Update, generate_delta_system);
/// ```
pub fn generate_delta_system(
query: Query<(Entity, &NetworkedEntity), Changed<NetworkedEntity>>,
world: &World,
type_registry: Res<AppTypeRegistry>,
mut node_clock: ResMut<NodeVectorClock>,
mut last_versions: ResMut<LastSyncVersions>,
bridge: Option<Res<GossipBridge>>,
_entity_map: Res<NetworkEntityMap>,
mut operation_log: Option<ResMut<crate::networking::OperationLog>>,
) {
// Early return if no gossip bridge
let Some(bridge) = bridge else {
pub fn generate_delta_system(world: &mut World) {
// Check if bridge exists
if world.get_resource::<GossipBridge>().is_none() {
return;
}
let changed_entities: Vec<(Entity, uuid::Uuid, uuid::Uuid)> = {
let mut query = world.query_filtered::<(Entity, &NetworkedEntity), Changed<NetworkedEntity>>();
query.iter(world)
.map(|(entity, networked)| (entity, networked.network_id, networked.owner_node_id))
.collect()
};
let registry = type_registry.read();
if changed_entities.is_empty() {
return;
}
for (entity, networked) in query.iter() {
// Check if we should sync this entity
let current_seq = node_clock.sequence();
if !last_versions.should_sync(networked.network_id, current_seq) {
continue;
}
debug!(
"generate_delta_system: Processing {} changed entities",
changed_entities.len()
);
// Increment our vector clock
node_clock.tick();
// Process each entity separately to avoid borrow conflicts
for (entity, network_id, _owner_node_id) in changed_entities {
// Phase 1: Check and update clocks, collect data
let mut system_state: bevy::ecs::system::SystemState<(
Res<GossipBridge>,
Res<AppTypeRegistry>,
ResMut<NodeVectorClock>,
ResMut<LastSyncVersions>,
Option<ResMut<crate::networking::OperationLog>>,
)> = bevy::ecs::system::SystemState::new(world);
// Build operations for all components
// TODO: Add BlobStore support in future phases
let operations = build_entity_operations(
entity,
world,
node_clock.node_id,
node_clock.clock.clone(),
&registry,
None, // blob_store - will be added in later phases
);
let (node_id, vector_clock, current_seq) = {
let (_, _, mut node_clock, last_versions, _) = system_state.get_mut(world);
// Check if we should sync this entity
let current_seq = node_clock.sequence();
if !last_versions.should_sync(network_id, current_seq) {
drop(last_versions);
drop(node_clock);
system_state.apply(world);
continue;
}
// Increment our vector clock
node_clock.tick();
(node_clock.node_id, node_clock.clock.clone(), current_seq)
};
// Phase 2: Build operations (needs world access without holding other borrows)
let operations = {
let type_registry = world.resource::<AppTypeRegistry>().read();
let ops = build_entity_operations(
entity,
world,
node_id,
vector_clock.clone(),
&type_registry,
None, // blob_store - will be added in later phases
);
drop(type_registry);
ops
};
if operations.is_empty() {
system_state.apply(world);
continue;
}
// Create EntityDelta
let delta = EntityDelta::new(
networked.network_id,
node_clock.node_id,
node_clock.clock.clone(),
operations,
);
// Phase 3: Record, broadcast, and update
let delta = {
let (bridge, _, _, mut last_versions, mut operation_log) = system_state.get_mut(world);
// Record in operation log for anti-entropy
if let Some(ref mut log) = operation_log {
log.record_operation(delta.clone());
}
// Wrap in VersionedMessage
let message = VersionedMessage::new(SyncMessage::EntityDelta {
entity_id: delta.entity_id,
node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(),
});
// Broadcast
if let Err(e) = bridge.send(message) {
error!("Failed to broadcast EntityDelta: {}", e);
} else {
debug!(
"Broadcast EntityDelta for entity {:?} with {} operations",
networked.network_id,
delta.operations.len()
// Create EntityDelta
let delta = EntityDelta::new(
network_id,
node_id,
vector_clock.clone(),
operations,
);
// Update last sync version
last_versions.update(networked.network_id, current_seq);
// Record in operation log for anti-entropy
if let Some(ref mut log) = operation_log {
log.record_operation(delta.clone());
}
// Wrap in VersionedMessage
let message = VersionedMessage::new(SyncMessage::EntityDelta {
entity_id: delta.entity_id,
node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(),
});
// Broadcast
if let Err(e) = bridge.send(message) {
error!("Failed to broadcast EntityDelta: {}", e);
} else {
debug!(
"Broadcast EntityDelta for entity {:?} with {} operations",
network_id,
delta.operations.len()
);
last_versions.update(network_id, current_seq);
}
delta
};
// Phase 4: Update component vector clocks for local modifications
{
if let Some(mut component_clocks) = world.get_resource_mut::<crate::networking::ComponentVectorClocks>() {
for op in &delta.operations {
if let crate::networking::ComponentOp::Set { component_type, vector_clock: op_clock, .. } = op {
component_clocks.set(network_id, component_type.clone(), op_clock.clone(), node_id);
debug!(
"Updated local vector clock for {} on entity {:?} (node_id: {:?})",
component_type, network_id, node_id
);
}
}
}
}
system_state.apply(world);
}
}

View File

@@ -62,11 +62,37 @@ impl GossipBridge {
Ok(())
}
/// Try to receive a message from the gossip network
/// Try to receive a message from the gossip network (from incoming queue)
pub fn try_recv(&self) -> Option<VersionedMessage> {
    // A poisoned lock is treated as "no message available".
    let mut queue = self.incoming.lock().ok()?;
    queue.pop_front()
}
/// Drain all pending messages from the incoming queue atomically
///
/// This acquires the lock once and drains all messages, preventing race conditions
/// where messages could arrive between individual try_recv() calls.
pub fn drain_incoming(&self) -> Vec<VersionedMessage> {
    match self.incoming.lock() {
        Ok(mut queue) => queue.drain(..).collect(),
        // A poisoned lock yields no messages rather than panicking.
        Err(_) => Vec::new(),
    }
}
/// Try to get a message from the outgoing queue to send to gossip
pub fn try_recv_outgoing(&self) -> Option<VersionedMessage> {
    // A poisoned lock is treated as "no message available".
    let mut queue = self.outgoing.lock().ok()?;
    queue.pop_front()
}
/// Push a message to the incoming queue (for testing/integration)
pub fn push_incoming(&self, message: VersionedMessage) -> Result<()> {
    // Surface a poisoned lock as a gossip error instead of panicking.
    let mut queue = self
        .incoming
        .lock()
        .map_err(|e| NetworkingError::Gossip(format!("Failed to lock incoming queue: {}", e)))?;
    queue.push_back(message);
    Ok(())
}
/// Get our node ID
pub fn node_id(&self) -> NodeId {
self.node_id

View File

@@ -199,11 +199,15 @@ pub fn apply_full_state(
continue;
}
// Spawn entity with NetworkedEntity component
// Spawn entity with NetworkedEntity and Persisted components
// This ensures entities received via FullState are persisted locally
let entity = commands
.spawn(NetworkedEntity::with_id(
entity_state.entity_id,
entity_state.owner_node_id,
.spawn((
NetworkedEntity::with_id(
entity_state.entity_id,
entity_state.owner_node_id,
),
crate::persistence::Persisted::with_id(entity_state.entity_id),
))
.id();

View File

@@ -4,13 +4,15 @@
//! multiple systems each polling the same message queue. Instead, a single
//! dispatcher system polls once and routes messages to appropriate handlers.
use bevy::prelude::*;
use bevy::{
ecs::system::SystemState,
prelude::*,
};
use crate::networking::{
apply_entity_delta,
apply_full_state,
blob_support::BlobStore,
build_full_state,
build_missing_deltas,
delta_generation::NodeVectorClock,
entity_map::NetworkEntityMap,
@@ -47,93 +49,122 @@ use crate::networking::{
/// App::new()
/// .add_systems(Update, message_dispatcher_system);
/// ```
pub fn message_dispatcher_system(
world: &World,
mut commands: Commands,
bridge: Option<Res<GossipBridge>>,
mut entity_map: ResMut<NetworkEntityMap>,
type_registry: Res<AppTypeRegistry>,
mut node_clock: ResMut<NodeVectorClock>,
blob_store: Option<Res<BlobStore>>,
mut tombstone_registry: Option<ResMut<TombstoneRegistry>>,
operation_log: Option<Res<OperationLog>>,
networked_entities: Query<(Entity, &NetworkedEntity)>,
) {
let Some(bridge) = bridge else {
pub fn message_dispatcher_system(world: &mut World) {
// This is an exclusive system to avoid parameter conflicts with world access
// Check if bridge exists
if world.get_resource::<GossipBridge>().is_none() {
return;
}
// Atomically drain all pending messages from the incoming queue
// This prevents race conditions where messages could arrive between individual try_recv() calls
let messages: Vec<crate::networking::VersionedMessage> = {
let bridge = world.resource::<GossipBridge>();
bridge.drain_incoming()
};
let registry = type_registry.read();
let blob_store_ref = blob_store.as_deref();
// Dispatch each message (bridge is no longer borrowed)
for message in messages {
dispatch_message(world, message);
}
// Poll messages once and route to appropriate handlers
while let Some(message) = bridge.try_recv() {
match message.message {
// EntityDelta - apply remote operations
| SyncMessage::EntityDelta {
// Flush all queued commands to ensure components are inserted immediately
world.flush();
}
/// Helper function to dispatch a single message
/// This is separate to allow proper borrowing of world resources
fn dispatch_message(
world: &mut World,
message: crate::networking::VersionedMessage,
) {
match message.message {
// EntityDelta - apply remote operations
| SyncMessage::EntityDelta {
entity_id,
node_id,
vector_clock,
operations,
} => {
let delta = crate::networking::EntityDelta {
entity_id,
node_id,
vector_clock,
operations,
} => {
let delta = crate::networking::EntityDelta {
entity_id,
node_id,
vector_clock,
operations,
};
};
debug!(
"Received EntityDelta for entity {:?} with {} operations",
delta.entity_id,
delta.operations.len()
);
debug!(
"Received EntityDelta for entity {:?} with {} operations",
delta.entity_id,
delta.operations.len()
);
apply_entity_delta(
&delta,
&mut commands,
&mut entity_map,
&registry,
&mut node_clock,
blob_store_ref,
tombstone_registry.as_deref_mut(),
);
apply_entity_delta(&delta, world);
}
// JoinRequest - new peer joining
| SyncMessage::JoinRequest {
node_id,
session_secret,
} => {
info!("Received JoinRequest from node {}", node_id);
// TODO: Validate session_secret in Phase 13
if let Some(_secret) = session_secret {
debug!("Session secret validation not yet implemented");
}
// JoinRequest - new peer joining
| SyncMessage::JoinRequest {
node_id,
session_secret,
} => {
info!("Received JoinRequest from node {}", node_id);
// Build and send full state
// We need to collect data in separate steps to avoid borrow conflicts
let networked_entities = {
let mut query = world.query::<(Entity, &NetworkedEntity)>();
query.iter(world).collect::<Vec<_>>()
};
// TODO: Validate session_secret in Phase 13
if let Some(_secret) = session_secret {
debug!("Session secret validation not yet implemented");
}
let full_state = {
let type_registry = world.resource::<AppTypeRegistry>().read();
let node_clock = world.resource::<NodeVectorClock>();
let blob_store = world.get_resource::<BlobStore>();
// Build and send full state
let full_state = build_full_state(
build_full_state_from_data(
world,
&networked_entities,
&registry,
&type_registry,
&node_clock,
blob_store_ref,
);
blob_store.map(|b| b as &BlobStore),
)
};
// Get bridge to send response
if let Some(bridge) = world.get_resource::<GossipBridge>() {
if let Err(e) = bridge.send(full_state) {
error!("Failed to send FullState: {}", e);
} else {
info!("Sent FullState to node {}", node_id);
}
}
}
// FullState - receiving world state after join
| SyncMessage::FullState {
entities,
vector_clock,
} => {
info!("Received FullState with {} entities", entities.len());
// FullState - receiving world state after join
| SyncMessage::FullState {
entities,
vector_clock,
} => {
info!("Received FullState with {} entities", entities.len());
// Use SystemState to properly borrow multiple resources
let mut system_state: SystemState<(
Commands,
ResMut<NetworkEntityMap>,
Res<AppTypeRegistry>,
ResMut<NodeVectorClock>,
Option<Res<BlobStore>>,
Option<ResMut<TombstoneRegistry>>,
)> = SystemState::new(world);
{
let (mut commands, mut entity_map, type_registry, mut node_clock, blob_store, mut tombstone_registry) = system_state.get_mut(world);
let registry = type_registry.read();
apply_full_state(
entities,
@@ -142,68 +173,163 @@ pub fn message_dispatcher_system(
&mut entity_map,
&registry,
&mut node_clock,
blob_store_ref,
blob_store.as_deref(),
tombstone_registry.as_deref_mut(),
);
// registry is dropped here
}
// SyncRequest - peer requesting missing operations
| SyncMessage::SyncRequest {
node_id: requesting_node,
vector_clock: their_clock,
} => {
debug!("Received SyncRequest from node {}", requesting_node);
system_state.apply(world);
}
if let Some(ref op_log) = operation_log {
// Find operations they're missing
let missing_deltas = op_log.get_all_operations_newer_than(&their_clock);
// SyncRequest - peer requesting missing operations
| SyncMessage::SyncRequest {
node_id: requesting_node,
vector_clock: their_clock,
} => {
debug!("Received SyncRequest from node {}", requesting_node);
if !missing_deltas.is_empty() {
info!(
"Sending {} missing deltas to node {}",
missing_deltas.len(),
requesting_node
);
if let Some(op_log) = world.get_resource::<OperationLog>() {
// Find operations they're missing
let missing_deltas = op_log.get_all_operations_newer_than(&their_clock);
// Send MissingDeltas response
let response = build_missing_deltas(missing_deltas);
if !missing_deltas.is_empty() {
info!(
"Sending {} missing deltas to node {}",
missing_deltas.len(),
requesting_node
);
// Send MissingDeltas response
let response = build_missing_deltas(missing_deltas);
if let Some(bridge) = world.get_resource::<GossipBridge>() {
if let Err(e) = bridge.send(response) {
error!("Failed to send MissingDeltas: {}", e);
}
} else {
debug!("No missing deltas for node {}", requesting_node);
}
} else {
warn!("Received SyncRequest but OperationLog resource not available");
debug!("No missing deltas for node {}", requesting_node);
}
} else {
warn!("Received SyncRequest but OperationLog resource not available");
}
}
// MissingDeltas - receiving operations we requested
| SyncMessage::MissingDeltas { deltas } => {
info!("Received MissingDeltas with {} operations", deltas.len());
// MissingDeltas - receiving operations we requested
| SyncMessage::MissingDeltas { deltas } => {
info!("Received MissingDeltas with {} operations", deltas.len());
// Apply each delta
for delta in deltas {
debug!(
"Applying missing delta for entity {:?}",
delta.entity_id
);
// Apply each delta
for delta in deltas {
debug!(
"Applying missing delta for entity {:?}",
delta.entity_id
);
apply_entity_delta(
&delta,
&mut commands,
&mut entity_map,
&registry,
&mut node_clock,
blob_store_ref,
tombstone_registry.as_deref_mut(),
);
}
apply_entity_delta(&delta, world);
}
}
}
}
/// Helper to build full state from collected data
///
/// Serializes every reflectable component on each networked entity into a
/// `FullState` message for a newly joining peer. Tombstoned entities are
/// filtered out so deletions are not resurrected on the joining node.
///
/// # Parameters
///
/// - `world`: World used to read entity components and the tombstone registry
/// - `networked_entities`: Pre-collected (Entity, &NetworkedEntity) pairs to snapshot
/// - `type_registry`: Registry used to discover and serialize reflectable components
/// - `node_clock`: Our node's vector clock, stamped on every entity state
/// - `blob_store`: Optional store; when present, large payloads may become blob refs
fn build_full_state_from_data(
world: &World,
networked_entities: &[(Entity, &NetworkedEntity)],
type_registry: &bevy::reflect::TypeRegistry,
node_clock: &NodeVectorClock,
blob_store: Option<&BlobStore>,
) -> crate::networking::VersionedMessage {
use crate::{
networking::{
blob_support::create_component_data,
messages::{
ComponentState,
EntityState,
},
},
persistence::reflection::serialize_component,
};
// Get tombstone registry to filter out deleted entities
let tombstone_registry = world.get_resource::<crate::networking::TombstoneRegistry>();
let mut entities = Vec::new();
for (entity, networked) in networked_entities {
// Skip tombstoned entities to prevent resurrection on joining nodes
if let Some(registry) = &tombstone_registry {
if registry.is_deleted(networked.network_id) {
debug!(
"Skipping tombstoned entity {:?} in full state build",
networked.network_id
);
continue;
}
}
let entity_ref = world.entity(*entity);
let mut components = Vec::new();
// Iterate over all type registrations to find components
for registration in type_registry.iter() {
// Skip if no ReflectComponent data
let Some(reflect_component) = registration.data::<ReflectComponent>() else {
continue;
};
let type_path = registration.type_info().type_path();
// Skip networked wrapper components
if type_path.ends_with("::NetworkedEntity")
|| type_path.ends_with("::NetworkedTransform")
|| type_path.ends_with("::NetworkedSelection")
|| type_path.ends_with("::NetworkedDrawingPath")
{
continue;
}
// Try to reflect this component from the entity
if let Some(reflected) = reflect_component.reflect(entity_ref) {
// Serialize the component
// NOTE(review): serialization failures are silently skipped (best-effort) —
// the component is simply omitted from the snapshot.
if let Ok(serialized) = serialize_component(reflected, type_registry) {
// Create component data (inline or blob)
let data = if let Some(store) = blob_store {
match create_component_data(serialized, store) {
Ok(d) => d,
// Blob-store failures also drop the component silently.
Err(_) => continue,
}
} else {
crate::networking::ComponentData::Inline(serialized)
};
components.push(ComponentState {
component_type: type_path.to_string(),
data,
});
}
}
}
entities.push(EntityState {
entity_id: networked.network_id,
owner_node_id: networked.owner_node_id,
// NOTE(review): every entity state carries the node-wide clock rather than a
// per-entity clock — confirm this is the intended semantics for FullState merges.
vector_clock: node_clock.clock.clone(),
components,
is_deleted: false,
});
}
info!(
"Built FullState with {} entities for new peer",
entities.len()
);
crate::networking::VersionedMessage::new(SyncMessage::FullState {
entities,
vector_clock: node_clock.clock.clone(),
})
}
#[cfg(test)]
mod tests {
#[test]

View File

@@ -71,3 +71,56 @@ pub use rga::*;
pub use sync_component::*;
pub use tombstones::*;
pub use vector_clock::*;
/// Spawn a networked entity with persistence enabled
///
/// Single entry point for creating an entity that is both synchronized
/// (`NetworkedEntity`) and persisted (`Persisted`), and that is registered in
/// the `NetworkEntityMap` under its network ID.
///
/// # Parameters
/// - `world`: Bevy world to spawn entity in
/// - `entity_id`: Network ID for the entity (UUID)
/// - `node_id`: ID of the node that owns this entity
///
/// # Returns
/// The spawned Bevy entity's ID
///
/// # Example
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::spawn_networked_entity;
/// use uuid::Uuid;
///
/// fn my_system(world: &mut World) {
///     let entity_id = Uuid::new_v4();
///     let node_id = Uuid::new_v4();
///     let entity = spawn_networked_entity(world, entity_id, node_id);
///     // Entity is now registered and ready for sync/persistence
/// }
/// ```
pub fn spawn_networked_entity(
    world: &mut bevy::prelude::World,
    entity_id: uuid::Uuid,
    node_id: uuid::Uuid,
) -> bevy::prelude::Entity {
    use bevy::prelude::*;
    // Attach the sync marker and the persistence marker in a single spawn.
    let spawned = world
        .spawn((
            NetworkedEntity::with_id(entity_id, node_id),
            crate::persistence::Persisted::with_id(entity_id),
        ))
        .id();
    // Make the entity addressable by its network ID.
    world
        .resource_mut::<NetworkEntityMap>()
        .insert(entity_id, spawned);
    info!(
        "Spawned new networked entity {:?} from node {}",
        entity_id, node_id
    );
    spawned
}

View File

@@ -25,7 +25,7 @@ use crate::{
VectorClock,
},
},
persistence::reflection::serialize_component,
persistence::reflection::serialize_component_typed,
};
/// Build a Set operation (LWW) from a component
@@ -55,7 +55,7 @@ pub fn build_set_operation(
blob_store: Option<&BlobStore>,
) -> Result<ComponentOp> {
// Serialize the component
let serialized = serialize_component(component, type_registry)?;
let serialized = serialize_component_typed(component, type_registry)?;
// Create component data (inline or blob)
let data = if let Some(store) = blob_store {
@@ -97,6 +97,8 @@ pub fn build_entity_operations(
let mut operations = Vec::new();
let entity_ref = world.entity(entity);
debug!("build_entity_operations: Building operations for entity {:?}", entity);
// Iterate over all type registrations
for registration in type_registry.iter() {
// Skip if no ReflectComponent data
@@ -119,7 +121,7 @@ pub fn build_entity_operations(
// Try to reflect this component from the entity
if let Some(reflected) = reflect_component.reflect(entity_ref) {
// Serialize the component
if let Ok(serialized) = serialize_component(reflected, type_registry) {
if let Ok(serialized) = serialize_component_typed(reflected, type_registry) {
// Create component data (inline or blob)
let data = if let Some(store) = blob_store {
if let Ok(component_data) = create_component_data(serialized, store) {
@@ -138,12 +140,19 @@ pub fn build_entity_operations(
operations.push(ComponentOp::Set {
component_type: type_path.to_string(),
data,
vector_clock: clock,
vector_clock: clock.clone(),
});
debug!(" ✓ Added Set operation for {}", type_path);
}
}
}
debug!(
"build_entity_operations: Built {} operations for entity {:?}",
operations.len(),
entity
);
operations
}
@@ -173,7 +182,7 @@ pub fn build_transform_operation(
blob_store: Option<&BlobStore>,
) -> Result<ComponentOp> {
// Use reflection to serialize Transform
let serialized = serialize_component(transform.as_reflect(), type_registry)?;
let serialized = serialize_component_typed(transform.as_reflect(), type_registry)?;
// Create component data (inline or blob)
let data = if let Some(store) = blob_store {

View File

@@ -307,21 +307,14 @@ pub fn handle_sync_requests_system(
/// System to handle MissingDeltas messages
///
/// When we receive MissingDeltas (in response to our SyncRequest), apply them.
pub fn handle_missing_deltas_system(
mut commands: Commands,
bridge: Option<Res<GossipBridge>>,
mut entity_map: ResMut<crate::networking::NetworkEntityMap>,
type_registry: Res<AppTypeRegistry>,
mut node_clock: ResMut<NodeVectorClock>,
blob_store: Option<Res<crate::networking::BlobStore>>,
mut tombstone_registry: Option<ResMut<crate::networking::TombstoneRegistry>>,
) {
let Some(bridge) = bridge else {
pub fn handle_missing_deltas_system(world: &mut World) {
// Check if bridge exists
if world.get_resource::<GossipBridge>().is_none() {
return;
};
}
let registry = type_registry.read();
let blob_store_ref = blob_store.as_deref();
// Clone the bridge to avoid borrowing issues
let bridge = world.resource::<GossipBridge>().clone();
// Poll for MissingDeltas messages
while let Some(message) = bridge.try_recv() {
@@ -336,15 +329,7 @@ pub fn handle_missing_deltas_system(
delta.entity_id
);
crate::networking::apply_entity_delta(
&delta,
&mut commands,
&mut entity_map,
&registry,
&mut node_clock,
blob_store_ref,
tombstone_registry.as_deref_mut(),
);
crate::networking::apply_entity_delta(&delta, world);
}
}
| _ => {

View File

@@ -27,7 +27,10 @@
use bevy::prelude::*;
use crate::networking::{
change_detection::LastSyncVersions,
change_detection::{
auto_detect_transform_changes_system,
LastSyncVersions,
},
delta_generation::{
generate_delta_system,
NodeVectorClock,
@@ -158,7 +161,8 @@ impl Plugin for NetworkingPlugin {
.insert_resource(NetworkEntityMap::new())
.insert_resource(LastSyncVersions::default())
.insert_resource(OperationLog::new())
.insert_resource(TombstoneRegistry::new());
.insert_resource(TombstoneRegistry::new())
.insert_resource(crate::networking::ComponentVectorClocks::new());
// PreUpdate systems - handle incoming messages first
app.add_systems(
@@ -178,6 +182,8 @@ impl Plugin for NetworkingPlugin {
app.add_systems(
Update,
(
// Track Transform changes and mark NetworkedTransform as changed
auto_detect_transform_changes_system,
// Handle local entity deletions
handle_local_deletions_system,
),

View File

@@ -167,29 +167,61 @@ fn persistence_startup_system(db: Res<PersistenceDb>, mut metrics: ResMut<Persis
/// For automatic tracking without manual `mark_dirty()` calls, use the
/// `auto_track_component_changes_system` which automatically detects changes
/// to common components like Transform, GlobalTransform, etc.
fn collect_dirty_entities_bevy_system(
mut dirty: ResMut<DirtyEntitiesResource>,
mut write_buffer: ResMut<WriteBufferResource>,
query: Query<(Entity, &Persisted), Changed<Persisted>>,
world: &World,
type_registry: Res<AppTypeRegistry>,
) {
let registry = type_registry.read();
fn collect_dirty_entities_bevy_system(world: &mut World) {
// Collect changed entities first
let changed_entities: Vec<(Entity, uuid::Uuid)> = {
let mut query = world.query_filtered::<(Entity, &Persisted), Changed<Persisted>>();
query.iter(world)
.map(|(entity, persisted)| (entity, persisted.network_id))
.collect()
};
if changed_entities.is_empty() {
return;
}
// Serialize components for each entity
for (entity, network_id) in changed_entities {
// First, ensure the entity exists in the database
{
let now = chrono::Utc::now();
let mut write_buffer = world.resource_mut::<WriteBufferResource>();
write_buffer.add(PersistenceOp::UpsertEntity {
id: network_id,
data: EntityData {
id: network_id,
created_at: now,
updated_at: now,
entity_type: "NetworkedEntity".to_string(),
},
});
}
// Track changed entities and serialize all their components
for (entity, persisted) in query.iter() {
// Serialize all components on this entity (generic tracking)
let components = serialize_all_components_from_entity(entity, world, &registry);
let components = {
let type_registry = world.resource::<AppTypeRegistry>().read();
let comps = serialize_all_components_from_entity(entity, world, &type_registry);
drop(type_registry);
comps
};
// Add operations for each component
for (component_type, data) in components {
dirty.mark_dirty(persisted.network_id, &component_type);
// Get mutable access to dirty and mark it
{
let mut dirty = world.resource_mut::<DirtyEntitiesResource>();
dirty.mark_dirty(network_id, &component_type);
}
write_buffer.add(PersistenceOp::UpsertComponent {
entity_id: persisted.network_id,
component_type,
data,
});
// Get mutable access to write_buffer and add the operation
{
let mut write_buffer = world.resource_mut::<WriteBufferResource>();
write_buffer.add(PersistenceOp::UpsertComponent {
entity_id: network_id,
component_type,
data,
});
}
}
}
}

View File

@@ -9,12 +9,14 @@ use bevy::{
reflect::{
TypeRegistry,
serde::{
ReflectDeserializer,
ReflectSerializer,
TypedReflectDeserializer,
TypedReflectSerializer,
},
},
};
use bincode::Options as _;
use serde::de::DeserializeSeed;
use crate::persistence::error::{
PersistenceError,
Result,
@@ -101,7 +103,21 @@ pub fn serialize_component(
type_registry: &TypeRegistry,
) -> Result<Vec<u8>> {
let serializer = ReflectSerializer::new(component, type_registry);
bincode::serialize(&serializer).map_err(PersistenceError::from)
bincode::options().serialize(&serializer)
.map_err(PersistenceError::from)
}
/// Serialize a component whose concrete type is already known.
///
/// `TypedReflectSerializer` omits the type-path envelope that
/// `ReflectSerializer` emits, so the resulting bytes pair with
/// `TypedReflectDeserializer` when decoding binary formats such as bincode.
pub fn serialize_component_typed(
    component: &dyn Reflect,
    type_registry: &TypeRegistry,
) -> Result<Vec<u8>> {
    let typed = TypedReflectSerializer::new(component, type_registry);
    let bytes = bincode::options()
        .serialize(&typed)
        .map_err(PersistenceError::from)?;
    Ok(bytes)
}
/// Deserialize a component using Bevy's reflection system
@@ -134,9 +150,30 @@ pub fn deserialize_component(
type_registry: &TypeRegistry,
) -> Result<Box<dyn PartialReflect>> {
let mut deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
let reflect_deserializer = ReflectDeserializer::new(type_registry);
let reflect_deserializer = bevy::reflect::serde::ReflectDeserializer::new(type_registry);
reflect_deserializer
.deserialize(&mut deserializer)
.map_err(|e| PersistenceError::Deserialization(e.to_string()))
}
/// Deserialize a component when the type is known
///
/// Uses `TypedReflectDeserializer` which is more efficient for binary formats like bincode
/// when the component type is known at deserialization time.
pub fn deserialize_component_typed(
bytes: &[u8],
component_type: &str,
type_registry: &TypeRegistry,
) -> Result<Box<dyn PartialReflect>> {
let registration = type_registry.get_with_type_path(component_type)
.ok_or_else(|| PersistenceError::Deserialization(
format!("Type {} not registered", component_type)
))?;
let mut deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
let reflect_deserializer = TypedReflectDeserializer::new(registration, type_registry);
use serde::de::DeserializeSeed;
reflect_deserializer
.deserialize(&mut deserializer)
.map_err(|e| PersistenceError::Deserialization(e.to_string()))
@@ -235,8 +272,9 @@ pub fn serialize_all_components_from_entity(
// Try to reflect this component from the entity
if let Some(reflected) = reflect_component.reflect(entity_ref) {
// Serialize the component
if let Ok(data) = serialize_component(reflected, type_registry) {
// Serialize the component using typed serialization for consistency
// This matches the format expected by deserialize_component_typed
if let Ok(data) = serialize_component_typed(reflected, type_registry) {
components.push((type_path.to_string(), data));
}
}

View File

@@ -0,0 +1,915 @@
//! Headless integration tests for networking and persistence
//!
//! These tests validate end-to-end CRDT synchronization and persistence
//! using multiple headless Bevy apps with real iroh-gossip networking.
use std::{
    path::{Path, PathBuf},
    time::{Duration, Instant},
};
use anyhow::Result;
use bevy::{
app::{App, ScheduleRunnerPlugin},
ecs::{
component::Component,
reflect::ReflectComponent,
world::World,
},
prelude::*,
reflect::Reflect,
MinimalPlugins,
};
use futures_lite::StreamExt;
use iroh::{Endpoint, protocol::Router};
use iroh_gossip::{
api::{GossipReceiver, GossipSender},
net::Gossip,
proto::TopicId,
};
use lib::{
networking::{
GossipBridge, NetworkedEntity, NetworkedTransform, NetworkingConfig, NetworkingPlugin,
Synced, VersionedMessage,
},
persistence::{PersistenceConfig, PersistencePlugin, Persisted},
};
use serde::{Deserialize, Serialize};
use sync_macros::Synced as SyncedDerive;
use tempfile::TempDir;
use uuid::Uuid;
// ============================================================================
// Test Components
// ============================================================================
/// Simple position component for testing sync
///
/// The `Synced` derive with `strategy = "LastWriteWins"` (version 1) marks
/// this component for LWW conflict resolution during CRDT merge.
#[derive(Component, Reflect, Serialize, Deserialize, Clone, Debug, PartialEq)]
#[reflect(Component)]
#[derive(SyncedDerive)]
#[sync(version = 1, strategy = "LastWriteWins")]
struct TestPosition {
    // Planar coordinates; the integration tests assert exact values.
    x: f32,
    y: f32,
}
/// Simple health component for testing sync
///
/// A second synced component type with the same LWW strategy as
/// `TestPosition`; registered in `create_test_app` alongside it.
#[derive(Component, Reflect, Serialize, Deserialize, Clone, Debug, PartialEq)]
#[reflect(Component)]
#[derive(SyncedDerive)]
#[sync(version = 1, strategy = "LastWriteWins")]
struct TestHealth {
    // Current and maximum health values; no clamping is applied here.
    current: f32,
    max: f32,
}
// ============================================================================
// Test Utilities
// ============================================================================
mod test_utils {
use super::*;
use rusqlite::{Connection, OptionalExtension};
/// Test context that manages temporary directories with RAII cleanup
pub struct TestContext {
    _temp_dir: TempDir,
    db_path: PathBuf,
}

impl TestContext {
    /// Create a fresh temporary directory containing a `test.db` path.
    /// The directory (and database) is deleted when the context drops.
    pub fn new() -> Self {
        let temp = TempDir::new().expect("Failed to create temp directory");
        let db_path = temp.path().join("test.db");
        Self {
            _temp_dir: temp,
            db_path,
        }
    }

    /// Owned copy of the database path inside the temp directory.
    pub fn db_path(&self) -> PathBuf {
        self.db_path.clone()
    }
}
/// Check if an entity exists in the database
pub fn entity_exists_in_db(db_path: &PathBuf, entity_id: Uuid) -> Result<bool> {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
let exists: bool = conn.query_row(
"SELECT COUNT(*) > 0 FROM entities WHERE id = ?1",
[entity_id_bytes.as_slice()],
|row| row.get(0),
)?;
Ok(exists)
}
/// Check if a component exists for an entity in the database
pub fn component_exists_in_db(
db_path: &PathBuf,
entity_id: Uuid,
component_type: &str,
) -> Result<bool> {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
let exists: bool = conn.query_row(
"SELECT COUNT(*) > 0 FROM components WHERE entity_id = ?1 AND component_type = ?2",
rusqlite::params![entity_id_bytes.as_slice(), component_type],
|row| row.get(0),
)?;
Ok(exists)
}
/// Load a component from the database and deserialize it
pub fn load_component_from_db<T: Component + Reflect + Clone>(
db_path: &PathBuf,
entity_id: Uuid,
component_type: &str,
type_registry: &bevy::reflect::TypeRegistry,
) -> Result<Option<T>> {
let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes();
let data_result: std::result::Result<Vec<u8>, rusqlite::Error> = conn
.query_row(
"SELECT data FROM components WHERE entity_id = ?1 AND component_type = ?2",
rusqlite::params![entity_id_bytes.as_slice(), component_type],
|row| row.get(0),
);
let data = data_result.optional()?;
if let Some(bytes) = data {
use lib::persistence::reflection::deserialize_component_typed;
let reflected = deserialize_component_typed(&bytes, component_type, type_registry)?;
if let Some(concrete) = reflected.try_downcast_ref::<T>() {
Ok(Some(concrete.clone()))
} else {
anyhow::bail!("Failed to downcast component to concrete type")
}
} else {
Ok(None)
}
}
/// Create a headless Bevy app configured for testing
///
/// Runs `MinimalPlugins` with a 60 Hz schedule runner, wires the supplied
/// `GossipBridge` resource, and installs the networking and persistence
/// plugins with intervals shortened for tests. Test component types are
/// registered for reflection so they can be serialized.
pub fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App {
    // 60 Hz headless tick.
    let runner = ScheduleRunnerPlugin::run_loop(Duration::from_secs_f64(1.0 / 60.0));
    let net_config = NetworkingConfig {
        node_id,
        sync_interval_secs: 0.5, // Fast for testing
        prune_interval_secs: 10.0,
        tombstone_gc_interval_secs: 30.0,
    };
    let persist_config = PersistenceConfig {
        flush_interval_secs: 1,
        checkpoint_interval_secs: 5,
        battery_adaptive: false,
        ..Default::default()
    };

    let mut app = App::new();
    app.add_plugins(MinimalPlugins.set(runner))
        .insert_resource(bridge)
        .add_plugins(NetworkingPlugin::new(net_config))
        .add_plugins(PersistencePlugin::with_config(db_path, persist_config));

    // Register test component types for reflection
    app.register_type::<TestPosition>()
        .register_type::<TestHealth>();
    app
}
/// Count entities with a specific network ID
pub fn count_entities_with_id(world: &mut World, network_id: Uuid) -> usize {
    let mut state = world.query::<&NetworkedEntity>();
    state
        .iter(world)
        .filter(|networked| networked.network_id == network_id)
        .count()
}
/// Assert that an entity with specific network ID and position exists
///
/// Bails with a descriptive error when the entity is missing or when the
/// first matching entity's position differs from `expected_position`.
pub fn assert_entity_synced(
    world: &mut World,
    network_id: Uuid,
    expected_position: TestPosition,
) -> Result<()> {
    let mut state = world.query::<(&NetworkedEntity, &TestPosition)>();
    let hit = state
        .iter(world)
        .find(|(networked, _)| networked.network_id == network_id);
    match hit {
        Some((_, position)) if *position == expected_position => Ok(()),
        Some((_, position)) => anyhow::bail!(
            "Position mismatch for entity {}: expected {:?}, got {:?}",
            network_id,
            expected_position,
            position
        ),
        None => anyhow::bail!("Entity {} not found in world", network_id),
    }
}
/// Wait for sync condition to be met, polling both apps
///
/// Each iteration advances both apps one frame, evaluates `check_fn`, and
/// sleeps ~16 ms so the async runtime can drive the networking tasks. Bails
/// once `timeout` elapses without the condition holding.
pub async fn wait_for_sync<F>(
    app1: &mut App,
    app2: &mut App,
    timeout: Duration,
    check_fn: F,
) -> Result<()>
where
    F: Fn(&mut World, &mut World) -> bool,
{
    let started = Instant::now();
    let mut ticks = 0;
    loop {
        if started.elapsed() >= timeout {
            break;
        }
        // Advance both worlds one frame.
        app1.update();
        app2.update();
        ticks += 1;
        // Periodic progress report.
        if ticks % 50 == 0 {
            println!("Waiting for sync... tick {} ({:.1}s elapsed)", ticks, started.elapsed().as_secs_f32());
        }
        if check_fn(app1.world_mut(), app2.world_mut()) {
            println!("Sync completed after {} ticks ({:.3}s)", ticks, started.elapsed().as_secs_f32());
            return Ok(());
        }
        // Yield to the runtime between ticks.
        tokio::time::sleep(Duration::from_millis(16)).await;
    }
    println!("Sync timeout after {} ticks", ticks);
    anyhow::bail!("Sync timeout after {:?}. Condition not met.", timeout)
}
/// Initialize a single iroh-gossip node
///
/// Builds an iroh endpoint with mDNS discovery, spawns the gossip protocol
/// and a router accepting the gossip ALPN, optionally seeds and dials the
/// given bootstrap peers, subscribes to `topic_id`, and wires the resulting
/// sender/receiver into a fresh `GossipBridge` via background tasks.
///
/// Returns the endpoint, gossip handle, router, and bridge; the caller keeps
/// them alive for the duration of the test.
async fn init_gossip_node(
    topic_id: TopicId,
    bootstrap_addrs: Vec<iroh::EndpointAddr>,
) -> Result<(Endpoint, Gossip, Router, GossipBridge)> {
    println!(" Creating endpoint with mDNS discovery...");
    // Create the Iroh endpoint with mDNS local discovery
    let endpoint = Endpoint::builder()
        .discovery(iroh::discovery::mdns::MdnsDiscovery::builder())
        .bind()
        .await?;
    let endpoint_id = endpoint.addr().id;
    println!(" Endpoint created: {}", endpoint_id);
    // Convert 32-byte endpoint ID to 16-byte UUID by taking first 16 bytes
    // (lossy but deterministic; collisions are not a concern in tests).
    let id_bytes = endpoint_id.as_bytes();
    let mut uuid_bytes = [0u8; 16];
    uuid_bytes.copy_from_slice(&id_bytes[..16]);
    let node_id = Uuid::from_bytes(uuid_bytes);
    println!(" Spawning gossip protocol...");
    // Build the gossip protocol
    let gossip = Gossip::builder().spawn(endpoint.clone());
    println!(" Setting up router...");
    // Setup the router to handle incoming connections
    let router = Router::builder(endpoint.clone())
        .accept(iroh_gossip::ALPN, gossip.clone())
        .spawn();
    // Add bootstrap peers to endpoint's discovery using StaticProvider
    let bootstrap_count = bootstrap_addrs.len();
    let has_bootstrap_peers = !bootstrap_addrs.is_empty();
    // Collect bootstrap IDs before moving the addresses
    let bootstrap_ids: Vec<_> = bootstrap_addrs.iter().map(|a| a.id).collect();
    if has_bootstrap_peers {
        let static_provider = iroh::discovery::static_provider::StaticProvider::default();
        for addr in &bootstrap_addrs {
            static_provider.add_endpoint_info(addr.clone());
        }
        endpoint.discovery().add(static_provider);
        println!(" Added {} bootstrap peers to static discovery", bootstrap_count);
        // Explicitly connect to bootstrap peers; failures are logged, not
        // fatal (mDNS may still find the peer later).
        println!(" Connecting to bootstrap peers...");
        for addr in &bootstrap_addrs {
            match endpoint.connect(addr.clone(), iroh_gossip::ALPN).await {
                Ok(_conn) => println!(" ✓ Connected to bootstrap peer: {}", addr.id),
                Err(e) => println!(" ✗ Failed to connect to bootstrap peer {}: {}", addr.id, e),
            }
        }
    }
    println!(" Subscribing to topic with {} bootstrap peers...", bootstrap_count);
    // Subscribe to the topic (the IDs now have addresses via discovery)
    let subscribe_handle = gossip.subscribe(topic_id, bootstrap_ids).await?;
    println!(" Splitting sender/receiver...");
    // Split into sender and receiver
    let (sender, mut receiver) = subscribe_handle.split();
    // Only wait for join if we have bootstrap peers
    // receiver.joined() waits until we've connected to at least one peer
    // If there are no bootstrap peers (first node), skip this step
    if has_bootstrap_peers {
        println!(" Waiting for join to complete (with timeout)...");
        // Use a timeout in case mDNS discovery takes a while or fails
        match tokio::time::timeout(Duration::from_secs(3), receiver.joined()).await {
            Ok(Ok(())) => println!(" Join completed!"),
            Ok(Err(e)) => println!(" Join error: {}", e),
            Err(_) => println!(" Join timeout - proceeding anyway (mDNS may still connect later)"),
        }
    } else {
        println!(" No bootstrap peers - skipping join wait (first node in swarm)");
    }
    // Create bridge and wire it up
    let bridge = GossipBridge::new(node_id);
    println!(" Spawning bridge tasks...");
    // Spawn background tasks to forward messages between gossip and bridge
    spawn_gossip_bridge_tasks(sender, receiver, bridge.clone());
    println!(" Node initialization complete");
    Ok((endpoint, gossip, router, bridge))
}
/// Setup a pair of iroh-gossip nodes connected to the same topic
///
/// Node 1 starts with no bootstrap peers; node 2 bootstraps from node 1's
/// full address. A short sleep afterwards gives mDNS/gossip time to finish
/// peer discovery before the caller starts exchanging messages.
pub async fn setup_gossip_pair() -> Result<(
    Endpoint,
    Endpoint,
    Router,
    Router,
    GossipBridge,
    GossipBridge,
)> {
    // Both nodes subscribe to one fixed, shared topic.
    let topic_id = TopicId::from_bytes([42; 32]);
    println!("Using topic ID: {:?}", topic_id);

    println!("Initializing node 1...");
    let (endpoint1, _gossip1, router1, bridge1) = init_gossip_node(topic_id, vec![]).await?;
    println!("Node 1 initialized with ID: {}", endpoint1.addr().id);

    // Node 1's full address (ID + network addresses) seeds node 2's discovery.
    let node1_addr = endpoint1.addr().clone();
    println!("Node 1 full address: {:?}", node1_addr);

    println!("Initializing node 2 with bootstrap peer: {}", node1_addr.id);
    let (endpoint2, _gossip2, router2, bridge2) =
        init_gossip_node(topic_id, vec![node1_addr]).await?;
    println!("Node 2 initialized with ID: {}", endpoint2.addr().id);

    println!("Waiting for mDNS/gossip peer discovery...");
    tokio::time::sleep(Duration::from_secs(2)).await;
    println!("Peer discovery wait complete");

    Ok((endpoint1, endpoint2, router1, router2, bridge1, bridge2))
}
/// Spawn background tasks to forward messages between iroh-gossip and GossipBridge
///
/// Detaches two tokio tasks:
/// 1. bridge outgoing queue → bincode-serialize → gossip broadcast
/// 2. gossip receiver stream → bincode-deserialize → bridge incoming queue
///
/// NOTE(review): the outgoing task loops forever (no shutdown signal); it is
/// dropped only when the test's runtime shuts down. The incoming task exits
/// when the gossip stream ends.
fn spawn_gossip_bridge_tasks(
    sender: GossipSender,
    mut receiver: GossipReceiver,
    bridge: GossipBridge,
) {
    let node_id = bridge.node_id();
    // Task 1: Forward from bridge.outgoing → gossip sender
    let bridge_out = bridge.clone();
    tokio::spawn(async move {
        let mut msg_count = 0;
        loop {
            // Poll the bridge's outgoing queue (non-blocking)
            if let Some(versioned_msg) = bridge_out.try_recv_outgoing() {
                msg_count += 1;
                println!("[Node {}] Sending message #{} via gossip", node_id, msg_count);
                // Serialize the message; failures are logged and the message dropped
                match bincode::serialize(&versioned_msg) {
                    Ok(bytes) => {
                        // Broadcast via gossip
                        if let Err(e) = sender.broadcast(bytes.into()).await {
                            eprintln!("[Node {}] Failed to broadcast message: {}", node_id, e);
                        } else {
                            println!("[Node {}] Message #{} broadcasted successfully", node_id, msg_count);
                        }
                    }
                    Err(e) => eprintln!("[Node {}] Failed to serialize message for broadcast: {}", node_id, e),
                }
            }
            // Small delay to avoid spinning
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
    });
    // Task 2: Forward from gossip receiver → bridge.incoming
    let bridge_in = bridge.clone();
    tokio::spawn(async move {
        let mut msg_count = 0;
        println!("[Node {}] Gossip receiver task started", node_id);
        loop {
            // Receive from gossip (GossipReceiver is a Stream); the 100 ms
            // timeout keeps the loop responsive even when no events arrive
            match tokio::time::timeout(Duration::from_millis(100), receiver.next()).await {
                Ok(Some(Ok(event))) => {
                    println!("[Node {}] Received gossip event: {:?}", node_id, std::mem::discriminant(&event));
                    // Only payload events are forwarded; membership events are ignored
                    if let iroh_gossip::api::Event::Received(msg) = event {
                        msg_count += 1;
                        println!("[Node {}] Received message #{} from gossip", node_id, msg_count);
                        // Deserialize the message; failures are logged and dropped
                        match bincode::deserialize::<VersionedMessage>(&msg.content) {
                            Ok(versioned_msg) => {
                                // Push to bridge's incoming queue
                                if let Err(e) = bridge_in.push_incoming(versioned_msg) {
                                    eprintln!("[Node {}] Failed to push to bridge incoming: {}", node_id, e);
                                } else {
                                    println!("[Node {}] Message #{} pushed to bridge incoming", node_id, msg_count);
                                }
                            }
                            Err(e) => eprintln!("[Node {}] Failed to deserialize gossip message: {}", node_id, e),
                        }
                    }
                }
                Ok(Some(Err(e))) => eprintln!("[Node {}] Gossip receiver error: {}", node_id, e),
                Ok(None) => {
                    // Stream ended
                    println!("[Node {}] Gossip stream ended", node_id);
                    break;
                }
                Err(_) => {
                    // Timeout, no message available
                }
            }
        }
    });
}
}
// ============================================================================
// Integration Tests
// ============================================================================
/// Test 1: Basic entity sync (Node A spawns → Node B receives)
///
/// End-to-end flow: two headless apps over real iroh-gossip. Node 1 spawns a
/// networked + persisted entity; the test waits for it to appear on node 2,
/// verifies the in-memory component value, then verifies both nodes' SQLite
/// databases contain the entity and its serialized TestPosition.
#[tokio::test(flavor = "multi_thread")]
async fn test_basic_entity_sync() -> Result<()> {
    use test_utils::*;
    println!("=== Starting test_basic_entity_sync ===");
    // Setup contexts (each owns a temp dir + db path, cleaned up on drop)
    println!("Creating test contexts...");
    let ctx1 = TestContext::new();
    let ctx2 = TestContext::new();
    // Setup gossip networking
    println!("Setting up gossip pair...");
    let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
    let node1_id = bridge1.node_id();
    let node2_id = bridge2.node_id();
    // Create headless apps
    println!("Creating Bevy apps...");
    let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
    let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
    println!("Apps created successfully");
    println!("Node 1 ID: {}", node1_id);
    println!("Node 2 ID: {}", node2_id);
    // Node 1 spawns entity
    let entity_id = Uuid::new_v4();
    println!("Spawning entity {} on node 1", entity_id);
    let spawned_entity = app1.world_mut().spawn((
        NetworkedEntity::with_id(entity_id, node1_id),
        TestPosition { x: 10.0, y: 20.0 },
        Persisted::with_id(entity_id),
        Synced,
    )).id();
    // IMPORTANT: Trigger change detection for persistence
    // Bevy only marks components as "changed" when mutated, not on spawn
    // Access Persisted mutably to trigger the persistence system
    {
        let world = app1.world_mut();
        if let Ok(mut entity_mut) = world.get_entity_mut(spawned_entity) {
            if let Some(mut persisted) = entity_mut.get_mut::<Persisted>() {
                // Dereferencing the mutable borrow triggers change detection
                let _ = &mut *persisted;
            }
        }
    }
    println!("Entity spawned, triggered persistence");
    println!("Entity spawned, starting sync wait...");
    // Wait for sync: poll until node 2's world contains the network ID
    wait_for_sync(&mut app1, &mut app2, Duration::from_secs(10), |_, w2| {
        let count = count_entities_with_id(w2, entity_id);
        if count > 0 {
            println!("✓ Entity found on node 2!");
            true
        } else {
            // Debug: print what entities we DO have
            let all_networked: Vec<uuid::Uuid> = {
                let mut query = w2.query::<&NetworkedEntity>();
                query.iter(w2).map(|ne| ne.network_id).collect()
            };
            if !all_networked.is_empty() {
                println!(" Node 2 has {} networked entities: {:?}", all_networked.len(), all_networked);
                println!(" Looking for: {}", entity_id);
            }
            false
        }
    })
    .await?;
    // Update app2 one more time to ensure queued commands are applied
    println!("Running final update to flush commands...");
    app2.update();
    // Debug: Check what components the entity has
    {
        let world = app2.world_mut();
        let mut query = world.query::<(&NetworkedEntity, Option<&TestPosition>)>();
        for (ne, pos) in query.iter(world) {
            if ne.network_id == entity_id {
                println!("Debug: Entity {} has NetworkedEntity", entity_id);
                if let Some(pos) = pos {
                    println!("Debug: Entity has TestPosition: {:?}", pos);
                } else {
                    println!("Debug: Entity MISSING TestPosition component!");
                }
            }
        }
    }
    // Verify entity synced to node 2 (in-memory check)
    assert_entity_synced(app2.world_mut(), entity_id, TestPosition { x: 10.0, y: 20.0 })?;
    println!("✓ Entity synced in-memory on node 2");
    // Give persistence system time to flush to disk
    // The persistence system runs on Update with a 1-second flush interval
    println!("Waiting for persistence to flush...");
    for _ in 0..15 {
        app1.update();
        app2.update();
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
    // Verify persistence on Node 1 (originating node)
    println!("Checking Node 1 database persistence...");
    assert!(
        entity_exists_in_db(&ctx1.db_path(), entity_id)?,
        "Entity {} should exist in Node 1 database",
        entity_id
    );
    assert!(
        component_exists_in_db(&ctx1.db_path(), entity_id, "sync_integration_headless::TestPosition")?,
        "TestPosition component should exist in Node 1 database"
    );
    let node1_position = {
        let type_registry = app1.world().resource::<AppTypeRegistry>().read();
        load_component_from_db::<TestPosition>(
            &ctx1.db_path(),
            entity_id,
            "sync_integration_headless::TestPosition",
            &type_registry,
        )?
    };
    assert_eq!(
        node1_position,
        Some(TestPosition { x: 10.0, y: 20.0 }),
        "TestPosition data should be correctly persisted in Node 1 database"
    );
    println!("✓ Node 1 persistence verified");
    // Verify persistence on Node 2 (receiving node after sync)
    println!("Checking Node 2 database persistence...");
    assert!(
        entity_exists_in_db(&ctx2.db_path(), entity_id)?,
        "Entity {} should exist in Node 2 database after sync",
        entity_id
    );
    assert!(
        component_exists_in_db(&ctx2.db_path(), entity_id, "sync_integration_headless::TestPosition")?,
        "TestPosition component should exist in Node 2 database after sync"
    );
    let node2_position = {
        let type_registry = app2.world().resource::<AppTypeRegistry>().read();
        load_component_from_db::<TestPosition>(
            &ctx2.db_path(),
            entity_id,
            "sync_integration_headless::TestPosition",
            &type_registry,
        )?
    };
    assert_eq!(
        node2_position,
        Some(TestPosition { x: 10.0, y: 20.0 }),
        "TestPosition data should be correctly persisted in Node 2 database after sync"
    );
    println!("✓ Node 2 persistence verified");
    println!("✓ Full sync and persistence test passed!");
    // Cleanup: shut down routers before closing endpoints
    router1.shutdown().await?;
    router2.shutdown().await?;
    ep1.close().await;
    ep2.close().await;
    Ok(())
}
/// Test 2: Bidirectional sync — each node creates its own entity, and both
/// entities must end up replicated on the opposite peer with intact data.
#[tokio::test(flavor = "multi_thread")]
async fn test_bidirectional_sync() -> Result<()> {
    use test_utils::*;

    // Mark the entity's `Persisted` component as changed (mutably dereferencing
    // it is enough) so the persistence/sync systems pick it up next update.
    fn touch_persisted(world: &mut World, entity: Entity) {
        if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
            if let Some(mut persisted) = entity_mut.get_mut::<Persisted>() {
                let _ = &mut *persisted;
            }
        }
    }

    let ctx1 = TestContext::new();
    let ctx2 = TestContext::new();
    let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
    let node1_id = bridge1.node_id();
    let node2_id = bridge2.node_id();
    let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
    let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);

    // Node 1 owns entity A; node 2 owns entity B.
    let entity_a = Uuid::new_v4();
    let entity_b = Uuid::new_v4();

    let spawned_a = app1
        .world_mut()
        .spawn((
            NetworkedEntity::with_id(entity_a, node1_id),
            TestPosition { x: 1.0, y: 2.0 },
            Persisted::with_id(entity_a),
            Synced,
        ))
        .id();
    touch_persisted(app1.world_mut(), spawned_a);

    let spawned_b = app2
        .world_mut()
        .spawn((
            NetworkedEntity::with_id(entity_b, node2_id),
            TestPosition { x: 3.0, y: 4.0 },
            Persisted::with_id(entity_b),
            Synced,
        ))
        .id();
    touch_persisted(app2.world_mut(), spawned_b);

    // Each node must learn about the entity it did not create.
    wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |w1, w2| {
        count_entities_with_id(w1, entity_b) > 0 && count_entities_with_id(w2, entity_a) > 0
    })
    .await?;

    // Confirm the replicated component data arrived intact in both directions.
    assert_entity_synced(app1.world_mut(), entity_b, TestPosition { x: 3.0, y: 4.0 })?;
    assert_entity_synced(app2.world_mut(), entity_a, TestPosition { x: 1.0, y: 2.0 })?;
    println!("✓ Bidirectional sync test passed");

    // Tear down networking.
    router1.shutdown().await?;
    router2.shutdown().await?;
    ep1.close().await;
    ep2.close().await;
    Ok(())
}
/// Test 3: Concurrent conflict resolution (LWW merge semantics)
///
/// Both nodes modify the same entity's `Transform` in the same window. The
/// test only requires that the two replicas converge to a single value — the
/// merge is Last-Write-Wins (see `ComponentVectorClocks`), so either write
/// may win, but both sides must end up agreeing.
#[tokio::test(flavor = "multi_thread")]
async fn test_concurrent_conflict_resolution() -> Result<()> {
    use test_utils::*;
    let ctx1 = TestContext::new();
    let ctx2 = TestContext::new();
    // Two gossip-connected endpoints; each side gets its own Bevy app and DB.
    let (ep1, ep2, router1, router2, bridge1, bridge2) = setup_gossip_pair().await?;
    let node1_id = bridge1.node_id();
    let node2_id = bridge2.node_id();
    let mut app1 = create_test_app(node1_id, ctx1.db_path(), bridge1);
    let mut app2 = create_test_app(node2_id, ctx2.db_path(), bridge2);
    // Spawn shared entity on node 1 with Transform (which IS tracked for changes)
    let entity_id = Uuid::new_v4();
    let entity_bevy = app1.world_mut().spawn((
        NetworkedEntity::with_id(entity_id, node1_id),
        NetworkedTransform::default(),
        Transform::from_xyz(0.0, 0.0, 0.0),
        Persisted::with_id(entity_id),
        Synced,
    )).id();
    // Trigger persistence: taking `&mut Persisted` flags the component as
    // changed, which schedules it for a write.
    {
        let world = app1.world_mut();
        if let Ok(mut entity_mut) = world.get_entity_mut(entity_bevy) {
            if let Some(mut persisted) = entity_mut.get_mut::<Persisted>() {
                let _ = &mut *persisted;
            }
        }
    }
    // Wait until node 2 has received the entity before creating the conflict.
    wait_for_sync(&mut app1, &mut app2, Duration::from_secs(2), |_, w2| {
        count_entities_with_id(w2, entity_id) > 0
    })
    .await?;
    println!("✓ Initial sync complete, both nodes have the entity");
    // Debug dump: list each node's entities/components so a failing run shows
    // what actually replicated.
    {
        let world1 = app1.world_mut();
        let mut query1 = world1.query::<(Entity, &NetworkedEntity, Option<&NetworkedTransform>, &Transform)>();
        println!("Node 1 entities:");
        for (entity, ne, nt, t) in query1.iter(world1) {
            println!(" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
                entity, ne.network_id, nt.is_some(), t.translation.x, t.translation.y, t.translation.z);
        }
    }
    {
        let world2 = app2.world_mut();
        let mut query2 = world2.query::<(Entity, &NetworkedEntity, Option<&NetworkedTransform>, &Transform)>();
        println!("Node 2 entities:");
        for (entity, ne, nt, t) in query2.iter(world2) {
            println!(" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
                entity, ne.network_id, nt.is_some(), t.translation.x, t.translation.y, t.translation.z);
        }
    }
    // Both nodes modify the same entity concurrently.
    // Node 1 writes first; iterating with `iter_mut` marks the Transform as
    // changed, which the delta-generation system picks up on the next update.
    {
        let mut query1 = app1.world_mut().query::<&mut Transform>();
        for mut transform in query1.iter_mut(app1.world_mut()) {
            println!("Node 1: Modifying Transform to (10, 10)");
            transform.translation.x = 10.0;
            transform.translation.y = 10.0;
        }
    }
    app1.update(); // Generate and send delta
    // Short pause before node 2's write, so node 2's operation lands "later"
    // in the merge ordering. NOTE(review): vector clocks are not wall-clock
    // ordered — confirm whether this delay actually makes node 2 win, or
    // whether convergence relies on the node-id tiebreaker for concurrent ops.
    tokio::time::sleep(Duration::from_millis(100)).await;
    // Node 2 update (later timestamp, should win with LWW)
    {
        let mut query2 = app2.world_mut().query::<&mut Transform>();
        let count = query2.iter(app2.world()).count();
        println!("Node 2: Found {} entities with Transform", count);
        for mut transform in query2.iter_mut(app2.world_mut()) {
            println!("Node 2: Modifying Transform to (20, 20)");
            transform.translation.x = 20.0;
            transform.translation.y = 20.0;
        }
    }
    app2.update(); // Generate and send delta
    println!("Both nodes modified the entity, waiting for convergence...");
    // Convergence check: passes once both replicas report the same translation
    // (within float tolerance). It deliberately does not pin which write won.
    wait_for_sync(&mut app1, &mut app2, Duration::from_secs(5), |w1, w2| {
        let mut query1 = w1.query::<&Transform>();
        let mut query2 = w2.query::<&Transform>();
        let transforms1: Vec<_> = query1.iter(w1).collect();
        let transforms2: Vec<_> = query2.iter(w2).collect();
        if transforms1.is_empty() || transforms2.is_empty() {
            return false;
        }
        let t1 = transforms1[0];
        let t2 = transforms2[0];
        // Check if they converged (within floating point tolerance)
        let converged = (t1.translation.x - t2.translation.x).abs() < 0.01
            && (t1.translation.y - t2.translation.y).abs() < 0.01;
        if converged {
            println!("✓ Nodes converged to: ({}, {})", t1.translation.x, t1.translation.y);
        }
        converged
    })
    .await?;
    println!("✓ Conflict resolution test passed (converged)");
    // Tear down networking so the endpoints release their sockets.
    router1.shutdown().await?;
    router2.shutdown().await?;
    ep1.close().await;
    ep2.close().await;
    Ok(())
}
/// Test 4: Persistence crash recovery
///
/// Expected to fail until the persistence layer gains entity loading on
/// startup; kept (ignored) to document that gap and drive the future
/// implementation.
#[tokio::test(flavor = "multi_thread")]
#[ignore = "Persistence loading not yet implemented - documents gap"]
async fn test_persistence_crash_recovery() -> Result<()> {
    use test_utils::*;

    let ctx = TestContext::new();
    let node_id = Uuid::new_v4();
    let entity_id = Uuid::new_v4();

    // Phase 1: spawn and persist the entity, then drop the app ("crash").
    {
        let bridge = GossipBridge::new(node_id);
        let mut app = create_test_app(node_id, ctx.db_path(), bridge);
        app.world_mut().spawn((
            NetworkedEntity::with_id(entity_id, node_id),
            TestPosition {
                x: 100.0,
                y: 200.0,
            },
            Persisted::with_id(entity_id),
            Synced,
        ));
        // ~2 seconds of frames at 60 FPS so the flush interval elapses.
        for _frame in 0..120 {
            app.update();
            tokio::time::sleep(Duration::from_millis(16)).await;
        }
        println!("Phase 1: Entity persisted, simulating crash...");
    } // App dropped here — the simulated crash.

    // Phase 2: boot a fresh app over the same database and expect the entity
    // to come back.
    {
        let bridge = GossipBridge::new(node_id);
        let mut app = create_test_app(node_id, ctx.db_path(), bridge);
        // TODO: a startup system that loads entities from persistence is
        // still missing — that is exactly the gap this test documents.
        app.update();
        // Verify the entity was restored from the database.
        assert_entity_synced(app.world_mut(), entity_id, TestPosition { x: 100.0, y: 200.0 })
            .map_err(|e| anyhow::anyhow!("Persistence recovery failed: {}", e))?;
        println!("✓ Crash recovery test passed");
    }
    Ok(())
}

// NOTE(review): extraction residue removed here — the original text contained
// diff-viewer chrome ("View File", "@@ -0,0 +1,66 @@") separating two distinct
// source files; the code below belongs to a separate Transform
// change-detection test file in the repository.
//! Minimal test to verify Transform change detection works
use bevy::prelude::*;
use lib::networking::{NetworkedEntity, NetworkedTransform, Synced};
use std::sync::{Arc, Mutex};
use uuid::Uuid;
#[test]
fn test_transform_change_detection_basic() {
    let mut app = App::new();
    app.add_plugins(MinimalPlugins);

    // System under test: mirrors Transform edits into NetworkedEntity changes.
    app.add_systems(Update, lib::networking::auto_detect_transform_changes_system);

    // Observer system scheduled in the same Update pass: change-detection
    // flags are cleared at frame end, so the check must happen mid-frame.
    let change_seen = Arc::new(Mutex::new(false));
    let observer_flag = Arc::clone(&change_seen);
    app.add_systems(
        Update,
        move |query: Query<&NetworkedEntity, Changed<NetworkedEntity>>| {
            let count = query.iter().count();
            if count == 0 {
                println!("✗ Test system detected 0 changed NetworkedEntity components");
            } else {
                println!("✓ Test system detected {} changed NetworkedEntity components", count);
                *observer_flag.lock().unwrap() = true;
            }
        },
    );

    // One networked entity carrying a Transform to mutate.
    let node_id = Uuid::new_v4();
    let entity_id = Uuid::new_v4();
    app.world_mut().spawn((
        NetworkedEntity::with_id(entity_id, node_id),
        NetworkedTransform::default(),
        Transform::from_xyz(0.0, 0.0, 0.0),
        Synced,
    ));

    // First frame: the spawn itself registers as a change, so run it off and
    // reset the flag before the real mutation.
    println!("First update (clearing initial change detection)...");
    app.update();
    *change_seen.lock().unwrap() = false;

    // Mutate the Transform so the next frame has something to detect.
    {
        let mut transforms = app.world_mut().query::<&mut Transform>();
        for mut transform in transforms.iter_mut(app.world_mut()) {
            transform.translation.x = 10.0;
        }
    }
    println!("Modified Transform, running second update...");
    // This update should fire auto_detect_transform_changes_system and, in
    // turn, the observer above.
    app.update();

    let result = *change_seen.lock().unwrap();
    println!("Was NetworkedEntity marked as changed? {}", result);
    assert!(result, "NetworkedEntity should be marked as changed after Transform modification");
}