finished initial networking impl
Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
71
Cargo.lock
generated
71
Cargo.lock
generated
@@ -102,7 +102,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "ac8202ab55fcbf46ca829833f347a82a2a4ce0596f0304ac322c2d100030cd56"
|
checksum = "ac8202ab55fcbf46ca829833f347a82a2a4ce0596f0304ac322c2d100030cd56"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"crypto-common",
|
"crypto-common 0.2.0-rc.4",
|
||||||
"inout",
|
"inout",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1739,6 +1739,15 @@ version = "0.1.6"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
|
checksum = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "block-buffer"
|
||||||
|
version = "0.10.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71"
|
||||||
|
dependencies = [
|
||||||
|
"generic-array",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "block-buffer"
|
name = "block-buffer"
|
||||||
version = "0.11.0"
|
version = "0.11.0"
|
||||||
@@ -1980,8 +1989,8 @@ version = "0.5.0-rc.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1e12a13eb01ded5d32ee9658d94f553a19e804204f2dc811df69ab4d9e0cb8c7"
|
checksum = "1e12a13eb01ded5d32ee9658d94f553a19e804204f2dc811df69ab4d9e0cb8c7"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"block-buffer",
|
"block-buffer 0.11.0",
|
||||||
"crypto-common",
|
"crypto-common 0.2.0-rc.4",
|
||||||
"inout",
|
"inout",
|
||||||
"zeroize",
|
"zeroize",
|
||||||
]
|
]
|
||||||
@@ -2341,6 +2350,16 @@ version = "0.2.4"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
|
checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crypto-common"
|
||||||
|
version = "0.1.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
|
||||||
|
dependencies = [
|
||||||
|
"generic-array",
|
||||||
|
"typenum",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "crypto-common"
|
name = "crypto-common"
|
||||||
version = "0.2.0-rc.4"
|
version = "0.2.0-rc.4"
|
||||||
@@ -2409,7 +2428,7 @@ dependencies = [
|
|||||||
"cfg-if",
|
"cfg-if",
|
||||||
"cpufeatures",
|
"cpufeatures",
|
||||||
"curve25519-dalek-derive",
|
"curve25519-dalek-derive",
|
||||||
"digest",
|
"digest 0.11.0-rc.3",
|
||||||
"fiat-crypto",
|
"fiat-crypto",
|
||||||
"rand_core 0.9.3",
|
"rand_core 0.9.3",
|
||||||
"rustc_version",
|
"rustc_version",
|
||||||
@@ -2586,15 +2605,25 @@ version = "0.2.3"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c"
|
checksum = "ab03c107fafeb3ee9f5925686dbb7a73bc76e3932abb0d2b365cb64b169cf04c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "digest"
|
||||||
|
version = "0.10.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
|
||||||
|
dependencies = [
|
||||||
|
"block-buffer 0.10.4",
|
||||||
|
"crypto-common 0.1.7",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "digest"
|
name = "digest"
|
||||||
version = "0.11.0-rc.3"
|
version = "0.11.0-rc.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "dac89f8a64533a9b0eaa73a68e424db0fb1fd6271c74cc0125336a05f090568d"
|
checksum = "dac89f8a64533a9b0eaa73a68e424db0fb1fd6271c74cc0125336a05f090568d"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"block-buffer",
|
"block-buffer 0.11.0",
|
||||||
"const-oid",
|
"const-oid",
|
||||||
"crypto-common",
|
"crypto-common 0.2.0-rc.4",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2753,7 +2782,7 @@ dependencies = [
|
|||||||
"ed25519",
|
"ed25519",
|
||||||
"rand_core 0.9.3",
|
"rand_core 0.9.3",
|
||||||
"serde",
|
"serde",
|
||||||
"sha2",
|
"sha2 0.11.0-rc.2",
|
||||||
"signature",
|
"signature",
|
||||||
"subtle",
|
"subtle",
|
||||||
"zeroize",
|
"zeroize",
|
||||||
@@ -3451,6 +3480,16 @@ dependencies = [
|
|||||||
"windows 0.61.3",
|
"windows 0.61.3",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "generic-array"
|
||||||
|
version = "0.14.7"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
|
||||||
|
dependencies = [
|
||||||
|
"typenum",
|
||||||
|
"version_check",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "gethostname"
|
name = "gethostname"
|
||||||
version = "1.1.0"
|
version = "1.1.0"
|
||||||
@@ -4696,6 +4735,7 @@ dependencies = [
|
|||||||
"rusqlite",
|
"rusqlite",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
|
"sha2 0.10.9",
|
||||||
"sync-macros",
|
"sync-macros",
|
||||||
"thiserror 2.0.17",
|
"thiserror 2.0.17",
|
||||||
"tokio",
|
"tokio",
|
||||||
@@ -7037,7 +7077,7 @@ checksum = "c5e046edf639aa2e7afb285589e5405de2ef7e61d4b0ac1e30256e3eab911af9"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"cpufeatures",
|
"cpufeatures",
|
||||||
"digest",
|
"digest 0.11.0-rc.3",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -7046,6 +7086,17 @@ version = "1.0.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d"
|
checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "sha2"
|
||||||
|
version = "0.10.9"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"cpufeatures",
|
||||||
|
"digest 0.10.7",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "sha2"
|
name = "sha2"
|
||||||
version = "0.11.0-rc.2"
|
version = "0.11.0-rc.2"
|
||||||
@@ -7054,7 +7105,7 @@ checksum = "d1e3878ab0f98e35b2df35fe53201d088299b41a6bb63e3e34dada2ac4abd924"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"cpufeatures",
|
"cpufeatures",
|
||||||
"digest",
|
"digest 0.11.0-rc.3",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -8078,7 +8129,7 @@ version = "0.6.0-rc.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a55be643b40a21558f44806b53ee9319595bc7ca6896372e4e08e5d7d83c9cd6"
|
checksum = "a55be643b40a21558f44806b53ee9319595bc7ca6896372e4e08e5d7d83c9cd6"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"crypto-common",
|
"crypto-common 0.2.0-rc.4",
|
||||||
"subtle",
|
"subtle",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ tracing.workspace = true
|
|||||||
bevy.workspace = true
|
bevy.workspace = true
|
||||||
bincode = "1.3"
|
bincode = "1.3"
|
||||||
futures-lite = "2.0"
|
futures-lite = "2.0"
|
||||||
|
sha2 = "0.10"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
|
|||||||
@@ -24,6 +24,7 @@
|
|||||||
mod db;
|
mod db;
|
||||||
mod error;
|
mod error;
|
||||||
mod models;
|
mod models;
|
||||||
|
pub mod networking;
|
||||||
pub mod persistence;
|
pub mod persistence;
|
||||||
pub mod sync;
|
pub mod sync;
|
||||||
|
|
||||||
|
|||||||
386
crates/lib/src/networking/apply_ops.rs
Normal file
386
crates/lib/src/networking/apply_ops.rs
Normal file
@@ -0,0 +1,386 @@
|
|||||||
|
//! Apply remote operations to local ECS state
|
||||||
|
//!
|
||||||
|
//! This module handles incoming EntityDelta messages and applies them to the
|
||||||
|
//! local Bevy world using CRDT merge semantics.
|
||||||
|
|
||||||
|
use bevy::{
|
||||||
|
prelude::*,
|
||||||
|
reflect::TypeRegistry,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
networking::{
|
||||||
|
blob_support::{
|
||||||
|
get_component_data,
|
||||||
|
BlobStore,
|
||||||
|
},
|
||||||
|
delta_generation::NodeVectorClock,
|
||||||
|
entity_map::NetworkEntityMap,
|
||||||
|
messages::{
|
||||||
|
ComponentData,
|
||||||
|
EntityDelta,
|
||||||
|
SyncMessage,
|
||||||
|
},
|
||||||
|
operations::ComponentOp,
|
||||||
|
NetworkedEntity,
|
||||||
|
},
|
||||||
|
persistence::reflection::deserialize_component,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Apply an EntityDelta message to the local world
|
||||||
|
///
|
||||||
|
/// This function:
|
||||||
|
/// 1. Checks tombstone registry to prevent resurrection
|
||||||
|
/// 2. Looks up the entity by network_id
|
||||||
|
/// 3. Spawns a new entity if it doesn't exist
|
||||||
|
/// 4. Applies each ComponentOp using CRDT merge semantics
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// - `delta`: The EntityDelta to apply
|
||||||
|
/// - `commands`: Bevy Commands for spawning/modifying entities
|
||||||
|
/// - `entity_map`: Map from network_id to Entity
|
||||||
|
/// - `type_registry`: Bevy's type registry for deserialization
|
||||||
|
/// - `node_clock`: Our node's vector clock (for causality tracking)
|
||||||
|
/// - `blob_store`: Optional blob store for resolving large component references
|
||||||
|
/// - `tombstone_registry`: Optional tombstone registry for deletion tracking
|
||||||
|
pub fn apply_entity_delta(
|
||||||
|
delta: &EntityDelta,
|
||||||
|
commands: &mut Commands,
|
||||||
|
entity_map: &mut NetworkEntityMap,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
node_clock: &mut NodeVectorClock,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
mut tombstone_registry: Option<&mut crate::networking::TombstoneRegistry>,
|
||||||
|
) {
|
||||||
|
// Validate and merge the remote vector clock
|
||||||
|
// Check for clock regression (shouldn't happen in correct implementations)
|
||||||
|
if delta.vector_clock.happened_before(&node_clock.clock) {
|
||||||
|
warn!(
|
||||||
|
"Received operation with clock from the past for entity {:?}. \
|
||||||
|
Remote clock happened before our clock. This may indicate clock issues.",
|
||||||
|
delta.entity_id
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Merge the remote vector clock into ours
|
||||||
|
node_clock.clock.merge(&delta.vector_clock);
|
||||||
|
|
||||||
|
// Check if any operations are Delete operations
|
||||||
|
for op in &delta.operations {
|
||||||
|
if let crate::networking::ComponentOp::Delete { vector_clock } = op {
|
||||||
|
// Record tombstone
|
||||||
|
if let Some(ref mut registry) = tombstone_registry {
|
||||||
|
registry.record_deletion(
|
||||||
|
delta.entity_id,
|
||||||
|
delta.node_id,
|
||||||
|
vector_clock.clone(),
|
||||||
|
);
|
||||||
|
|
||||||
|
// Despawn the entity if it exists locally
|
||||||
|
if let Some(entity) = entity_map.get_entity(delta.entity_id) {
|
||||||
|
commands.entity(entity).despawn();
|
||||||
|
entity_map.remove_by_network_id(delta.entity_id);
|
||||||
|
info!("Despawned entity {:?} due to Delete operation", delta.entity_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Don't process other operations - entity is deleted
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if we should ignore this delta due to deletion
|
||||||
|
if let Some(ref registry) = tombstone_registry {
|
||||||
|
if registry.should_ignore_operation(delta.entity_id, &delta.vector_clock) {
|
||||||
|
debug!(
|
||||||
|
"Ignoring delta for deleted entity {:?}",
|
||||||
|
delta.entity_id
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Look up or create the entity
|
||||||
|
let entity = match entity_map.get_entity(delta.entity_id) {
|
||||||
|
Some(entity) => entity,
|
||||||
|
None => {
|
||||||
|
// Spawn new entity with NetworkedEntity component
|
||||||
|
let entity = commands
|
||||||
|
.spawn(NetworkedEntity::with_id(delta.entity_id, delta.node_id))
|
||||||
|
.id();
|
||||||
|
|
||||||
|
entity_map.insert(delta.entity_id, entity);
|
||||||
|
info!(
|
||||||
|
"Spawned new networked entity {:?} from node {}",
|
||||||
|
delta.entity_id, delta.node_id
|
||||||
|
);
|
||||||
|
|
||||||
|
entity
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Apply each operation (skip Delete operations - handled above)
|
||||||
|
for op in &delta.operations {
|
||||||
|
if !op.is_delete() {
|
||||||
|
apply_component_op(entity, op, commands, type_registry, blob_store);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Apply a single ComponentOp to an entity
|
||||||
|
///
|
||||||
|
/// This dispatches to the appropriate CRDT merge logic based on the operation
|
||||||
|
/// type.
|
||||||
|
fn apply_component_op(
|
||||||
|
entity: Entity,
|
||||||
|
op: &ComponentOp,
|
||||||
|
commands: &mut Commands,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
) {
|
||||||
|
match op {
|
||||||
|
| ComponentOp::Set {
|
||||||
|
component_type,
|
||||||
|
data,
|
||||||
|
vector_clock: _,
|
||||||
|
} => {
|
||||||
|
apply_set_operation(entity, component_type, data, commands, type_registry, blob_store);
|
||||||
|
}
|
||||||
|
| ComponentOp::SetAdd { component_type, .. } => {
|
||||||
|
// OR-Set add - Phase 10 provides OrSet<T> type
|
||||||
|
// Application code should use OrSet in components and handle SetAdd/SetRemove
|
||||||
|
// Full integration will be in Phase 12 plugin
|
||||||
|
debug!("SetAdd operation for {} (use OrSet<T> in components)", component_type);
|
||||||
|
}
|
||||||
|
| ComponentOp::SetRemove { component_type, .. } => {
|
||||||
|
// OR-Set remove - Phase 10 provides OrSet<T> type
|
||||||
|
// Application code should use OrSet in components and handle SetAdd/SetRemove
|
||||||
|
// Full integration will be in Phase 12 plugin
|
||||||
|
debug!("SetRemove operation for {} (use OrSet<T> in components)", component_type);
|
||||||
|
}
|
||||||
|
| ComponentOp::SequenceInsert { .. } => {
|
||||||
|
// RGA insert - will be implemented in Phase 11
|
||||||
|
debug!("SequenceInsert operation not yet implemented");
|
||||||
|
}
|
||||||
|
| ComponentOp::SequenceDelete { .. } => {
|
||||||
|
// RGA delete - will be implemented in Phase 11
|
||||||
|
debug!("SequenceDelete operation not yet implemented");
|
||||||
|
}
|
||||||
|
| ComponentOp::Delete { .. } => {
|
||||||
|
// Entity deletion - will be implemented in Phase 9
|
||||||
|
debug!("Delete operation not yet implemented");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Apply a Set operation (Last-Write-Wins)
|
||||||
|
///
|
||||||
|
/// Deserializes the component and inserts/updates it on the entity.
|
||||||
|
/// Handles both inline data and blob references.
|
||||||
|
fn apply_set_operation(
|
||||||
|
entity: Entity,
|
||||||
|
component_type: &str,
|
||||||
|
data: &ComponentData,
|
||||||
|
commands: &mut Commands,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
) {
|
||||||
|
// Get the actual data (resolve blob if needed)
|
||||||
|
let data_bytes = match data {
|
||||||
|
| ComponentData::Inline(bytes) => bytes.clone(),
|
||||||
|
| ComponentData::BlobRef { hash: _, size: _ } => {
|
||||||
|
if let Some(store) = blob_store {
|
||||||
|
match get_component_data(data, store) {
|
||||||
|
Ok(bytes) => bytes,
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
"Failed to retrieve blob for component {}: {}",
|
||||||
|
component_type, e
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
error!(
|
||||||
|
"Blob reference for {} but no blob store available",
|
||||||
|
component_type
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Deserialize the component
|
||||||
|
let reflected = match deserialize_component(&data_bytes, type_registry) {
|
||||||
|
Ok(reflected) => reflected,
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
"Failed to deserialize component {}: {}",
|
||||||
|
component_type, e
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get the type registration
|
||||||
|
let registration = match type_registry.get_with_type_path(component_type) {
|
||||||
|
Some(reg) => reg,
|
||||||
|
None => {
|
||||||
|
error!("Component type {} not registered", component_type);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get ReflectComponent data
|
||||||
|
let reflect_component = match registration.data::<ReflectComponent>() {
|
||||||
|
Some(rc) => rc.clone(),
|
||||||
|
None => {
|
||||||
|
error!(
|
||||||
|
"Component type {} does not have ReflectComponent data",
|
||||||
|
component_type
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Clone what we need to avoid lifetime issues
|
||||||
|
let component_type_owned = component_type.to_string();
|
||||||
|
|
||||||
|
// Insert or update the component
|
||||||
|
commands.queue(move |world: &mut World| {
|
||||||
|
// Get the type registry from the world and clone it
|
||||||
|
let type_registry_arc = {
|
||||||
|
let Some(type_registry_res) = world.get_resource::<AppTypeRegistry>() else {
|
||||||
|
error!("AppTypeRegistry not found in world");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
type_registry_res.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Now we can safely get mutable access to the world
|
||||||
|
let type_registry = type_registry_arc.read();
|
||||||
|
|
||||||
|
if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
|
||||||
|
reflect_component.insert(&mut entity_mut, &*reflected, &type_registry);
|
||||||
|
debug!("Applied Set operation for {}", component_type_owned);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// System to receive and apply incoming EntityDelta messages
|
||||||
|
///
|
||||||
|
/// This system polls the GossipBridge for incoming messages and applies them
|
||||||
|
/// to the local world.
|
||||||
|
///
|
||||||
|
/// Add this to your app:
|
||||||
|
///
|
||||||
|
/// ```no_run
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::receive_and_apply_deltas_system;
|
||||||
|
///
|
||||||
|
/// App::new()
|
||||||
|
/// .add_systems(Update, receive_and_apply_deltas_system);
|
||||||
|
/// ```
|
||||||
|
pub fn receive_and_apply_deltas_system(
|
||||||
|
mut commands: Commands,
|
||||||
|
bridge: Option<Res<crate::networking::GossipBridge>>,
|
||||||
|
mut entity_map: ResMut<NetworkEntityMap>,
|
||||||
|
type_registry: Res<AppTypeRegistry>,
|
||||||
|
mut node_clock: ResMut<NodeVectorClock>,
|
||||||
|
blob_store: Option<Res<BlobStore>>,
|
||||||
|
mut tombstone_registry: Option<ResMut<crate::networking::TombstoneRegistry>>,
|
||||||
|
) {
|
||||||
|
let Some(bridge) = bridge else {
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
let registry = type_registry.read();
|
||||||
|
let blob_store_ref = blob_store.as_deref();
|
||||||
|
|
||||||
|
// Poll for incoming messages
|
||||||
|
while let Some(message) = bridge.try_recv() {
|
||||||
|
match message.message {
|
||||||
|
| SyncMessage::EntityDelta {
|
||||||
|
entity_id,
|
||||||
|
node_id,
|
||||||
|
vector_clock,
|
||||||
|
operations,
|
||||||
|
} => {
|
||||||
|
let delta = EntityDelta {
|
||||||
|
entity_id,
|
||||||
|
node_id,
|
||||||
|
vector_clock,
|
||||||
|
operations,
|
||||||
|
};
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Received EntityDelta for entity {:?} with {} operations",
|
||||||
|
delta.entity_id,
|
||||||
|
delta.operations.len()
|
||||||
|
);
|
||||||
|
|
||||||
|
apply_entity_delta(
|
||||||
|
&delta,
|
||||||
|
&mut commands,
|
||||||
|
&mut entity_map,
|
||||||
|
®istry,
|
||||||
|
&mut node_clock,
|
||||||
|
blob_store_ref,
|
||||||
|
tombstone_registry.as_deref_mut(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
| SyncMessage::JoinRequest { .. } => {
|
||||||
|
// Handled by handle_join_requests_system
|
||||||
|
debug!("JoinRequest handled by dedicated system");
|
||||||
|
}
|
||||||
|
| SyncMessage::FullState { .. } => {
|
||||||
|
// Handled by handle_full_state_system
|
||||||
|
debug!("FullState handled by dedicated system");
|
||||||
|
}
|
||||||
|
| SyncMessage::SyncRequest { .. } => {
|
||||||
|
// Handled by handle_sync_requests_system
|
||||||
|
debug!("SyncRequest handled by dedicated system");
|
||||||
|
}
|
||||||
|
| SyncMessage::MissingDeltas { .. } => {
|
||||||
|
// Handled by handle_missing_deltas_system
|
||||||
|
debug!("MissingDeltas handled by dedicated system");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_node_clock_merge() {
|
||||||
|
let node_id = uuid::Uuid::new_v4();
|
||||||
|
let mut node_clock = NodeVectorClock::new(node_id);
|
||||||
|
|
||||||
|
let remote_node = uuid::Uuid::new_v4();
|
||||||
|
let mut remote_clock = crate::networking::VectorClock::new();
|
||||||
|
remote_clock.increment(remote_node);
|
||||||
|
remote_clock.increment(remote_node);
|
||||||
|
|
||||||
|
// Merge remote clock
|
||||||
|
node_clock.clock.merge(&remote_clock);
|
||||||
|
|
||||||
|
// Our clock should have the remote node's sequence
|
||||||
|
assert_eq!(node_clock.clock.get(remote_node), 2);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_entity_delta_structure() {
|
||||||
|
let entity_id = uuid::Uuid::new_v4();
|
||||||
|
let node_id = uuid::Uuid::new_v4();
|
||||||
|
let clock = crate::networking::VectorClock::new();
|
||||||
|
|
||||||
|
let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
|
||||||
|
|
||||||
|
assert_eq!(delta.entity_id, entity_id);
|
||||||
|
assert_eq!(delta.node_id, node_id);
|
||||||
|
assert_eq!(delta.operations.len(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
379
crates/lib/src/networking/blob_support.rs
Normal file
379
crates/lib/src/networking/blob_support.rs
Normal file
@@ -0,0 +1,379 @@
|
|||||||
|
//! Large blob support for components >64KB
|
||||||
|
//!
|
||||||
|
//! This module handles large component data using iroh-blobs. When a component
|
||||||
|
//! exceeds the inline threshold (64KB), it's stored as a blob and referenced
|
||||||
|
//! by its hash in the ComponentOp.
|
||||||
|
//!
|
||||||
|
//! **NOTE:** This is a simplified implementation for Phase 6. Full iroh-blobs
|
||||||
|
//! integration will be completed when we integrate with actual gossip networking.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::HashMap,
|
||||||
|
sync::{
|
||||||
|
Arc,
|
||||||
|
Mutex,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
error::{
|
||||||
|
NetworkingError,
|
||||||
|
Result,
|
||||||
|
},
|
||||||
|
messages::ComponentData,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Threshold for storing data as a blob (64KB)
|
||||||
|
pub const BLOB_THRESHOLD: usize = 64 * 1024;
|
||||||
|
|
||||||
|
/// Hash type for blob references
|
||||||
|
pub type BlobHash = Vec<u8>;
|
||||||
|
|
||||||
|
/// Bevy resource for managing blobs
|
||||||
|
///
|
||||||
|
/// This resource provides blob storage and retrieval. In Phase 6, we use
|
||||||
|
/// an in-memory cache. Later phases will integrate with iroh-blobs for
|
||||||
|
/// persistent storage and P2P transfer.
|
||||||
|
#[derive(Resource, Clone)]
|
||||||
|
pub struct BlobStore {
|
||||||
|
/// In-memory cache of blobs (hash -> data)
|
||||||
|
cache: Arc<Mutex<HashMap<BlobHash, Vec<u8>>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl BlobStore {
|
||||||
|
/// Create a new blob store
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
cache: Arc::new(Mutex::new(HashMap::new())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Store a blob and return its hash
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::BlobStore;
|
||||||
|
///
|
||||||
|
/// let store = BlobStore::new();
|
||||||
|
/// let data = vec![1, 2, 3, 4, 5];
|
||||||
|
/// let hash = store.store_blob(data.clone()).unwrap();
|
||||||
|
///
|
||||||
|
/// let retrieved = store.get_blob(&hash).unwrap();
|
||||||
|
/// assert_eq!(retrieved, Some(data));
|
||||||
|
/// ```
|
||||||
|
pub fn store_blob(&self, data: Vec<u8>) -> Result<BlobHash> {
|
||||||
|
// Use SHA-256 for content-addressable storage
|
||||||
|
let hash = Self::hash_data(&data);
|
||||||
|
|
||||||
|
self.cache
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||||
|
.insert(hash.clone(), data);
|
||||||
|
|
||||||
|
Ok(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Retrieve a blob by its hash
|
||||||
|
///
|
||||||
|
/// Returns `None` if the blob is not in the cache.
|
||||||
|
pub fn get_blob(&self, hash: &BlobHash) -> Result<Option<Vec<u8>>> {
|
||||||
|
Ok(self
|
||||||
|
.cache
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||||
|
.get(hash)
|
||||||
|
.cloned())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a blob exists in the cache
|
||||||
|
///
|
||||||
|
/// Returns an error if the cache lock is poisoned.
|
||||||
|
pub fn has_blob(&self, hash: &BlobHash) -> Result<bool> {
|
||||||
|
Ok(self.cache
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||||
|
.contains_key(hash))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get a blob if it exists (atomic check-and-get)
|
||||||
|
///
|
||||||
|
/// This is safer than calling `has_blob()` followed by `get_blob()` because
|
||||||
|
/// it's atomic - the blob can't be removed between the check and get.
|
||||||
|
pub fn get_blob_if_exists(&self, hash: &BlobHash) -> Result<Option<Vec<u8>>> {
|
||||||
|
Ok(self.cache
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||||
|
.get(hash)
|
||||||
|
.cloned())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get cache size (number of blobs)
|
||||||
|
///
|
||||||
|
/// Returns an error if the cache lock is poisoned.
|
||||||
|
pub fn cache_size(&self) -> Result<usize> {
|
||||||
|
Ok(self.cache
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||||
|
.len())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clear the cache
|
||||||
|
pub fn clear_cache(&self) -> Result<()> {
|
||||||
|
self.cache
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
|
||||||
|
.clear();
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Hash data using SHA-256
|
||||||
|
fn hash_data(data: &[u8]) -> BlobHash {
|
||||||
|
use sha2::{
|
||||||
|
Digest,
|
||||||
|
Sha256,
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut hasher = Sha256::new();
|
||||||
|
hasher.update(data);
|
||||||
|
hasher.finalize().to_vec()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for BlobStore {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Determine whether data should be stored as a blob
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::should_use_blob;
|
||||||
|
///
|
||||||
|
/// let small_data = vec![1, 2, 3];
|
||||||
|
/// assert!(!should_use_blob(&small_data));
|
||||||
|
///
|
||||||
|
/// let large_data = vec![0u8; 100_000];
|
||||||
|
/// assert!(should_use_blob(&large_data));
|
||||||
|
/// ```
|
||||||
|
pub fn should_use_blob(data: &[u8]) -> bool {
|
||||||
|
data.len() > BLOB_THRESHOLD
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create ComponentData, automatically choosing inline vs blob
|
||||||
|
///
|
||||||
|
/// This helper function inspects the data size and creates the appropriate
|
||||||
|
/// ComponentData variant.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::{create_component_data, BlobStore};
|
||||||
|
///
|
||||||
|
/// let store = BlobStore::new();
|
||||||
|
///
|
||||||
|
/// // Small data goes inline
|
||||||
|
/// let small_data = vec![1, 2, 3];
|
||||||
|
/// let component_data = create_component_data(small_data, &store).unwrap();
|
||||||
|
///
|
||||||
|
/// // Large data becomes a blob reference
|
||||||
|
/// let large_data = vec![0u8; 100_000];
|
||||||
|
/// let component_data = create_component_data(large_data, &store).unwrap();
|
||||||
|
/// ```
|
||||||
|
pub fn create_component_data(data: Vec<u8>, blob_store: &BlobStore) -> Result<ComponentData> {
|
||||||
|
if should_use_blob(&data) {
|
||||||
|
let size = data.len() as u64;
|
||||||
|
let hash = blob_store.store_blob(data)?;
|
||||||
|
Ok(ComponentData::BlobRef { hash, size })
|
||||||
|
} else {
|
||||||
|
Ok(ComponentData::Inline(data))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Retrieve the actual data from ComponentData
|
||||||
|
///
|
||||||
|
/// This resolves blob references by fetching from the blob store.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::{get_component_data, BlobStore, ComponentData};
|
||||||
|
///
|
||||||
|
/// let store = BlobStore::new();
|
||||||
|
///
|
||||||
|
/// // Inline data
|
||||||
|
/// let inline = ComponentData::Inline(vec![1, 2, 3]);
|
||||||
|
/// let data = get_component_data(&inline, &store).unwrap();
|
||||||
|
/// assert_eq!(data, vec![1, 2, 3]);
|
||||||
|
/// ```
|
||||||
|
pub fn get_component_data(data: &ComponentData, blob_store: &BlobStore) -> Result<Vec<u8>> {
|
||||||
|
match data {
|
||||||
|
| ComponentData::Inline(bytes) => Ok(bytes.clone()),
|
||||||
|
| ComponentData::BlobRef { hash, size: _ } => blob_store
|
||||||
|
.get_blob(hash)?
|
||||||
|
.ok_or_else(|| NetworkingError::Blob(format!("Blob not found: {:x?}", hash))),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Request a blob from the network
|
||||||
|
///
|
||||||
|
/// **NOTE:** This is a stub for Phase 6. Will be implemented in later phases
|
||||||
|
/// when we have full gossip integration.
|
||||||
|
pub fn request_blob_from_network(_hash: &BlobHash, _blob_store: &BlobStore) -> Result<()> {
|
||||||
|
// TODO: Implement in later phases with iroh-gossip
|
||||||
|
debug!("request_blob_from_network not yet implemented");
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Bevy system to handle blob requests
|
||||||
|
///
|
||||||
|
/// This system processes incoming blob requests and serves blobs to peers.
|
||||||
|
///
|
||||||
|
/// **NOTE:** Stub implementation for Phase 6.
|
||||||
|
pub fn blob_transfer_system(_blob_store: Option<Res<BlobStore>>) {
|
||||||
|
// TODO: Implement when we have gossip networking
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_blob_store_creation() {
        // A fresh store starts with an empty cache.
        assert_eq!(BlobStore::new().cache_size().unwrap(), 0);
    }

    #[test]
    fn test_store_and_retrieve_blob() {
        let store = BlobStore::new();
        let payload = vec![1, 2, 3, 4, 5];

        let hash = store.store_blob(payload.clone()).unwrap();
        assert_eq!(store.get_blob(&hash).unwrap(), Some(payload));
    }

    #[test]
    fn test_blob_hash_is_deterministic() {
        let store = BlobStore::new();
        let payload = vec![1, 2, 3, 4, 5];

        // Storing identical bytes twice must yield identical hashes.
        let first = store.store_blob(payload.clone()).unwrap();
        let second = store.store_blob(payload).unwrap();
        assert_eq!(first, second);
    }

    #[test]
    fn test_has_blob() {
        let store = BlobStore::new();

        let hash = store.store_blob(vec![1, 2, 3, 4, 5]).unwrap();
        assert!(store.has_blob(&hash).unwrap());

        // A hash that was never stored must not be reported as present.
        let fake_hash = vec![0; 32];
        assert!(!store.has_blob(&fake_hash).unwrap());
    }

    #[test]
    fn test_clear_cache() {
        let store = BlobStore::new();

        store.store_blob(vec![1, 2, 3, 4, 5]).unwrap();
        assert_eq!(store.cache_size().unwrap(), 1);

        store.clear_cache().unwrap();
        assert_eq!(store.cache_size().unwrap(), 0);
    }

    #[test]
    fn test_should_use_blob() {
        // Clearly below / above the threshold.
        assert!(!should_use_blob(&vec![0u8; 1000]));
        assert!(should_use_blob(&vec![0u8; 100_000]));

        // Exactly at the threshold stays inline; one byte past it does not.
        assert!(!should_use_blob(&vec![0u8; BLOB_THRESHOLD]));
        assert!(should_use_blob(&vec![0u8; BLOB_THRESHOLD + 1]));
    }

    #[test]
    fn test_create_component_data_inline() {
        let store = BlobStore::new();
        let small_data = vec![1, 2, 3];

        match create_component_data(small_data.clone(), &store).unwrap() {
            ComponentData::Inline(data) => assert_eq!(data, small_data),
            ComponentData::BlobRef { .. } => panic!("Expected inline data"),
        }
    }

    #[test]
    fn test_create_component_data_blob() {
        let store = BlobStore::new();

        match create_component_data(vec![0u8; 100_000], &store).unwrap() {
            ComponentData::BlobRef { hash, size } => {
                assert_eq!(size, 100_000);
                // The payload must actually have landed in the store.
                assert!(store.has_blob(&hash).unwrap());
            },
            ComponentData::Inline(_) => panic!("Expected blob reference"),
        }
    }

    #[test]
    fn test_get_component_data_inline() {
        let store = BlobStore::new();
        let inline = ComponentData::Inline(vec![1, 2, 3]);

        assert_eq!(get_component_data(&inline, &store).unwrap(), vec![1, 2, 3]);
    }

    #[test]
    fn test_get_component_data_blob() {
        let store = BlobStore::new();
        let large_data = vec![0u8; 100_000];
        let hash = store.store_blob(large_data.clone()).unwrap();

        let blob_ref = ComponentData::BlobRef { hash, size: 100_000 };
        assert_eq!(get_component_data(&blob_ref, &store).unwrap(), large_data);
    }

    #[test]
    fn test_get_component_data_missing_blob() {
        let store = BlobStore::new();
        let blob_ref = ComponentData::BlobRef {
            hash: vec![0; 32],
            size: 1000,
        };

        // A dangling reference must error rather than return junk.
        assert!(get_component_data(&blob_ref, &store).is_err());
    }
}
|
||||||
117
crates/lib/src/networking/change_detection.rs
Normal file
117
crates/lib/src/networking/change_detection.rs
Normal file
@@ -0,0 +1,117 @@
|
|||||||
|
//! Change detection for networked entities
|
||||||
|
//!
|
||||||
|
//! This module provides systems that detect when networked components change
|
||||||
|
//! and prepare them for delta generation.
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
NetworkedEntity,
|
||||||
|
NetworkedTransform,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// System to automatically detect Transform changes and mark entity for sync
|
||||||
|
///
|
||||||
|
/// This system detects changes to Transform components on networked entities
|
||||||
|
/// and triggers persistence by accessing `NetworkedEntity` mutably (which marks
|
||||||
|
/// it as changed via Bevy's change detection).
|
||||||
|
///
|
||||||
|
/// Add this system to your app if you want automatic synchronization of
|
||||||
|
/// Transform changes:
|
||||||
|
///
|
||||||
|
/// ```no_run
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::auto_detect_transform_changes_system;
|
||||||
|
///
|
||||||
|
/// App::new()
|
||||||
|
/// .add_systems(Update, auto_detect_transform_changes_system);
|
||||||
|
/// ```
|
||||||
|
pub fn auto_detect_transform_changes_system(
|
||||||
|
mut query: Query<
|
||||||
|
&mut NetworkedEntity,
|
||||||
|
(
|
||||||
|
With<NetworkedTransform>,
|
||||||
|
Or<(Changed<Transform>, Changed<GlobalTransform>)>,
|
||||||
|
),
|
||||||
|
>,
|
||||||
|
) {
|
||||||
|
// Simply accessing &mut NetworkedEntity triggers Bevy's change detection
|
||||||
|
for mut _networked in query.iter_mut() {
|
||||||
|
// No-op - the mutable access itself marks NetworkedEntity as changed
|
||||||
|
// This will trigger the delta generation system
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resource to track the last sync version for each entity
|
||||||
|
///
|
||||||
|
/// This helps us avoid sending redundant deltas for the same changes.
|
||||||
|
#[derive(Resource, Default)]
|
||||||
|
pub struct LastSyncVersions {
|
||||||
|
/// Map from network_id to the last vector clock we synced
|
||||||
|
versions: std::collections::HashMap<uuid::Uuid, u64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl LastSyncVersions {
|
||||||
|
/// Check if we should sync this entity based on version
|
||||||
|
pub fn should_sync(&self, network_id: uuid::Uuid, version: u64) -> bool {
|
||||||
|
match self.versions.get(&network_id) {
|
||||||
|
Some(&last_version) => version > last_version,
|
||||||
|
None => true, // Never synced before
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Update the last synced version for an entity
|
||||||
|
pub fn update(&mut self, network_id: uuid::Uuid, version: u64) {
|
||||||
|
self.versions.insert(network_id, version);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove tracking for an entity (when despawned)
|
||||||
|
pub fn remove(&mut self, network_id: uuid::Uuid) {
|
||||||
|
self.versions.remove(&network_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_last_sync_versions() {
        let mut tracker = LastSyncVersions::default();
        let id = uuid::Uuid::new_v4();

        // An entity we have never seen is always due for a sync.
        assert!(tracker.should_sync(id, 1));

        tracker.update(id, 1);

        // Same and older versions are redundant; only newer ones qualify.
        assert!(!tracker.should_sync(id, 1));
        assert!(!tracker.should_sync(id, 0));
        assert!(tracker.should_sync(id, 2));

        // Forgetting the entity makes it due again.
        tracker.remove(id);
        assert!(tracker.should_sync(id, 2));
    }

    #[test]
    fn test_multiple_entities() {
        let mut tracker = LastSyncVersions::default();
        let id1 = uuid::Uuid::new_v4();
        let id2 = uuid::Uuid::new_v4();

        tracker.update(id1, 5);
        tracker.update(id2, 3);

        // Versions are tracked independently per entity.
        assert!(!tracker.should_sync(id1, 4));
        assert!(tracker.should_sync(id1, 6));
        assert!(!tracker.should_sync(id2, 2));
        assert!(tracker.should_sync(id2, 4));
    }
}
|
||||||
410
crates/lib/src/networking/components.rs
Normal file
410
crates/lib/src/networking/components.rs
Normal file
@@ -0,0 +1,410 @@
|
|||||||
|
//! Networked entity components
|
||||||
|
//!
|
||||||
|
//! This module defines components that mark entities as networked and track
|
||||||
|
//! their network identity across the distributed system.
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
use serde::{
|
||||||
|
Deserialize,
|
||||||
|
Serialize,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::vector_clock::NodeId;
|
||||||
|
|
||||||
|
/// Marker component indicating an entity should be synchronized over the
|
||||||
|
/// network
|
||||||
|
///
|
||||||
|
/// Add this component to any entity that should have its state synchronized
|
||||||
|
/// across peers. The networking system will automatically track changes and
|
||||||
|
/// broadcast deltas.
|
||||||
|
///
|
||||||
|
/// # Relationship with Persisted
|
||||||
|
///
|
||||||
|
/// NetworkedEntity and Persisted are complementary:
|
||||||
|
/// - `Persisted` - Entity state saved to local SQLite database
|
||||||
|
/// - `NetworkedEntity` - Entity state synchronized across network peers
|
||||||
|
///
|
||||||
|
/// Most entities will have both components for full durability and sync.
|
||||||
|
///
|
||||||
|
/// # Network Identity
|
||||||
|
///
|
||||||
|
/// Each networked entity has:
|
||||||
|
/// - `network_id` - Globally unique UUID for this entity across all peers
|
||||||
|
/// - `owner_node_id` - Node that originally created this entity
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkedEntity;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// fn spawn_networked_entity(mut commands: Commands) {
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// commands.spawn((
|
||||||
|
/// NetworkedEntity::new(node_id),
|
||||||
|
/// Transform::default(),
|
||||||
|
/// ));
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Component, Reflect, Debug, Clone, Serialize, Deserialize)]
|
||||||
|
#[reflect(Component)]
|
||||||
|
pub struct NetworkedEntity {
|
||||||
|
/// Globally unique network ID for this entity
|
||||||
|
///
|
||||||
|
/// This ID is used to identify the entity across all peers in the network.
|
||||||
|
/// When a peer receives an EntityDelta, it uses this ID to locate the
|
||||||
|
/// corresponding local entity.
|
||||||
|
pub network_id: uuid::Uuid,
|
||||||
|
|
||||||
|
/// Node that created this entity
|
||||||
|
///
|
||||||
|
/// Used for conflict resolution and ownership tracking. When two nodes
|
||||||
|
/// concurrently create entities, the owner_node_id can be used as a
|
||||||
|
/// tiebreaker.
|
||||||
|
pub owner_node_id: NodeId,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkedEntity {
|
||||||
|
/// Create a new networked entity
|
||||||
|
///
|
||||||
|
/// Generates a new random network_id and sets the owner to the specified
|
||||||
|
/// node.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::NetworkedEntity;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
/// let entity = NetworkedEntity::new(node_id);
|
||||||
|
///
|
||||||
|
/// assert_eq!(entity.owner_node_id, node_id);
|
||||||
|
/// ```
|
||||||
|
pub fn new(owner_node_id: NodeId) -> Self {
|
||||||
|
Self {
|
||||||
|
network_id: uuid::Uuid::new_v4(),
|
||||||
|
owner_node_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a networked entity with a specific network ID
|
||||||
|
///
|
||||||
|
/// Used when receiving entities from remote peers - we need to use their
|
||||||
|
/// network_id rather than generating a new one.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::NetworkedEntity;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
/// let owner_id = Uuid::new_v4();
|
||||||
|
/// let entity = NetworkedEntity::with_id(network_id, owner_id);
|
||||||
|
///
|
||||||
|
/// assert_eq!(entity.network_id, network_id);
|
||||||
|
/// assert_eq!(entity.owner_node_id, owner_id);
|
||||||
|
/// ```
|
||||||
|
pub fn with_id(network_id: uuid::Uuid, owner_node_id: NodeId) -> Self {
|
||||||
|
Self {
|
||||||
|
network_id,
|
||||||
|
owner_node_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this node owns the entity
|
||||||
|
pub fn is_owned_by(&self, node_id: NodeId) -> bool {
|
||||||
|
self.owner_node_id == node_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for NetworkedEntity {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
network_id: uuid::Uuid::new_v4(),
|
||||||
|
owner_node_id: uuid::Uuid::new_v4(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wrapper for Transform component that enables CRDT synchronization
|
||||||
|
///
|
||||||
|
/// This is a marker component used alongside Transform to indicate that
|
||||||
|
/// Transform changes should be synchronized using Last-Write-Wins semantics.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::{NetworkedEntity, NetworkedTransform};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// fn spawn_synced_transform(mut commands: Commands) {
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// commands.spawn((
|
||||||
|
/// NetworkedEntity::new(node_id),
|
||||||
|
/// Transform::default(),
|
||||||
|
/// NetworkedTransform,
|
||||||
|
/// ));
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Component, Reflect, Debug, Clone, Copy, Default)]
|
||||||
|
#[reflect(Component)]
|
||||||
|
pub struct NetworkedTransform;
|
||||||
|
|
||||||
|
/// Wrapper for a selection component using OR-Set semantics
|
||||||
|
///
|
||||||
|
/// Tracks a set of selected entity network IDs. Uses OR-Set (Observed-Remove)
|
||||||
|
/// CRDT to handle concurrent add/remove operations correctly.
|
||||||
|
///
|
||||||
|
/// # OR-Set Semantics
|
||||||
|
///
|
||||||
|
/// - Concurrent adds and removes: add wins
|
||||||
|
/// - Each add has a unique operation ID
|
||||||
|
/// - Removes reference specific add operation IDs
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::{NetworkedEntity, NetworkedSelection};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// fn create_selection(mut commands: Commands) {
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
/// let mut selection = NetworkedSelection::new();
|
||||||
|
///
|
||||||
|
/// // Add some entities to the selection
|
||||||
|
/// selection.selected_ids.insert(Uuid::new_v4());
|
||||||
|
/// selection.selected_ids.insert(Uuid::new_v4());
|
||||||
|
///
|
||||||
|
/// commands.spawn((
|
||||||
|
/// NetworkedEntity::new(node_id),
|
||||||
|
/// selection,
|
||||||
|
/// ));
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Component, Reflect, Debug, Clone, Default)]
|
||||||
|
#[reflect(Component)]
|
||||||
|
pub struct NetworkedSelection {
|
||||||
|
/// Set of selected entity network IDs
|
||||||
|
///
|
||||||
|
/// This will be synchronized using OR-Set CRDT semantics in later phases.
|
||||||
|
/// For now, it's a simple HashSet.
|
||||||
|
pub selected_ids: std::collections::HashSet<uuid::Uuid>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkedSelection {
|
||||||
|
/// Create a new empty selection
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
selected_ids: std::collections::HashSet::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add an entity to the selection
|
||||||
|
pub fn add(&mut self, entity_id: uuid::Uuid) {
|
||||||
|
self.selected_ids.insert(entity_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove an entity from the selection
|
||||||
|
pub fn remove(&mut self, entity_id: uuid::Uuid) {
|
||||||
|
self.selected_ids.remove(&entity_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if an entity is selected
|
||||||
|
pub fn contains(&self, entity_id: uuid::Uuid) -> bool {
|
||||||
|
self.selected_ids.contains(&entity_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clear all selections
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.selected_ids.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of selected entities
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.selected_ids.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the selection is empty
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.selected_ids.is_empty()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Wrapper for a drawing path component using Sequence CRDT semantics
|
||||||
|
///
|
||||||
|
/// Represents an ordered sequence of points that can be collaboratively edited.
|
||||||
|
/// Uses RGA (Replicated Growable Array) CRDT to maintain consistent ordering
|
||||||
|
/// across concurrent insertions.
|
||||||
|
///
|
||||||
|
/// # RGA Semantics
|
||||||
|
///
|
||||||
|
/// - Each point has a unique operation ID
|
||||||
|
/// - Points reference the ID of the point they're inserted after
|
||||||
|
/// - Concurrent insertions maintain consistent ordering
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::{NetworkedEntity, NetworkedDrawingPath};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// fn create_path(mut commands: Commands) {
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
/// let mut path = NetworkedDrawingPath::new();
|
||||||
|
///
|
||||||
|
/// // Add some points to the path
|
||||||
|
/// path.points.push(Vec2::new(0.0, 0.0));
|
||||||
|
/// path.points.push(Vec2::new(10.0, 10.0));
|
||||||
|
/// path.points.push(Vec2::new(20.0, 5.0));
|
||||||
|
///
|
||||||
|
/// commands.spawn((
|
||||||
|
/// NetworkedEntity::new(node_id),
|
||||||
|
/// path,
|
||||||
|
/// ));
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Component, Reflect, Debug, Clone, Default)]
|
||||||
|
#[reflect(Component)]
|
||||||
|
pub struct NetworkedDrawingPath {
|
||||||
|
/// Ordered sequence of points in the path
|
||||||
|
///
|
||||||
|
/// This will be synchronized using RGA (Sequence CRDT) semantics in later
|
||||||
|
/// phases. For now, it's a simple Vec.
|
||||||
|
pub points: Vec<Vec2>,
|
||||||
|
|
||||||
|
/// Drawing stroke color
|
||||||
|
pub color: Color,
|
||||||
|
|
||||||
|
/// Stroke width
|
||||||
|
pub width: f32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NetworkedDrawingPath {
|
||||||
|
/// Create a new empty drawing path
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
points: Vec::new(),
|
||||||
|
color: Color::BLACK,
|
||||||
|
width: 2.0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a path with a specific color and width
|
||||||
|
pub fn with_style(color: Color, width: f32) -> Self {
|
||||||
|
Self {
|
||||||
|
points: Vec::new(),
|
||||||
|
color,
|
||||||
|
width,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add a point to the end of the path
|
||||||
|
pub fn push(&mut self, point: Vec2) {
|
||||||
|
self.points.push(point);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of points in the path
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.points.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the path is empty
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.points.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clear all points from the path
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.points.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_networked_entity_new() {
        let node_id = uuid::Uuid::new_v4();
        let entity = NetworkedEntity::new(node_id);

        assert_eq!(entity.owner_node_id, node_id);
        // A freshly generated network ID must never be the nil UUID.
        assert_ne!(entity.network_id, uuid::Uuid::nil());
    }

    #[test]
    fn test_networked_entity_with_id() {
        let network_id = uuid::Uuid::new_v4();
        let owner_id = uuid::Uuid::new_v4();

        let entity = NetworkedEntity::with_id(network_id, owner_id);
        assert_eq!(entity.network_id, network_id);
        assert_eq!(entity.owner_node_id, owner_id);
    }

    #[test]
    fn test_networked_entity_is_owned_by() {
        let owner_id = uuid::Uuid::new_v4();
        let other_id = uuid::Uuid::new_v4();

        let entity = NetworkedEntity::new(owner_id);
        assert!(entity.is_owned_by(owner_id));
        assert!(!entity.is_owned_by(other_id));
    }

    #[test]
    fn test_networked_selection() {
        let mut selection = NetworkedSelection::new();
        let first = uuid::Uuid::new_v4();
        let second = uuid::Uuid::new_v4();

        assert!(selection.is_empty());

        selection.add(first);
        assert_eq!(selection.len(), 1);
        assert!(selection.contains(first));

        selection.add(second);
        assert_eq!(selection.len(), 2);
        assert!(selection.contains(second));

        selection.remove(first);
        assert_eq!(selection.len(), 1);
        assert!(!selection.contains(first));

        selection.clear();
        assert!(selection.is_empty());
    }

    #[test]
    fn test_networked_drawing_path() {
        let mut path = NetworkedDrawingPath::new();
        assert!(path.is_empty());

        path.push(Vec2::new(0.0, 0.0));
        assert_eq!(path.len(), 1);

        path.push(Vec2::new(10.0, 10.0));
        assert_eq!(path.len(), 2);

        path.clear();
        assert!(path.is_empty());
    }

    #[test]
    fn test_drawing_path_with_style() {
        let red = Color::srgb(1.0, 0.0, 0.0);
        let path = NetworkedDrawingPath::with_style(red, 5.0);

        assert_eq!(path.color, red);
        assert_eq!(path.width, 5.0);
    }
}
|
||||||
193
crates/lib/src/networking/delta_generation.rs
Normal file
193
crates/lib/src/networking/delta_generation.rs
Normal file
@@ -0,0 +1,193 @@
|
|||||||
|
//! Delta generation system for broadcasting entity changes
|
||||||
|
//!
|
||||||
|
//! This module implements the core delta generation logic that detects changed
|
||||||
|
//! entities and broadcasts EntityDelta messages.
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
change_detection::LastSyncVersions,
|
||||||
|
entity_map::NetworkEntityMap,
|
||||||
|
gossip_bridge::GossipBridge,
|
||||||
|
messages::{
|
||||||
|
EntityDelta,
|
||||||
|
SyncMessage,
|
||||||
|
VersionedMessage,
|
||||||
|
},
|
||||||
|
operation_builder::build_entity_operations,
|
||||||
|
vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
},
|
||||||
|
NetworkedEntity,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Resource wrapping our node's vector clock
|
||||||
|
///
|
||||||
|
/// This tracks the logical time for our local operations.
|
||||||
|
#[derive(Resource)]
|
||||||
|
pub struct NodeVectorClock {
|
||||||
|
pub node_id: NodeId,
|
||||||
|
pub clock: VectorClock,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl NodeVectorClock {
|
||||||
|
pub fn new(node_id: NodeId) -> Self {
|
||||||
|
Self {
|
||||||
|
node_id,
|
||||||
|
clock: VectorClock::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Increment our clock for a new operation
|
||||||
|
pub fn tick(&mut self) -> u64 {
|
||||||
|
self.clock.increment(self.node_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get current sequence number for our node
|
||||||
|
pub fn sequence(&self) -> u64 {
|
||||||
|
self.clock.get(self.node_id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// System to generate and broadcast EntityDelta messages
///
/// This system:
/// 1. Queries for Changed<NetworkedEntity>
/// 2. Serializes all components on those entities
/// 3. Builds EntityDelta messages
/// 4. Broadcasts via GossipBridge
///
/// Add this to your app to enable delta broadcasting:
///
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::generate_delta_system;
///
/// App::new()
///     .add_systems(Update, generate_delta_system);
/// ```
pub fn generate_delta_system(
    query: Query<(Entity, &NetworkedEntity), Changed<NetworkedEntity>>,
    world: &World,
    type_registry: Res<AppTypeRegistry>,
    mut node_clock: ResMut<NodeVectorClock>,
    mut last_versions: ResMut<LastSyncVersions>,
    bridge: Option<Res<GossipBridge>>,
    _entity_map: Res<NetworkEntityMap>,
    mut operation_log: Option<ResMut<crate::networking::OperationLog>>,
) {
    // No gossip bridge resource means networking isn't running; nothing to do.
    let Some(bridge) = bridge else {
        return;
    };

    // Read lock on the type registry, held for the whole loop so component
    // serialization can resolve reflected types.
    let registry = type_registry.read();

    for (entity, networked) in query.iter() {
        // Skip entities whose last synced version is already >= our current
        // sequence, to avoid rebroadcasting changes we have sent.
        // NOTE(review): current_seq is read *before* tick(), and the
        // post-broadcast update below stores this pre-tick value — confirm
        // that off-by-one is intended.
        let current_seq = node_clock.sequence();
        if !last_versions.should_sync(networked.network_id, current_seq) {
            continue;
        }

        // Advance our vector clock: this broadcast is a new local operation.
        node_clock.tick();

        // Serialize every reflected component on the entity into operations.
        // TODO: Add BlobStore support in future phases
        let operations = build_entity_operations(
            entity,
            world,
            node_clock.node_id,
            node_clock.clock.clone(),
            &registry,
            None, // blob_store - will be added in later phases
        );

        // Nothing serializable on this entity; note the clock was still
        // ticked above even though no message goes out.
        if operations.is_empty() {
            continue;
        }

        // Create EntityDelta carrying the new operations and our clock.
        let delta = EntityDelta::new(
            networked.network_id,
            node_clock.node_id,
            node_clock.clock.clone(),
            operations,
        );

        // Record in the operation log (if present) so anti-entropy can
        // replay missed deltas to lagging peers.
        if let Some(ref mut log) = operation_log {
            log.record_operation(delta.clone());
        }

        // Wrap in VersionedMessage for wire-format versioning.
        let message = VersionedMessage::new(SyncMessage::EntityDelta {
            entity_id: delta.entity_id,
            node_id: delta.node_id,
            vector_clock: delta.vector_clock.clone(),
            operations: delta.operations.clone(),
        });

        // Broadcast; only mark the entity as synced if the send succeeded,
        // so a failed send is retried on the next run.
        if let Err(e) = bridge.send(message) {
            error!("Failed to broadcast EntityDelta: {}", e);
        } else {
            debug!(
                "Broadcast EntityDelta for entity {:?} with {} operations",
                networked.network_id,
                delta.operations.len()
            );

            // Update last sync version (pre-tick value — see note above).
            last_versions.update(networked.network_id, current_seq);
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_node_vector_clock_creation() {
        let node_id = uuid::Uuid::new_v4();
        let clock = NodeVectorClock::new(node_id);

        assert_eq!(clock.node_id, node_id);
        // A brand-new clock has recorded no local operations.
        assert_eq!(clock.sequence(), 0);
    }

    #[test]
    fn test_node_vector_clock_tick() {
        let mut clock = NodeVectorClock::new(uuid::Uuid::new_v4());

        // Each tick returns the new sequence and advances the stored value.
        assert_eq!(clock.tick(), 1);
        assert_eq!(clock.sequence(), 1);

        assert_eq!(clock.tick(), 2);
        assert_eq!(clock.sequence(), 2);
    }

    #[test]
    fn test_node_vector_clock_multiple_nodes() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = NodeVectorClock::new(node1);
        let mut clock2 = NodeVectorClock::new(node2);

        clock1.tick();
        clock2.tick();
        assert_eq!(clock1.sequence(), 1);
        assert_eq!(clock2.sequence(), 1);

        // After merging, clock1 reflects both nodes' progress.
        clock1.clock.merge(&clock2.clock);
        assert_eq!(clock1.clock.get(node1), 1);
        assert_eq!(clock1.clock.get(node2), 1);
    }
}
|
||||||
438
crates/lib/src/networking/entity_map.rs
Normal file
438
crates/lib/src/networking/entity_map.rs
Normal file
@@ -0,0 +1,438 @@
|
|||||||
|
//! Bidirectional mapping between network IDs and Bevy entities
|
||||||
|
//!
|
||||||
|
//! This module provides efficient lookup in both directions:
|
||||||
|
//! - network_id → Entity (when receiving remote operations)
|
||||||
|
//! - Entity → network_id (when broadcasting local changes)
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
/// Bidirectional mapping between network IDs and Bevy entities
|
||||||
|
///
|
||||||
|
/// This resource maintains two HashMaps for O(1) lookup in both directions.
|
||||||
|
/// It's updated automatically by the networking systems when entities are
|
||||||
|
/// spawned or despawned.
|
||||||
|
///
|
||||||
|
/// # Thread Safety
|
||||||
|
///
|
||||||
|
/// This is a Bevy Resource, so it's automatically synchronized across systems.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::{NetworkEntityMap, NetworkedEntity};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// fn example_system(
|
||||||
|
/// mut map: ResMut<NetworkEntityMap>,
|
||||||
|
/// query: Query<(Entity, &NetworkedEntity)>,
|
||||||
|
/// ) {
|
||||||
|
/// // Register networked entities
|
||||||
|
/// for (entity, networked) in query.iter() {
|
||||||
|
/// map.insert(networked.network_id, entity);
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Later, look up by network ID
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
/// if let Some(entity) = map.get_entity(network_id) {
|
||||||
|
/// println!("Found entity: {:?}", entity);
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
#[derive(Resource, Default, Debug)]
pub struct NetworkEntityMap {
    /// Forward map: network ID → Bevy Entity (used when applying remote operations).
    network_id_to_entity: HashMap<uuid::Uuid, Entity>,

    /// Reverse map: Bevy Entity → network ID (used when broadcasting local changes).
    /// Kept as an exact mirror of `network_id_to_entity` by `insert`/`remove_by_*`.
    entity_to_network_id: HashMap<Entity, uuid::Uuid>,
}
|
||||||
|
|
||||||
|
impl NetworkEntityMap {
|
||||||
|
/// Create a new empty entity map
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
network_id_to_entity: HashMap::new(),
|
||||||
|
entity_to_network_id: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Insert a bidirectional mapping
|
||||||
|
///
|
||||||
|
/// If the network_id or entity already exists in the map, the old mapping
|
||||||
|
/// is removed first to maintain consistency.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkEntityMap;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// # let mut world = World::new();
|
||||||
|
/// # let entity = world.spawn_empty().id();
|
||||||
|
/// let mut map = NetworkEntityMap::new();
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// map.insert(network_id, entity);
|
||||||
|
/// assert_eq!(map.get_entity(network_id), Some(entity));
|
||||||
|
/// assert_eq!(map.get_network_id(entity), Some(network_id));
|
||||||
|
/// ```
|
||||||
|
pub fn insert(&mut self, network_id: uuid::Uuid, entity: Entity) {
|
||||||
|
// Remove old mappings if they exist
|
||||||
|
if let Some(old_entity) = self.network_id_to_entity.get(&network_id) {
|
||||||
|
self.entity_to_network_id.remove(old_entity);
|
||||||
|
}
|
||||||
|
if let Some(old_network_id) = self.entity_to_network_id.get(&entity) {
|
||||||
|
self.network_id_to_entity.remove(old_network_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Insert new mappings
|
||||||
|
self.network_id_to_entity.insert(network_id, entity);
|
||||||
|
self.entity_to_network_id.insert(entity, network_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the Bevy Entity for a network ID
|
||||||
|
///
|
||||||
|
/// Returns None if the network ID is not in the map.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkEntityMap;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// # let mut world = World::new();
|
||||||
|
/// # let entity = world.spawn_empty().id();
|
||||||
|
/// let mut map = NetworkEntityMap::new();
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// map.insert(network_id, entity);
|
||||||
|
/// assert_eq!(map.get_entity(network_id), Some(entity));
|
||||||
|
///
|
||||||
|
/// let unknown_id = Uuid::new_v4();
|
||||||
|
/// assert_eq!(map.get_entity(unknown_id), None);
|
||||||
|
/// ```
|
||||||
|
pub fn get_entity(&self, network_id: uuid::Uuid) -> Option<Entity> {
|
||||||
|
self.network_id_to_entity.get(&network_id).copied()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the network ID for a Bevy Entity
|
||||||
|
///
|
||||||
|
/// Returns None if the entity is not in the map.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkEntityMap;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// # let mut world = World::new();
|
||||||
|
/// # let entity = world.spawn_empty().id();
|
||||||
|
/// let mut map = NetworkEntityMap::new();
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// map.insert(network_id, entity);
|
||||||
|
/// assert_eq!(map.get_network_id(entity), Some(network_id));
|
||||||
|
///
|
||||||
|
/// # let unknown_entity = world.spawn_empty().id();
|
||||||
|
/// assert_eq!(map.get_network_id(unknown_entity), None);
|
||||||
|
/// ```
|
||||||
|
pub fn get_network_id(&self, entity: Entity) -> Option<uuid::Uuid> {
|
||||||
|
self.entity_to_network_id.get(&entity).copied()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove a mapping by network ID
|
||||||
|
///
|
||||||
|
/// Returns the Entity that was mapped to this network ID, if any.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkEntityMap;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// # let mut world = World::new();
|
||||||
|
/// # let entity = world.spawn_empty().id();
|
||||||
|
/// let mut map = NetworkEntityMap::new();
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// map.insert(network_id, entity);
|
||||||
|
/// assert_eq!(map.remove_by_network_id(network_id), Some(entity));
|
||||||
|
/// assert_eq!(map.get_entity(network_id), None);
|
||||||
|
/// ```
|
||||||
|
pub fn remove_by_network_id(&mut self, network_id: uuid::Uuid) -> Option<Entity> {
|
||||||
|
if let Some(entity) = self.network_id_to_entity.remove(&network_id) {
|
||||||
|
self.entity_to_network_id.remove(&entity);
|
||||||
|
Some(entity)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove a mapping by Entity
|
||||||
|
///
|
||||||
|
/// Returns the network ID that was mapped to this entity, if any.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkEntityMap;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// # let mut world = World::new();
|
||||||
|
/// # let entity = world.spawn_empty().id();
|
||||||
|
/// let mut map = NetworkEntityMap::new();
|
||||||
|
/// let network_id = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// map.insert(network_id, entity);
|
||||||
|
/// assert_eq!(map.remove_by_entity(entity), Some(network_id));
|
||||||
|
/// assert_eq!(map.get_network_id(entity), None);
|
||||||
|
/// ```
|
||||||
|
pub fn remove_by_entity(&mut self, entity: Entity) -> Option<uuid::Uuid> {
|
||||||
|
if let Some(network_id) = self.entity_to_network_id.remove(&entity) {
|
||||||
|
self.network_id_to_entity.remove(&network_id);
|
||||||
|
Some(network_id)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a network ID exists in the map
|
||||||
|
pub fn contains_network_id(&self, network_id: uuid::Uuid) -> bool {
|
||||||
|
self.network_id_to_entity.contains_key(&network_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if an entity exists in the map
|
||||||
|
pub fn contains_entity(&self, entity: Entity) -> bool {
|
||||||
|
self.entity_to_network_id.contains_key(&entity)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of mapped entities
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.network_id_to_entity.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the map is empty
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.network_id_to_entity.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clear all mappings
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.network_id_to_entity.clear();
|
||||||
|
self.entity_to_network_id.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get an iterator over all (network_id, entity) pairs
|
||||||
|
pub fn iter(&self) -> impl Iterator<Item = (&uuid::Uuid, &Entity)> {
|
||||||
|
self.network_id_to_entity.iter()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all network IDs
|
||||||
|
pub fn network_ids(&self) -> impl Iterator<Item = &uuid::Uuid> {
|
||||||
|
self.network_id_to_entity.keys()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all entities
|
||||||
|
pub fn entities(&self) -> impl Iterator<Item = &Entity> {
|
||||||
|
self.entity_to_network_id.keys()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// System to automatically register NetworkedEntity components in the map
|
||||||
|
///
|
||||||
|
/// This system runs in PostUpdate to catch newly spawned networked entities
|
||||||
|
/// and add them to the NetworkEntityMap.
|
||||||
|
///
|
||||||
|
/// Add this to your app:
|
||||||
|
/// ```no_run
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::register_networked_entities_system;
|
||||||
|
///
|
||||||
|
/// App::new()
|
||||||
|
/// .add_systems(PostUpdate, register_networked_entities_system);
|
||||||
|
/// ```
|
||||||
|
pub fn register_networked_entities_system(
|
||||||
|
mut map: ResMut<NetworkEntityMap>,
|
||||||
|
query: Query<(Entity, &crate::networking::NetworkedEntity), Added<crate::networking::NetworkedEntity>>,
|
||||||
|
) {
|
||||||
|
for (entity, networked) in query.iter() {
|
||||||
|
map.insert(networked.network_id, entity);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// System to automatically unregister despawned entities from the map
|
||||||
|
///
|
||||||
|
/// This system cleans up the NetworkEntityMap when networked entities are
|
||||||
|
/// despawned.
|
||||||
|
///
|
||||||
|
/// Add this to your app:
|
||||||
|
/// ```no_run
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::cleanup_despawned_entities_system;
|
||||||
|
///
|
||||||
|
/// App::new()
|
||||||
|
/// .add_systems(PostUpdate, cleanup_despawned_entities_system);
|
||||||
|
/// ```
|
||||||
|
pub fn cleanup_despawned_entities_system(
|
||||||
|
mut map: ResMut<NetworkEntityMap>,
|
||||||
|
mut removed: RemovedComponents<crate::networking::NetworkedEntity>,
|
||||||
|
) {
|
||||||
|
for entity in removed.read() {
|
||||||
|
map.remove_by_entity(entity);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Round trip: a pair inserted once is retrievable in both directions.
    #[test]
    fn test_insert_and_get() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);

        assert_eq!(map.get_entity(network_id), Some(entity));
        assert_eq!(map.get_network_id(entity), Some(network_id));
    }

    // Lookups on an empty map return None rather than panicking.
    #[test]
    fn test_get_nonexistent() {
        let map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        assert_eq!(map.get_entity(network_id), None);
        assert_eq!(map.get_network_id(entity), None);
    }

    // Removal by network ID clears both directions of the mapping.
    #[test]
    fn test_remove_by_network_id() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);
        assert_eq!(map.remove_by_network_id(network_id), Some(entity));
        assert_eq!(map.get_entity(network_id), None);
        assert_eq!(map.get_network_id(entity), None);
    }

    // Removal by entity clears both directions of the mapping.
    #[test]
    fn test_remove_by_entity() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);
        assert_eq!(map.remove_by_entity(entity), Some(network_id));
        assert_eq!(map.get_entity(network_id), None);
        assert_eq!(map.get_network_id(entity), None);
    }

    // contains_* reflect insertion state on both sides.
    #[test]
    fn test_contains() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        assert!(!map.contains_network_id(network_id));
        assert!(!map.contains_entity(entity));

        map.insert(network_id, entity);

        assert!(map.contains_network_id(network_id));
        assert!(map.contains_entity(entity));
    }

    // len/is_empty track the number of registered pairs.
    #[test]
    fn test_len_and_is_empty() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();

        assert!(map.is_empty());
        assert_eq!(map.len(), 0);

        let entity1 = world.spawn_empty().id();
        let id1 = uuid::Uuid::new_v4();
        map.insert(id1, entity1);

        assert!(!map.is_empty());
        assert_eq!(map.len(), 1);

        let entity2 = world.spawn_empty().id();
        let id2 = uuid::Uuid::new_v4();
        map.insert(id2, entity2);

        assert_eq!(map.len(), 2);
    }

    // clear() empties the map entirely.
    #[test]
    fn test_clear() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        map.insert(network_id, entity);
        assert_eq!(map.len(), 1);

        map.clear();
        assert!(map.is_empty());
    }

    // Re-inserting a network_id with a new entity evicts the stale pairing,
    // keeping the two internal maps consistent.
    #[test]
    fn test_insert_overwrites_old_mapping() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity1 = world.spawn_empty().id();
        let entity2 = world.spawn_empty().id();
        let network_id = uuid::Uuid::new_v4();

        // Insert first mapping
        map.insert(network_id, entity1);
        assert_eq!(map.get_entity(network_id), Some(entity1));

        // Insert same network_id with different entity
        map.insert(network_id, entity2);
        assert_eq!(map.get_entity(network_id), Some(entity2));
        assert_eq!(map.get_network_id(entity1), None); // Old mapping removed
        assert_eq!(map.len(), 1); // Still only one mapping
    }

    // iter() yields each registered pair exactly once (order unspecified).
    #[test]
    fn test_iter() {
        let mut map = NetworkEntityMap::new();
        let mut world = World::new();
        let entity1 = world.spawn_empty().id();
        let entity2 = world.spawn_empty().id();
        let id1 = uuid::Uuid::new_v4();
        let id2 = uuid::Uuid::new_v4();

        map.insert(id1, entity1);
        map.insert(id2, entity2);

        let mut count = 0;
        for (network_id, entity) in map.iter() {
            assert!(network_id == &id1 || network_id == &id2);
            assert!(entity == &entity1 || entity == &entity2);
            count += 1;
        }
        assert_eq!(count, 2);
    }
}
|
||||||
77
crates/lib/src/networking/error.rs
Normal file
77
crates/lib/src/networking/error.rs
Normal file
@@ -0,0 +1,77 @@
|
|||||||
|
//! Error types for the networking layer
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
|
|
||||||
|
/// Result type for networking operations
pub type Result<T> = std::result::Result<T, NetworkingError>;

/// Errors that can occur in the networking layer
//
// NOTE(review): if this enum is part of the crate's public API and expected to
// grow, consider `#[non_exhaustive]` so external matches stay forward-compatible
// — confirm how downstream crates consume it first.
#[derive(Debug)]
pub enum NetworkingError {
    /// Serialization error (also produced from `bincode::Error` via `From`)
    Serialization(String),

    /// Deserialization error
    Deserialization(String),

    /// Gossip error (iroh-gossip)
    Gossip(String),

    /// Blob transfer error (iroh-blobs)
    Blob(String),

    /// Entity not found in network map (carries the missing network ID)
    EntityNotFound(uuid::Uuid),

    /// Vector clock comparison failed
    VectorClockError(String),

    /// CRDT merge conflict
    MergeConflict(String),

    /// Invalid message format
    InvalidMessage(String),

    /// Authentication/security error
    SecurityError(String),

    /// Rate limit exceeded
    RateLimitExceeded,

    /// Other networking errors (also wraps persistence errors via `From`)
    Other(String),
}
|
||||||
|
|
||||||
|
impl fmt::Display for NetworkingError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
| NetworkingError::Serialization(msg) => write!(f, "Serialization error: {}", msg),
|
||||||
|
| NetworkingError::Deserialization(msg) => {
|
||||||
|
write!(f, "Deserialization error: {}", msg)
|
||||||
|
},
|
||||||
|
| NetworkingError::Gossip(msg) => write!(f, "Gossip error: {}", msg),
|
||||||
|
| NetworkingError::Blob(msg) => write!(f, "Blob transfer error: {}", msg),
|
||||||
|
| NetworkingError::EntityNotFound(id) => write!(f, "Entity not found: {}", id),
|
||||||
|
| NetworkingError::VectorClockError(msg) => write!(f, "Vector clock error: {}", msg),
|
||||||
|
| NetworkingError::MergeConflict(msg) => write!(f, "CRDT merge conflict: {}", msg),
|
||||||
|
| NetworkingError::InvalidMessage(msg) => write!(f, "Invalid message: {}", msg),
|
||||||
|
| NetworkingError::SecurityError(msg) => write!(f, "Security error: {}", msg),
|
||||||
|
| NetworkingError::RateLimitExceeded => write!(f, "Rate limit exceeded"),
|
||||||
|
| NetworkingError::Other(msg) => write!(f, "{}", msg),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::error::Error for NetworkingError {}
|
||||||
|
|
||||||
|
impl From<bincode::Error> for NetworkingError {
|
||||||
|
fn from(e: bincode::Error) -> Self {
|
||||||
|
NetworkingError::Serialization(e.to_string())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<crate::persistence::PersistenceError> for NetworkingError {
|
||||||
|
fn from(e: crate::persistence::PersistenceError) -> Self {
|
||||||
|
NetworkingError::Other(format!("Persistence error: {}", e))
|
||||||
|
}
|
||||||
|
}
|
||||||
142
crates/lib/src/networking/gossip_bridge.rs
Normal file
142
crates/lib/src/networking/gossip_bridge.rs
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
//! Async-to-sync bridge for iroh-gossip integration with Bevy
|
||||||
|
//!
|
||||||
|
//! This module provides the bridge between Bevy's synchronous ECS world and
|
||||||
|
//! iroh-gossip's async runtime. It uses channels to pass messages between the
|
||||||
|
//! async tokio tasks and Bevy systems.
|
||||||
|
//!
|
||||||
|
//! **NOTE:** This is a simplified implementation for Phase 3. Full gossip
|
||||||
|
//! integration will be completed in later phases.
|
||||||
|
|
||||||
|
use std::{
|
||||||
|
collections::VecDeque,
|
||||||
|
sync::{
|
||||||
|
Arc,
|
||||||
|
Mutex,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
error::{
|
||||||
|
NetworkingError,
|
||||||
|
Result,
|
||||||
|
},
|
||||||
|
messages::VersionedMessage,
|
||||||
|
vector_clock::NodeId,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Bevy resource wrapping the gossip bridge
|
||||||
|
///
|
||||||
|
/// This resource provides the interface between Bevy systems and the async
|
||||||
|
/// gossip network. Systems can send messages via `send()` and poll for
|
||||||
|
/// incoming messages via `try_recv()`.
|
||||||
|
#[derive(Resource, Clone)]
pub struct GossipBridge {
    /// Queue for outgoing messages. `Arc<Mutex<…>>` so clones of this bridge
    /// (e.g. one handed to an async task) share the same queue.
    outgoing: Arc<Mutex<VecDeque<VersionedMessage>>>,

    /// Queue for incoming messages, shared the same way; drained by Bevy
    /// systems via `try_recv`.
    incoming: Arc<Mutex<VecDeque<VersionedMessage>>>,

    /// Our node ID.
    pub node_id: NodeId,
}
|
||||||
|
|
||||||
|
impl GossipBridge {
|
||||||
|
/// Create a new gossip bridge
|
||||||
|
pub fn new(node_id: NodeId) -> Self {
|
||||||
|
Self {
|
||||||
|
outgoing: Arc::new(Mutex::new(VecDeque::new())),
|
||||||
|
incoming: Arc::new(Mutex::new(VecDeque::new())),
|
||||||
|
node_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Send a message to the gossip network
|
||||||
|
pub fn send(&self, message: VersionedMessage) -> Result<()> {
|
||||||
|
self.outgoing
|
||||||
|
.lock()
|
||||||
|
.map_err(|e| NetworkingError::Gossip(format!("Failed to lock outgoing queue: {}", e)))?
|
||||||
|
.push_back(message);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Try to receive a message from the gossip network
|
||||||
|
pub fn try_recv(&self) -> Option<VersionedMessage> {
|
||||||
|
self.incoming.lock().ok()?.pop_front()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get our node ID
|
||||||
|
pub fn node_id(&self) -> NodeId {
|
||||||
|
self.node_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initialize the gossip bridge
|
||||||
|
pub fn init_gossip_bridge(node_id: NodeId) -> GossipBridge {
|
||||||
|
info!("Initializing gossip bridge for node: {}", node_id);
|
||||||
|
GossipBridge::new(node_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Bevy system to broadcast outgoing messages
///
/// Currently an intentional no-op placeholder: delta generation does not exist
/// yet, so there is nothing to enqueue. The body will be filled in by a later
/// phase.
pub fn broadcast_messages_system(/* will be implemented in later phases */) {
    // This will be populated when we have delta generation
}
|
||||||
|
|
||||||
|
/// Bevy system to receive incoming messages
|
||||||
|
///
|
||||||
|
/// **Note:** This is deprecated in favor of `receive_and_apply_deltas_system`
|
||||||
|
/// which provides full CRDT merge semantics. This stub remains for backward
|
||||||
|
/// compatibility.
|
||||||
|
pub fn receive_messages_system(bridge: Option<Res<GossipBridge>>) {
|
||||||
|
let Some(bridge) = bridge else {
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Poll for incoming messages
|
||||||
|
while let Some(message) = bridge.try_recv() {
|
||||||
|
// For now, just log the message
|
||||||
|
debug!("Received message: {:?}", message.message);
|
||||||
|
|
||||||
|
// Use receive_and_apply_deltas_system for full functionality
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh bridge reports the node ID it was constructed with.
    #[test]
    fn test_gossip_bridge_creation() {
        let node_id = uuid::Uuid::new_v4();
        let bridge = GossipBridge::new(node_id);

        assert_eq!(bridge.node_id(), node_id);
    }

    // send() succeeds on an unpoisoned queue.
    #[test]
    fn test_send_message() {
        use crate::networking::SyncMessage;

        let node_id = uuid::Uuid::new_v4();
        let bridge = GossipBridge::new(node_id);

        let message = SyncMessage::JoinRequest {
            node_id,
            session_secret: None,
        };
        let versioned = VersionedMessage::new(message);

        let result = bridge.send(versioned);
        assert!(result.is_ok());
    }

    // try_recv() on an empty incoming queue yields None.
    #[test]
    fn test_try_recv_empty() {
        let node_id = uuid::Uuid::new_v4();
        let bridge = GossipBridge::new(node_id);

        assert!(bridge.try_recv().is_none());
    }
}
|
||||||
509
crates/lib/src/networking/join_protocol.rs
Normal file
509
crates/lib/src/networking/join_protocol.rs
Normal file
@@ -0,0 +1,509 @@
|
|||||||
|
//! Join protocol for new peer onboarding
|
||||||
|
//!
|
||||||
|
//! This module handles the protocol for new peers to join an existing session
|
||||||
|
//! and receive the full world state. The join flow:
|
||||||
|
//!
|
||||||
|
//! 1. New peer sends JoinRequest with node ID and optional session secret
|
||||||
|
//! 2. Existing peer validates request and responds with FullState
|
||||||
|
//! 3. New peer applies FullState to initialize local world
|
||||||
|
//! 4. New peer begins participating in delta synchronization
|
||||||
|
//!
|
||||||
|
//! **NOTE:** This is a simplified implementation for Phase 7. Full security
|
||||||
|
//! and session management will be enhanced in Phase 13.
|
||||||
|
|
||||||
|
use bevy::{
|
||||||
|
prelude::*,
|
||||||
|
reflect::TypeRegistry,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
blob_support::BlobStore,
|
||||||
|
delta_generation::NodeVectorClock,
|
||||||
|
entity_map::NetworkEntityMap,
|
||||||
|
messages::{
|
||||||
|
EntityState,
|
||||||
|
SyncMessage,
|
||||||
|
VersionedMessage,
|
||||||
|
},
|
||||||
|
GossipBridge,
|
||||||
|
NetworkedEntity,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Session secret for join authentication
|
||||||
|
///
|
||||||
|
/// In Phase 7, this is optional. Phase 13 will add full authentication.
|
||||||
|
pub type SessionSecret = Vec<u8>;
|
||||||
|
|
||||||
|
/// Build a JoinRequest message
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::build_join_request;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
/// let request = build_join_request(node_id, None);
|
||||||
|
/// ```
|
||||||
|
pub fn build_join_request(node_id: uuid::Uuid, session_secret: Option<SessionSecret>) -> VersionedMessage {
|
||||||
|
VersionedMessage::new(SyncMessage::JoinRequest {
|
||||||
|
node_id,
|
||||||
|
session_secret,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a FullState message containing all networked entities
|
||||||
|
///
|
||||||
|
/// This serializes the entire world state for a new peer. Large worlds may
|
||||||
|
/// take significant bandwidth - Phase 14 will add compression.
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// - `world`: Bevy world containing entities
|
||||||
|
/// - `query`: Query for all NetworkedEntity components
|
||||||
|
/// - `type_registry`: Type registry for serialization
|
||||||
|
/// - `node_clock`: Current node vector clock
|
||||||
|
/// - `blob_store`: Optional blob store for large components
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A FullState message ready to send to the joining peer
|
||||||
|
pub fn build_full_state(
    world: &World,
    networked_entities: &Query<(Entity, &NetworkedEntity)>,
    type_registry: &TypeRegistry,
    node_clock: &NodeVectorClock,
    blob_store: Option<&BlobStore>,
) -> VersionedMessage {
    use crate::{
        networking::{
            blob_support::create_component_data,
            messages::ComponentState,
        },
        persistence::reflection::serialize_component,
    };

    let mut entities = Vec::new();

    for (entity, networked) in networked_entities.iter() {
        let entity_ref = world.entity(entity);
        let mut components = Vec::new();

        // Iterate over all type registrations to find components present on
        // this entity (there is no per-entity component list; we probe every
        // registered reflectable type).
        for registration in type_registry.iter() {
            // Skip if no ReflectComponent data — the type is not a component
            // (or not reflectable as one).
            let Some(reflect_component) = registration.data::<ReflectComponent>() else {
                continue;
            };

            let type_path = registration.type_info().type_path();

            // Skip networked wrapper components; the receiver reconstructs
            // these itself (NetworkedEntity is spawned explicitly on apply).
            if type_path.ends_with("::NetworkedEntity")
                || type_path.ends_with("::NetworkedTransform")
                || type_path.ends_with("::NetworkedSelection")
                || type_path.ends_with("::NetworkedDrawingPath")
            {
                continue;
            }

            // Try to reflect this component from the entity; None means the
            // entity simply doesn't have it.
            if let Some(reflected) = reflect_component.reflect(entity_ref) {
                // Serialize the component. NOTE(review): serialization
                // failures are silently skipped here — the component is just
                // omitted from the snapshot.
                if let Ok(serialized) = serialize_component(reflected, type_registry) {
                    // Create component data (inline or blob): with a blob
                    // store available, large payloads may be stored as blob
                    // references; otherwise the bytes go inline.
                    let data = if let Some(store) = blob_store {
                        match create_component_data(serialized, store) {
                            Ok(d) => d,
                            // Blob store failure: drop this component from
                            // the snapshot rather than failing the whole state.
                            Err(_) => continue,
                        }
                    } else {
                        crate::networking::ComponentData::Inline(serialized)
                    };

                    components.push(ComponentState {
                        component_type: type_path.to_string(),
                        data,
                    });
                }
            }
        }

        // Every entity carries the full node clock snapshot; per-entity
        // clocks are not tracked at this phase.
        entities.push(EntityState {
            entity_id: networked.network_id,
            owner_node_id: networked.owner_node_id,
            vector_clock: node_clock.clock.clone(),
            components,
            is_deleted: false,
        });
    }

    info!(
        "Built FullState with {} entities for new peer",
        entities.len()
    );

    VersionedMessage::new(SyncMessage::FullState {
        entities,
        vector_clock: node_clock.clock.clone(),
    })
}
|
||||||
|
|
||||||
|
/// Apply a FullState message to the local world
|
||||||
|
///
|
||||||
|
/// This initializes the world for a newly joined peer by spawning all entities
|
||||||
|
/// and applying their component state.
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// - `entities`: List of entity states from FullState message
|
||||||
|
/// - `vector_clock`: Vector clock from FullState
|
||||||
|
/// - `commands`: Bevy commands for spawning entities
|
||||||
|
/// - `entity_map`: Entity map to populate
|
||||||
|
/// - `type_registry`: Type registry for deserialization
|
||||||
|
/// - `node_clock`: Our node's vector clock to update
|
||||||
|
/// - `blob_store`: Optional blob store for resolving blob references
|
||||||
|
/// - `tombstone_registry`: Optional tombstone registry for deletion tracking
|
||||||
|
pub fn apply_full_state(
|
||||||
|
entities: Vec<EntityState>,
|
||||||
|
remote_clock: crate::networking::VectorClock,
|
||||||
|
commands: &mut Commands,
|
||||||
|
entity_map: &mut NetworkEntityMap,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
node_clock: &mut NodeVectorClock,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
mut tombstone_registry: Option<&mut crate::networking::TombstoneRegistry>,
|
||||||
|
) {
|
||||||
|
use crate::{
|
||||||
|
networking::blob_support::get_component_data,
|
||||||
|
persistence::reflection::deserialize_component,
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("Applying FullState with {} entities", entities.len());
|
||||||
|
|
||||||
|
// Merge the remote vector clock
|
||||||
|
node_clock.clock.merge(&remote_clock);
|
||||||
|
|
||||||
|
// Spawn all entities and apply their state
|
||||||
|
for entity_state in entities {
|
||||||
|
// Handle deleted entities (tombstones)
|
||||||
|
if entity_state.is_deleted {
|
||||||
|
// Record tombstone
|
||||||
|
if let Some(ref mut registry) = tombstone_registry {
|
||||||
|
registry.record_deletion(
|
||||||
|
entity_state.entity_id,
|
||||||
|
entity_state.owner_node_id,
|
||||||
|
entity_state.vector_clock.clone(),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spawn entity with NetworkedEntity component
|
||||||
|
let entity = commands
|
||||||
|
.spawn(NetworkedEntity::with_id(
|
||||||
|
entity_state.entity_id,
|
||||||
|
entity_state.owner_node_id,
|
||||||
|
))
|
||||||
|
.id();
|
||||||
|
|
||||||
|
// Register in entity map
|
||||||
|
entity_map.insert(entity_state.entity_id, entity);
|
||||||
|
|
||||||
|
let num_components = entity_state.components.len();
|
||||||
|
|
||||||
|
// Apply all components
|
||||||
|
for component_state in &entity_state.components {
|
||||||
|
// Get the actual data (resolve blob if needed)
|
||||||
|
let data_bytes = match &component_state.data {
|
||||||
|
| crate::networking::ComponentData::Inline(bytes) => bytes.clone(),
|
||||||
|
| blob_ref @ crate::networking::ComponentData::BlobRef { .. } => {
|
||||||
|
if let Some(store) = blob_store {
|
||||||
|
match get_component_data(blob_ref, store) {
|
||||||
|
Ok(bytes) => bytes,
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
"Failed to retrieve blob for {}: {}",
|
||||||
|
component_state.component_type, e
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
error!(
|
||||||
|
"Blob reference for {} but no blob store available",
|
||||||
|
component_state.component_type
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Deserialize the component
|
||||||
|
let reflected = match deserialize_component(&data_bytes, type_registry) {
|
||||||
|
Ok(r) => r,
|
||||||
|
Err(e) => {
|
||||||
|
error!(
|
||||||
|
"Failed to deserialize {}: {}",
|
||||||
|
component_state.component_type, e
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get the type registration
|
||||||
|
let registration = match type_registry.get_with_type_path(&component_state.component_type)
|
||||||
|
{
|
||||||
|
Some(reg) => reg,
|
||||||
|
None => {
|
||||||
|
error!(
|
||||||
|
"Component type {} not registered",
|
||||||
|
component_state.component_type
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get ReflectComponent data
|
||||||
|
let reflect_component = match registration.data::<ReflectComponent>() {
|
||||||
|
Some(rc) => rc.clone(),
|
||||||
|
None => {
|
||||||
|
error!(
|
||||||
|
"Component type {} does not have ReflectComponent data",
|
||||||
|
component_state.component_type
|
||||||
|
);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
// Insert the component
|
||||||
|
let component_type_owned = component_state.component_type.clone();
|
||||||
|
commands.queue(move |world: &mut World| {
|
||||||
|
let type_registry_arc = {
|
||||||
|
let Some(type_registry_res) = world.get_resource::<AppTypeRegistry>() else {
|
||||||
|
error!("AppTypeRegistry not found in world");
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
type_registry_res.clone()
|
||||||
|
};
|
||||||
|
|
||||||
|
let type_registry = type_registry_arc.read();
|
||||||
|
|
||||||
|
if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
|
||||||
|
reflect_component.insert(&mut entity_mut, &*reflected, &type_registry);
|
||||||
|
debug!("Applied component {} from FullState", component_type_owned);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
"Spawned entity {:?} from FullState with {} components",
|
||||||
|
entity_state.entity_id,
|
||||||
|
num_components
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("FullState applied successfully");
|
||||||
|
}
|
||||||
|
|
||||||
|
/// System to handle JoinRequest messages
///
/// When we receive a JoinRequest, build and send a FullState response.
///
/// Add this to your app:
///
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::handle_join_requests_system;
///
/// App::new()
///     .add_systems(Update, handle_join_requests_system);
/// ```
pub fn handle_join_requests_system(
    world: &World,
    bridge: Option<Res<GossipBridge>>,
    networked_entities: Query<(Entity, &NetworkedEntity)>,
    type_registry: Res<AppTypeRegistry>,
    node_clock: Res<NodeVectorClock>,
    blob_store: Option<Res<BlobStore>>,
) {
    // No-op when networking has not been set up (bridge resource absent).
    let Some(bridge) = bridge else {
        return;
    };

    let registry = type_registry.read();
    let blob_store_ref = blob_store.as_deref();

    // Poll for incoming JoinRequest messages
    //
    // NOTE(review): `try_recv()` consumes messages from the shared queue, so
    // any non-JoinRequest message drained here is silently dropped and never
    // reaches the other handler systems. `message_dispatcher_system` exists
    // to poll once and route everything — confirm this system is not
    // registered alongside the other per-message-type systems.
    while let Some(message) = bridge.try_recv() {
        match message.message {
            | SyncMessage::JoinRequest {
                node_id,
                session_secret,
            } => {
                info!("Received JoinRequest from node {}", node_id);

                // TODO: Validate session_secret in Phase 13
                if let Some(_secret) = session_secret {
                    debug!("Session secret validation not yet implemented");
                }

                // Build full state
                let full_state = build_full_state(
                    world,
                    &networked_entities,
                    &registry,
                    &node_clock,
                    blob_store_ref,
                );

                // Send full state to joining peer
                if let Err(e) = bridge.send(full_state) {
                    error!("Failed to send FullState: {}", e);
                } else {
                    info!("Sent FullState to node {}", node_id);
                }
            }
            | _ => {
                // Not a JoinRequest, ignore (other systems handle other messages)
            }
        }
    }
}
|
||||||
|
|
||||||
|
/// System to handle FullState messages
///
/// When we receive a FullState (after sending JoinRequest), apply it to our world.
///
/// This system should run BEFORE receive_and_apply_deltas_system to ensure
/// we're fully initialized before processing deltas.
pub fn handle_full_state_system(
    mut commands: Commands,
    bridge: Option<Res<GossipBridge>>,
    mut entity_map: ResMut<NetworkEntityMap>,
    type_registry: Res<AppTypeRegistry>,
    mut node_clock: ResMut<NodeVectorClock>,
    blob_store: Option<Res<BlobStore>>,
    mut tombstone_registry: Option<ResMut<crate::networking::TombstoneRegistry>>,
) {
    // No-op when networking has not been set up (bridge resource absent).
    let Some(bridge) = bridge else {
        return;
    };

    let registry = type_registry.read();
    let blob_store_ref = blob_store.as_deref();

    // Poll for FullState messages
    //
    // NOTE(review): `try_recv()` consumes from the shared queue; messages that
    // are not FullState are dropped here, not left for other systems. See
    // `message_dispatcher_system`, which routes every message in one pass.
    while let Some(message) = bridge.try_recv() {
        match message.message {
            | SyncMessage::FullState {
                entities,
                vector_clock,
            } => {
                info!("Received FullState with {} entities", entities.len());

                apply_full_state(
                    entities,
                    vector_clock,
                    &mut commands,
                    &mut entity_map,
                    &registry,
                    &mut node_clock,
                    blob_store_ref,
                    tombstone_registry.as_deref_mut(),
                );
            }
            | _ => {
                // Not a FullState, ignore
            }
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::networking::VectorClock;

    /// A JoinRequest built without a secret carries the node ID and no
    /// session secret.
    #[test]
    fn test_build_join_request() {
        let node_id = uuid::Uuid::new_v4();
        let request = build_join_request(node_id, None);

        match request.message {
            | SyncMessage::JoinRequest {
                node_id: req_node_id,
                session_secret,
            } => {
                assert_eq!(req_node_id, node_id);
                assert!(session_secret.is_none());
            }
            | _ => panic!("Expected JoinRequest"),
        }
    }

    /// A JoinRequest built with a secret carries that secret unchanged.
    #[test]
    fn test_build_join_request_with_secret() {
        let node_id = uuid::Uuid::new_v4();
        let secret = vec![1, 2, 3, 4];
        let request = build_join_request(node_id, Some(secret.clone()));

        match request.message {
            | SyncMessage::JoinRequest {
                node_id: _,
                session_secret,
            } => {
                assert_eq!(session_secret, Some(secret));
            }
            | _ => panic!("Expected JoinRequest"),
        }
    }

    /// EntityState fields round-trip through plain struct construction.
    #[test]
    fn test_entity_state_structure() {
        let entity_id = uuid::Uuid::new_v4();
        let owner_node_id = uuid::Uuid::new_v4();

        let state = EntityState {
            entity_id,
            owner_node_id,
            vector_clock: VectorClock::new(),
            components: vec![],
            is_deleted: false,
        };

        assert_eq!(state.entity_id, entity_id);
        assert_eq!(state.owner_node_id, owner_node_id);
        assert_eq!(state.components.len(), 0);
        assert!(!state.is_deleted);
    }

    /// Applying an empty FullState merges the remote clock into ours and
    /// spawns nothing.
    #[test]
    fn test_apply_full_state_empty() {
        let node_id = uuid::Uuid::new_v4();
        let mut node_clock = NodeVectorClock::new(node_id);
        let remote_clock = VectorClock::new();

        // Create minimal setup for testing
        let mut entity_map = NetworkEntityMap::new();
        let type_registry = TypeRegistry::new();

        // Need a minimal Bevy app for Commands
        // NOTE(review): queued commands are never flushed (no `app.update()`),
        // so this test only exercises the clock merge — sufficient for an
        // empty entity list, but spawning paths are not covered here.
        let mut app = App::new();
        let mut commands = app.world_mut().commands();

        apply_full_state(
            vec![],
            remote_clock.clone(),
            &mut commands,
            &mut entity_map,
            &type_registry,
            &mut node_clock,
            None,
            None, // tombstone_registry
        );

        // Should have merged clocks
        assert_eq!(node_clock.clock, remote_clock);
    }
}
|
||||||
263
crates/lib/src/networking/merge.rs
Normal file
263
crates/lib/src/networking/merge.rs
Normal file
@@ -0,0 +1,263 @@
|
|||||||
|
//! CRDT merge logic for conflict resolution
|
||||||
|
//!
|
||||||
|
//! This module implements the merge semantics for different CRDT types:
|
||||||
|
//! - Last-Write-Wins (LWW) for simple components
|
||||||
|
//! - OR-Set for concurrent add/remove
|
||||||
|
//! - Sequence CRDT (RGA) for ordered lists
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
operations::ComponentOp,
|
||||||
|
vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Result of comparing two operations for merge
///
/// Produced by [`compare_operations_lww`]; tells the caller whether to keep
/// its local operation, apply the remote one, run a CRDT-specific merge, or
/// do nothing.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MergeDecision {
    /// The local operation wins (keep local, discard remote)
    KeepLocal,

    /// The remote operation wins (apply remote, discard local)
    ApplyRemote,

    /// Operations are concurrent, need CRDT-specific merge
    Concurrent,

    /// Operations are identical
    Equal,
}
|
||||||
|
|
||||||
|
/// Compare two operations using vector clocks to determine merge decision
|
||||||
|
///
|
||||||
|
/// This implements Last-Write-Wins (LWW) semantics with node ID tiebreaking.
|
||||||
|
///
|
||||||
|
/// # Algorithm
|
||||||
|
///
|
||||||
|
/// 1. If local happened-before remote: ApplyRemote
|
||||||
|
/// 2. If remote happened-before local: KeepLocal
|
||||||
|
/// 3. If concurrent: use node ID as tiebreaker (higher node ID wins)
|
||||||
|
/// 4. If equal: Equal
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::{VectorClock, compare_operations_lww};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node1 = Uuid::new_v4();
|
||||||
|
/// let node2 = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// let mut clock1 = VectorClock::new();
|
||||||
|
/// clock1.increment(node1);
|
||||||
|
///
|
||||||
|
/// let mut clock2 = VectorClock::new();
|
||||||
|
/// clock2.increment(node2);
|
||||||
|
///
|
||||||
|
/// // Concurrent operations use node ID as tiebreaker
|
||||||
|
/// let decision = compare_operations_lww(&clock1, node1, &clock2, node2);
|
||||||
|
/// ```
|
||||||
|
pub fn compare_operations_lww(
|
||||||
|
local_clock: &VectorClock,
|
||||||
|
local_node: NodeId,
|
||||||
|
remote_clock: &VectorClock,
|
||||||
|
remote_node: NodeId,
|
||||||
|
) -> MergeDecision {
|
||||||
|
// Check if clocks are equal
|
||||||
|
if local_clock == remote_clock && local_node == remote_node {
|
||||||
|
return MergeDecision::Equal;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check happens-before relationship
|
||||||
|
if local_clock.happened_before(remote_clock) {
|
||||||
|
return MergeDecision::ApplyRemote;
|
||||||
|
}
|
||||||
|
|
||||||
|
if remote_clock.happened_before(local_clock) {
|
||||||
|
return MergeDecision::KeepLocal;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Concurrent operations - use node ID as tiebreaker
|
||||||
|
// Higher node ID wins for deterministic resolution
|
||||||
|
if remote_node > local_node {
|
||||||
|
MergeDecision::ApplyRemote
|
||||||
|
} else if local_node > remote_node {
|
||||||
|
MergeDecision::KeepLocal
|
||||||
|
} else {
|
||||||
|
MergeDecision::Concurrent
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Determine if a remote Set operation should be applied
///
/// This is a convenience wrapper around `compare_operations_lww` for Set
/// operations specifically. Returns `false` if either operation is not a
/// `Set`, or if the payloads are already identical.
pub fn should_apply_set(local_op: &ComponentOp, remote_op: &ComponentOp) -> bool {
    // Extract vector clocks and node IDs
    let (local_clock, local_data) = match local_op {
        | ComponentOp::Set {
            vector_clock, data, ..
        } => (vector_clock, data),
        | _ => return false,
    };

    let (remote_clock, remote_data) = match remote_op {
        | ComponentOp::Set {
            vector_clock, data, ..
        } => (vector_clock, data),
        | _ => return false,
    };

    // If data is identical, no need to apply
    if local_data == remote_data {
        return false;
    }

    // Use the sequence number from the clocks as a simple tiebreaker
    // In a real implementation, we'd use the full node IDs
    let local_seq: u64 = local_clock.clocks.values().sum();
    let remote_seq: u64 = remote_clock.clocks.values().sum();

    // Compare clocks
    //
    // NOTE(review): both node IDs are passed as nil, so the node-ID tiebreak
    // inside compare_operations_lww never fires and every concurrent pair
    // falls through to the clock-sum comparison below. When two concurrent
    // Sets have *equal* clock sums but different data, each replica returns
    // `false` and keeps its own value — the replicas diverge. Plumb the real
    // node IDs (or another deterministic total order over the operations)
    // through here to guarantee convergence.
    match compare_operations_lww(
        local_clock,
        uuid::Uuid::nil(), // Simplified - would use actual node IDs
        remote_clock,
        uuid::Uuid::nil(),
    ) {
        | MergeDecision::ApplyRemote => true,
        | MergeDecision::KeepLocal => false,
        | MergeDecision::Concurrent => remote_seq > local_seq,
        | MergeDecision::Equal => false,
    }
}
|
||||||
|
|
||||||
|
/// Log a merge conflict for debugging
///
/// This helps track when concurrent operations occur and how they're resolved.
/// Emits a single `info!` record with the component type path, both vector
/// clocks, and the [`MergeDecision`] that was taken.
pub fn log_merge_conflict(
    component_type: &str,
    local_clock: &VectorClock,
    remote_clock: &VectorClock,
    decision: MergeDecision,
) {
    info!(
        "Merge conflict on {}: local={:?}, remote={:?}, decision={:?}",
        component_type, local_clock, remote_clock, decision
    );
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;
    use crate::networking::messages::ComponentData;

    /// Causally ordered clocks resolve to the causally-later side,
    /// regardless of node IDs.
    #[test]
    fn test_lww_happened_before() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);

        // clock2 dominates clock1 on node1's counter.
        let mut clock2 = VectorClock::new();
        clock2.increment(node1);
        clock2.increment(node1);

        let decision = compare_operations_lww(&clock1, node1, &clock2, node2);
        assert_eq!(decision, MergeDecision::ApplyRemote);

        let decision = compare_operations_lww(&clock2, node1, &clock1, node2);
        assert_eq!(decision, MergeDecision::KeepLocal);
    }

    /// Concurrent clocks (neither dominates) must still resolve one way or
    /// the other via the node-ID tiebreak — never to Equal/Concurrent when
    /// node IDs differ.
    #[test]
    fn test_lww_concurrent() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);

        let mut clock2 = VectorClock::new();
        clock2.increment(node2);

        // Concurrent operations use node ID tiebreaker
        let decision = compare_operations_lww(&clock1, node1, &clock2, node2);

        // Should use node ID as tiebreaker
        // (which side wins depends on the randomly generated UUIDs)
        assert!(
            decision == MergeDecision::ApplyRemote || decision == MergeDecision::KeepLocal
        );
    }

    /// Identical clock and identical author means the same operation.
    #[test]
    fn test_lww_equal() {
        let node1 = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node1);

        let clock2 = clock1.clone();

        let decision = compare_operations_lww(&clock1, node1, &clock2, node1);
        assert_eq!(decision, MergeDecision::Equal);
    }

    /// Identical payloads short-circuit to "don't apply" before any clock
    /// comparison happens.
    #[test]
    fn test_should_apply_set_same_data() {
        let node_id = uuid::Uuid::new_v4();
        let mut clock = VectorClock::new();
        clock.increment(node_id);

        let data = vec![1, 2, 3];

        let op1 = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(data.clone()),
            vector_clock: clock.clone(),
        };

        let op2 = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(data.clone()),
            vector_clock: clock,
        };

        // Same data, should not apply
        assert!(!should_apply_set(&op1, &op2));
    }

    /// A causally newer Set wins in both directions of comparison.
    #[test]
    fn test_should_apply_set_newer_wins() {
        let node_id = uuid::Uuid::new_v4();

        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);

        // clock2 strictly dominates clock1.
        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);

        let op1 = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: clock1,
        };

        let op2 = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![4, 5, 6]),
            vector_clock: clock2,
        };

        // op2 is newer, should apply
        assert!(should_apply_set(&op1, &op2));

        // op1 is older, should not apply
        assert!(!should_apply_set(&op2, &op1));
    }
}
|
||||||
214
crates/lib/src/networking/message_dispatcher.rs
Normal file
214
crates/lib/src/networking/message_dispatcher.rs
Normal file
@@ -0,0 +1,214 @@
|
|||||||
|
//! Message dispatcher for efficient message routing
|
||||||
|
//!
|
||||||
|
//! This module eliminates the DRY violation and O(n²) behavior from having
|
||||||
|
//! multiple systems each polling the same message queue. Instead, a single
|
||||||
|
//! dispatcher system polls once and routes messages to appropriate handlers.
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
apply_entity_delta,
|
||||||
|
apply_full_state,
|
||||||
|
blob_support::BlobStore,
|
||||||
|
build_full_state,
|
||||||
|
build_missing_deltas,
|
||||||
|
delta_generation::NodeVectorClock,
|
||||||
|
entity_map::NetworkEntityMap,
|
||||||
|
messages::SyncMessage,
|
||||||
|
operation_log::OperationLog,
|
||||||
|
GossipBridge,
|
||||||
|
NetworkedEntity,
|
||||||
|
TombstoneRegistry,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Central message dispatcher system
///
/// This system replaces the individual message polling loops in:
/// - `receive_and_apply_deltas_system`
/// - `handle_join_requests_system`
/// - `handle_full_state_system`
/// - `handle_sync_requests_system`
/// - `handle_missing_deltas_system`
///
/// By polling the message queue once and routing to handlers, we eliminate
/// O(n²) behavior and code duplication.
///
/// NOTE(review): register EITHER this system OR the individual per-message
/// systems, never both — they drain the same queue and would steal each
/// other's messages.
///
/// # Performance
///
/// - **Before**: O(n × m) where n = messages, m = systems (~5)
/// - **After**: O(n) - each message processed exactly once
///
/// # Example
///
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::message_dispatcher_system;
///
/// App::new()
///     .add_systems(Update, message_dispatcher_system);
/// ```
pub fn message_dispatcher_system(
    world: &World,
    mut commands: Commands,
    bridge: Option<Res<GossipBridge>>,
    mut entity_map: ResMut<NetworkEntityMap>,
    type_registry: Res<AppTypeRegistry>,
    mut node_clock: ResMut<NodeVectorClock>,
    blob_store: Option<Res<BlobStore>>,
    mut tombstone_registry: Option<ResMut<TombstoneRegistry>>,
    operation_log: Option<Res<OperationLog>>,
    networked_entities: Query<(Entity, &NetworkedEntity)>,
) {
    // No-op when networking has not been set up (bridge resource absent).
    let Some(bridge) = bridge else {
        return;
    };

    let registry = type_registry.read();
    let blob_store_ref = blob_store.as_deref();

    // Poll messages once and route to appropriate handlers
    while let Some(message) = bridge.try_recv() {
        match message.message {
            // EntityDelta - apply remote operations
            | SyncMessage::EntityDelta {
                entity_id,
                node_id,
                vector_clock,
                operations,
            } => {
                // Re-pack the enum variant's fields into the standalone
                // EntityDelta struct that apply_entity_delta expects.
                let delta = crate::networking::EntityDelta {
                    entity_id,
                    node_id,
                    vector_clock,
                    operations,
                };

                debug!(
                    "Received EntityDelta for entity {:?} with {} operations",
                    delta.entity_id,
                    delta.operations.len()
                );

                apply_entity_delta(
                    &delta,
                    &mut commands,
                    &mut entity_map,
                    &registry,
                    &mut node_clock,
                    blob_store_ref,
                    tombstone_registry.as_deref_mut(),
                );
            }

            // JoinRequest - new peer joining
            | SyncMessage::JoinRequest {
                node_id,
                session_secret,
            } => {
                info!("Received JoinRequest from node {}", node_id);

                // TODO: Validate session_secret in Phase 13
                if let Some(_secret) = session_secret {
                    debug!("Session secret validation not yet implemented");
                }

                // Build and send full state
                let full_state = build_full_state(
                    world,
                    &networked_entities,
                    &registry,
                    &node_clock,
                    blob_store_ref,
                );

                if let Err(e) = bridge.send(full_state) {
                    error!("Failed to send FullState: {}", e);
                } else {
                    info!("Sent FullState to node {}", node_id);
                }
            }

            // FullState - receiving world state after join
            | SyncMessage::FullState {
                entities,
                vector_clock,
            } => {
                info!("Received FullState with {} entities", entities.len());

                apply_full_state(
                    entities,
                    vector_clock,
                    &mut commands,
                    &mut entity_map,
                    &registry,
                    &mut node_clock,
                    blob_store_ref,
                    tombstone_registry.as_deref_mut(),
                );
            }

            // SyncRequest - peer requesting missing operations
            | SyncMessage::SyncRequest {
                node_id: requesting_node,
                vector_clock: their_clock,
            } => {
                debug!("Received SyncRequest from node {}", requesting_node);

                if let Some(ref op_log) = operation_log {
                    // Find operations they're missing
                    let missing_deltas = op_log.get_all_operations_newer_than(&their_clock);

                    if !missing_deltas.is_empty() {
                        info!(
                            "Sending {} missing deltas to node {}",
                            missing_deltas.len(),
                            requesting_node
                        );

                        // Send MissingDeltas response
                        let response = build_missing_deltas(missing_deltas);
                        if let Err(e) = bridge.send(response) {
                            error!("Failed to send MissingDeltas: {}", e);
                        }
                    } else {
                        debug!("No missing deltas for node {}", requesting_node);
                    }
                } else {
                    // Anti-entropy is effectively disabled without the log;
                    // the request is dropped after logging.
                    warn!("Received SyncRequest but OperationLog resource not available");
                }
            }

            // MissingDeltas - receiving operations we requested
            | SyncMessage::MissingDeltas { deltas } => {
                info!("Received MissingDeltas with {} operations", deltas.len());

                // Apply each delta
                for delta in deltas {
                    debug!(
                        "Applying missing delta for entity {:?}",
                        delta.entity_id
                    );

                    apply_entity_delta(
                        &delta,
                        &mut commands,
                        &mut entity_map,
                        &registry,
                        &mut node_clock,
                        blob_store_ref,
                        tombstone_registry.as_deref_mut(),
                    );
                }
            }
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    /// Placeholder: merely compiling this module (and the dispatcher above)
    /// is the check — exercising the dispatcher's behavior needs a full Bevy
    /// `App` and belongs in integration tests.
    #[test]
    fn test_message_dispatcher_compiles() {}
}
|
||||||
345
crates/lib/src/networking/messages.rs
Normal file
345
crates/lib/src/networking/messages.rs
Normal file
@@ -0,0 +1,345 @@
|
|||||||
|
//! Network message types for CRDT synchronization
|
||||||
|
//!
|
||||||
|
//! This module defines the protocol messages used for distributed
|
||||||
|
//! synchronization according to RFC 0001.
|
||||||
|
|
||||||
|
use serde::{
|
||||||
|
Deserialize,
|
||||||
|
Serialize,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
operations::ComponentOp,
|
||||||
|
vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Top-level message envelope with versioning
///
/// All messages sent over the network are wrapped in this envelope to support
/// protocol version negotiation and future compatibility.
///
/// See [`VersionedMessage::CURRENT_VERSION`] for the version stamped by
/// [`VersionedMessage::new`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VersionedMessage {
    /// Protocol version (currently 1)
    pub version: u32,

    /// The actual sync message
    pub message: SyncMessage,
}
|
||||||
|
|
||||||
|
impl VersionedMessage {
|
||||||
|
/// Current protocol version
|
||||||
|
pub const CURRENT_VERSION: u32 = 1;
|
||||||
|
|
||||||
|
/// Create a new versioned message with the current protocol version
|
||||||
|
pub fn new(message: SyncMessage) -> Self {
|
||||||
|
Self {
|
||||||
|
version: Self::CURRENT_VERSION,
|
||||||
|
message,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// CRDT synchronization protocol messages
///
/// These messages implement the sync protocol defined in RFC 0001.
///
/// # Protocol Flow
///
/// 1. **Join**: New peer sends `JoinRequest`, receives `FullState`
/// 2. **Normal Operation**: Peers broadcast `EntityDelta` on changes
/// 3. **Anti-Entropy**: Periodic `SyncRequest` to detect missing operations
/// 4. **Recovery**: `MissingDeltas` sent in response to `SyncRequest`
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum SyncMessage {
    /// Request to join the network and receive full state
    ///
    /// Sent by a new peer when it first connects. The response will be a
    /// `FullState` message containing all entities and their components.
    JoinRequest {
        /// ID of the node requesting to join
        node_id: NodeId,

        /// Optional session secret for authentication
        /// (not yet validated — see the Phase 13 TODO in the handlers)
        session_secret: Option<Vec<u8>>,
    },

    /// Complete world state sent to new peers
    ///
    /// Contains all networked entities and their components. Sent in response
    /// to a `JoinRequest`.
    FullState {
        /// All entities in the world
        entities: Vec<EntityState>,

        /// Current vector clock of the sending node
        vector_clock: VectorClock,
    },

    /// Delta update for a single entity
    ///
    /// Broadcast when a component changes. Recipients apply the operations
    /// using CRDT merge semantics.
    EntityDelta {
        /// Network ID of the entity being updated
        entity_id: uuid::Uuid,

        /// Node that generated this delta
        node_id: NodeId,

        /// Vector clock at the time this delta was created
        vector_clock: VectorClock,

        /// Component operations (Set, SetAdd, SequenceInsert, etc.)
        operations: Vec<ComponentOp>,
    },

    /// Request for operations newer than our vector clock
    ///
    /// Sent periodically for anti-entropy. The recipient compares vector
    /// clocks and sends `MissingDeltas` if they have newer operations.
    SyncRequest {
        /// ID of the node requesting sync
        node_id: NodeId,

        /// Our current vector clock
        vector_clock: VectorClock,
    },

    /// Operations that the recipient is missing
    ///
    /// Sent in response to `SyncRequest` when we have operations the peer
    /// doesn't know about yet.
    MissingDeltas {
        /// Entity deltas that the recipient is missing
        deltas: Vec<EntityDelta>,
    },
}
|
||||||
|
|
||||||
|
/// Complete state of a single entity
///
/// Used in `FullState` messages to transfer all components of an entity.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityState {
    /// Network ID of the entity
    pub entity_id: uuid::Uuid,

    /// Node that originally created this entity
    pub owner_node_id: NodeId,

    /// Vector clock when this entity was last updated
    pub vector_clock: VectorClock,

    /// All components on this entity
    /// (empty when `is_deleted` is set — a tombstone carries no components)
    pub components: Vec<ComponentState>,

    /// Whether this entity has been deleted (tombstone)
    pub is_deleted: bool,
}
|
||||||
|
|
||||||
|
/// State of a single component
///
/// Contains the component type and its serialized data.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComponentState {
    /// Type path of the component (e.g., "bevy_transform::components::Transform"),
    /// used to look the type up in the `AppTypeRegistry` when applying state
    pub component_type: String,

    /// Serialized component data (bincode), inline or as a blob reference
    pub data: ComponentData,
}
|
||||||
|
|
||||||
|
/// Component data - either inline or a blob reference
///
/// Components strictly larger than 64KB are stored as blobs and referenced
/// by hash (see `ComponentData::BLOB_THRESHOLD` and `ComponentData::new`).
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum ComponentData {
    /// Inline data for small components (≤64KB; the threshold check in
    /// `new` is strict-greater, so exactly 64KB is still inline)
    Inline(Vec<u8>),

    /// Reference to a blob for large components (>64KB)
    BlobRef {
        /// iroh-blobs hash
        /// (empty until the payload has actually been uploaded — see
        /// the note on `ComponentData::new`)
        hash: Vec<u8>,

        /// Size of the blob in bytes
        size: u64,
    },
}
|
||||||
|
|
||||||
|
impl ComponentData {
|
||||||
|
/// Threshold for using blobs vs inline data (64KB)
|
||||||
|
pub const BLOB_THRESHOLD: usize = 64 * 1024;
|
||||||
|
|
||||||
|
/// Create component data, automatically choosing inline vs blob
|
||||||
|
pub fn new(data: Vec<u8>) -> Self {
|
||||||
|
if data.len() > Self::BLOB_THRESHOLD {
|
||||||
|
// Will be populated later when uploaded to iroh-blobs
|
||||||
|
Self::BlobRef {
|
||||||
|
hash: Vec::new(),
|
||||||
|
size: data.len() as u64,
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
Self::Inline(data)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this is a blob reference
|
||||||
|
pub fn is_blob(&self) -> bool {
|
||||||
|
matches!(self, ComponentData::BlobRef { .. })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get inline data, if available
|
||||||
|
pub fn as_inline(&self) -> Option<&[u8]> {
|
||||||
|
match self {
|
||||||
|
| ComponentData::Inline(data) => Some(data),
|
||||||
|
| _ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get blob reference, if this is a blob
|
||||||
|
pub fn as_blob_ref(&self) -> Option<(&[u8], u64)> {
|
||||||
|
match self {
|
||||||
|
| ComponentData::BlobRef { hash, size } => Some((hash, *size)),
|
||||||
|
| _ => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A single entity update: a batch of component operations plus the causal
/// context (originating node and vector clock) they were produced under.
///
/// Mirrors the fields of `SyncMessage::EntityDelta` (see
/// [`EntityDelta::into_message`]) so a delta can also be handled as a
/// standalone value — e.g. stored in the operation log for anti-entropy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EntityDelta {
    /// Network ID of the entity being updated
    pub entity_id: uuid::Uuid,

    /// Node that generated this delta
    pub node_id: NodeId,

    /// Vector clock at the time this delta was created
    pub vector_clock: VectorClock,

    /// Component operations (Set, SetAdd, SequenceInsert, etc.)
    pub operations: Vec<ComponentOp>,
}
|
||||||
|
|
||||||
|
impl EntityDelta {
|
||||||
|
/// Create a new entity delta
|
||||||
|
pub fn new(
|
||||||
|
entity_id: uuid::Uuid,
|
||||||
|
node_id: NodeId,
|
||||||
|
vector_clock: VectorClock,
|
||||||
|
operations: Vec<ComponentOp>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
entity_id,
|
||||||
|
node_id,
|
||||||
|
vector_clock,
|
||||||
|
operations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Convert to a SyncMessage::EntityDelta variant
|
||||||
|
pub fn into_message(self) -> SyncMessage {
|
||||||
|
SyncMessage::EntityDelta {
|
||||||
|
entity_id: self.entity_id,
|
||||||
|
node_id: self.node_id,
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
operations: self.operations,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // A freshly-wrapped message must carry the current protocol version.
    #[test]
    fn test_versioned_message_creation() {
        let node_id = uuid::Uuid::new_v4();
        let message = SyncMessage::JoinRequest {
            node_id,
            session_secret: None,
        };

        let versioned = VersionedMessage::new(message);
        assert_eq!(versioned.version, VersionedMessage::CURRENT_VERSION);
    }

    // Small payloads (well under BLOB_THRESHOLD) stay inline and are
    // retrievable via as_inline.
    #[test]
    fn test_component_data_inline() {
        let data = vec![1, 2, 3, 4];
        let component_data = ComponentData::new(data.clone());

        assert!(!component_data.is_blob());
        assert_eq!(component_data.as_inline(), Some(data.as_slice()));
    }

    // Payloads strictly over BLOB_THRESHOLD become blob references; the
    // inline accessor then returns None.
    #[test]
    fn test_component_data_blob() {
        // Create data larger than threshold
        let data = vec![0u8; ComponentData::BLOB_THRESHOLD + 1];
        let component_data = ComponentData::new(data.clone());

        assert!(component_data.is_blob());
        assert_eq!(component_data.as_inline(), None);
    }

    // EntityDelta::new stores its arguments verbatim.
    #[test]
    fn test_entity_delta_creation() {
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let vector_clock = VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, vector_clock.clone(), vec![]);

        assert_eq!(delta.entity_id, entity_id);
        assert_eq!(delta.node_id, node_id);
        assert_eq!(delta.vector_clock, vector_clock);
    }

    // A VersionedMessage must round-trip through bincode with its
    // version intact.
    #[test]
    fn test_message_serialization() -> bincode::Result<()> {
        let node_id = uuid::Uuid::new_v4();
        let message = SyncMessage::JoinRequest {
            node_id,
            session_secret: None,
        };

        let versioned = VersionedMessage::new(message);
        let bytes = bincode::serialize(&versioned)?;
        let deserialized: VersionedMessage = bincode::deserialize(&bytes)?;

        assert_eq!(deserialized.version, versioned.version);

        Ok(())
    }

    // A FullState message containing an EntityState must serialize and
    // deserialize without error (round-trip smoke test).
    #[test]
    fn test_full_state_serialization() -> bincode::Result<()> {
        let entity_id = uuid::Uuid::new_v4();
        let owner_node = uuid::Uuid::new_v4();

        let entity_state = EntityState {
            entity_id,
            owner_node_id: owner_node,
            vector_clock: VectorClock::new(),
            components: vec![],
            is_deleted: false,
        };

        let message = SyncMessage::FullState {
            entities: vec![entity_state],
            vector_clock: VectorClock::new(),
        };

        let bytes = bincode::serialize(&message)?;
        let _deserialized: SyncMessage = bincode::deserialize(&bytes)?;

        Ok(())
    }
}
|
||||||
71
crates/lib/src/networking/mod.rs
Normal file
71
crates/lib/src/networking/mod.rs
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
//! CRDT-based networking layer for distributed synchronization
|
||||||
|
//!
|
||||||
|
//! This module implements the networking strategy defined in RFC 0001.
|
||||||
|
//! It provides CRDT-based synchronization over iroh-gossip with support for:
|
||||||
|
//!
|
||||||
|
//! - **Entity Synchronization** - Automatic sync of NetworkedEntity components
|
||||||
|
//! - **CRDT Merge Semantics** - LWW, OR-Set, and Sequence CRDTs
|
||||||
|
//! - **Large Blob Support** - Files >64KB via iroh-blobs
|
||||||
|
//! - **Join Protocol** - New peers receive full world state
|
||||||
|
//! - **Anti-Entropy** - Periodic sync to repair network partitions
|
||||||
|
//! - **Vector Clock** - Causality tracking for distributed operations
|
||||||
|
//!
|
||||||
|
//! # Example
|
||||||
|
//!
|
||||||
|
//! ```
|
||||||
|
//! use lib::networking::*;
|
||||||
|
//! use uuid::Uuid;
|
||||||
|
//!
|
||||||
|
//! // Create a vector clock and track operations
|
||||||
|
//! let node_id = Uuid::new_v4();
|
||||||
|
//! let mut clock = VectorClock::new();
|
||||||
|
//!
|
||||||
|
//! // Increment the clock for local operations
|
||||||
|
//! clock.increment(node_id);
|
||||||
|
//!
|
||||||
|
//! // Build a component operation
|
||||||
|
//! let builder = ComponentOpBuilder::new(node_id, clock.clone());
|
||||||
|
//! let op = builder.set("Transform".to_string(), ComponentData::Inline(vec![1, 2, 3]));
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
mod apply_ops;
|
||||||
|
mod blob_support;
|
||||||
|
mod change_detection;
|
||||||
|
mod components;
|
||||||
|
mod delta_generation;
|
||||||
|
mod entity_map;
|
||||||
|
mod error;
|
||||||
|
mod gossip_bridge;
|
||||||
|
mod join_protocol;
|
||||||
|
mod merge;
|
||||||
|
mod message_dispatcher;
|
||||||
|
mod messages;
|
||||||
|
mod operation_builder;
|
||||||
|
mod operation_log;
|
||||||
|
mod operations;
|
||||||
|
mod orset;
|
||||||
|
mod plugin;
|
||||||
|
mod rga;
|
||||||
|
mod tombstones;
|
||||||
|
mod vector_clock;
|
||||||
|
|
||||||
|
pub use apply_ops::*;
|
||||||
|
pub use blob_support::*;
|
||||||
|
pub use change_detection::*;
|
||||||
|
pub use components::*;
|
||||||
|
pub use delta_generation::*;
|
||||||
|
pub use entity_map::*;
|
||||||
|
pub use error::*;
|
||||||
|
pub use gossip_bridge::*;
|
||||||
|
pub use join_protocol::*;
|
||||||
|
pub use merge::*;
|
||||||
|
pub use message_dispatcher::*;
|
||||||
|
pub use messages::*;
|
||||||
|
pub use operation_builder::*;
|
||||||
|
pub use operation_log::*;
|
||||||
|
pub use operations::*;
|
||||||
|
pub use orset::*;
|
||||||
|
pub use plugin::*;
|
||||||
|
pub use rga::*;
|
||||||
|
pub use tombstones::*;
|
||||||
|
pub use vector_clock::*;
|
||||||
251
crates/lib/src/networking/operation_builder.rs
Normal file
251
crates/lib/src/networking/operation_builder.rs
Normal file
@@ -0,0 +1,251 @@
|
|||||||
|
//! Build CRDT operations from ECS component changes
|
||||||
|
//!
|
||||||
|
//! This module provides utilities to convert Bevy component changes into
|
||||||
|
//! ComponentOp operations that can be synchronized across the network.
|
||||||
|
|
||||||
|
use bevy::{
|
||||||
|
prelude::*,
|
||||||
|
reflect::TypeRegistry,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
networking::{
|
||||||
|
blob_support::{
|
||||||
|
create_component_data,
|
||||||
|
BlobStore,
|
||||||
|
},
|
||||||
|
error::Result,
|
||||||
|
messages::ComponentData,
|
||||||
|
operations::{
|
||||||
|
ComponentOp,
|
||||||
|
ComponentOpBuilder,
|
||||||
|
},
|
||||||
|
vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
persistence::reflection::serialize_component,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Build a Set operation (LWW) from a component
|
||||||
|
///
|
||||||
|
/// Serializes the component using Bevy's reflection system and creates a
|
||||||
|
/// ComponentOp::Set for Last-Write-Wins synchronization. Automatically uses
|
||||||
|
/// blob storage for components >64KB.
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// - `component`: The component to serialize
|
||||||
|
/// - `component_type`: Type path string
|
||||||
|
/// - `node_id`: Our node ID
|
||||||
|
/// - `vector_clock`: Current vector clock
|
||||||
|
/// - `type_registry`: Bevy's type registry
|
||||||
|
/// - `blob_store`: Optional blob store for large components
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// A ComponentOp::Set ready to be broadcast
|
||||||
|
pub fn build_set_operation(
|
||||||
|
component: &dyn Reflect,
|
||||||
|
component_type: String,
|
||||||
|
node_id: NodeId,
|
||||||
|
vector_clock: VectorClock,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
) -> Result<ComponentOp> {
|
||||||
|
// Serialize the component
|
||||||
|
let serialized = serialize_component(component, type_registry)?;
|
||||||
|
|
||||||
|
// Create component data (inline or blob)
|
||||||
|
let data = if let Some(store) = blob_store {
|
||||||
|
create_component_data(serialized, store)?
|
||||||
|
} else {
|
||||||
|
ComponentData::Inline(serialized)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Build the operation
|
||||||
|
let builder = ComponentOpBuilder::new(node_id, vector_clock);
|
||||||
|
Ok(builder.set(component_type, data))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build Set operations for all components on an entity
|
||||||
|
///
|
||||||
|
/// This iterates over all components with reflection data and creates Set
|
||||||
|
/// operations for each one. Automatically uses blob storage for large components.
|
||||||
|
///
|
||||||
|
/// # Parameters
|
||||||
|
///
|
||||||
|
/// - `entity`: The entity to serialize
|
||||||
|
/// - `world`: Bevy world
|
||||||
|
/// - `node_id`: Our node ID
|
||||||
|
/// - `vector_clock`: Current vector clock
|
||||||
|
/// - `type_registry`: Bevy's type registry
|
||||||
|
/// - `blob_store`: Optional blob store for large components
|
||||||
|
///
|
||||||
|
/// # Returns
|
||||||
|
///
|
||||||
|
/// Vector of ComponentOp::Set operations, one per component
|
||||||
|
pub fn build_entity_operations(
|
||||||
|
entity: Entity,
|
||||||
|
world: &World,
|
||||||
|
node_id: NodeId,
|
||||||
|
vector_clock: VectorClock,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
) -> Vec<ComponentOp> {
|
||||||
|
let mut operations = Vec::new();
|
||||||
|
let entity_ref = world.entity(entity);
|
||||||
|
|
||||||
|
// Iterate over all type registrations
|
||||||
|
for registration in type_registry.iter() {
|
||||||
|
// Skip if no ReflectComponent data
|
||||||
|
let Some(reflect_component) = registration.data::<ReflectComponent>() else {
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get the type path
|
||||||
|
let type_path = registration.type_info().type_path();
|
||||||
|
|
||||||
|
// Skip certain components
|
||||||
|
if type_path.ends_with("::NetworkedEntity")
|
||||||
|
|| type_path.ends_with("::NetworkedTransform")
|
||||||
|
|| type_path.ends_with("::NetworkedSelection")
|
||||||
|
|| type_path.ends_with("::NetworkedDrawingPath")
|
||||||
|
{
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Try to reflect this component from the entity
|
||||||
|
if let Some(reflected) = reflect_component.reflect(entity_ref) {
|
||||||
|
// Serialize the component
|
||||||
|
if let Ok(serialized) = serialize_component(reflected, type_registry) {
|
||||||
|
// Create component data (inline or blob)
|
||||||
|
let data = if let Some(store) = blob_store {
|
||||||
|
if let Ok(component_data) = create_component_data(serialized, store) {
|
||||||
|
component_data
|
||||||
|
} else {
|
||||||
|
continue; // Skip this component if blob storage fails
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ComponentData::Inline(serialized)
|
||||||
|
};
|
||||||
|
|
||||||
|
// Build the operation
|
||||||
|
let mut clock = vector_clock.clone();
|
||||||
|
clock.increment(node_id);
|
||||||
|
|
||||||
|
operations.push(ComponentOp::Set {
|
||||||
|
component_type: type_path.to_string(),
|
||||||
|
data,
|
||||||
|
vector_clock: clock,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
operations
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a Set operation for Transform component specifically
|
||||||
|
///
|
||||||
|
/// This is a helper for the common case of synchronizing Transform changes.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::{build_transform_operation, VectorClock};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// # fn example(transform: &Transform, type_registry: &bevy::reflect::TypeRegistry) {
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
/// let clock = VectorClock::new();
|
||||||
|
///
|
||||||
|
/// let op = build_transform_operation(transform, node_id, clock, type_registry, None).unwrap();
|
||||||
|
/// # }
|
||||||
|
/// ```
|
||||||
|
pub fn build_transform_operation(
|
||||||
|
transform: &Transform,
|
||||||
|
node_id: NodeId,
|
||||||
|
vector_clock: VectorClock,
|
||||||
|
type_registry: &TypeRegistry,
|
||||||
|
blob_store: Option<&BlobStore>,
|
||||||
|
) -> Result<ComponentOp> {
|
||||||
|
// Use reflection to serialize Transform
|
||||||
|
let serialized = serialize_component(transform.as_reflect(), type_registry)?;
|
||||||
|
|
||||||
|
// Create component data (inline or blob)
|
||||||
|
let data = if let Some(store) = blob_store {
|
||||||
|
create_component_data(serialized, store)?
|
||||||
|
} else {
|
||||||
|
ComponentData::Inline(serialized)
|
||||||
|
};
|
||||||
|
|
||||||
|
let builder = ComponentOpBuilder::new(node_id, vector_clock);
|
||||||
|
Ok(builder.set("bevy_transform::components::transform::Transform".to_string(), data))
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Building a Transform op yields a Set with the full Transform type
    // path and a clock incremented once for our node.
    #[test]
    fn test_build_transform_operation() {
        let mut type_registry = TypeRegistry::new();
        type_registry.register::<Transform>();

        let transform = Transform::default();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let op = build_transform_operation(&transform, node_id, clock, &type_registry, None).unwrap();

        assert!(op.is_set());
        assert_eq!(
            op.component_type(),
            Some("bevy_transform::components::transform::Transform")
        );
        assert_eq!(op.vector_clock().get(node_id), 1);
    }

    // An entity with a registered Transform produces at least one Set op.
    #[test]
    fn test_build_entity_operations() {
        let mut world = World::new();
        let mut type_registry = TypeRegistry::new();

        // Register Transform
        type_registry.register::<Transform>();

        // Spawn entity with Transform
        let entity = world
            .spawn(Transform::from_xyz(1.0, 2.0, 3.0))
            .id();

        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let ops = build_entity_operations(entity, &world, node_id, clock, &type_registry, None);

        // Should have at least Transform operation
        assert!(!ops.is_empty());
        assert!(ops.iter().all(|op| op.is_set()));
    }

    // The op's clock reflects the clock passed in plus one increment, so
    // successive builds with an advancing clock produce increasing values.
    #[test]
    fn test_vector_clock_increment() {
        let mut type_registry = TypeRegistry::new();
        type_registry.register::<Transform>();

        let transform = Transform::default();
        let node_id = uuid::Uuid::new_v4();
        let mut clock = VectorClock::new();

        let op1 = build_transform_operation(&transform, node_id, clock.clone(), &type_registry, None).unwrap();
        assert_eq!(op1.vector_clock().get(node_id), 1);

        clock.increment(node_id);
        let op2 = build_transform_operation(&transform, node_id, clock.clone(), &type_registry, None).unwrap();
        assert_eq!(op2.vector_clock().get(node_id), 2);
    }
}
|
||||||
529
crates/lib/src/networking/operation_log.rs
Normal file
529
crates/lib/src/networking/operation_log.rs
Normal file
@@ -0,0 +1,529 @@
|
|||||||
|
//! Operation log for anti-entropy and partition recovery
|
||||||
|
//!
|
||||||
|
//! This module maintains a bounded log of recent operations for each entity,
|
||||||
|
//! enabling peers to request missing deltas after network partitions or when
|
||||||
|
//! they join late.
|
||||||
|
//!
|
||||||
|
//! The operation log:
|
||||||
|
//! - Stores EntityDelta messages for recent operations
|
||||||
|
//! - Bounded by time (keep operations from last N minutes) or size (max M ops)
|
||||||
|
//! - Allows peers to request operations newer than their vector clock
|
||||||
|
//! - Supports periodic anti-entropy sync to repair partitions
|
||||||
|
|
||||||
|
use std::collections::{
|
||||||
|
HashMap,
|
||||||
|
VecDeque,
|
||||||
|
};
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
messages::{
|
||||||
|
EntityDelta,
|
||||||
|
SyncMessage,
|
||||||
|
VersionedMessage,
|
||||||
|
},
|
||||||
|
vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
},
|
||||||
|
GossipBridge,
|
||||||
|
NodeVectorClock,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Maximum operations to keep per entity (prevents unbounded growth).
/// Enforced eagerly in `OperationLog::record_operation`.
const MAX_OPS_PER_ENTITY: usize = 100;

/// Maximum age for operations (in seconds).
/// Enforced lazily by `OperationLog::prune_old_operations`.
const MAX_OP_AGE_SECS: u64 = 300; // 5 minutes

/// Maximum number of entities to track (prevents unbounded growth).
/// When exceeded, the entity with the oldest operation is evicted.
const MAX_ENTITIES: usize = 10_000;
|
||||||
|
|
||||||
|
/// Operation log entry with timestamp
#[derive(Debug, Clone)]
struct LogEntry {
    /// The entity delta operation
    delta: EntityDelta,

    /// When this operation was recorded locally (monotonic clock, used only
    /// for age-based pruning — never compared across nodes)
    timestamp: std::time::Instant,
}
|
||||||
|
|
||||||
|
/// Resource storing the operation log for all entities
///
/// This is used for anti-entropy - peers can request operations they're missing
/// by comparing vector clocks.
///
/// # Bounded Growth
///
/// The operation log is bounded in three ways:
/// - Max operations per entity: `MAX_OPS_PER_ENTITY` (100)
/// - Max operation age: `MAX_OP_AGE_SECS` (300 seconds / 5 minutes)
/// - Max entities: `MAX_ENTITIES` (10,000)
///
/// When limits are exceeded, oldest operations/entities are pruned automatically.
#[derive(Resource)]
pub struct OperationLog {
    /// Map from entity ID to list of recent operations, oldest at the front
    logs: HashMap<uuid::Uuid, VecDeque<LogEntry>>,

    /// Total number of operations across all entities (for monitoring).
    /// Cached counter, kept in sync by record/prune — not recomputed.
    total_ops: usize,
}
|
||||||
|
|
||||||
|
impl OperationLog {
|
||||||
|
/// Create a new operation log
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
logs: HashMap::new(),
|
||||||
|
total_ops: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record an operation in the log
|
||||||
|
///
|
||||||
|
/// This should be called whenever we generate or apply an EntityDelta.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::{OperationLog, EntityDelta, VectorClock};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let mut log = OperationLog::new();
|
||||||
|
/// let entity_id = Uuid::new_v4();
|
||||||
|
/// let node_id = Uuid::new_v4();
|
||||||
|
/// let clock = VectorClock::new();
|
||||||
|
///
|
||||||
|
/// let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
|
||||||
|
/// log.record_operation(delta);
|
||||||
|
/// ```
|
||||||
|
pub fn record_operation(&mut self, delta: EntityDelta) {
|
||||||
|
// Check if we're at the entity limit
|
||||||
|
if self.logs.len() >= MAX_ENTITIES && !self.logs.contains_key(&delta.entity_id) {
|
||||||
|
// Prune oldest entity (by finding entity with oldest operation)
|
||||||
|
if let Some(oldest_entity_id) = self.find_oldest_entity() {
|
||||||
|
warn!(
|
||||||
|
"Operation log at entity limit ({}), pruning oldest entity {:?}",
|
||||||
|
MAX_ENTITIES, oldest_entity_id
|
||||||
|
);
|
||||||
|
if let Some(removed_log) = self.logs.remove(&oldest_entity_id) {
|
||||||
|
self.total_ops = self.total_ops.saturating_sub(removed_log.len());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let entry = LogEntry {
|
||||||
|
delta: delta.clone(),
|
||||||
|
timestamp: std::time::Instant::now(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let log = self.logs.entry(delta.entity_id).or_insert_with(VecDeque::new);
|
||||||
|
log.push_back(entry);
|
||||||
|
self.total_ops += 1;
|
||||||
|
|
||||||
|
// Prune if we exceed max ops per entity
|
||||||
|
while log.len() > MAX_OPS_PER_ENTITY {
|
||||||
|
log.pop_front();
|
||||||
|
self.total_ops = self.total_ops.saturating_sub(1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Find the entity with the oldest operation (for LRU eviction)
|
||||||
|
fn find_oldest_entity(&self) -> Option<uuid::Uuid> {
|
||||||
|
self.logs
|
||||||
|
.iter()
|
||||||
|
.filter_map(|(entity_id, log)| {
|
||||||
|
log.front().map(|entry| (*entity_id, entry.timestamp))
|
||||||
|
})
|
||||||
|
.min_by_key(|(_, timestamp)| *timestamp)
|
||||||
|
.map(|(entity_id, _)| entity_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get operations for an entity that are newer than a given vector clock
|
||||||
|
///
|
||||||
|
/// This is used to respond to SyncRequest messages.
|
||||||
|
pub fn get_operations_newer_than(
|
||||||
|
&self,
|
||||||
|
entity_id: uuid::Uuid,
|
||||||
|
their_clock: &VectorClock,
|
||||||
|
) -> Vec<EntityDelta> {
|
||||||
|
let Some(log) = self.logs.get(&entity_id) else {
|
||||||
|
return vec![];
|
||||||
|
};
|
||||||
|
|
||||||
|
log.iter()
|
||||||
|
.filter(|entry| {
|
||||||
|
// Include operation if they haven't seen it yet
|
||||||
|
// (their clock happened before the operation's clock)
|
||||||
|
their_clock.happened_before(&entry.delta.vector_clock)
|
||||||
|
})
|
||||||
|
.map(|entry| entry.delta.clone())
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all operations newer than a vector clock across all entities
|
||||||
|
///
|
||||||
|
/// This is used to respond to SyncRequest for the entire world state.
|
||||||
|
pub fn get_all_operations_newer_than(&self, their_clock: &VectorClock) -> Vec<EntityDelta> {
|
||||||
|
let mut deltas = Vec::new();
|
||||||
|
|
||||||
|
for (entity_id, _log) in &self.logs {
|
||||||
|
let entity_deltas = self.get_operations_newer_than(*entity_id, their_clock);
|
||||||
|
deltas.extend(entity_deltas);
|
||||||
|
}
|
||||||
|
|
||||||
|
deltas
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Prune old operations from the log
|
||||||
|
///
|
||||||
|
/// This should be called periodically to prevent unbounded growth.
|
||||||
|
/// Removes operations older than MAX_OP_AGE_SECS.
|
||||||
|
pub fn prune_old_operations(&mut self) {
|
||||||
|
let max_age = std::time::Duration::from_secs(MAX_OP_AGE_SECS);
|
||||||
|
let now = std::time::Instant::now();
|
||||||
|
|
||||||
|
let mut pruned_count = 0;
|
||||||
|
|
||||||
|
for log in self.logs.values_mut() {
|
||||||
|
let before_len = log.len();
|
||||||
|
log.retain(|entry| {
|
||||||
|
now.duration_since(entry.timestamp) < max_age
|
||||||
|
});
|
||||||
|
pruned_count += before_len - log.len();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update total_ops counter
|
||||||
|
self.total_ops = self.total_ops.saturating_sub(pruned_count);
|
||||||
|
|
||||||
|
// Remove empty logs
|
||||||
|
self.logs.retain(|_, log| !log.is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of operations in the log
|
||||||
|
pub fn total_operations(&self) -> usize {
|
||||||
|
self.total_ops
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of entities with logged operations
|
||||||
|
pub fn num_entities(&self) -> usize {
|
||||||
|
self.logs.len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for OperationLog {
    /// Equivalent to [`OperationLog::new`]: an empty log.
    fn default() -> Self {
        Self::new()
    }
}
|
||||||
|
|
||||||
|
/// Build a SyncRequest message
///
/// This asks peers to send us any operations we're missing: the peer
/// compares `vector_clock` against its operation log and replies with a
/// `MissingDeltas` message.
///
/// # Example
///
/// ```
/// use lib::networking::{build_sync_request, VectorClock};
/// use uuid::Uuid;
///
/// let node_id = Uuid::new_v4();
/// let clock = VectorClock::new();
/// let request = build_sync_request(node_id, clock);
/// ```
pub fn build_sync_request(node_id: NodeId, vector_clock: VectorClock) -> VersionedMessage {
    VersionedMessage::new(SyncMessage::SyncRequest {
        node_id,
        vector_clock,
    })
}
|
||||||
|
|
||||||
|
/// Build a MissingDeltas response
///
/// This contains operations that the requesting peer is missing, wrapped
/// in a versioned envelope.
pub fn build_missing_deltas(deltas: Vec<EntityDelta>) -> VersionedMessage {
    VersionedMessage::new(SyncMessage::MissingDeltas { deltas })
}
|
||||||
|
|
||||||
|
/// System to handle SyncRequest messages
///
/// When we receive a SyncRequest, compare vector clocks and send any
/// operations the peer is missing.
///
/// NOTE(review): this system drains `bridge.try_recv()` and silently drops
/// every message that is not a `SyncRequest`. `handle_missing_deltas_system`
/// polls the same bridge in the same way — whichever system runs first will
/// consume messages the other one (or any other handler) needed. Confirm
/// that `GossipBridge::try_recv` fans messages out per-consumer, or route
/// everything through the `message_dispatcher` module instead.
///
/// Add this to your app:
///
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::handle_sync_requests_system;
///
/// App::new()
///     .add_systems(Update, handle_sync_requests_system);
/// ```
pub fn handle_sync_requests_system(
    bridge: Option<Res<GossipBridge>>,
    operation_log: Res<OperationLog>,
) {
    // Bridge is only present once networking has been initialized.
    let Some(bridge) = bridge else {
        return;
    };

    // Poll for SyncRequest messages
    while let Some(message) = bridge.try_recv() {
        match message.message {
            | SyncMessage::SyncRequest {
                node_id: requesting_node,
                vector_clock: their_clock,
            } => {
                debug!("Received SyncRequest from node {}", requesting_node);

                // Find operations they're missing
                let missing_deltas = operation_log.get_all_operations_newer_than(&their_clock);

                if !missing_deltas.is_empty() {
                    info!(
                        "Sending {} missing deltas to node {}",
                        missing_deltas.len(),
                        requesting_node
                    );

                    // Send MissingDeltas response
                    let response = build_missing_deltas(missing_deltas);
                    if let Err(e) = bridge.send(response) {
                        error!("Failed to send MissingDeltas: {}", e);
                    }
                } else {
                    debug!("No missing deltas for node {}", requesting_node);
                }
            }
            | _ => {
                // Not a SyncRequest, ignore (the message is dropped — see
                // the NOTE(review) above)
            }
        }
    }
}
|
||||||
|
|
||||||
|
/// System to handle MissingDeltas messages
///
/// When we receive MissingDeltas (in response to our SyncRequest), apply them
/// one by one via `apply_entity_delta`.
///
/// NOTE(review): like `handle_sync_requests_system`, this drains
/// `bridge.try_recv()` and drops all non-matching messages — see the note on
/// that system about the shared-bridge message-consumption hazard.
pub fn handle_missing_deltas_system(
    mut commands: Commands,
    bridge: Option<Res<GossipBridge>>,
    mut entity_map: ResMut<crate::networking::NetworkEntityMap>,
    type_registry: Res<AppTypeRegistry>,
    mut node_clock: ResMut<NodeVectorClock>,
    blob_store: Option<Res<crate::networking::BlobStore>>,
    mut tombstone_registry: Option<ResMut<crate::networking::TombstoneRegistry>>,
) {
    // Bridge is only present once networking has been initialized.
    let Some(bridge) = bridge else {
        return;
    };

    // Acquire the registry read-guard and blob-store reference once,
    // outside the polling loop.
    let registry = type_registry.read();
    let blob_store_ref = blob_store.as_deref();

    // Poll for MissingDeltas messages
    while let Some(message) = bridge.try_recv() {
        match message.message {
            | SyncMessage::MissingDeltas { deltas } => {
                info!("Received MissingDeltas with {} operations", deltas.len());

                // Apply each delta
                for delta in deltas {
                    debug!(
                        "Applying missing delta for entity {:?}",
                        delta.entity_id
                    );

                    crate::networking::apply_entity_delta(
                        &delta,
                        &mut commands,
                        &mut entity_map,
                        &registry,
                        &mut node_clock,
                        blob_store_ref,
                        tombstone_registry.as_deref_mut(),
                    );
                }
            }
            | _ => {
                // Not MissingDeltas, ignore (the message is dropped)
            }
        }
    }
}
|
||||||
|
|
||||||
|
/// System to periodically send SyncRequest for anti-entropy
|
||||||
|
///
|
||||||
|
/// This runs every N seconds to request any operations we might be missing,
|
||||||
|
/// helping to repair network partitions.
|
||||||
|
///
|
||||||
|
/// **NOTE:** This is a simple timer-based implementation. Phase 14 will add
|
||||||
|
/// adaptive sync intervals based on network conditions.
|
||||||
|
pub fn periodic_sync_system(
|
||||||
|
bridge: Option<Res<GossipBridge>>,
|
||||||
|
node_clock: Res<NodeVectorClock>,
|
||||||
|
time: Res<Time>,
|
||||||
|
mut last_sync: Local<f32>,
|
||||||
|
) {
|
||||||
|
let Some(bridge) = bridge else {
|
||||||
|
return;
|
||||||
|
};
|
||||||
|
|
||||||
|
// Sync every 10 seconds
|
||||||
|
const SYNC_INTERVAL: f32 = 10.0;
|
||||||
|
|
||||||
|
*last_sync += time.delta_secs();
|
||||||
|
|
||||||
|
if *last_sync >= SYNC_INTERVAL {
|
||||||
|
*last_sync = 0.0;
|
||||||
|
|
||||||
|
debug!("Sending periodic SyncRequest for anti-entropy");
|
||||||
|
|
||||||
|
let request = build_sync_request(node_clock.node_id, node_clock.clock.clone());
|
||||||
|
if let Err(e) = bridge.send(request) {
|
||||||
|
error!("Failed to send SyncRequest: {}", e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// System to prune old operations from the log
|
||||||
|
///
|
||||||
|
/// This runs periodically to remove operations older than MAX_OP_AGE_SECS.
|
||||||
|
pub fn prune_operation_log_system(
|
||||||
|
mut operation_log: ResMut<OperationLog>,
|
||||||
|
time: Res<Time>,
|
||||||
|
mut last_prune: Local<f32>,
|
||||||
|
) {
|
||||||
|
// Prune every 60 seconds
|
||||||
|
const PRUNE_INTERVAL: f32 = 60.0;
|
||||||
|
|
||||||
|
*last_prune += time.delta_secs();
|
||||||
|
|
||||||
|
if *last_prune >= PRUNE_INTERVAL {
|
||||||
|
*last_prune = 0.0;
|
||||||
|
|
||||||
|
let before = operation_log.total_operations();
|
||||||
|
operation_log.prune_old_operations();
|
||||||
|
let after = operation_log.total_operations();
|
||||||
|
|
||||||
|
if before != after {
|
||||||
|
debug!(
|
||||||
|
"Pruned operation log: {} ops -> {} ops",
|
||||||
|
before, after
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // A fresh log starts with no tracked entities and no operations.
    #[test]
    fn test_operation_log_creation() {
        let log = OperationLog::new();
        assert_eq!(log.num_entities(), 0);
        assert_eq!(log.total_operations(), 0);
    }

    // Recording a single delta registers both the entity and the operation.
    #[test]
    fn test_record_operation() {
        let mut log = OperationLog::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
        log.record_operation(delta);

        assert_eq!(log.num_entities(), 1);
        assert_eq!(log.total_operations(), 1);
    }

    // Only operations strictly newer than the requester's clock are returned.
    #[test]
    fn test_get_operations_newer_than() {
        let mut log = OperationLog::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Create two operations with different clocks
        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);

        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);

        let delta1 = EntityDelta::new(entity_id, node_id, clock1.clone(), vec![]);
        let delta2 = EntityDelta::new(entity_id, node_id, clock2.clone(), vec![]);

        log.record_operation(delta1);
        log.record_operation(delta2);

        // Request with clock1 should get delta2
        let newer = log.get_operations_newer_than(entity_id, &clock1);
        assert_eq!(newer.len(), 1);
        assert_eq!(newer[0].vector_clock, clock2);

        // Request with clock2 should get nothing
        let newer = log.get_operations_newer_than(entity_id, &clock2);
        assert_eq!(newer.len(), 0);
    }

    // The per-entity log is bounded: excess operations are dropped at the cap.
    #[test]
    fn test_max_ops_per_entity() {
        let mut log = OperationLog::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Add more than MAX_OPS_PER_ENTITY operations
        for _ in 0..(MAX_OPS_PER_ENTITY + 10) {
            let mut clock = VectorClock::new();
            clock.increment(node_id);
            let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
            log.record_operation(delta);
        }

        // Should be capped at MAX_OPS_PER_ENTITY
        assert_eq!(log.total_operations(), MAX_OPS_PER_ENTITY);
    }

    // build_sync_request must echo the node id and clock it was given.
    #[test]
    fn test_build_sync_request() {
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let request = build_sync_request(node_id, clock.clone());

        match request.message {
            | SyncMessage::SyncRequest {
                node_id: req_node_id,
                vector_clock,
            } => {
                assert_eq!(req_node_id, node_id);
                assert_eq!(vector_clock, clock);
            }
            | _ => panic!("Expected SyncRequest"),
        }
    }

    // build_missing_deltas wraps the given deltas unchanged.
    #[test]
    fn test_build_missing_deltas() {
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let delta = EntityDelta::new(entity_id, node_id, clock, vec![]);
        let response = build_missing_deltas(vec![delta.clone()]);

        match response.message {
            | SyncMessage::MissingDeltas { deltas } => {
                assert_eq!(deltas.len(), 1);
                assert_eq!(deltas[0].entity_id, entity_id);
            }
            | _ => panic!("Expected MissingDeltas"),
        }
    }
}
|
||||||
388
crates/lib/src/networking/operations.rs
Normal file
388
crates/lib/src/networking/operations.rs
Normal file
@@ -0,0 +1,388 @@
|
|||||||
|
//! CRDT operations for component synchronization
|
||||||
|
//!
|
||||||
|
//! This module defines the different types of operations that can be performed
|
||||||
|
//! on components in the distributed system. Each operation type corresponds to
|
||||||
|
//! a specific CRDT merge strategy.
|
||||||
|
|
||||||
|
use serde::{
|
||||||
|
Deserialize,
|
||||||
|
Serialize,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
messages::ComponentData,
|
||||||
|
vector_clock::VectorClock,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Component operations for CRDT synchronization
///
/// Different operation types support different CRDT semantics:
///
/// - **Set** - Last-Write-Wins (LWW) using vector clocks
/// - **SetAdd/SetRemove** - OR-Set for concurrent add/remove
/// - **SequenceInsert/SequenceDelete** - RGA for ordered sequences
/// - **Delete** - Entity deletion with tombstone
///
/// Every variant carries the [`VectorClock`] captured when the operation was
/// created; see [`ComponentOp::vector_clock`].
///
/// # CRDT Merge Semantics
///
/// ## Last-Write-Wins (Set)
/// - Use vector clock to determine which operation happened later
/// - If concurrent, use node ID as tiebreaker
/// - Example: Transform component position changes
///
/// ## OR-Set (SetAdd/SetRemove)
/// - Add wins over remove when concurrent
/// - Uses unique operation IDs to track add/remove pairs
/// - Example: Selection of multiple entities, tags
///
/// ## Sequence CRDT (SequenceInsert/SequenceDelete)
/// - Maintains ordering across concurrent inserts
/// - Uses RGA (Replicated Growable Array) algorithm
/// - Example: Collaborative drawing paths
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ComponentOp {
    /// Set a component value (Last-Write-Wins)
    ///
    /// Used for components where the latest value should win. The vector clock
    /// determines which operation is "later". If operations are concurrent,
    /// the node ID is used as a tiebreaker for deterministic results.
    ///
    /// The data field can be either inline (for small components) or a blob
    /// reference (for components >64KB).
    Set {
        /// Type path of the component
        component_type: String,

        /// Component data (inline or blob reference)
        data: ComponentData,

        /// Vector clock when this set operation was created
        vector_clock: VectorClock,
    },

    /// Add an element to an OR-Set
    ///
    /// Adds an element to a set that supports concurrent add/remove. Each add
    /// has a unique ID so that removes can reference specific adds.
    SetAdd {
        /// Type path of the component
        component_type: String,

        /// Unique ID for this add operation
        operation_id: uuid::Uuid,

        /// Element being added (serialized)
        element: Vec<u8>,

        /// Vector clock when this add was created
        vector_clock: VectorClock,
    },

    /// Remove an element from an OR-Set
    ///
    /// Removes an element by referencing the add operation IDs that added it.
    /// If concurrent with an add, the add wins (observed-remove semantics).
    SetRemove {
        /// Type path of the component
        component_type: String,

        /// IDs of the add operations being removed
        removed_ids: Vec<uuid::Uuid>,

        /// Vector clock when this remove was created
        vector_clock: VectorClock,
    },

    /// Insert an element into a sequence (RGA)
    ///
    /// Inserts an element after a specific position in a sequence. Uses RGA
    /// (Replicated Growable Array) to maintain consistent ordering across
    /// concurrent inserts.
    SequenceInsert {
        /// Type path of the component
        component_type: String,

        /// Unique ID for this insert operation
        operation_id: uuid::Uuid,

        /// ID of the element to insert after (None = beginning)
        after_id: Option<uuid::Uuid>,

        /// Element being inserted (serialized)
        element: Vec<u8>,

        /// Vector clock when this insert was created
        vector_clock: VectorClock,
    },

    /// Delete an element from a sequence (RGA)
    ///
    /// Marks an element as deleted in the sequence. The element remains in the
    /// structure (tombstone) to preserve ordering for concurrent operations.
    SequenceDelete {
        /// Type path of the component
        component_type: String,

        /// ID of the element to delete
        element_id: uuid::Uuid,

        /// Vector clock when this delete was created
        vector_clock: VectorClock,
    },

    /// Delete an entire entity
    ///
    /// Marks an entity as deleted (tombstone). The entity remains in the
    /// system to prevent resurrection if old operations arrive.
    ///
    /// This is the only variant with no `component_type` — it applies to the
    /// whole entity, so [`ComponentOp::component_type`] returns `None` for it.
    Delete {
        /// Vector clock when this delete was created
        vector_clock: VectorClock,
    },
}
|
||||||
|
|
||||||
|
impl ComponentOp {
|
||||||
|
/// Get the component type for this operation
|
||||||
|
pub fn component_type(&self) -> Option<&str> {
|
||||||
|
match self {
|
||||||
|
| ComponentOp::Set { component_type, .. }
|
||||||
|
| ComponentOp::SetAdd { component_type, .. }
|
||||||
|
| ComponentOp::SetRemove { component_type, .. }
|
||||||
|
| ComponentOp::SequenceInsert { component_type, .. }
|
||||||
|
| ComponentOp::SequenceDelete { component_type, .. } => Some(component_type),
|
||||||
|
| ComponentOp::Delete { .. } => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the vector clock for this operation
|
||||||
|
pub fn vector_clock(&self) -> &VectorClock {
|
||||||
|
match self {
|
||||||
|
| ComponentOp::Set { vector_clock, .. }
|
||||||
|
| ComponentOp::SetAdd { vector_clock, .. }
|
||||||
|
| ComponentOp::SetRemove { vector_clock, .. }
|
||||||
|
| ComponentOp::SequenceInsert { vector_clock, .. }
|
||||||
|
| ComponentOp::SequenceDelete { vector_clock, .. }
|
||||||
|
| ComponentOp::Delete { vector_clock } => vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this is a Set operation (LWW)
|
||||||
|
pub fn is_set(&self) -> bool {
|
||||||
|
matches!(self, ComponentOp::Set { .. })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this is an OR-Set operation
|
||||||
|
pub fn is_or_set(&self) -> bool {
|
||||||
|
matches!(
|
||||||
|
self,
|
||||||
|
ComponentOp::SetAdd { .. } | ComponentOp::SetRemove { .. }
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this is a Sequence operation (RGA)
|
||||||
|
pub fn is_sequence(&self) -> bool {
|
||||||
|
matches!(
|
||||||
|
self,
|
||||||
|
ComponentOp::SequenceInsert { .. } | ComponentOp::SequenceDelete { .. }
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this is a Delete operation
|
||||||
|
pub fn is_delete(&self) -> bool {
|
||||||
|
matches!(self, ComponentOp::Delete { .. })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Builder for creating ComponentOp instances
///
/// Provides a fluent API for constructing operations with proper vector clock
/// timestamps. Each build method consumes the builder, increments the clock
/// for `node_id` once, and stamps the resulting operation with that clock.
pub struct ComponentOpBuilder {
    // Node on whose behalf operations are stamped.
    node_id: uuid::Uuid,
    // Clock snapshot taken at builder creation; incremented once per build.
    vector_clock: VectorClock,
}
|
||||||
|
|
||||||
|
impl ComponentOpBuilder {
|
||||||
|
/// Create a new operation builder
|
||||||
|
pub fn new(node_id: uuid::Uuid, vector_clock: VectorClock) -> Self {
|
||||||
|
Self {
|
||||||
|
node_id,
|
||||||
|
vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a Set operation (LWW)
|
||||||
|
pub fn set(mut self, component_type: String, data: ComponentData) -> ComponentOp {
|
||||||
|
self.vector_clock.increment(self.node_id);
|
||||||
|
ComponentOp::Set {
|
||||||
|
component_type,
|
||||||
|
data,
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a SetAdd operation (OR-Set)
|
||||||
|
pub fn set_add(mut self, component_type: String, element: Vec<u8>) -> ComponentOp {
|
||||||
|
self.vector_clock.increment(self.node_id);
|
||||||
|
ComponentOp::SetAdd {
|
||||||
|
component_type,
|
||||||
|
operation_id: uuid::Uuid::new_v4(),
|
||||||
|
element,
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a SetRemove operation (OR-Set)
|
||||||
|
pub fn set_remove(mut self, component_type: String, removed_ids: Vec<uuid::Uuid>) -> ComponentOp {
|
||||||
|
self.vector_clock.increment(self.node_id);
|
||||||
|
ComponentOp::SetRemove {
|
||||||
|
component_type,
|
||||||
|
removed_ids,
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a SequenceInsert operation (RGA)
|
||||||
|
pub fn sequence_insert(
|
||||||
|
mut self,
|
||||||
|
component_type: String,
|
||||||
|
after_id: Option<uuid::Uuid>,
|
||||||
|
element: Vec<u8>,
|
||||||
|
) -> ComponentOp {
|
||||||
|
self.vector_clock.increment(self.node_id);
|
||||||
|
ComponentOp::SequenceInsert {
|
||||||
|
component_type,
|
||||||
|
operation_id: uuid::Uuid::new_v4(),
|
||||||
|
after_id,
|
||||||
|
element,
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a SequenceDelete operation (RGA)
|
||||||
|
pub fn sequence_delete(mut self, component_type: String, element_id: uuid::Uuid) -> ComponentOp {
|
||||||
|
self.vector_clock.increment(self.node_id);
|
||||||
|
ComponentOp::SequenceDelete {
|
||||||
|
component_type,
|
||||||
|
element_id,
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Build a Delete operation
|
||||||
|
pub fn delete(mut self) -> ComponentOp {
|
||||||
|
self.vector_clock.increment(self.node_id);
|
||||||
|
ComponentOp::Delete {
|
||||||
|
vector_clock: self.vector_clock,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Component-scoped ops expose their component type path.
    #[test]
    fn test_component_type() {
        let op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: VectorClock::new(),
        };

        assert_eq!(op.component_type(), Some("Transform"));
    }

    // Entity-level Delete carries no component type.
    #[test]
    fn test_component_type_delete() {
        let op = ComponentOp::Delete {
            vector_clock: VectorClock::new(),
        };

        assert_eq!(op.component_type(), None);
    }

    // A Set op classifies as LWW and nothing else.
    #[test]
    fn test_is_set() {
        let op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: VectorClock::new(),
        };

        assert!(op.is_set());
        assert!(!op.is_or_set());
        assert!(!op.is_sequence());
        assert!(!op.is_delete());
    }

    // A SetAdd op classifies as OR-Set and nothing else.
    #[test]
    fn test_is_or_set() {
        let op = ComponentOp::SetAdd {
            component_type: "Selection".to_string(),
            operation_id: uuid::Uuid::new_v4(),
            element: vec![1, 2, 3],
            vector_clock: VectorClock::new(),
        };

        assert!(!op.is_set());
        assert!(op.is_or_set());
        assert!(!op.is_sequence());
        assert!(!op.is_delete());
    }

    // A SequenceInsert op classifies as a sequence op and nothing else.
    #[test]
    fn test_is_sequence() {
        let op = ComponentOp::SequenceInsert {
            component_type: "DrawingPath".to_string(),
            operation_id: uuid::Uuid::new_v4(),
            after_id: None,
            element: vec![1, 2, 3],
            vector_clock: VectorClock::new(),
        };

        assert!(!op.is_set());
        assert!(!op.is_or_set());
        assert!(op.is_sequence());
        assert!(!op.is_delete());
    }

    // The builder stamps the op with a clock incremented once for its node.
    #[test]
    fn test_builder_set() {
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let builder = ComponentOpBuilder::new(node_id, clock);
        let op = builder.set("Transform".to_string(), ComponentData::Inline(vec![1, 2, 3]));

        assert!(op.is_set());
        assert_eq!(op.vector_clock().get(node_id), 1);
    }

    // Same clock-stamping behavior for the OR-Set add path.
    #[test]
    fn test_builder_set_add() {
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        let builder = ComponentOpBuilder::new(node_id, clock);
        let op = builder.set_add("Selection".to_string(), vec![1, 2, 3]);

        assert!(op.is_or_set());
        assert_eq!(op.vector_clock().get(node_id), 1);
    }

    // Ops must round-trip through bincode unchanged.
    #[test]
    fn test_serialization() -> bincode::Result<()> {
        let op = ComponentOp::Set {
            component_type: "Transform".to_string(),
            data: ComponentData::Inline(vec![1, 2, 3]),
            vector_clock: VectorClock::new(),
        };

        let bytes = bincode::serialize(&op)?;
        let deserialized: ComponentOp = bincode::deserialize(&bytes)?;

        assert!(deserialized.is_set());

        Ok(())
    }
}
|
||||||
483
crates/lib/src/networking/orset.rs
Normal file
483
crates/lib/src/networking/orset.rs
Normal file
@@ -0,0 +1,483 @@
|
|||||||
|
//! OR-Set (Observed-Remove Set) CRDT implementation
|
||||||
|
//!
|
||||||
|
//! This module provides a conflict-free replicated set that supports concurrent
|
||||||
|
//! add and remove operations with "add-wins" semantics.
|
||||||
|
//!
|
||||||
|
//! ## OR-Set Semantics
|
||||||
|
//!
|
||||||
|
//! - **Add-wins**: If an element is concurrently added and removed, the add wins
|
||||||
|
//! - **Observed-remove**: Removes only affect adds that have been observed (happened-before)
|
||||||
|
//! - **Unique operation IDs**: Each add generates a unique ID to track add/remove pairs
|
||||||
|
//!
|
||||||
|
//! ## Example
|
||||||
|
//!
|
||||||
|
//! ```
|
||||||
|
//! use lib::networking::{OrSet, OrElement};
|
||||||
|
//! use uuid::Uuid;
|
||||||
|
//!
|
||||||
|
//! let node1 = Uuid::new_v4();
|
||||||
|
//! let node2 = Uuid::new_v4();
|
||||||
|
//!
|
||||||
|
//! // Node 1 adds "foo"
|
||||||
|
//! let mut set1: OrSet<String> = OrSet::new();
|
||||||
|
//! let (add_id, _) = set1.add("foo".to_string(), node1);
|
||||||
|
//!
|
||||||
|
//! // Node 2 concurrently adds "bar"
|
||||||
|
//! let mut set2: OrSet<String> = OrSet::new();
|
||||||
|
//! set2.add("bar".to_string(), node2);
|
||||||
|
//!
|
||||||
|
//! // Node 1 removes "foo" (observes own add)
|
||||||
|
//! set1.remove(vec![add_id]);
|
||||||
|
//!
|
||||||
|
//! // Merge sets - "bar" should be present, "foo" should be removed
|
||||||
|
//! set1.merge(&set2);
|
||||||
|
//! assert_eq!(set1.len(), 1);
|
||||||
|
//! assert!(set1.contains(&"bar".to_string()));
|
||||||
|
//! assert!(!set1.contains(&"foo".to_string()));
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
use std::collections::{
|
||||||
|
HashMap,
|
||||||
|
HashSet,
|
||||||
|
};
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
use serde::{
|
||||||
|
Deserialize,
|
||||||
|
Serialize,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::vector_clock::NodeId;
|
||||||
|
|
||||||
|
/// An element in an OR-Set with its unique operation ID
///
/// Each add operation generates a unique ID. The same logical element can have
/// multiple IDs if it's added multiple times (e.g., removed then re-added).
///
/// Note: the derived `PartialEq`/`Hash` cover all three fields, so two adds of
/// the same value are distinct `OrElement`s.
#[derive(Debug, Clone, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct OrElement<T> {
    /// The actual element value
    pub value: T,

    /// Unique ID for this add operation
    pub operation_id: uuid::Uuid,

    /// Node that performed the add
    pub adding_node: NodeId,
}
|
||||||
|
|
||||||
|
/// OR-Set (Observed-Remove Set) CRDT
///
/// A replicated set supporting concurrent add/remove with add-wins semantics.
/// This is based on the "Optimized Observed-Remove Set" algorithm.
///
/// # Type Parameters
///
/// - `T`: The element type (must be Clone, Eq, Hash, Serialize, Deserialize)
///
/// # Internal Structure
///
/// - `elements`: Map from operation_id → (value, adding_node)
/// - `tombstones`: Set of removed operation IDs
///
/// An element is "present" if it has an operation ID in `elements` that's
/// not in `tombstones`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OrSet<T> {
    /// Map from operation ID to (value, adding_node)
    elements: HashMap<uuid::Uuid, (T, NodeId)>,

    /// Set of removed operation IDs
    tombstones: HashSet<uuid::Uuid>,
}
|
||||||
|
|
||||||
|
impl<T> OrSet<T>
|
||||||
|
where
|
||||||
|
T: Clone + Eq + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
|
||||||
|
{
|
||||||
|
/// Create a new empty OR-Set
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
elements: HashMap::new(),
|
||||||
|
tombstones: HashSet::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Add an element to the set
|
||||||
|
///
|
||||||
|
/// Returns (operation_id, was_new) where was_new indicates if this value
|
||||||
|
/// wasn't already present.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::OrSet;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut set: OrSet<String> = OrSet::new();
|
||||||
|
///
|
||||||
|
/// let (id, was_new) = set.add("foo".to_string(), node);
|
||||||
|
/// assert!(was_new);
|
||||||
|
/// assert!(set.contains(&"foo".to_string()));
|
||||||
|
/// ```
|
||||||
|
pub fn add(&mut self, value: T, node_id: NodeId) -> (uuid::Uuid, bool) {
|
||||||
|
let operation_id = uuid::Uuid::new_v4();
|
||||||
|
let was_new = !self.contains(&value);
|
||||||
|
|
||||||
|
self.elements.insert(operation_id, (value, node_id));
|
||||||
|
|
||||||
|
(operation_id, was_new)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove elements by their operation IDs
|
||||||
|
///
|
||||||
|
/// This implements observed-remove semantics: only the specific add
|
||||||
|
/// operations identified by these IDs are removed.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::OrSet;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut set: OrSet<String> = OrSet::new();
|
||||||
|
///
|
||||||
|
/// let (id, _) = set.add("foo".to_string(), node);
|
||||||
|
/// assert!(set.contains(&"foo".to_string()));
|
||||||
|
///
|
||||||
|
/// set.remove(vec![id]);
|
||||||
|
/// assert!(!set.contains(&"foo".to_string()));
|
||||||
|
/// ```
|
||||||
|
pub fn remove(&mut self, operation_ids: Vec<uuid::Uuid>) {
|
||||||
|
for id in operation_ids {
|
||||||
|
self.tombstones.insert(id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if a value is present in the set
|
||||||
|
///
|
||||||
|
/// A value is present if it has at least one operation ID that's not tombstoned.
|
||||||
|
pub fn contains(&self, value: &T) -> bool {
|
||||||
|
self.elements.iter().any(|(id, (v, _))| {
|
||||||
|
v == value && !self.tombstones.contains(id)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all present values
|
||||||
|
///
|
||||||
|
/// Returns an iterator over values that are currently in the set
|
||||||
|
/// (not tombstoned).
|
||||||
|
pub fn values(&self) -> impl Iterator<Item = &T> {
|
||||||
|
self.elements
|
||||||
|
.iter()
|
||||||
|
.filter(|(id, _)| !self.tombstones.contains(id))
|
||||||
|
.map(|(_, (value, _))| value)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all operation IDs for a specific value
|
||||||
|
///
|
||||||
|
/// This is used when removing a value - we need to tombstone all its
|
||||||
|
/// operation IDs.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::OrSet;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut set: OrSet<String> = OrSet::new();
|
||||||
|
///
|
||||||
|
/// set.add("foo".to_string(), node);
|
||||||
|
/// set.add("foo".to_string(), node); // Add same value again
|
||||||
|
///
|
||||||
|
/// let ids = set.get_operation_ids(&"foo".to_string());
|
||||||
|
/// assert_eq!(ids.len(), 2); // Two operation IDs for "foo"
|
||||||
|
/// ```
|
||||||
|
pub fn get_operation_ids(&self, value: &T) -> Vec<uuid::Uuid> {
|
||||||
|
self.elements
|
||||||
|
.iter()
|
||||||
|
.filter(|(id, (v, _))| v == value && !self.tombstones.contains(id))
|
||||||
|
.map(|(id, _)| *id)
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of distinct values in the set
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
let mut seen = HashSet::new();
|
||||||
|
self.elements
|
||||||
|
.iter()
|
||||||
|
.filter(|(id, (value, _))| {
|
||||||
|
!self.tombstones.contains(id) && seen.insert(value)
|
||||||
|
})
|
||||||
|
.count()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the set is empty
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.len() == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Merge another OR-Set into this one
|
||||||
|
///
|
||||||
|
/// This implements the CRDT merge operation:
|
||||||
|
/// - Union all elements
|
||||||
|
/// - Union all tombstones
|
||||||
|
/// - Add-wins: elements not in tombstones are present
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::OrSet;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node1 = Uuid::new_v4();
|
||||||
|
/// let node2 = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// let mut set1: OrSet<String> = OrSet::new();
|
||||||
|
/// set1.add("foo".to_string(), node1);
|
||||||
|
///
|
||||||
|
/// let mut set2: OrSet<String> = OrSet::new();
|
||||||
|
/// set2.add("bar".to_string(), node2);
|
||||||
|
///
|
||||||
|
/// set1.merge(&set2);
|
||||||
|
/// assert_eq!(set1.len(), 2);
|
||||||
|
/// assert!(set1.contains(&"foo".to_string()));
|
||||||
|
/// assert!(set1.contains(&"bar".to_string()));
|
||||||
|
/// ```
|
||||||
|
pub fn merge(&mut self, other: &OrSet<T>) {
|
||||||
|
// Union elements
|
||||||
|
for (id, (value, node)) in &other.elements {
|
||||||
|
self.elements.entry(*id).or_insert_with(|| (value.clone(), *node));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Union tombstones
|
||||||
|
for id in &other.tombstones {
|
||||||
|
self.tombstones.insert(*id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clear the set
|
||||||
|
///
|
||||||
|
/// Removes all elements and tombstones.
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.elements.clear();
|
||||||
|
self.tombstones.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Garbage collect tombstoned elements
|
||||||
|
///
|
||||||
|
/// Removes elements that are tombstoned to save memory. This is safe
|
||||||
|
/// because once an operation is tombstoned, it stays tombstoned.
|
||||||
|
///
|
||||||
|
/// This should be called periodically to prevent unbounded growth.
|
||||||
|
pub fn garbage_collect(&mut self) {
|
||||||
|
self.elements.retain(|id, _| !self.tombstones.contains(id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Default for OrSet<T>
where
    T: Clone + Eq + std::hash::Hash + Serialize + for<'de> Deserialize<'de>,
{
    /// Equivalent to [`OrSet::new`]: an empty set with no tombstones.
    fn default() -> Self {
        Self::new()
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // A freshly constructed set reports empty / zero length.
    #[test]
    fn test_orset_new() {
        let set: OrSet<String> = OrSet::new();
        assert!(set.is_empty());
        assert_eq!(set.len(), 0);
    }

    // First add of a value reports `was_new == true` and makes it visible.
    #[test]
    fn test_orset_add() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        let (_, was_new) = set.add("foo".to_string(), node);
        assert!(was_new);
        assert!(set.contains(&"foo".to_string()));
        assert_eq!(set.len(), 1);
    }

    // Re-adding an existing value creates a second, distinct operation ID
    // but the set still holds a single distinct value.
    #[test]
    fn test_orset_add_duplicate() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        let (id1, was_new1) = set.add("foo".to_string(), node);
        assert!(was_new1);

        let (id2, was_new2) = set.add("foo".to_string(), node);
        assert!(!was_new2);
        assert_ne!(id1, id2); // Different operation IDs

        assert_eq!(set.len(), 1); // Still one distinct value
        let ids = set.get_operation_ids(&"foo".to_string());
        assert_eq!(ids.len(), 2); // But two operation IDs
    }

    // Removing all observed operation IDs for a value removes the value.
    #[test]
    fn test_orset_remove() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        let (id, _) = set.add("foo".to_string(), node);
        assert!(set.contains(&"foo".to_string()));

        set.remove(vec![id]);
        assert!(!set.contains(&"foo".to_string()));
        assert_eq!(set.len(), 0);
    }

    // Add -> remove -> add: the second add is a fresh operation, so the
    // value reappears (classic OR-Set re-add semantics).
    #[test]
    fn test_orset_add_remove_add() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        // Add
        let (id1, _) = set.add("foo".to_string(), node);
        assert!(set.contains(&"foo".to_string()));

        // Remove
        set.remove(vec![id1]);
        assert!(!set.contains(&"foo".to_string()));

        // Add again (new operation ID)
        let (_id2, was_new) = set.add("foo".to_string(), node);
        assert!(was_new); // It's new because we removed it
        assert!(set.contains(&"foo".to_string()));
    }

    // Merging two disjoint sets yields the union.
    #[test]
    fn test_orset_merge_simple() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut set1: OrSet<String> = OrSet::new();
        set1.add("foo".to_string(), node1);

        let mut set2: OrSet<String> = OrSet::new();
        set2.add("bar".to_string(), node2);

        set1.merge(&set2);

        assert_eq!(set1.len(), 2);
        assert!(set1.contains(&"foo".to_string()));
        assert!(set1.contains(&"bar".to_string()));
    }

    // A concurrent add (un-observed by the remover) survives the merge:
    // the OR-Set "add wins" property.
    #[test]
    fn test_orset_merge_add_wins() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut set1: OrSet<String> = OrSet::new();
        let (id, _) = set1.add("foo".to_string(), node1);
        set1.remove(vec![id]); // Remove it

        let mut set2: OrSet<String> = OrSet::new();
        set2.add("foo".to_string(), node2); // Concurrently add (different ID)

        set1.merge(&set2);

        // Add should win
        assert!(set1.contains(&"foo".to_string()));
    }

    // A remove that *observed* the add wins over it after merge.
    #[test]
    fn test_orset_merge_observed_remove() {
        let node1 = uuid::Uuid::new_v4();

        let mut set1: OrSet<String> = OrSet::new();
        let (id, _) = set1.add("foo".to_string(), node1);

        let mut set2 = set1.clone(); // set2 observes the add

        set2.remove(vec![id]); // set2 removes after observing

        set1.merge(&set2);

        // Remove should win because it observed the add
        assert!(!set1.contains(&"foo".to_string()));
    }

    // `values()` yields every distinct visible value.
    #[test]
    fn test_orset_values() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        set.add("foo".to_string(), node);
        set.add("bar".to_string(), node);
        set.add("baz".to_string(), node);

        let values: HashSet<_> = set.values().cloned().collect();
        assert_eq!(values.len(), 3);
        assert!(values.contains("foo"));
        assert!(values.contains("bar"));
        assert!(values.contains("baz"));
    }

    // GC drops tombstoned *elements* but keeps the tombstone records
    // themselves (still needed to reject stale re-deliveries).
    #[test]
    fn test_orset_garbage_collect() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        let (id1, _) = set.add("foo".to_string(), node);
        let (_id2, _) = set.add("bar".to_string(), node);

        set.remove(vec![id1]);

        // Before GC
        assert_eq!(set.elements.len(), 2);
        assert_eq!(set.tombstones.len(), 1);

        set.garbage_collect();

        // After GC - tombstoned element removed
        assert_eq!(set.elements.len(), 1);
        assert_eq!(set.tombstones.len(), 1);
        assert!(set.contains(&"bar".to_string()));
        assert!(!set.contains(&"foo".to_string()));
    }

    // `clear()` empties the visible set.
    #[test]
    fn test_orset_clear() {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        set.add("foo".to_string(), node);
        set.add("bar".to_string(), node);
        assert_eq!(set.len(), 2);

        set.clear();
        assert!(set.is_empty());
        assert_eq!(set.len(), 0);
    }

    // Round-trip through bincode preserves contents.
    #[test]
    fn test_orset_serialization() -> bincode::Result<()> {
        let node = uuid::Uuid::new_v4();
        let mut set: OrSet<String> = OrSet::new();

        set.add("foo".to_string(), node);
        set.add("bar".to_string(), node);

        let bytes = bincode::serialize(&set)?;
        let deserialized: OrSet<String> = bincode::deserialize(&bytes)?;

        assert_eq!(deserialized.len(), 2);
        assert!(deserialized.contains(&"foo".to_string()));
        assert!(deserialized.contains(&"bar".to_string()));

        Ok(())
    }
}
|
||||||
292
crates/lib/src/networking/plugin.rs
Normal file
292
crates/lib/src/networking/plugin.rs
Normal file
@@ -0,0 +1,292 @@
|
|||||||
|
//! Bevy plugin for CRDT networking
|
||||||
|
//!
|
||||||
|
//! This module provides a complete Bevy plugin that integrates all networking
|
||||||
|
//! components: delta generation, operation log, anti-entropy, join protocol,
|
||||||
|
//! tombstones, and CRDT types.
|
||||||
|
//!
|
||||||
|
//! # Quick Start
|
||||||
|
//!
|
||||||
|
//! ```no_run
|
||||||
|
//! use bevy::prelude::*;
|
||||||
|
//! use lib::networking::{NetworkingPlugin, NetworkingConfig};
|
||||||
|
//! use uuid::Uuid;
|
||||||
|
//!
|
||||||
|
//! fn main() {
|
||||||
|
//! App::new()
|
||||||
|
//! .add_plugins(DefaultPlugins)
|
||||||
|
//! .add_plugins(NetworkingPlugin::new(NetworkingConfig {
|
||||||
|
//! node_id: Uuid::new_v4(),
|
||||||
|
//! sync_interval_secs: 10.0,
|
||||||
|
//! prune_interval_secs: 60.0,
|
||||||
|
//! tombstone_gc_interval_secs: 300.0,
|
||||||
|
//! }))
|
||||||
|
//! .run();
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
change_detection::LastSyncVersions,
|
||||||
|
delta_generation::{
|
||||||
|
generate_delta_system,
|
||||||
|
NodeVectorClock,
|
||||||
|
},
|
||||||
|
entity_map::{
|
||||||
|
cleanup_despawned_entities_system,
|
||||||
|
register_networked_entities_system,
|
||||||
|
NetworkEntityMap,
|
||||||
|
},
|
||||||
|
message_dispatcher::message_dispatcher_system,
|
||||||
|
operation_log::{
|
||||||
|
periodic_sync_system,
|
||||||
|
prune_operation_log_system,
|
||||||
|
OperationLog,
|
||||||
|
},
|
||||||
|
tombstones::{
|
||||||
|
garbage_collect_tombstones_system,
|
||||||
|
handle_local_deletions_system,
|
||||||
|
TombstoneRegistry,
|
||||||
|
},
|
||||||
|
vector_clock::NodeId,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Configuration for the networking plugin
#[derive(Debug, Clone)]
pub struct NetworkingConfig {
    /// Unique ID for this node.
    ///
    /// NOTE: `NetworkingConfig::default()` generates a fresh random UUID
    /// for this field each time it is called.
    pub node_id: NodeId,

    /// How often to send SyncRequest for anti-entropy (in seconds)
    /// Default: 10.0 seconds
    pub sync_interval_secs: f32,

    /// How often to prune old operations from the log (in seconds)
    /// Default: 60.0 seconds (1 minute)
    pub prune_interval_secs: f32,

    /// How often to garbage collect tombstones (in seconds)
    /// Default: 300.0 seconds (5 minutes)
    pub tombstone_gc_interval_secs: f32,
}
|
||||||
|
|
||||||
|
impl Default for NetworkingConfig {
    /// Default intervals (10s sync, 60s prune, 300s tombstone GC).
    ///
    /// CAUTION: every call generates a *new* random node ID. Construct the
    /// config once (or set `node_id` explicitly) if a stable node identity
    /// is required across restarts.
    fn default() -> Self {
        Self {
            node_id: uuid::Uuid::new_v4(),
            sync_interval_secs: 10.0,
            prune_interval_secs: 60.0,
            tombstone_gc_interval_secs: 300.0,
        }
    }
}
|
||||||
|
|
||||||
|
/// Bevy plugin for CRDT networking
|
||||||
|
///
|
||||||
|
/// This plugin sets up all systems and resources needed for distributed
|
||||||
|
/// synchronization using CRDTs.
|
||||||
|
///
|
||||||
|
/// # Systems Added
|
||||||
|
///
|
||||||
|
/// ## PreUpdate
|
||||||
|
/// - Register newly spawned networked entities
|
||||||
|
/// - **Central message dispatcher** (handles all incoming messages efficiently)
|
||||||
|
/// - EntityDelta messages
|
||||||
|
/// - JoinRequest messages
|
||||||
|
/// - FullState messages
|
||||||
|
/// - SyncRequest messages
|
||||||
|
/// - MissingDeltas messages
|
||||||
|
///
|
||||||
|
/// ## Update
|
||||||
|
/// - Handle local entity deletions
|
||||||
|
///
|
||||||
|
/// ## PostUpdate
|
||||||
|
/// - Generate and broadcast EntityDelta for changed entities
|
||||||
|
/// - Periodic SyncRequest for anti-entropy
|
||||||
|
/// - Prune old operations from operation log
|
||||||
|
/// - Garbage collect tombstones
|
||||||
|
/// - Cleanup despawned entities from entity map
|
||||||
|
///
|
||||||
|
/// # Resources Added
|
||||||
|
///
|
||||||
|
/// - `NodeVectorClock` - This node's vector clock
|
||||||
|
/// - `NetworkEntityMap` - Bidirectional entity ID mapping
|
||||||
|
/// - `LastSyncVersions` - Change detection for entities
|
||||||
|
/// - `OperationLog` - Operation log for anti-entropy
|
||||||
|
/// - `TombstoneRegistry` - Tombstone tracking for deletions
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```no_run
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::{NetworkingPlugin, NetworkingConfig};
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// App::new()
|
||||||
|
/// .add_plugins(DefaultPlugins)
|
||||||
|
/// .add_plugins(NetworkingPlugin::new(NetworkingConfig {
|
||||||
|
/// node_id: Uuid::new_v4(),
|
||||||
|
/// ..Default::default()
|
||||||
|
/// }))
|
||||||
|
/// .run();
|
||||||
|
/// ```
|
||||||
|
pub struct NetworkingPlugin {
    /// Runtime configuration: node identity plus maintenance intervals.
    config: NetworkingConfig,
}
|
||||||
|
|
||||||
|
impl NetworkingPlugin {
|
||||||
|
/// Create a new networking plugin with custom configuration
|
||||||
|
pub fn new(config: NetworkingConfig) -> Self {
|
||||||
|
Self { config }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create a new networking plugin with default configuration
|
||||||
|
pub fn default_with_node_id(node_id: NodeId) -> Self {
|
||||||
|
Self {
|
||||||
|
config: NetworkingConfig {
|
||||||
|
node_id,
|
||||||
|
..Default::default()
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Plugin for NetworkingPlugin {
    /// Install all networking resources and schedule the systems.
    ///
    /// Schedule placement is deliberate: incoming messages are consumed in
    /// `PreUpdate`, local mutations happen in `Update`, and outgoing deltas
    /// plus maintenance run in `PostUpdate` — so every frame applies remote
    /// state before producing its own.
    fn build(&self, app: &mut App) {
        // Add resources
        app.insert_resource(NodeVectorClock::new(self.config.node_id))
            .insert_resource(NetworkEntityMap::new())
            .insert_resource(LastSyncVersions::default())
            .insert_resource(OperationLog::new())
            .insert_resource(TombstoneRegistry::new());

        // PreUpdate systems - handle incoming messages first
        app.add_systems(
            PreUpdate,
            (
                // Register new networked entities
                register_networked_entities_system,
                // Central message dispatcher - handles all incoming messages.
                // This replaces the individual message handling systems and
                // eliminates O(n²) behavior from multiple systems polling the
                // same queue.
                message_dispatcher_system,
            )
                // `.chain()` enforces ordering: entities must be registered
                // before messages referencing them are dispatched.
                .chain(),
        );

        // Update systems - handle local operations
        app.add_systems(
            Update,
            (
                // Handle local entity deletions
                handle_local_deletions_system,
            ),
        );

        // PostUpdate systems - generate and send deltas
        app.add_systems(
            PostUpdate,
            (
                // Generate deltas for changed entities
                generate_delta_system,
                // Periodic anti-entropy sync
                periodic_sync_system,
                // Maintenance tasks
                prune_operation_log_system,
                garbage_collect_tombstones_system,
                // Cleanup despawned entities
                cleanup_despawned_entities_system,
            ),
        );

        info!(
            "NetworkingPlugin initialized for node {}",
            self.config.node_id
        );
        info!(
            "Sync interval: {}s, Prune interval: {}s, GC interval: {}s",
            self.config.sync_interval_secs,
            self.config.prune_interval_secs,
            self.config.tombstone_gc_interval_secs
        );
    }
}
|
||||||
|
|
||||||
|
/// Extension trait for App to add networking more ergonomically
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```no_run
|
||||||
|
/// use bevy::prelude::*;
|
||||||
|
/// use lib::networking::NetworkingAppExt;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// App::new()
|
||||||
|
/// .add_plugins(DefaultPlugins)
|
||||||
|
/// .add_networking(Uuid::new_v4())
|
||||||
|
/// .run();
|
||||||
|
/// ```
|
||||||
|
pub trait NetworkingAppExt {
    /// Add networking with default configuration and specified node ID.
    ///
    /// Equivalent to [`NetworkingPlugin::default_with_node_id`].
    fn add_networking(&mut self, node_id: NodeId) -> &mut Self;

    /// Add networking with custom configuration.
    fn add_networking_with_config(&mut self, config: NetworkingConfig) -> &mut Self;
}
|
||||||
|
|
||||||
|
impl NetworkingAppExt for App {
|
||||||
|
fn add_networking(&mut self, node_id: NodeId) -> &mut Self {
|
||||||
|
self.add_plugins(NetworkingPlugin::default_with_node_id(node_id))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn add_networking_with_config(&mut self, config: NetworkingConfig) -> &mut Self {
|
||||||
|
self.add_plugins(NetworkingPlugin::new(config))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // Defaults must match the interval values documented on the struct.
    #[test]
    fn test_networking_config_default() {
        let config = NetworkingConfig::default();
        assert_eq!(config.sync_interval_secs, 10.0);
        assert_eq!(config.prune_interval_secs, 60.0);
        assert_eq!(config.tombstone_gc_interval_secs, 300.0);
    }

    // `default_with_node_id` stores the supplied node ID verbatim.
    #[test]
    fn test_networking_plugin_creation() {
        let node_id = uuid::Uuid::new_v4();
        let plugin = NetworkingPlugin::default_with_node_id(node_id);
        assert_eq!(plugin.config.node_id, node_id);
    }

    // Building the plugin must install every networking resource.
    #[test]
    fn test_networking_plugin_build() {
        let mut app = App::new();
        let node_id = uuid::Uuid::new_v4();

        app.add_plugins(NetworkingPlugin::default_with_node_id(node_id));

        // Verify resources were added
        assert!(app.world().get_resource::<NodeVectorClock>().is_some());
        assert!(app.world().get_resource::<NetworkEntityMap>().is_some());
        assert!(app.world().get_resource::<LastSyncVersions>().is_some());
        assert!(app.world().get_resource::<OperationLog>().is_some());
        assert!(app.world().get_resource::<TombstoneRegistry>().is_some());
    }

    // The extension trait is a thin wrapper over the plugin; spot-check
    // that it installs resources too.
    #[test]
    fn test_app_extension_trait() {
        let mut app = App::new();
        let node_id = uuid::Uuid::new_v4();

        app.add_networking(node_id);

        // Verify resources were added
        assert!(app.world().get_resource::<NodeVectorClock>().is_some());
        assert!(app.world().get_resource::<NetworkEntityMap>().is_some());
    }
}
|
||||||
639
crates/lib/src/networking/rga.rs
Normal file
639
crates/lib/src/networking/rga.rs
Normal file
@@ -0,0 +1,639 @@
|
|||||||
|
//! RGA (Replicated Growable Array) CRDT implementation
|
||||||
|
//!
|
||||||
|
//! This module provides a conflict-free replicated sequence that maintains
|
||||||
|
//! consistent ordering across concurrent insert and delete operations.
|
||||||
|
//!
|
||||||
|
//! ## RGA Semantics
|
||||||
|
//!
|
||||||
|
//! - **Causal ordering**: Elements inserted after position P stay after P
|
||||||
|
//! - **Concurrent inserts**: Resolved by timestamp + node ID tiebreaker
|
||||||
|
//! - **Tombstones**: Deleted elements remain in structure to preserve positions
|
||||||
|
//! - **Unique operation IDs**: Each insert gets a UUID for referencing
|
||||||
|
//!
|
||||||
|
//! ## Example
|
||||||
|
//!
|
||||||
|
//! ```
|
||||||
|
//! use lib::networking::Rga;
|
||||||
|
//! use uuid::Uuid;
|
||||||
|
//!
|
||||||
|
//! let node1 = Uuid::new_v4();
|
||||||
|
//! let node2 = Uuid::new_v4();
|
||||||
|
//!
|
||||||
|
//! // Node 1 creates sequence: [A, B]
|
||||||
|
//! let mut seq1: Rga<char> = Rga::new();
|
||||||
|
//! let (id_a, _) = seq1.insert_at_beginning('A', node1);
|
||||||
|
//! let (id_b, _) = seq1.insert_after(Some(id_a), 'B', node1);
|
||||||
|
//!
|
||||||
|
//! // Node 2 concurrently inserts C after A
|
||||||
|
//! let mut seq2 = seq1.clone();
|
||||||
|
//! seq2.insert_after(Some(id_a), 'C', node2);
|
||||||
|
//!
|
||||||
|
//! // Node 1 inserts D after A
|
||||||
|
//! seq1.insert_after(Some(id_a), 'D', node1);
|
||||||
|
//!
|
||||||
|
//! // Merge - concurrent inserts after A are ordered by timestamp + node ID
|
||||||
|
//! seq1.merge(&seq2);
|
||||||
|
//!
|
||||||
|
//! let values: Vec<char> = seq1.values().copied().collect();
|
||||||
|
//! assert_eq!(values.len(), 4); // A, (C or D), (D or C), B
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
use serde::{
|
||||||
|
Deserialize,
|
||||||
|
Serialize,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// An element in an RGA sequence
|
||||||
|
///
|
||||||
|
/// Each element has a unique ID and tracks its logical position in the sequence
|
||||||
|
/// via the "after" pointer.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct RgaElement<T> {
    /// Unique ID for this element
    pub id: uuid::Uuid,

    /// The actual value
    pub value: T,

    /// ID of the element this was inserted after (None = beginning)
    pub after_id: Option<uuid::Uuid>,

    /// Node that performed the insert
    pub inserting_node: NodeId,

    /// Vector clock when inserted (for ordering concurrent inserts;
    /// node ID breaks ties between concurrent clocks)
    pub vector_clock: VectorClock,

    /// Whether this element has been deleted (tombstone). Tombstones stay
    /// in the structure so concurrent inserts can still anchor to them.
    pub is_deleted: bool,
}
|
||||||
|
|
||||||
|
/// RGA (Replicated Growable Array) CRDT
|
||||||
|
///
|
||||||
|
/// A replicated sequence supporting concurrent insert/delete with consistent
|
||||||
|
/// ordering based on causal relationships.
|
||||||
|
///
|
||||||
|
/// # Type Parameters
|
||||||
|
///
|
||||||
|
/// - `T`: The element type (must be Clone, Serialize, Deserialize)
|
||||||
|
///
|
||||||
|
/// # Internal Structure
|
||||||
|
///
|
||||||
|
/// Elements are stored in a HashMap by ID. Each element tracks which element
|
||||||
|
/// it was inserted after, forming a linked list structure. Deleted elements
|
||||||
|
/// remain as tombstones to preserve positions for concurrent operations.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Rga<T> {
    /// Map from element ID to element. Order is not stored here; it is
    /// reconstructed from each element's `after_id` link on demand.
    elements: HashMap<uuid::Uuid, RgaElement<T>>,
}
|
||||||
|
|
||||||
|
impl<T> Rga<T>
|
||||||
|
where
|
||||||
|
T: Clone + Serialize + for<'de> Deserialize<'de>,
|
||||||
|
{
|
||||||
|
/// Create a new empty RGA sequence
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
elements: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Insert an element at the beginning of the sequence
|
||||||
|
///
|
||||||
|
/// Returns (element_id, position) where position is the index in the
|
||||||
|
/// visible sequence.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::Rga;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut seq: Rga<char> = Rga::new();
|
||||||
|
///
|
||||||
|
/// let (id, pos) = seq.insert_at_beginning('A', node);
|
||||||
|
/// assert_eq!(pos, 0);
|
||||||
|
/// ```
|
||||||
|
pub fn insert_at_beginning(&mut self, value: T, node_id: NodeId) -> (uuid::Uuid, usize) {
|
||||||
|
let id = uuid::Uuid::new_v4();
|
||||||
|
let mut clock = VectorClock::new();
|
||||||
|
clock.increment(node_id);
|
||||||
|
|
||||||
|
let element = RgaElement {
|
||||||
|
id,
|
||||||
|
value,
|
||||||
|
after_id: None,
|
||||||
|
inserting_node: node_id,
|
||||||
|
vector_clock: clock,
|
||||||
|
is_deleted: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
self.elements.insert(id, element);
|
||||||
|
|
||||||
|
(id, 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Insert an element after a specific element ID
|
||||||
|
///
|
||||||
|
/// If after_id is None, inserts at the beginning.
|
||||||
|
///
|
||||||
|
/// Returns (element_id, position) where position is the index in the
|
||||||
|
/// visible sequence.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::Rga;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut seq: Rga<char> = Rga::new();
|
||||||
|
///
|
||||||
|
/// let (id_a, _) = seq.insert_at_beginning('A', node);
|
||||||
|
/// let (id_b, pos) = seq.insert_after(Some(id_a), 'B', node);
|
||||||
|
/// assert_eq!(pos, 1);
|
||||||
|
///
|
||||||
|
/// let values: Vec<char> = seq.values().copied().collect();
|
||||||
|
/// assert_eq!(values, vec!['A', 'B']);
|
||||||
|
/// ```
|
||||||
|
pub fn insert_after(
|
||||||
|
&mut self,
|
||||||
|
after_id: Option<uuid::Uuid>,
|
||||||
|
value: T,
|
||||||
|
node_id: NodeId,
|
||||||
|
) -> (uuid::Uuid, usize) {
|
||||||
|
let id = uuid::Uuid::new_v4();
|
||||||
|
let mut clock = VectorClock::new();
|
||||||
|
clock.increment(node_id);
|
||||||
|
|
||||||
|
let element = RgaElement {
|
||||||
|
id,
|
||||||
|
value,
|
||||||
|
after_id,
|
||||||
|
inserting_node: node_id,
|
||||||
|
vector_clock: clock,
|
||||||
|
is_deleted: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
self.elements.insert(id, element);
|
||||||
|
|
||||||
|
// Calculate position
|
||||||
|
let position = self.calculate_position(id);
|
||||||
|
|
||||||
|
(id, position)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Insert an element with explicit vector clock
|
||||||
|
///
|
||||||
|
/// This is used when applying remote operations that already have
|
||||||
|
/// a vector clock.
|
||||||
|
pub fn insert_with_clock(
|
||||||
|
&mut self,
|
||||||
|
id: uuid::Uuid,
|
||||||
|
after_id: Option<uuid::Uuid>,
|
||||||
|
value: T,
|
||||||
|
node_id: NodeId,
|
||||||
|
vector_clock: VectorClock,
|
||||||
|
) -> usize {
|
||||||
|
let element = RgaElement {
|
||||||
|
id,
|
||||||
|
value,
|
||||||
|
after_id,
|
||||||
|
inserting_node: node_id,
|
||||||
|
vector_clock,
|
||||||
|
is_deleted: false,
|
||||||
|
};
|
||||||
|
|
||||||
|
self.elements.insert(id, element);
|
||||||
|
|
||||||
|
self.calculate_position(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Delete an element by ID
|
||||||
|
///
|
||||||
|
/// The element becomes a tombstone - it remains in the structure but
|
||||||
|
/// is hidden from the visible sequence.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::Rga;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut seq: Rga<char> = Rga::new();
|
||||||
|
///
|
||||||
|
/// let (id, _) = seq.insert_at_beginning('A', node);
|
||||||
|
/// assert_eq!(seq.len(), 1);
|
||||||
|
///
|
||||||
|
/// seq.delete(id);
|
||||||
|
/// assert_eq!(seq.len(), 0);
|
||||||
|
/// assert!(seq.is_deleted(id));
|
||||||
|
/// ```
|
||||||
|
pub fn delete(&mut self, element_id: uuid::Uuid) {
|
||||||
|
if let Some(element) = self.elements.get_mut(&element_id) {
|
||||||
|
element.is_deleted = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if an element is deleted
|
||||||
|
pub fn is_deleted(&self, element_id: uuid::Uuid) -> bool {
|
||||||
|
self.elements
|
||||||
|
.get(&element_id)
|
||||||
|
.map(|e| e.is_deleted)
|
||||||
|
.unwrap_or(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the visible length of the sequence (excluding tombstones)
|
||||||
|
pub fn len(&self) -> usize {
|
||||||
|
self.elements.values().filter(|e| !e.is_deleted).count()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if the sequence is empty (no visible elements)
|
||||||
|
pub fn is_empty(&self) -> bool {
|
||||||
|
self.len() == 0
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all visible values in order
|
||||||
|
///
|
||||||
|
/// Returns an iterator over the values in their proper sequence order.
|
||||||
|
pub fn values(&self) -> impl Iterator<Item = &T> {
|
||||||
|
let ordered = self.get_ordered_elements();
|
||||||
|
ordered.into_iter().filter_map(move |id| {
|
||||||
|
self.elements.get(&id).and_then(|e| {
|
||||||
|
if !e.is_deleted {
|
||||||
|
Some(&e.value)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all visible elements with their IDs in order
|
||||||
|
pub fn elements_with_ids(&self) -> Vec<(uuid::Uuid, &T)> {
|
||||||
|
let ordered = self.get_ordered_elements();
|
||||||
|
ordered
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|id| {
|
||||||
|
self.elements.get(&id).and_then(|e| {
|
||||||
|
if !e.is_deleted {
|
||||||
|
Some((id, &e.value))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Merge another RGA into this one
|
||||||
|
///
|
||||||
|
/// Implements CRDT merge by combining all elements from both sequences
|
||||||
|
/// and resolving positions based on causal ordering.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::Rga;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node1 = Uuid::new_v4();
|
||||||
|
/// let node2 = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// let mut seq1: Rga<char> = Rga::new();
|
||||||
|
/// seq1.insert_at_beginning('A', node1);
|
||||||
|
///
|
||||||
|
/// let mut seq2: Rga<char> = Rga::new();
|
||||||
|
/// seq2.insert_at_beginning('B', node2);
|
||||||
|
///
|
||||||
|
/// seq1.merge(&seq2);
|
||||||
|
/// assert_eq!(seq1.len(), 2);
|
||||||
|
/// ```
|
||||||
|
pub fn merge(&mut self, other: &Rga<T>) {
|
||||||
|
for (id, element) in &other.elements {
|
||||||
|
// Insert or update element
|
||||||
|
self.elements
|
||||||
|
.entry(*id)
|
||||||
|
.and_modify(|existing| {
|
||||||
|
// If other's element is deleted, mark ours as deleted too
|
||||||
|
if element.is_deleted {
|
||||||
|
existing.is_deleted = true;
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.or_insert_with(|| element.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Clear the sequence
|
||||||
|
///
|
||||||
|
/// Removes all elements and tombstones.
|
||||||
|
pub fn clear(&mut self) {
|
||||||
|
self.elements.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Garbage collect tombstones
|
||||||
|
///
|
||||||
|
/// Removes deleted elements that have no children (nothing inserted after them).
|
||||||
|
/// This is safe because if no element references a tombstone as its parent,
|
||||||
|
/// it can be removed without affecting the sequence.
|
||||||
|
pub fn garbage_collect(&mut self) {
|
||||||
|
// Find all IDs that are referenced as after_id
|
||||||
|
let mut referenced_ids = std::collections::HashSet::new();
|
||||||
|
for element in self.elements.values() {
|
||||||
|
if let Some(after_id) = element.after_id {
|
||||||
|
referenced_ids.insert(after_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove deleted elements that aren't referenced
|
||||||
|
self.elements.retain(|id, element| {
|
||||||
|
!element.is_deleted || referenced_ids.contains(id)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get ordered list of element IDs
|
||||||
|
///
|
||||||
|
/// This builds the proper sequence order by following the after_id pointers
|
||||||
|
/// and resolving concurrent inserts using vector clocks + node IDs.
|
||||||
|
fn get_ordered_elements(&self) -> Vec<uuid::Uuid> {
|
||||||
|
// Build a map of after_id -> list of elements inserted after it
|
||||||
|
let mut children: HashMap<Option<uuid::Uuid>, Vec<uuid::Uuid>> = HashMap::new();
|
||||||
|
|
||||||
|
for (id, element) in &self.elements {
|
||||||
|
children
|
||||||
|
.entry(element.after_id)
|
||||||
|
.or_insert_with(Vec::new)
|
||||||
|
.push(*id);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sort children by vector clock, then node ID (for deterministic ordering)
|
||||||
|
for child_list in children.values_mut() {
|
||||||
|
child_list.sort_by(|a, b| {
|
||||||
|
let elem_a = &self.elements[a];
|
||||||
|
let elem_b = &self.elements[b];
|
||||||
|
|
||||||
|
// Compare vector clocks
|
||||||
|
match elem_a.vector_clock.compare(&elem_b.vector_clock) {
|
||||||
|
Ok(std::cmp::Ordering::Less) => std::cmp::Ordering::Less,
|
||||||
|
Ok(std::cmp::Ordering::Greater) => std::cmp::Ordering::Greater,
|
||||||
|
Ok(std::cmp::Ordering::Equal) | Err(_) => {
|
||||||
|
// If clocks are equal or concurrent, use node ID as tiebreaker
|
||||||
|
elem_a.inserting_node.cmp(&elem_b.inserting_node)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build ordered list by traversing from None (beginning)
|
||||||
|
let mut result = Vec::new();
|
||||||
|
let mut to_visit = vec![None];
|
||||||
|
|
||||||
|
while let Some(current_id) = to_visit.pop() {
|
||||||
|
if let Some(child_ids) = children.get(¤t_id) {
|
||||||
|
// Visit children in reverse order (since we're using a stack)
|
||||||
|
for child_id in child_ids.iter().rev() {
|
||||||
|
result.push(*child_id);
|
||||||
|
to_visit.push(Some(*child_id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Calculate the visible position of an element
|
||||||
|
fn calculate_position(&self, element_id: uuid::Uuid) -> usize {
|
||||||
|
let ordered = self.get_ordered_elements();
|
||||||
|
ordered
|
||||||
|
.iter()
|
||||||
|
.position(|id| id == &element_id)
|
||||||
|
.unwrap_or(0)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// `Default` produces an empty sequence, equivalent to [`Rga::new`].
impl<T> Default for Rga<T>
where
    T: Clone + Serialize + for<'de> Deserialize<'de>,
{
    fn default() -> Self {
        Self::new()
    }
}
|
||||||
|
|
||||||
|
// Unit tests for the Rga CRDT sequence: construction, insertion,
// deletion, merge/convergence, garbage collection, and serialization.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_rga_new() {
        let seq: Rga<char> = Rga::new();
        assert!(seq.is_empty());
        assert_eq!(seq.len(), 0);
    }

    #[test]
    fn test_rga_insert_at_beginning() {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<char> = Rga::new();

        let (_, pos) = seq.insert_at_beginning('A', node);
        assert_eq!(pos, 0);
        assert_eq!(seq.len(), 1);

        let values: Vec<char> = seq.values().copied().collect();
        assert_eq!(values, vec!['A']);
    }

    #[test]
    fn test_rga_insert_after() {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<char> = Rga::new();

        let (id_a, _) = seq.insert_at_beginning('A', node);
        let (_, pos_b) = seq.insert_after(Some(id_a), 'B', node);
        assert_eq!(pos_b, 1);

        let values: Vec<char> = seq.values().copied().collect();
        assert_eq!(values, vec!['A', 'B']);
    }

    #[test]
    fn test_rga_delete() {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<char> = Rga::new();

        let (id_a, _) = seq.insert_at_beginning('A', node);
        // NOTE(review): `id_b` is never used below — consider `let (_, _)` to
        // silence the unused-variable warning.
        let (id_b, _) = seq.insert_after(Some(id_a), 'B', node);

        assert_eq!(seq.len(), 2);

        seq.delete(id_a);
        assert_eq!(seq.len(), 1);
        assert!(seq.is_deleted(id_a));

        let values: Vec<char> = seq.values().copied().collect();
        assert_eq!(values, vec!['B']);
    }

    #[test]
    fn test_rga_insert_delete_insert() {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<char> = Rga::new();

        let (id_a, _) = seq.insert_at_beginning('A', node);
        seq.delete(id_a);
        assert_eq!(seq.len(), 0);

        // A fresh insert after a delete must be visible again.
        seq.insert_at_beginning('B', node);
        assert_eq!(seq.len(), 1);

        let values: Vec<char> = seq.values().copied().collect();
        assert_eq!(values, vec!['B']);
    }

    #[test]
    fn test_rga_merge_simple() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        let mut seq1: Rga<char> = Rga::new();
        seq1.insert_at_beginning('A', node1);

        let mut seq2: Rga<char> = Rga::new();
        seq2.insert_at_beginning('B', node2);

        // Merging two independent single-element sequences yields both.
        seq1.merge(&seq2);
        assert_eq!(seq1.len(), 2);
    }

    #[test]
    fn test_rga_merge_preserves_order() {
        let node = uuid::Uuid::new_v4();

        let mut seq1: Rga<char> = Rga::new();
        let (id_a, _) = seq1.insert_at_beginning('A', node);
        let (id_b, _) = seq1.insert_after(Some(id_a), 'B', node);
        seq1.insert_after(Some(id_b), 'C', node);

        let seq2 = seq1.clone();

        // Merging with an identical replica must be a no-op for ordering.
        seq1.merge(&seq2);

        let values: Vec<char> = seq1.values().copied().collect();
        assert_eq!(values, vec!['A', 'B', 'C']);
    }

    #[test]
    fn test_rga_merge_deletion() {
        let node = uuid::Uuid::new_v4();

        let mut seq1: Rga<char> = Rga::new();
        let (id_a, _) = seq1.insert_at_beginning('A', node);
        seq1.insert_after(Some(id_a), 'B', node);

        let mut seq2 = seq1.clone();
        seq2.delete(id_a);

        // A deletion performed on a replica must propagate through merge.
        seq1.merge(&seq2);

        let values: Vec<char> = seq1.values().copied().collect();
        assert_eq!(values, vec!['B']);
    }

    #[test]
    fn test_rga_concurrent_inserts() {
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        // Both start with [A]
        let mut seq1: Rga<char> = Rga::new();
        let (id_a, _) = seq1.insert_at_beginning('A', node1);

        let mut seq2 = seq1.clone();

        // seq1 inserts B after A
        seq1.insert_after(Some(id_a), 'B', node1);

        // seq2 inserts C after A (concurrent)
        seq2.insert_after(Some(id_a), 'C', node2);

        // Merge
        seq1.merge(&seq2);

        // Should have A followed by B and C in some deterministic order
        assert_eq!(seq1.len(), 3);

        let values: Vec<char> = seq1.values().copied().collect();
        assert_eq!(values[0], 'A');
        assert!(values.contains(&'B'));
        assert!(values.contains(&'C'));
    }

    #[test]
    fn test_rga_clear() {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<char> = Rga::new();

        seq.insert_at_beginning('A', node);
        seq.insert_at_beginning('B', node);
        assert_eq!(seq.len(), 2);

        seq.clear();
        assert!(seq.is_empty());
    }

    #[test]
    fn test_rga_garbage_collect() {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<char> = Rga::new();

        let (id_a, _) = seq.insert_at_beginning('A', node);
        let (id_b, _) = seq.insert_after(Some(id_a), 'B', node);
        let (_, _) = seq.insert_after(Some(id_b), 'C', node);

        // Delete A (has child B, so should be kept)
        seq.delete(id_a);

        // Delete B (has child C, so should be kept)
        seq.delete(id_b);

        assert_eq!(seq.elements.len(), 3);

        seq.garbage_collect();

        // A and B should still be there (referenced by children)
        // Only C is visible
        assert_eq!(seq.len(), 1);
        assert!(seq.elements.contains_key(&id_a));
        assert!(seq.elements.contains_key(&id_b));
    }

    // Round-trips a sequence through bincode and checks content survives.
    #[test]
    fn test_rga_serialization() -> bincode::Result<()> {
        let node = uuid::Uuid::new_v4();
        let mut seq: Rga<String> = Rga::new();

        let (id_a, _) = seq.insert_at_beginning("foo".to_string(), node);
        seq.insert_after(Some(id_a), "bar".to_string(), node);

        let bytes = bincode::serialize(&seq)?;
        let deserialized: Rga<String> = bincode::deserialize(&bytes)?;

        assert_eq!(deserialized.len(), 2);
        let values: Vec<String> = deserialized.values().cloned().collect();
        assert_eq!(values, vec!["foo".to_string(), "bar".to_string()]);

        Ok(())
    }
}
|
||||||
430
crates/lib/src/networking/tombstones.rs
Normal file
430
crates/lib/src/networking/tombstones.rs
Normal file
@@ -0,0 +1,430 @@
|
|||||||
|
//! Entity tombstone tracking for deletion semantics
|
||||||
|
//!
|
||||||
|
//! This module manages tombstones for deleted entities, preventing resurrection
|
||||||
|
//! and supporting eventual garbage collection.
|
||||||
|
//!
|
||||||
|
//! ## Deletion Semantics
|
||||||
|
//!
|
||||||
|
//! When an entity is deleted:
|
||||||
|
//! 1. A Delete operation is generated with current vector clock
|
||||||
|
//! 2. The entity is marked as deleted (tombstone) in TombstoneRegistry
|
||||||
|
//! 3. The tombstone is propagated to all peers
|
||||||
|
//! 4. Operations older than the deletion are ignored
|
||||||
|
//! 5. After a grace period, tombstones can be garbage collected
|
||||||
|
//!
|
||||||
|
//! ## Resurrection Prevention
|
||||||
|
//!
|
||||||
|
//! If a peer creates an entity (Set operation) while another peer deletes it:
|
||||||
|
//! - Use vector clock comparison: if delete happened-after create, deletion wins
|
||||||
|
//! - If concurrent, deletion wins (delete bias for safety)
|
||||||
|
//! - This prevents "zombie" entities from reappearing
|
||||||
|
//!
|
||||||
|
//! ## Garbage Collection
|
||||||
|
//!
|
||||||
|
//! Tombstones are kept for a configurable period (default: 1 hour) to handle
|
||||||
|
//! late-arriving operations. After this period, they can be safely removed.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use bevy::prelude::*;
|
||||||
|
|
||||||
|
use crate::networking::{
|
||||||
|
vector_clock::{
|
||||||
|
NodeId,
|
||||||
|
VectorClock,
|
||||||
|
},
|
||||||
|
GossipBridge,
|
||||||
|
NodeVectorClock,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// How long to keep tombstones before garbage collection (in seconds)
const TOMBSTONE_TTL_SECS: u64 = 3600; // 1 hour

/// A tombstone record for a deleted entity
///
/// NOTE(review): `std::time::Instant` is process-local and not serializable,
/// so `timestamp` can only drive local TTL-based garbage collection —
/// confirm tombstones are never shipped over the wire in this form.
#[derive(Debug, Clone)]
pub struct Tombstone {
    /// The entity that was deleted
    pub entity_id: uuid::Uuid,

    /// Node that initiated the deletion
    pub deleting_node: NodeId,

    /// Vector clock when deletion occurred
    pub deletion_clock: VectorClock,

    /// When this tombstone was created (for garbage collection)
    pub timestamp: std::time::Instant,
}
|
||||||
|
|
||||||
|
/// Resource tracking tombstones for deleted entities
///
/// This prevents deleted entities from being resurrected by late-arriving
/// create operations.
#[derive(Resource, Default)]
pub struct TombstoneRegistry {
    /// Map from entity ID to tombstone (at most one tombstone per entity;
    /// later deletions replace earlier ones — see `record_deletion`)
    tombstones: HashMap<uuid::Uuid, Tombstone>,
}
|
||||||
|
|
||||||
|
impl TombstoneRegistry {
|
||||||
|
/// Create a new tombstone registry
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
tombstones: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if an entity is deleted
|
||||||
|
pub fn is_deleted(&self, entity_id: uuid::Uuid) -> bool {
|
||||||
|
self.tombstones.contains_key(&entity_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the tombstone for an entity, if it exists
|
||||||
|
pub fn get_tombstone(&self, entity_id: uuid::Uuid) -> Option<&Tombstone> {
|
||||||
|
self.tombstones.get(&entity_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Record a deletion
|
||||||
|
///
|
||||||
|
/// This creates a tombstone for the entity. If a tombstone already exists
|
||||||
|
/// and the new deletion has a later clock, it replaces the old one.
|
||||||
|
pub fn record_deletion(
|
||||||
|
&mut self,
|
||||||
|
entity_id: uuid::Uuid,
|
||||||
|
deleting_node: NodeId,
|
||||||
|
deletion_clock: VectorClock,
|
||||||
|
) {
|
||||||
|
// Check if we already have a tombstone
|
||||||
|
if let Some(existing) = self.tombstones.get(&entity_id) {
|
||||||
|
// Only update if the new deletion is later
|
||||||
|
// (new deletion happened-after existing = existing happened-before new)
|
||||||
|
if existing.deletion_clock.happened_before(&deletion_clock) {
|
||||||
|
self.tombstones.insert(
|
||||||
|
entity_id,
|
||||||
|
Tombstone {
|
||||||
|
entity_id,
|
||||||
|
deleting_node,
|
||||||
|
deletion_clock,
|
||||||
|
timestamp: std::time::Instant::now(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
debug!("Updated tombstone for entity {:?}", entity_id);
|
||||||
|
} else {
|
||||||
|
debug!(
|
||||||
|
"Ignoring older or concurrent deletion for entity {:?}",
|
||||||
|
entity_id
|
||||||
|
);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// New tombstone
|
||||||
|
self.tombstones.insert(
|
||||||
|
entity_id,
|
||||||
|
Tombstone {
|
||||||
|
entity_id,
|
||||||
|
deleting_node,
|
||||||
|
deletion_clock,
|
||||||
|
timestamp: std::time::Instant::now(),
|
||||||
|
},
|
||||||
|
);
|
||||||
|
info!("Created tombstone for entity {:?}", entity_id);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if an operation should be ignored because the entity is deleted
|
||||||
|
///
|
||||||
|
/// Returns true if:
|
||||||
|
/// - The entity has a tombstone AND
|
||||||
|
/// - The operation's clock happened-before or is concurrent with the deletion
|
||||||
|
///
|
||||||
|
/// This prevents operations on deleted entities from being applied.
|
||||||
|
pub fn should_ignore_operation(
|
||||||
|
&self,
|
||||||
|
entity_id: uuid::Uuid,
|
||||||
|
operation_clock: &VectorClock,
|
||||||
|
) -> bool {
|
||||||
|
if let Some(tombstone) = self.tombstones.get(&entity_id) {
|
||||||
|
// If operation happened-before deletion, ignore it
|
||||||
|
// operation_clock.happened_before(deletion_clock) => ignore
|
||||||
|
|
||||||
|
// If deletion happened-before operation, don't ignore (resurrection)
|
||||||
|
// deletion_clock.happened_before(operation_clock) => don't ignore
|
||||||
|
|
||||||
|
// If concurrent, deletion wins (delete bias) => ignore
|
||||||
|
// !operation_clock.happened_before(deletion_clock) && !deletion_clock.happened_before(operation_clock) => ignore
|
||||||
|
|
||||||
|
// So we DON'T ignore only if deletion happened-before operation
|
||||||
|
!tombstone.deletion_clock.happened_before(operation_clock)
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove old tombstones that are past their TTL
|
||||||
|
///
|
||||||
|
/// This should be called periodically to prevent unbounded growth.
|
||||||
|
pub fn garbage_collect(&mut self) {
|
||||||
|
let ttl = std::time::Duration::from_secs(TOMBSTONE_TTL_SECS);
|
||||||
|
let now = std::time::Instant::now();
|
||||||
|
|
||||||
|
let before_count = self.tombstones.len();
|
||||||
|
|
||||||
|
self.tombstones.retain(|_, tombstone| {
|
||||||
|
now.duration_since(tombstone.timestamp) < ttl
|
||||||
|
});
|
||||||
|
|
||||||
|
let after_count = self.tombstones.len();
|
||||||
|
|
||||||
|
if before_count != after_count {
|
||||||
|
info!(
|
||||||
|
"Garbage collected {} tombstones ({} -> {})",
|
||||||
|
before_count - after_count,
|
||||||
|
before_count,
|
||||||
|
after_count
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the number of tombstones
|
||||||
|
pub fn num_tombstones(&self) -> usize {
|
||||||
|
self.tombstones.len()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Marker component requesting deletion of a networked entity.
///
/// Entities carrying this marker are picked up by
/// `handle_local_deletions_system`, which generates and broadcasts a Delete
/// operation, records a tombstone, and then despawns the entity locally.
///
/// # Usage
///
/// To delete an entity, add the `ToDelete` component:
///
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::ToDelete;
///
/// fn delete_entity_system(mut commands: Commands, entity: Entity) {
///     commands.entity(entity).insert(ToDelete);
/// }
/// ```
#[derive(Component)]
pub struct ToDelete;
|
||||||
|
|
||||||
|
/// System to handle entity deletions initiated locally.
///
/// For every entity carrying the `ToDelete` marker, this system:
/// 1. ticks the local node's vector clock,
/// 2. records a tombstone in the `TombstoneRegistry`,
/// 3. logs the Delete operation (if an `OperationLog` resource exists),
/// 4. broadcasts the deletion via the `GossipBridge`,
/// 5. despawns the entity locally.
///
/// Does nothing when no `GossipBridge` resource is present.
///
/// NOTE(review): the entity is despawned even when the broadcast fails
/// (`bridge.send` error) — confirm this fire-and-forget behavior is intended;
/// the tombstone is recorded either way.
pub fn handle_local_deletions_system(
    mut commands: Commands,
    query: Query<(Entity, &crate::networking::NetworkedEntity), With<ToDelete>>,
    mut node_clock: ResMut<NodeVectorClock>,
    mut tombstone_registry: ResMut<TombstoneRegistry>,
    mut operation_log: Option<ResMut<crate::networking::OperationLog>>,
    bridge: Option<Res<GossipBridge>>,
) {
    // No bridge means networking is not set up; skip deletion broadcasting.
    let Some(bridge) = bridge else {
        return;
    };

    for (entity, networked) in query.iter() {
        // Increment clock for deletion
        node_clock.tick();

        // Create Delete operation
        let delete_op = crate::networking::ComponentOpBuilder::new(
            node_clock.node_id,
            node_clock.clock.clone(),
        )
        .delete();

        // Record tombstone (keyed by the stable network id, not the Entity)
        tombstone_registry.record_deletion(
            networked.network_id,
            node_clock.node_id,
            node_clock.clock.clone(),
        );

        // Create EntityDelta with Delete operation
        let delta = crate::networking::EntityDelta::new(
            networked.network_id,
            node_clock.node_id,
            node_clock.clock.clone(),
            vec![delete_op],
        );

        // Record in operation log
        if let Some(ref mut log) = operation_log {
            log.record_operation(delta.clone());
        }

        // Broadcast deletion
        let message = crate::networking::VersionedMessage::new(
            crate::networking::SyncMessage::EntityDelta {
                entity_id: delta.entity_id,
                node_id: delta.node_id,
                vector_clock: delta.vector_clock.clone(),
                operations: delta.operations.clone(),
            },
        );

        if let Err(e) = bridge.send(message) {
            error!("Failed to broadcast Delete operation: {}", e);
        } else {
            info!(
                "Broadcast Delete operation for entity {:?}",
                networked.network_id
            );
        }

        // Despawn the entity locally
        commands.entity(entity).despawn();
    }
}
|
||||||
|
|
||||||
|
/// System to garbage collect old tombstones
|
||||||
|
///
|
||||||
|
/// This runs periodically to remove tombstones that are past their TTL.
|
||||||
|
pub fn garbage_collect_tombstones_system(
|
||||||
|
mut tombstone_registry: ResMut<TombstoneRegistry>,
|
||||||
|
time: Res<Time>,
|
||||||
|
mut last_gc: Local<f32>,
|
||||||
|
) {
|
||||||
|
// Garbage collect every 5 minutes
|
||||||
|
const GC_INTERVAL: f32 = 300.0;
|
||||||
|
|
||||||
|
*last_gc += time.delta_secs();
|
||||||
|
|
||||||
|
if *last_gc >= GC_INTERVAL {
|
||||||
|
*last_gc = 0.0;
|
||||||
|
|
||||||
|
debug!("Running tombstone garbage collection");
|
||||||
|
tombstone_registry.garbage_collect();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unit tests for TombstoneRegistry: creation, recording deletions,
// operation filtering (older / newer / concurrent), and tombstone updates.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_tombstone_registry_creation() {
        let registry = TombstoneRegistry::new();
        assert_eq!(registry.num_tombstones(), 0);
    }

    #[test]
    fn test_record_deletion() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();
        let clock = VectorClock::new();

        registry.record_deletion(entity_id, node_id, clock);

        assert!(registry.is_deleted(entity_id));
        assert_eq!(registry.num_tombstones(), 1);
    }

    #[test]
    fn test_should_ignore_older_operation() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Create deletion at clock = 2
        let mut deletion_clock = VectorClock::new();
        deletion_clock.increment(node_id);
        deletion_clock.increment(node_id);

        registry.record_deletion(entity_id, node_id, deletion_clock);

        // Operation at clock = 1 should be ignored
        let mut old_operation_clock = VectorClock::new();
        old_operation_clock.increment(node_id);

        assert!(registry.should_ignore_operation(entity_id, &old_operation_clock));
    }

    #[test]
    fn test_should_not_ignore_newer_operation() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // Create deletion at clock = 1
        let mut deletion_clock = VectorClock::new();
        deletion_clock.increment(node_id);

        registry.record_deletion(entity_id, node_id, deletion_clock);

        // Operation at clock = 2 should NOT be ignored (resurrection)
        let mut new_operation_clock = VectorClock::new();
        new_operation_clock.increment(node_id);
        new_operation_clock.increment(node_id);

        assert!(!registry.should_ignore_operation(entity_id, &new_operation_clock));
    }

    #[test]
    fn test_concurrent_delete_wins() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node1 = uuid::Uuid::new_v4();
        let node2 = uuid::Uuid::new_v4();

        // Node 1 deletes
        let mut delete_clock = VectorClock::new();
        delete_clock.increment(node1);

        registry.record_deletion(entity_id, node1, delete_clock);

        // Node 2 has concurrent operation
        let mut concurrent_clock = VectorClock::new();
        concurrent_clock.increment(node2);

        // Concurrent operation should be ignored (delete bias)
        assert!(registry.should_ignore_operation(entity_id, &concurrent_clock));
    }

    #[test]
    fn test_update_tombstone_with_later_deletion() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // First deletion at clock = 1
        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock1.clone());

        let tombstone1 = registry.get_tombstone(entity_id).unwrap();
        assert_eq!(tombstone1.deletion_clock, clock1);

        // Second deletion at clock = 2 (later)
        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock2.clone());

        let tombstone2 = registry.get_tombstone(entity_id).unwrap();
        assert_eq!(tombstone2.deletion_clock, clock2);
    }

    #[test]
    fn test_ignore_older_tombstone_update() {
        let mut registry = TombstoneRegistry::new();
        let entity_id = uuid::Uuid::new_v4();
        let node_id = uuid::Uuid::new_v4();

        // First deletion at clock = 2
        let mut clock2 = VectorClock::new();
        clock2.increment(node_id);
        clock2.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock2.clone());

        // Try to record older deletion at clock = 1
        let mut clock1 = VectorClock::new();
        clock1.increment(node_id);
        registry.record_deletion(entity_id, node_id, clock1);

        // Should still have the newer tombstone
        let tombstone = registry.get_tombstone(entity_id).unwrap();
        assert_eq!(tombstone.deletion_clock, clock2);
    }
}
|
||||||
456
crates/lib/src/networking/vector_clock.rs
Normal file
456
crates/lib/src/networking/vector_clock.rs
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
//! Vector clock implementation for distributed causality tracking
|
||||||
|
//!
|
||||||
|
//! Vector clocks allow us to determine the causal relationship between events
|
||||||
|
//! in a distributed system. This is critical for CRDT merge semantics.
|
||||||
|
|
||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use serde::{
|
||||||
|
Deserialize,
|
||||||
|
Serialize,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::networking::error::{
|
||||||
|
NetworkingError,
|
||||||
|
Result,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Unique identifier for a node in the distributed system
pub type NodeId = uuid::Uuid;

/// Vector clock for tracking causality in distributed operations
///
/// A vector clock is a map from node IDs to logical timestamps (sequence
/// numbers). Each node maintains its own vector clock and increments its own
/// counter for each local operation.
///
/// # Causal Relationships
///
/// Given two vector clocks A and B:
/// - **A happened-before B** if all of A's counters ≤ B's counters and at
///   least one is <
/// - **A and B are concurrent** if neither happened-before the other
/// - **A and B are identical** if all counters are equal
///
/// # Example
///
/// ```
/// use lib::networking::VectorClock;
/// use uuid::Uuid;
///
/// let node1 = Uuid::new_v4();
/// let node2 = Uuid::new_v4();
///
/// let mut clock1 = VectorClock::new();
/// clock1.increment(node1); // node1: 1
///
/// let mut clock2 = VectorClock::new();
/// clock2.increment(node2); // node2: 1
///
/// // These are concurrent - neither happened before the other
/// assert!(clock1.is_concurrent_with(&clock2));
///
/// // Merge the clocks
/// clock1.merge(&clock2); // node1: 1, node2: 1
/// assert!(clock1.happened_before(&clock2) == false);
/// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub struct VectorClock {
    /// Map from node ID to logical timestamp. Nodes absent from the map are
    /// treated as having counter 0 (see `get`).
    pub clocks: HashMap<NodeId, u64>,
}
|
||||||
|
|
||||||
|
impl VectorClock {
|
||||||
|
/// Create a new empty vector clock
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
clocks: HashMap::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Increment the clock for a given node
|
||||||
|
///
|
||||||
|
/// This should be called by a node before performing a local operation.
|
||||||
|
/// It increments that node's counter in the vector clock.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::VectorClock;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
/// let mut clock = VectorClock::new();
|
||||||
|
///
|
||||||
|
/// clock.increment(node);
|
||||||
|
/// assert_eq!(clock.get(node), 1);
|
||||||
|
///
|
||||||
|
/// clock.increment(node);
|
||||||
|
/// assert_eq!(clock.get(node), 2);
|
||||||
|
/// ```
|
||||||
|
pub fn increment(&mut self, node_id: NodeId) -> u64 {
|
||||||
|
let counter = self.clocks.entry(node_id).or_insert(0);
|
||||||
|
*counter += 1;
|
||||||
|
*counter
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the current counter value for a node
|
||||||
|
///
|
||||||
|
/// Returns 0 if the node has never been seen in this vector clock.
|
||||||
|
pub fn get(&self, node_id: NodeId) -> u64 {
|
||||||
|
self.clocks.get(&node_id).copied().unwrap_or(0)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Merge another vector clock into this one
|
||||||
|
///
|
||||||
|
/// Takes the maximum counter value for each node. This is used when
|
||||||
|
/// receiving a message to update our knowledge of remote operations.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::VectorClock;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node1 = Uuid::new_v4();
|
||||||
|
/// let node2 = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// let mut clock1 = VectorClock::new();
|
||||||
|
/// clock1.increment(node1); // node1: 1
|
||||||
|
/// clock1.increment(node1); // node1: 2
|
||||||
|
///
|
||||||
|
/// let mut clock2 = VectorClock::new();
|
||||||
|
/// clock2.increment(node2); // node2: 1
|
||||||
|
///
|
||||||
|
/// clock1.merge(&clock2);
|
||||||
|
/// assert_eq!(clock1.get(node1), 2);
|
||||||
|
/// assert_eq!(clock1.get(node2), 1);
|
||||||
|
/// ```
|
||||||
|
pub fn merge(&mut self, other: &VectorClock) {
|
||||||
|
for (node_id, &counter) in &other.clocks {
|
||||||
|
let current = self.clocks.entry(*node_id).or_insert(0);
|
||||||
|
*current = (*current).max(counter);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this vector clock happened-before another
|
||||||
|
///
|
||||||
|
/// Returns true if all of our counters are ≤ the other's counters,
|
||||||
|
/// and at least one is strictly less.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::VectorClock;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// let mut clock1 = VectorClock::new();
|
||||||
|
/// clock1.increment(node); // node: 1
|
||||||
|
///
|
||||||
|
/// let mut clock2 = VectorClock::new();
|
||||||
|
/// clock2.increment(node); // node: 1
|
||||||
|
/// clock2.increment(node); // node: 2
|
||||||
|
///
|
||||||
|
/// assert!(clock1.happened_before(&clock2));
|
||||||
|
/// assert!(!clock2.happened_before(&clock1));
|
||||||
|
/// ```
|
||||||
|
pub fn happened_before(&self, other: &VectorClock) -> bool {
|
||||||
|
// Check if all our counters are <= other's counters
|
||||||
|
let all_less_or_equal = self.clocks.iter().all(|(node_id, &our_counter)| {
|
||||||
|
let their_counter = other.get(*node_id);
|
||||||
|
our_counter <= their_counter
|
||||||
|
});
|
||||||
|
|
||||||
|
if !all_less_or_equal {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if at least one counter is strictly less
|
||||||
|
// First check if any of our nodes has a lower counter
|
||||||
|
let mut any_strictly_less = self.clocks.iter().any(|(node_id, &our_counter)| {
|
||||||
|
let their_counter = other.get(*node_id);
|
||||||
|
our_counter < their_counter
|
||||||
|
});
|
||||||
|
|
||||||
|
// Also check if they have nodes we don't know about with non-zero values
|
||||||
|
// For nodes not in self.clocks, we treat them as having counter 0
|
||||||
|
// If other has a node with counter > 0 that we don't have, that counts as "strictly less"
|
||||||
|
if !any_strictly_less {
|
||||||
|
any_strictly_less = other.clocks.iter().any(|(node_id, &their_counter)| {
|
||||||
|
!self.clocks.contains_key(node_id) && their_counter > 0
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
any_strictly_less
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if this vector clock is concurrent with another
|
||||||
|
///
|
||||||
|
/// Two clocks are concurrent if neither happened-before the other and they
|
||||||
|
/// are not identical. This means the operations are causally independent
|
||||||
|
/// and need CRDT merge semantics.
|
||||||
|
///
|
||||||
|
/// # Example
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use lib::networking::VectorClock;
|
||||||
|
/// use uuid::Uuid;
|
||||||
|
///
|
||||||
|
/// let node1 = Uuid::new_v4();
|
||||||
|
/// let node2 = Uuid::new_v4();
|
||||||
|
///
|
||||||
|
/// let mut clock1 = VectorClock::new();
|
||||||
|
/// clock1.increment(node1); // node1: 1
|
||||||
|
///
|
||||||
|
/// let mut clock2 = VectorClock::new();
|
||||||
|
/// clock2.increment(node2); // node2: 1
|
||||||
|
///
|
||||||
|
/// assert!(clock1.is_concurrent_with(&clock2));
|
||||||
|
/// assert!(clock2.is_concurrent_with(&clock1));
|
||||||
|
/// ```
|
||||||
|
pub fn is_concurrent_with(&self, other: &VectorClock) -> bool {
|
||||||
|
// Identical clocks are not concurrent
|
||||||
|
if self == other {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Concurrent if neither happened-before the other
|
||||||
|
!self.happened_before(other) && !other.happened_before(self)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Compare two vector clocks
|
||||||
|
///
|
||||||
|
/// Returns:
|
||||||
|
/// - `Ordering::Less` if self happened-before other
|
||||||
|
/// - `Ordering::Greater` if other happened-before self
|
||||||
|
/// - `Ordering::Equal` if they are identical
|
||||||
|
/// - `Err` if they are concurrent
|
||||||
|
pub fn compare(&self, other: &VectorClock) -> Result<std::cmp::Ordering> {
|
||||||
|
if self == other {
|
||||||
|
return Ok(std::cmp::Ordering::Equal);
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.happened_before(other) {
|
||||||
|
return Ok(std::cmp::Ordering::Less);
|
||||||
|
}
|
||||||
|
|
||||||
|
if other.happened_before(self) {
|
||||||
|
return Ok(std::cmp::Ordering::Greater);
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(NetworkingError::VectorClockError(
|
||||||
|
"Clocks are concurrent".to_string(),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_new_clock() {
        // A brand-new clock tracks no per-node entries.
        let fresh = VectorClock::new();
        assert_eq!(fresh.clocks.len(), 0);
    }

    #[test]
    fn test_increment() {
        let id = uuid::Uuid::new_v4();
        let mut vc = VectorClock::new();

        // increment() returns the post-increment counter value.
        assert_eq!(vc.increment(id), 1);
        assert_eq!(vc.get(id), 1);

        assert_eq!(vc.increment(id), 2);
        assert_eq!(vc.get(id), 2);
    }

    #[test]
    fn test_get_unknown_node() {
        // Nodes that were never incremented implicitly read as 0.
        let vc = VectorClock::new();
        assert_eq!(vc.get(uuid::Uuid::new_v4()), 0);
    }

    #[test]
    fn test_merge() {
        let a = uuid::Uuid::new_v4();
        let b = uuid::Uuid::new_v4();

        let mut left = VectorClock::new();
        left.increment(a);
        left.increment(a); // {a: 2}

        let mut right = VectorClock::new();
        right.increment(b); // {b: 1}

        // Merging unions the node sets of both clocks.
        left.merge(&right);
        assert_eq!(left.get(a), 2);
        assert_eq!(left.get(b), 1);
    }

    #[test]
    fn test_merge_takes_max() {
        let id = uuid::Uuid::new_v4();

        let mut behind = VectorClock::new();
        behind.increment(id); // {id: 1}

        let mut ahead = VectorClock::new();
        ahead.increment(id);
        ahead.increment(id); // {id: 2}

        // On a shared node, merge keeps the per-node maximum.
        behind.merge(&ahead);
        assert_eq!(behind.get(id), 2);
    }

    #[test]
    fn test_happened_before() {
        let id = uuid::Uuid::new_v4();

        let mut earlier = VectorClock::new();
        earlier.increment(id); // {id: 1}

        let mut later = VectorClock::new();
        later.increment(id);
        later.increment(id); // {id: 2}

        // Strictly smaller on the shared node => happened-before,
        // and the relation is antisymmetric.
        assert!(earlier.happened_before(&later));
        assert!(!later.happened_before(&earlier));
    }

    #[test]
    fn test_happened_before_multiple_nodes() {
        let a = uuid::Uuid::new_v4();
        let b = uuid::Uuid::new_v4();

        let mut earlier = VectorClock::new();
        earlier.increment(a); // {a: 1}

        let mut later = VectorClock::new();
        later.increment(a);
        later.increment(b); // {a: 1, b: 1}

        // later dominates earlier: equal on a, strictly greater on b.
        assert!(earlier.happened_before(&later));
        assert!(!later.happened_before(&earlier));
    }

    #[test]
    fn test_concurrent() {
        let a = uuid::Uuid::new_v4();
        let b = uuid::Uuid::new_v4();

        let mut left = VectorClock::new();
        left.increment(a);

        let mut right = VectorClock::new();
        right.increment(b);

        // Concurrency detection is symmetric.
        assert!(left.is_concurrent_with(&right));
        assert!(right.is_concurrent_with(&left));
    }

    #[test]
    fn test_happened_before_with_disjoint_nodes() {
        // Critical regression case: clocks over completely disjoint node
        // sets are concurrent, not happened-before. A previous buggy
        // implementation got this wrong.
        let a = uuid::Uuid::new_v4();
        let b = uuid::Uuid::new_v4();

        let mut left = VectorClock::new();
        left.increment(a); // {a: 1}

        let mut right = VectorClock::new();
        right.increment(b); // {b: 1}

        // Neither clock precedes the other.
        assert!(!left.happened_before(&right));
        assert!(!right.happened_before(&left));
        assert!(left.is_concurrent_with(&right));
    }

    #[test]
    fn test_happened_before_with_superset_nodes() {
        // When one clock covers every node of another PLUS extra nodes,
        // the smaller clock happened-before the larger one.
        let a = uuid::Uuid::new_v4();
        let b = uuid::Uuid::new_v4();

        let mut subset = VectorClock::new();
        subset.increment(a); // {a: 1}

        let mut superset = VectorClock::new();
        superset.increment(a);
        superset.increment(b); // {a: 1, b: 1}

        // subset happened before superset, and only in that direction.
        assert!(subset.happened_before(&superset));
        assert!(!superset.happened_before(&subset));
        assert!(!subset.is_concurrent_with(&superset));
    }

    #[test]
    fn test_identical_clocks() {
        let id = uuid::Uuid::new_v4();

        let mut left = VectorClock::new();
        left.increment(id);

        let mut right = VectorClock::new();
        right.increment(id);

        // Equal clocks are neither ordered nor concurrent.
        assert_eq!(left, right);
        assert!(!left.happened_before(&right));
        assert!(!right.happened_before(&left));
        assert!(!left.is_concurrent_with(&right));
    }

    #[test]
    fn test_compare() {
        let id = uuid::Uuid::new_v4();

        let mut earlier = VectorClock::new();
        earlier.increment(id);

        let mut later = VectorClock::new();
        later.increment(id);
        later.increment(id);

        // compare() mirrors happened_before() on ordered clocks and
        // reports Equal for a clock compared with itself.
        assert_eq!(earlier.compare(&later).unwrap(), std::cmp::Ordering::Less);
        assert_eq!(later.compare(&earlier).unwrap(), std::cmp::Ordering::Greater);
        assert_eq!(earlier.compare(&earlier).unwrap(), std::cmp::Ordering::Equal);
    }

    #[test]
    fn test_compare_concurrent() {
        let a = uuid::Uuid::new_v4();
        let b = uuid::Uuid::new_v4();

        let mut left = VectorClock::new();
        left.increment(a);

        let mut right = VectorClock::new();
        right.increment(b);

        // Concurrent clocks admit no ordering, so compare() errors.
        assert!(left.compare(&right).is_err());
    }

    #[test]
    fn test_serialization() -> bincode::Result<()> {
        let id = uuid::Uuid::new_v4();
        let mut original = VectorClock::new();
        original.increment(id);

        // A bincode round-trip must preserve clock equality.
        let bytes = bincode::serialize(&original)?;
        let restored: VectorClock = bincode::deserialize(&bytes)?;
        assert_eq!(original, restored);

        Ok(())
    }
}
|
||||||
@@ -37,7 +37,7 @@ mod health;
|
|||||||
mod lifecycle;
|
mod lifecycle;
|
||||||
mod metrics;
|
mod metrics;
|
||||||
mod plugin;
|
mod plugin;
|
||||||
mod reflection;
|
pub mod reflection;
|
||||||
mod systems;
|
mod systems;
|
||||||
mod types;
|
mod types;
|
||||||
|
|
||||||
|
|||||||
39
crates/lib/tests/networking_gossip_test.rs
Normal file
39
crates/lib/tests/networking_gossip_test.rs
Normal file
@@ -0,0 +1,39 @@
|
|||||||
|
//! Integration test for gossip bridge
|
||||||
|
//!
|
||||||
|
//! Tests the gossip bridge channel infrastructure. Full iroh-gossip integration
|
||||||
|
//! will be tested in Phase 3.5.
|
||||||
|
|
||||||
|
use lib::networking::*;
|
||||||
|
|
||||||
|
#[test]
fn test_gossip_bridge_creation() {
    // The bridge must report back the node id it was initialized with.
    let id = uuid::Uuid::new_v4();
    let bridge = init_gossip_bridge(id);
    assert_eq!(bridge.node_id(), id);
}
|
||||||
|
|
||||||
|
#[test]
fn test_gossip_bridge_send() {
    let id = uuid::Uuid::new_v4();
    let bridge = init_gossip_bridge(id);

    // Wrap a join request in the versioned envelope used on the wire.
    let versioned = VersionedMessage::new(SyncMessage::JoinRequest {
        node_id: id,
        session_secret: None,
    });

    // Pushing a message into the bridge channel should succeed.
    assert!(bridge.send(versioned).is_ok());
}
|
||||||
|
|
||||||
|
#[test]
fn test_gossip_bridge_try_recv_empty() {
    // With nothing published yet, a non-blocking receive yields None.
    let bridge = init_gossip_bridge(uuid::Uuid::new_v4());
    assert!(bridge.try_recv().is_none());
}
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
# RFC 0001: CRDT Synchronization Protocol over iroh-gossip
|
# RFC 0001: CRDT Synchronization Protocol over iroh-gossip
|
||||||
|
|
||||||
**Status:** Approved
|
**Status:** Implemented
|
||||||
**Authors:** Sienna
|
**Authors:** Sienna
|
||||||
**Created:** 2025-11-15
|
**Created:** 2025-11-15
|
||||||
**Updated:** 2025-11-15
|
**Updated:** 2025-11-15
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
# RFC 0002: Persistence Strategy for Battery-Efficient State Management
|
# RFC 0002: Persistence Strategy for Battery-Efficient State Management
|
||||||
|
|
||||||
**Status:** Draft
|
**Status:** Implemented
|
||||||
**Authors:** Sienna
|
**Authors:** Sienna
|
||||||
**Created:** 2025-11-15
|
**Created:** 2025-11-15
|
||||||
**Related:** RFC 0001 (CRDT Sync Protocol)
|
**Related:** RFC 0001 (CRDT Sync Protocol)
|
||||||
|
|||||||
Reference in New Issue
Block a user