code review results

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-11 18:39:57 +00:00
parent 2847e7236f
commit 1504807afe
40 changed files with 2600 additions and 678 deletions

234
Cargo.lock generated
View File

@@ -198,6 +198,18 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "anes"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299"
[[package]]
name = "anstyle"
version = "1.0.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78"
[[package]] [[package]]
name = "anyhow" name = "anyhow"
version = "1.0.100" version = "1.0.100"
@@ -1918,6 +1930,12 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]] [[package]]
name = "cc" name = "cc"
version = "1.2.46" version = "1.2.46"
@@ -1983,6 +2001,33 @@ dependencies = [
"windows-link 0.2.1", "windows-link 0.2.1",
] ]
[[package]]
name = "ciborium"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e"
dependencies = [
"ciborium-io",
"ciborium-ll",
"serde",
]
[[package]]
name = "ciborium-io"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757"
[[package]]
name = "ciborium-ll"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9"
dependencies = [
"ciborium-io",
"half",
]
[[package]] [[package]]
name = "cipher" name = "cipher"
version = "0.5.0-rc.1" version = "0.5.0-rc.1"
@@ -2006,6 +2051,31 @@ dependencies = [
"libloading", "libloading",
] ]
[[package]]
name = "clap"
version = "4.5.53"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8"
dependencies = [
"clap_builder",
]
[[package]]
name = "clap_builder"
version = "4.5.53"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00"
dependencies = [
"anstyle",
"clap_lex",
]
[[package]]
name = "clap_lex"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d728cc89cf3aee9ff92b05e62b19ee65a02b5702cff7d5a377e32c6ae29d8d"
[[package]] [[package]]
name = "client" name = "client"
version = "0.1.0" version = "0.1.0"
@@ -2295,6 +2365,42 @@ dependencies = [
"tiny-keccak", "tiny-keccak",
] ]
[[package]]
name = "criterion"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
dependencies = [
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
"is-terminal",
"itertools 0.10.5",
"num-traits",
"once_cell",
"oorandom",
"plotters",
"rayon",
"regex",
"serde",
"serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
]
[[package]]
name = "criterion-plot"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
dependencies = [
"cast",
"itertools 0.10.5",
]
[[package]] [[package]]
name = "critical-section" name = "critical-section"
version = "1.2.0" version = "1.2.0"
@@ -4602,6 +4708,26 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "is-terminal"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46"
dependencies = [
"hermit-abi",
"libc",
"windows-sys 0.61.2",
]
[[package]]
name = "itertools"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
dependencies = [
"either",
]
[[package]] [[package]]
name = "itertools" name = "itertools"
version = "0.11.0" version = "0.11.0"
@@ -4727,11 +4853,14 @@ dependencies = [
"anyhow", "anyhow",
"bevy", "bevy",
"bincode", "bincode",
"blocking",
"chrono", "chrono",
"crdts", "crdts",
"criterion",
"futures-lite", "futures-lite",
"iroh", "iroh",
"iroh-gossip", "iroh-gossip",
"proptest",
"rusqlite", "rusqlite",
"serde", "serde",
"serde_json", "serde_json",
@@ -5798,6 +5927,12 @@ dependencies = [
"pkg-config", "pkg-config",
] ]
[[package]]
name = "oorandom"
version = "11.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"
[[package]] [[package]]
name = "openssl" name = "openssl"
version = "0.10.75" version = "0.10.75"
@@ -6038,6 +6173,34 @@ version = "0.3.32"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
name = "plotters"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747"
dependencies = [
"num-traits",
"plotters-backend",
"plotters-svg",
"wasm-bindgen",
"web-sys",
]
[[package]]
name = "plotters-backend"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a"
[[package]]
name = "plotters-svg"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670"
dependencies = [
"plotters-backend",
]
[[package]] [[package]]
name = "png" name = "png"
version = "0.18.0" version = "0.18.0"
@@ -6208,6 +6371,25 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3eb8486b569e12e2c32ad3e204dbaba5e4b5b216e9367044f25f1dba42341773" checksum = "3eb8486b569e12e2c32ad3e204dbaba5e4b5b216e9367044f25f1dba42341773"
[[package]]
name = "proptest"
version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bee689443a2bd0a16ab0348b52ee43e3b2d1b1f931c8aa5c9f8de4c86fbe8c40"
dependencies = [
"bit-set 0.8.0",
"bit-vec 0.8.0",
"bitflags 2.10.0",
"num-traits",
"rand 0.9.2",
"rand_chacha 0.9.0",
"rand_xorshift",
"regex-syntax",
"rusty-fork",
"tempfile",
"unarray",
]
[[package]] [[package]]
name = "pulp" name = "pulp"
version = "0.18.22" version = "0.18.22"
@@ -6243,6 +6425,12 @@ dependencies = [
"num-traits", "num-traits",
] ]
[[package]]
name = "quick-error"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]] [[package]]
name = "quick-xml" name = "quick-xml"
version = "0.37.5" version = "0.37.5"
@@ -6408,6 +6596,15 @@ dependencies = [
"rand 0.9.2", "rand 0.9.2",
] ]
[[package]]
name = "rand_xorshift"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "513962919efc330f829edb2535844d1b912b0fbe2ca165d613e4e8788bb05a5a"
dependencies = [
"rand_core 0.9.3",
]
[[package]] [[package]]
name = "range-alloc" name = "range-alloc"
version = "0.1.4" version = "0.1.4"
@@ -6793,6 +6990,18 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "rusty-fork"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc6bf79ff24e648f6da1f8d1f011e9cac26491b619e6b9280f2b47f1774e6ee2"
dependencies = [
"fnv",
"quick-error",
"tempfile",
"wait-timeout",
]
[[package]] [[package]]
name = "rustybuzz" name = "rustybuzz"
version = "0.14.1" version = "0.14.1"
@@ -7671,6 +7880,16 @@ dependencies = [
"zerovec", "zerovec",
] ]
[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4d6b5f19ff7664e8c98d03e2139cb510db9b0a60b55f8e8709b689d939b6bc"
dependencies = [
"serde",
"serde_json",
]
[[package]] [[package]]
name = "tinyvec" name = "tinyvec"
version = "1.10.0" version = "1.10.0"
@@ -8054,6 +8273,12 @@ dependencies = [
"yoke 0.7.5", "yoke 0.7.5",
] ]
[[package]]
name = "unarray"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94"
[[package]] [[package]]
name = "unicode-bidi" name = "unicode-bidi"
version = "0.3.18" version = "0.3.18"
@@ -8229,6 +8454,15 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "wait-timeout"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11"
dependencies = [
"libc",
]
[[package]] [[package]]
name = "walkdir" name = "walkdir"
version = "2.5.0" version = "2.5.0"

View File

@@ -19,6 +19,8 @@ bevy.workspace = true
bincode = "1.3" bincode = "1.3"
futures-lite = "2.0" futures-lite = "2.0"
sha2 = "0.10" sha2 = "0.10"
tokio.workspace = true
blocking = "1.6"
[dev-dependencies] [dev-dependencies]
tokio.workspace = true tokio.workspace = true
@@ -26,3 +28,13 @@ iroh = { workspace = true, features = ["discovery-local-network"] }
iroh-gossip.workspace = true iroh-gossip.workspace = true
futures-lite = "2.0" futures-lite = "2.0"
tempfile = "3" tempfile = "3"
proptest = "1.4"
criterion = "0.5"
[[bench]]
name = "write_buffer"
harness = false
[[bench]]
name = "vector_clock"
harness = false

View File

@@ -0,0 +1,301 @@
//! VectorClock performance benchmarks
//!
//! These benchmarks measure the performance improvements from the two-pass →
//! single-pass optimization in the happened_before() method.
use criterion::{
black_box,
criterion_group,
criterion_main,
BenchmarkId,
Criterion,
};
use lib::networking::VectorClock;
/// Build a `VectorClock` holding `num_nodes` distinct node entries, each
/// incremented exactly once.
///
/// Node ids are derived deterministically from the loop index via
/// `Uuid::from_u128`, so repeated benchmark runs see identical clocks.
fn create_clock_with_nodes(num_nodes: usize) -> VectorClock {
    let mut clock = VectorClock::new();
    for node_id in (0..num_nodes).map(|i| uuid::Uuid::from_u128(i as u128)) {
        clock.increment(node_id);
    }
    clock
}
/// Benchmark: happened_before comparison with small clocks (5 nodes)
///
/// This is the common case in typical distributed systems with a small number
/// of replicas.
fn bench_happened_before_small_clocks(c: &mut Criterion) {
    // NOTE(review): the group label says "5 nodes", but only the
    // "concurrent" case below actually builds 5-node clocks; the first two
    // cases use 1- and 2-node clocks. Consider renaming for accuracy.
    let mut group = c.benchmark_group("VectorClock::happened_before (5 nodes)");
    // Case: clock1 strictly precedes clock2 — same node, clock2 has one more
    // increment, so happened_before is expected to report true.
    group.bench_function("happened_before (true)", |b| {
        let node = uuid::Uuid::from_u128(1);
        let mut clock1 = VectorClock::new();
        clock1.increment(node);
        let mut clock2 = VectorClock::new();
        clock2.increment(node);
        clock2.increment(node);
        b.iter(|| black_box(clock1.happened_before(&clock2)));
    });
    // Case: clocks advanced on disjoint nodes — neither precedes the other,
    // so happened_before is expected to report false.
    group.bench_function("happened_before (false)", |b| {
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);
        let mut clock1 = VectorClock::new();
        clock1.increment(node1);
        let mut clock2 = VectorClock::new();
        clock2.increment(node2);
        b.iter(|| black_box(clock1.happened_before(&clock2)));
    });
    // Case: two 5-node clocks that diverge on different nodes — concurrent
    // histories, the most work for the comparison.
    group.bench_function("happened_before (concurrent)", |b| {
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);
        let node3 = uuid::Uuid::from_u128(3);
        let mut clock1 = create_clock_with_nodes(5);
        clock1.increment(node1);
        let mut clock2 = create_clock_with_nodes(5);
        clock2.increment(node2);
        clock2.increment(node3);
        b.iter(|| black_box(clock1.happened_before(&clock2)));
    });
    group.finish();
}
/// Benchmark: happened_before comparison with large clocks (100 nodes)
///
/// This tests the optimization's effectiveness with larger clock sizes.
/// The single-pass algorithm should show better improvements here.
fn bench_happened_before_large_clocks(c: &mut Criterion) {
    let mut group = c.benchmark_group("VectorClock::happened_before (100 nodes)");
    // Ordered case: clock2 is a clone of clock1 plus one extra increment,
    // so clock1 strictly precedes clock2.
    group.bench_function("happened_before (100 nodes, true)", |b| {
        let clock1 = create_clock_with_nodes(100);
        let mut clock2 = clock1.clone();
        // Node 50 already exists in the 100-node clock (ids 0..100).
        let node = uuid::Uuid::from_u128(50);
        clock2.increment(node);
        b.iter(|| black_box(clock1.happened_before(&clock2)));
    });
    // Concurrent case: both clocks share 100 common nodes but each gains one
    // increment on a node the other never saw (ids 200 and 201).
    group.bench_function("happened_before (100 nodes, concurrent)", |b| {
        let mut clock1 = create_clock_with_nodes(100);
        let node1 = uuid::Uuid::from_u128(200);
        clock1.increment(node1);
        let mut clock2 = create_clock_with_nodes(100);
        let node2 = uuid::Uuid::from_u128(201);
        clock2.increment(node2);
        b.iter(|| black_box(clock1.happened_before(&clock2)));
    });
    group.finish();
}
/// Benchmark: happened_before with disjoint node sets
///
/// This is the scenario where early exit optimization provides the most benefit.
fn bench_happened_before_disjoint(c: &mut Criterion) {
    let mut group = c.benchmark_group("VectorClock::happened_before (disjoint)");
    // Parameterized over clock size so throughput can be compared across N.
    for num_nodes in [5, 20, 50, 100] {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_nodes),
            &num_nodes,
            |b, &num_nodes| {
                // Create two clocks with completely different nodes:
                // clock1 uses ids 0..N, clock2 uses ids 1000..1000+N, so the
                // node sets never overlap for the sizes benchmarked here.
                let mut clock1 = VectorClock::new();
                for i in 0..num_nodes {
                    clock1.increment(uuid::Uuid::from_u128(i as u128));
                }
                let mut clock2 = VectorClock::new();
                for i in 0..num_nodes {
                    clock2.increment(uuid::Uuid::from_u128((i + 1000) as u128));
                }
                // Only the comparison itself is measured; clock setup is
                // outside the iteration closure.
                b.iter(|| black_box(clock1.happened_before(&clock2)));
            },
        );
    }
    group.finish();
}
/// Benchmark: merge operation
///
/// Uses `iter_batched` so that building the input clocks is done in the
/// setup closure and excluded from the measured time; only `merge` itself
/// (plus the batched move of the clocks) is timed.
fn bench_merge(c: &mut Criterion) {
    let mut group = c.benchmark_group("VectorClock::merge");
    for num_nodes in [5, 20, 50, 100] {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_nodes),
            &num_nodes,
            |b, &num_nodes| {
                b.iter_batched(
                    || {
                        // Setup: Create two clocks with overlapping nodes.
                        // clock2's ids (0..N/2) are a subset of clock1's
                        // (0..N), since ids are index-derived in the helper.
                        let clock1 = create_clock_with_nodes(num_nodes);
                        let clock2 = create_clock_with_nodes(num_nodes / 2);
                        (clock1, clock2)
                    },
                    |(mut clock1, clock2)| {
                        // Measured: merge clock2 into clock1 and return the
                        // result so the optimizer can't drop the work.
                        clock1.merge(&clock2);
                        black_box(clock1)
                    },
                    criterion::BatchSize::SmallInput,
                );
            },
        );
    }
    group.finish();
}
/// Benchmark: increment operation
///
/// Two paths are measured separately: inserting a brand-new node entry vs.
/// bumping an already-present entry. `iter_batched` keeps clock/uuid
/// construction out of the measured closure.
fn bench_increment(c: &mut Criterion) {
    let mut group = c.benchmark_group("VectorClock::increment");
    // Path 1: node id not yet present in the clock — first increment.
    group.bench_function("increment (new node)", |b| {
        b.iter_batched(
            || {
                // Fresh empty clock and a random node id per batch element.
                let clock = VectorClock::new();
                let node_id = uuid::Uuid::new_v4();
                (clock, node_id)
            },
            |(mut clock, node_id)| {
                black_box(clock.increment(node_id));
                // Return the clock so its drop cost lands outside the timing.
                clock
            },
            criterion::BatchSize::SmallInput,
        );
    });
    // Path 2: node id already present — the measured call updates an
    // existing entry rather than inserting one.
    group.bench_function("increment (existing node)", |b| {
        b.iter_batched(
            || {
                let node_id = uuid::Uuid::new_v4();
                let mut clock = VectorClock::new();
                // Pre-seed the entry in setup so only the second increment
                // is measured.
                clock.increment(node_id);
                (clock, node_id)
            },
            |(mut clock, node_id)| {
                black_box(clock.increment(node_id));
                clock
            },
            criterion::BatchSize::SmallInput,
        );
    });
    group.finish();
}
/// Benchmark: is_concurrent_with operation
fn bench_is_concurrent(c: &mut Criterion) {
    let mut group = c.benchmark_group("VectorClock::is_concurrent_with");
    // NOTE(review): the label says "5 nodes" but each clock here holds a
    // single (distinct) node entry — consider renaming the case.
    // Concurrent case: disjoint node sets, neither clock precedes the other.
    group.bench_function("concurrent (5 nodes)", |b| {
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);
        let mut clock1 = VectorClock::new();
        clock1.increment(node1);
        let mut clock2 = VectorClock::new();
        clock2.increment(node2);
        b.iter(|| black_box(clock1.is_concurrent_with(&clock2)));
    });
    // Ordered case: same node, clock2 has one more increment, so the clocks
    // are causally related rather than concurrent.
    group.bench_function("not concurrent (ordered)", |b| {
        let node = uuid::Uuid::from_u128(1);
        let mut clock1 = VectorClock::new();
        clock1.increment(node);
        let mut clock2 = VectorClock::new();
        clock2.increment(node);
        clock2.increment(node);
        b.iter(|| black_box(clock1.is_concurrent_with(&clock2)));
    });
    group.finish();
}
/// Benchmark: Realistic workload simulation
///
/// Simulates a typical distributed system workflow with increments, merges,
/// and comparisons.
fn bench_realistic_workload(c: &mut Criterion) {
    let mut group = c.benchmark_group("VectorClock::realistic_workload");
    group.bench_function("3 nodes, 100 operations", |b| {
        // Node ids are fixed outside the iteration so they aren't part of
        // the measured work.
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);
        let node3 = uuid::Uuid::from_u128(3);
        b.iter(|| {
            // Each iteration rebuilds the three clocks from scratch, so the
            // measured time includes clock construction plus the 100-op mix.
            let mut clock1 = VectorClock::new();
            let mut clock2 = VectorClock::new();
            let mut clock3 = VectorClock::new();
            // Simulate 100 operations across 3 nodes. The i % 7 dispatch
            // yields roughly 3/7 increments, 2/7 merges, 2/7 comparisons.
            for i in 0..100 {
                match i % 7 {
                    0 => {
                        clock1.increment(node1);
                    }
                    1 => {
                        clock2.increment(node2);
                    }
                    2 => {
                        clock3.increment(node3);
                    }
                    3 => {
                        clock1.merge(&clock2);
                    }
                    4 => {
                        clock2.merge(&clock3);
                    }
                    5 => {
                        // Result intentionally discarded — only the cost of
                        // the comparison matters here.
                        let _ = clock1.happened_before(&clock2);
                    }
                    _ => {
                        let _ = clock2.is_concurrent_with(&clock3);
                    }
                }
            }
            // Hand the clocks to black_box so the whole loop can't be
            // optimized away.
            black_box((clock1, clock2, clock3))
        });
    });
    group.finish();
}
// Register every benchmark function in this file and generate the harness
// entry point (Cargo.toml sets `harness = false` for this bench target, so
// criterion_main! supplies main()).
criterion_group!(
    benches,
    bench_happened_before_small_clocks,
    bench_happened_before_large_clocks,
    bench_happened_before_disjoint,
    bench_merge,
    bench_increment,
    bench_is_concurrent,
    bench_realistic_workload,
);
criterion_main!(benches);

View File

@@ -0,0 +1,217 @@
//! WriteBuffer performance benchmarks
//!
//! These benchmarks measure the performance improvements from the O(n²) → O(1)
//! optimization that replaced Vec::retain() with HashMap-based indexing.
use criterion::{
black_box,
criterion_group,
criterion_main,
BenchmarkId,
Criterion,
};
use lib::persistence::{
PersistenceOp,
WriteBuffer,
};
/// Benchmark: Add many updates to the same component (worst case for old implementation)
///
/// This scenario heavily stresses the deduplication logic. In the old O(n)
/// implementation, each add() would scan the entire buffer. With HashMap
/// indexing, this is now O(1).
fn bench_repeated_updates_same_component(c: &mut Criterion) {
    let mut group = c.benchmark_group("WriteBuffer::add (same component)");
    for num_updates in [10, 100, 500, 1000] {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_updates),
            &num_updates,
            |b, &num_updates| {
                b.iter(|| {
                    // Buffer construction and uuid generation happen inside
                    // the measured closure, so they contribute a small fixed
                    // cost per iteration.
                    let mut buffer = WriteBuffer::new(2000);
                    let entity_id = uuid::Uuid::new_v4();
                    // Apply many updates to the same (entity, component) pair
                    for i in 0..num_updates {
                        let op = PersistenceOp::UpsertComponent {
                            entity_id,
                            component_type: "Transform".to_string(),
                            // `i as u8` wraps at 256 — the byte values are
                            // only filler, so that's harmless here.
                            data: vec![i as u8; 100], // 100 bytes per update
                        };
                        buffer.add(op).ok(); // Ignore errors for benchmarking
                    }
                    black_box(buffer);
                });
            },
        );
    }
    group.finish();
}
/// Benchmark: Add updates to different components (best case, no deduplication)
///
/// This tests the performance when there's no deduplication happening.
/// Both old and new implementations should perform similarly here.
fn bench_different_components(c: &mut Criterion) {
    let mut group = c.benchmark_group("WriteBuffer::add (different components)");
    for num_components in [10, 100, 500, 1000] {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_components),
            &num_components,
            |b, &num_components| {
                b.iter(|| {
                    let mut buffer = WriteBuffer::new(2000);
                    let entity_id = uuid::Uuid::new_v4();
                    // Add different components (no deduplication): every op
                    // gets a unique component_type ("Component0", …), so no
                    // key ever repeats within one iteration.
                    for i in 0..num_components {
                        let op = PersistenceOp::UpsertComponent {
                            entity_id,
                            // The per-op format! allocation is part of the
                            // measured cost, identically for all sizes.
                            component_type: format!("Component{}", i),
                            data: vec![i as u8; 100],
                        };
                        buffer.add(op).ok();
                    }
                    black_box(buffer);
                });
            },
        );
    }
    group.finish();
}
/// Benchmark: Mixed workload (realistic scenario)
///
/// This simulates a realistic workload with a mix of repeated updates to
/// popular components and unique component updates.
fn bench_mixed_workload(c: &mut Criterion) {
    let mut group = c.benchmark_group("WriteBuffer::add (mixed workload)");
    for num_ops in [100, 500, 1000] {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_ops),
            &num_ops,
            |b, &num_ops| {
                b.iter(|| {
                    let mut buffer = WriteBuffer::new(2000);
                    let entity_id = uuid::Uuid::new_v4();
                    // 70% updates to Transform, 20% to Material, 10% to unique components
                    // (the split is exact per block of 10 iterations via i % 10).
                    for i in 0..num_ops {
                        let (component_type, data_size) = match i % 10 {
                            0..=6 => ("Transform".to_string(), 64), // 70%
                            7..=8 => ("Material".to_string(), 128), // 20%
                            _ => (format!("Component{}", i), 32),   // 10%
                        };
                        let op = PersistenceOp::UpsertComponent {
                            entity_id,
                            component_type,
                            data: vec![i as u8; data_size],
                        };
                        // Errors (e.g. a full buffer) are deliberately
                        // ignored — throughput is what's being measured.
                        buffer.add(op).ok();
                    }
                    black_box(buffer);
                });
            },
        );
    }
    group.finish();
}
/// Benchmark: Entity updates (different code path)
///
/// Tests the performance of entity-level deduplication.
fn bench_entity_updates(c: &mut Criterion) {
    let mut group = c.benchmark_group("WriteBuffer::add (entity updates)");
    for num_updates in [10, 100, 500] {
        group.bench_with_input(
            BenchmarkId::from_parameter(num_updates),
            &num_updates,
            |b, &num_updates| {
                b.iter(|| {
                    let mut buffer = WriteBuffer::new(2000);
                    let entity_id = uuid::Uuid::new_v4();
                    // Repeatedly update the same entity — every UpsertEntity
                    // targets the same id, exercising entity-level dedup.
                    for i in 0..num_updates {
                        let op = PersistenceOp::UpsertEntity {
                            id: entity_id,
                            data: lib::persistence::EntityData {
                                id: entity_id,
                                // NOTE(review): Utc::now() runs inside the
                                // measured loop, so clock-read cost is part
                                // of the timing for this benchmark.
                                created_at: chrono::Utc::now(),
                                updated_at: chrono::Utc::now(),
                                entity_type: format!("Type{}", i),
                            },
                        };
                        buffer.add(op).ok();
                    }
                    black_box(buffer);
                });
            },
        );
    }
    group.finish();
}
/// Benchmark: take_operations() performance
///
/// Measures the cost of clearing the buffer and returning operations.
/// `iter_batched` rebuilds a filled buffer in the setup closure for each
/// batch element, so only `take_operations()` itself is timed.
fn bench_take_operations(c: &mut Criterion) {
    let mut group = c.benchmark_group("WriteBuffer::take_operations");
    for buffer_size in [10, 100, 500, 1000] {
        group.bench_with_input(
            BenchmarkId::from_parameter(buffer_size),
            &buffer_size,
            |b, &buffer_size| {
                b.iter_batched(
                    || {
                        // Setup: Create a buffer with many operations, each
                        // under a unique component_type so none deduplicate.
                        let mut buffer = WriteBuffer::new(2000);
                        let entity_id = uuid::Uuid::new_v4();
                        for i in 0..buffer_size {
                            let op = PersistenceOp::UpsertComponent {
                                entity_id,
                                component_type: format!("Component{}", i),
                                data: vec![i as u8; 64],
                            };
                            buffer.add(op).ok();
                        }
                        buffer
                    },
                    |mut buffer| {
                        // Benchmark: Take all operations; black_box keeps the
                        // returned Vec alive so the call can't be elided.
                        black_box(buffer.take_operations())
                    },
                    criterion::BatchSize::SmallInput,
                );
            },
        );
    }
    group.finish();
}
// Register every WriteBuffer benchmark and generate the harness entry point
// (the bench target is declared with `harness = false`, so criterion_main!
// supplies main()).
criterion_group!(
    benches,
    bench_repeated_updates_same_component,
    bench_different_components,
    bench_mixed_workload,
    bench_entity_updates,
    bench_take_operations,
);
criterion_main!(benches);

View File

@@ -7,6 +7,11 @@ use serde::{
Serialize, Serialize,
}; };
/// Seconds between Unix epoch (1970-01-01) and Apple epoch (2001-01-01)
/// Apple's Cocoa timestamps use 2001-01-01 00:00:00 UTC as their reference
/// point
const APPLE_EPOCH_OFFSET: i64 = 978307200;
/// Represents a message in the iMessage database /// Represents a message in the iMessage database
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Message { pub struct Message {
@@ -52,8 +57,6 @@ pub struct Chat {
pub fn apple_timestamp_to_datetime(timestamp: i64) -> DateTime<Utc> { pub fn apple_timestamp_to_datetime(timestamp: i64) -> DateTime<Utc> {
// Apple's Cocoa timestamps are in nanoseconds since 2001-01-01 00:00:00 UTC // Apple's Cocoa timestamps are in nanoseconds since 2001-01-01 00:00:00 UTC
// Convert to Unix timestamp (seconds since 1970-01-01 00:00:00 UTC) // Convert to Unix timestamp (seconds since 1970-01-01 00:00:00 UTC)
const APPLE_EPOCH_OFFSET: i64 = 978307200; // Seconds between 1970-01-01 and 2001-01-01
let seconds = timestamp / 1_000_000_000 + APPLE_EPOCH_OFFSET; let seconds = timestamp / 1_000_000_000 + APPLE_EPOCH_OFFSET;
let nanos = (timestamp % 1_000_000_000) as u32; let nanos = (timestamp % 1_000_000_000) as u32;
@@ -63,8 +66,6 @@ pub fn apple_timestamp_to_datetime(timestamp: i64) -> DateTime<Utc> {
/// Helper function to convert DateTime to Apple's Cocoa timestamp /// Helper function to convert DateTime to Apple's Cocoa timestamp
pub fn datetime_to_apple_timestamp(dt: DateTime<Utc>) -> i64 { pub fn datetime_to_apple_timestamp(dt: DateTime<Utc>) -> i64 {
const APPLE_EPOCH_OFFSET: i64 = 978307200;
let unix_timestamp = dt.timestamp(); let unix_timestamp = dt.timestamp();
let nanos = dt.timestamp_subsec_nanos() as i64; let nanos = dt.timestamp_subsec_nanos() as i64;

View File

@@ -10,9 +10,10 @@ use uuid::Uuid;
use crate::{ use crate::{
networking::{ networking::{
VectorClock,
blob_support::{ blob_support::{
get_component_data,
BlobStore, BlobStore,
get_component_data,
}, },
delta_generation::NodeVectorClock, delta_generation::NodeVectorClock,
entity_map::NetworkEntityMap, entity_map::NetworkEntityMap,
@@ -23,19 +24,20 @@ use crate::{
SyncMessage, SyncMessage,
}, },
operations::ComponentOp, operations::ComponentOp,
VectorClock,
}, },
persistence::reflection::deserialize_component_typed, persistence::reflection::deserialize_component_typed,
}; };
/// Resource to track the last vector clock and originating node for each component on each entity /// Resource to track the last vector clock and originating node for each
/// component on each entity
/// ///
/// This enables Last-Write-Wins conflict resolution by comparing incoming /// This enables Last-Write-Wins conflict resolution by comparing incoming
/// operations' vector clocks with the current component's vector clock. /// operations' vector clocks with the current component's vector clock.
/// The node_id is used as a deterministic tiebreaker for concurrent operations. /// The node_id is used as a deterministic tiebreaker for concurrent operations.
#[derive(Resource, Default)] #[derive(Resource, Default)]
pub struct ComponentVectorClocks { pub struct ComponentVectorClocks {
/// Maps (entity_network_id, component_type) -> (vector_clock, originating_node_id) /// Maps (entity_network_id, component_type) -> (vector_clock,
/// originating_node_id)
clocks: HashMap<(Uuid, String), (VectorClock, Uuid)>, clocks: HashMap<(Uuid, String), (VectorClock, Uuid)>,
} }
@@ -52,8 +54,15 @@ impl ComponentVectorClocks {
} }
/// Update the vector clock and node_id for a component /// Update the vector clock and node_id for a component
pub fn set(&mut self, entity_id: Uuid, component_type: String, clock: VectorClock, node_id: Uuid) { pub fn set(
self.clocks.insert((entity_id, component_type), (clock, node_id)); &mut self,
entity_id: Uuid,
component_type: String,
clock: VectorClock,
node_id: Uuid,
) {
self.clocks
.insert((entity_id, component_type), (clock, node_id));
} }
/// Remove all clocks for an entity (when entity is deleted) /// Remove all clocks for an entity (when entity is deleted)
@@ -74,10 +83,7 @@ impl ComponentVectorClocks {
/// ///
/// - `delta`: The EntityDelta to apply /// - `delta`: The EntityDelta to apply
/// - `world`: The Bevy world to apply changes to /// - `world`: The Bevy world to apply changes to
pub fn apply_entity_delta( pub fn apply_entity_delta(delta: &EntityDelta, world: &mut World) {
delta: &EntityDelta,
world: &mut World,
) {
// Validate and merge the remote vector clock // Validate and merge the remote vector clock
{ {
let mut node_clock = world.resource_mut::<NodeVectorClock>(); let mut node_clock = world.resource_mut::<NodeVectorClock>();
@@ -99,12 +105,10 @@ pub fn apply_entity_delta(
for op in &delta.operations { for op in &delta.operations {
if let crate::networking::ComponentOp::Delete { vector_clock } = op { if let crate::networking::ComponentOp::Delete { vector_clock } = op {
// Record tombstone // Record tombstone
if let Some(mut registry) = world.get_resource_mut::<crate::networking::TombstoneRegistry>() { if let Some(mut registry) =
registry.record_deletion( world.get_resource_mut::<crate::networking::TombstoneRegistry>()
delta.entity_id, {
delta.node_id, registry.record_deletion(delta.entity_id, delta.node_id, vector_clock.clone());
vector_clock.clone(),
);
// Despawn the entity if it exists locally // Despawn the entity if it exists locally
let entity_to_despawn = { let entity_to_despawn = {
@@ -115,7 +119,10 @@ pub fn apply_entity_delta(
world.despawn(entity); world.despawn(entity);
let mut entity_map = world.resource_mut::<NetworkEntityMap>(); let mut entity_map = world.resource_mut::<NetworkEntityMap>();
entity_map.remove_by_network_id(delta.entity_id); entity_map.remove_by_network_id(delta.entity_id);
info!("Despawned entity {:?} due to Delete operation", delta.entity_id); info!(
"Despawned entity {:?} due to Delete operation",
delta.entity_id
);
} }
// Don't process other operations - entity is deleted // Don't process other operations - entity is deleted
@@ -127,10 +134,7 @@ pub fn apply_entity_delta(
// Check if we should ignore this delta due to deletion // Check if we should ignore this delta due to deletion
if let Some(registry) = world.get_resource::<crate::networking::TombstoneRegistry>() { if let Some(registry) = world.get_resource::<crate::networking::TombstoneRegistry>() {
if registry.should_ignore_operation(delta.entity_id, &delta.vector_clock) { if registry.should_ignore_operation(delta.entity_id, &delta.vector_clock) {
debug!( debug!("Ignoring delta for deleted entity {:?}", delta.entity_id);
"Ignoring delta for deleted entity {:?}",
delta.entity_id
);
return; return;
} }
} }
@@ -158,7 +162,10 @@ pub fn apply_entity_delta(
if let Some(mut persisted) = entity_mut.get_mut::<crate::persistence::Persisted>() { if let Some(mut persisted) = entity_mut.get_mut::<crate::persistence::Persisted>() {
// Accessing &mut triggers Bevy's change detection // Accessing &mut triggers Bevy's change detection
let _ = &mut *persisted; let _ = &mut *persisted;
debug!("Triggered persistence for synced entity {:?}", delta.entity_id); debug!(
"Triggered persistence for synced entity {:?}",
delta.entity_id
);
} }
} }
} }
@@ -167,44 +174,52 @@ pub fn apply_entity_delta(
/// ///
/// This dispatches to the appropriate CRDT merge logic based on the operation /// This dispatches to the appropriate CRDT merge logic based on the operation
/// type. /// type.
fn apply_component_op( fn apply_component_op(entity: Entity, op: &ComponentOp, incoming_node_id: Uuid, world: &mut World) {
entity: Entity,
op: &ComponentOp,
incoming_node_id: Uuid,
world: &mut World,
) {
match op { match op {
| ComponentOp::Set { | ComponentOp::Set {
component_type, component_type,
data, data,
vector_clock, vector_clock,
} => { } => {
apply_set_operation_with_lww(entity, component_type, data, vector_clock, incoming_node_id, world); apply_set_operation_with_lww(
} entity,
component_type,
data,
vector_clock,
incoming_node_id,
world,
);
},
| ComponentOp::SetAdd { component_type, .. } => { | ComponentOp::SetAdd { component_type, .. } => {
// OR-Set add - Phase 10 provides OrSet<T> type // OR-Set add - Phase 10 provides OrSet<T> type
// Application code should use OrSet in components and handle SetAdd/SetRemove // Application code should use OrSet in components and handle SetAdd/SetRemove
// Full integration will be in Phase 12 plugin // Full integration will be in Phase 12 plugin
debug!("SetAdd operation for {} (use OrSet<T> in components)", component_type); debug!(
} "SetAdd operation for {} (use OrSet<T> in components)",
component_type
);
},
| ComponentOp::SetRemove { component_type, .. } => { | ComponentOp::SetRemove { component_type, .. } => {
// OR-Set remove - Phase 10 provides OrSet<T> type // OR-Set remove - Phase 10 provides OrSet<T> type
// Application code should use OrSet in components and handle SetAdd/SetRemove // Application code should use OrSet in components and handle SetAdd/SetRemove
// Full integration will be in Phase 12 plugin // Full integration will be in Phase 12 plugin
debug!("SetRemove operation for {} (use OrSet<T> in components)", component_type); debug!(
} "SetRemove operation for {} (use OrSet<T> in components)",
component_type
);
},
| ComponentOp::SequenceInsert { .. } => { | ComponentOp::SequenceInsert { .. } => {
// RGA insert - will be implemented in Phase 11 // RGA insert - will be implemented in Phase 11
debug!("SequenceInsert operation not yet implemented"); debug!("SequenceInsert operation not yet implemented");
} },
| ComponentOp::SequenceDelete { .. } => { | ComponentOp::SequenceDelete { .. } => {
// RGA delete - will be implemented in Phase 11 // RGA delete - will be implemented in Phase 11
debug!("SequenceDelete operation not yet implemented"); debug!("SequenceDelete operation not yet implemented");
} },
| ComponentOp::Delete { .. } => { | ComponentOp::Delete { .. } => {
// Entity deletion - will be implemented in Phase 9 // Entity deletion - will be implemented in Phase 9
debug!("Delete operation not yet implemented"); debug!("Delete operation not yet implemented");
} },
} }
} }
@@ -239,7 +254,9 @@ fn apply_set_operation_with_lww(
// Check if we should apply this operation based on LWW // Check if we should apply this operation based on LWW
let should_apply = { let should_apply = {
if let Some(component_clocks) = world.get_resource::<ComponentVectorClocks>() { if let Some(component_clocks) = world.get_resource::<ComponentVectorClocks>() {
if let Some((current_clock, current_node_id)) = component_clocks.get(entity_network_id, component_type) { if let Some((current_clock, current_node_id)) =
component_clocks.get(entity_network_id, component_type)
{
// We have a current clock - do LWW comparison with real node IDs // We have a current clock - do LWW comparison with real node IDs
let decision = compare_operations_lww( let decision = compare_operations_lww(
current_clock, current_clock,
@@ -249,23 +266,24 @@ fn apply_set_operation_with_lww(
); );
match decision { match decision {
crate::networking::merge::MergeDecision::ApplyRemote => { | crate::networking::merge::MergeDecision::ApplyRemote => {
debug!( debug!(
"Applying remote Set for {} (remote is newer)", "Applying remote Set for {} (remote is newer)",
component_type component_type
); );
true true
} },
crate::networking::merge::MergeDecision::KeepLocal => { | crate::networking::merge::MergeDecision::KeepLocal => {
debug!( debug!(
"Ignoring remote Set for {} (local is newer)", "Ignoring remote Set for {} (local is newer)",
component_type component_type
); );
false false
} },
crate::networking::merge::MergeDecision::Concurrent => { | crate::networking::merge::MergeDecision::Concurrent => {
// For concurrent operations, use node_id comparison as deterministic tiebreaker // For concurrent operations, use node_id comparison as deterministic
// This ensures all nodes make the same decision for concurrent updates // tiebreaker This ensures all nodes make the same
// decision for concurrent updates
if incoming_node_id > *current_node_id { if incoming_node_id > *current_node_id {
debug!( debug!(
"Applying remote Set for {} (concurrent, remote node_id {:?} > local {:?})", "Applying remote Set for {} (concurrent, remote node_id {:?} > local {:?})",
@@ -279,11 +297,11 @@ fn apply_set_operation_with_lww(
); );
false false
} }
} },
crate::networking::merge::MergeDecision::Equal => { | crate::networking::merge::MergeDecision::Equal => {
debug!("Ignoring remote Set for {} (clocks equal)", component_type); debug!("Ignoring remote Set for {} (clocks equal)", component_type);
false false
} },
} }
} else { } else {
// No current clock - this is the first time we're setting this component // No current clock - this is the first time we're setting this component
@@ -343,14 +361,14 @@ fn apply_set_operation(
| ComponentData::BlobRef { hash: _, size: _ } => { | ComponentData::BlobRef { hash: _, size: _ } => {
if let Some(store) = blob_store { if let Some(store) = blob_store {
match get_component_data(data, store) { match get_component_data(data, store) {
Ok(bytes) => bytes, | Ok(bytes) => bytes,
Err(e) => { | Err(e) => {
error!( error!(
"Failed to retrieve blob for component {}: {}", "Failed to retrieve blob for component {}: {}",
component_type, e component_type, e
); );
return; return;
} },
} }
} else { } else {
error!( error!(
@@ -359,31 +377,34 @@ fn apply_set_operation(
); );
return; return;
} }
} },
}; };
let reflected = match deserialize_component_typed(&data_bytes, component_type, &type_registry) { let reflected = match deserialize_component_typed(&data_bytes, component_type, &type_registry) {
Ok(reflected) => reflected, | Ok(reflected) => reflected,
Err(e) => { | Err(e) => {
error!("Failed to deserialize component {}: {}", component_type, e); error!("Failed to deserialize component {}: {}", component_type, e);
return; return;
} },
}; };
let registration = match type_registry.get_with_type_path(component_type) { let registration = match type_registry.get_with_type_path(component_type) {
Some(reg) => reg, | Some(reg) => reg,
None => { | None => {
error!("Component type {} not registered", component_type); error!("Component type {} not registered", component_type);
return; return;
} },
}; };
let reflect_component = match registration.data::<ReflectComponent>() { let reflect_component = match registration.data::<ReflectComponent>() {
Some(rc) => rc.clone(), | Some(rc) => rc.clone(),
None => { | None => {
error!("Component type {} does not have ReflectComponent data", component_type); error!(
"Component type {} does not have ReflectComponent data",
component_type
);
return; return;
} },
}; };
drop(type_registry); drop(type_registry);
@@ -399,14 +420,20 @@ fn apply_set_operation(
// This ensures remote entities can have their Transform changes detected // This ensures remote entities can have their Transform changes detected
if component_type == "bevy_transform::components::transform::Transform" { if component_type == "bevy_transform::components::transform::Transform" {
if let Ok(mut entity_mut) = world.get_entity_mut(entity) { if let Ok(mut entity_mut) = world.get_entity_mut(entity) {
if entity_mut.get::<crate::networking::NetworkedTransform>().is_none() { if entity_mut
.get::<crate::networking::NetworkedTransform>()
.is_none()
{
entity_mut.insert(crate::networking::NetworkedTransform::default()); entity_mut.insert(crate::networking::NetworkedTransform::default());
debug!("Added NetworkedTransform to entity with Transform"); debug!("Added NetworkedTransform to entity with Transform");
} }
} }
} }
} else { } else {
error!("Entity {:?} not found when applying component {}", entity, component_type); error!(
"Entity {:?} not found when applying component {}",
entity, component_type
);
} }
} }
@@ -421,12 +448,14 @@ fn apply_set_operation(
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::receive_and_apply_deltas_system; /// use lib::networking::receive_and_apply_deltas_system;
/// ///
/// App::new() /// App::new().add_systems(Update, receive_and_apply_deltas_system);
/// .add_systems(Update, receive_and_apply_deltas_system);
/// ``` /// ```
pub fn receive_and_apply_deltas_system(world: &mut World) { pub fn receive_and_apply_deltas_system(world: &mut World) {
// Check if bridge exists // Check if bridge exists
if world.get_resource::<crate::networking::GossipBridge>().is_none() { if world
.get_resource::<crate::networking::GossipBridge>()
.is_none()
{
return; return;
} }
@@ -456,23 +485,23 @@ pub fn receive_and_apply_deltas_system(world: &mut World) {
); );
apply_entity_delta(&delta, world); apply_entity_delta(&delta, world);
} },
| SyncMessage::JoinRequest { .. } => { | SyncMessage::JoinRequest { .. } => {
// Handled by handle_join_requests_system // Handled by handle_join_requests_system
debug!("JoinRequest handled by dedicated system"); debug!("JoinRequest handled by dedicated system");
} },
| SyncMessage::FullState { .. } => { | SyncMessage::FullState { .. } => {
// Handled by handle_full_state_system // Handled by handle_full_state_system
debug!("FullState handled by dedicated system"); debug!("FullState handled by dedicated system");
} },
| SyncMessage::SyncRequest { .. } => { | SyncMessage::SyncRequest { .. } => {
// Handled by handle_sync_requests_system // Handled by handle_sync_requests_system
debug!("SyncRequest handled by dedicated system"); debug!("SyncRequest handled by dedicated system");
} },
| SyncMessage::MissingDeltas { .. } => { | SyncMessage::MissingDeltas { .. } => {
// Handled by handle_missing_deltas_system // Handled by handle_missing_deltas_system
debug!("MissingDeltas handled by dedicated system"); debug!("MissingDeltas handled by dedicated system");
} },
} }
} }
} }

View File

@@ -0,0 +1,121 @@
//! Authentication and authorization for the networking layer
use sha2::{
Digest,
Sha256,
};
use crate::networking::error::{
NetworkingError,
Result,
};
/// Validate session secret using constant-time comparison
///
/// Both secrets are hashed with SHA-256 and the resulting fixed-length
/// (32-byte) digests are compared. Because the digests always have the same
/// length and comparing them byte-wise reveals nothing exploitable about the
/// original secret (a chosen digest prefix would require a SHA-256 preimage),
/// the validation time is independent of both the content and the length of
/// the expected secret. The session secret is a pre-shared key that controls
/// access to the gossip network.
///
/// # Arguments
/// * `provided` - The session secret provided by the joining peer
/// * `expected` - The expected session secret configured for this node
///
/// # Returns
/// * `Ok(())` - Session secret is valid
/// * `Err(NetworkingError::SecurityError)` - Session secret is invalid
///
/// # Examples
/// ```
/// use lib::networking::auth::validate_session_secret;
///
/// let secret = b"my_secret_key";
/// assert!(validate_session_secret(secret, secret).is_ok());
///
/// let wrong_secret = b"wrong_key";
/// assert!(validate_session_secret(wrong_secret, secret).is_err());
/// ```
pub fn validate_session_secret(provided: &[u8], expected: &[u8]) -> Result<()> {
    // Intentionally NO early length check: returning early on a length
    // mismatch would leak the expected secret's length through timing.
    // Hashing normalizes both inputs to 32-byte digests, so secrets of
    // different lengths simply produce different hashes and fail below.
    let provided_hash = hash_secret(provided);
    let expected_hash = hash_secret(expected);

    // Compare digests rather than raw secrets. This prevents timing attacks:
    // even though `!=` on byte vectors may exit early, the position of the
    // first differing digest byte carries no usable information about the
    // secret itself.
    if provided_hash != expected_hash {
        return Err(NetworkingError::SecurityError(
            "Invalid session secret".to_string(),
        ));
    }

    Ok(())
}
/// Hash a secret using SHA-256
///
/// Used internally so session secrets can be compared via their fixed-length
/// digests instead of their raw bytes.
fn hash_secret(secret: &[u8]) -> Vec<u8> {
    // One-shot convenience form; equivalent to new() + update() + finalize().
    Sha256::digest(secret).to_vec()
}
#[cfg(test)]
mod tests {
    use super::*;

    /// A secret compared against itself validates successfully.
    #[test]
    fn test_valid_secret() {
        let secret = b"my_secret_key";
        assert!(validate_session_secret(secret, secret).is_ok());
    }

    /// A wrong secret is rejected, specifically with a `SecurityError`.
    #[test]
    fn test_invalid_secret() {
        let result = validate_session_secret(b"my_secret_key", b"wrong_secret_key");
        assert!(result.is_err());
        assert!(
            matches!(result, Err(NetworkingError::SecurityError(_))),
            "Expected SecurityError"
        );
    }

    /// Secrets of different lengths never validate.
    #[test]
    fn test_different_lengths() {
        assert!(validate_session_secret(b"short", b"much_longer_secret").is_err());
    }

    /// Two empty secrets are considered equal.
    #[test]
    fn test_empty_secrets() {
        assert!(validate_session_secret(b"", b"").is_ok());
    }

    /// Hashing the same input twice yields the same digest.
    #[test]
    fn test_hash_is_deterministic() {
        let secret = b"test_secret";
        assert_eq!(hash_secret(secret), hash_secret(secret));
    }

    /// Distinct inputs yield distinct digests.
    #[test]
    fn test_different_secrets_have_different_hashes() {
        assert_ne!(hash_secret(b"secret1"), hash_secret(b"secret2"));
    }
}

View File

@@ -5,7 +5,8 @@
//! by its hash in the ComponentOp. //! by its hash in the ComponentOp.
//! //!
//! **NOTE:** This is a simplified implementation for Phase 6. Full iroh-blobs //! **NOTE:** This is a simplified implementation for Phase 6. Full iroh-blobs
//! integration will be completed when we integrate with actual gossip networking. //! integration will be completed when we integrate with actual gossip
//! networking.
use std::{ use std::{
collections::HashMap, collections::HashMap,
@@ -92,7 +93,8 @@ impl BlobStore {
/// ///
/// Returns an error if the cache lock is poisoned. /// Returns an error if the cache lock is poisoned.
pub fn has_blob(&self, hash: &BlobHash) -> Result<bool> { pub fn has_blob(&self, hash: &BlobHash) -> Result<bool> {
Ok(self.cache Ok(self
.cache
.lock() .lock()
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))? .map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
.contains_key(hash)) .contains_key(hash))
@@ -103,7 +105,8 @@ impl BlobStore {
/// This is safer than calling `has_blob()` followed by `get_blob()` because /// This is safer than calling `has_blob()` followed by `get_blob()` because
/// it's atomic - the blob can't be removed between the check and get. /// it's atomic - the blob can't be removed between the check and get.
pub fn get_blob_if_exists(&self, hash: &BlobHash) -> Result<Option<Vec<u8>>> { pub fn get_blob_if_exists(&self, hash: &BlobHash) -> Result<Option<Vec<u8>>> {
Ok(self.cache Ok(self
.cache
.lock() .lock()
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))? .map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
.get(hash) .get(hash)
@@ -114,7 +117,8 @@ impl BlobStore {
/// ///
/// Returns an error if the cache lock is poisoned. /// Returns an error if the cache lock is poisoned.
pub fn cache_size(&self) -> Result<usize> { pub fn cache_size(&self) -> Result<usize> {
Ok(self.cache Ok(self
.cache
.lock() .lock()
.map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))? .map_err(|e| NetworkingError::Blob(format!("Failed to lock cache: {}", e)))?
.len()) .len())
@@ -173,7 +177,10 @@ pub fn should_use_blob(data: &[u8]) -> bool {
/// # Example /// # Example
/// ///
/// ``` /// ```
/// use lib::networking::{create_component_data, BlobStore}; /// use lib::networking::{
/// BlobStore,
/// create_component_data,
/// };
/// ///
/// let store = BlobStore::new(); /// let store = BlobStore::new();
/// ///
@@ -202,7 +209,11 @@ pub fn create_component_data(data: Vec<u8>, blob_store: &BlobStore) -> Result<Co
/// # Example /// # Example
/// ///
/// ``` /// ```
/// use lib::networking::{get_component_data, BlobStore, ComponentData}; /// use lib::networking::{
/// BlobStore,
/// ComponentData,
/// get_component_data,
/// };
/// ///
/// let store = BlobStore::new(); /// let store = BlobStore::new();
/// ///
@@ -334,7 +345,7 @@ mod tests {
| ComponentData::BlobRef { hash, size } => { | ComponentData::BlobRef { hash, size } => {
assert_eq!(size, 100_000); assert_eq!(size, 100_000);
assert!(store.has_blob(&hash).unwrap()); assert!(store.has_blob(&hash).unwrap());
} },
| ComponentData::Inline(_) => panic!("Expected blob reference"), | ComponentData::Inline(_) => panic!("Expected blob reference"),
} }
} }

View File

@@ -23,8 +23,7 @@ use crate::networking::{
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::auto_detect_transform_changes_system; /// use lib::networking::auto_detect_transform_changes_system;
/// ///
/// App::new() /// App::new().add_systems(Update, auto_detect_transform_changes_system);
/// .add_systems(Update, auto_detect_transform_changes_system);
/// ``` /// ```
pub fn auto_detect_transform_changes_system( pub fn auto_detect_transform_changes_system(
mut query: Query< mut query: Query<
@@ -38,7 +37,10 @@ pub fn auto_detect_transform_changes_system(
// Count how many changed entities we found // Count how many changed entities we found
let count = query.iter().count(); let count = query.iter().count();
if count > 0 { if count > 0 {
debug!("auto_detect_transform_changes_system: Found {} entities with changed Transform", count); debug!(
"auto_detect_transform_changes_system: Found {} entities with changed Transform",
count
);
} }
// Simply accessing &mut NetworkedEntity triggers Bevy's change detection // Simply accessing &mut NetworkedEntity triggers Bevy's change detection
@@ -66,8 +68,8 @@ impl LastSyncVersions {
/// Check if we should sync this entity based on version /// Check if we should sync this entity based on version
pub fn should_sync(&self, network_id: uuid::Uuid, version: u64) -> bool { pub fn should_sync(&self, network_id: uuid::Uuid, version: u64) -> bool {
match self.versions.get(&network_id) { match self.versions.get(&network_id) {
Some(&last_version) => version > last_version, | Some(&last_version) => version > last_version,
None => true, // Never synced before | None => true, // Never synced before
} }
} }

View File

@@ -42,10 +42,7 @@ use crate::networking::vector_clock::NodeId;
/// fn spawn_networked_entity(mut commands: Commands) { /// fn spawn_networked_entity(mut commands: Commands) {
/// let node_id = Uuid::new_v4(); /// let node_id = Uuid::new_v4();
/// ///
/// commands.spawn(( /// commands.spawn((NetworkedEntity::new(node_id), Transform::default()));
/// NetworkedEntity::new(node_id),
/// Transform::default(),
/// ));
/// } /// }
/// ``` /// ```
#[derive(Component, Reflect, Debug, Clone, Serialize, Deserialize)] #[derive(Component, Reflect, Debug, Clone, Serialize, Deserialize)]
@@ -139,7 +136,10 @@ impl Default for NetworkedEntity {
/// ///
/// ``` /// ```
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{NetworkedEntity, NetworkedTransform}; /// use lib::networking::{
/// NetworkedEntity,
/// NetworkedTransform,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// fn spawn_synced_transform(mut commands: Commands) { /// fn spawn_synced_transform(mut commands: Commands) {
@@ -171,7 +171,10 @@ pub struct NetworkedTransform;
/// ///
/// ``` /// ```
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{NetworkedEntity, NetworkedSelection}; /// use lib::networking::{
/// NetworkedEntity,
/// NetworkedSelection,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// fn create_selection(mut commands: Commands) { /// fn create_selection(mut commands: Commands) {
@@ -182,10 +185,7 @@ pub struct NetworkedTransform;
/// selection.selected_ids.insert(Uuid::new_v4()); /// selection.selected_ids.insert(Uuid::new_v4());
/// selection.selected_ids.insert(Uuid::new_v4()); /// selection.selected_ids.insert(Uuid::new_v4());
/// ///
/// commands.spawn(( /// commands.spawn((NetworkedEntity::new(node_id), selection));
/// NetworkedEntity::new(node_id),
/// selection,
/// ));
/// } /// }
/// ``` /// ```
#[derive(Component, Reflect, Debug, Clone, Default)] #[derive(Component, Reflect, Debug, Clone, Default)]
@@ -253,7 +253,10 @@ impl NetworkedSelection {
/// ///
/// ``` /// ```
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{NetworkedEntity, NetworkedDrawingPath}; /// use lib::networking::{
/// NetworkedDrawingPath,
/// NetworkedEntity,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// fn create_path(mut commands: Commands) { /// fn create_path(mut commands: Commands) {
@@ -265,10 +268,7 @@ impl NetworkedSelection {
/// path.points.push(Vec2::new(10.0, 10.0)); /// path.points.push(Vec2::new(10.0, 10.0));
/// path.points.push(Vec2::new(20.0, 5.0)); /// path.points.push(Vec2::new(20.0, 5.0));
/// ///
/// commands.spawn(( /// commands.spawn((NetworkedEntity::new(node_id), path));
/// NetworkedEntity::new(node_id),
/// path,
/// ));
/// } /// }
/// ``` /// ```
#[derive(Component, Reflect, Debug, Clone, Default)] #[derive(Component, Reflect, Debug, Clone, Default)]

View File

@@ -6,6 +6,7 @@
use bevy::prelude::*; use bevy::prelude::*;
use crate::networking::{ use crate::networking::{
NetworkedEntity,
change_detection::LastSyncVersions, change_detection::LastSyncVersions,
gossip_bridge::GossipBridge, gossip_bridge::GossipBridge,
messages::{ messages::{
@@ -18,7 +19,6 @@ use crate::networking::{
NodeId, NodeId,
VectorClock, VectorClock,
}, },
NetworkedEntity,
}; };
/// Resource wrapping our node's vector clock /// Resource wrapping our node's vector clock
@@ -63,8 +63,7 @@ impl NodeVectorClock {
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::generate_delta_system; /// use lib::networking::generate_delta_system;
/// ///
/// App::new() /// App::new().add_systems(Update, generate_delta_system);
/// .add_systems(Update, generate_delta_system);
/// ``` /// ```
pub fn generate_delta_system(world: &mut World) { pub fn generate_delta_system(world: &mut World) {
// Check if bridge exists // Check if bridge exists
@@ -73,8 +72,10 @@ pub fn generate_delta_system(world: &mut World) {
} }
let changed_entities: Vec<(Entity, uuid::Uuid, uuid::Uuid)> = { let changed_entities: Vec<(Entity, uuid::Uuid, uuid::Uuid)> = {
let mut query = world.query_filtered::<(Entity, &NetworkedEntity), Changed<NetworkedEntity>>(); let mut query =
query.iter(world) world.query_filtered::<(Entity, &NetworkedEntity), Changed<NetworkedEntity>>();
query
.iter(world)
.map(|(entity, networked)| (entity, networked.network_id, networked.owner_node_id)) .map(|(entity, networked)| (entity, networked.network_id, networked.owner_node_id))
.collect() .collect()
}; };
@@ -142,12 +143,7 @@ pub fn generate_delta_system(world: &mut World) {
let (bridge, _, _, mut last_versions, mut operation_log) = system_state.get_mut(world); let (bridge, _, _, mut last_versions, mut operation_log) = system_state.get_mut(world);
// Create EntityDelta // Create EntityDelta
let delta = EntityDelta::new( let delta = EntityDelta::new(network_id, node_id, vector_clock.clone(), operations);
network_id,
node_id,
vector_clock.clone(),
operations,
);
// Record in operation log for anti-entropy // Record in operation log for anti-entropy
if let Some(ref mut log) = operation_log { if let Some(ref mut log) = operation_log {
@@ -179,10 +175,22 @@ pub fn generate_delta_system(world: &mut World) {
// Phase 4: Update component vector clocks for local modifications // Phase 4: Update component vector clocks for local modifications
{ {
if let Some(mut component_clocks) = world.get_resource_mut::<crate::networking::ComponentVectorClocks>() { if let Some(mut component_clocks) =
world.get_resource_mut::<crate::networking::ComponentVectorClocks>()
{
for op in &delta.operations { for op in &delta.operations {
if let crate::networking::ComponentOp::Set { component_type, vector_clock: op_clock, .. } = op { if let crate::networking::ComponentOp::Set {
component_clocks.set(network_id, component_type.clone(), op_clock.clone(), node_id); component_type,
vector_clock: op_clock,
..
} = op
{
component_clocks.set(
network_id,
component_type.clone(),
op_clock.clone(),
node_id,
);
debug!( debug!(
"Updated local vector clock for {} on entity {:?} (node_id: {:?})", "Updated local vector clock for {} on entity {:?} (node_id: {:?})",
component_type, network_id, node_id component_type, network_id, node_id

View File

@@ -22,13 +22,13 @@ use bevy::prelude::*;
/// ///
/// ``` /// ```
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{NetworkEntityMap, NetworkedEntity}; /// use lib::networking::{
/// NetworkEntityMap,
/// NetworkedEntity,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// fn example_system( /// fn example_system(mut map: ResMut<NetworkEntityMap>, query: Query<(Entity, &NetworkedEntity)>) {
/// mut map: ResMut<NetworkEntityMap>,
/// query: Query<(Entity, &NetworkedEntity)>,
/// ) {
/// // Register networked entities /// // Register networked entities
/// for (entity, networked) in query.iter() { /// for (entity, networked) in query.iter() {
/// map.insert(networked.network_id, entity); /// map.insert(networked.network_id, entity);
@@ -256,12 +256,14 @@ impl NetworkEntityMap {
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::register_networked_entities_system; /// use lib::networking::register_networked_entities_system;
/// ///
/// App::new() /// App::new().add_systems(PostUpdate, register_networked_entities_system);
/// .add_systems(PostUpdate, register_networked_entities_system);
/// ``` /// ```
pub fn register_networked_entities_system( pub fn register_networked_entities_system(
mut map: ResMut<NetworkEntityMap>, mut map: ResMut<NetworkEntityMap>,
query: Query<(Entity, &crate::networking::NetworkedEntity), Added<crate::networking::NetworkedEntity>>, query: Query<
(Entity, &crate::networking::NetworkedEntity),
Added<crate::networking::NetworkedEntity>,
>,
) { ) {
for (entity, networked) in query.iter() { for (entity, networked) in query.iter() {
map.insert(networked.network_id, entity); map.insert(networked.network_id, entity);
@@ -278,8 +280,7 @@ pub fn register_networked_entities_system(
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::cleanup_despawned_entities_system; /// use lib::networking::cleanup_despawned_entities_system;
/// ///
/// App::new() /// App::new().add_systems(PostUpdate, cleanup_despawned_entities_system);
/// .add_systems(PostUpdate, cleanup_despawned_entities_system);
/// ``` /// ```
pub fn cleanup_despawned_entities_system( pub fn cleanup_despawned_entities_system(
mut map: ResMut<NetworkEntityMap>, mut map: ResMut<NetworkEntityMap>,

View File

@@ -69,8 +69,9 @@ impl GossipBridge {
/// Drain all pending messages from the incoming queue atomically /// Drain all pending messages from the incoming queue atomically
/// ///
/// This acquires the lock once and drains all messages, preventing race conditions /// This acquires the lock once and drains all messages, preventing race
/// where messages could arrive between individual try_recv() calls. /// conditions where messages could arrive between individual try_recv()
/// calls.
pub fn drain_incoming(&self) -> Vec<VersionedMessage> { pub fn drain_incoming(&self) -> Vec<VersionedMessage> {
self.incoming self.incoming
.lock() .lock()

View File

@@ -17,6 +17,8 @@ use bevy::{
}; };
use crate::networking::{ use crate::networking::{
GossipBridge,
NetworkedEntity,
blob_support::BlobStore, blob_support::BlobStore,
delta_generation::NodeVectorClock, delta_generation::NodeVectorClock,
entity_map::NetworkEntityMap, entity_map::NetworkEntityMap,
@@ -25,17 +27,14 @@ use crate::networking::{
SyncMessage, SyncMessage,
VersionedMessage, VersionedMessage,
}, },
GossipBridge,
NetworkedEntity,
}; };
/// Session secret for join authentication
///
/// In Phase 7, this is optional. Phase 13 will add full authentication.
pub type SessionSecret = Vec<u8>;
/// Build a JoinRequest message /// Build a JoinRequest message
/// ///
/// # Arguments
/// * `node_id` - The UUID of the node requesting to join
/// * `session_secret` - Optional pre-shared secret for authentication
///
/// # Example /// # Example
/// ///
/// ``` /// ```
@@ -45,7 +44,10 @@ pub type SessionSecret = Vec<u8>;
/// let node_id = Uuid::new_v4(); /// let node_id = Uuid::new_v4();
/// let request = build_join_request(node_id, None); /// let request = build_join_request(node_id, None);
/// ``` /// ```
pub fn build_join_request(node_id: uuid::Uuid, session_secret: Option<SessionSecret>) -> VersionedMessage { pub fn build_join_request(
node_id: uuid::Uuid,
session_secret: Option<Vec<u8>>,
) -> VersionedMessage {
VersionedMessage::new(SyncMessage::JoinRequest { VersionedMessage::new(SyncMessage::JoinRequest {
node_id, node_id,
session_secret, session_secret,
@@ -99,10 +101,10 @@ pub fn build_full_state(
let type_path = registration.type_info().type_path(); let type_path = registration.type_info().type_path();
// Skip networked wrapper components // Skip networked wrapper components
if type_path.ends_with("::NetworkedEntity") if type_path.ends_with("::NetworkedEntity") ||
|| type_path.ends_with("::NetworkedTransform") type_path.ends_with("::NetworkedTransform") ||
|| type_path.ends_with("::NetworkedSelection") type_path.ends_with("::NetworkedSelection") ||
|| type_path.ends_with("::NetworkedDrawingPath") type_path.ends_with("::NetworkedDrawingPath")
{ {
continue; continue;
} }
@@ -114,8 +116,8 @@ pub fn build_full_state(
// Create component data (inline or blob) // Create component data (inline or blob)
let data = if let Some(store) = blob_store { let data = if let Some(store) = blob_store {
match create_component_data(serialized, store) { match create_component_data(serialized, store) {
Ok(d) => d, | Ok(d) => d,
Err(_) => continue, | Err(_) => continue,
} }
} else { } else {
crate::networking::ComponentData::Inline(serialized) crate::networking::ComponentData::Inline(serialized)
@@ -203,10 +205,7 @@ pub fn apply_full_state(
// This ensures entities received via FullState are persisted locally // This ensures entities received via FullState are persisted locally
let entity = commands let entity = commands
.spawn(( .spawn((
NetworkedEntity::with_id( NetworkedEntity::with_id(entity_state.entity_id, entity_state.owner_node_id),
entity_state.entity_id,
entity_state.owner_node_id,
),
crate::persistence::Persisted::with_id(entity_state.entity_id), crate::persistence::Persisted::with_id(entity_state.entity_id),
)) ))
.id(); .id();
@@ -224,14 +223,14 @@ pub fn apply_full_state(
| blob_ref @ crate::networking::ComponentData::BlobRef { .. } => { | blob_ref @ crate::networking::ComponentData::BlobRef { .. } => {
if let Some(store) = blob_store { if let Some(store) = blob_store {
match get_component_data(blob_ref, store) { match get_component_data(blob_ref, store) {
Ok(bytes) => bytes, | Ok(bytes) => bytes,
Err(e) => { | Err(e) => {
error!( error!(
"Failed to retrieve blob for {}: {}", "Failed to retrieve blob for {}: {}",
component_state.component_type, e component_state.component_type, e
); );
continue; continue;
} },
} }
} else { } else {
error!( error!(
@@ -240,44 +239,44 @@ pub fn apply_full_state(
); );
continue; continue;
} }
} },
}; };
// Deserialize the component // Deserialize the component
let reflected = match deserialize_component(&data_bytes, type_registry) { let reflected = match deserialize_component(&data_bytes, type_registry) {
Ok(r) => r, | Ok(r) => r,
Err(e) => { | Err(e) => {
error!( error!(
"Failed to deserialize {}: {}", "Failed to deserialize {}: {}",
component_state.component_type, e component_state.component_type, e
); );
continue; continue;
} },
}; };
// Get the type registration // Get the type registration
let registration = match type_registry.get_with_type_path(&component_state.component_type) let registration =
{ match type_registry.get_with_type_path(&component_state.component_type) {
Some(reg) => reg, | Some(reg) => reg,
None => { | None => {
error!( error!(
"Component type {} not registered", "Component type {} not registered",
component_state.component_type component_state.component_type
); );
continue; continue;
} },
}; };
// Get ReflectComponent data // Get ReflectComponent data
let reflect_component = match registration.data::<ReflectComponent>() { let reflect_component = match registration.data::<ReflectComponent>() {
Some(rc) => rc.clone(), | Some(rc) => rc.clone(),
None => { | None => {
error!( error!(
"Component type {} does not have ReflectComponent data", "Component type {} does not have ReflectComponent data",
component_state.component_type component_state.component_type
); );
continue; continue;
} },
}; };
// Insert the component // Insert the component
@@ -302,8 +301,7 @@ pub fn apply_full_state(
debug!( debug!(
"Spawned entity {:?} from FullState with {} components", "Spawned entity {:?} from FullState with {} components",
entity_state.entity_id, entity_state.entity_id, num_components
num_components
); );
} }
@@ -320,8 +318,7 @@ pub fn apply_full_state(
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::handle_join_requests_system; /// use lib::networking::handle_join_requests_system;
/// ///
/// App::new() /// App::new().add_systems(Update, handle_join_requests_system);
/// .add_systems(Update, handle_join_requests_system);
/// ``` /// ```
pub fn handle_join_requests_system( pub fn handle_join_requests_system(
world: &World, world: &World,
@@ -347,9 +344,32 @@ pub fn handle_join_requests_system(
} => { } => {
info!("Received JoinRequest from node {}", node_id); info!("Received JoinRequest from node {}", node_id);
// TODO: Validate session_secret in Phase 13 // Validate session secret if configured
if let Some(_secret) = session_secret { if let Some(expected) =
debug!("Session secret validation not yet implemented"); world.get_resource::<crate::networking::plugin::SessionSecret>()
{
match &session_secret {
| Some(provided_secret) => {
if let Err(e) = crate::networking::validate_session_secret(
provided_secret,
expected.as_bytes(),
) {
error!("JoinRequest from {} rejected: {}", node_id, e);
continue; // Skip this request, don't send FullState
}
info!("Session secret validated for node {}", node_id);
},
| None => {
warn!(
"JoinRequest from {} missing required session secret, rejecting",
node_id
);
continue; // Reject requests without secret when one is configured
},
}
} else if session_secret.is_some() {
// No session secret configured but peer provided one
debug!("Session secret provided but none configured, accepting");
} }
// Build full state // Build full state
@@ -367,17 +387,19 @@ pub fn handle_join_requests_system(
} else { } else {
info!("Sent FullState to node {}", node_id); info!("Sent FullState to node {}", node_id);
} }
} },
| _ => { | _ => {
// Not a JoinRequest, ignore (other systems handle other messages) // Not a JoinRequest, ignore (other systems handle other
} // messages)
},
} }
} }
} }
/// System to handle FullState messages /// System to handle FullState messages
/// ///
/// When we receive a FullState (after sending JoinRequest), apply it to our world. /// When we receive a FullState (after sending JoinRequest), apply it to our
/// world.
/// ///
/// This system should run BEFORE receive_and_apply_deltas_system to ensure /// This system should run BEFORE receive_and_apply_deltas_system to ensure
/// we're fully initialized before processing deltas. /// we're fully initialized before processing deltas.
@@ -416,10 +438,10 @@ pub fn handle_full_state_system(
blob_store_ref, blob_store_ref,
tombstone_registry.as_deref_mut(), tombstone_registry.as_deref_mut(),
); );
} },
| _ => { | _ => {
// Not a FullState, ignore // Not a FullState, ignore
} },
} }
} }
} }
@@ -441,7 +463,7 @@ mod tests {
} => { } => {
assert_eq!(req_node_id, node_id); assert_eq!(req_node_id, node_id);
assert!(session_secret.is_none()); assert!(session_secret.is_none());
} },
| _ => panic!("Expected JoinRequest"), | _ => panic!("Expected JoinRequest"),
} }
} }
@@ -458,7 +480,7 @@ mod tests {
session_secret, session_secret,
} => { } => {
assert_eq!(session_secret, Some(secret)); assert_eq!(session_secret, Some(secret));
} },
| _ => panic!("Expected JoinRequest"), | _ => panic!("Expected JoinRequest"),
} }
} }

View File

@@ -45,7 +45,10 @@ pub enum MergeDecision {
/// # Example /// # Example
/// ///
/// ``` /// ```
/// use lib::networking::{VectorClock, compare_operations_lww}; /// use lib::networking::{
/// VectorClock,
/// compare_operations_lww,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// let node1 = Uuid::new_v4(); /// let node1 = Uuid::new_v4();
@@ -189,9 +192,7 @@ mod tests {
let decision = compare_operations_lww(&clock1, node1, &clock2, node2); let decision = compare_operations_lww(&clock1, node1, &clock2, node2);
// Should use node ID as tiebreaker // Should use node ID as tiebreaker
assert!( assert!(decision == MergeDecision::ApplyRemote || decision == MergeDecision::KeepLocal);
decision == MergeDecision::ApplyRemote || decision == MergeDecision::KeepLocal
);
} }
#[test] #[test]

View File

@@ -10,6 +10,9 @@ use bevy::{
}; };
use crate::networking::{ use crate::networking::{
GossipBridge,
NetworkedEntity,
TombstoneRegistry,
apply_entity_delta, apply_entity_delta,
apply_full_state, apply_full_state,
blob_support::BlobStore, blob_support::BlobStore,
@@ -18,9 +21,8 @@ use crate::networking::{
entity_map::NetworkEntityMap, entity_map::NetworkEntityMap,
messages::SyncMessage, messages::SyncMessage,
operation_log::OperationLog, operation_log::OperationLog,
GossipBridge, plugin::SessionSecret,
NetworkedEntity, validate_session_secret,
TombstoneRegistry,
}; };
/// Central message dispatcher system /// Central message dispatcher system
@@ -46,8 +48,7 @@ use crate::networking::{
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::message_dispatcher_system; /// use lib::networking::message_dispatcher_system;
/// ///
/// App::new() /// App::new().add_systems(Update, message_dispatcher_system);
/// .add_systems(Update, message_dispatcher_system);
/// ``` /// ```
pub fn message_dispatcher_system(world: &mut World) { pub fn message_dispatcher_system(world: &mut World) {
// This is an exclusive system to avoid parameter conflicts with world access // This is an exclusive system to avoid parameter conflicts with world access
@@ -57,7 +58,8 @@ pub fn message_dispatcher_system(world: &mut World) {
} }
// Atomically drain all pending messages from the incoming queue // Atomically drain all pending messages from the incoming queue
// This prevents race conditions where messages could arrive between individual try_recv() calls // This prevents race conditions where messages could arrive between individual
// try_recv() calls
let messages: Vec<crate::networking::VersionedMessage> = { let messages: Vec<crate::networking::VersionedMessage> = {
let bridge = world.resource::<GossipBridge>(); let bridge = world.resource::<GossipBridge>();
bridge.drain_incoming() bridge.drain_incoming()
@@ -74,10 +76,7 @@ pub fn message_dispatcher_system(world: &mut World) {
/// Helper function to dispatch a single message /// Helper function to dispatch a single message
/// This is separate to allow proper borrowing of world resources /// This is separate to allow proper borrowing of world resources
fn dispatch_message( fn dispatch_message(world: &mut World, message: crate::networking::VersionedMessage) {
world: &mut World,
message: crate::networking::VersionedMessage,
) {
match message.message { match message.message {
// EntityDelta - apply remote operations // EntityDelta - apply remote operations
| SyncMessage::EntityDelta { | SyncMessage::EntityDelta {
@@ -100,7 +99,7 @@ fn dispatch_message(
); );
apply_entity_delta(&delta, world); apply_entity_delta(&delta, world);
} },
// JoinRequest - new peer joining // JoinRequest - new peer joining
| SyncMessage::JoinRequest { | SyncMessage::JoinRequest {
@@ -109,9 +108,29 @@ fn dispatch_message(
} => { } => {
info!("Received JoinRequest from node {}", node_id); info!("Received JoinRequest from node {}", node_id);
// TODO: Validate session_secret in Phase 13 // Validate session secret if configured
if let Some(_secret) = session_secret { if let Some(expected) = world.get_resource::<SessionSecret>() {
debug!("Session secret validation not yet implemented"); match &session_secret {
| Some(provided_secret) => {
if let Err(e) =
validate_session_secret(provided_secret, expected.as_bytes())
{
error!("JoinRequest from {} rejected: {}", node_id, e);
return; // Stop processing this message
}
info!("Session secret validated for node {}", node_id);
},
| None => {
warn!(
"JoinRequest from {} missing required session secret, rejecting",
node_id
);
return; // Reject requests without secret when one is configured
},
}
} else if session_secret.is_some() {
// No session secret configured but peer provided one
debug!("Session secret provided but none configured, accepting");
} }
// Build and send full state // Build and send full state
@@ -143,7 +162,7 @@ fn dispatch_message(
info!("Sent FullState to node {}", node_id); info!("Sent FullState to node {}", node_id);
} }
} }
} },
// FullState - receiving world state after join // FullState - receiving world state after join
| SyncMessage::FullState { | SyncMessage::FullState {
@@ -163,7 +182,14 @@ fn dispatch_message(
)> = SystemState::new(world); )> = SystemState::new(world);
{ {
let (mut commands, mut entity_map, type_registry, mut node_clock, blob_store, mut tombstone_registry) = system_state.get_mut(world); let (
mut commands,
mut entity_map,
type_registry,
mut node_clock,
blob_store,
mut tombstone_registry,
) = system_state.get_mut(world);
let registry = type_registry.read(); let registry = type_registry.read();
apply_full_state( apply_full_state(
@@ -180,7 +206,7 @@ fn dispatch_message(
} }
system_state.apply(world); system_state.apply(world);
} },
// SyncRequest - peer requesting missing operations // SyncRequest - peer requesting missing operations
| SyncMessage::SyncRequest { | SyncMessage::SyncRequest {
@@ -213,7 +239,7 @@ fn dispatch_message(
} else { } else {
warn!("Received SyncRequest but OperationLog resource not available"); warn!("Received SyncRequest but OperationLog resource not available");
} }
} },
// MissingDeltas - receiving operations we requested // MissingDeltas - receiving operations we requested
| SyncMessage::MissingDeltas { deltas } => { | SyncMessage::MissingDeltas { deltas } => {
@@ -221,14 +247,11 @@ fn dispatch_message(
// Apply each delta // Apply each delta
for delta in deltas { for delta in deltas {
debug!( debug!("Applying missing delta for entity {:?}", delta.entity_id);
"Applying missing delta for entity {:?}",
delta.entity_id
);
apply_entity_delta(&delta, world); apply_entity_delta(&delta, world);
} }
} },
} }
} }
@@ -280,10 +303,10 @@ fn build_full_state_from_data(
let type_path = registration.type_info().type_path(); let type_path = registration.type_info().type_path();
// Skip networked wrapper components // Skip networked wrapper components
if type_path.ends_with("::NetworkedEntity") if type_path.ends_with("::NetworkedEntity") ||
|| type_path.ends_with("::NetworkedTransform") type_path.ends_with("::NetworkedTransform") ||
|| type_path.ends_with("::NetworkedSelection") type_path.ends_with("::NetworkedSelection") ||
|| type_path.ends_with("::NetworkedDrawingPath") type_path.ends_with("::NetworkedDrawingPath")
{ {
continue; continue;
} }
@@ -295,8 +318,8 @@ fn build_full_state_from_data(
// Create component data (inline or blob) // Create component data (inline or blob)
let data = if let Some(store) = blob_store { let data = if let Some(store) = blob_store {
match create_component_data(serialized, store) { match create_component_data(serialized, store) {
Ok(d) => d, | Ok(d) => d,
Err(_) => continue, | Err(_) => continue,
} }
} else { } else {
crate::networking::ComponentData::Inline(serialized) crate::networking::ComponentData::Inline(serialized)

View File

@@ -144,7 +144,8 @@ pub struct EntityState {
/// Contains the component type and its serialized data. /// Contains the component type and its serialized data.
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ComponentState { pub struct ComponentState {
/// Type path of the component (e.g., "bevy_transform::components::Transform") /// Type path of the component (e.g.,
/// "bevy_transform::components::Transform")
pub component_type: String, pub component_type: String,
/// Serialized component data (bincode) /// Serialized component data (bincode)

View File

@@ -25,10 +25,14 @@
//! //!
//! // Build a component operation //! // Build a component operation
//! let builder = ComponentOpBuilder::new(node_id, clock.clone()); //! let builder = ComponentOpBuilder::new(node_id, clock.clone());
//! let op = builder.set("Transform".to_string(), ComponentData::Inline(vec![1, 2, 3])); //! let op = builder.set(
//! "Transform".to_string(),
//! ComponentData::Inline(vec![1, 2, 3]),
//! );
//! ``` //! ```
mod apply_ops; mod apply_ops;
mod auth;
mod blob_support; mod blob_support;
mod change_detection; mod change_detection;
mod components; mod components;
@@ -51,6 +55,7 @@ mod tombstones;
mod vector_clock; mod vector_clock;
pub use apply_ops::*; pub use apply_ops::*;
pub use auth::*;
pub use blob_support::*; pub use blob_support::*;
pub use change_detection::*; pub use change_detection::*;
pub use components::*; pub use components::*;
@@ -108,10 +113,12 @@ pub fn spawn_networked_entity(
use bevy::prelude::*; use bevy::prelude::*;
// Spawn with both NetworkedEntity and Persisted components // Spawn with both NetworkedEntity and Persisted components
let entity = world.spawn(( let entity = world
NetworkedEntity::with_id(entity_id, node_id), .spawn((
crate::persistence::Persisted::with_id(entity_id), NetworkedEntity::with_id(entity_id, node_id),
)).id(); crate::persistence::Persisted::with_id(entity_id),
))
.id();
// Register in entity map // Register in entity map
let mut entity_map = world.resource_mut::<NetworkEntityMap>(); let mut entity_map = world.resource_mut::<NetworkEntityMap>();

View File

@@ -11,8 +11,8 @@ use bevy::{
use crate::{ use crate::{
networking::{ networking::{
blob_support::{ blob_support::{
create_component_data,
BlobStore, BlobStore,
create_component_data,
}, },
error::Result, error::Result,
messages::ComponentData, messages::ComponentData,
@@ -72,7 +72,8 @@ pub fn build_set_operation(
/// Build Set operations for all components on an entity /// Build Set operations for all components on an entity
/// ///
/// This iterates over all components with reflection data and creates Set /// This iterates over all components with reflection data and creates Set
/// operations for each one. Automatically uses blob storage for large components. /// operations for each one. Automatically uses blob storage for large
/// components.
/// ///
/// # Parameters /// # Parameters
/// ///
@@ -97,7 +98,10 @@ pub fn build_entity_operations(
let mut operations = Vec::new(); let mut operations = Vec::new();
let entity_ref = world.entity(entity); let entity_ref = world.entity(entity);
debug!("build_entity_operations: Building operations for entity {:?}", entity); debug!(
"build_entity_operations: Building operations for entity {:?}",
entity
);
// Iterate over all type registrations // Iterate over all type registrations
for registration in type_registry.iter() { for registration in type_registry.iter() {
@@ -110,10 +114,10 @@ pub fn build_entity_operations(
let type_path = registration.type_info().type_path(); let type_path = registration.type_info().type_path();
// Skip certain components // Skip certain components
if type_path.ends_with("::NetworkedEntity") if type_path.ends_with("::NetworkedEntity") ||
|| type_path.ends_with("::NetworkedTransform") type_path.ends_with("::NetworkedTransform") ||
|| type_path.ends_with("::NetworkedSelection") type_path.ends_with("::NetworkedSelection") ||
|| type_path.ends_with("::NetworkedDrawingPath") type_path.ends_with("::NetworkedDrawingPath")
{ {
continue; continue;
} }
@@ -164,7 +168,10 @@ pub fn build_entity_operations(
/// ///
/// ``` /// ```
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{build_transform_operation, VectorClock}; /// use lib::networking::{
/// VectorClock,
/// build_transform_operation,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// # fn example(transform: &Transform, type_registry: &bevy::reflect::TypeRegistry) { /// # fn example(transform: &Transform, type_registry: &bevy::reflect::TypeRegistry) {
@@ -192,7 +199,10 @@ pub fn build_transform_operation(
}; };
let builder = ComponentOpBuilder::new(node_id, vector_clock); let builder = ComponentOpBuilder::new(node_id, vector_clock);
Ok(builder.set("bevy_transform::components::transform::Transform".to_string(), data)) Ok(builder.set(
"bevy_transform::components::transform::Transform".to_string(),
data,
))
} }
#[cfg(test)] #[cfg(test)]
@@ -208,7 +218,8 @@ mod tests {
let node_id = uuid::Uuid::new_v4(); let node_id = uuid::Uuid::new_v4();
let clock = VectorClock::new(); let clock = VectorClock::new();
let op = build_transform_operation(&transform, node_id, clock, &type_registry, None).unwrap(); let op =
build_transform_operation(&transform, node_id, clock, &type_registry, None).unwrap();
assert!(op.is_set()); assert!(op.is_set());
assert_eq!( assert_eq!(
@@ -227,9 +238,7 @@ mod tests {
type_registry.register::<Transform>(); type_registry.register::<Transform>();
// Spawn entity with Transform // Spawn entity with Transform
let entity = world let entity = world.spawn(Transform::from_xyz(1.0, 2.0, 3.0)).id();
.spawn(Transform::from_xyz(1.0, 2.0, 3.0))
.id();
let node_id = uuid::Uuid::new_v4(); let node_id = uuid::Uuid::new_v4();
let clock = VectorClock::new(); let clock = VectorClock::new();
@@ -250,11 +259,15 @@ mod tests {
let node_id = uuid::Uuid::new_v4(); let node_id = uuid::Uuid::new_v4();
let mut clock = VectorClock::new(); let mut clock = VectorClock::new();
let op1 = build_transform_operation(&transform, node_id, clock.clone(), &type_registry, None).unwrap(); let op1 =
build_transform_operation(&transform, node_id, clock.clone(), &type_registry, None)
.unwrap();
assert_eq!(op1.vector_clock().get(node_id), 1); assert_eq!(op1.vector_clock().get(node_id), 1);
clock.increment(node_id); clock.increment(node_id);
let op2 = build_transform_operation(&transform, node_id, clock.clone(), &type_registry, None).unwrap(); let op2 =
build_transform_operation(&transform, node_id, clock.clone(), &type_registry, None)
.unwrap();
assert_eq!(op2.vector_clock().get(node_id), 2); assert_eq!(op2.vector_clock().get(node_id), 2);
} }
} }

View File

@@ -18,6 +18,8 @@ use std::collections::{
use bevy::prelude::*; use bevy::prelude::*;
use crate::networking::{ use crate::networking::{
GossipBridge,
NodeVectorClock,
messages::{ messages::{
EntityDelta, EntityDelta,
SyncMessage, SyncMessage,
@@ -27,8 +29,6 @@ use crate::networking::{
NodeId, NodeId,
VectorClock, VectorClock,
}, },
GossipBridge,
NodeVectorClock,
}; };
/// Maximum operations to keep per entity (prevents unbounded growth) /// Maximum operations to keep per entity (prevents unbounded growth)
@@ -62,7 +62,8 @@ struct LogEntry {
/// - Max operation age: `MAX_OP_AGE_SECS` (300 seconds / 5 minutes) /// - Max operation age: `MAX_OP_AGE_SECS` (300 seconds / 5 minutes)
/// - Max entities: `MAX_ENTITIES` (10,000) /// - Max entities: `MAX_ENTITIES` (10,000)
/// ///
/// When limits are exceeded, oldest operations/entities are pruned automatically. /// When limits are exceeded, oldest operations/entities are pruned
/// automatically.
#[derive(Resource)] #[derive(Resource)]
pub struct OperationLog { pub struct OperationLog {
/// Map from entity ID to list of recent operations /// Map from entity ID to list of recent operations
@@ -88,7 +89,11 @@ impl OperationLog {
/// # Example /// # Example
/// ///
/// ``` /// ```
/// use lib::networking::{OperationLog, EntityDelta, VectorClock}; /// use lib::networking::{
/// EntityDelta,
/// OperationLog,
/// VectorClock,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// let mut log = OperationLog::new(); /// let mut log = OperationLog::new();
@@ -119,7 +124,10 @@ impl OperationLog {
timestamp: std::time::Instant::now(), timestamp: std::time::Instant::now(),
}; };
let log = self.logs.entry(delta.entity_id).or_insert_with(VecDeque::new); let log = self
.logs
.entry(delta.entity_id)
.or_insert_with(VecDeque::new);
log.push_back(entry); log.push_back(entry);
self.total_ops += 1; self.total_ops += 1;
@@ -134,9 +142,7 @@ impl OperationLog {
fn find_oldest_entity(&self) -> Option<uuid::Uuid> { fn find_oldest_entity(&self) -> Option<uuid::Uuid> {
self.logs self.logs
.iter() .iter()
.filter_map(|(entity_id, log)| { .filter_map(|(entity_id, log)| log.front().map(|entry| (*entity_id, entry.timestamp)))
log.front().map(|entry| (*entity_id, entry.timestamp))
})
.min_by_key(|(_, timestamp)| *timestamp) .min_by_key(|(_, timestamp)| *timestamp)
.map(|(entity_id, _)| entity_id) .map(|(entity_id, _)| entity_id)
} }
@@ -189,9 +195,7 @@ impl OperationLog {
for log in self.logs.values_mut() { for log in self.logs.values_mut() {
let before_len = log.len(); let before_len = log.len();
log.retain(|entry| { log.retain(|entry| now.duration_since(entry.timestamp) < max_age);
now.duration_since(entry.timestamp) < max_age
});
pruned_count += before_len - log.len(); pruned_count += before_len - log.len();
} }
@@ -226,7 +230,10 @@ impl Default for OperationLog {
/// # Example /// # Example
/// ///
/// ``` /// ```
/// use lib::networking::{build_sync_request, VectorClock}; /// use lib::networking::{
/// VectorClock,
/// build_sync_request,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// let node_id = Uuid::new_v4(); /// let node_id = Uuid::new_v4();
@@ -258,8 +265,7 @@ pub fn build_missing_deltas(deltas: Vec<EntityDelta>) -> VersionedMessage {
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::handle_sync_requests_system; /// use lib::networking::handle_sync_requests_system;
/// ///
/// App::new() /// App::new().add_systems(Update, handle_sync_requests_system);
/// .add_systems(Update, handle_sync_requests_system);
/// ``` /// ```
pub fn handle_sync_requests_system( pub fn handle_sync_requests_system(
bridge: Option<Res<GossipBridge>>, bridge: Option<Res<GossipBridge>>,
@@ -296,10 +302,10 @@ pub fn handle_sync_requests_system(
} else { } else {
debug!("No missing deltas for node {}", requesting_node); debug!("No missing deltas for node {}", requesting_node);
} }
} },
| _ => { | _ => {
// Not a SyncRequest, ignore // Not a SyncRequest, ignore
} },
} }
} }
} }
@@ -324,17 +330,14 @@ pub fn handle_missing_deltas_system(world: &mut World) {
// Apply each delta // Apply each delta
for delta in deltas { for delta in deltas {
debug!( debug!("Applying missing delta for entity {:?}", delta.entity_id);
"Applying missing delta for entity {:?}",
delta.entity_id
);
crate::networking::apply_entity_delta(&delta, world); crate::networking::apply_entity_delta(&delta, world);
} }
} },
| _ => { | _ => {
// Not MissingDeltas, ignore // Not MissingDeltas, ignore
} },
} }
} }
} }
@@ -394,10 +397,7 @@ pub fn prune_operation_log_system(
let after = operation_log.total_operations(); let after = operation_log.total_operations();
if before != after { if before != after {
debug!( debug!("Pruned operation log: {} ops -> {} ops", before, after);
"Pruned operation log: {} ops -> {} ops",
before, after
);
} }
} }
} }
@@ -489,7 +489,7 @@ mod tests {
} => { } => {
assert_eq!(req_node_id, node_id); assert_eq!(req_node_id, node_id);
assert_eq!(vector_clock, clock); assert_eq!(vector_clock, clock);
} },
| _ => panic!("Expected SyncRequest"), | _ => panic!("Expected SyncRequest"),
} }
} }
@@ -507,7 +507,7 @@ mod tests {
| SyncMessage::MissingDeltas { deltas } => { | SyncMessage::MissingDeltas { deltas } => {
assert_eq!(deltas.len(), 1); assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].entity_id, entity_id); assert_eq!(deltas[0].entity_id, entity_id);
} },
| _ => panic!("Expected MissingDeltas"), | _ => panic!("Expected MissingDeltas"),
} }
} }

View File

@@ -144,11 +144,11 @@ impl ComponentOp {
/// Get the component type for this operation /// Get the component type for this operation
pub fn component_type(&self) -> Option<&str> { pub fn component_type(&self) -> Option<&str> {
match self { match self {
| ComponentOp::Set { component_type, .. } | ComponentOp::Set { component_type, .. } |
| ComponentOp::SetAdd { component_type, .. } ComponentOp::SetAdd { component_type, .. } |
| ComponentOp::SetRemove { component_type, .. } ComponentOp::SetRemove { component_type, .. } |
| ComponentOp::SequenceInsert { component_type, .. } ComponentOp::SequenceInsert { component_type, .. } |
| ComponentOp::SequenceDelete { component_type, .. } => Some(component_type), ComponentOp::SequenceDelete { component_type, .. } => Some(component_type),
| ComponentOp::Delete { .. } => None, | ComponentOp::Delete { .. } => None,
} }
} }
@@ -156,12 +156,12 @@ impl ComponentOp {
/// Get the vector clock for this operation /// Get the vector clock for this operation
pub fn vector_clock(&self) -> &VectorClock { pub fn vector_clock(&self) -> &VectorClock {
match self { match self {
| ComponentOp::Set { vector_clock, .. } | ComponentOp::Set { vector_clock, .. } |
| ComponentOp::SetAdd { vector_clock, .. } ComponentOp::SetAdd { vector_clock, .. } |
| ComponentOp::SetRemove { vector_clock, .. } ComponentOp::SetRemove { vector_clock, .. } |
| ComponentOp::SequenceInsert { vector_clock, .. } ComponentOp::SequenceInsert { vector_clock, .. } |
| ComponentOp::SequenceDelete { vector_clock, .. } ComponentOp::SequenceDelete { vector_clock, .. } |
| ComponentOp::Delete { vector_clock } => vector_clock, ComponentOp::Delete { vector_clock } => vector_clock,
} }
} }
@@ -232,7 +232,11 @@ impl ComponentOpBuilder {
} }
/// Build a SetRemove operation (OR-Set) /// Build a SetRemove operation (OR-Set)
pub fn set_remove(mut self, component_type: String, removed_ids: Vec<uuid::Uuid>) -> ComponentOp { pub fn set_remove(
mut self,
component_type: String,
removed_ids: Vec<uuid::Uuid>,
) -> ComponentOp {
self.vector_clock.increment(self.node_id); self.vector_clock.increment(self.node_id);
ComponentOp::SetRemove { ComponentOp::SetRemove {
component_type, component_type,
@@ -259,7 +263,11 @@ impl ComponentOpBuilder {
} }
/// Build a SequenceDelete operation (RGA) /// Build a SequenceDelete operation (RGA)
pub fn sequence_delete(mut self, component_type: String, element_id: uuid::Uuid) -> ComponentOp { pub fn sequence_delete(
mut self,
component_type: String,
element_id: uuid::Uuid,
) -> ComponentOp {
self.vector_clock.increment(self.node_id); self.vector_clock.increment(self.node_id);
ComponentOp::SequenceDelete { ComponentOp::SequenceDelete {
component_type, component_type,
@@ -352,7 +360,10 @@ mod tests {
let clock = VectorClock::new(); let clock = VectorClock::new();
let builder = ComponentOpBuilder::new(node_id, clock); let builder = ComponentOpBuilder::new(node_id, clock);
let op = builder.set("Transform".to_string(), ComponentData::Inline(vec![1, 2, 3])); let op = builder.set(
"Transform".to_string(),
ComponentData::Inline(vec![1, 2, 3]),
);
assert!(op.is_set()); assert!(op.is_set());
assert_eq!(op.vector_clock().get(node_id), 1); assert_eq!(op.vector_clock().get(node_id), 1);

View File

@@ -5,14 +5,20 @@
//! //!
//! ## OR-Set Semantics //! ## OR-Set Semantics
//! //!
//! - **Add-wins**: If an element is concurrently added and removed, the add wins //! - **Add-wins**: If an element is concurrently added and removed, the add
//! - **Observed-remove**: Removes only affect adds that have been observed (happened-before) //! wins
//! - **Unique operation IDs**: Each add generates a unique ID to track add/remove pairs //! - **Observed-remove**: Removes only affect adds that have been observed
//! (happened-before)
//! - **Unique operation IDs**: Each add generates a unique ID to track
//! add/remove pairs
//! //!
//! ## Example //! ## Example
//! //!
//! ``` //! ```
//! use lib::networking::{OrSet, OrElement}; //! use lib::networking::{
//! OrElement,
//! OrSet,
//! };
//! use uuid::Uuid; //! use uuid::Uuid;
//! //!
//! let node1 = Uuid::new_v4(); //! let node1 = Uuid::new_v4();
@@ -157,11 +163,12 @@ where
/// Check if a value is present in the set /// Check if a value is present in the set
/// ///
/// A value is present if it has at least one operation ID that's not tombstoned. /// A value is present if it has at least one operation ID that's not
/// tombstoned.
pub fn contains(&self, value: &T) -> bool { pub fn contains(&self, value: &T) -> bool {
self.elements.iter().any(|(id, (v, _))| { self.elements
v == value && !self.tombstones.contains(id) .iter()
}) .any(|(id, (v, _))| v == value && !self.tombstones.contains(id))
} }
/// Get all present values /// Get all present values
@@ -208,9 +215,7 @@ where
let mut seen = HashSet::new(); let mut seen = HashSet::new();
self.elements self.elements
.iter() .iter()
.filter(|(id, (value, _))| { .filter(|(id, (value, _))| !self.tombstones.contains(id) && seen.insert(value))
!self.tombstones.contains(id) && seen.insert(value)
})
.count() .count()
} }
@@ -249,7 +254,9 @@ where
pub fn merge(&mut self, other: &OrSet<T>) { pub fn merge(&mut self, other: &OrSet<T>) {
// Union elements // Union elements
for (id, (value, node)) in &other.elements { for (id, (value, node)) in &other.elements {
self.elements.entry(*id).or_insert_with(|| (value.clone(), *node)); self.elements
.entry(*id)
.or_insert_with(|| (value.clone(), *node));
} }
// Union tombstones // Union tombstones

View File

@@ -8,7 +8,10 @@
//! //!
//! ```no_run //! ```no_run
//! use bevy::prelude::*; //! use bevy::prelude::*;
//! use lib::networking::{NetworkingPlugin, NetworkingConfig}; //! use lib::networking::{
//! NetworkingConfig,
//! NetworkingPlugin,
//! };
//! use uuid::Uuid; //! use uuid::Uuid;
//! //!
//! fn main() { //! fn main() {
@@ -28,28 +31,28 @@ use bevy::prelude::*;
use crate::networking::{ use crate::networking::{
change_detection::{ change_detection::{
auto_detect_transform_changes_system,
LastSyncVersions, LastSyncVersions,
auto_detect_transform_changes_system,
}, },
delta_generation::{ delta_generation::{
generate_delta_system,
NodeVectorClock, NodeVectorClock,
generate_delta_system,
}, },
entity_map::{ entity_map::{
NetworkEntityMap,
cleanup_despawned_entities_system, cleanup_despawned_entities_system,
register_networked_entities_system, register_networked_entities_system,
NetworkEntityMap,
}, },
message_dispatcher::message_dispatcher_system, message_dispatcher::message_dispatcher_system,
operation_log::{ operation_log::{
OperationLog,
periodic_sync_system, periodic_sync_system,
prune_operation_log_system, prune_operation_log_system,
OperationLog,
}, },
tombstones::{ tombstones::{
TombstoneRegistry,
garbage_collect_tombstones_system, garbage_collect_tombstones_system,
handle_local_deletions_system, handle_local_deletions_system,
TombstoneRegistry,
}, },
vector_clock::NodeId, vector_clock::NodeId,
}; };
@@ -84,6 +87,51 @@ impl Default for NetworkingConfig {
} }
} }
/// Optional session secret for authentication
///
/// This is a pre-shared secret that controls access to the gossip network.
/// If configured, all joining nodes must provide the correct session secret
/// to receive the full state.
///
/// # Security Model
///
/// The session secret provides network-level access control by:
/// - Preventing unauthorized nodes from joining the gossip
/// - Hash-based comparison prevents timing attacks
/// - Works alongside iroh-gossip's built-in QUIC transport encryption
///
/// # Usage
///
/// Insert this as a Bevy resource to enable session secret validation:
///
/// ```no_run
/// use bevy::prelude::*;
/// use lib::networking::{
/// NetworkingPlugin,
/// SessionSecret,
/// };
/// use uuid::Uuid;
///
/// App::new()
/// .add_plugins(NetworkingPlugin::default_with_node_id(Uuid::new_v4()))
/// .insert_resource(SessionSecret::new(b"my_secret_key"))
/// .run();
/// ```
#[derive(Resource, Clone)]
pub struct SessionSecret(Vec<u8>);
impl SessionSecret {
/// Create a new session secret from bytes
pub fn new(secret: impl Into<Vec<u8>>) -> Self {
Self(secret.into())
}
/// Get the secret as a byte slice
pub fn as_bytes(&self) -> &[u8] {
&self.0
}
}
/// Bevy plugin for CRDT networking /// Bevy plugin for CRDT networking
/// ///
/// This plugin sets up all systems and resources needed for distributed /// This plugin sets up all systems and resources needed for distributed
@@ -122,7 +170,10 @@ impl Default for NetworkingConfig {
/// ///
/// ```no_run /// ```no_run
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{NetworkingPlugin, NetworkingConfig}; /// use lib::networking::{
/// NetworkingConfig,
/// NetworkingPlugin,
/// };
/// use uuid::Uuid; /// use uuid::Uuid;
/// ///
/// App::new() /// App::new()

View File

@@ -270,13 +270,9 @@ where
pub fn values(&self) -> impl Iterator<Item = &T> { pub fn values(&self) -> impl Iterator<Item = &T> {
let ordered = self.get_ordered_elements(); let ordered = self.get_ordered_elements();
ordered.into_iter().filter_map(move |id| { ordered.into_iter().filter_map(move |id| {
self.elements.get(&id).and_then(|e| { self.elements
if !e.is_deleted { .get(&id)
Some(&e.value) .and_then(|e| if !e.is_deleted { Some(&e.value) } else { None })
} else {
None
}
})
}) })
} }
@@ -344,9 +340,9 @@ where
/// Garbage collect tombstones /// Garbage collect tombstones
/// ///
/// Removes deleted elements that have no children (nothing inserted after them). /// Removes deleted elements that have no children (nothing inserted after
/// This is safe because if no element references a tombstone as its parent, /// them). This is safe because if no element references a tombstone as
/// it can be removed without affecting the sequence. /// its parent, it can be removed without affecting the sequence.
pub fn garbage_collect(&mut self) { pub fn garbage_collect(&mut self) {
// Find all IDs that are referenced as after_id // Find all IDs that are referenced as after_id
let mut referenced_ids = std::collections::HashSet::new(); let mut referenced_ids = std::collections::HashSet::new();
@@ -357,9 +353,8 @@ where
} }
// Remove deleted elements that aren't referenced // Remove deleted elements that aren't referenced
self.elements.retain(|id, element| { self.elements
!element.is_deleted || referenced_ids.contains(id) .retain(|id, element| !element.is_deleted || referenced_ids.contains(id));
});
} }
/// Get ordered list of element IDs /// Get ordered list of element IDs
@@ -385,12 +380,12 @@ where
// Compare vector clocks // Compare vector clocks
match elem_a.vector_clock.compare(&elem_b.vector_clock) { match elem_a.vector_clock.compare(&elem_b.vector_clock) {
Ok(std::cmp::Ordering::Less) => std::cmp::Ordering::Less, | Ok(std::cmp::Ordering::Less) => std::cmp::Ordering::Less,
Ok(std::cmp::Ordering::Greater) => std::cmp::Ordering::Greater, | Ok(std::cmp::Ordering::Greater) => std::cmp::Ordering::Greater,
Ok(std::cmp::Ordering::Equal) | Err(_) => { | Ok(std::cmp::Ordering::Equal) | Err(_) => {
// If clocks are equal or concurrent, use node ID as tiebreaker // If clocks are equal or concurrent, use node ID as tiebreaker
elem_a.inserting_node.cmp(&elem_b.inserting_node) elem_a.inserting_node.cmp(&elem_b.inserting_node)
} },
} }
}); });
} }
@@ -415,10 +410,7 @@ where
/// Calculate the visible position of an element /// Calculate the visible position of an element
fn calculate_position(&self, element_id: uuid::Uuid) -> usize { fn calculate_position(&self, element_id: uuid::Uuid) -> usize {
let ordered = self.get_ordered_elements(); let ordered = self.get_ordered_elements();
ordered ordered.iter().position(|id| id == &element_id).unwrap_or(0)
.iter()
.position(|id| id == &element_id)
.unwrap_or(0)
} }
} }

View File

@@ -48,7 +48,12 @@ pub enum ComponentMergeDecision {
/// # Example /// # Example
/// ``` /// ```
/// use bevy::prelude::*; /// use bevy::prelude::*;
/// use lib::networking::{SyncComponent, SyncStrategy, ClockComparison, ComponentMergeDecision}; /// use lib::networking::{
/// ClockComparison,
/// ComponentMergeDecision,
/// SyncComponent,
/// SyncStrategy,
/// };
/// ///
/// // Example showing what the trait looks like - normally generated by #[derive(Synced)] /// // Example showing what the trait looks like - normally generated by #[derive(Synced)]
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)] /// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)]
@@ -77,7 +82,8 @@ pub trait SyncComponent: Component + Reflect + Sized {
/// Merge remote state with local state /// Merge remote state with local state
/// ///
/// The merge logic is strategy-specific: /// The merge logic is strategy-specific:
/// - **LWW**: Takes newer value based on vector clock, uses tiebreaker for concurrent /// - **LWW**: Takes newer value based on vector clock, uses tiebreaker for
/// concurrent
/// - **Set**: Merges both sets (OR-Set semantics) /// - **Set**: Merges both sets (OR-Set semantics)
/// - **Sequence**: Merges sequences preserving order (RGA semantics) /// - **Sequence**: Merges sequences preserving order (RGA semantics)
/// - **Custom**: Calls user-defined ConflictResolver /// - **Custom**: Calls user-defined ConflictResolver
@@ -102,21 +108,22 @@ pub trait SyncComponent: Component + Reflect + Sized {
/// use lib::networking::Synced; /// use lib::networking::Synced;
/// use sync_macros::Synced as SyncedDerive; /// use sync_macros::Synced as SyncedDerive;
/// ///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)] /// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize, SyncedDerive)]
/// #[derive(SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")] /// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Health(f32); /// struct Health(f32);
/// ///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize)] /// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize, SyncedDerive)]
/// #[derive(SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")] /// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Position { x: f32, y: f32 } /// struct Position {
/// x: f32,
/// y: f32,
/// }
/// ///
/// let mut world = World::new(); /// let mut world = World::new();
/// world.spawn(( /// world.spawn((
/// Health(100.0), /// Health(100.0),
/// Position { x: 0.0, y: 0.0 }, /// Position { x: 0.0, y: 0.0 },
/// Synced, // Marker enables sync /// Synced, // Marker enables sync
/// )); /// ));
/// ``` /// ```
#[derive(Component, Reflect, Default, Clone, Copy)] #[derive(Component, Reflect, Default, Clone, Copy)]

View File

@@ -15,7 +15,8 @@
//! ## Resurrection Prevention //! ## Resurrection Prevention
//! //!
//! If a peer creates an entity (Set operation) while another peer deletes it: //! If a peer creates an entity (Set operation) while another peer deletes it:
//! - Use vector clock comparison: if delete happened-after create, deletion wins //! - Use vector clock comparison: if delete happened-after create, deletion
//! wins
//! - If concurrent, deletion wins (delete bias for safety) //! - If concurrent, deletion wins (delete bias for safety)
//! - This prevents "zombie" entities from reappearing //! - This prevents "zombie" entities from reappearing
//! //!
@@ -29,12 +30,12 @@ use std::collections::HashMap;
use bevy::prelude::*; use bevy::prelude::*;
use crate::networking::{ use crate::networking::{
GossipBridge,
NodeVectorClock,
vector_clock::{ vector_clock::{
NodeId, NodeId,
VectorClock, VectorClock,
}, },
GossipBridge,
NodeVectorClock,
}; };
/// How long to keep tombstones before garbage collection (in seconds) /// How long to keep tombstones before garbage collection (in seconds)
@@ -134,7 +135,8 @@ impl TombstoneRegistry {
/// ///
/// Returns true if: /// Returns true if:
/// - The entity has a tombstone AND /// - The entity has a tombstone AND
/// - The operation's clock happened-before or is concurrent with the deletion /// - The operation's clock happened-before or is concurrent with the
/// deletion
/// ///
/// This prevents operations on deleted entities from being applied. /// This prevents operations on deleted entities from being applied.
pub fn should_ignore_operation( pub fn should_ignore_operation(
@@ -150,7 +152,8 @@ impl TombstoneRegistry {
// deletion_clock.happened_before(operation_clock) => don't ignore // deletion_clock.happened_before(operation_clock) => don't ignore
// If concurrent, deletion wins (delete bias) => ignore // If concurrent, deletion wins (delete bias) => ignore
// !operation_clock.happened_before(deletion_clock) && !deletion_clock.happened_before(operation_clock) => ignore // !operation_clock.happened_before(deletion_clock) &&
// !deletion_clock.happened_before(operation_clock) => ignore
// So we DON'T ignore only if deletion happened-before operation // So we DON'T ignore only if deletion happened-before operation
!tombstone.deletion_clock.happened_before(operation_clock) !tombstone.deletion_clock.happened_before(operation_clock)
@@ -168,9 +171,8 @@ impl TombstoneRegistry {
let before_count = self.tombstones.len(); let before_count = self.tombstones.len();
self.tombstones.retain(|_, tombstone| { self.tombstones
now.duration_since(tombstone.timestamp) < ttl .retain(|_, tombstone| now.duration_since(tombstone.timestamp) < ttl);
});
let after_count = self.tombstones.len(); let after_count = self.tombstones.len();
@@ -254,14 +256,13 @@ pub fn handle_local_deletions_system(
} }
// Broadcast deletion // Broadcast deletion
let message = crate::networking::VersionedMessage::new( let message =
crate::networking::SyncMessage::EntityDelta { crate::networking::VersionedMessage::new(crate::networking::SyncMessage::EntityDelta {
entity_id: delta.entity_id, entity_id: delta.entity_id,
node_id: delta.node_id, node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(), vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(), operations: delta.operations.clone(),
}, });
);
if let Err(e) = bridge.send(message) { if let Err(e) = bridge.send(message) {
error!("Failed to broadcast Delete operation: {}", e); error!("Failed to broadcast Delete operation: {}", e);

View File

@@ -27,8 +27,8 @@ pub type NodeId = uuid::Uuid;
/// # Causal Relationships /// # Causal Relationships
/// ///
/// Given two vector clocks A and B: /// Given two vector clocks A and B:
/// - **A happened-before B** if all of A's counters ≤ B's counters and at /// - **A happened-before B** if all of A's counters ≤ B's counters and at least
/// least one is < /// one is <
/// - **A and B are concurrent** if neither happened-before the other /// - **A and B are concurrent** if neither happened-before the other
/// - **A and B are identical** if all counters are equal /// - **A and B are identical** if all counters are equal
/// ///
@@ -42,16 +42,16 @@ pub type NodeId = uuid::Uuid;
/// let node2 = Uuid::new_v4(); /// let node2 = Uuid::new_v4();
/// ///
/// let mut clock1 = VectorClock::new(); /// let mut clock1 = VectorClock::new();
/// clock1.increment(node1); // node1: 1 /// clock1.increment(node1); // node1: 1
/// ///
/// let mut clock2 = VectorClock::new(); /// let mut clock2 = VectorClock::new();
/// clock2.increment(node2); // node2: 1 /// clock2.increment(node2); // node2: 1
/// ///
/// // These are concurrent - neither happened before the other /// // These are concurrent - neither happened before the other
/// assert!(clock1.is_concurrent_with(&clock2)); /// assert!(clock1.is_concurrent_with(&clock2));
/// ///
/// // Merge the clocks /// // Merge the clocks
/// clock1.merge(&clock2); // node1: 1, node2: 1 /// clock1.merge(&clock2); // node1: 1, node2: 1
/// assert!(clock1.happened_before(&clock2) == false); /// assert!(clock1.happened_before(&clock2) == false);
/// ``` /// ```
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
@@ -116,11 +116,11 @@ impl VectorClock {
/// let node2 = Uuid::new_v4(); /// let node2 = Uuid::new_v4();
/// ///
/// let mut clock1 = VectorClock::new(); /// let mut clock1 = VectorClock::new();
/// clock1.increment(node1); // node1: 1 /// clock1.increment(node1); // node1: 1
/// clock1.increment(node1); // node1: 2 /// clock1.increment(node1); // node1: 2
/// ///
/// let mut clock2 = VectorClock::new(); /// let mut clock2 = VectorClock::new();
/// clock2.increment(node2); // node2: 1 /// clock2.increment(node2); // node2: 1
/// ///
/// clock1.merge(&clock2); /// clock1.merge(&clock2);
/// assert_eq!(clock1.get(node1), 2); /// assert_eq!(clock1.get(node1), 2);
@@ -147,36 +147,36 @@ impl VectorClock {
/// let node = Uuid::new_v4(); /// let node = Uuid::new_v4();
/// ///
/// let mut clock1 = VectorClock::new(); /// let mut clock1 = VectorClock::new();
/// clock1.increment(node); // node: 1 /// clock1.increment(node); // node: 1
/// ///
/// let mut clock2 = VectorClock::new(); /// let mut clock2 = VectorClock::new();
/// clock2.increment(node); // node: 1 /// clock2.increment(node); // node: 1
/// clock2.increment(node); // node: 2 /// clock2.increment(node); // node: 2
/// ///
/// assert!(clock1.happened_before(&clock2)); /// assert!(clock1.happened_before(&clock2));
/// assert!(!clock2.happened_before(&clock1)); /// assert!(!clock2.happened_before(&clock1));
/// ``` /// ```
pub fn happened_before(&self, other: &VectorClock) -> bool { pub fn happened_before(&self, other: &VectorClock) -> bool {
// Check if all our counters are <= other's counters // Single-pass optimization: check both conditions simultaneously
let all_less_or_equal = self.clocks.iter().all(|(node_id, &our_counter)| { let mut any_strictly_less = false;
let their_counter = other.get(*node_id);
our_counter <= their_counter
});
if !all_less_or_equal { // Check our nodes in a single pass
return false; for (node_id, &our_counter) in &self.clocks {
let their_counter = other.get(*node_id);
// Early exit if we have a counter greater than theirs
if our_counter > their_counter {
return false;
}
// Track if any counter is strictly less
if our_counter < their_counter {
any_strictly_less = true;
}
} }
// Check if at least one counter is strictly less // If we haven't found a strictly less counter yet, check if they have
// First check if any of our nodes has a lower counter // nodes we don't know about with non-zero values (those count as strictly less)
let mut any_strictly_less = self.clocks.iter().any(|(node_id, &our_counter)| {
let their_counter = other.get(*node_id);
our_counter < their_counter
});
// Also check if they have nodes we don't know about with non-zero values
// For nodes not in self.clocks, we treat them as having counter 0
// If other has a node with counter > 0 that we don't have, that counts as "strictly less"
if !any_strictly_less { if !any_strictly_less {
any_strictly_less = other.clocks.iter().any(|(node_id, &their_counter)| { any_strictly_less = other.clocks.iter().any(|(node_id, &their_counter)| {
!self.clocks.contains_key(node_id) && their_counter > 0 !self.clocks.contains_key(node_id) && their_counter > 0
@@ -202,10 +202,10 @@ impl VectorClock {
/// let node2 = Uuid::new_v4(); /// let node2 = Uuid::new_v4();
/// ///
/// let mut clock1 = VectorClock::new(); /// let mut clock1 = VectorClock::new();
/// clock1.increment(node1); // node1: 1 /// clock1.increment(node1); // node1: 1
/// ///
/// let mut clock2 = VectorClock::new(); /// let mut clock2 = VectorClock::new();
/// clock2.increment(node2); // node2: 1 /// clock2.increment(node2); // node2: 1
/// ///
/// assert!(clock1.is_concurrent_with(&clock2)); /// assert!(clock1.is_concurrent_with(&clock2));
/// assert!(clock2.is_concurrent_with(&clock1)); /// assert!(clock2.is_concurrent_with(&clock1));
@@ -422,7 +422,10 @@ mod tests {
clock2.increment(node); clock2.increment(node);
assert_eq!(clock1.compare(&clock2).unwrap(), std::cmp::Ordering::Less); assert_eq!(clock1.compare(&clock2).unwrap(), std::cmp::Ordering::Less);
assert_eq!(clock2.compare(&clock1).unwrap(), std::cmp::Ordering::Greater); assert_eq!(
clock2.compare(&clock1).unwrap(),
std::cmp::Ordering::Greater
);
assert_eq!(clock1.compare(&clock1).unwrap(), std::cmp::Ordering::Equal); assert_eq!(clock1.compare(&clock1).unwrap(), std::cmp::Ordering::Equal);
} }

View File

@@ -215,7 +215,7 @@ pub fn flush_to_sqlite(ops: &[PersistenceOp], conn: &mut Connection) -> Result<u
"INSERT OR REPLACE INTO operation_log (node_id, sequence_number, operation, timestamp) "INSERT OR REPLACE INTO operation_log (node_id, sequence_number, operation, timestamp)
VALUES (?1, ?2, ?3, ?4)", VALUES (?1, ?2, ?3, ?4)",
rusqlite::params![ rusqlite::params![
node_id, &node_id.to_string(), // Convert UUID to string for SQLite TEXT column
sequence, sequence,
operation, operation,
current_timestamp(), current_timestamp(),
@@ -228,7 +228,7 @@ pub fn flush_to_sqlite(ops: &[PersistenceOp], conn: &mut Connection) -> Result<u
tx.execute( tx.execute(
"INSERT OR REPLACE INTO vector_clock (node_id, counter, updated_at) "INSERT OR REPLACE INTO vector_clock (node_id, counter, updated_at)
VALUES (?1, ?2, ?3)", VALUES (?1, ?2, ?3)",
rusqlite::params![node_id, counter, current_timestamp()], rusqlite::params![&node_id.to_string(), counter, current_timestamp()], // Convert UUID to string
)?; )?;
count += 1; count += 1;
}, },

View File

@@ -35,6 +35,13 @@ pub enum PersistenceError {
retry_after_secs: u64, retry_after_secs: u64,
}, },
/// Component data exceeds maximum size
ComponentTooLarge {
component_type: String,
size_bytes: usize,
max_bytes: usize,
},
/// Other error /// Other error
Other(String), Other(String),
} }
@@ -59,6 +66,16 @@ impl fmt::Display for PersistenceError {
"Circuit breaker open after {} consecutive failures, retry after {} seconds", "Circuit breaker open after {} consecutive failures, retry after {} seconds",
consecutive_failures, retry_after_secs consecutive_failures, retry_after_secs
), ),
| Self::ComponentTooLarge {
component_type,
size_bytes,
max_bytes,
} => write!(
f,
"Component '{}' size ({} bytes) exceeds maximum ({} bytes). \
This may indicate unbounded data growth or serialization issues.",
component_type, size_bytes, max_bytes
),
| Self::Other(msg) => write!(f, "{}", msg), | Self::Other(msg) => write!(f, "{}", msg),
} }
} }

View File

@@ -171,7 +171,8 @@ fn collect_dirty_entities_bevy_system(world: &mut World) {
// Collect changed entities first // Collect changed entities first
let changed_entities: Vec<(Entity, uuid::Uuid)> = { let changed_entities: Vec<(Entity, uuid::Uuid)> = {
let mut query = world.query_filtered::<(Entity, &Persisted), Changed<Persisted>>(); let mut query = world.query_filtered::<(Entity, &Persisted), Changed<Persisted>>();
query.iter(world) query
.iter(world)
.map(|(entity, persisted)| (entity, persisted.network_id)) .map(|(entity, persisted)| (entity, persisted.network_id))
.collect() .collect()
}; };
@@ -186,7 +187,7 @@ fn collect_dirty_entities_bevy_system(world: &mut World) {
{ {
let now = chrono::Utc::now(); let now = chrono::Utc::now();
let mut write_buffer = world.resource_mut::<WriteBufferResource>(); let mut write_buffer = world.resource_mut::<WriteBufferResource>();
write_buffer.add(PersistenceOp::UpsertEntity { if let Err(e) = write_buffer.add(PersistenceOp::UpsertEntity {
id: network_id, id: network_id,
data: EntityData { data: EntityData {
id: network_id, id: network_id,
@@ -194,7 +195,13 @@ fn collect_dirty_entities_bevy_system(world: &mut World) {
updated_at: now, updated_at: now,
entity_type: "NetworkedEntity".to_string(), entity_type: "NetworkedEntity".to_string(),
}, },
}); }) {
error!(
"Failed to add UpsertEntity operation for {}: {}",
network_id, e
);
return; // Skip this entity if we can't even add the entity op
}
} }
// Serialize all components on this entity (generic tracking) // Serialize all components on this entity (generic tracking)
@@ -216,11 +223,17 @@ fn collect_dirty_entities_bevy_system(world: &mut World) {
// Get mutable access to write_buffer and add the operation // Get mutable access to write_buffer and add the operation
{ {
let mut write_buffer = world.resource_mut::<WriteBufferResource>(); let mut write_buffer = world.resource_mut::<WriteBufferResource>();
write_buffer.add(PersistenceOp::UpsertComponent { if let Err(e) = write_buffer.add(PersistenceOp::UpsertComponent {
entity_id: network_id, entity_id: network_id,
component_type, component_type: component_type.clone(),
data, data,
}); }) {
error!(
"Failed to add UpsertComponent operation for entity {} component {}: {}",
network_id, component_type, e
);
// Continue with other components even if one fails
}
} }
} }
} }

View File

@@ -17,6 +17,7 @@ use bevy::{
}; };
use bincode::Options as _; use bincode::Options as _;
use serde::de::DeserializeSeed; use serde::de::DeserializeSeed;
use crate::persistence::error::{ use crate::persistence::error::{
PersistenceError, PersistenceError,
Result, Result,
@@ -103,20 +104,23 @@ pub fn serialize_component(
type_registry: &TypeRegistry, type_registry: &TypeRegistry,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let serializer = ReflectSerializer::new(component, type_registry); let serializer = ReflectSerializer::new(component, type_registry);
bincode::options().serialize(&serializer) bincode::options()
.serialize(&serializer)
.map_err(PersistenceError::from) .map_err(PersistenceError::from)
} }
/// Serialize a component when the type is known (more efficient for bincode) /// Serialize a component when the type is known (more efficient for bincode)
/// ///
/// This uses `TypedReflectSerializer` which doesn't include type path information, /// This uses `TypedReflectSerializer` which doesn't include type path
/// making it compatible with `TypedReflectDeserializer` for binary formats. /// information, making it compatible with `TypedReflectDeserializer` for binary
/// formats.
pub fn serialize_component_typed( pub fn serialize_component_typed(
component: &dyn Reflect, component: &dyn Reflect,
type_registry: &TypeRegistry, type_registry: &TypeRegistry,
) -> Result<Vec<u8>> { ) -> Result<Vec<u8>> {
let serializer = TypedReflectSerializer::new(component, type_registry); let serializer = TypedReflectSerializer::new(component, type_registry);
bincode::options().serialize(&serializer) bincode::options()
.serialize(&serializer)
.map_err(PersistenceError::from) .map_err(PersistenceError::from)
} }
@@ -159,17 +163,18 @@ pub fn deserialize_component(
/// Deserialize a component when the type is known /// Deserialize a component when the type is known
/// ///
/// Uses `TypedReflectDeserializer` which is more efficient for binary formats like bincode /// Uses `TypedReflectDeserializer` which is more efficient for binary formats
/// when the component type is known at deserialization time. /// like bincode when the component type is known at deserialization time.
pub fn deserialize_component_typed( pub fn deserialize_component_typed(
bytes: &[u8], bytes: &[u8],
component_type: &str, component_type: &str,
type_registry: &TypeRegistry, type_registry: &TypeRegistry,
) -> Result<Box<dyn PartialReflect>> { ) -> Result<Box<dyn PartialReflect>> {
let registration = type_registry.get_with_type_path(component_type) let registration = type_registry
.ok_or_else(|| PersistenceError::Deserialization( .get_with_type_path(component_type)
format!("Type {} not registered", component_type) .ok_or_else(|| {
))?; PersistenceError::Deserialization(format!("Type {} not registered", component_type))
})?;
let mut deserializer = bincode::Deserializer::from_slice(bytes, bincode::options()); let mut deserializer = bincode::Deserializer::from_slice(bytes, bincode::options());
let reflect_deserializer = TypedReflectDeserializer::new(registration, type_registry); let reflect_deserializer = TypedReflectDeserializer::new(registration, type_registry);

View File

@@ -135,8 +135,11 @@ fn perform_flush_sync(
/// Helper function to perform a flush asynchronously (for normal operations) /// Helper function to perform a flush asynchronously (for normal operations)
/// ///
/// This runs on the I/O task pool to avoid blocking the main thread /// This runs the blocking SQLite operations on a thread pool via
fn perform_flush_async(ops: Vec<PersistenceOp>, db: PersistenceDb) -> Result<FlushResult> { /// blocking::unblock to avoid blocking the async runtime. This works with both
/// Bevy's async-executor and tokio runtimes, making it compatible with the
/// current Bevy integration and the future dedicated iOS async runtime.
async fn perform_flush_async(ops: Vec<PersistenceOp>, db: PersistenceDb) -> Result<FlushResult> {
if ops.is_empty() { if ops.is_empty() {
return Ok(FlushResult { return Ok(FlushResult {
operations_count: 0, operations_count: 0,
@@ -146,14 +149,25 @@ fn perform_flush_async(ops: Vec<PersistenceOp>, db: PersistenceDb) -> Result<Flu
} }
let bytes_written = calculate_bytes_written(&ops); let bytes_written = calculate_bytes_written(&ops);
let start = Instant::now();
let count = { // Use blocking::unblock which works with any async runtime (async-executor,
let mut conn = db.lock()?; // tokio, etc.) This spawns the blocking operation on a dedicated thread
flush_to_sqlite(&ops, &mut conn)? // pool
}; let result = blocking::unblock(move || {
let start = Instant::now();
let duration = start.elapsed(); let count = {
let mut conn = db.lock()?;
flush_to_sqlite(&ops, &mut conn)?
};
let duration = start.elapsed();
Ok::<_, crate::persistence::PersistenceError>((count, duration))
})
.await?;
let (count, duration) = result;
Ok(FlushResult { Ok(FlushResult {
operations_count: count, operations_count: count,
@@ -248,7 +262,7 @@ pub fn flush_system(
let task_pool = IoTaskPool::get(); let task_pool = IoTaskPool::get();
let db_clone = db.clone(); let db_clone = db.clone();
let task = task_pool.spawn(async move { perform_flush_async(ops, db_clone.clone()) }); let task = task_pool.spawn(async move { perform_flush_async(ops, db_clone.clone()).await });
pending_tasks.tasks.push(task); pending_tasks.tasks.push(task);
@@ -448,22 +462,26 @@ mod tests {
let entity_id = uuid::Uuid::new_v4(); let entity_id = uuid::Uuid::new_v4();
// First add the entity // First add the entity
write_buffer.add(PersistenceOp::UpsertEntity { write_buffer
id: entity_id, .add(PersistenceOp::UpsertEntity {
data: EntityData {
id: entity_id, id: entity_id,
created_at: chrono::Utc::now(), data: EntityData {
updated_at: chrono::Utc::now(), id: entity_id,
entity_type: "TestEntity".to_string(), created_at: chrono::Utc::now(),
}, updated_at: chrono::Utc::now(),
}); entity_type: "TestEntity".to_string(),
},
})
.unwrap();
// Then add a component // Then add a component
write_buffer.add(PersistenceOp::UpsertComponent { write_buffer
entity_id, .add(PersistenceOp::UpsertComponent {
component_type: "Transform".to_string(), entity_id,
data: vec![1, 2, 3], component_type: "Transform".to_string(),
}); data: vec![1, 2, 3],
})
.unwrap();
// Take operations and flush synchronously (testing the flush logic) // Take operations and flush synchronously (testing the flush logic)
let ops = write_buffer.take_operations(); let ops = write_buffer.take_operations();

View File

@@ -30,7 +30,7 @@ const CRITICAL_FLUSH_DEADLINE_MS: u64 = 1000;
pub type EntityId = uuid::Uuid; pub type EntityId = uuid::Uuid;
/// Node identifier for CRDT operations /// Node identifier for CRDT operations
pub type NodeId = String; pub type NodeId = uuid::Uuid;
/// Priority level for persistence operations /// Priority level for persistence operations
/// ///
@@ -162,6 +162,15 @@ pub struct WriteBuffer {
/// Pending operations not yet committed to SQLite /// Pending operations not yet committed to SQLite
pub pending_operations: Vec<PersistenceOp>, pub pending_operations: Vec<PersistenceOp>,
/// Index mapping (entity_id, component_type) to position in
/// pending_operations Enables O(1) deduplication for UpsertComponent
/// operations
component_index: std::collections::HashMap<(EntityId, String), usize>,
/// Index mapping entity_id to position in pending_operations
/// Enables O(1) deduplication for UpsertEntity operations
entity_index: std::collections::HashMap<EntityId, usize>,
/// When the buffer was last flushed /// When the buffer was last flushed
pub last_flush: Instant, pub last_flush: Instant,
@@ -179,6 +188,8 @@ impl WriteBuffer {
pub fn new(max_operations: usize) -> Self { pub fn new(max_operations: usize) -> Self {
Self { Self {
pending_operations: Vec::new(), pending_operations: Vec::new(),
component_index: std::collections::HashMap::new(),
entity_index: std::collections::HashMap::new(),
last_flush: Instant::now(), last_flush: Instant::now(),
max_operations, max_operations,
highest_priority: FlushPriority::Normal, highest_priority: FlushPriority::Normal,
@@ -191,10 +202,11 @@ impl WriteBuffer {
/// This is a convenience method that calls `add_with_priority` with /// This is a convenience method that calls `add_with_priority` with
/// `FlushPriority::Normal`. /// `FlushPriority::Normal`.
/// ///
/// # Panics /// # Errors
/// Panics if component data exceeds MAX_COMPONENT_SIZE_BYTES (10MB) /// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
pub fn add(&mut self, op: PersistenceOp) { /// MAX_COMPONENT_SIZE_BYTES (10MB)
self.add_with_priority(op, FlushPriority::Normal); pub fn add(&mut self, op: PersistenceOp) -> Result<(), crate::persistence::PersistenceError> {
self.add_with_priority(op, FlushPriority::Normal)
} }
/// Add an operation using its default priority /// Add an operation using its default priority
@@ -203,11 +215,15 @@ impl WriteBuffer {
/// automatically. CRDT operations will be added as Critical, others as /// automatically. CRDT operations will be added as Critical, others as
/// Normal. /// Normal.
/// ///
/// # Panics /// # Errors
/// Panics if component data exceeds MAX_COMPONENT_SIZE_BYTES (10MB) /// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
pub fn add_with_default_priority(&mut self, op: PersistenceOp) { /// MAX_COMPONENT_SIZE_BYTES (10MB)
pub fn add_with_default_priority(
&mut self,
op: PersistenceOp,
) -> Result<(), crate::persistence::PersistenceError> {
let priority = op.default_priority(); let priority = op.default_priority();
self.add_with_priority(op, priority); self.add_with_priority(op, priority)
} }
/// Add an operation to the write buffer with the specified priority /// Add an operation to the write buffer with the specified priority
@@ -216,9 +232,14 @@ impl WriteBuffer {
/// it will be replaced (keeping only the latest state). The priority /// it will be replaced (keeping only the latest state). The priority
/// is tracked separately to determine flush urgency. /// is tracked separately to determine flush urgency.
/// ///
/// # Panics /// # Errors
/// Panics if component data exceeds MAX_COMPONENT_SIZE_BYTES (10MB) /// Returns `PersistenceError::ComponentTooLarge` if component data exceeds
pub fn add_with_priority(&mut self, op: PersistenceOp, priority: FlushPriority) { /// MAX_COMPONENT_SIZE_BYTES (10MB)
pub fn add_with_priority(
&mut self,
op: PersistenceOp,
priority: FlushPriority,
) -> Result<(), crate::persistence::PersistenceError> {
// Validate component size to prevent unbounded memory growth // Validate component size to prevent unbounded memory growth
match &op { match &op {
| PersistenceOp::UpsertComponent { | PersistenceOp::UpsertComponent {
@@ -227,22 +248,20 @@ impl WriteBuffer {
.. ..
} => { } => {
if data.len() > MAX_COMPONENT_SIZE_BYTES { if data.len() > MAX_COMPONENT_SIZE_BYTES {
panic!( return Err(crate::persistence::PersistenceError::ComponentTooLarge {
"Component {} size ({} bytes) exceeds maximum ({} bytes). \ component_type: component_type.clone(),
This may indicate unbounded data growth or serialization issues.", size_bytes: data.len(),
component_type, max_bytes: MAX_COMPONENT_SIZE_BYTES,
data.len(), });
MAX_COMPONENT_SIZE_BYTES
);
} }
}, },
| PersistenceOp::LogOperation { operation, .. } => { | PersistenceOp::LogOperation { operation, .. } => {
if operation.len() > MAX_COMPONENT_SIZE_BYTES { if operation.len() > MAX_COMPONENT_SIZE_BYTES {
panic!( return Err(crate::persistence::PersistenceError::ComponentTooLarge {
"Operation size ({} bytes) exceeds maximum ({} bytes)", component_type: "Operation".to_string(),
operation.len(), size_bytes: operation.len(),
MAX_COMPONENT_SIZE_BYTES max_bytes: MAX_COMPONENT_SIZE_BYTES,
); });
} }
}, },
| _ => {}, | _ => {},
@@ -254,25 +273,27 @@ impl WriteBuffer {
component_type, component_type,
.. ..
} => { } => {
// Remove any existing pending write for this entity+component // O(1) lookup: check if we already have this component
self.pending_operations.retain(|existing_op| { let key = (*entity_id, component_type.clone());
!matches!(existing_op, if let Some(&old_pos) = self.component_index.get(&key) {
PersistenceOp::UpsertComponent { // Replace existing operation in-place
entity_id: e_id, self.pending_operations[old_pos] = op;
component_type: c_type, return Ok(());
.. }
} if e_id == entity_id && c_type == component_type // New operation: add to index
) let new_pos = self.pending_operations.len();
}); self.component_index.insert(key, new_pos);
}, },
| PersistenceOp::UpsertEntity { id, .. } => { | PersistenceOp::UpsertEntity { id, .. } => {
// Remove any existing pending write for this entity // O(1) lookup: check if we already have this entity
self.pending_operations.retain(|existing_op| { if let Some(&old_pos) = self.entity_index.get(id) {
!matches!(existing_op, // Replace existing operation in-place
PersistenceOp::UpsertEntity { id: e_id, .. } self.pending_operations[old_pos] = op;
if e_id == id return Ok(());
) }
}); // New operation: add to index
let new_pos = self.pending_operations.len();
self.entity_index.insert(*id, new_pos);
}, },
| _ => { | _ => {
// Other operations don't need coalescing // Other operations don't need coalescing
@@ -290,15 +311,22 @@ impl WriteBuffer {
} }
self.pending_operations.push(op); self.pending_operations.push(op);
Ok(())
} }
/// Take all pending operations and return them for flushing /// Take all pending operations and return them for flushing
/// ///
/// This resets the priority tracking state. /// This resets the priority tracking state and clears the deduplication
/// indices.
pub fn take_operations(&mut self) -> Vec<PersistenceOp> { pub fn take_operations(&mut self) -> Vec<PersistenceOp> {
// Reset priority tracking when operations are taken // Reset priority tracking when operations are taken
self.highest_priority = FlushPriority::Normal; self.highest_priority = FlushPriority::Normal;
self.first_critical_time = None; self.first_critical_time = None;
// Clear deduplication indices
self.component_index.clear();
self.entity_index.clear();
std::mem::take(&mut self.pending_operations) std::mem::take(&mut self.pending_operations)
} }
@@ -437,7 +465,7 @@ mod tests {
} }
#[test] #[test]
fn test_write_buffer_coalescing() { fn test_write_buffer_coalescing() -> Result<(), crate::persistence::PersistenceError> {
let mut buffer = WriteBuffer::new(100); let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4(); let entity_id = EntityId::new_v4();
@@ -446,7 +474,7 @@ mod tests {
entity_id, entity_id,
component_type: "Transform".to_string(), component_type: "Transform".to_string(),
data: vec![1, 2, 3], data: vec![1, 2, 3],
}); })?;
assert_eq!(buffer.len(), 1); assert_eq!(buffer.len(), 1);
// Add second version (should replace first) // Add second version (should replace first)
@@ -454,7 +482,7 @@ mod tests {
entity_id, entity_id,
component_type: "Transform".to_string(), component_type: "Transform".to_string(),
data: vec![4, 5, 6], data: vec![4, 5, 6],
}); })?;
assert_eq!(buffer.len(), 1); assert_eq!(buffer.len(), 1);
// Verify only latest version exists // Verify only latest version exists
@@ -465,6 +493,7 @@ mod tests {
} else { } else {
panic!("Expected UpsertComponent"); panic!("Expected UpsertComponent");
} }
Ok(())
} }
#[test] #[test]
@@ -473,18 +502,22 @@ mod tests {
let entity_id = EntityId::new_v4(); let entity_id = EntityId::new_v4();
// Add Transform // Add Transform
buffer.add(PersistenceOp::UpsertComponent { buffer
entity_id, .add(PersistenceOp::UpsertComponent {
component_type: "Transform".to_string(), entity_id,
data: vec![1, 2, 3], component_type: "Transform".to_string(),
}); data: vec![1, 2, 3],
})
.expect("Should successfully add Transform");
// Add Velocity (different component, should not coalesce) // Add Velocity (different component, should not coalesce)
buffer.add(PersistenceOp::UpsertComponent { buffer
entity_id, .add(PersistenceOp::UpsertComponent {
component_type: "Velocity".to_string(), entity_id,
data: vec![4, 5, 6], component_type: "Velocity".to_string(),
}); data: vec![4, 5, 6],
})
.expect("Should successfully add Velocity");
assert_eq!(buffer.len(), 2); assert_eq!(buffer.len(), 2);
} }
@@ -495,18 +528,20 @@ mod tests {
let entity_id = EntityId::new_v4(); let entity_id = EntityId::new_v4();
// Add operation with immediate priority // Add operation with immediate priority
buffer.add_with_priority( buffer
PersistenceOp::UpsertEntity { .add_with_priority(
id: entity_id, PersistenceOp::UpsertEntity {
data: EntityData {
id: entity_id, id: entity_id,
created_at: chrono::Utc::now(), data: EntityData {
updated_at: chrono::Utc::now(), id: entity_id,
entity_type: "TestEntity".to_string(), created_at: chrono::Utc::now(),
updated_at: chrono::Utc::now(),
entity_type: "TestEntity".to_string(),
},
}, },
}, FlushPriority::Immediate,
FlushPriority::Immediate, )
); .expect("Should successfully add entity with immediate priority");
// Should flush immediately regardless of interval // Should flush immediately regardless of interval
assert!(buffer.should_flush(std::time::Duration::from_secs(100))); assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
@@ -514,7 +549,7 @@ mod tests {
} }
#[test] #[test]
fn test_flush_priority_critical_deadline() { fn test_flush_priority_critical_deadline() -> Result<(), crate::persistence::PersistenceError> {
let mut buffer = WriteBuffer::new(100); let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4(); let entity_id = EntityId::new_v4();
@@ -530,7 +565,7 @@ mod tests {
}, },
}, },
FlushPriority::Critical, FlushPriority::Critical,
); )?;
assert_eq!(buffer.highest_priority, FlushPriority::Critical); assert_eq!(buffer.highest_priority, FlushPriority::Critical);
assert!(buffer.first_critical_time.is_some()); assert!(buffer.first_critical_time.is_some());
@@ -545,10 +580,11 @@ mod tests {
// Now should flush due to deadline // Now should flush due to deadline
assert!(buffer.should_flush(std::time::Duration::from_secs(100))); assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
Ok(())
} }
#[test] #[test]
fn test_flush_priority_normal() { fn test_flush_priority_normal() -> Result<(), crate::persistence::PersistenceError> {
let mut buffer = WriteBuffer::new(100); let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4(); let entity_id = EntityId::new_v4();
@@ -561,7 +597,7 @@ mod tests {
updated_at: chrono::Utc::now(), updated_at: chrono::Utc::now(),
entity_type: "TestEntity".to_string(), entity_type: "TestEntity".to_string(),
}, },
}); })?;
assert_eq!(buffer.highest_priority, FlushPriority::Normal); assert_eq!(buffer.highest_priority, FlushPriority::Normal);
assert!(buffer.first_critical_time.is_none()); assert!(buffer.first_critical_time.is_none());
@@ -574,10 +610,11 @@ mod tests {
// Now should flush // Now should flush
assert!(buffer.should_flush(std::time::Duration::from_secs(100))); assert!(buffer.should_flush(std::time::Duration::from_secs(100)));
Ok(())
} }
#[test] #[test]
fn test_priority_reset_on_take() { fn test_priority_reset_on_take() -> Result<(), crate::persistence::PersistenceError> {
let mut buffer = WriteBuffer::new(100); let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4(); let entity_id = EntityId::new_v4();
@@ -593,7 +630,7 @@ mod tests {
}, },
}, },
FlushPriority::Critical, FlushPriority::Critical,
); )?;
assert_eq!(buffer.highest_priority, FlushPriority::Critical); assert_eq!(buffer.highest_priority, FlushPriority::Critical);
assert!(buffer.first_critical_time.is_some()); assert!(buffer.first_critical_time.is_some());
@@ -605,18 +642,21 @@ mod tests {
// Priority should be reset // Priority should be reset
assert_eq!(buffer.highest_priority, FlushPriority::Normal); assert_eq!(buffer.highest_priority, FlushPriority::Normal);
assert!(buffer.first_critical_time.is_none()); assert!(buffer.first_critical_time.is_none());
Ok(())
} }
#[test] #[test]
fn test_default_priority_for_crdt_ops() { fn test_default_priority_for_crdt_ops() {
let node_id = NodeId::new_v4();
let log_op = PersistenceOp::LogOperation { let log_op = PersistenceOp::LogOperation {
node_id: "node1".to_string(), node_id,
sequence: 1, sequence: 1,
operation: vec![1, 2, 3], operation: vec![1, 2, 3],
}; };
let vector_clock_op = PersistenceOp::UpdateVectorClock { let vector_clock_op = PersistenceOp::UpdateVectorClock {
node_id: "node1".to_string(), node_id,
counter: 42, counter: 42,
}; };
@@ -638,19 +678,209 @@ mod tests {
assert_eq!(entity_op.default_priority(), FlushPriority::Normal); assert_eq!(entity_op.default_priority(), FlushPriority::Normal);
} }
#[test]
fn test_index_consistency_after_operations() -> Result<(), crate::persistence::PersistenceError>
{
let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4();
// Add component multiple times - should only keep latest
for i in 0..10 {
buffer.add(PersistenceOp::UpsertComponent {
entity_id,
component_type: "Transform".to_string(),
data: vec![i],
})?;
}
// Buffer should only have 1 operation (latest)
assert_eq!(buffer.len(), 1);
// Verify it's the latest data
let ops = buffer.take_operations();
assert_eq!(ops.len(), 1);
if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
assert_eq!(data, &vec![9]);
} else {
panic!("Expected UpsertComponent");
}
// After take, indices should be cleared and we can reuse
buffer.add(PersistenceOp::UpsertComponent {
entity_id,
component_type: "Transform".to_string(),
data: vec![100],
})?;
assert_eq!(buffer.len(), 1);
Ok(())
}
#[test]
fn test_index_handles_multiple_entities() -> Result<(), crate::persistence::PersistenceError> {
let mut buffer = WriteBuffer::new(100);
let entity1 = EntityId::new_v4();
let entity2 = EntityId::new_v4();
// Add same component type for different entities
buffer.add(PersistenceOp::UpsertComponent {
entity_id: entity1,
component_type: "Transform".to_string(),
data: vec![1],
})?;
buffer.add(PersistenceOp::UpsertComponent {
entity_id: entity2,
component_type: "Transform".to_string(),
data: vec![2],
})?;
// Should have 2 operations (different entities)
assert_eq!(buffer.len(), 2);
// Update first entity
buffer.add(PersistenceOp::UpsertComponent {
entity_id: entity1,
component_type: "Transform".to_string(),
data: vec![3],
})?;
// Still 2 operations (first was replaced in-place)
assert_eq!(buffer.len(), 2);
Ok(())
}
#[test] #[test]
fn test_add_with_default_priority() { fn test_add_with_default_priority() {
let mut buffer = WriteBuffer::new(100); let mut buffer = WriteBuffer::new(100);
let node_id = NodeId::new_v4();
// Add CRDT operation using default priority // Add CRDT operation using default priority
buffer.add_with_default_priority(PersistenceOp::LogOperation { buffer
node_id: "node1".to_string(), .add_with_default_priority(PersistenceOp::LogOperation {
sequence: 1, node_id,
operation: vec![1, 2, 3], sequence: 1,
}); operation: vec![1, 2, 3],
})
.unwrap();
// Should be tracked as Critical // Should be tracked as Critical
assert_eq!(buffer.highest_priority, FlushPriority::Critical); assert_eq!(buffer.highest_priority, FlushPriority::Critical);
assert!(buffer.first_critical_time.is_some()); assert!(buffer.first_critical_time.is_some());
} }
#[test]
fn test_oversized_component_returns_error() {
let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4();
// Create 11MB component (exceeds 10MB limit)
let oversized_data = vec![0u8; 11 * 1024 * 1024];
let result = buffer.add(PersistenceOp::UpsertComponent {
entity_id,
component_type: "HugeComponent".to_string(),
data: oversized_data,
});
// Should return error, not panic
assert!(result.is_err());
match result {
| Err(crate::persistence::PersistenceError::ComponentTooLarge {
component_type,
size_bytes,
max_bytes,
}) => {
assert_eq!(component_type, "HugeComponent");
assert_eq!(size_bytes, 11 * 1024 * 1024);
assert_eq!(max_bytes, MAX_COMPONENT_SIZE_BYTES);
},
| _ => panic!("Expected ComponentTooLarge error"),
}
// Buffer should be unchanged
assert_eq!(buffer.len(), 0);
}
#[test]
fn test_max_size_component_succeeds() {
let mut buffer = WriteBuffer::new(100);
let entity_id = EntityId::new_v4();
// Create exactly 10MB component (at limit)
let max_data = vec![0u8; 10 * 1024 * 1024];
let result = buffer.add(PersistenceOp::UpsertComponent {
entity_id,
component_type: "MaxComponent".to_string(),
data: max_data,
});
assert!(result.is_ok());
assert_eq!(buffer.len(), 1);
}
#[test]
fn test_oversized_operation_returns_error() {
let mut buffer = WriteBuffer::new(100);
let oversized_op = vec![0u8; 11 * 1024 * 1024];
let result = buffer.add(PersistenceOp::LogOperation {
node_id: uuid::Uuid::new_v4(),
sequence: 1,
operation: oversized_op,
});
assert!(result.is_err());
match result {
| Err(crate::persistence::PersistenceError::ComponentTooLarge {
component_type,
..
}) => {
assert_eq!(component_type, "Operation");
},
| _ => panic!("Expected ComponentTooLarge error for Operation"),
}
}
#[test]
fn test_write_buffer_never_panics_property() {
// Property test: WriteBuffer should never panic on any size
let sizes = [
0,
1000,
1_000_000,
5_000_000,
10_000_000, // Exactly at limit
10_000_001, // Just over limit
11_000_000,
100_000_000,
];
for size in sizes {
let mut buffer = WriteBuffer::new(100);
let data = vec![0u8; size];
let result = buffer.add(PersistenceOp::UpsertComponent {
entity_id: uuid::Uuid::new_v4(),
component_type: "TestComponent".to_string(),
data,
});
// Should never panic, always return Ok or Err
match result {
| Ok(_) => assert!(
size <= MAX_COMPONENT_SIZE_BYTES,
"Size {} should have failed",
size
),
| Err(_) => assert!(
size > MAX_COMPONENT_SIZE_BYTES,
"Size {} should have succeeded",
size
),
}
}
}
} }

View File

@@ -1,7 +1,4 @@
use std::ops::{ use std::ops::Deref;
Deref,
DerefMut,
};
use chrono::{ use chrono::{
DateTime, DateTime,
@@ -23,7 +20,7 @@ use serde::{
// Re-export the Synced derive macro // Re-export the Synced derive macro
pub use sync_macros::Synced; pub use sync_macros::Synced;
pub type NodeId = String; pub type NodeId = uuid::Uuid;
/// Transparent wrapper for synced values /// Transparent wrapper for synced values
/// ///
@@ -70,12 +67,14 @@ impl<T: Clone> SyncedValue<T> {
{ {
self.value = other.value.clone(); self.value = other.value.clone();
self.timestamp = other.timestamp; self.timestamp = other.timestamp;
self.node_id = other.node_id.clone(); self.node_id = other.node_id; // UUID is Copy, no need to clone
} }
} }
} }
// Allow transparent access to the inner value // Allow transparent read-only access to the inner value
// Note: DerefMut is intentionally NOT implemented to preserve LWW semantics
// Use `.set()` method to update values, which properly updates timestamps
impl<T: Clone> Deref for SyncedValue<T> { impl<T: Clone> Deref for SyncedValue<T> {
type Target = T; type Target = T;
@@ -84,12 +83,6 @@ impl<T: Clone> Deref for SyncedValue<T> {
} }
} }
impl<T: Clone> DerefMut for SyncedValue<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.value
}
}
/// Wrapper for a sync message that goes over gossip /// Wrapper for a sync message that goes over gossip
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SyncMessage<T> { pub struct SyncMessage<T> {
@@ -143,7 +136,7 @@ pub trait Syncable: Sized {
/// Create a sync message for an operation /// Create a sync message for an operation
fn create_sync_message(&self, op: Self::Operation) -> SyncMessage<Self::Operation> { fn create_sync_message(&self, op: Self::Operation) -> SyncMessage<Self::Operation> {
SyncMessage::new(self.node_id().clone(), op) SyncMessage::new(*self.node_id(), op) // UUID is Copy, dereference instead of clone
} }
} }
@@ -153,15 +146,17 @@ mod tests {
#[test] #[test]
fn test_synced_value() { fn test_synced_value() {
let mut val = SyncedValue::new(42, "node1".to_string()); let node1 = uuid::Uuid::new_v4();
let mut val = SyncedValue::new(42, node1);
assert_eq!(*val.get(), 42); assert_eq!(*val.get(), 42);
val.set(100, "node1".to_string()); val.set(100, node1);
assert_eq!(*val.get(), 100); assert_eq!(*val.get(), 100);
// Test LWW semantics // Test LWW semantics
let node2 = uuid::Uuid::new_v4();
let old_time = Utc::now() - chrono::Duration::seconds(10); let old_time = Utc::now() - chrono::Duration::seconds(10);
val.apply_lww(50, old_time, "node2".to_string()); val.apply_lww(50, old_time, node2);
assert_eq!(*val.get(), 100); // Should not update with older timestamp assert_eq!(*val.get(), 100); // Should not update with older timestamp
} }
@@ -172,13 +167,48 @@ mod tests {
value: i32, value: i32,
} }
let node1 = uuid::Uuid::new_v4();
let op = TestOp { value: 42 }; let op = TestOp { value: 42 };
let msg = SyncMessage::new("node1".to_string(), op); let msg = SyncMessage::new(node1, op);
let bytes = msg.to_bytes().unwrap(); let bytes = msg.to_bytes().unwrap();
let decoded = SyncMessage::<TestOp>::from_bytes(&bytes).unwrap(); let decoded = SyncMessage::<TestOp>::from_bytes(&bytes).unwrap();
assert_eq!(decoded.node_id, "node1"); assert_eq!(decoded.node_id, node1);
assert_eq!(decoded.operation.value, 42); assert_eq!(decoded.operation.value, 42);
} }
#[test]
fn test_uuid_comparison() {
let node1 = uuid::Uuid::from_u128(1);
let node2 = uuid::Uuid::from_u128(2);
println!("node1: {}", node1);
println!("node2: {}", node2);
println!("node2 > node1: {}", node2 > node1);
assert!(node2 > node1, "UUID from_u128(2) should be > from_u128(1)");
}
#[test]
fn test_lww_tiebreaker() {
let node1 = uuid::Uuid::from_u128(1);
let node2 = uuid::Uuid::from_u128(2);
// Create SyncedValue FIRST, then capture a timestamp that's guaranteed to be newer
let mut lww = SyncedValue::new(100, node1);
std::thread::sleep(std::time::Duration::from_millis(1)); // Ensure ts is after init
let ts = Utc::now();
// Apply update from node1 at timestamp ts
lww.apply_lww(100, ts, node1);
println!("After node1 update: value={}, ts={:?}, node={}", lww.get(), lww.timestamp, lww.node_id);
// Apply conflicting update from node2 at SAME timestamp
lww.apply_lww(200, ts, node2);
println!("After node2 update: value={}, ts={:?}, node={}", lww.get(), lww.timestamp, lww.node_id);
// node2 > node1, so value2 should win
assert_eq!(*lww.get(), 200, "Higher node_id should win tiebreaker");
}
} }

View File

@@ -0,0 +1,7 @@
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc f2e56c98051c9da4146af4236447b4b8572a5990b84ae6e64fd93be95fe029df # shrinks to value1 = -733535506, value2 = -37242108

View File

@@ -0,0 +1,366 @@
//! Property-based tests for CRDT invariants
//!
//! This module uses proptest to verify that our CRDT implementations maintain
//! their mathematical properties under all possible inputs and operation
//! sequences.
use lib::{
networking::{
NodeId,
VectorClock,
},
persistence::{
EntityId,
PersistenceError,
PersistenceOp,
WriteBuffer,
},
sync::SyncedValue,
};
use proptest::prelude::*;
// ============================================================================
// VectorClock Property Tests
// ============================================================================
/// Generate arbitrary NodeId (UUID)
///
/// Maps 16 arbitrary bytes directly into a UUID so proptest's shrinking
/// works over the raw byte representation.
fn arb_node_id() -> impl Strategy<Value = NodeId> {
    any::<[u8; 16]>().prop_map(uuid::Uuid::from_bytes)
}
/// Generate arbitrary VectorClock with 1-10 nodes
///
/// Each generated node is incremented `count` times (0..100), since the
/// clock only exposes `increment`, not direct counter writes.
fn arb_vector_clock() -> impl Strategy<Value = VectorClock> {
    prop::collection::vec((arb_node_id(), 0u64..100), 1..10).prop_map(|entries| {
        entries
            .into_iter()
            .fold(VectorClock::new(), |mut clock, (node_id, count)| {
                (0..count).for_each(|_| clock.increment(node_id));
                clock
            })
    })
}
proptest! {
    // Together these properties establish that VectorClock merge forms a
    // join-semilattice (idempotent + commutative + associative, with merge as
    // the least upper bound) and that happened_before is a strict partial
    // order — the invariants CRDT convergence arguments rely on.

    /// Test: VectorClock merge is idempotent
    /// Property: merge(A, A) = A
    #[test]
    fn vector_clock_merge_idempotent(clock in arb_vector_clock()) {
        let mut merged = clock.clone();
        merged.merge(&clock);
        prop_assert_eq!(merged, clock);
    }

    /// Test: VectorClock merge is commutative
    /// Property: merge(A, B) = merge(B, A)
    #[test]
    fn vector_clock_merge_commutative(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        let mut result1 = clock_a.clone();
        result1.merge(&clock_b);

        let mut result2 = clock_b.clone();
        result2.merge(&clock_a);

        prop_assert_eq!(result1, result2);
    }

    /// Test: VectorClock merge is associative
    /// Property: merge(merge(A, B), C) = merge(A, merge(B, C))
    #[test]
    fn vector_clock_merge_associative(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock(),
        clock_c in arb_vector_clock()
    ) {
        // (A merge B) merge C
        let mut result1 = clock_a.clone();
        result1.merge(&clock_b);
        result1.merge(&clock_c);

        // A merge (B merge C)
        let mut temp = clock_b.clone();
        temp.merge(&clock_c);
        let mut result2 = clock_a.clone();
        result2.merge(&temp);

        prop_assert_eq!(result1, result2);
    }

    /// Test: happened_before is transitive
    /// Property: If A < B and B < C, then A < C
    #[test]
    fn vector_clock_happened_before_transitive(node_id in arb_node_id()) {
        // Build a strictly increasing chain A < B < C by incrementing the
        // same node once per step.
        let mut clock_a = VectorClock::new();
        clock_a.increment(node_id);

        let mut clock_b = clock_a.clone();
        clock_b.increment(node_id);

        let mut clock_c = clock_b.clone();
        clock_c.increment(node_id);

        prop_assert!(clock_a.happened_before(&clock_b));
        prop_assert!(clock_b.happened_before(&clock_c));
        prop_assert!(clock_a.happened_before(&clock_c)); // Transitivity
    }

    /// Test: happened_before is antisymmetric
    /// Property: If A < B, then NOT (B < A)
    #[test]
    fn vector_clock_happened_before_antisymmetric(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        // Note: when the random clocks are concurrent (neither < the other)
        // this case is vacuously true — only ordered pairs are checked.
        if clock_a.happened_before(&clock_b) {
            prop_assert!(!clock_b.happened_before(&clock_a));
        }
    }

    /// Test: A clock never happens before itself
    /// Property: NOT (A < A)
    #[test]
    fn vector_clock_not_happened_before_self(clock in arb_vector_clock()) {
        prop_assert!(!clock.happened_before(&clock));
    }

    /// Test: Merge creates upper bound
    /// Property: If C = merge(A, B), then A ≤ C and B ≤ C
    #[test]
    fn vector_clock_merge_upper_bound(
        clock_a in arb_vector_clock(),
        clock_b in arb_vector_clock()
    ) {
        let mut merged = clock_a.clone();
        merged.merge(&clock_b);

        // A ≤ merged (either happened_before or equal)
        prop_assert!(clock_a.happened_before(&merged) || clock_a == merged);

        // B ≤ merged (either happened_before or equal)
        prop_assert!(clock_b.happened_before(&merged) || clock_b == merged);
    }
}
// ============================================================================
// WriteBuffer Property Tests
// ============================================================================
/// Generate arbitrary EntityId (UUID)
///
/// Same construction as `arb_node_id`: 16 arbitrary bytes mapped straight
/// into a UUID.
fn arb_entity_id() -> impl Strategy<Value = EntityId> {
    any::<[u8; 16]>().prop_map(uuid::Uuid::from_bytes)
}
/// Generate arbitrary component name
///
/// Names look like Rust type identifiers: a capital letter followed by up to
/// 20 alphanumerics.
fn arb_component_type() -> impl Strategy<Value = String> {
    const NAME_PATTERN: &str = "[A-Z][a-zA-Z0-9]{0,20}";
    prop::string::string_regex(NAME_PATTERN).unwrap()
}
/// Generate arbitrary component data (small to avoid size limits)
///
/// Payloads are capped at under 1000 bytes so the size-limit check never
/// triggers in unrelated properties.
fn arb_component_data() -> impl Strategy<Value = Vec<u8>> {
    prop::collection::vec(any::<u8>(), 0usize..1000)
}
proptest! {
    /// Test: WriteBuffer index consistency after multiple operations
    /// Property: Index always points to valid operations in the buffer
    #[test]
    fn write_buffer_index_consistency(
        operations in prop::collection::vec(
            (arb_entity_id(), arb_component_type(), arb_component_data()),
            1..50
        )
    ) {
        let mut buffer = WriteBuffer::new(1000);

        for (entity_id, component_type, data) in operations {
            let op = PersistenceOp::UpsertComponent {
                entity_id,
                component_type,
                data,
            };

            // Should never fail for valid data
            let result = buffer.add(op);
            prop_assert!(result.is_ok());
        }

        // After all operations, buffer length should be valid
        // (coalescing may make it less than the number of adds, but it must
        // never exceed the configured capacity)
        prop_assert!(buffer.len() <= 1000);
    }

    /// Test: WriteBuffer deduplication correctness
    /// Property: Adding same (entity, component) twice keeps only latest
    #[test]
    fn write_buffer_deduplication(
        entity_id in arb_entity_id(),
        component_type in arb_component_type(),
        data1 in arb_component_data(),
        data2 in arb_component_data()
    ) {
        let mut buffer = WriteBuffer::new(100);

        // Add first version
        let op1 = PersistenceOp::UpsertComponent {
            entity_id,
            component_type: component_type.clone(),
            data: data1.clone(),
        };
        prop_assert!(buffer.add(op1).is_ok());

        // Add second version (should replace)
        let op2 = PersistenceOp::UpsertComponent {
            entity_id,
            component_type: component_type.clone(),
            data: data2.clone(),
        };
        prop_assert!(buffer.add(op2).is_ok());

        // Should only have 1 operation
        prop_assert_eq!(buffer.len(), 1);

        // Should be the latest data
        let ops = buffer.take_operations();
        prop_assert_eq!(ops.len(), 1);
        if let PersistenceOp::UpsertComponent { data, .. } = &ops[0] {
            prop_assert_eq!(data, &data2);
        }
    }

    /// Test: WriteBuffer respects size limits
    /// Property: Operations larger than MAX_SIZE are rejected
    ///
    /// Note: the payload content is fixed (all zeros) rather than randomized —
    /// only the size check matters here, and randomizing 11MB per case would
    /// be needlessly expensive.
    #[test]
    fn write_buffer_respects_size_limits(
        entity_id in arb_entity_id(),
        component_type in arb_component_type(),
    ) {
        let mut buffer = WriteBuffer::new(100);

        // 11,000,000 bytes — definitely above the component size cap
        let oversized_data = vec![0u8; 11_000_000];

        let op = PersistenceOp::UpsertComponent {
            entity_id,
            component_type,
            data: oversized_data,
        };

        let result = buffer.add(op);
        prop_assert!(result.is_err());

        // Verify it's the right error type
        if let Err(PersistenceError::ComponentTooLarge { .. }) = result {
            // Expected
        } else {
            prop_assert!(false, "Expected ComponentTooLarge error");
        }
    }
}
}
// ============================================================================
// LWW (Last-Write-Wins) Property Tests
// ============================================================================
proptest! {
    /// Test: LWW convergence
    /// Property: Two replicas applying same updates in different order converge
    #[test]
    fn lww_convergence(
        node1 in arb_node_id(),
        node2 in arb_node_id(),
        initial_value in any::<i32>(),
        value1 in any::<i32>(),
        value2 in any::<i32>(),
    ) {
        // Create two replicas with same initial value
        let mut replica_a = SyncedValue::new(initial_value, node1);
        let mut replica_b = SyncedValue::new(initial_value, node1);

        // Create two updates; ts2 is strictly newer so update2 must win on
        // both replicas regardless of the application order below.
        let ts1 = chrono::Utc::now();
        let ts2 = ts1 + chrono::Duration::milliseconds(100);

        // Replica A applies updates in order: update1, update2
        replica_a.apply_lww(value1, ts1, node1);
        replica_a.apply_lww(value2, ts2, node2);

        // Replica B applies updates in reverse: update2, update1
        replica_b.apply_lww(value2, ts2, node2);
        replica_b.apply_lww(value1, ts1, node1);

        // Both should converge to same value (latest timestamp wins)
        prop_assert_eq!(*replica_a.get(), *replica_b.get());
        prop_assert_eq!(*replica_a.get(), value2); // ts2 is newer
    }

    /// Test: LWW merge idempotence
    /// Property: Merging the same value multiple times has no effect
    #[test]
    fn lww_merge_idempotent(
        node_id in arb_node_id(),
        value in any::<i32>(),
    ) {
        let original = SyncedValue::new(value, node_id);
        let mut replica = original.clone();

        // Merge with itself multiple times
        replica.merge(&original);
        replica.merge(&original);
        replica.merge(&original);

        prop_assert_eq!(*replica.get(), *original.get());
    }

    /// Test: LWW respects timestamp ordering
    /// Property: Older updates don't overwrite newer ones
    #[test]
    fn lww_respects_timestamp(
        node_id in arb_node_id(),
        old_value in any::<i32>(),
        new_value in any::<i32>(),
    ) {
        let mut lww = SyncedValue::new(old_value, node_id);

        let old_ts = chrono::Utc::now();
        let new_ts = old_ts + chrono::Duration::seconds(10);

        // Apply newer update first
        lww.apply_lww(new_value, new_ts, node_id);

        // Apply older update (should be ignored)
        lww.apply_lww(old_value, old_ts, node_id);

        // Should keep the newer value
        prop_assert_eq!(*lww.get(), new_value);
    }

    /// Test: LWW tiebreaker uses node_id
    /// Property: When timestamps equal, higher node_id wins
    #[test]
    fn lww_tiebreaker(
        value1 in any::<i32>(),
        value2 in any::<i32>(),
    ) {
        // Fixed node ids with a known order (from_u128(2) > from_u128(1))
        // so the tiebreaker outcome is deterministic.
        let node1 = uuid::Uuid::from_u128(1);
        let node2 = uuid::Uuid::from_u128(2);

        // Create SyncedValue FIRST, then capture a timestamp that's guaranteed to be newer
        let mut lww = SyncedValue::new(value1, node1);
        std::thread::sleep(std::time::Duration::from_millis(1)); // Ensure ts is after init
        let ts = chrono::Utc::now();

        // Apply update from node1 at timestamp ts
        lww.apply_lww(value1, ts, node1);

        // Apply conflicting update from node2 at SAME timestamp
        lww.apply_lww(value2, ts, node2);

        // node2 > node1, so value2 should win
        prop_assert_eq!(*lww.get(), value2);
    }
}

View File

@@ -5,12 +5,19 @@
use std::{ use std::{
path::PathBuf, path::PathBuf,
time::{Duration, Instant}, time::{
Duration,
Instant,
},
}; };
use anyhow::Result; use anyhow::Result;
use bevy::{ use bevy::{
app::{App, ScheduleRunnerPlugin}, MinimalPlugins,
app::{
App,
ScheduleRunnerPlugin,
},
ecs::{ ecs::{
component::Component, component::Component,
reflect::ReflectComponent, reflect::ReflectComponent,
@@ -18,23 +25,40 @@ use bevy::{
}, },
prelude::*, prelude::*,
reflect::Reflect, reflect::Reflect,
MinimalPlugins,
}; };
use futures_lite::StreamExt; use futures_lite::StreamExt;
use iroh::{Endpoint, protocol::Router}; use iroh::{
Endpoint,
protocol::Router,
};
use iroh_gossip::{ use iroh_gossip::{
api::{GossipReceiver, GossipSender}, api::{
GossipReceiver,
GossipSender,
},
net::Gossip, net::Gossip,
proto::TopicId, proto::TopicId,
}; };
use lib::{ use lib::{
networking::{ networking::{
GossipBridge, NetworkedEntity, NetworkedTransform, NetworkingConfig, NetworkingPlugin, GossipBridge,
Synced, VersionedMessage, NetworkedEntity,
NetworkedTransform,
NetworkingConfig,
NetworkingPlugin,
Synced,
VersionedMessage,
},
persistence::{
Persisted,
PersistenceConfig,
PersistencePlugin,
}, },
persistence::{PersistenceConfig, PersistencePlugin, Persisted},
}; };
use serde::{Deserialize, Serialize}; use serde::{
Deserialize,
Serialize,
};
use sync_macros::Synced as SyncedDerive; use sync_macros::Synced as SyncedDerive;
use tempfile::TempDir; use tempfile::TempDir;
use uuid::Uuid; use uuid::Uuid;
@@ -68,8 +92,12 @@ struct TestHealth {
// ============================================================================ // ============================================================================
mod test_utils { mod test_utils {
use rusqlite::{
Connection,
OptionalExtension,
};
use super::*; use super::*;
use rusqlite::{Connection, OptionalExtension};
/// Test context that manages temporary directories with RAII cleanup /// Test context that manages temporary directories with RAII cleanup
pub struct TestContext { pub struct TestContext {
@@ -134,12 +162,11 @@ mod test_utils {
let conn = Connection::open(db_path)?; let conn = Connection::open(db_path)?;
let entity_id_bytes = entity_id.as_bytes(); let entity_id_bytes = entity_id.as_bytes();
let data_result: std::result::Result<Vec<u8>, rusqlite::Error> = conn let data_result: std::result::Result<Vec<u8>, rusqlite::Error> = conn.query_row(
.query_row( "SELECT data FROM components WHERE entity_id = ?1 AND component_type = ?2",
"SELECT data FROM components WHERE entity_id = ?1 AND component_type = ?2", rusqlite::params![entity_id_bytes.as_slice(), component_type],
rusqlite::params![entity_id_bytes.as_slice(), component_type], |row| row.get(0),
|row| row.get(0), );
);
let data = data_result.optional()?; let data = data_result.optional()?;
@@ -161,11 +188,9 @@ mod test_utils {
pub fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App { pub fn create_test_app(node_id: Uuid, db_path: PathBuf, bridge: GossipBridge) -> App {
let mut app = App::new(); let mut app = App::new();
app.add_plugins( app.add_plugins(MinimalPlugins.set(ScheduleRunnerPlugin::run_loop(
MinimalPlugins.set(ScheduleRunnerPlugin::run_loop(Duration::from_secs_f64( Duration::from_secs_f64(1.0 / 60.0),
1.0 / 60.0, )))
))),
)
.insert_resource(bridge) .insert_resource(bridge)
.add_plugins(NetworkingPlugin::new(NetworkingConfig { .add_plugins(NetworkingPlugin::new(NetworkingConfig {
node_id, node_id,
@@ -233,8 +258,7 @@ mod test_utils {
check_fn: F, check_fn: F,
) -> Result<()> ) -> Result<()>
where where
F: Fn(&mut World, &mut World) -> bool, F: Fn(&mut World, &mut World) -> bool, {
{
let start = Instant::now(); let start = Instant::now();
let mut tick_count = 0; let mut tick_count = 0;
@@ -245,12 +269,20 @@ mod test_utils {
tick_count += 1; tick_count += 1;
if tick_count % 50 == 0 { if tick_count % 50 == 0 {
println!("Waiting for sync... tick {} ({:.1}s elapsed)", tick_count, start.elapsed().as_secs_f32()); println!(
"Waiting for sync... tick {} ({:.1}s elapsed)",
tick_count,
start.elapsed().as_secs_f32()
);
} }
// Check condition // Check condition
if check_fn(app1.world_mut(), app2.world_mut()) { if check_fn(app1.world_mut(), app2.world_mut()) {
println!("Sync completed after {} ticks ({:.3}s)", tick_count, start.elapsed().as_secs_f32()); println!(
"Sync completed after {} ticks ({:.3}s)",
tick_count,
start.elapsed().as_secs_f32()
);
return Ok(()); return Ok(());
} }
@@ -305,19 +337,27 @@ mod test_utils {
static_provider.add_endpoint_info(addr.clone()); static_provider.add_endpoint_info(addr.clone());
} }
endpoint.discovery().add(static_provider); endpoint.discovery().add(static_provider);
println!(" Added {} bootstrap peers to static discovery", bootstrap_count); println!(
" Added {} bootstrap peers to static discovery",
bootstrap_count
);
// Explicitly connect to bootstrap peers // Explicitly connect to bootstrap peers
println!(" Connecting to bootstrap peers..."); println!(" Connecting to bootstrap peers...");
for addr in &bootstrap_addrs { for addr in &bootstrap_addrs {
match endpoint.connect(addr.clone(), iroh_gossip::ALPN).await { match endpoint.connect(addr.clone(), iroh_gossip::ALPN).await {
Ok(_conn) => println!(" ✓ Connected to bootstrap peer: {}", addr.id), | Ok(_conn) => println!(" ✓ Connected to bootstrap peer: {}", addr.id),
Err(e) => println!(" ✗ Failed to connect to bootstrap peer {}: {}", addr.id, e), | Err(e) => {
println!(" ✗ Failed to connect to bootstrap peer {}: {}", addr.id, e)
},
} }
} }
} }
println!(" Subscribing to topic with {} bootstrap peers...", bootstrap_count); println!(
" Subscribing to topic with {} bootstrap peers...",
bootstrap_count
);
// Subscribe to the topic (the IDs now have addresses via discovery) // Subscribe to the topic (the IDs now have addresses via discovery)
let subscribe_handle = gossip.subscribe(topic_id, bootstrap_ids).await?; let subscribe_handle = gossip.subscribe(topic_id, bootstrap_ids).await?;
@@ -332,9 +372,11 @@ mod test_utils {
println!(" Waiting for join to complete (with timeout)..."); println!(" Waiting for join to complete (with timeout)...");
// Use a timeout in case mDNS discovery takes a while or fails // Use a timeout in case mDNS discovery takes a while or fails
match tokio::time::timeout(Duration::from_secs(3), receiver.joined()).await { match tokio::time::timeout(Duration::from_secs(3), receiver.joined()).await {
Ok(Ok(())) => println!(" Join completed!"), | Ok(Ok(())) => println!(" Join completed!"),
Ok(Err(e)) => println!(" Join error: {}", e), | Ok(Err(e)) => println!(" Join error: {}", e),
Err(_) => println!(" Join timeout - proceeding anyway (mDNS may still connect later)"), | Err(_) => {
println!(" Join timeout - proceeding anyway (mDNS may still connect later)")
},
} }
} else { } else {
println!(" No bootstrap peers - skipping join wait (first node in swarm)"); println!(" No bootstrap peers - skipping join wait (first node in swarm)");
@@ -352,8 +394,7 @@ mod test_utils {
} }
/// Setup a pair of iroh-gossip nodes connected to the same topic /// Setup a pair of iroh-gossip nodes connected to the same topic
pub async fn setup_gossip_pair( pub async fn setup_gossip_pair() -> Result<(
) -> Result<(
Endpoint, Endpoint,
Endpoint, Endpoint,
Router, Router,
@@ -370,13 +411,15 @@ mod test_utils {
let (ep1, _gossip1, router1, bridge1) = init_gossip_node(topic_id, vec![]).await?; let (ep1, _gossip1, router1, bridge1) = init_gossip_node(topic_id, vec![]).await?;
println!("Node 1 initialized with ID: {}", ep1.addr().id); println!("Node 1 initialized with ID: {}", ep1.addr().id);
// Get node 1's full address (ID + network addresses) for node 2 to bootstrap from // Get node 1's full address (ID + network addresses) for node 2 to bootstrap
// from
let node1_addr = ep1.addr().clone(); let node1_addr = ep1.addr().clone();
println!("Node 1 full address: {:?}", node1_addr); println!("Node 1 full address: {:?}", node1_addr);
// Initialize node 2 with node 1's full address as bootstrap peer // Initialize node 2 with node 1's full address as bootstrap peer
println!("Initializing node 2 with bootstrap peer: {}", node1_addr.id); println!("Initializing node 2 with bootstrap peer: {}", node1_addr.id);
let (ep2, _gossip2, router2, bridge2) = init_gossip_node(topic_id, vec![node1_addr]).await?; let (ep2, _gossip2, router2, bridge2) =
init_gossip_node(topic_id, vec![node1_addr]).await?;
println!("Node 2 initialized with ID: {}", ep2.addr().id); println!("Node 2 initialized with ID: {}", ep2.addr().id);
// Give mDNS and gossip time to discover peers // Give mDNS and gossip time to discover peers
@@ -387,7 +430,8 @@ mod test_utils {
Ok((ep1, ep2, router1, router2, bridge1, bridge2)) Ok((ep1, ep2, router1, router2, bridge1, bridge2))
} }
/// Spawn background tasks to forward messages between iroh-gossip and GossipBridge /// Spawn background tasks to forward messages between iroh-gossip and
/// GossipBridge
fn spawn_gossip_bridge_tasks( fn spawn_gossip_bridge_tasks(
sender: GossipSender, sender: GossipSender,
mut receiver: GossipReceiver, mut receiver: GossipReceiver,
@@ -403,18 +447,27 @@ mod test_utils {
// Poll the bridge's outgoing queue // Poll the bridge's outgoing queue
if let Some(versioned_msg) = bridge_out.try_recv_outgoing() { if let Some(versioned_msg) = bridge_out.try_recv_outgoing() {
msg_count += 1; msg_count += 1;
println!("[Node {}] Sending message #{} via gossip", node_id, msg_count); println!(
"[Node {}] Sending message #{} via gossip",
node_id, msg_count
);
// Serialize the message // Serialize the message
match bincode::serialize(&versioned_msg) { match bincode::serialize(&versioned_msg) {
Ok(bytes) => { | Ok(bytes) => {
// Broadcast via gossip // Broadcast via gossip
if let Err(e) = sender.broadcast(bytes.into()).await { if let Err(e) = sender.broadcast(bytes.into()).await {
eprintln!("[Node {}] Failed to broadcast message: {}", node_id, e); eprintln!("[Node {}] Failed to broadcast message: {}", node_id, e);
} else { } else {
println!("[Node {}] Message #{} broadcasted successfully", node_id, msg_count); println!(
"[Node {}] Message #{} broadcasted successfully",
node_id, msg_count
);
} }
} },
Err(e) => eprintln!("[Node {}] Failed to serialize message for broadcast: {}", node_id, e), | Err(e) => eprintln!(
"[Node {}] Failed to serialize message for broadcast: {}",
node_id, e
),
} }
} }
@@ -431,34 +484,52 @@ mod test_utils {
loop { loop {
// Receive from gossip (GossipReceiver is a Stream) // Receive from gossip (GossipReceiver is a Stream)
match tokio::time::timeout(Duration::from_millis(100), receiver.next()).await { match tokio::time::timeout(Duration::from_millis(100), receiver.next()).await {
Ok(Some(Ok(event))) => { | Ok(Some(Ok(event))) => {
println!("[Node {}] Received gossip event: {:?}", node_id, std::mem::discriminant(&event)); println!(
"[Node {}] Received gossip event: {:?}",
node_id,
std::mem::discriminant(&event)
);
if let iroh_gossip::api::Event::Received(msg) = event { if let iroh_gossip::api::Event::Received(msg) = event {
msg_count += 1; msg_count += 1;
println!("[Node {}] Received message #{} from gossip", node_id, msg_count); println!(
"[Node {}] Received message #{} from gossip",
node_id, msg_count
);
// Deserialize the message // Deserialize the message
match bincode::deserialize::<VersionedMessage>(&msg.content) { match bincode::deserialize::<VersionedMessage>(&msg.content) {
Ok(versioned_msg) => { | Ok(versioned_msg) => {
// Push to bridge's incoming queue // Push to bridge's incoming queue
if let Err(e) = bridge_in.push_incoming(versioned_msg) { if let Err(e) = bridge_in.push_incoming(versioned_msg) {
eprintln!("[Node {}] Failed to push to bridge incoming: {}", node_id, e); eprintln!(
"[Node {}] Failed to push to bridge incoming: {}",
node_id, e
);
} else { } else {
println!("[Node {}] Message #{} pushed to bridge incoming", node_id, msg_count); println!(
"[Node {}] Message #{} pushed to bridge incoming",
node_id, msg_count
);
} }
} },
Err(e) => eprintln!("[Node {}] Failed to deserialize gossip message: {}", node_id, e), | Err(e) => eprintln!(
"[Node {}] Failed to deserialize gossip message: {}",
node_id, e
),
} }
} }
} },
Ok(Some(Err(e))) => eprintln!("[Node {}] Gossip receiver error: {}", node_id, e), | Ok(Some(Err(e))) => {
Ok(None) => { eprintln!("[Node {}] Gossip receiver error: {}", node_id, e)
},
| Ok(None) => {
// Stream ended // Stream ended
println!("[Node {}] Gossip stream ended", node_id); println!("[Node {}] Gossip stream ended", node_id);
break; break;
} },
Err(_) => { | Err(_) => {
// Timeout, no message available // Timeout, no message available
} },
} }
} }
}); });
@@ -500,12 +571,15 @@ async fn test_basic_entity_sync() -> Result<()> {
// Node 1 spawns entity // Node 1 spawns entity
let entity_id = Uuid::new_v4(); let entity_id = Uuid::new_v4();
println!("Spawning entity {} on node 1", entity_id); println!("Spawning entity {} on node 1", entity_id);
let spawned_entity = app1.world_mut().spawn(( let spawned_entity = app1
NetworkedEntity::with_id(entity_id, node1_id), .world_mut()
TestPosition { x: 10.0, y: 20.0 }, .spawn((
Persisted::with_id(entity_id), NetworkedEntity::with_id(entity_id, node1_id),
Synced, TestPosition { x: 10.0, y: 20.0 },
)).id(); Persisted::with_id(entity_id),
Synced,
))
.id();
// IMPORTANT: Trigger change detection for persistence // IMPORTANT: Trigger change detection for persistence
// Bevy only marks components as "changed" when mutated, not on spawn // Bevy only marks components as "changed" when mutated, not on spawn
@@ -536,7 +610,11 @@ async fn test_basic_entity_sync() -> Result<()> {
query.iter(w2).map(|ne| ne.network_id).collect() query.iter(w2).map(|ne| ne.network_id).collect()
}; };
if !all_networked.is_empty() { if !all_networked.is_empty() {
println!(" Node 2 has {} networked entities: {:?}", all_networked.len(), all_networked); println!(
" Node 2 has {} networked entities: {:?}",
all_networked.len(),
all_networked
);
println!(" Looking for: {}", entity_id); println!(" Looking for: {}", entity_id);
} }
false false
@@ -565,7 +643,11 @@ async fn test_basic_entity_sync() -> Result<()> {
} }
// Verify entity synced to node 2 (in-memory check) // Verify entity synced to node 2 (in-memory check)
assert_entity_synced(app2.world_mut(), entity_id, TestPosition { x: 10.0, y: 20.0 })?; assert_entity_synced(
app2.world_mut(),
entity_id,
TestPosition { x: 10.0, y: 20.0 },
)?;
println!("✓ Entity synced in-memory on node 2"); println!("✓ Entity synced in-memory on node 2");
// Give persistence system time to flush to disk // Give persistence system time to flush to disk
@@ -586,7 +668,11 @@ async fn test_basic_entity_sync() -> Result<()> {
); );
assert!( assert!(
component_exists_in_db(&ctx1.db_path(), entity_id, "sync_integration_headless::TestPosition")?, component_exists_in_db(
&ctx1.db_path(),
entity_id,
"sync_integration_headless::TestPosition"
)?,
"TestPosition component should exist in Node 1 database" "TestPosition component should exist in Node 1 database"
); );
@@ -616,7 +702,11 @@ async fn test_basic_entity_sync() -> Result<()> {
); );
assert!( assert!(
component_exists_in_db(&ctx2.db_path(), entity_id, "sync_integration_headless::TestPosition")?, component_exists_in_db(
&ctx2.db_path(),
entity_id,
"sync_integration_headless::TestPosition"
)?,
"TestPosition component should exist in Node 2 database after sync" "TestPosition component should exist in Node 2 database after sync"
); );
@@ -666,12 +756,15 @@ async fn test_bidirectional_sync() -> Result<()> {
// Node 1 spawns entity A // Node 1 spawns entity A
let entity_a = Uuid::new_v4(); let entity_a = Uuid::new_v4();
let entity_a_bevy = app1.world_mut().spawn(( let entity_a_bevy = app1
NetworkedEntity::with_id(entity_a, node1_id), .world_mut()
TestPosition { x: 1.0, y: 2.0 }, .spawn((
Persisted::with_id(entity_a), NetworkedEntity::with_id(entity_a, node1_id),
Synced, TestPosition { x: 1.0, y: 2.0 },
)).id(); Persisted::with_id(entity_a),
Synced,
))
.id();
// Trigger persistence for entity A // Trigger persistence for entity A
{ {
@@ -685,12 +778,15 @@ async fn test_bidirectional_sync() -> Result<()> {
// Node 2 spawns entity B // Node 2 spawns entity B
let entity_b = Uuid::new_v4(); let entity_b = Uuid::new_v4();
let entity_b_bevy = app2.world_mut().spawn(( let entity_b_bevy = app2
NetworkedEntity::with_id(entity_b, node2_id), .world_mut()
TestPosition { x: 3.0, y: 4.0 }, .spawn((
Persisted::with_id(entity_b), NetworkedEntity::with_id(entity_b, node2_id),
Synced, TestPosition { x: 3.0, y: 4.0 },
)).id(); Persisted::with_id(entity_b),
Synced,
))
.id();
// Trigger persistence for entity B // Trigger persistence for entity B
{ {
@@ -709,10 +805,8 @@ async fn test_bidirectional_sync() -> Result<()> {
.await?; .await?;
// Verify both nodes have both entities // Verify both nodes have both entities
assert_entity_synced(app1.world_mut(), entity_b, TestPosition { x: 3.0, y: 4.0 }) assert_entity_synced(app1.world_mut(), entity_b, TestPosition { x: 3.0, y: 4.0 })?;
?; assert_entity_synced(app2.world_mut(), entity_a, TestPosition { x: 1.0, y: 2.0 })?;
assert_entity_synced(app2.world_mut(), entity_a, TestPosition { x: 1.0, y: 2.0 })
?;
println!("✓ Bidirectional sync test passed"); println!("✓ Bidirectional sync test passed");
@@ -742,13 +836,16 @@ async fn test_concurrent_conflict_resolution() -> Result<()> {
// Spawn shared entity on node 1 with Transform (which IS tracked for changes) // Spawn shared entity on node 1 with Transform (which IS tracked for changes)
let entity_id = Uuid::new_v4(); let entity_id = Uuid::new_v4();
let entity_bevy = app1.world_mut().spawn(( let entity_bevy = app1
NetworkedEntity::with_id(entity_id, node1_id), .world_mut()
NetworkedTransform::default(), .spawn((
Transform::from_xyz(0.0, 0.0, 0.0), NetworkedEntity::with_id(entity_id, node1_id),
Persisted::with_id(entity_id), NetworkedTransform::default(),
Synced, Transform::from_xyz(0.0, 0.0, 0.0),
)).id(); Persisted::with_id(entity_id),
Synced,
))
.id();
// Trigger persistence // Trigger persistence
{ {
@@ -771,20 +868,44 @@ async fn test_concurrent_conflict_resolution() -> Result<()> {
// Check what components the entity has on each node // Check what components the entity has on each node
{ {
let world1 = app1.world_mut(); let world1 = app1.world_mut();
let mut query1 = world1.query::<(Entity, &NetworkedEntity, Option<&NetworkedTransform>, &Transform)>(); let mut query1 = world1.query::<(
Entity,
&NetworkedEntity,
Option<&NetworkedTransform>,
&Transform,
)>();
println!("Node 1 entities:"); println!("Node 1 entities:");
for (entity, ne, nt, t) in query1.iter(world1) { for (entity, ne, nt, t) in query1.iter(world1) {
println!(" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})", println!(
entity, ne.network_id, nt.is_some(), t.translation.x, t.translation.y, t.translation.z); " Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
entity,
ne.network_id,
nt.is_some(),
t.translation.x,
t.translation.y,
t.translation.z
);
} }
} }
{ {
let world2 = app2.world_mut(); let world2 = app2.world_mut();
let mut query2 = world2.query::<(Entity, &NetworkedEntity, Option<&NetworkedTransform>, &Transform)>(); let mut query2 = world2.query::<(
Entity,
&NetworkedEntity,
Option<&NetworkedTransform>,
&Transform,
)>();
println!("Node 2 entities:"); println!("Node 2 entities:");
for (entity, ne, nt, t) in query2.iter(world2) { for (entity, ne, nt, t) in query2.iter(world2) {
println!(" Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})", println!(
entity, ne.network_id, nt.is_some(), t.translation.x, t.translation.y, t.translation.z); " Entity {:?}: NetworkedEntity({:?}), NetworkedTransform={}, Transform=({}, {}, {})",
entity,
ne.network_id,
nt.is_some(),
t.translation.x,
t.translation.y,
t.translation.z
);
} }
} }
@@ -834,11 +955,14 @@ async fn test_concurrent_conflict_resolution() -> Result<()> {
let t2 = transforms2[0]; let t2 = transforms2[0];
// Check if they converged (within floating point tolerance) // Check if they converged (within floating point tolerance)
let converged = (t1.translation.x - t2.translation.x).abs() < 0.01 let converged = (t1.translation.x - t2.translation.x).abs() < 0.01 &&
&& (t1.translation.y - t2.translation.y).abs() < 0.01; (t1.translation.y - t2.translation.y).abs() < 0.01;
if converged { if converged {
println!("✓ Nodes converged to: ({}, {})", t1.translation.x, t1.translation.y); println!(
"✓ Nodes converged to: ({}, {})",
t1.translation.x, t1.translation.y
);
} }
converged converged
@@ -876,10 +1000,7 @@ async fn test_persistence_crash_recovery() -> Result<()> {
app.world_mut().spawn(( app.world_mut().spawn((
NetworkedEntity::with_id(entity_id, node_id), NetworkedEntity::with_id(entity_id, node_id),
TestPosition { TestPosition { x: 100.0, y: 200.0 },
x: 100.0,
y: 200.0,
},
Persisted::with_id(entity_id), Persisted::with_id(entity_id),
Synced, Synced,
)); ));
@@ -905,8 +1026,12 @@ async fn test_persistence_crash_recovery() -> Result<()> {
app.update(); app.update();
// Verify entity loaded from database // Verify entity loaded from database
assert_entity_synced(app.world_mut(), entity_id, TestPosition { x: 100.0, y: 200.0 }) assert_entity_synced(
.map_err(|e| anyhow::anyhow!("Persistence recovery failed: {}", e))?; app.world_mut(),
entity_id,
TestPosition { x: 100.0, y: 200.0 },
)
.map_err(|e| anyhow::anyhow!("Persistence recovery failed: {}", e))?;
println!("✓ Crash recovery test passed"); println!("✓ Crash recovery test passed");
} }

View File

@@ -1,8 +1,16 @@
//! Minimal test to verify Transform change detection works //! Minimal test to verify Transform change detection works
use std::sync::{
Arc,
Mutex,
};
use bevy::prelude::*; use bevy::prelude::*;
use lib::networking::{NetworkedEntity, NetworkedTransform, Synced}; use lib::networking::{
use std::sync::{Arc, Mutex}; NetworkedEntity,
NetworkedTransform,
Synced,
};
use uuid::Uuid; use uuid::Uuid;
#[test] #[test]
@@ -11,33 +19,46 @@ fn test_transform_change_detection_basic() {
app.add_plugins(MinimalPlugins); app.add_plugins(MinimalPlugins);
// Add the auto_detect system // Add the auto_detect system
app.add_systems(Update, lib::networking::auto_detect_transform_changes_system); app.add_systems(
Update,
lib::networking::auto_detect_transform_changes_system,
);
// Add a test system that runs AFTER auto_detect to check if NetworkedEntity was changed // Add a test system that runs AFTER auto_detect to check if NetworkedEntity was
// We need to check DURING the frame because change detection is cleared after each frame // changed We need to check DURING the frame because change detection is
// cleared after each frame
let was_changed = Arc::new(Mutex::new(false)); let was_changed = Arc::new(Mutex::new(false));
let was_changed_clone = was_changed.clone(); let was_changed_clone = was_changed.clone();
app.add_systems(Update, move |query: Query<&NetworkedEntity, Changed<NetworkedEntity>>| { app.add_systems(
let count = query.iter().count(); Update,
if count > 0 { move |query: Query<&NetworkedEntity, Changed<NetworkedEntity>>| {
println!("✓ Test system detected {} changed NetworkedEntity components", count); let count = query.iter().count();
*was_changed_clone.lock().unwrap() = true; if count > 0 {
} else { println!(
println!(" Test system detected 0 changed NetworkedEntity components"); " Test system detected {} changed NetworkedEntity components",
} count
}); );
*was_changed_clone.lock().unwrap() = true;
} else {
println!("✗ Test system detected 0 changed NetworkedEntity components");
}
},
);
// Spawn an entity with Transform and NetworkedTransform // Spawn an entity with Transform and NetworkedTransform
let node_id = Uuid::new_v4(); let node_id = Uuid::new_v4();
let entity_id = Uuid::new_v4(); let entity_id = Uuid::new_v4();
let _entity = app.world_mut().spawn(( let _entity = app
NetworkedEntity::with_id(entity_id, node_id), .world_mut()
NetworkedTransform::default(), .spawn((
Transform::from_xyz(0.0, 0.0, 0.0), NetworkedEntity::with_id(entity_id, node_id),
Synced, NetworkedTransform::default(),
)).id(); Transform::from_xyz(0.0, 0.0, 0.0),
Synced,
))
.id();
// Run one update to clear initial change detection // Run one update to clear initial change detection
println!("First update (clearing initial change detection)..."); println!("First update (clearing initial change detection)...");
@@ -62,5 +83,8 @@ fn test_transform_change_detection_basic() {
// Check if our test system detected the change // Check if our test system detected the change
let result = *was_changed.lock().unwrap(); let result = *was_changed.lock().unwrap();
println!("Was NetworkedEntity marked as changed? {}", result); println!("Was NetworkedEntity marked as changed? {}", result);
assert!(result, "NetworkedEntity should be marked as changed after Transform modification"); assert!(
result,
"NetworkedEntity should be marked as changed after Transform modification"
);
} }

View File

@@ -1,7 +1,8 @@
use proc_macro::TokenStream; use proc_macro::TokenStream;
use quote::quote; use quote::quote;
use syn::{ use syn::{
parse_macro_input, DeriveInput, DeriveInput,
parse_macro_input,
}; };
/// Sync strategy types /// Sync strategy types
@@ -16,11 +17,11 @@ enum SyncStrategy {
impl SyncStrategy { impl SyncStrategy {
fn from_str(s: &str) -> Result<Self, String> { fn from_str(s: &str) -> Result<Self, String> {
match s { match s {
"LastWriteWins" => Ok(SyncStrategy::LastWriteWins), | "LastWriteWins" => Ok(SyncStrategy::LastWriteWins),
"Set" => Ok(SyncStrategy::Set), | "Set" => Ok(SyncStrategy::Set),
"Sequence" => Ok(SyncStrategy::Sequence), | "Sequence" => Ok(SyncStrategy::Sequence),
"Custom" => Ok(SyncStrategy::Custom), | "Custom" => Ok(SyncStrategy::Custom),
_ => Err(format!( | _ => Err(format!(
"Unknown strategy '{}'. Choose one of: \"LastWriteWins\", \"Set\", \"Sequence\", \"Custom\"", "Unknown strategy '{}'. Choose one of: \"LastWriteWins\", \"Set\", \"Sequence\", \"Custom\"",
s s
)), )),
@@ -29,10 +30,12 @@ impl SyncStrategy {
fn to_tokens(&self) -> proc_macro2::TokenStream { fn to_tokens(&self) -> proc_macro2::TokenStream {
match self { match self {
SyncStrategy::LastWriteWins => quote! { lib::networking::SyncStrategy::LastWriteWins }, | SyncStrategy::LastWriteWins => {
SyncStrategy::Set => quote! { lib::networking::SyncStrategy::Set }, quote! { lib::networking::SyncStrategy::LastWriteWins }
SyncStrategy::Sequence => quote! { lib::networking::SyncStrategy::Sequence }, },
SyncStrategy::Custom => quote! { lib::networking::SyncStrategy::Custom }, | SyncStrategy::Set => quote! { lib::networking::SyncStrategy::Set },
| SyncStrategy::Sequence => quote! { lib::networking::SyncStrategy::Sequence },
| SyncStrategy::Custom => quote! { lib::networking::SyncStrategy::Custom },
} }
} }
} }
@@ -68,7 +71,7 @@ impl SyncAttributes {
let strategy_str = value.value(); let strategy_str = value.value();
strategy = Some( strategy = Some(
SyncStrategy::from_str(&strategy_str) SyncStrategy::from_str(&strategy_str)
.map_err(|e| syn::Error::new_spanned(&value, e))? .map_err(|e| syn::Error::new_spanned(&value, e))?,
); );
Ok(()) Ok(())
} else if meta.path.is_ident("persist") { } else if meta.path.is_ident("persist") {
@@ -92,7 +95,7 @@ impl SyncAttributes {
"Missing required attribute `version`\n\ "Missing required attribute `version`\n\
\n\ \n\
= help: Add #[sync(version = 1, strategy = \"...\")] to your struct\n\ = help: Add #[sync(version = 1, strategy = \"...\")] to your struct\n\
= note: See documentation: https://docs.rs/lonni/sync/strategies.html" = note: See documentation: https://docs.rs/lonni/sync/strategies.html",
) )
})?; })?;
@@ -103,7 +106,7 @@ impl SyncAttributes {
\n\ \n\
= help: Choose one of: \"LastWriteWins\", \"Set\", \"Sequence\", \"Custom\"\n\ = help: Choose one of: \"LastWriteWins\", \"Set\", \"Sequence\", \"Custom\"\n\
= help: Add #[sync(version = 1, strategy = \"LastWriteWins\")] to your struct\n\ = help: Add #[sync(version = 1, strategy = \"LastWriteWins\")] to your struct\n\
= note: See documentation: https://docs.rs/lonni/sync/strategies.html" = note: See documentation: https://docs.rs/lonni/sync/strategies.html",
) )
})?; })?;
@@ -141,8 +144,8 @@ pub fn derive_synced(input: TokenStream) -> TokenStream {
// Parse attributes // Parse attributes
let attrs = match SyncAttributes::parse(&input) { let attrs = match SyncAttributes::parse(&input) {
Ok(attrs) => attrs, | Ok(attrs) => attrs,
Err(e) => return TokenStream::from(e.to_compile_error()), | Err(e) => return TokenStream::from(e.to_compile_error()),
}; };
let name = &input.ident; let name = &input.ident;
@@ -200,15 +203,34 @@ fn generate_deserialize(_input: &DeriveInput, _name: &syn::Ident) -> proc_macro2
/// Generate merge logic based on strategy /// Generate merge logic based on strategy
fn generate_merge(input: &DeriveInput, strategy: &SyncStrategy) -> proc_macro2::TokenStream { fn generate_merge(input: &DeriveInput, strategy: &SyncStrategy) -> proc_macro2::TokenStream {
match strategy { match strategy {
SyncStrategy::LastWriteWins => generate_lww_merge(input), | SyncStrategy::LastWriteWins => generate_lww_merge(input),
SyncStrategy::Set => generate_set_merge(input), | SyncStrategy::Set => generate_set_merge(input),
SyncStrategy::Sequence => generate_sequence_merge(input), | SyncStrategy::Sequence => generate_sequence_merge(input),
SyncStrategy::Custom => generate_custom_merge(input), | SyncStrategy::Custom => generate_custom_merge(input),
}
}
/// Generate hash calculation code for tiebreaking in concurrent merges
///
/// Returns a TokenStream that computes hashes for both local and remote values
/// and compares them for deterministic conflict resolution.
fn generate_hash_tiebreaker() -> proc_macro2::TokenStream {
quote! {
let local_hash = {
let bytes = bincode::serialize(self).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
let remote_hash = {
let bytes = bincode::serialize(&remote).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
} }
} }
/// Generate Last-Write-Wins merge logic /// Generate Last-Write-Wins merge logic
fn generate_lww_merge(_input: &DeriveInput) -> proc_macro2::TokenStream { fn generate_lww_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
let hash_tiebreaker = generate_hash_tiebreaker();
quote! { quote! {
use tracing::info; use tracing::info;
@@ -228,14 +250,7 @@ fn generate_lww_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
lib::networking::ClockComparison::Concurrent => { lib::networking::ClockComparison::Concurrent => {
// Tiebreaker: Compare serialized representations for deterministic choice // Tiebreaker: Compare serialized representations for deterministic choice
// In a real implementation, we'd use node_id, but for now use a simple hash // In a real implementation, we'd use node_id, but for now use a simple hash
let local_hash = { #hash_tiebreaker
let bytes = bincode::serialize(self).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
let remote_hash = {
let bytes = bincode::serialize(&remote).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
if remote_hash > local_hash { if remote_hash > local_hash {
info!( info!(
@@ -256,8 +271,11 @@ fn generate_lww_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
/// Generate OR-Set merge logic /// Generate OR-Set merge logic
/// ///
/// For OR-Set strategy, the component must contain an OrSet<T> field. /// For OR-Set strategy, the component must contain an OrSet<T> field.
/// We merge by calling the OrSet's merge method which implements add-wins semantics. /// We merge by calling the OrSet's merge method which implements add-wins
/// semantics.
fn generate_set_merge(_input: &DeriveInput) -> proc_macro2::TokenStream { fn generate_set_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
let hash_tiebreaker = generate_hash_tiebreaker();
quote! { quote! {
use tracing::info; use tracing::info;
@@ -284,14 +302,7 @@ fn generate_set_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
lib::networking::ClockComparison::Concurrent => { lib::networking::ClockComparison::Concurrent => {
// In a full implementation, we would merge the OrSet here // In a full implementation, we would merge the OrSet here
// For now, use LWW with tiebreaker as fallback // For now, use LWW with tiebreaker as fallback
let local_hash = { #hash_tiebreaker
let bytes = bincode::serialize(self).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
let remote_hash = {
let bytes = bincode::serialize(&remote).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
if remote_hash > local_hash { if remote_hash > local_hash {
*self = remote; *self = remote;
@@ -309,6 +320,8 @@ fn generate_set_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
/// For Sequence strategy, the component must contain an Rga<T> field. /// For Sequence strategy, the component must contain an Rga<T> field.
/// We merge by calling the Rga's merge method which maintains causal ordering. /// We merge by calling the Rga's merge method which maintains causal ordering.
fn generate_sequence_merge(_input: &DeriveInput) -> proc_macro2::TokenStream { fn generate_sequence_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
let hash_tiebreaker = generate_hash_tiebreaker();
quote! { quote! {
use tracing::info; use tracing::info;
@@ -335,14 +348,7 @@ fn generate_sequence_merge(_input: &DeriveInput) -> proc_macro2::TokenStream {
lib::networking::ClockComparison::Concurrent => { lib::networking::ClockComparison::Concurrent => {
// In a full implementation, we would merge the Rga here // In a full implementation, we would merge the Rga here
// For now, use LWW with tiebreaker as fallback // For now, use LWW with tiebreaker as fallback
let local_hash = { #hash_tiebreaker
let bytes = bincode::serialize(self).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
let remote_hash = {
let bytes = bincode::serialize(&remote).unwrap_or_default();
bytes.iter().fold(0u64, |acc, &b| acc.wrapping_mul(31).wrapping_add(b as u64))
};
if remote_hash > local_hash { if remote_hash > local_hash {
*self = remote; *self = remote;

View File

@@ -1,7 +1,11 @@
/// Basic tests for the Synced derive macro /// Basic tests for the Synced derive macro
use bevy::prelude::*; use bevy::prelude::*;
use lib::networking::{ use lib::networking::{
ClockComparison, ComponentMergeDecision, SyncComponent, SyncStrategy, Synced, ClockComparison,
ComponentMergeDecision,
SyncComponent,
SyncStrategy,
Synced,
}; };
use sync_macros::Synced as SyncedDerive; use sync_macros::Synced as SyncedDerive;
@@ -56,7 +60,7 @@ fn test_health_lww_merge_concurrent() {
// Either TookRemote or KeptLocal depending on hash // Either TookRemote or KeptLocal depending on hash
assert!( assert!(
decision == ComponentMergeDecision::TookRemote || decision == ComponentMergeDecision::TookRemote ||
decision == ComponentMergeDecision::KeptLocal decision == ComponentMergeDecision::KeptLocal
); );
} }