chore: batch of accumulated fixes — engine Shutdown command, DHT-based peer discovery, offline-capable delta/lock broadcasting, and LocalSelection resource

Signed-off-by: Sienna Meridian Satterwhite <sienna@r3t.io>
This commit is contained in:
2025-12-28 17:39:27 +00:00
parent f9f289f5b2
commit d1d3aec8aa
47 changed files with 2248 additions and 438 deletions

View File

@@ -46,4 +46,7 @@ pub enum EngineCommand {
// Clock
TickClock,
// Lifecycle
Shutdown,
}

View File

@@ -44,13 +44,19 @@ impl EngineCore {
// Process commands as they arrive
while let Some(cmd) = self.handle.command_rx.recv().await {
self.handle_command(cmd).await;
let should_continue = self.handle_command(cmd).await;
if !should_continue {
tracing::info!("EngineCore received shutdown command");
break;
}
}
tracing::info!("EngineCore shutting down (command channel closed)");
tracing::info!("EngineCore shutting down");
}
async fn handle_command(&mut self, cmd: EngineCommand) {
/// Handle a command from Bevy
/// Returns true to continue running, false to shutdown
async fn handle_command(&mut self, cmd: EngineCommand) -> bool {
match cmd {
EngineCommand::StartNetworking { session_id } => {
self.start_networking(session_id).await;
@@ -74,11 +80,16 @@ impl EngineCore {
EngineCommand::TickClock => {
self.tick_clock();
}
EngineCommand::Shutdown => {
tracing::info!("Shutdown command received");
return false;
}
// TODO: Handle CRDT and lock commands in Phase 2
_ => {
tracing::debug!("Unhandled command: {:?}", cmd);
}
}
true
}
fn tick_clock(&mut self) {
@@ -98,6 +109,25 @@ impl EngineCore {
tracing::info!("Starting networking initialization for session {}", session_id.to_code());
// Test mode: Skip actual networking and send event immediately
#[cfg(feature = "fast_tests")]
{
let bridge = crate::networking::GossipBridge::new(self.node_id);
let _ = self.handle.event_tx.send(EngineEvent::NetworkingStarted {
session_id: session_id.clone(),
node_id: self.node_id,
bridge,
});
tracing::info!("Networking started (test mode) for session {}", session_id.to_code());
// Create a dummy task that just waits
let task = tokio::spawn(async {
tokio::time::sleep(tokio::time::Duration::from_secs(3600)).await;
});
self.networking_task = Some(task);
return;
}
// Create cancellation token for graceful shutdown
let cancel_token = CancellationToken::new();
let cancel_token_clone = cancel_token.clone();
@@ -105,10 +135,10 @@ impl EngineCore {
// Spawn NetworkingManager initialization in background to avoid blocking
// DHT peer discovery can take 15+ seconds with retries
let event_tx = self.handle.event_tx.clone();
// Create channel for progress updates
let (progress_tx, mut progress_rx) = tokio::sync::mpsc::unbounded_channel();
// Spawn task to forward progress updates to Bevy
let event_tx_clone = event_tx.clone();
let session_id_clone = session_id.clone();
@@ -120,7 +150,7 @@ impl EngineCore {
});
}
});
let task = tokio::spawn(async move {
match NetworkingManager::new(session_id.clone(), Some(progress_tx), cancel_token_clone.clone()).await {
Ok((net_manager, bridge)) => {

View File

@@ -4,9 +4,29 @@ use crate::networking::{NodeId, SessionId, VectorClock};
use bevy::prelude::*;
use uuid::Uuid;
/// Progress stages reported while the networking stack initializes.
///
/// Carried by `EngineEvent::NetworkingInitializing` so the Bevy side can
/// surface connection progress to the user.
#[derive(Debug, Clone)]
pub enum NetworkingInitStatus {
    /// The network endpoint is being created.
    CreatingEndpoint,
    /// The endpoint has been created and is usable.
    EndpointReady,
    /// Searching the DHT for peers in this session.
    DiscoveringPeers {
        /// Human-readable session code being searched for.
        session_code: String,
        /// Discovery attempt number (presumably 1-based — confirm with producer).
        attempt: u8,
    },
    /// One or more peers were discovered.
    PeersFound {
        /// Number of peers found.
        count: usize,
    },
    /// Discovery completed without finding any peers.
    NoPeersFound,
    /// Publishing our presence record to the DHT.
    PublishingToDHT,
    /// Setting up the gossip layer.
    InitializingGossip,
}
#[derive(Debug, Clone)]
pub enum EngineEvent {
// Networking status
NetworkingInitializing {
session_id: SessionId,
status: NetworkingInitStatus,
},
NetworkingStarted {
session_id: SessionId,
node_id: NodeId,

View File

@@ -14,12 +14,13 @@ mod core;
mod events;
mod game_actions;
mod networking;
mod peer_discovery;
mod persistence;
pub use bridge::{EngineBridge, EngineHandle};
pub use commands::EngineCommand;
pub use core::EngineCore;
pub use events::EngineEvent;
pub use events::{EngineEvent, NetworkingInitStatus};
pub use game_actions::GameAction;
pub use networking::NetworkingManager;
pub use persistence::PersistenceManager;

View File

@@ -249,9 +249,31 @@ impl NetworkingManager {
}
Event::NeighborUp(peer) => {
tracing::info!("Peer connected: {}", peer);
// Convert PublicKey to NodeId for Bevy
let peer_bytes = peer.as_bytes();
let mut node_id_bytes = [0u8; 16];
node_id_bytes.copy_from_slice(&peer_bytes[..16]);
let peer_node_id = NodeId::from_bytes(node_id_bytes);
// Notify Bevy of peer join
let _ = event_tx.send(EngineEvent::PeerJoined {
node_id: peer_node_id,
});
}
Event::NeighborDown(peer) => {
tracing::warn!("Peer disconnected: {}", peer);
// Convert PublicKey to NodeId for Bevy
let peer_bytes = peer.as_bytes();
let mut node_id_bytes = [0u8; 16];
node_id_bytes.copy_from_slice(&peer_bytes[..16]);
let peer_node_id = NodeId::from_bytes(node_id_bytes);
// Notify Bevy of peer leave
let _ = event_tx.send(EngineEvent::PeerLeft {
node_id: peer_node_id,
});
}
Event::Lagged => {
tracing::warn!("Event stream lagged");

View File

@@ -0,0 +1,151 @@
//! DHT-based peer discovery for session collaboration
//!
//! Each peer publishes their EndpointId to the DHT using a session-derived pkarr key.
//! Other peers query the DHT to discover all peers in the session.
use anyhow::Result;
use iroh::EndpointId;
use std::time::Duration;
use crate::networking::SessionId;
/// Publish our `EndpointId` to the DHT under the session-derived pkarr key.
///
/// First resolves the session's current record so previously published peers
/// are preserved, then signs and publishes a packet containing those peers
/// plus ourselves — one `peer=<hex_endpoint_id>` TXT record each, 1-hour TTL.
pub async fn publish_peer_to_dht(
    session_id: &SessionId,
    our_endpoint_id: EndpointId,
    dht_client: &pkarr::Client,
) -> Result<()> {
    use pkarr::dns::{self, rdata};
    use pkarr::dns::rdata::RData;

    let keypair = session_id.to_pkarr_keypair();
    let public_key = keypair.public_key();

    // Decode one "peer=<hex_endpoint_id>" TXT record; None if malformed.
    let decode_peer = |data: &RData| -> Option<EndpointId> {
        let RData::TXT(txt) = data else { return None };
        let txt_str = String::try_from(txt.clone()).ok()?;
        let bytes = hex::decode(txt_str.strip_prefix("peer=")?).ok()?;
        let raw: [u8; 32] = bytes.try_into().ok()?;
        EndpointId::from_bytes(&raw).ok()
    };

    // Collect peers already recorded for this session, excluding ourselves
    // (we always re-add ourselves below).
    let existing_peers: Vec<EndpointId> = match dht_client.resolve(&public_key).await {
        Some(packet) => packet
            .all_resource_records()
            .filter_map(|rr| decode_peer(&rr.rdata))
            .filter(|peer| *peer != our_endpoint_id)
            .collect(),
        None => Vec::new(),
    };

    // Rebuild the full peer list: every existing peer plus ourselves.
    let name = dns::Name::new("_peers").expect("constant");
    let mut builder = pkarr::SignedPacket::builder();
    for peer in existing_peers.iter().chain(std::iter::once(&our_endpoint_id)) {
        let record = format!("peer={}", hex::encode(peer.as_bytes()));
        let mut txt = rdata::TXT::new();
        txt.add_string(&record)?;
        builder = builder.txt(name.clone(), txt.into_owned(), 3600);
    }

    // Sign with the session-derived keypair and publish to the DHT.
    let signed_packet = builder.build(&keypair)?;
    dht_client.publish(&signed_packet, None).await?;

    tracing::info!(
        "Published peer {} to DHT for session {}",
        our_endpoint_id.fmt_short(),
        session_id.to_code()
    );

    Ok(())
}
/// Query the DHT for all peer `EndpointId`s published under this session's key.
///
/// Returns an empty list when no record exists for the session yet.
pub async fn discover_peers_from_dht(
    session_id: &SessionId,
    dht_client: &pkarr::Client,
) -> Result<Vec<EndpointId>> {
    use pkarr::dns::rdata::RData;

    let keypair = session_id.to_pkarr_keypair();
    let public_key = keypair.public_key();

    // No record under the session key means no peer has published yet.
    let Some(signed_packet) = dht_client.resolve(&public_key).await else {
        tracing::debug!("No peers found in DHT for session {}", session_id.to_code());
        return Ok(vec![]);
    };

    // Each peer is stored as a TXT record of the form "peer=<hex_endpoint_id>";
    // silently skip anything that doesn't parse.
    let peers: Vec<EndpointId> = signed_packet
        .all_resource_records()
        .filter_map(|rr| {
            let RData::TXT(txt) = &rr.rdata else { return None };
            let txt_str = String::try_from(txt.clone()).ok()?;
            let bytes = hex::decode(txt_str.strip_prefix("peer=")?).ok()?;
            let raw: [u8; 32] = bytes.try_into().ok()?;
            EndpointId::from_bytes(&raw).ok()
        })
        .collect();

    tracing::info!(
        "Discovered {} peers from DHT for session {}",
        peers.len(),
        session_id.to_code()
    );

    Ok(peers)
}
/// Periodically republishes our presence to the DHT
///
/// Should be called in a background task to maintain our DHT presence.
/// Republishes every 30 minutes (well before the 1-hour TTL expires).
pub async fn maintain_dht_presence(
    session_id: SessionId,
    our_endpoint_id: EndpointId,
    dht_client: pkarr::Client,
) {
    let mut interval = tokio::time::interval(Duration::from_secs(30 * 60)); // 30 minutes
    // `tokio::time::interval`'s first tick completes immediately; consume it so
    // the first republish happens after a full period rather than right away
    // (callers are expected to have published once before spawning this task).
    interval.tick().await;
    loop {
        interval.tick().await;
        if let Err(e) = publish_peer_to_dht(&session_id, our_endpoint_id, &dht_client).await {
            // Non-fatal: the existing DHT record stays valid until its TTL
            // expires, and we retry on the next tick.
            tracing::warn!("Failed to republish to DHT: {}", e);
        }
    }
}

View File

@@ -29,6 +29,7 @@ pub mod networking;
pub mod persistence;
pub mod platform;
pub mod render; // Vendored Bevy rendering (bevy_render + bevy_core_pipeline + bevy_pbr)
pub mod transform; // Vendored Transform with rkyv support
pub mod utils;
pub mod sync;

View File

@@ -156,49 +156,36 @@ impl Default for NetworkedEntity {
#[reflect(Component)]
pub struct NetworkedTransform;
/// Wrapper for a selection component using OR-Set semantics
/// Local selection tracking resource
///
/// Tracks a set of selected entity network IDs. Uses OR-Set (Observed-Remove)
/// CRDT to handle concurrent add/remove operations correctly.
/// This global resource tracks which entities are currently selected by THIS node.
/// It's used in conjunction with the entity lock system to coordinate concurrent editing.
///
/// # OR-Set Semantics
///
/// - Concurrent adds and removes: add wins
/// - Each add has a unique operation ID
/// - Removes reference specific add operation IDs
/// **Selections are local-only UI state** and are NOT synchronized across the network.
/// Each node maintains its own independent selection.
///
/// # Example
///
/// ```
/// use bevy::prelude::*;
/// use libmarathon::networking::{
/// NetworkedEntity,
/// NetworkedSelection,
/// };
/// use libmarathon::networking::LocalSelection;
/// use uuid::Uuid;
///
/// fn create_selection(mut commands: Commands) {
/// let node_id = Uuid::new_v4();
/// let mut selection = NetworkedSelection::new();
/// fn handle_click(mut selection: ResMut<LocalSelection>) {
/// // Clear previous selection
/// selection.clear();
///
/// // Add some entities to the selection
/// selection.selected_ids.insert(Uuid::new_v4());
/// selection.selected_ids.insert(Uuid::new_v4());
///
/// commands.spawn((NetworkedEntity::new(node_id), selection));
/// // Select a new entity
/// selection.insert(Uuid::new_v4());
/// }
/// ```
#[derive(Component, Reflect, Debug, Clone, Default)]
#[reflect(Component)]
pub struct NetworkedSelection {
#[derive(Resource, Debug, Clone, Default)]
pub struct LocalSelection {
/// Set of selected entity network IDs
///
/// This will be synchronized using OR-Set CRDT semantics in later phases.
/// For now, it's a simple HashSet.
pub selected_ids: std::collections::HashSet<uuid::Uuid>,
selected_ids: std::collections::HashSet<uuid::Uuid>,
}
impl NetworkedSelection {
impl LocalSelection {
/// Create a new empty selection
pub fn new() -> Self {
Self {
@@ -207,13 +194,13 @@ impl NetworkedSelection {
}
/// Add an entity to the selection
pub fn add(&mut self, entity_id: uuid::Uuid) {
self.selected_ids.insert(entity_id);
pub fn insert(&mut self, entity_id: uuid::Uuid) -> bool {
self.selected_ids.insert(entity_id)
}
/// Remove an entity from the selection
pub fn remove(&mut self, entity_id: uuid::Uuid) {
self.selected_ids.remove(&entity_id);
pub fn remove(&mut self, entity_id: uuid::Uuid) -> bool {
self.selected_ids.remove(&entity_id)
}
/// Check if an entity is selected
@@ -235,6 +222,11 @@ impl NetworkedSelection {
pub fn is_empty(&self) -> bool {
self.selected_ids.is_empty()
}
/// Get an iterator over selected entity IDs
pub fn iter(&self) -> impl Iterator<Item = &uuid::Uuid> {
self.selected_ids.iter()
}
}
/// Wrapper for a drawing path component using Sequence CRDT semantics
@@ -361,18 +353,18 @@ mod tests {
}
#[test]
fn test_networked_selection() {
let mut selection = NetworkedSelection::new();
fn test_local_selection() {
let mut selection = LocalSelection::new();
let id1 = uuid::Uuid::new_v4();
let id2 = uuid::Uuid::new_v4();
assert!(selection.is_empty());
selection.add(id1);
selection.insert(id1);
assert_eq!(selection.len(), 1);
assert!(selection.contains(id1));
selection.add(id2);
selection.insert(id2);
assert_eq!(selection.len(), 2);
assert!(selection.contains(id2));

View File

@@ -66,10 +66,8 @@ impl NodeVectorClock {
/// App::new().add_systems(Update, generate_delta_system);
/// ```
pub fn generate_delta_system(world: &mut World) {
// Check if bridge exists
if world.get_resource::<GossipBridge>().is_none() {
return;
}
// Works both online and offline - clock increments and operations are recorded
// Broadcast only happens when online
let changed_entities: Vec<(Entity, uuid::Uuid, uuid::Uuid)> = {
let mut query =
@@ -93,7 +91,7 @@ pub fn generate_delta_system(world: &mut World) {
for (entity, network_id, _owner_node_id) in changed_entities {
// Phase 1: Check and update clocks, collect data
let mut system_state: bevy::ecs::system::SystemState<(
Res<GossipBridge>,
Option<Res<GossipBridge>>,
Res<crate::persistence::ComponentTypeRegistryResource>,
ResMut<NodeVectorClock>,
ResMut<LastSyncVersions>,
@@ -144,31 +142,41 @@ pub fn generate_delta_system(world: &mut World) {
// Create EntityDelta
let delta = EntityDelta::new(network_id, node_id, vector_clock.clone(), operations);
// Record in operation log for anti-entropy
// Record in operation log for anti-entropy (works offline!)
if let Some(ref mut log) = operation_log {
log.record_operation(delta.clone());
}
// Wrap in VersionedMessage
let message = VersionedMessage::new(SyncMessage::EntityDelta {
entity_id: delta.entity_id,
node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(),
});
// Broadcast if online
if let Some(ref bridge) = bridge {
// Wrap in VersionedMessage
let message = VersionedMessage::new(SyncMessage::EntityDelta {
entity_id: delta.entity_id,
node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(),
});
// Broadcast
if let Err(e) = bridge.send(message) {
error!("Failed to broadcast EntityDelta: {}", e);
// Broadcast to peers
if let Err(e) = bridge.send(message) {
error!("Failed to broadcast EntityDelta: {}", e);
} else {
debug!(
"Broadcast EntityDelta for entity {:?} with {} operations",
network_id,
delta.operations.len()
);
}
} else {
debug!(
"Broadcast EntityDelta for entity {:?} with {} operations",
network_id,
delta.operations.len()
"Generated EntityDelta for entity {:?} offline (will sync when online)",
network_id
);
last_versions.update(network_id, current_seq);
}
// Update last sync version (both online and offline)
last_versions.update(network_id, current_seq);
delta
};

View File

@@ -47,7 +47,6 @@ use uuid::Uuid;
use crate::networking::{
GossipBridge,
NetworkedSelection,
NodeId,
VersionedMessage,
delta_generation::NodeVectorClock,
@@ -334,10 +333,63 @@ impl EntityLockRegistry {
}
}
/// System to acquire locks when entities are selected
///
/// This system detects when entities are added to the global `LocalSelection`
/// resource and attempts to acquire locks on those entities, broadcasting
/// the request to other peers.
pub fn acquire_locks_on_selection_system(
mut registry: ResMut<EntityLockRegistry>,
node_clock: Res<NodeVectorClock>,
bridge: Option<Res<GossipBridge>>,
selection: Res<crate::networking::LocalSelection>,
) {
// Only run when selection changes
if !selection.is_changed() {
return;
}
let node_id = node_clock.node_id;
// Try to acquire locks for all selected entities
for &entity_id in selection.iter() {
let already_locked = registry.is_locked_by(entity_id, node_id, node_id);
// Only try to acquire if we don't already hold the lock
if !already_locked {
match registry.try_acquire(entity_id, node_id) {
Ok(()) => {
info!("Acquired lock on newly selected entity {}", entity_id);
// Broadcast LockRequest
if let Some(ref bridge) = bridge {
let msg = VersionedMessage::new(SyncMessage::Lock(LockMessage::LockRequest {
entity_id,
node_id,
}));
if let Err(e) = bridge.send(msg) {
error!("Failed to broadcast LockRequest on selection: {}", e);
} else {
debug!("LockRequest broadcast successful for entity {}", entity_id);
}
} else {
warn!("No GossipBridge available to broadcast LockRequest");
}
}
Err(holder) => {
warn!("Failed to acquire lock on selected entity {} (held by {})", entity_id, holder);
}
}
}
}
}
/// System to release locks when entities are deselected
///
/// This system detects when entities are removed from selection and releases
/// any locks held on those entities, broadcasting the release to other peers.
/// This system detects when entities are removed from the global `LocalSelection`
/// resource and releases any locks held on those entities, broadcasting the release
/// to other peers.
///
/// Add to your app as an Update system:
/// ```no_run
@@ -350,42 +402,46 @@ pub fn release_locks_on_deselection_system(
mut registry: ResMut<EntityLockRegistry>,
node_clock: Res<NodeVectorClock>,
bridge: Option<Res<GossipBridge>>,
mut selection_query: Query<&mut NetworkedSelection, Changed<NetworkedSelection>>,
selection: Res<crate::networking::LocalSelection>,
) {
// Only run when selection changes
if !selection.is_changed() {
return;
}
let node_id = node_clock.node_id;
for selection in selection_query.iter_mut() {
// Find entities that were previously locked but are no longer selected
let currently_selected: std::collections::HashSet<Uuid> = selection.selected_ids.clone();
// Check all locks held by this node
let locks_to_release: Vec<Uuid> = registry
.locks
.iter()
.filter(|(entity_id, lock)| {
// Release if held by us and not currently selected
lock.holder == node_id && !selection.contains(**entity_id)
})
.map(|(entity_id, _)| *entity_id)
.collect();
// Check all locks held by this node
let locks_to_release: Vec<Uuid> = registry
.locks
.iter()
.filter(|(entity_id, lock)| {
// Release if held by us and not currently selected
lock.holder == node_id && !currently_selected.contains(entity_id)
})
.map(|(entity_id, _)| *entity_id)
.collect();
if !locks_to_release.is_empty() {
info!("Selection cleared, releasing {} locks", locks_to_release.len());
}
// Release each lock and broadcast
for entity_id in locks_to_release {
if registry.release(entity_id, node_id) {
debug!("Releasing lock on deselected entity {}", entity_id);
// Release each lock and broadcast
for entity_id in locks_to_release {
if registry.release(entity_id, node_id) {
info!("Released lock on deselected entity {}", entity_id);
// Broadcast LockRelease
if let Some(ref bridge) = bridge {
let msg = VersionedMessage::new(SyncMessage::Lock(LockMessage::LockRelease {
entity_id,
node_id,
}));
// Broadcast LockRelease
if let Some(ref bridge) = bridge {
let msg = VersionedMessage::new(SyncMessage::Lock(LockMessage::LockRelease {
entity_id,
node_id,
}));
if let Err(e) = bridge.send(msg) {
error!("Failed to broadcast LockRelease on deselection: {}", e);
} else {
info!("Lock released on deselection: entity {}", entity_id);
}
if let Err(e) = bridge.send(msg) {
error!("Failed to broadcast LockRelease on deselection: {}", e);
} else {
info!("Lock released on deselection: entity {}", entity_id);
}
}
}

View File

@@ -121,8 +121,8 @@ pub fn should_apply_set(local_op: &ComponentOp, remote_op: &ComponentOp) -> bool
// Use the sequence number from the clocks as a simple tiebreaker
// In a real implementation, we'd use the full node IDs
let local_seq: u64 = local_clock.clocks.values().sum();
let remote_seq: u64 = remote_clock.clocks.values().sum();
let local_seq: u64 = local_clock.timestamps.values().sum();
let remote_seq: u64 = remote_clock.timestamps.values().sum();
// Compare clocks
match compare_operations_lww(

View File

@@ -449,7 +449,6 @@ fn build_full_state_from_data(
// Skip networked wrapper components
if type_path.ends_with("::NetworkedEntity") ||
type_path.ends_with("::NetworkedTransform") ||
type_path.ends_with("::NetworkedSelection") ||
type_path.ends_with("::NetworkedDrawingPath")
{
continue;

View File

@@ -26,6 +26,13 @@ pub struct VersionedMessage {
/// The actual sync message
pub message: SyncMessage,
/// Timestamp (nanos since UNIX epoch) to make messages unique
///
/// This prevents iroh-gossip from deduplicating identical messages sent at different times.
/// For example, releasing and re-acquiring a lock sends identical LockRequest messages,
/// but they need to be treated as separate events.
pub timestamp_nanos: u64,
}
impl VersionedMessage {
@@ -34,9 +41,17 @@ impl VersionedMessage {
/// Create a new versioned message with the current protocol version
pub fn new(message: SyncMessage) -> Self {
use std::time::{SystemTime, UNIX_EPOCH};
let timestamp_nanos = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_nanos() as u64;
Self {
version: Self::CURRENT_VERSION,
message,
timestamp_nanos,
}
}
}

View File

@@ -120,11 +120,13 @@ pub fn spawn_networked_entity(
) -> bevy::prelude::Entity {
use bevy::prelude::*;
// Spawn with both NetworkedEntity and Persisted components
// Spawn with NetworkedEntity, Persisted, and Synced components
// The Synced marker triggers auto-insert of NetworkedTransform if entity has Transform
let entity = world
.spawn((
NetworkedEntity::with_id(entity_id, node_id),
crate::persistence::Persisted::with_id(entity_id),
Synced,
))
.id();

View File

@@ -34,6 +34,7 @@ use crate::networking::{
LastSyncVersions,
auto_detect_transform_changes_system,
},
components::{NetworkedEntity, NetworkedTransform},
delta_generation::{
NodeVectorClock,
generate_delta_system,
@@ -43,8 +44,10 @@ use crate::networking::{
cleanup_despawned_entities_system,
register_networked_entities_system,
},
gossip_bridge::GossipBridge,
locks::{
EntityLockRegistry,
acquire_locks_on_selection_system,
broadcast_lock_heartbeats_system,
cleanup_expired_locks_system,
release_locks_on_deselection_system,
@@ -59,6 +62,7 @@ use crate::networking::{
initialize_session_system,
save_session_on_shutdown_system,
},
sync_component::Synced,
tombstones::{
TombstoneRegistry,
garbage_collect_tombstones_system,
@@ -142,6 +146,104 @@ impl SessionSecret {
}
}
/// System that auto-inserts required sync components when the `Synced` marker
/// is detected.
///
/// Runs in PreUpdate and automatically adds:
/// - `NetworkedEntity` with a new UUID and this node's ID
/// - `Persisted` with the same UUID
/// - `NetworkedTransform`, when the entity also has a `Transform`
///
/// Note: Selection is now a global `LocalSelection` resource, not a per-entity
/// component, so nothing selection-related is inserted here.
///
/// This eliminates the need for users to manually add these components when
/// spawning synced entities.
fn auto_insert_sync_components(
    mut commands: Commands,
    query: Query<Entity, (Added<Synced>, Without<NetworkedEntity>)>,
    node_clock: Res<NodeVectorClock>,
    // Only used to test whether the entity carries a Transform.
    transforms: Query<&Transform>,
) {
    let node_id = node_clock.node_id;

    for entity in &query {
        let entity_id = uuid::Uuid::new_v4();

        // Network identity and persistence always travel together and share
        // the same UUID.
        let mut entity_commands = commands.entity(entity);
        entity_commands.insert((
            NetworkedEntity::with_id(entity_id, node_id),
            crate::persistence::Persisted::with_id(entity_id),
        ));

        // Transform sync only makes sense when there is a Transform to sync.
        if transforms.contains(entity) {
            entity_commands.insert(NetworkedTransform);
        }

        debug!("Auto-inserted sync components for entity {:?} (UUID: {})", entity, entity_id);
    }
}
/// System that adds `NetworkedTransform` to networked entities when a
/// `Transform` is added.
///
/// Covers entities received from the network that already carry
/// `NetworkedEntity` and `Synced` but only gain a `Transform` (and therefore
/// need `NetworkedTransform`) later.
fn auto_insert_networked_transform(
    mut commands: Commands,
    query: Query<
        Entity,
        (
            With<NetworkedEntity>,
            With<Synced>,
            Added<Transform>,
            Without<NetworkedTransform>,
        ),
    >,
) {
    query.iter().for_each(|entity| {
        commands.entity(entity).insert(NetworkedTransform);
        debug!("Auto-inserted NetworkedTransform for networked entity {:?}", entity);
    });
}
/// System that triggers anti-entropy sync when going online (GossipBridge added).
///
/// This handles the offline-to-online transition: when GossipBridge is inserted,
/// we send a SyncRequest to trigger anti-entropy and broadcast all operations
/// from the operation log.
///
/// Uses a `Local<bool>` to remember that the request was actually delivered.
/// The flag is only set on a successful send, so a transient send failure is
/// retried on the next frame instead of permanently suppressing the sync.
fn trigger_sync_on_connect(
    mut has_synced: Local<bool>,
    bridge: Res<GossipBridge>,
    node_clock: Res<NodeVectorClock>,
    operation_log: Res<OperationLog>,
) {
    if *has_synced {
        return; // Sync request already delivered
    }

    let op_count = operation_log.total_operations();
    debug!(
        "Going online: triggering anti-entropy sync to broadcast {} offline operations",
        op_count
    );

    // Send a SyncRequest to trigger anti-entropy.
    // This will cause the message_dispatcher to respond with all operations from our log.
    let request = crate::networking::operation_log::build_sync_request(
        node_clock.node_id,
        node_clock.clock.clone(),
    );

    match bridge.send(request) {
        Ok(_) => {
            debug!("Sent SyncRequest to trigger anti-entropy sync");
            // Latch only on success so failures are retried next frame.
            *has_synced = true;
        }
        Err(e) => {
            error!("Failed to send SyncRequest on connect: {}", e);
        }
    }
}
/// Bevy plugin for CRDT networking
///
/// This plugin sets up all systems and resources needed for distributed
@@ -236,7 +338,8 @@ impl Plugin for NetworkingPlugin {
.insert_resource(OperationLog::new())
.insert_resource(TombstoneRegistry::new())
.insert_resource(EntityLockRegistry::new())
.insert_resource(crate::networking::ComponentVectorClocks::new());
.insert_resource(crate::networking::ComponentVectorClocks::new())
.insert_resource(crate::networking::LocalSelection::new());
// Startup systems - initialize session from persistence
app.add_systems(Startup, initialize_session_system);
@@ -245,12 +348,16 @@ impl Plugin for NetworkingPlugin {
app.add_systems(
PreUpdate,
(
// Auto-insert sync components when Synced marker is added (must run first)
auto_insert_sync_components,
// Register new networked entities
register_networked_entities_system,
// Central message dispatcher - handles all incoming messages
// This replaces the individual message handling systems and
// eliminates O(n²) behavior from multiple systems polling the same queue
message_dispatcher_system,
// Auto-insert NetworkedTransform for networked entities when Transform is added
auto_insert_networked_transform,
)
.chain(),
);
@@ -263,11 +370,20 @@ impl Plugin for NetworkingPlugin {
auto_detect_transform_changes_system,
// Handle local entity deletions
handle_local_deletions_system,
// Acquire locks when entities are selected
acquire_locks_on_selection_system,
// Release locks when entities are deselected
release_locks_on_deselection_system,
),
);
// Trigger anti-entropy sync when going online (separate from chain to allow conditional execution)
app.add_systems(
PostUpdate,
trigger_sync_on_connect
.run_if(bevy::ecs::schedule::common_conditions::resource_exists::<GossipBridge>),
);
// PostUpdate systems - generate and send deltas
app.add_systems(
PostUpdate,

View File

@@ -112,6 +112,24 @@ impl SessionId {
*hash.as_bytes()
}
/// Derive deterministic pkarr keypair for DHT-based peer discovery
///
/// Every peer holding the same session code derives the identical keypair,
/// which is used to publish and look up peer EndpointIds in the DHT.
///
/// # Security
/// The session code is the secret - anyone with the code can discover peers.
/// The domain separation prefix ensures no collision with other uses.
pub fn to_pkarr_keypair(&self) -> pkarr::Keypair {
    // Domain-separated BLAKE3 hash of the session UUID yields a 32-byte
    // deterministic secret key.
    let secret_bytes: [u8; 32] = *blake3::Hasher::new()
        .update(b"/app/v1/session-pkarr-key/")
        .update(self.uuid.as_bytes())
        .finalize()
        .as_bytes();
    pkarr::Keypair::from_secret_key(&secret_bytes)
}
/// Get raw UUID
pub fn as_uuid(&self) -> &Uuid {
&self.uuid

View File

@@ -97,33 +97,29 @@ pub trait SyncComponent: Component + Reflect + Sized {
fn merge(&mut self, remote: Self, clock_cmp: ClockComparison) -> ComponentMergeDecision;
}
/// Marker component for entities that should be synced
/// Marker component indicating that an entity should be synchronized across the network.
///
/// Add this to any entity with synced components to enable automatic
/// change detection and synchronization.
/// When this component is added to an entity, the `auto_insert_sync_components` system
/// will automatically add the required infrastructure components:
/// - `NetworkedEntity` - for network synchronization
/// - `Persisted` - for persistence
/// - `NetworkedTransform` - if the entity has a `Transform` component
///
/// # Example
/// ```
/// use bevy::prelude::*;
/// use libmarathon::networking::Synced;
/// use sync_macros::Synced as SyncedDerive;
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize, SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Health(f32);
///
/// #[derive(Component, Reflect, Clone, serde::Serialize, serde::Deserialize, SyncedDerive)]
/// #[sync(version = 1, strategy = "LastWriteWins")]
/// struct Position {
/// x: f32,
/// y: f32,
/// ```no_compile
/// // Define a synced component with the #[synced] attribute
/// #[macros::synced]
/// pub struct CubeMarker {
/// pub color_r: f32,
/// pub size: f32,
/// }
///
/// let mut world = World::new();
/// world.spawn((
/// Health(100.0),
/// Position { x: 0.0, y: 0.0 },
/// Synced, // Marker enables sync
/// // Spawn with just the Synced marker - infrastructure auto-added
/// commands.spawn((
/// CubeMarker::with_color(Color::RED, 1.0),
/// Transform::from_translation(pos),
/// Synced, // Auto-adds NetworkedEntity, Persisted, NetworkedTransform
/// ));
/// ```
#[derive(Component, Reflect, Default, Clone, Copy)]

View File

@@ -220,10 +220,6 @@ pub fn handle_local_deletions_system(
mut operation_log: Option<ResMut<crate::networking::OperationLog>>,
bridge: Option<Res<GossipBridge>>,
) {
let Some(bridge) = bridge else {
return;
};
for (entity, networked) in query.iter() {
// Increment clock for deletion
node_clock.tick();
@@ -250,25 +246,32 @@ pub fn handle_local_deletions_system(
vec![delete_op],
);
// Record in operation log
// Record in operation log (for when we go online later)
if let Some(ref mut log) = operation_log {
log.record_operation(delta.clone());
}
// Broadcast deletion
let message =
crate::networking::VersionedMessage::new(crate::networking::SyncMessage::EntityDelta {
entity_id: delta.entity_id,
node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(),
});
// Broadcast deletion if online
if let Some(ref bridge) = bridge {
let message =
crate::networking::VersionedMessage::new(crate::networking::SyncMessage::EntityDelta {
entity_id: delta.entity_id,
node_id: delta.node_id,
vector_clock: delta.vector_clock.clone(),
operations: delta.operations.clone(),
});
if let Err(e) = bridge.send(message) {
error!("Failed to broadcast Delete operation: {}", e);
if let Err(e) = bridge.send(message) {
error!("Failed to broadcast Delete operation: {}", e);
} else {
info!(
"Broadcast Delete operation for entity {:?}",
networked.network_id
);
}
} else {
info!(
"Broadcast Delete operation for entity {:?}",
"Deleted entity {:?} locally (offline mode - will sync when online)",
networked.network_id
);
}

View File

@@ -54,17 +54,22 @@ pub type NodeId = uuid::Uuid;
#[derive(Debug, Clone, PartialEq, Eq, rkyv::Archive, rkyv::Serialize, rkyv::Deserialize, Default)]
pub struct VectorClock {
/// Map from node ID to logical timestamp
pub clocks: HashMap<NodeId, u64>,
pub timestamps: HashMap<NodeId, u64>,
}
impl VectorClock {
/// Create a new empty vector clock
pub fn new() -> Self {
Self {
clocks: HashMap::new(),
timestamps: HashMap::new(),
}
}
/// Get the number of nodes tracked in this clock
pub fn node_count(&self) -> usize {
self.timestamps.len()
}
/// Increment the clock for a given node
///
/// This should be called by a node before performing a local operation.
@@ -86,7 +91,7 @@ impl VectorClock {
/// assert_eq!(clock.get(node), 2);
/// ```
pub fn increment(&mut self, node_id: NodeId) -> u64 {
let counter = self.clocks.entry(node_id).or_insert(0);
let counter = self.timestamps.entry(node_id).or_insert(0);
*counter += 1;
*counter
}
@@ -95,7 +100,7 @@ impl VectorClock {
///
/// Returns 0 if the node has never been seen in this vector clock.
pub fn get(&self, node_id: NodeId) -> u64 {
self.clocks.get(&node_id).copied().unwrap_or(0)
self.timestamps.get(&node_id).copied().unwrap_or(0)
}
/// Merge another vector clock into this one
@@ -124,8 +129,8 @@ impl VectorClock {
/// assert_eq!(clock1.get(node2), 1);
/// ```
pub fn merge(&mut self, other: &VectorClock) {
for (node_id, &counter) in &other.clocks {
let current = self.clocks.entry(*node_id).or_insert(0);
for (node_id, &counter) in &other.timestamps {
let current = self.timestamps.entry(*node_id).or_insert(0);
*current = (*current).max(counter);
}
}
@@ -158,7 +163,7 @@ impl VectorClock {
let mut any_strictly_less = false;
// Check our nodes in a single pass
for (node_id, &our_counter) in &self.clocks {
for (node_id, &our_counter) in &self.timestamps {
let their_counter = other.get(*node_id);
// Early exit if we have a counter greater than theirs
@@ -175,8 +180,8 @@ impl VectorClock {
// If we haven't found a strictly less counter yet, check if they have
// nodes we don't know about with non-zero values (those count as strictly less)
if !any_strictly_less {
any_strictly_less = other.clocks.iter().any(|(node_id, &their_counter)| {
!self.clocks.contains_key(node_id) && their_counter > 0
any_strictly_less = other.timestamps.iter().any(|(node_id, &their_counter)| {
!self.timestamps.contains_key(node_id) && their_counter > 0
});
}
@@ -250,7 +255,7 @@ mod tests {
#[test]
fn test_new_clock() {
let clock = VectorClock::new();
assert_eq!(clock.clocks.len(), 0);
assert_eq!(clock.timestamps.len(), 0);
}
#[test]

View File

@@ -573,7 +573,7 @@ pub fn save_session_vector_clock(
)?;
// Insert current clock state
for (node_id, &counter) in &clock.clocks {
for (node_id, &counter) in &clock.timestamps {
tx.execute(
"INSERT INTO vector_clock (session_id, node_id, counter, updated_at)
VALUES (?1, ?2, ?3, ?4)",
@@ -608,7 +608,7 @@ pub fn load_session_vector_clock(
for row in rows {
let (node_id_str, counter) = row?;
if let Ok(node_id) = uuid::Uuid::parse_str(&node_id_str) {
clock.clocks.insert(node_id, counter as u64);
clock.timestamps.insert(node_id, counter as u64);
}
}

View File

@@ -39,6 +39,7 @@ mod metrics;
mod migrations;
mod plugin;
pub mod reflection;
mod registered_components;
mod systems;
mod type_registry;
mod types;

View File

@@ -38,6 +38,8 @@ pub struct Persisted {
pub network_id: uuid::Uuid,
}
impl Persisted {
pub fn new() -> Self {
Self {

View File

@@ -0,0 +1,64 @@
//! Component registrations for CRDT synchronization
//!
//! This module registers all components that should be synchronized across
//! the network using the inventory-based type registry.
//!
//! # When to use this file vs `#[synced]` attribute
//!
//! **Use `#[synced]` attribute for:**
//! - Your own component types defined in this codebase
//! - Any type you have source access to
//! - Most game components (entities, markers, etc.)
//! - Example: `#[synced] pub struct CubeMarker { ... }`
//!
//! **Use manual `inventory::submit!` here for:**
//! - Third-party types (Bevy's Transform, external crates)
//! - Types that need custom serialization logic
//! - Types where the serialized format differs from in-memory format
//!
//! # Currently registered external types
//!
//! - `Transform` - Bevy's transform component (needs custom rkyv conversion)
use std::any::TypeId;
// Register Bevy's Transform for synchronization.
// Bevy's Transform has no rkyv derives, so we round-trip through the vendored
// rkyv-compatible `crate::transform::Transform` on both serialize and deserialize.
inventory::submit! {
    crate::persistence::ComponentMeta {
        type_name: "Transform",
        type_path: "bevy::transform::components::transform::Transform",
        type_id: TypeId::of::<bevy::prelude::Transform>(),
        // Bytes -> vendored Transform (rkyv) -> Bevy Transform, boxed as Any.
        deserialize_fn: |bytes: &[u8]| -> anyhow::Result<Box<dyn std::any::Any>> {
            let transform: crate::transform::Transform = rkyv::from_bytes::<crate::transform::Transform, rkyv::rancor::Failure>(bytes)?;
            // Convert back to Bevy Transform (field-by-field via From impls on Vec3/Quat)
            let bevy_transform = bevy::prelude::Transform {
                translation: transform.translation.into(),
                rotation: transform.rotation.into(),
                scale: transform.scale.into(),
            };
            Ok(Box::new(bevy_transform))
        },
        // Returns None when the entity has no Transform component.
        serialize_fn: |world: &bevy::ecs::world::World, entity: bevy::ecs::entity::Entity| -> Option<bytes::Bytes> {
            world.get::<bevy::prelude::Transform>(entity).map(|bevy_transform| {
                // Convert to our rkyv-compatible Transform
                let transform = crate::transform::Transform {
                    translation: bevy_transform.translation.into(),
                    rotation: bevy_transform.rotation.into(),
                    scale: bevy_transform.scale.into(),
                };
                // NOTE(review): panics if serialization fails; acceptable only if
                // rkyv serialization of this plain-data type is infallible in practice.
                let serialized = rkyv::to_bytes::<rkyv::rancor::Failure>(&transform)
                    .expect("Failed to serialize Transform");
                bytes::Bytes::from(serialized.to_vec())
            })
        },
        // Silently no-ops if the boxed value is not a Bevy Transform.
        insert_fn: |entity_mut: &mut bevy::ecs::world::EntityWorldMut, boxed: Box<dyn std::any::Any>| {
            if let Ok(transform) = boxed.downcast::<bevy::prelude::Transform>() {
                entity_mut.insert(*transform);
            }
        },
    }
}

View File

@@ -261,6 +261,51 @@ impl Default for ComponentTypeRegistryResource {
}
}
/// Macro to register a component type with the inventory system
///
/// This generates the necessary serialize/deserialize functions and submits
/// the ComponentMeta to inventory for runtime registration.
///
/// # Example
///
/// ```ignore
/// use bevy::prelude::*;
/// register_component!(Transform, "bevy::transform::components::Transform");
/// ```
#[macro_export]
macro_rules! register_component {
    ($component_type:ty, $type_path:expr) => {
        // Submit component metadata to inventory for runtime registration.
        inventory::submit! {
            $crate::persistence::ComponentMeta {
                type_name: stringify!($component_type),
                type_path: $type_path,
                type_id: std::any::TypeId::of::<$component_type>(),
                // Decode rkyv bytes back into the concrete component type.
                // The error type (`rkyv::rancor::Failure`) is spelled out because
                // returning via `?` into `anyhow::Result` leaves `E` in
                // `from_bytes::<T, E>` ambiguous (any `E: Into<anyhow::Error>`
                // would satisfy it). This also matches the manual Transform
                // registration in `registered_components.rs`.
                deserialize_fn: |bytes: &[u8]| -> anyhow::Result<Box<dyn std::any::Any>> {
                    let component: $component_type =
                        rkyv::from_bytes::<$component_type, rkyv::rancor::Failure>(bytes)?;
                    Ok(Box::new(component))
                },
                // Serialize the entity's component, if present; None means the
                // entity does not carry this component.
                serialize_fn: |world: &bevy::ecs::world::World, entity: bevy::ecs::entity::Entity| -> Option<bytes::Bytes> {
                    world.get::<$component_type>(entity).map(|component| {
                        // NOTE(review): panics on serialization failure; fine only
                        // for plain-data components where rkyv cannot fail.
                        let serialized = rkyv::to_bytes::<rkyv::rancor::Failure>(component)
                            .expect("Failed to serialize component");
                        bytes::Bytes::from(serialized.to_vec())
                    })
                },
                // Insert the deserialized component; silently no-ops if the
                // boxed value is not of the expected type.
                insert_fn: |entity_mut: &mut bevy::ecs::world::EntityWorldMut, boxed: Box<dyn std::any::Any>| {
                    if let Ok(component) = boxed.downcast::<$component_type>() {
                        entity_mut.insert(*component);
                    }
                },
            }
        }
    };
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -118,6 +118,12 @@ fn send_window_closing(app: &mut App, window: Entity) {
.write(WindowClosing { window });
}
/// Queue an `AppExit::Success` message so Bevy's exit and cleanup systems
/// run on the next `update()`.
fn send_app_exit(app: &mut App) {
    let mut exit_messages = app
        .world_mut()
        .resource_mut::<Messages<bevy::app::AppExit>>();
    exit_messages.write(bevy::app::AppExit::Success);
}
impl AppHandler {
/// Initialize the window and transition to Running state.
///
@@ -233,7 +239,10 @@ impl AppHandler {
// Send WindowClosing event
send_window_closing(bevy_app, *bevy_window_entity);
// Run one final update to process close event
// Send AppExit event to trigger cleanup systems
send_app_exit(bevy_app);
// Run one final update to process close events and cleanup
bevy_app.update();
// Don't call finish/cleanup - let Bevy's AppExit handle it

View File

@@ -238,7 +238,10 @@ impl AppHandler {
// Send WindowClosing event
send_window_closing(bevy_app, *bevy_window_entity);
// Run one final update to process close event
// Send AppExit event to trigger cleanup systems
bevy_app.world_mut().send_message(AppExit::Success);
// Run one final update to process close events and cleanup
bevy_app.update();
}

View File

@@ -0,0 +1,63 @@
//! Math primitives with rkyv support
//!
//! Vendored from bevy_math with rkyv derives added.
/// A 3-dimensional vector.
///
/// Plain-data mirror of `bevy::math::Vec3` with rkyv derives so it can be
/// archived for network synchronization. `#[repr(C)]` pins a stable field order.
#[derive(Debug, Clone, Copy, PartialEq)]
#[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)]
#[repr(C)]
pub struct Vec3 {
    pub x: f32,
    pub y: f32,
    pub z: f32,
}
impl Vec3 {
    /// Creates a vector from its three components.
    #[inline]
    pub const fn new(x: f32, y: f32, z: f32) -> Self {
        Self { x, y, z }
    }

    /// The all-zeros vector.
    pub const ZERO: Self = Self::new(0.0, 0.0, 0.0);

    /// The all-ones vector.
    pub const ONE: Self = Self::new(1.0, 1.0, 1.0);
}
/// A quaternion representing an orientation.
///
/// Plain-data mirror of `bevy::math::Quat` with rkyv derives for network
/// synchronization. `#[repr(C)]` pins a stable field order.
#[derive(Debug, Clone, Copy, PartialEq)]
#[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)]
#[repr(C)]
pub struct Quat {
    pub x: f32,
    pub y: f32,
    pub z: f32,
    pub w: f32,
}
impl Quat {
    /// The identity rotation (applies no rotation).
    pub const IDENTITY: Self = Self {
        w: 1.0,
        x: 0.0,
        y: 0.0,
        z: 0.0,
    };
}
// Conversion from bevy_math types
impl From<bevy::math::Vec3> for Vec3 {
    /// Converts Bevy's vector into the serializable mirror type.
    fn from(v: bevy::math::Vec3) -> Self {
        Self::new(v.x, v.y, v.z)
    }
}
impl From<Vec3> for bevy::math::Vec3 {
fn from(v: Vec3) -> Self {
Self::new(v.x, v.y, v.z)
}
}
impl From<bevy::math::Quat> for Quat {
    /// Converts Bevy's quaternion into the serializable mirror type.
    fn from(q: bevy::math::Quat) -> Self {
        let (x, y, z, w) = (q.x, q.y, q.z, q.w);
        Self { x, y, z, w }
    }
}
impl From<Quat> for bevy::math::Quat {
fn from(q: Quat) -> Self {
Self::from_xyzw(q.x, q.y, q.z, q.w)
}
}

View File

@@ -0,0 +1,73 @@
//! Transform component with rkyv support
//!
//! Vendored from bevy_transform with rkyv derives added for network synchronization.
mod math;
pub use math::{Quat, Vec3};
/// Describe the position of an entity. If the entity has a parent, the position is relative
/// to its parent position.
///
/// This is a pure data type used for serialization (rkyv derives) — it carries
/// no ECS behavior. Use `bevy::transform::components::Transform` for actual
/// ECS components; conversions go through the `Vec3`/`Quat` `From` impls.
#[derive(Debug, PartialEq, Clone, Copy)]
#[derive(rkyv::Archive, rkyv::Serialize, rkyv::Deserialize)]
pub struct Transform {
    /// Position of the entity. In 2d, the last value of the `Vec3` is used for z-ordering.
    pub translation: Vec3,
    /// Rotation of the entity.
    pub rotation: Quat,
    /// Scale of the entity.
    pub scale: Vec3,
}
impl Default for Transform {
    /// The identity transform: zero translation, identity rotation, unit scale.
    fn default() -> Self {
        Self::from_translation(Vec3::ZERO)
    }
}
impl Transform {
/// Creates a new [`Transform`] at the position `(x, y, z)`. In 2d, the `z` component
/// is used for z-ordering elements: higher `z`-value will be in front of lower
/// `z`-value.
#[inline]
pub const fn from_xyz(x: f32, y: f32, z: f32) -> Self {
Self::from_translation(Vec3::new(x, y, z))
}
/// Creates a new [`Transform`] with the specified `translation`.
#[inline]
pub const fn from_translation(translation: Vec3) -> Self {
Self {
translation,
rotation: Quat::IDENTITY,
scale: Vec3::ONE,
}
}
/// Creates a new [`Transform`] with the specified `rotation`.
#[inline]
pub const fn from_rotation(rotation: Quat) -> Self {
Self {
translation: Vec3::ZERO,
rotation,
scale: Vec3::ONE,
}
}
/// Creates a new [`Transform`] with the specified `scale`.
#[inline]
pub const fn from_scale(scale: Vec3) -> Self {
Self {
translation: Vec3::ZERO,
rotation: Quat::IDENTITY,
scale,
}
}
}